Blame view

fs/erofs/zdata.c 36.6 KB
29b24f6ca   Gao Xiang   staging: erofs: u...
1
  // SPDX-License-Identifier: GPL-2.0-only
02827e179   Gao Xiang   staging: erofs: a...
2
  /*
02827e179   Gao Xiang   staging: erofs: a...
3
4
5
   * Copyright (C) 2018 HUAWEI, Inc.
   *             http://www.huawei.com/
   * Created by Gao Xiang <gaoxiang25@huawei.com>
02827e179   Gao Xiang   staging: erofs: a...
6
   */
57b78c9fd   Gao Xiang   staging: erofs: r...
7
  #include "zdata.h"
274812334   Gao Xiang   staging: erofs: m...
8
  #include "compress.h"
3883a79ab   Gao Xiang   staging: erofs: i...
9
  #include <linux/prefetch.h>
284db12cf   Chen Gong   staging: erofs: a...
10
  #include <trace/events/erofs.h>
672e54761   Gao Xiang   staging: erofs: l...
11
12
13
14
15
/*
 * a compressed_pages[] placeholder in order to avoid
 * being filled with file pages for in-place decompression.
 * (poison-style sentinel value; never dereferenced, only compared)
 */
#define PAGE_UNALLOCATED     ((void *)0x5F0E4B1D)

/* how to allocate cached pages for a pcluster */
enum z_erofs_cache_alloctype {
	DONTALLOC,	/* don't allocate any cached pages */
	DELAYEDALLOC,	/* delayed allocation (at the time of submitting io) */
};

/*
 * tagged pointer with 1-bit tag for all compressed pages
 * tag 0 - the page is just found with an extra page reference
 */
typedef tagptr1_t compressed_page_t;

/* fold a just-found cached page into a tagged pointer (tag = 1) */
#define tag_compressed_page_justfound(page) \
	tagptr_fold(compressed_page_t, page, 1)
3883a79ab   Gao Xiang   staging: erofs: i...
30
/* global workqueue used for asynchronous decompression work items */
static struct workqueue_struct *z_erofs_workqueue __read_mostly;
/* slab cache backing struct z_erofs_pcluster objects (Z_EROFS_WORKGROUP_SIZE) */
static struct kmem_cache *pcluster_cachep __read_mostly;
3883a79ab   Gao Xiang   staging: erofs: i...
32
33
34
  
/*
 * Tear down the module-wide zip (decompression) machinery created by
 * z_erofs_init_zip_subsystem(): the decompression workqueue first, then
 * the pcluster slab cache.
 */
void z_erofs_exit_zip_subsystem(void)
{
	destroy_workqueue(z_erofs_workqueue);
	kmem_cache_destroy(pcluster_cachep);
}
99634bf38   Gao Xiang   erofs: add "erofs...
38
  static inline int z_erofs_init_workqueue(void)
3883a79ab   Gao Xiang   staging: erofs: i...
39
  {
7dd68b147   Thomas Weißschuh   staging: erofs: u...
40
  	const unsigned int onlinecpus = num_possible_cpus();
97e86a858   Gao Xiang   staging: erofs: t...
41
  	const unsigned int flags = WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE;
3883a79ab   Gao Xiang   staging: erofs: i...
42
43
  
  	/*
97e86a858   Gao Xiang   staging: erofs: t...
44
45
  	 * no need to spawn too many threads, limiting threads could minimum
  	 * scheduling overhead, perhaps per-CPU threads should be better?
3883a79ab   Gao Xiang   staging: erofs: i...
46
  	 */
97e86a858   Gao Xiang   staging: erofs: t...
47
48
  	z_erofs_workqueue = alloc_workqueue("erofs_unzipd", flags,
  					    onlinecpus + onlinecpus / 4);
42d40b4ad   Cristian Sicilia   staging: erofs: u...
49
  	return z_erofs_workqueue ? 0 : -ENOMEM;
3883a79ab   Gao Xiang   staging: erofs: i...
50
  }
99634bf38   Gao Xiang   erofs: add "erofs...
51
  static void z_erofs_pcluster_init_once(void *ptr)
48d4bf3b0   Gao Xiang   staging: erofs: s...
52
  {
97e86a858   Gao Xiang   staging: erofs: t...
53
54
  	struct z_erofs_pcluster *pcl = ptr;
  	struct z_erofs_collection *cl = z_erofs_primarycollection(pcl);
48d4bf3b0   Gao Xiang   staging: erofs: s...
55
  	unsigned int i;
97e86a858   Gao Xiang   staging: erofs: t...
56
57
58
  	mutex_init(&cl->lock);
  	cl->nr_pages = 0;
  	cl->vcnt = 0;
48d4bf3b0   Gao Xiang   staging: erofs: s...
59
  	for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i)
97e86a858   Gao Xiang   staging: erofs: t...
60
  		pcl->compressed_pages[i] = NULL;
48d4bf3b0   Gao Xiang   staging: erofs: s...
61
  }
99634bf38   Gao Xiang   erofs: add "erofs...
62
  static void z_erofs_pcluster_init_always(struct z_erofs_pcluster *pcl)
48d4bf3b0   Gao Xiang   staging: erofs: s...
63
  {
97e86a858   Gao Xiang   staging: erofs: t...
64
  	struct z_erofs_collection *cl = z_erofs_primarycollection(pcl);
48d4bf3b0   Gao Xiang   staging: erofs: s...
65

97e86a858   Gao Xiang   staging: erofs: t...
66
  	atomic_set(&pcl->obj.refcount, 1);
48d4bf3b0   Gao Xiang   staging: erofs: s...
67

97e86a858   Gao Xiang   staging: erofs: t...
68
69
  	DBG_BUGON(cl->nr_pages);
  	DBG_BUGON(cl->vcnt);
48d4bf3b0   Gao Xiang   staging: erofs: s...
70
  }
0a0b7e625   Gao Xiang   staging: erofs: a...
71
  int __init z_erofs_init_zip_subsystem(void)
3883a79ab   Gao Xiang   staging: erofs: i...
72
  {
97e86a858   Gao Xiang   staging: erofs: t...
73
74
  	pcluster_cachep = kmem_cache_create("erofs_compress",
  					    Z_EROFS_WORKGROUP_SIZE, 0,
99634bf38   Gao Xiang   erofs: add "erofs...
75
76
  					    SLAB_RECLAIM_ACCOUNT,
  					    z_erofs_pcluster_init_once);
97e86a858   Gao Xiang   staging: erofs: t...
77
  	if (pcluster_cachep) {
99634bf38   Gao Xiang   erofs: add "erofs...
78
  		if (!z_erofs_init_workqueue())
3883a79ab   Gao Xiang   staging: erofs: i...
79
  			return 0;
97e86a858   Gao Xiang   staging: erofs: t...
80
  		kmem_cache_destroy(pcluster_cachep);
3883a79ab   Gao Xiang   staging: erofs: i...
81
82
83
  	}
  	return -ENOMEM;
  }
97e86a858   Gao Xiang   staging: erofs: t...
84
85
86
/* how the current collection is attached to the chain being built */
enum z_erofs_collectmode {
	COLLECT_SECONDARY,	/* not claimed by this thread at all */
	COLLECT_PRIMARY,	/* claimed, but no chain relationship usable */
	/*
	 * The current collection was the tail of an existing chain, and in
	 * addition the previously processed chained collections have all been
	 * decided to be hooked up to it.
	 * A new chain will be created for the remaining collections which are
	 * not processed yet, therefore different from COLLECT_PRIMARY_FOLLOWED,
	 * the next collection cannot reuse the whole page safely in
	 * the following scenario:
	 *  ________________________________________________________________
	 * |      tail (partial) page     |       head (partial) page       |
	 * |   (belongs to the next cl)   |   (belongs to the current cl)   |
	 * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
	 */
	COLLECT_PRIMARY_HOOKED,
	COLLECT_PRIMARY_FOLLOWED_NOINPLACE,	/* followed, but in-place I/O disallowed */
	/*
	 * The current collection has been linked with the owned chain, and
	 * could also be linked with the remaining collections, which means
	 * if the processing page is the tail page of the collection, then
	 * the current collection can safely use the whole page (since
	 * the previous collection is under control) for in-place I/O, as
	 * illustrated below:
	 *  ________________________________________________________________
	 * |  tail (partial) page |          head (partial) page           |
	 * |  (of the current cl) |      (of the previous collection)      |
	 * |  PRIMARY_FOLLOWED or |                                        |
	 * |_____PRIMARY_HOOKED___|____________PRIMARY_FOLLOWED____________|
	 *
	 * [  (*) the above page can be used as inplace I/O.               ]
	 */
	COLLECT_PRIMARY_FOLLOWED,
};
97e86a858   Gao Xiang   staging: erofs: t...
119
/* per-read-request cursor over the pcluster/collection being filled */
struct z_erofs_collector {
	struct z_erofs_pagevec_ctor vector;	/* inline pagevec enumerator */

	/* current pcluster; tailpcl detects tail-merging loops in corrupted images */
	struct z_erofs_pcluster *pcl, *tailpcl;
	struct z_erofs_collection *cl;		/* current (primary) collection */
	struct page **compressedpages;		/* next free compressed page slot */
	z_erofs_next_pcluster_t owned_head;	/* head of the chain owned by us */

	enum z_erofs_collectmode mode;		/* how the chain is claimed */
};
97e86a858   Gao Xiang   staging: erofs: t...
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
/* state carried across one readpage/readpages request */
struct z_erofs_decompress_frontend {
	struct inode *const inode;	/* inode being read */

	struct z_erofs_collector clt;	/* current collection cursor */
	struct erofs_map_blocks map;	/* last logical->physical mapping */

	/* used for applying cache strategy on the fly */
	bool backmost;			/* still at the rearmost extent? */
	erofs_off_t headoffset;		/* byte offset the request started at */
};

#define COLLECTOR_INIT() { \
	.owned_head = Z_EROFS_PCLUSTER_TAIL, \
	.mode = COLLECT_PRIMARY_FOLLOWED }

#define DECOMPRESS_FRONTEND_INIT(__i) { \
	.inode = __i, .clt = COLLECTOR_INIT(), \
	.backmost = true, }

/* shared scratch vmap area for decompression, guarded by the mutex below */
static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES];
static DEFINE_MUTEX(z_pagemap_global_lock);
3883a79ab   Gao Xiang   staging: erofs: i...
149

97e86a858   Gao Xiang   staging: erofs: t...
150
/*
 * Pre-fill the remaining compressed_pages[] slots of the current pcluster,
 * either with pages already present in the managed cache @mc, or with the
 * PAGE_UNALLOCATED placeholder when @type == DELAYEDALLOC (real allocation
 * is deferred to I/O submission).  Slots are filled losslessly with
 * cmpxchg so concurrent readers racing on the same pcluster are safe.
 *
 * If every slot could be satisfied without reserving space for in-place
 * file pages, the collector is downgraded to PRIMARY_FOLLOWED_NOINPLACE.
 */
static void preload_compressed_pages(struct z_erofs_collector *clt,
				     struct address_space *mc,
				     enum z_erofs_cache_alloctype type,
				     struct list_head *pagepool)
{
	const struct z_erofs_pcluster *pcl = clt->pcl;
	const unsigned int clusterpages = BIT(pcl->clusterbits);
	struct page **pages = clt->compressedpages;
	/* page-cache index of the first slot we start from */
	pgoff_t index = pcl->obj.index + (pages - pcl->compressed_pages);
	bool standalone = true;

	/* only a fully-claimed (FOLLOWED) pcluster may be preloaded by us */
	if (clt->mode < COLLECT_PRIMARY_FOLLOWED)
		return;

	/*
	 * NOTE(review): `index` is computed once before the loop and is not
	 * visibly advanced alongside `pages` within this view, so every
	 * find_get_page() below appears to target the same offset — verify
	 * against the upstream tree whether an `++index` is missing here.
	 */
	for (; pages < pcl->compressed_pages + clusterpages; ++pages) {
		struct page *page;
		compressed_page_t t;

		/* the compressed page was loaded before */
		if (READ_ONCE(*pages))
			continue;

		page = find_get_page(mc, index);

		if (page) {
			t = tag_compressed_page_justfound(page);
		} else if (type == DELAYEDALLOC) {
			t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED);
		} else {	/* DONTALLOC */
			/* remember the first hole so inplace I/O starts there */
			if (standalone)
				clt->compressedpages = pages;
			standalone = false;
			continue;
		}

		/* lost the race: someone else filled the slot first */
		if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t)))
			continue;

		/* drop the extra reference taken by find_get_page() */
		if (page)
			put_page(page);
	}

	if (standalone)		/* downgrade to PRIMARY_FOLLOWED_NOINPLACE */
		clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
}
  
/* called by erofs_shrinker to get rid of all compressed_pages */
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
				       struct erofs_workgroup *grp)
{
	struct z_erofs_pcluster *const pcl =
		container_of(grp, struct z_erofs_pcluster, obj);
	struct address_space *const mapping = MNGD_MAPPING(sbi);
	const unsigned int clusterpages = BIT(pcl->clusterbits);
	int i;

	/*
	 * refcount of workgroup is now freezed as 1,
	 * therefore no need to worry about available decompression users.
	 */
	for (i = 0; i < clusterpages; ++i) {
		struct page *page = pcl->compressed_pages[i];

		if (!page)
			continue;

		/* block other users from reclaiming or migrating the page */
		if (!trylock_page(page))
			return -EBUSY;

		/*
		 * NOTE(review): skipping a page whose mapping changed while
		 * we hold its lock leaves the page locked on this path —
		 * confirm against upstream whether an unlock_page() belongs
		 * before this continue.
		 */
		if (page->mapping != mapping)
			continue;

		/* barrier is implied in the following 'unlock_page' */
		WRITE_ONCE(pcl->compressed_pages[i], NULL);
		/* detach the page from the managed cache bookkeeping */
		set_page_private(page, 0);
		ClearPagePrivate(page);

		unlock_page(page);
		put_page(page);
	}
	return 0;
}
47e541a17   Gao Xiang   staging: erofs: f...
228
229
/*
 * Try to release a single cached compressed page on behalf of page
 * reclaim (->releasepage).  Succeeds only when the owning pcluster can
 * be frozen (no active users), in which case the matching slot is
 * cleared and the cache reference dropped.
 *
 * Returns 1 if the page was detached and may be freed, 0 if it is busy.
 */
int erofs_try_to_free_cached_page(struct address_space *mapping,
				  struct page *page)
{
	/* page_private() of a managed page points back at its pcluster */
	struct z_erofs_pcluster *const pcl = (void *)page_private(page);
	const unsigned int clusterpages = BIT(pcl->clusterbits);
	int ret = 0;	/* 0 - busy */

	/* freeze blocks concurrent decompression users while we scan */
	if (erofs_workgroup_try_to_freeze(&pcl->obj, 1)) {
		unsigned int i;

		for (i = 0; i < clusterpages; ++i) {
			if (pcl->compressed_pages[i] == page) {
				WRITE_ONCE(pcl->compressed_pages[i], NULL);
				ret = 1;
				break;
			}
		}
		erofs_workgroup_unfreeze(&pcl->obj, 1);

		/* drop the cache's ownership of the page */
		if (ret) {
			ClearPagePrivate(page);
			put_page(page);
		}
	}
	return ret;
}
105d4ad85   Gao Xiang   staging: erofs: i...
253

3883a79ab   Gao Xiang   staging: erofs: i...
254
/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
/*
 * Try to park a file page into one of the remaining compressed_pages[]
 * slots so decompression can run in place.  cmpxchg keeps this safe
 * against preload_compressed_pages() racing on the same slots.
 *
 * Returns true if a slot was claimed, false if all slots are taken.
 */
static inline bool z_erofs_try_inplace_io(struct z_erofs_collector *clt,
					  struct page *page)
{
	struct z_erofs_pcluster *const pcl = clt->pcl;
	const unsigned int clusterpages = BIT(pcl->clusterbits);

	while (clt->compressedpages < pcl->compressed_pages + clusterpages) {
		/* claim the first still-NULL slot; advance past losers */
		if (!cmpxchg(clt->compressedpages++, NULL, page))
			return true;
	}
	return false;
}
97e86a858   Gao Xiang   staging: erofs: t...
267
268
269
270
/* callers must be with collection lock held */
/*
 * Attach a file page to the current collection: first try in-place I/O
 * (exclusive pages only), otherwise queue it on the collection pagevec.
 *
 * Returns 0 on success, -EAGAIN when the pagevec cannot take the page
 * (caller retries with a fresh bookkeeping page).
 */
static int z_erofs_attach_page(struct z_erofs_collector *clt,
			       struct page *page,
			       enum z_erofs_page_type type)
{
	int ret;
	bool occupied;	/* out-arg of enqueue; intentionally unused here */

	/* give priority for inplaceio */
	if (clt->mode >= COLLECT_PRIMARY &&
	    type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
	    z_erofs_try_inplace_io(clt, page))
		return 0;

	ret = z_erofs_pagevec_enqueue(&clt->vector,
				      page, type, &occupied);
	/* ret is 0/1; only a successful enqueue bumps the vector count */
	clt->cl->vcnt += (unsigned int)ret;

	return ret ? 0 : -EAGAIN;
}
97e86a858   Gao Xiang   staging: erofs: t...
285
286
287
/*
 * Losslessly claim an existing pcluster for the chain headed by
 * *owned_head, using cmpxchg retry loops so concurrent claimers are safe.
 *
 * Returns the resulting collect mode:
 *   PRIMARY_FOLLOWED - we now own the pcluster (it was unclaimed),
 *   PRIMARY_HOOKED   - hooked our chain onto the tail of an open chain,
 *   PRIMARY          - already owned elsewhere; no chain relationship.
 */
static enum z_erofs_collectmode
try_to_claim_pcluster(struct z_erofs_pcluster *pcl,
		      z_erofs_next_pcluster_t *owned_head)
{
	/* let's claim these following types of pclusters */
retry:
	if (pcl->next == Z_EROFS_PCLUSTER_NIL) {
		/* type 1, nil pcluster */
		if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
			    *owned_head) != Z_EROFS_PCLUSTER_NIL)
			goto retry;	/* lost the race; re-check state */
		*owned_head = &pcl->next;
		/* lucky, I am the followee :) */
		return COLLECT_PRIMARY_FOLLOWED;
	} else if (pcl->next == Z_EROFS_PCLUSTER_TAIL) {
		/*
		 * type 2, link to the end of a existing open chain,
		 * be careful that its submission itself is governed
		 * by the original owned chain.
		 */
		if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
			    *owned_head) != Z_EROFS_PCLUSTER_TAIL)
			goto retry;	/* lost the race; re-check state */
		*owned_head = Z_EROFS_PCLUSTER_TAIL;
		return COLLECT_PRIMARY_HOOKED;
	}
	return COLLECT_PRIMARY;	/* :( better luck next time */
}
97e86a858   Gao Xiang   staging: erofs: t...
313
314
315
/*
 * Look up an existing pcluster for the mapped extent @map and claim it.
 *
 * On success returns the locked primary collection with clt->pcl/cl set.
 * Returns NULL when no pcluster exists yet (caller falls back to
 * clregister()), or ERR_PTR(-EFSCORRUPTED) when validation against the
 * on-disk image fails.
 */
static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt,
					   struct inode *inode,
					   struct erofs_map_blocks *map)
{
	struct erofs_workgroup *grp;
	struct z_erofs_pcluster *pcl;
	struct z_erofs_collection *cl;
	unsigned int length;
	bool tag;

	grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag);
	if (!grp)
		return NULL;

	pcl = container_of(grp, struct z_erofs_pcluster, obj);
	/* a self-referencing chain means the image is corrupted */
	if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) {
		DBG_BUGON(1);
		erofs_workgroup_put(grp);
		return ERR_PTR(-EFSCORRUPTED);
	}

	cl = z_erofs_primarycollection(pcl);
	/* the in-pcluster page offset must match the mapping we derived */
	if (cl->pageofs != (map->m_la & ~PAGE_MASK)) {
		DBG_BUGON(1);
		erofs_workgroup_put(grp);
		return ERR_PTR(-EFSCORRUPTED);
	}

	length = READ_ONCE(pcl->length);
	if (length & Z_EROFS_PCLUSTER_FULL_LENGTH) {
		/* a full-length pcluster can never be shorter than our extent */
		if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) {
			DBG_BUGON(1);
			erofs_workgroup_put(grp);
			return ERR_PTR(-EFSCORRUPTED);
		}
	} else {
		unsigned int llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT;

		if (map->m_flags & EROFS_MAP_FULL_MAPPED)
			llen |= Z_EROFS_PCLUSTER_FULL_LENGTH;

		/* losslessly grow pcl->length to the largest observed llen */
		while (llen > length &&
		       length != cmpxchg_relaxed(&pcl->length, length, llen)) {
			cpu_relax();
			length = READ_ONCE(pcl->length);
		}
	}
	mutex_lock(&cl->lock);
	/* used to check tail merging loop due to corrupted images */
	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
		clt->tailpcl = pcl;
	clt->mode = try_to_claim_pcluster(pcl, &clt->owned_head);
	/* clean tailpcl if the current owned_head is Z_EROFS_PCLUSTER_TAIL */
	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
		clt->tailpcl = NULL;
	clt->pcl = pcl;
	clt->cl = cl;
	return cl;
}
97e86a858   Gao Xiang   staging: erofs: t...
372
373
374
/*
 * Allocate and register a brand-new pcluster for the mapped extent @map.
 *
 * On success returns the locked primary collection with clt->pcl/cl set
 * and the pcluster claimed as PRIMARY_FOLLOWED.  Returns
 * ERR_PTR(-ENOMEM) on allocation failure or ERR_PTR(-EAGAIN) when a
 * concurrent thread registered the same pcluster first (caller retries
 * the lookup path).
 */
static struct z_erofs_collection *clregister(struct z_erofs_collector *clt,
					     struct inode *inode,
					     struct erofs_map_blocks *map)
{
	struct z_erofs_pcluster *pcl;
	struct z_erofs_collection *cl;
	int err;

	/* no available workgroup, let's allocate one */
	pcl = kmem_cache_alloc(pcluster_cachep, GFP_NOFS);
	if (!pcl)
		return ERR_PTR(-ENOMEM);

	z_erofs_pcluster_init_always(pcl);
	pcl->obj.index = map->m_pa >> PAGE_SHIFT;

	/* encoded logical length, with the full-length flag folded in */
	pcl->length = (map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) |
		(map->m_flags & EROFS_MAP_FULL_MAPPED ?
			Z_EROFS_PCLUSTER_FULL_LENGTH : 0);

	if (map->m_flags & EROFS_MAP_ZIPPED)
		pcl->algorithmformat = Z_EROFS_COMPRESSION_LZ4;
	else
		pcl->algorithmformat = Z_EROFS_COMPRESSION_SHIFTED;

	/* store the cluster size as a shift relative to PAGE_SHIFT */
	pcl->clusterbits = EROFS_I(inode)->z_physical_clusterbits[0];
	pcl->clusterbits -= PAGE_SHIFT;

	/* new pclusters should be claimed as type 1, primary and followed */
	pcl->next = clt->owned_head;
	clt->mode = COLLECT_PRIMARY_FOLLOWED;

	cl = z_erofs_primarycollection(pcl);
	cl->pageofs = map->m_la & ~PAGE_MASK;

	/*
	 * lock all primary followed works before visible to others
	 * and mutex_trylock *never* fails for a new pcluster.
	 */
	mutex_trylock(&cl->lock);

	err = erofs_register_workgroup(inode->i_sb, &pcl->obj, 0);
	if (err) {
		/* roll back; -EAGAIN sends the caller back to cllookup() */
		mutex_unlock(&cl->lock);
		kmem_cache_free(pcluster_cachep, pcl);
		return ERR_PTR(-EAGAIN);
	}
	/* used to check tail merging loop due to corrupted images */
	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
		clt->tailpcl = pcl;
	clt->owned_head = &pcl->next;
	clt->pcl = pcl;
	clt->cl = cl;
	return cl;
}
97e86a858   Gao Xiang   staging: erofs: t...
425
426
427
428
429
/*
 * Begin collecting pages for the extent described by @map: find the
 * matching pcluster (or register a new one), lock its collection, and
 * prime the collector's pagevec and compressed-page cursor.
 *
 * Returns 0 on success or a negative errno (-EINVAL for a misaligned
 * mapping, or whatever cllookup()/clregister() failed with).
 */
static int z_erofs_collector_begin(struct z_erofs_collector *clt,
				   struct inode *inode,
				   struct erofs_map_blocks *map)
{
	struct z_erofs_collection *cl;

	DBG_BUGON(clt->cl);	/* the previous collection must be ended */

	/* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous collection */
	DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_NIL);
	DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);

	if (!PAGE_ALIGNED(map->m_pa)) {
		DBG_BUGON(1);
		return -EINVAL;
	}

repeat:
	cl = cllookup(clt, inode, map);
	if (!cl) {
		cl = clregister(clt, inode, map);

		/* someone registered it concurrently; look it up again */
		if (cl == ERR_PTR(-EAGAIN))
			goto repeat;
	}

	if (IS_ERR(cl))
		return PTR_ERR(cl);

	z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS,
				  cl->pagevec, cl->vcnt);

	clt->compressedpages = clt->pcl->compressed_pages;
	if (clt->mode <= COLLECT_PRIMARY) /* cannot do in-place I/O */
		clt->compressedpages += Z_EROFS_CLUSTER_MAX_PAGES;
	return 0;
}
  
  /*
97e86a858   Gao Xiang   staging: erofs: t...
462
463
   * keep in mind that no referenced pclusters will be freed
   * only after a RCU grace period.
3883a79ab   Gao Xiang   staging: erofs: i...
464
465
466
   */
  static void z_erofs_rcu_callback(struct rcu_head *head)
  {
97e86a858   Gao Xiang   staging: erofs: t...
467
468
  	struct z_erofs_collection *const cl =
  		container_of(head, struct z_erofs_collection, rcu);
3883a79ab   Gao Xiang   staging: erofs: i...
469

97e86a858   Gao Xiang   staging: erofs: t...
470
471
472
  	kmem_cache_free(pcluster_cachep,
  			container_of(cl, struct z_erofs_pcluster,
  				     primary_collection));
3883a79ab   Gao Xiang   staging: erofs: i...
473
474
475
476
  }
  
  void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
  {
97e86a858   Gao Xiang   staging: erofs: t...
477
478
479
  	struct z_erofs_pcluster *const pcl =
  		container_of(grp, struct z_erofs_pcluster, obj);
  	struct z_erofs_collection *const cl = z_erofs_primarycollection(pcl);
3883a79ab   Gao Xiang   staging: erofs: i...
480

97e86a858   Gao Xiang   staging: erofs: t...
481
  	call_rcu(&cl->rcu, z_erofs_rcu_callback);
3883a79ab   Gao Xiang   staging: erofs: i...
482
  }
97e86a858   Gao Xiang   staging: erofs: t...
483
  static void z_erofs_collection_put(struct z_erofs_collection *cl)
3883a79ab   Gao Xiang   staging: erofs: i...
484
  {
97e86a858   Gao Xiang   staging: erofs: t...
485
486
  	struct z_erofs_pcluster *const pcl =
  		container_of(cl, struct z_erofs_pcluster, primary_collection);
3883a79ab   Gao Xiang   staging: erofs: i...
487

97e86a858   Gao Xiang   staging: erofs: t...
488
  	erofs_workgroup_put(&pcl->obj);
3883a79ab   Gao Xiang   staging: erofs: i...
489
  }
97e86a858   Gao Xiang   staging: erofs: t...
490
/*
 * Finish the current collection: tear down the pagevec enumerator,
 * release the collection lock, and drop our reference unless we own the
 * whole chain (FOLLOWED modes keep the reference until I/O submission).
 *
 * Returns true if a collection was actually open, false otherwise.
 */
static bool z_erofs_collector_end(struct z_erofs_collector *clt)
{
	struct z_erofs_collection *cl = clt->cl;

	if (!cl)
		return false;

	z_erofs_pagevec_ctor_exit(&clt->vector, false);
	mutex_unlock(&cl->lock);

	/*
	 * if all pending pages are added, don't hold its reference
	 * any longer if the pcluster isn't hosted by ourselves.
	 */
	if (clt->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE)
		z_erofs_collection_put(cl);

	clt->cl = NULL;
	return true;
}
  
  static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
  					       gfp_t gfp)
  {
b25a15191   Gao Xiang   staging: erofs: r...
513
  	struct page *page = erofs_allocpage(pagepool, gfp, true);
3883a79ab   Gao Xiang   staging: erofs: i...
514
515
516
517
  
  	page->mapping = Z_EROFS_MAPPING_STAGING;
  	return page;
  }
97e86a858   Gao Xiang   staging: erofs: t...
518
  static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
4279f3f98   Gao Xiang   staging: erofs: t...
519
  				       unsigned int cachestrategy,
97e86a858   Gao Xiang   staging: erofs: t...
520
  				       erofs_off_t la)
92e6efd56   Gao Xiang   staging: erofs: r...
521
  {
4279f3f98   Gao Xiang   staging: erofs: t...
522
523
  	if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
  		return false;
92e6efd56   Gao Xiang   staging: erofs: r...
524
525
  	if (fe->backmost)
  		return true;
4279f3f98   Gao Xiang   staging: erofs: t...
526
527
  	return cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
  		la < fe->headoffset;
92e6efd56   Gao Xiang   staging: erofs: r...
528
  }
92e6efd56   Gao Xiang   staging: erofs: r...
529

97e86a858   Gao Xiang   staging: erofs: t...
530
/*
 * Read one file page into the decompression frontend: walk the page from its
 * end towards its start, map each logical extent it covers and attach the
 * corresponding page part to a (p)cluster collection for decompression after
 * I/O completes.  Returns 0 or a negative errno; on error the page gets
 * PG_error set before being ended.
 */
static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
				struct page *page,
				struct list_head *pagepool)
{
	struct inode *const inode = fe->inode;
	struct erofs_sb_info *const sbi __maybe_unused = EROFS_I_SB(inode);
	struct erofs_map_blocks *const map = &fe->map;
	struct z_erofs_collector *const clt = &fe->clt;
	const loff_t offset = page_offset(page);
	/* true while the page can still be used exclusively (in-place) */
	bool tight = true;

	enum z_erofs_cache_alloctype cache_strategy;
	enum z_erofs_page_type page_type;
	unsigned int cur, end, spiltted, index;
	int err = 0;

	/* register locked file pages as online pages in pack */
	z_erofs_onlinepage_init(page);

	spiltted = 0;
	end = PAGE_SIZE;
repeat:
	/* probe the byte just before the current sub-range end */
	cur = end - 1;

	/* lucky, within the range of the current map_blocks */
	if (offset + cur >= map->m_la &&
	    offset + cur < map->m_la + map->m_llen) {
		/* didn't get a valid collection previously (very rare) */
		if (!clt->cl)
			goto restart_now;
		goto hitted;
	}

	/* go ahead the next map_blocks */
	erofs_dbg("%s: [out-of-range] pos %llu", __func__, offset + cur);

	/* finish the previous collection before switching extents */
	if (z_erofs_collector_end(clt))
		fe->backmost = false;

	map->m_la = offset + cur;
	map->m_llen = 0;
	err = z_erofs_map_blocks_iter(inode, map, 0);
	if (err)
		goto err_out;

restart_now:
	/* holes need no collection, they are simply zero-filled below */
	if (!(map->m_flags & EROFS_MAP_MAPPED))
		goto hitted;

	err = z_erofs_collector_begin(clt, inode, map);
	if (err)
		goto err_out;

	/* preload all compressed pages (maybe downgrade role if necessary) */
	if (should_alloc_managed_pages(fe, sbi->cache_strategy, map->m_la))
		cache_strategy = DELAYEDALLOC;
	else
		cache_strategy = DONTALLOC;

	preload_compressed_pages(clt, MNGD_MAPPING(sbi),
				 cache_strategy, pagepool);

hitted:
	/*
	 * Ensure the current partial page belongs to this submit chain rather
	 * than other concurrent submit chains or the noio(bypass) chain since
	 * those chains are handled asynchronously thus the page cannot be used
	 * for inplace I/O or pagevec (should be processed in strict order.)
	 */
	tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED &&
		  clt->mode != COLLECT_PRIMARY_FOLLOWED_NOINPLACE);

	/* start of the part of this page covered by the current extent */
	cur = end - min_t(unsigned int, offset + end - map->m_la, end);
	if (!(map->m_flags & EROFS_MAP_MAPPED)) {
		zero_user_segment(page, cur, end);
		goto next_part;
	}

	/* let's derive page type */
	page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
		(!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
			(tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
				Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));

	if (cur)
		tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED);

retry:
	err = z_erofs_attach_page(clt, page, page_type);
	/* should allocate an additional staging page for pagevec */
	if (err == -EAGAIN) {
		struct page *const newpage =
			__stagingpage_alloc(pagepool, GFP_NOFS);

		err = z_erofs_attach_page(clt, newpage,
					  Z_EROFS_PAGE_TYPE_EXCLUSIVE);
		if (!err)
			goto retry;
	}

	if (err)
		goto err_out;

	/* page index inside the decompressed output of this collection */
	index = page->index - (map->m_la >> PAGE_SHIFT);

	z_erofs_onlinepage_fixup(page, index, true);

	/* bump up the number of spiltted parts of a page */
	++spiltted;
	/* also update nr_pages */
	clt->cl->nr_pages = max_t(pgoff_t, clt->cl->nr_pages, index + 1);
next_part:
	/* can be used for verification */
	map->m_llen = offset + cur - map->m_la;

	/* continue with the remaining (earlier) part of the page, if any */
	end = cur;
	if (end > 0)
		goto repeat;

out:
	z_erofs_onlinepage_endio(page);

	erofs_dbg("%s, finish page: %pK spiltted: %u map->m_llen %llu",
		  __func__, page, spiltted, map->m_llen);
	return err;

	/* if some error occurred while processing this page */
err_out:
	SetPageError(page);
	goto out;
}
  
/*
 * Account @bios completed bios against the pending counter folded into the
 * tagged pointer @ptr; when the counter reaches zero, kick decompression off.
 * The tag bit distinguishes background (tag set: queue the work item) from
 * foreground completion (tag clear: wake the synchronous waiter).
 */
static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
{
	tagptr1_t t = tagptr_init(tagptr1_t, ptr);
	struct z_erofs_unzip_io *io = tagptr_unfold_ptr(t);
	bool background = tagptr_unfold_tags(t);

	if (!background) {
		unsigned long flags;

		/*
		 * hold the waitqueue lock around the decrement-and-test so
		 * the wakeup cannot race with the waiter; hence the *_locked
		 * wakeup variant.
		 */
		spin_lock_irqsave(&io->u.wait.lock, flags);
		if (!atomic_add_return(bios, &io->pending_bios))
			wake_up_locked(&io->u.wait);
		spin_unlock_irqrestore(&io->u.wait.lock, flags);
		return;
	}

	/* background I/O: the last completion schedules the work item */
	if (!atomic_add_return(bios, &io->pending_bios))
		queue_work(z_erofs_workqueue, &io->u.work);
}
  
/*
 * bio completion handler for compressed page reads: propagate the bio status
 * onto every segment page, mark managed-cache pages uptodate and unlock them,
 * then drop one pending-bio reference (possibly kicking off decompression).
 */
static inline void z_erofs_vle_read_endio(struct bio *bio)
{
	struct erofs_sb_info *sbi = NULL;
	blk_status_t err = bio->bi_status;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		bool cachemngd = false;

		DBG_BUGON(PageUptodate(page));
		DBG_BUGON(!page->mapping);

		/* lazily resolve sbi from the first non-staging page seen */
		if (!sbi && !z_erofs_page_is_staging(page))
			sbi = EROFS_SB(page->mapping->host->i_sb);

		/* sbi should already be gotten if the page is managed */
		if (sbi)
			cachemngd = erofs_page_is_managed(sbi, page);

		if (err)
			SetPageError(page);
		else if (cachemngd)
			SetPageUptodate(page);

		/* managed-cache pages were locked for I/O; release them now */
		if (cachemngd)
			unlock_page(page);
	}

	/* one bio finished: decrement pending count, maybe start unzipping */
	z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
	bio_put(bio);
}
97e86a858   Gao Xiang   staging: erofs: t...
703
704
705
/*
 * Decompress one physical cluster: collect its output pages from the inline
 * pagevec, locate in-place (overlapped) compressed pages, run the decompressor
 * and finally recycle staging pages and end all online pages.
 *
 * Called with I/O on all compressed pages already completed; takes cl->lock
 * for the whole operation.  Returns 0 or a negative errno.
 */
static int z_erofs_decompress_pcluster(struct super_block *sb,
				       struct z_erofs_pcluster *pcl,
				       struct list_head *pagepool)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	const unsigned int clusterpages = BIT(pcl->clusterbits);
	struct z_erofs_pagevec_ctor ctor;
	unsigned int i, outputsize, llen, nr_pages;
	struct page *pages_onstack[Z_EROFS_VMAP_ONSTACK_PAGES];
	struct page **pages, **compressed_pages, *page;

	enum z_erofs_page_type page_type;
	bool overlapped, partial;
	struct z_erofs_collection *cl;
	int err;

	might_sleep();
	cl = z_erofs_primarycollection(pcl);
	DBG_BUGON(!READ_ONCE(cl->nr_pages));

	mutex_lock(&cl->lock);
	nr_pages = cl->nr_pages;

	/*
	 * pick the cheapest available array for the output page vector:
	 * on-stack, the shared global pagemap, or a fresh allocation.
	 */
	if (nr_pages <= Z_EROFS_VMAP_ONSTACK_PAGES) {
		pages = pages_onstack;
	} else if (nr_pages <= Z_EROFS_VMAP_GLOBAL_PAGES &&
		   mutex_trylock(&z_pagemap_global_lock)) {
		pages = z_pagemap_global;
	} else {
		gfp_t gfp_flags = GFP_KERNEL;

		/* too large for the global map: the allocation must succeed */
		if (nr_pages > Z_EROFS_VMAP_GLOBAL_PAGES)
			gfp_flags |= __GFP_NOFAIL;

		pages = kvmalloc_array(nr_pages, sizeof(struct page *),
				       gfp_flags);

		/* fallback to global pagemap for the lowmem scenario */
		if (!pages) {
			mutex_lock(&z_pagemap_global_lock);
			pages = z_pagemap_global;
		}
	}

	for (i = 0; i < nr_pages; ++i)
		pages[i] = NULL;

	err = 0;
	z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_NR_INLINE_PAGEVECS,
				  cl->pagevec, 0);

	/* place every recorded (file/staging) page at its output index */
	for (i = 0; i < cl->vcnt; ++i) {
		unsigned int pagenr;

		page = z_erofs_pagevec_dequeue(&ctor, &page_type);

		/* all pages in pagevec ought to be valid */
		DBG_BUGON(!page);
		DBG_BUGON(!page->mapping);

		if (z_erofs_put_stagingpage(pagepool, page))
			continue;

		if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
			pagenr = 0;
		else
			pagenr = z_erofs_onlinepage_index(page);

		DBG_BUGON(pagenr >= nr_pages);

		/*
		 * currently EROFS doesn't support multiref(dedup),
		 * so here erroring out one multiref page.
		 */
		if (pages[pagenr]) {
			DBG_BUGON(1);
			SetPageError(pages[pagenr]);
			z_erofs_onlinepage_endio(pages[pagenr]);
			err = -EFSCORRUPTED;
		}
		pages[pagenr] = page;
	}
	z_erofs_pagevec_ctor_exit(&ctor, true);

	overlapped = false;
	compressed_pages = pcl->compressed_pages;

	/* check compressed pages and detect in-place (overlapped) I/O */
	for (i = 0; i < clusterpages; ++i) {
		unsigned int pagenr;

		page = compressed_pages[i];

		/* all compressed pages ought to be valid */
		DBG_BUGON(!page);
		DBG_BUGON(!page->mapping);

		if (!z_erofs_page_is_staging(page)) {
			if (erofs_page_is_managed(sbi, page)) {
				if (!PageUptodate(page))
					err = -EIO;
				continue;
			}

			/*
			 * only if non-head page can be selected
			 * for inplace decompression
			 */
			pagenr = z_erofs_onlinepage_index(page);

			DBG_BUGON(pagenr >= nr_pages);
			if (pages[pagenr]) {
				DBG_BUGON(1);
				SetPageError(pages[pagenr]);
				z_erofs_onlinepage_endio(pages[pagenr]);
				err = -EFSCORRUPTED;
			}
			pages[pagenr] = page;

			overlapped = true;
		}

		/* PG_error needs checking for inplaced and staging pages */
		if (PageError(page)) {
			DBG_BUGON(PageUptodate(page));
			err = -EIO;
		}
	}

	if (err)
		goto out;

	/* derive the decompressed size and whether decoding is partial */
	llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT;
	if (nr_pages << PAGE_SHIFT >= cl->pageofs + llen) {
		outputsize = llen;
		partial = !(pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH);
	} else {
		outputsize = (nr_pages << PAGE_SHIFT) - cl->pageofs;
		partial = true;
	}

	err = z_erofs_decompress(&(struct z_erofs_decompress_req) {
					.sb = sb,
					.in = compressed_pages,
					.out = pages,
					.pageofs_out = cl->pageofs,
					.inputsize = PAGE_SIZE,
					.outputsize = outputsize,
					.alg = pcl->algorithmformat,
					.inplace_io = overlapped,
					.partial_decoding = partial
				 }, pagepool);

out:
	/* must handle all compressed pages before endding pages */
	for (i = 0; i < clusterpages; ++i) {
		page = compressed_pages[i];

		/* managed-cache pages stay in the cache for reuse */
		if (erofs_page_is_managed(sbi, page))
			continue;

		/* recycle all individual staging pages */
		(void)z_erofs_put_stagingpage(pagepool, page);

		WRITE_ONCE(compressed_pages[i], NULL);
	}

	/* end all output pages, propagating any error */
	for (i = 0; i < nr_pages; ++i) {
		page = pages[i];
		if (!page)
			continue;

		DBG_BUGON(!page->mapping);

		/* recycle all individual staging pages */
		if (z_erofs_put_stagingpage(pagepool, page))
			continue;

		if (err < 0)
			SetPageError(page);

		z_erofs_onlinepage_endio(page);
	}

	if (pages == z_pagemap_global)
		mutex_unlock(&z_pagemap_global_lock);
	else if (pages != pages_onstack)
		kvfree(pages);

	cl->nr_pages = 0;
	cl->vcnt = 0;

	/* all cl locks MUST be taken before the following line */
	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);

	/* all cl locks SHOULD be released right now */
	mutex_unlock(&cl->lock);

	z_erofs_collection_put(cl);
	return err;
}
  
  static void z_erofs_vle_unzip_all(struct super_block *sb,
97e86a858   Gao Xiang   staging: erofs: t...
893
894
  				  struct z_erofs_unzip_io *io,
  				  struct list_head *pagepool)
3883a79ab   Gao Xiang   staging: erofs: i...
895
  {
97e86a858   Gao Xiang   staging: erofs: t...
896
  	z_erofs_next_pcluster_t owned = io->head;
3883a79ab   Gao Xiang   staging: erofs: i...
897

97e86a858   Gao Xiang   staging: erofs: t...
898
899
  	while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) {
  		struct z_erofs_pcluster *pcl;
3883a79ab   Gao Xiang   staging: erofs: i...
900
901
  
  		/* no possible that 'owned' equals Z_EROFS_WORK_TPTR_TAIL */
97e86a858   Gao Xiang   staging: erofs: t...
902
  		DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL);
3883a79ab   Gao Xiang   staging: erofs: i...
903
904
  
  		/* no possible that 'owned' equals NULL */
97e86a858   Gao Xiang   staging: erofs: t...
905
  		DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
3883a79ab   Gao Xiang   staging: erofs: i...
906

97e86a858   Gao Xiang   staging: erofs: t...
907
908
  		pcl = container_of(owned, struct z_erofs_pcluster, next);
  		owned = READ_ONCE(pcl->next);
3883a79ab   Gao Xiang   staging: erofs: i...
909

97e86a858   Gao Xiang   staging: erofs: t...
910
  		z_erofs_decompress_pcluster(sb, pcl, pagepool);
3978c8e32   Gao Xiang   staging: erofs: r...
911
  	}
3883a79ab   Gao Xiang   staging: erofs: i...
912
913
914
915
  }
  
  static void z_erofs_vle_unzip_wq(struct work_struct *work)
  {
97e86a858   Gao Xiang   staging: erofs: t...
916
917
918
  	struct z_erofs_unzip_io_sb *iosb =
  		container_of(work, struct z_erofs_unzip_io_sb, io.u.work);
  	LIST_HEAD(pagepool);
3883a79ab   Gao Xiang   staging: erofs: i...
919

97e86a858   Gao Xiang   staging: erofs: t...
920
921
  	DBG_BUGON(iosb->io.head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
  	z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &pagepool);
3883a79ab   Gao Xiang   staging: erofs: i...
922

97e86a858   Gao Xiang   staging: erofs: t...
923
  	put_pages_list(&pagepool);
3883a79ab   Gao Xiang   staging: erofs: i...
924
925
  	kvfree(iosb);
  }
97e86a858   Gao Xiang   staging: erofs: t...
926
927
928
929
930
/*
 * Pick (or allocate) the compressed page at slot @nr of @pcl for bio
 * submission.  The slot may hold NULL, the PAGE_UNALLOCATED placeholder, or
 * a tagged pointer to a managed-cache page; races with page reclaim and
 * concurrent submitters are resolved with a cmpxchg retry loop.
 *
 * Returns the page to submit I/O for, or NULL if the slot's page is already
 * up-to-date and needs no I/O.
 */
static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
					       unsigned int nr,
					       struct list_head *pagepool,
					       struct address_space *mc,
					       gfp_t gfp)
{
	/* determined at compile time to avoid too many #ifdefs */
	const bool nocache = __builtin_constant_p(mc) ? !mc : false;
	const pgoff_t index = pcl->obj.index;
	bool tocache = false;

	struct address_space *mapping;
	struct page *oldpage, *page;

	compressed_page_t t;
	int justfound;

repeat:
	page = READ_ONCE(pcl->compressed_pages[nr]);
	oldpage = page;

	if (!page)
		goto out_allocpage;

	/*
	 * the cached page has not been allocated and
	 * an placeholder is out there, prepare it now.
	 */
	if (!nocache && page == PAGE_UNALLOCATED) {
		tocache = true;
		goto out_allocpage;
	}

	/* process the target tagged pointer */
	t = tagptr_init(compressed_page_t, page);
	justfound = tagptr_unfold_tags(t);
	page = tagptr_unfold_ptr(t);

	mapping = READ_ONCE(page->mapping);

	/*
	 * if managed cache is disabled, it's no way to
	 * get such a cached-like page.
	 */
	if (nocache) {
		/* if managed cache is disabled, it is impossible `justfound' */
		DBG_BUGON(justfound);

		/* and it should be locked, not uptodate, and not truncated */
		DBG_BUGON(!PageLocked(page));
		DBG_BUGON(PageUptodate(page));
		DBG_BUGON(!mapping);
		goto out;
	}

	/*
	 * unmanaged (file) pages are all locked solidly,
	 * therefore it is impossible for `mapping' to be NULL.
	 */
	if (mapping && mapping != mc)
		/* ought to be unmanaged pages */
		goto out;

	lock_page(page);

	/* only true if page reclaim goes wrong, should never happen */
	DBG_BUGON(justfound && PagePrivate(page));

	/* the page is still in manage cache */
	if (page->mapping == mc) {
		WRITE_ONCE(pcl->compressed_pages[nr], page);

		ClearPageError(page);
		if (!PagePrivate(page)) {
			/*
			 * impossible to be !PagePrivate(page) for
			 * the current restriction as well if
			 * the page is already in compressed_pages[].
			 */
			DBG_BUGON(!justfound);

			/* attach the page to this pcluster */
			justfound = 0;
			set_page_private(page, (unsigned long)pcl);
			SetPagePrivate(page);
		}

		/* no need to submit io if it is already up-to-date */
		if (PageUptodate(page)) {
			unlock_page(page);
			page = NULL;
		}
		goto out;
	}

	/*
	 * the managed page has been truncated, it's unsafe to
	 * reuse this one, let's allocate a new cache-managed page.
	 */
	DBG_BUGON(page->mapping);
	DBG_BUGON(!justfound);

	tocache = true;
	unlock_page(page);
	put_page(page);
out_allocpage:
	page = __stagingpage_alloc(pagepool, gfp);
	/* someone else changed the slot meanwhile: recycle ours and retry */
	if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
		list_add(&page->lru, pagepool);
		cpu_relax();
		goto repeat;
	}
	if (nocache || !tocache)
		goto out;
	/* insertion into the managed cache failed: treat as a staging page */
	if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
		page->mapping = Z_EROFS_MAPPING_STAGING;
		goto out;
	}
	set_page_private(page, (unsigned long)pcl);
	SetPagePrivate(page);
out:	/* the only exit (for tracing and debugging) */
	return page;
}
97e86a858   Gao Xiang   staging: erofs: t...
1042
1043
1044
  static struct z_erofs_unzip_io *jobqueue_init(struct super_block *sb,
  					      struct z_erofs_unzip_io *io,
  					      bool foreground)
3883a79ab   Gao Xiang   staging: erofs: i...
1045
  {
97e86a858   Gao Xiang   staging: erofs: t...
1046
  	struct z_erofs_unzip_io_sb *iosb;
3883a79ab   Gao Xiang   staging: erofs: i...
1047

7146a4f02   Gao Xiang   staging: erofs: s...
1048
  	if (foreground) {
3883a79ab   Gao Xiang   staging: erofs: i...
1049
  		/* waitqueue available for foreground io */
7146a4f02   Gao Xiang   staging: erofs: s...
1050
  		DBG_BUGON(!io);
3883a79ab   Gao Xiang   staging: erofs: i...
1051
1052
1053
1054
1055
  
  		init_waitqueue_head(&io->u.wait);
  		atomic_set(&io->pending_bios, 0);
  		goto out;
  	}
a9f69bd55   Shobhit Kukreti   staging: erofs: R...
1056
  	iosb = kvzalloc(sizeof(*iosb), GFP_KERNEL | __GFP_NOFAIL);
7146a4f02   Gao Xiang   staging: erofs: s...
1057
  	DBG_BUGON(!iosb);
3883a79ab   Gao Xiang   staging: erofs: i...
1058

7146a4f02   Gao Xiang   staging: erofs: s...
1059
1060
  	/* initialize fields in the allocated descriptor */
  	io = &iosb->io;
3883a79ab   Gao Xiang   staging: erofs: i...
1061
1062
1063
  	iosb->sb = sb;
  	INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq);
  out:
97e86a858   Gao Xiang   staging: erofs: t...
1064
  	io->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
3883a79ab   Gao Xiang   staging: erofs: i...
1065
1066
  	return io;
  }
97e86a858   Gao Xiang   staging: erofs: t...
1067
/* define decompression jobqueue types */
enum {
	/* pclusters needing no device read (handled via the bypass queue) */
	JQ_BYPASS,
	/* pclusters that require bio submission */
	JQ_SUBMIT,
	NR_JOBQUEUES,
};
  
/*
 * Initialize the bypass and submit jobqueues plus their tail pointers, and
 * return the tagged bi_private cookie (queue pointer folded with the
 * background flag) handed to bios for completion accounting.
 */
static void *jobqueueset_init(struct super_block *sb,
			      z_erofs_next_pcluster_t qtail[],
			      struct z_erofs_unzip_io *q[],
			      struct z_erofs_unzip_io *fgq,
			      bool forcefg)
{
	/*
	 * if managed cache is enabled, bypass jobqueue is needed,
	 * no need to read from device for all pclusters in this queue.
	 */
	q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true);
	qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;

	/* the submit queue is foreground only when the caller forces it */
	q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg);
	qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;

	/* fold the "background" tag into the pointer passed to bios */
	return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg));
}
97e86a858   Gao Xiang   staging: erofs: t...
1092
1093
1094
/*
 * Move @pcl from the submit chain onto the bypass chain: splice the rest of
 * the submit chain (@owned_head) past it and append the pcluster to the
 * bypass queue's tail.  Plain WRITE_ONCE stores suffice since concurrent
 * readers of these chains only follow fully-written links.
 */
static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
				    z_erofs_next_pcluster_t qtail[],
				    z_erofs_next_pcluster_t owned_head)
{
	z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
	z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];

	DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
	/* an open tail marker becomes closed once moved off the submit chain */
	if (owned_head == Z_EROFS_PCLUSTER_TAIL)
		owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED;

	/* terminate the pcluster before exposing it on the bypass chain */
	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED);

	/* bridge the submit chain over the removed pcluster */
	WRITE_ONCE(*submit_qtail, owned_head);
	WRITE_ONCE(*bypass_qtail, &pcl->next);

	qtail[JQ_BYPASS] = &pcl->next;
}
97e86a858   Gao Xiang   staging: erofs: t...
1110
  static bool postsubmit_is_all_bypassed(struct z_erofs_unzip_io *q[],
7146a4f02   Gao Xiang   staging: erofs: s...
1111
1112
1113
1114
1115
1116
1117
1118
1119
  				       unsigned int nr_bios,
  				       bool force_fg)
  {
  	/*
  	 * although background is preferred, no one is pending for submission.
  	 * don't issue workqueue for decompression but drop it directly instead.
  	 */
  	if (force_fg || nr_bios)
  		return false;
97e86a858   Gao Xiang   staging: erofs: t...
1120
  	kvfree(container_of(q[JQ_SUBMIT], struct z_erofs_unzip_io_sb, io));
7146a4f02   Gao Xiang   staging: erofs: s...
1121
1122
  	return true;
  }
3883a79ab   Gao Xiang   staging: erofs: i...
1123
1124
  
/*
 * Walk the chain of pclusters starting at @owned_head, pick up each
 * compressed page and batch contiguous ones into read bios.  pclusters
 * whose pages are all already available (no I/O needed) are moved to the
 * bypass jobqueue for immediate decompression by the caller.
 *
 * Returns false if @owned_head is empty (nothing to submit), true
 * otherwise.  On true, @fgq/@q ownership follows jobqueueset_init():
 * the submit queue is either consumed here (all bypassed) or kicked off
 * for decompression via z_erofs_vle_unzip_kickoff().
 */
static bool z_erofs_vle_submit_all(struct super_block *sb,
				   z_erofs_next_pcluster_t owned_head,
				   struct list_head *pagepool,
				   struct z_erofs_unzip_io *fgq,
				   bool force_fg)
{
	struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
	struct z_erofs_unzip_io *q[NR_JOBQUEUES];
	struct bio *bio;
	void *bi_private;
	/* since bio will be NULL, no need to initialize last_index */
	pgoff_t uninitialized_var(last_index);
	bool force_submit = false;
	unsigned int nr_bios;

	if (owned_head == Z_EROFS_PCLUSTER_TAIL)
		return false;

	force_submit = false;
	bio = NULL;
	nr_bios = 0;
	bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg);

	/* by default, all need io submission */
	q[JQ_SUBMIT]->head = owned_head;

	do {
		struct z_erofs_pcluster *pcl;
		unsigned int clusterpages;
		pgoff_t first_index;
		struct page *page;
		/* i: page index inside the pcluster; bypass: pages w/o I/O */
		unsigned int i = 0, bypass = 0;
		int err;

		/* no possible 'owned_head' equals the following */
		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);

		pcl = container_of(owned_head, struct z_erofs_pcluster, next);

		clusterpages = BIT(pcl->clusterbits);

		/* close the main owned chain at first */
		owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
				     Z_EROFS_PCLUSTER_TAIL_CLOSED);

		first_index = pcl->obj.index;
		/* a gap in block numbers forces a new bio */
		force_submit |= (first_index != last_index + 1);

repeat:
		page = pickup_page_for_submission(pcl, i, pagepool,
						  MNGD_MAPPING(sbi),
						  GFP_NOFS);
		/* NULL page: no I/O needed for this slot, count it as bypass */
		if (!page) {
			force_submit = true;
			++bypass;
			goto skippage;
		}

		if (bio && force_submit) {
submit_bio_retry:
			submit_bio(bio);
			bio = NULL;
		}

		if (!bio) {
			bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);

			bio->bi_end_io = z_erofs_vle_read_endio;
			bio_set_dev(bio, sb->s_bdev);
			bio->bi_iter.bi_sector = (sector_t)(first_index + i) <<
				LOG_SECTORS_PER_BLOCK;
			bio->bi_private = bi_private;
			bio->bi_opf = REQ_OP_READ;

			++nr_bios;
		}

		/* bio_add_page() returns the length added; a short add means
		 * the bio is full, so submit it and retry with a fresh one. */
		err = bio_add_page(bio, page, PAGE_SIZE, 0);
		if (err < PAGE_SIZE)
			goto submit_bio_retry;

		force_submit = false;
		last_index = first_index + i;
skippage:
		if (++i < clusterpages)
			goto repeat;

		/* all pages bypassed -> this pcluster needs no I/O at all */
		if (bypass < clusterpages)
			qtail[JQ_SUBMIT] = &pcl->next;
		else
			move_to_bypass_jobqueue(pcl, qtail, owned_head);
	} while (owned_head != Z_EROFS_PCLUSTER_TAIL);

	if (bio)
		submit_bio(bio);

	if (postsubmit_is_all_bypassed(q, nr_bios, force_fg))
		return true;

	z_erofs_vle_unzip_kickoff(bi_private, nr_bios);
	return true;
}
97e86a858   Gao Xiang   staging: erofs: t...
1224
1225
/*
 * Submit all collected pclusters for I/O and decompress what is ready:
 * bypass (no-I/O) pclusters immediately, and — in foreground mode —
 * also the submitted ones after waiting for their bios to complete.
 */
static void z_erofs_submit_and_unzip(struct super_block *sb,
				     struct z_erofs_collector *clt,
				     struct list_head *pagepool,
				     bool force_fg)
{
	struct z_erofs_unzip_io io[NR_JOBQUEUES];

	if (!z_erofs_vle_submit_all(sb, clt->owned_head,
				    pagepool, io, force_fg))
		return;

	/* decompress no I/O pclusters immediately */
	z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool);

	/* background mode: the workqueue handles the rest asynchronously */
	if (!force_fg)
		return;

	/* wait until all bios are completed */
	wait_event(io[JQ_SUBMIT].u.wait,
		   !atomic_read(&io[JQ_SUBMIT].pending_bios));

	/* let's synchronous decompression */
	z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool);
}
  
/*
 * ->readpage() for compressed inodes: read and decompress a single page
 * synchronously (force_fg == true).
 *
 * Returns 0 on success or the error from z_erofs_do_read_page().
 */
static int z_erofs_vle_normalaccess_readpage(struct file *file,
					     struct page *page)
{
	struct inode *const inode = page->mapping->host;
	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
	int err;
	LIST_HEAD(pagepool);

	trace_erofs_readpage(page, false);

	f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;

	err = z_erofs_do_read_page(&f, page, &pagepool);
	(void)z_erofs_collector_end(&f.clt);

	/* if some compressed cluster ready, need submit them anyway */
	z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, true);

	if (err)
		erofs_err(inode->i_sb, "failed to read, err [%d]", err);

	/* drop the cached on-disk map page, if any */
	if (f.map.mpage)
		put_page(f.map.mpage);

	/* clean up the remaining free pages */
	put_pages_list(&pagepool);
	return err;
}
14f362b4f   Gao Xiang   staging: erofs: c...
1273
1274
1275
1276
1277
  static bool should_decompress_synchronously(struct erofs_sb_info *sbi,
  					    unsigned int nr)
  {
  	return nr <= sbi->max_sync_decompress_pages;
  }
5fb76bb04   Gao Xiang   staging: erofs: c...
1278
1279
1280
1281
/*
 * ->readpages() for compressed inodes: add the readahead pages to the
 * page cache, then read and decompress them — synchronously only for
 * small, non-pure-readahead batches.
 *
 * Always returns 0; per-page errors are logged and left to the page
 * error state.
 */
static int z_erofs_vle_normalaccess_readpages(struct file *filp,
					      struct address_space *mapping,
					      struct list_head *pages,
					      unsigned int nr_pages)
{
	struct inode *const inode = mapping->host;
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);

	bool sync = should_decompress_synchronously(sbi, nr_pages);
	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
	/* singly-linked via page_private(); built in reverse file order */
	struct page *head = NULL;
	LIST_HEAD(pagepool);

	trace_erofs_readpages(mapping->host, lru_to_page(pages),
			      nr_pages, false);

	f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT;

	for (; nr_pages; --nr_pages) {
		struct page *page = lru_to_page(pages);

		prefetchw(&page->flags);
		list_del(&page->lru);

		/*
		 * A pure asynchronous readahead is indicated if
		 * a PG_readahead marked page is hitted at first.
		 * Let's also do asynchronous decompression for this case.
		 */
		sync &= !(PageReadahead(page) && !head);

		/* already cached elsewhere: recycle the page via pagepool */
		if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			list_add(&page->lru, &pagepool);
			continue;
		}

		set_page_private(page, (unsigned long)head);
		head = page;
	}

	while (head) {
		struct page *page = head;
		int err;

		/* traversal in reverse order */
		head = (void *)page_private(page);

		err = z_erofs_do_read_page(&f, page, &pagepool);
		if (err)
			erofs_err(inode->i_sb,
				  "readahead error at page %lu @ nid %llu",
				  page->index, EROFS_I(inode)->nid);
		/* drop the reference taken by add_to_page_cache_lru() */
		put_page(page);
	}

	(void)z_erofs_collector_end(&f.clt);

	z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, sync);

	/* drop the cached on-disk map page, if any */
	if (f.map.mpage)
		put_page(f.map.mpage);

	/* clean up the remaining free pages */
	put_pages_list(&pagepool);
	return 0;
}
3883a79ab   Gao Xiang   staging: erofs: i...
1337
1338
1339
1340
/* address_space operations for compressed (VLE) file data */
const struct address_space_operations z_erofs_vle_normalaccess_aops = {
	.readpage = z_erofs_vle_normalaccess_readpage,
	.readpages = z_erofs_vle_normalaccess_readpages,
};
02827e179   Gao Xiang   staging: erofs: a...
1341