fs/erofs/zdata.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "zdata.h"
#include "compress.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>

/*
 * a compressed_pages[] placeholder in order to avoid
 * being filled with file pages for in-place decompression.
 */
#define PAGE_UNALLOCATED	((void *)0x5F0E4B1D)

/* how to allocate cached pages for a pcluster */
enum z_erofs_cache_alloctype {
	DONTALLOC,	/* don't allocate any cached pages */
	DELAYEDALLOC,	/* delayed allocation (at the time of submitting io) */
};

/*
 * tagged pointer with 1-bit tag for all compressed pages
 * tag 0 - the page is just found with an extra page reference
 */
typedef tagptr1_t compressed_page_t;

#define tag_compressed_page_justfound(page) \
	tagptr_fold(compressed_page_t, page, 1)
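
/*
 * Note: folding a tag bit into the pointer works because struct page
 * pointers are at least word-aligned, which leaves the lowest bit free
 * to carry the "justfound" flag in compressed_page_t.
 */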

static struct workqueue_struct *z_erofs_workqueue __read_mostly;
static struct kmem_cache *pcluster_cachep __read_mostly;

void z_erofs_exit_zip_subsystem(void)
{
	destroy_workqueue(z_erofs_workqueue);
	kmem_cache_destroy(pcluster_cachep);
}

static inline int z_erofs_init_workqueue(void)
{
	const unsigned int onlinecpus = num_possible_cpus();
	const unsigned int flags = WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE;

	/*
	 * no need to spawn too many threads; limiting threads can minimize
	 * scheduling overhead, perhaps per-CPU threads should be better?
	 */
	z_erofs_workqueue = alloc_workqueue("erofs_unzipd", flags,
					    onlinecpus + onlinecpus / 4);
	return z_erofs_workqueue ? 0 : -ENOMEM;
}
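
/*
 * z_erofs_pcluster_init_once() below runs as the slab constructor: it only
 * sets up state that stays invariant across allocations (the collection
 * mutex and cleared page slots), while z_erofs_pcluster_init_always()
 * re-initializes the per-allocation state on every kmem_cache_alloc().
 */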

static void z_erofs_pcluster_init_once(void *ptr)
{
	struct z_erofs_pcluster *pcl = ptr;
	struct z_erofs_collection *cl = z_erofs_primarycollection(pcl);
	unsigned int i;

	mutex_init(&cl->lock);
	cl->nr_pages = 0;
	cl->vcnt = 0;
	for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i)
		pcl->compressed_pages[i] = NULL;
}

static void z_erofs_pcluster_init_always(struct z_erofs_pcluster *pcl)
{
	struct z_erofs_collection *cl = z_erofs_primarycollection(pcl);

	atomic_set(&pcl->obj.refcount, 1);

	DBG_BUGON(cl->nr_pages);
	DBG_BUGON(cl->vcnt);
}

int __init z_erofs_init_zip_subsystem(void)
{
	pcluster_cachep = kmem_cache_create("erofs_compress",
					    Z_EROFS_WORKGROUP_SIZE, 0,
					    SLAB_RECLAIM_ACCOUNT,
					    z_erofs_pcluster_init_once);
	if (pcluster_cachep) {
		if (!z_erofs_init_workqueue())
			return 0;

		kmem_cache_destroy(pcluster_cachep);
	}
	return -ENOMEM;
}

enum z_erofs_collectmode {
	COLLECT_SECONDARY,
	COLLECT_PRIMARY,
	/*
	 * The current collection was the tail of an existing chain, and the
	 * previously processed chained collections have all been decided to
	 * be hooked up to it.
	 * A new chain will be created for the remaining collections which are
	 * not processed yet, so different from COLLECT_PRIMARY_FOLLOWED,
	 * the next collection cannot reuse the whole page safely in
	 * the following scenario:
	 *  ________________________________________________________________
	 * |      tail (partial) page     |       head (partial) page       |
	 * |   (belongs to the next cl)   |   (belongs to the current cl)   |
	 * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
	 */
	COLLECT_PRIMARY_HOOKED,
	COLLECT_PRIMARY_FOLLOWED_NOINPLACE,
	/*
	 * The current collection has been linked with the owned chain, and
	 * could also be linked with the remaining collections, which means
	 * that if the processing page is the tail page of the collection,
	 * the current collection can safely use the whole page (since
	 * the previous collection is under control) for in-place I/O, as
	 * illustrated below:
	 *  ________________________________________________________________
	 * |  tail (partial) page |          head (partial) page            |
	 * |  (of the current cl) |      (of the previous collection)       |
	 * |  PRIMARY_FOLLOWED or |                                         |
	 * |_____PRIMARY_HOOKED___|____________PRIMARY_FOLLOWED_____________|
	 *
	 * [  (*) the above page can be used as inplace I/O.                ]
	 */
	COLLECT_PRIMARY_FOLLOWED,
};
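
/*
 * In short, pclusters are chained together through their `next' fields;
 * each chain is owned by exactly one submission context, and the collect
 * mode above records how the current collection joined (or failed to
 * join) such a chain.
 */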

struct z_erofs_collector {
	struct z_erofs_pagevec_ctor vector;

	struct z_erofs_pcluster *pcl, *tailpcl;
	struct z_erofs_collection *cl;
	struct page **compressedpages;
	z_erofs_next_pcluster_t owned_head;

	enum z_erofs_collectmode mode;
};

struct z_erofs_decompress_frontend {
	struct inode *const inode;

	struct z_erofs_collector clt;
	struct erofs_map_blocks map;

	/* used for applying cache strategy on the fly */
	bool backmost;
	erofs_off_t headoffset;
};

#define COLLECTOR_INIT() { \
	.owned_head = Z_EROFS_PCLUSTER_TAIL, \
	.mode = COLLECT_PRIMARY_FOLLOWED }

#define DECOMPRESS_FRONTEND_INIT(__i) { \
	.inode = __i, .clt = COLLECTOR_INIT(), \
	.backmost = true, }

static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES];
static DEFINE_MUTEX(z_pagemap_global_lock);
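
/*
 * Typical usage (see the readpage paths at the end of this file):
 *	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
 * which starts every read request with an empty, tail-claimable collector.
 */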

static void preload_compressed_pages(struct z_erofs_collector *clt,
				     struct address_space *mc,
				     enum z_erofs_cache_alloctype type,
				     struct list_head *pagepool)
{
	const struct z_erofs_pcluster *pcl = clt->pcl;
	const unsigned int clusterpages = BIT(pcl->clusterbits);
	struct page **pages = clt->compressedpages;
	pgoff_t index = pcl->obj.index + (pages - pcl->compressed_pages);
	bool standalone = true;

	if (clt->mode < COLLECT_PRIMARY_FOLLOWED)
		return;

	for (; pages < pcl->compressed_pages + clusterpages; ++pages) {
		struct page *page;
		compressed_page_t t;

		/* the compressed page was loaded before */
		if (READ_ONCE(*pages))
			continue;

		page = find_get_page(mc, index);

		if (page) {
			t = tag_compressed_page_justfound(page);
		} else if (type == DELAYEDALLOC) {
			t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED);
		} else {	/* DONTALLOC */
			if (standalone)
				clt->compressedpages = pages;
			standalone = false;
			continue;
		}

		if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t)))
			continue;

		if (page)
			put_page(page);
	}

	if (standalone)		/* downgrade to PRIMARY_FOLLOWED_NOINPLACE */
		clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
}

/* called by erofs_shrinker to get rid of all compressed_pages */
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
				       struct erofs_workgroup *grp)
{
	struct z_erofs_pcluster *const pcl =
		container_of(grp, struct z_erofs_pcluster, obj);
	struct address_space *const mapping = MNGD_MAPPING(sbi);
	const unsigned int clusterpages = BIT(pcl->clusterbits);
	int i;

	/*
	 * refcount of workgroup is now frozen as 1,
	 * therefore no need to worry about available decompression users.
	 */
	for (i = 0; i < clusterpages; ++i) {
		struct page *page = pcl->compressed_pages[i];

		if (!page)
			continue;

		/* block other users from reclaiming or migrating the page */
		if (!trylock_page(page))
			return -EBUSY;

		if (page->mapping != mapping)
			continue;

		/* barrier is implied in the following 'unlock_page' */
		WRITE_ONCE(pcl->compressed_pages[i], NULL);
		set_page_private(page, 0);
		ClearPagePrivate(page);

		unlock_page(page);
		put_page(page);
	}
	return 0;
}

int erofs_try_to_free_cached_page(struct address_space *mapping,
				  struct page *page)
{
	struct z_erofs_pcluster *const pcl = (void *)page_private(page);
	const unsigned int clusterpages = BIT(pcl->clusterbits);
	int ret = 0;	/* 0 - busy */

	if (erofs_workgroup_try_to_freeze(&pcl->obj, 1)) {
		unsigned int i;

		for (i = 0; i < clusterpages; ++i) {
			if (pcl->compressed_pages[i] == page) {
				WRITE_ONCE(pcl->compressed_pages[i], NULL);
				ret = 1;
				break;
			}
		}
		erofs_workgroup_unfreeze(&pcl->obj, 1);

		if (ret) {
			ClearPagePrivate(page);
			put_page(page);
		}
	}
	return ret;
}
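
/*
 * Both helpers above depend on freezing the workgroup refcount: once it
 * is frozen at 1, no decompression user can grab the pcluster, so its
 * compressed_pages[] slots can be detached safely.
 */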

/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
static inline bool z_erofs_try_inplace_io(struct z_erofs_collector *clt,
					  struct page *page)
{
	struct z_erofs_pcluster *const pcl = clt->pcl;
	const unsigned int clusterpages = BIT(pcl->clusterbits);

	while (clt->compressedpages < pcl->compressed_pages + clusterpages) {
		if (!cmpxchg(clt->compressedpages++, NULL, page))
			return true;
	}
	return false;
}

/* callers must hold the collection lock */
static int z_erofs_attach_page(struct z_erofs_collector *clt,
			       struct page *page,
			       enum z_erofs_page_type type)
{
	int ret;
	bool occupied;

	/* give priority to inplace I/O */
	if (clt->mode >= COLLECT_PRIMARY &&
	    type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
	    z_erofs_try_inplace_io(clt, page))
		return 0;

	ret = z_erofs_pagevec_enqueue(&clt->vector,
				      page, type, &occupied);
	clt->cl->vcnt += (unsigned int)ret;

	return ret ? 0 : -EAGAIN;
}

static enum z_erofs_collectmode
try_to_claim_pcluster(struct z_erofs_pcluster *pcl,
		      z_erofs_next_pcluster_t *owned_head)
{
	/* let's claim these following types of pclusters */
retry:
	if (pcl->next == Z_EROFS_PCLUSTER_NIL) {
		/* type 1, nil pcluster */
		if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
			    *owned_head) != Z_EROFS_PCLUSTER_NIL)
			goto retry;

		*owned_head = &pcl->next;
		/* lucky, I am the followee :) */
		return COLLECT_PRIMARY_FOLLOWED;
	} else if (pcl->next == Z_EROFS_PCLUSTER_TAIL) {
		/*
		 * type 2, link to the end of an existing open chain,
		 * be careful that its submission itself is governed
		 * by the original owned chain.
		 */
		if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
			    *owned_head) != Z_EROFS_PCLUSTER_TAIL)
			goto retry;
		*owned_head = Z_EROFS_PCLUSTER_TAIL;
		return COLLECT_PRIMARY_HOOKED;
	}
	return COLLECT_PRIMARY;	/* :( better luck next time */
}
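
/*
 * Note that claiming is lockless: if a cmpxchg() above fails, another
 * context has just updated pcl->next, so the claim type is simply
 * re-evaluated against the new value.
 */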

static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt,
					   struct inode *inode,
					   struct erofs_map_blocks *map)
{
	struct erofs_workgroup *grp;
	struct z_erofs_pcluster *pcl;
	struct z_erofs_collection *cl;
	unsigned int length;
	bool tag;

	grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag);
	if (!grp)
		return NULL;

	pcl = container_of(grp, struct z_erofs_pcluster, obj);
	if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) {
		DBG_BUGON(1);
		erofs_workgroup_put(grp);
		return ERR_PTR(-EFSCORRUPTED);
	}

	cl = z_erofs_primarycollection(pcl);
	if (cl->pageofs != (map->m_la & ~PAGE_MASK)) {
		DBG_BUGON(1);
		erofs_workgroup_put(grp);
		return ERR_PTR(-EFSCORRUPTED);
	}

	length = READ_ONCE(pcl->length);
	if (length & Z_EROFS_PCLUSTER_FULL_LENGTH) {
		if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) {
			DBG_BUGON(1);
			erofs_workgroup_put(grp);
			return ERR_PTR(-EFSCORRUPTED);
		}
	} else {
		unsigned int llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT;

		if (map->m_flags & EROFS_MAP_FULL_MAPPED)
			llen |= Z_EROFS_PCLUSTER_FULL_LENGTH;

		while (llen > length &&
		       length != cmpxchg_relaxed(&pcl->length, length, llen)) {
			cpu_relax();
			length = READ_ONCE(pcl->length);
		}
	}
	mutex_lock(&cl->lock);
	/* used to check tail merging loop due to corrupted images */
	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
		clt->tailpcl = pcl;
	clt->mode = try_to_claim_pcluster(pcl, &clt->owned_head);
	/* clean tailpcl if the current owned_head is Z_EROFS_PCLUSTER_TAIL */
	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
		clt->tailpcl = NULL;
	clt->pcl = pcl;
	clt->cl = cl;
	return cl;
}

static struct z_erofs_collection *clregister(struct z_erofs_collector *clt,
					     struct inode *inode,
					     struct erofs_map_blocks *map)
{
	struct z_erofs_pcluster *pcl;
	struct z_erofs_collection *cl;
	int err;

	/* no available workgroup, let's allocate one */
	pcl = kmem_cache_alloc(pcluster_cachep, GFP_NOFS);
	if (!pcl)
		return ERR_PTR(-ENOMEM);

	z_erofs_pcluster_init_always(pcl);
	pcl->obj.index = map->m_pa >> PAGE_SHIFT;

	pcl->length = (map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) |
		(map->m_flags & EROFS_MAP_FULL_MAPPED ?
			Z_EROFS_PCLUSTER_FULL_LENGTH : 0);

	if (map->m_flags & EROFS_MAP_ZIPPED)
		pcl->algorithmformat = Z_EROFS_COMPRESSION_LZ4;
	else
		pcl->algorithmformat = Z_EROFS_COMPRESSION_SHIFTED;

	pcl->clusterbits = EROFS_I(inode)->z_physical_clusterbits[0];
	pcl->clusterbits -= PAGE_SHIFT;

	/* new pclusters should be claimed as type 1, primary and followed */
	pcl->next = clt->owned_head;
	clt->mode = COLLECT_PRIMARY_FOLLOWED;

	cl = z_erofs_primarycollection(pcl);
	cl->pageofs = map->m_la & ~PAGE_MASK;

	/*
	 * lock all primary followed works before they are visible to others
	 * and mutex_trylock *never* fails for a new pcluster.
	 */
	mutex_trylock(&cl->lock);

	err = erofs_register_workgroup(inode->i_sb, &pcl->obj, 0);
	if (err) {
		mutex_unlock(&cl->lock);
		kmem_cache_free(pcluster_cachep, pcl);
		return ERR_PTR(-EAGAIN);
	}
	/* used to check tail merging loop due to corrupted images */
	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
		clt->tailpcl = pcl;
	clt->owned_head = &pcl->next;
	clt->pcl = pcl;
	clt->cl = cl;
	return cl;
}

static int z_erofs_collector_begin(struct z_erofs_collector *clt,
				   struct inode *inode,
				   struct erofs_map_blocks *map)
{
	struct z_erofs_collection *cl;

	DBG_BUGON(clt->cl);

	/* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous collection */
	DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_NIL);
	DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);

	if (!PAGE_ALIGNED(map->m_pa)) {
		DBG_BUGON(1);
		return -EINVAL;
	}

repeat:
	cl = cllookup(clt, inode, map);
	if (!cl) {
		cl = clregister(clt, inode, map);

		if (cl == ERR_PTR(-EAGAIN))
			goto repeat;
	}

	if (IS_ERR(cl))
		return PTR_ERR(cl);

	z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS,
				  cl->pagevec, cl->vcnt);

	clt->compressedpages = clt->pcl->compressed_pages;
	if (clt->mode <= COLLECT_PRIMARY) /* cannot do in-place I/O */
		clt->compressedpages += Z_EROFS_CLUSTER_MAX_PAGES;
	return 0;
}

/*
 * keep in mind that referenced pclusters won't be freed;
 * unreferenced ones are only freed after an RCU grace period.
 */
static void z_erofs_rcu_callback(struct rcu_head *head)
{
	struct z_erofs_collection *const cl =
		container_of(head, struct z_erofs_collection, rcu);

	kmem_cache_free(pcluster_cachep,
			container_of(cl, struct z_erofs_pcluster,
				     primary_collection));
}

void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
{
	struct z_erofs_pcluster *const pcl =
		container_of(grp, struct z_erofs_pcluster, obj);
	struct z_erofs_collection *const cl = z_erofs_primarycollection(pcl);

	call_rcu(&cl->rcu, z_erofs_rcu_callback);
}
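
/*
 * Freeing through call_rcu() keeps lookups safe: a reader that has just
 * found this pcluster may still dereference it until its RCU read-side
 * critical section ends.
 */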

static void z_erofs_collection_put(struct z_erofs_collection *cl)
{
	struct z_erofs_pcluster *const pcl =
		container_of(cl, struct z_erofs_pcluster, primary_collection);

	erofs_workgroup_put(&pcl->obj);
}

static bool z_erofs_collector_end(struct z_erofs_collector *clt)
{
	struct z_erofs_collection *cl = clt->cl;

	if (!cl)
		return false;

	z_erofs_pagevec_ctor_exit(&clt->vector, false);
	mutex_unlock(&cl->lock);

	/*
	 * if all pending pages are added, don't hold its reference
	 * any longer if the pcluster isn't hosted by ourselves.
	 */
	if (clt->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE)
		z_erofs_collection_put(cl);

	clt->cl = NULL;
	return true;
}

static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
					       gfp_t gfp)
{
	struct page *page = erofs_allocpage(pagepool, gfp, true);

	page->mapping = Z_EROFS_MAPPING_STAGING;
	return page;
}
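
/*
 * Staging pages are short-lived anonymous pages tagged with the special
 * Z_EROFS_MAPPING_STAGING mapping; they hold temporary (de)compressed
 * data and are recycled through the per-request pagepool.
 */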

static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
				       unsigned int cachestrategy,
				       erofs_off_t la)
{
	if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
		return false;

	if (fe->backmost)
		return true;

	return cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
		la < fe->headoffset;
}
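
/*
 * In short: never cache when caching is disabled, always cache the
 * backmost extent of a request, and for EROFS_ZIP_CACHE_READAROUND also
 * cache extents placed before the requested start offset.
 */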

static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
				struct page *page,
				struct list_head *pagepool)
{
	struct inode *const inode = fe->inode;
	struct erofs_sb_info *const sbi __maybe_unused = EROFS_I_SB(inode);
	struct erofs_map_blocks *const map = &fe->map;
	struct z_erofs_collector *const clt = &fe->clt;
	const loff_t offset = page_offset(page);
	bool tight = true;

	enum z_erofs_cache_alloctype cache_strategy;
	enum z_erofs_page_type page_type;
	unsigned int cur, end, spiltted, index;
	int err = 0;

	/* register locked file pages as online pages in pack */
	z_erofs_onlinepage_init(page);

	spiltted = 0;
	end = PAGE_SIZE;
repeat:
	cur = end - 1;

	/* lucky, within the range of the current map_blocks */
	if (offset + cur >= map->m_la &&
	    offset + cur < map->m_la + map->m_llen) {
		/* didn't get a valid collection previously (very rare) */
		if (!clt->cl)
			goto restart_now;
		goto hitted;
	}

	/* move on to the next map_blocks */
	erofs_dbg("%s: [out-of-range] pos %llu", __func__, offset + cur);

	if (z_erofs_collector_end(clt))
		fe->backmost = false;

	map->m_la = offset + cur;
	map->m_llen = 0;
	err = z_erofs_map_blocks_iter(inode, map, 0);
	if (err)
		goto err_out;

restart_now:
	if (!(map->m_flags & EROFS_MAP_MAPPED))
		goto hitted;

	err = z_erofs_collector_begin(clt, inode, map);
	if (err)
		goto err_out;

	/* preload all compressed pages (maybe downgrade role if necessary) */
	if (should_alloc_managed_pages(fe, sbi->cache_strategy, map->m_la))
		cache_strategy = DELAYEDALLOC;
	else
		cache_strategy = DONTALLOC;

	preload_compressed_pages(clt, MNGD_MAPPING(sbi),
				 cache_strategy, pagepool);

hitted:
	/*
	 * Ensure the current partial page belongs to this submit chain rather
	 * than other concurrent submit chains or the noio(bypass) chain since
	 * those chains are handled asynchronously thus the page cannot be used
	 * for inplace I/O or pagevec (should be processed in strict order.)
	 */
	tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED &&
		  clt->mode != COLLECT_PRIMARY_FOLLOWED_NOINPLACE);

	cur = end - min_t(unsigned int, offset + end - map->m_la, end);
	if (!(map->m_flags & EROFS_MAP_MAPPED)) {
		zero_user_segment(page, cur, end);
		goto next_part;
	}

	/* let's derive page type */
	page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
		(!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
			(tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
				Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));

	if (cur)
		tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED);

retry:
	err = z_erofs_attach_page(clt, page, page_type);
	/* should allocate an additional staging page for pagevec */
	if (err == -EAGAIN) {
		struct page *const newpage =
			__stagingpage_alloc(pagepool, GFP_NOFS);

		err = z_erofs_attach_page(clt, newpage,
					  Z_EROFS_PAGE_TYPE_EXCLUSIVE);
		if (!err)
			goto retry;
	}

	if (err)
		goto err_out;

	index = page->index - (map->m_la >> PAGE_SHIFT);

	z_erofs_onlinepage_fixup(page, index, true);

	/* bump up the number of spiltted parts of a page */
	++spiltted;
	/* also update nr_pages */
	clt->cl->nr_pages = max_t(pgoff_t, clt->cl->nr_pages, index + 1);
next_part:
	/* can be used for verification */
	map->m_llen = offset + cur - map->m_la;

	end = cur;
	if (end > 0)
		goto repeat;

out:
	z_erofs_onlinepage_endio(page);

	erofs_dbg("%s, finish page: %pK spiltted: %u map->m_llen %llu",
		  __func__, page, spiltted, map->m_llen);
	return err;

	/* if some error occurred while processing this page */
err_out:
	SetPageError(page);
	goto out;
}
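
/*
 * The single tag bit folded into bio->bi_private (see jobqueueset_init()
 * below) tells the kickoff helper which completion model to use:
 * foreground waiters are woken through the waitqueue, while background
 * requests kick the decompression workqueue instead.
 */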

static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
{
	tagptr1_t t = tagptr_init(tagptr1_t, ptr);
	struct z_erofs_unzip_io *io = tagptr_unfold_ptr(t);
	bool background = tagptr_unfold_tags(t);

	if (!background) {
		unsigned long flags;

		spin_lock_irqsave(&io->u.wait.lock, flags);
		if (!atomic_add_return(bios, &io->pending_bios))
			wake_up_locked(&io->u.wait);
		spin_unlock_irqrestore(&io->u.wait.lock, flags);
		return;
	}

	if (!atomic_add_return(bios, &io->pending_bios))
		queue_work(z_erofs_workqueue, &io->u.work);
}

static inline void z_erofs_vle_read_endio(struct bio *bio)
{
	struct erofs_sb_info *sbi = NULL;
	blk_status_t err = bio->bi_status;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		bool cachemngd = false;

		DBG_BUGON(PageUptodate(page));
		DBG_BUGON(!page->mapping);

		if (!sbi && !z_erofs_page_is_staging(page))
			sbi = EROFS_SB(page->mapping->host->i_sb);

		/* sbi should already be gotten if the page is managed */
		if (sbi)
			cachemngd = erofs_page_is_managed(sbi, page);

		if (err)
			SetPageError(page);
		else if (cachemngd)
			SetPageUptodate(page);

		if (cachemngd)
			unlock_page(page);
	}

	z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
	bio_put(bio);
}
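
/*
 * z_erofs_decompress_pcluster() is the heart of the decompression
 * backend: it gathers all file pages and compressed pages of one
 * pcluster, decompresses them in one shot, then recycles every
 * temporary page it used.
 */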

static int z_erofs_decompress_pcluster(struct super_block *sb,
				       struct z_erofs_pcluster *pcl,
				       struct list_head *pagepool)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	const unsigned int clusterpages = BIT(pcl->clusterbits);
	struct z_erofs_pagevec_ctor ctor;
	unsigned int i, outputsize, llen, nr_pages;
	struct page *pages_onstack[Z_EROFS_VMAP_ONSTACK_PAGES];
	struct page **pages, **compressed_pages, *page;

	enum z_erofs_page_type page_type;
	bool overlapped, partial;
	struct z_erofs_collection *cl;
	int err;

	might_sleep();
	cl = z_erofs_primarycollection(pcl);
	DBG_BUGON(!READ_ONCE(cl->nr_pages));

	mutex_lock(&cl->lock);
	nr_pages = cl->nr_pages;

	if (nr_pages <= Z_EROFS_VMAP_ONSTACK_PAGES) {
		pages = pages_onstack;
	} else if (nr_pages <= Z_EROFS_VMAP_GLOBAL_PAGES &&
		   mutex_trylock(&z_pagemap_global_lock)) {
		pages = z_pagemap_global;
	} else {
		gfp_t gfp_flags = GFP_KERNEL;

		if (nr_pages > Z_EROFS_VMAP_GLOBAL_PAGES)
			gfp_flags |= __GFP_NOFAIL;

		pages = kvmalloc_array(nr_pages, sizeof(struct page *),
				       gfp_flags);

		/* fallback to global pagemap for the lowmem scenario */
		if (!pages) {
			mutex_lock(&z_pagemap_global_lock);
			pages = z_pagemap_global;
		}
	}

	for (i = 0; i < nr_pages; ++i)
		pages[i] = NULL;

	err = 0;
	z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_NR_INLINE_PAGEVECS,
				  cl->pagevec, 0);

	for (i = 0; i < cl->vcnt; ++i) {
		unsigned int pagenr;

		page = z_erofs_pagevec_dequeue(&ctor, &page_type);

		/* all pages in pagevec ought to be valid */
		DBG_BUGON(!page);
		DBG_BUGON(!page->mapping);

		if (z_erofs_put_stagingpage(pagepool, page))
			continue;

		if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
			pagenr = 0;
		else
			pagenr = z_erofs_onlinepage_index(page);

		DBG_BUGON(pagenr >= nr_pages);

		/*
		 * currently EROFS doesn't support multiref(dedup),
		 * so we error out on a multiref page here.
		 */
		if (pages[pagenr]) {
			DBG_BUGON(1);
			SetPageError(pages[pagenr]);
			z_erofs_onlinepage_endio(pages[pagenr]);
			err = -EFSCORRUPTED;
		}
		pages[pagenr] = page;
	}
	z_erofs_pagevec_ctor_exit(&ctor, true);

	overlapped = false;
	compressed_pages = pcl->compressed_pages;

	for (i = 0; i < clusterpages; ++i) {
		unsigned int pagenr;

		page = compressed_pages[i];

		/* all compressed pages ought to be valid */
		DBG_BUGON(!page);
		DBG_BUGON(!page->mapping);

		if (!z_erofs_page_is_staging(page)) {
			if (erofs_page_is_managed(sbi, page)) {
				if (!PageUptodate(page))
					err = -EIO;
				continue;
			}

			/*
			 * only non-head pages can be selected
			 * for inplace decompression
			 */
			pagenr = z_erofs_onlinepage_index(page);

			DBG_BUGON(pagenr >= nr_pages);
			if (pages[pagenr]) {
				DBG_BUGON(1);
				SetPageError(pages[pagenr]);
				z_erofs_onlinepage_endio(pages[pagenr]);
				err = -EFSCORRUPTED;
			}
			pages[pagenr] = page;

			overlapped = true;
		}

		/* PG_error needs checking for inplaced and staging pages */
		if (PageError(page)) {
			DBG_BUGON(PageUptodate(page));
			err = -EIO;
		}
	}

	if (err)
		goto out;

	llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT;
	if (nr_pages << PAGE_SHIFT >= cl->pageofs + llen) {
		outputsize = llen;
		partial = !(pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH);
	} else {
		outputsize = (nr_pages << PAGE_SHIFT) - cl->pageofs;
		partial = true;
	}

	err = z_erofs_decompress(&(struct z_erofs_decompress_req) {
					.sb = sb,
					.in = compressed_pages,
					.out = pages,
					.pageofs_out = cl->pageofs,
					.inputsize = PAGE_SIZE,
					.outputsize = outputsize,
					.alg = pcl->algorithmformat,
					.inplace_io = overlapped,
					.partial_decoding = partial
				 }, pagepool);

out:
	/* must handle all compressed pages before ending pages */
	for (i = 0; i < clusterpages; ++i) {
		page = compressed_pages[i];

		if (erofs_page_is_managed(sbi, page))
			continue;

		/* recycle all individual staging pages */
		(void)z_erofs_put_stagingpage(pagepool, page);

		WRITE_ONCE(compressed_pages[i], NULL);
	}

	for (i = 0; i < nr_pages; ++i) {
		page = pages[i];
		if (!page)
			continue;

		DBG_BUGON(!page->mapping);

		/* recycle all individual staging pages */
		if (z_erofs_put_stagingpage(pagepool, page))
			continue;

		if (err < 0)
			SetPageError(page);

		z_erofs_onlinepage_endio(page);
	}

	if (pages == z_pagemap_global)
		mutex_unlock(&z_pagemap_global_lock);
	else if (pages != pages_onstack)
		kvfree(pages);

	cl->nr_pages = 0;
	cl->vcnt = 0;

	/* all cl locks MUST be taken before the following line */
	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);

	/* all cl locks SHOULD be released right now */
	mutex_unlock(&cl->lock);

	z_erofs_collection_put(cl);
	return err;
}
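
/*
 * Walking a closed chain below consumes each link before the pcluster
 * is decompressed, since decompression resets pcl->next.
 */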

static void z_erofs_vle_unzip_all(struct super_block *sb,
				  struct z_erofs_unzip_io *io,
				  struct list_head *pagepool)
{
	z_erofs_next_pcluster_t owned = io->head;

	while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) {
		struct z_erofs_pcluster *pcl;

		/* impossible that 'owned' equals Z_EROFS_PCLUSTER_TAIL */
		DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL);

		/* impossible that 'owned' equals NULL */
		DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);

		pcl = container_of(owned, struct z_erofs_pcluster, next);
		owned = READ_ONCE(pcl->next);

		z_erofs_decompress_pcluster(sb, pcl, pagepool);
	}
}

static void z_erofs_vle_unzip_wq(struct work_struct *work)
{
	struct z_erofs_unzip_io_sb *iosb =
		container_of(work, struct z_erofs_unzip_io_sb, io.u.work);
	LIST_HEAD(pagepool);

	DBG_BUGON(iosb->io.head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
	z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &pagepool);

	put_pages_list(&pagepool);
	kvfree(iosb);
}
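
/*
 * pickup_page_for_submission() resolves one compressed_pages[] slot to
 * a page that can be put into a read bio, reusing a cached page when
 * possible and falling back to a fresh staging page otherwise.
 */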

static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
					       unsigned int nr,
					       struct list_head *pagepool,
					       struct address_space *mc,
					       gfp_t gfp)
{
	/* determined at compile time to avoid too many #ifdefs */
	const bool nocache = __builtin_constant_p(mc) ? !mc : false;
	const pgoff_t index = pcl->obj.index;
	bool tocache = false;

	struct address_space *mapping;
	struct page *oldpage, *page;

	compressed_page_t t;
	int justfound;

repeat:
	page = READ_ONCE(pcl->compressed_pages[nr]);
	oldpage = page;

	if (!page)
		goto out_allocpage;

	/*
	 * the cached page has not been allocated and
	 * a placeholder is out there, prepare it now.
	 */
	if (!nocache && page == PAGE_UNALLOCATED) {
		tocache = true;
		goto out_allocpage;
	}

	/* process the target tagged pointer */
	t = tagptr_init(compressed_page_t, page);
	justfound = tagptr_unfold_tags(t);
	page = tagptr_unfold_ptr(t);

	mapping = READ_ONCE(page->mapping);

	/*
	 * if managed cache is disabled, there is no way to
	 * get such a cached-like page.
	 */
	if (nocache) {
		/* `justfound' is impossible if managed cache is disabled */
		DBG_BUGON(justfound);

		/* and it should be locked, not uptodate, and not truncated */
		DBG_BUGON(!PageLocked(page));
		DBG_BUGON(PageUptodate(page));
		DBG_BUGON(!mapping);
		goto out;
	}

	/*
	 * unmanaged (file) pages are all locked solidly,
	 * therefore it is impossible for `mapping' to be NULL.
	 */
	if (mapping && mapping != mc)
		/* ought to be unmanaged pages */
		goto out;

	lock_page(page);

	/* only true if page reclaim goes wrong, should never happen */
	DBG_BUGON(justfound && PagePrivate(page));

	/* the page is still in the managed cache */
	if (page->mapping == mc) {
		WRITE_ONCE(pcl->compressed_pages[nr], page);

		ClearPageError(page);
		if (!PagePrivate(page)) {
			/*
			 * impossible to be !PagePrivate(page) for
			 * the current restriction as well if
			 * the page is already in compressed_pages[].
			 */
			DBG_BUGON(!justfound);

			justfound = 0;
			set_page_private(page, (unsigned long)pcl);
			SetPagePrivate(page);
		}

		/* no need to submit io if it is already up-to-date */
		if (PageUptodate(page)) {
			unlock_page(page);
			page = NULL;
		}
		goto out;
	}

	/*
	 * the managed page has been truncated, it's unsafe to
	 * reuse this one, let's allocate a new cache-managed page.
	 */
	DBG_BUGON(page->mapping);
	DBG_BUGON(!justfound);

	tocache = true;
	unlock_page(page);
	put_page(page);
out_allocpage:
	page = __stagingpage_alloc(pagepool, gfp);
	if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
		list_add(&page->lru, pagepool);
		cpu_relax();
		goto repeat;
	}
	if (nocache || !tocache)
		goto out;
	if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
		page->mapping = Z_EROFS_MAPPING_STAGING;
		goto out;
	}

	set_page_private(page, (unsigned long)pcl);
	SetPagePrivate(page);
out:	/* the only exit (for tracing and debugging) */
	return page;
}

static struct z_erofs_unzip_io *jobqueue_init(struct super_block *sb,
					      struct z_erofs_unzip_io *io,
					      bool foreground)
{
	struct z_erofs_unzip_io_sb *iosb;

	if (foreground) {
		/* waitqueue available for foreground io */
		DBG_BUGON(!io);

		init_waitqueue_head(&io->u.wait);
		atomic_set(&io->pending_bios, 0);
		goto out;
	}

	iosb = kvzalloc(sizeof(*iosb), GFP_KERNEL | __GFP_NOFAIL);
	DBG_BUGON(!iosb);

	/* initialize fields in the allocated descriptor */
	io = &iosb->io;
	iosb->sb = sb;
	INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq);
out:
	io->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
	return io;
}

/* define decompression jobqueue types */
enum {
	JQ_BYPASS,
	JQ_SUBMIT,
	NR_JOBQUEUES,
};

static void *jobqueueset_init(struct super_block *sb,
			      z_erofs_next_pcluster_t qtail[],
			      struct z_erofs_unzip_io *q[],
			      struct z_erofs_unzip_io *fgq,
			      bool forcefg)
{
	/*
	 * if managed cache is enabled, bypass jobqueue is needed,
	 * no need to read from device for all pclusters in this queue.
	 */
	q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true);
	qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;

	q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg);
	qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;

	return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg));
}
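
/*
 * After jobqueueset_init(), every claimed pcluster ends up on exactly
 * one of the two queues: JQ_SUBMIT for pclusters which still need disk
 * I/O, or JQ_BYPASS for those fully satisfied by the managed cache.
 */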

static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
				    z_erofs_next_pcluster_t qtail[],
				    z_erofs_next_pcluster_t owned_head)
{
	z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
	z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];

	DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
	if (owned_head == Z_EROFS_PCLUSTER_TAIL)
		owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED;

	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED);

	WRITE_ONCE(*submit_qtail, owned_head);
	WRITE_ONCE(*bypass_qtail, &pcl->next);

	qtail[JQ_BYPASS] = &pcl->next;
}

static bool postsubmit_is_all_bypassed(struct z_erofs_unzip_io *q[],
				       unsigned int nr_bios,
				       bool force_fg)
{
	/*
	 * although background is preferred, no one is pending for submission.
	 * don't issue workqueue for decompression but drop it directly instead.
	 */
	if (force_fg || nr_bios)
		return false;

	kvfree(container_of(q[JQ_SUBMIT], struct z_erofs_unzip_io_sb, io));
	return true;
}

static bool z_erofs_vle_submit_all(struct super_block *sb,
				   z_erofs_next_pcluster_t owned_head,
				   struct list_head *pagepool,
				   struct z_erofs_unzip_io *fgq,
				   bool force_fg)
{
	struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
	struct z_erofs_unzip_io *q[NR_JOBQUEUES];
	struct bio *bio;
	void *bi_private;
	/* since bio will be NULL, no need to initialize last_index */
	pgoff_t uninitialized_var(last_index);
	bool force_submit = false;
	unsigned int nr_bios;

	if (owned_head == Z_EROFS_PCLUSTER_TAIL)
		return false;

	force_submit = false;
	bio = NULL;
	nr_bios = 0;
	bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg);

	/* by default, all need io submission */
	q[JQ_SUBMIT]->head = owned_head;

	do {
		struct z_erofs_pcluster *pcl;
		unsigned int clusterpages;
		pgoff_t first_index;
		struct page *page;
		unsigned int i = 0, bypass = 0;
		int err;

		/* impossible that 'owned_head' equals the following */
		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);

		pcl = container_of(owned_head, struct z_erofs_pcluster, next);

		clusterpages = BIT(pcl->clusterbits);

		/* close the main owned chain at first */
		owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
				     Z_EROFS_PCLUSTER_TAIL_CLOSED);

		first_index = pcl->obj.index;
		force_submit |= (first_index != last_index + 1);

repeat:
		page = pickup_page_for_submission(pcl, i, pagepool,
						  MNGD_MAPPING(sbi),
						  GFP_NOFS);
		if (!page) {
			force_submit = true;
			++bypass;
			goto skippage;
		}

		if (bio && force_submit) {
submit_bio_retry:
			submit_bio(bio);
			bio = NULL;
		}

		if (!bio) {
			bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);

			bio->bi_end_io = z_erofs_vle_read_endio;
			bio_set_dev(bio, sb->s_bdev);
			bio->bi_iter.bi_sector = (sector_t)(first_index + i) <<
				LOG_SECTORS_PER_BLOCK;
			bio->bi_private = bi_private;
			bio->bi_opf = REQ_OP_READ;

			++nr_bios;
		}

		err = bio_add_page(bio, page, PAGE_SIZE, 0);
		if (err < PAGE_SIZE)
			goto submit_bio_retry;

		force_submit = false;
		last_index = first_index + i;
skippage:
		if (++i < clusterpages)
			goto repeat;

		if (bypass < clusterpages)
			qtail[JQ_SUBMIT] = &pcl->next;
		else
			move_to_bypass_jobqueue(pcl, qtail, owned_head);
	} while (owned_head != Z_EROFS_PCLUSTER_TAIL);

	if (bio)
		submit_bio(bio);

	if (postsubmit_is_all_bypassed(q, nr_bios, force_fg))
		return true;

	z_erofs_vle_unzip_kickoff(bi_private, nr_bios);
	return true;
}
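
/*
 * z_erofs_submit_and_unzip() drives a whole read request: it submits
 * all bios, decompresses cache-only (bypass) pclusters immediately, and
 * for synchronous reads also waits for and decompresses the submitted
 * chain.
 */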

static void z_erofs_submit_and_unzip(struct super_block *sb,
				     struct z_erofs_collector *clt,
				     struct list_head *pagepool,
				     bool force_fg)
{
	struct z_erofs_unzip_io io[NR_JOBQUEUES];

	if (!z_erofs_vle_submit_all(sb, clt->owned_head,
				    pagepool, io, force_fg))
		return;

	/* decompress no-I/O pclusters immediately */
	z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool);

	if (!force_fg)
		return;

	/* wait until all bios are completed */
	wait_event(io[JQ_SUBMIT].u.wait,
		   !atomic_read(&io[JQ_SUBMIT].pending_bios));

	/* let's do synchronous decompression */
	z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool);
}

static int z_erofs_vle_normalaccess_readpage(struct file *file,
					     struct page *page)
{
	struct inode *const inode = page->mapping->host;
	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
	int err;
	LIST_HEAD(pagepool);

	trace_erofs_readpage(page, false);

	f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;

	err = z_erofs_do_read_page(&f, page, &pagepool);
	(void)z_erofs_collector_end(&f.clt);

	/* if some compressed clusters are ready, submit them anyway */
	z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, true);

	if (err)
		erofs_err(inode->i_sb, "failed to read, err [%d]", err);

	if (f.map.mpage)
		put_page(f.map.mpage);

	/* clean up the remaining free pages */
	put_pages_list(&pagepool);
	return err;
}

static bool should_decompress_synchronously(struct erofs_sb_info *sbi,
					    unsigned int nr)
{
	return nr <= sbi->max_sync_decompress_pages;
}

static int z_erofs_vle_normalaccess_readpages(struct file *filp,
					      struct address_space *mapping,
					      struct list_head *pages,
					      unsigned int nr_pages)
{
	struct inode *const inode = mapping->host;
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);

	bool sync = should_decompress_synchronously(sbi, nr_pages);
	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
	struct page *head = NULL;
	LIST_HEAD(pagepool);

	trace_erofs_readpages(mapping->host, lru_to_page(pages),
			      nr_pages, false);

	f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT;

	for (; nr_pages; --nr_pages) {
		struct page *page = lru_to_page(pages);

		prefetchw(&page->flags);
		list_del(&page->lru);

		/*
		 * A pure asynchronous readahead is indicated if
		 * a PG_readahead marked page is hit at first.
		 * Let's also do asynchronous decompression for this case.
		 */
		sync &= !(PageReadahead(page) && !head);

		if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			list_add(&page->lru, &pagepool);
			continue;
		}

		set_page_private(page, (unsigned long)head);
		head = page;
	}

	while (head) {
		struct page *page = head;
		int err;

		/* traversal in reverse order */
		head = (void *)page_private(page);

		err = z_erofs_do_read_page(&f, page, &pagepool);
		if (err)
			erofs_err(inode->i_sb,
				  "readahead error at page %lu @ nid %llu",
				  page->index, EROFS_I(inode)->nid);
		put_page(page);
	}

	(void)z_erofs_collector_end(&f.clt);

	z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, sync);

	if (f.map.mpage)
		put_page(f.map.mpage);

	/* clean up the remaining free pages */
	put_pages_list(&pagepool);
	return 0;
}

const struct address_space_operations z_erofs_vle_normalaccess_aops = {
	.readpage = z_erofs_vle_normalaccess_readpage,
	.readpages = z_erofs_vle_normalaccess_readpages,
};