Commit 266cf658efcf6ac33541a46740f74f50c79d2b6b

Authored by David Howells
Parent: 03fb3d2af9

FS-Cache: Recruit a page flag for cache management

Recruit a page flag to aid in cache management.  The following extra flag is
defined:

 (1) PG_fscache (PG_private_2)

     The marked page is backed by a local cache and is pinning resources in the
     cache driver.

If PG_fscache is set, then code paths that checked for PG_private now also
check for it; this includes truncation and page invalidation.  The function
page_has_private() has been added to check for both PG_private and
PG_private_2 at the same time (see the usage sketch below).

Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Steve Dickson <steved@redhat.com>
Acked-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
Tested-by: Daire Byrne <Daire.Byrne@framestore.com>
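
As a usage sketch (not part of this commit: the FsCache-named wrappers and the
helper below are hypothetical, layered over the Private2 accessors this patch
generates), a netfs would set the bit when a page becomes known to the cache
and clear it when the cache lets go:

	/* Hypothetical wrappers over the accessors generated by
	 * PAGEFLAG(Private2, private_2) TESTSCFLAG(Private2, private_2) */
	#define PageFsCache(page)	PagePrivate2((page))
	#define SetPageFsCache(page)	SetPagePrivate2((page))
	#define ClearPageFsCache(page)	ClearPagePrivate2((page))

	/* Sketch: mark a pagecache page as backed by the local cache.  While
	 * PG_fscache is set, truncation and invalidation will call back into
	 * the netfs so the cache can release its resources. */
	static void example_netfs_page_cached(struct page *page)
	{
		SetPageFsCache(page);
	}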

Showing 8 changed files with 58 additions and 25 deletions

fs/splice.c
@@ -59,7 +59,8 @@
	 */
	wait_on_page_writeback(page);
 
-	if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
+	if (page_has_private(page) &&
+	    !try_to_release_page(page, GFP_KERNEL))
		goto out_unlock;
 
	/*
include/linux/page-flags.h
@@ -82,6 +82,7 @@
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
+	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
 #ifdef CONFIG_PAGEFLAGS_EXTENDED
	PG_head,		/* A head page */
@@ -108,6 +109,12 @@
	/* Filesystems */
	PG_checked = PG_owner_priv_1,
 
+	/* Two page bits are conscripted by FS-Cache to maintain local caching
+	 * state.  These bits are set on pages belonging to the netfs's inodes
+	 * when those inodes are being locally cached.
+	 */
+	PG_fscache = PG_private_2,	/* page backed by cache */
+
	/* XEN */
	PG_pinned = PG_owner_priv_1,
	PG_savepinned = PG_dirty,
@@ -194,8 +201,6 @@
 PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned)	/* Xen */
 PAGEFLAG(SavePinned, savepinned);			/* Xen */
 PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
-PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private)
-	__SETPAGEFLAG(Private, private)
 PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
 
 __PAGEFLAG(SlobPage, slob_page)
@@ -205,6 +210,16 @@
 __PAGEFLAG(SlubDebug, slub_debug)
 
 /*
+ * Private page markings that may be used by the filesystem that owns the page
+ * for its own purposes.
+ * - PG_private and PG_private_2 cause releasepage() and co to be invoked
+ */
+PAGEFLAG(Private, private) __SETPAGEFLAG(Private, private)
+	__CLEARPAGEFLAG(Private, private)
+PAGEFLAG(Private2, private_2) TESTSCFLAG(Private2, private_2)
+PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1)
+
+/*
  * Only test-and-set exist for PG_writeback. The unconditional operators are
  * risky: they bypass page accounting.
  */
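
For reference, the accessor names produced by the added macro lines are (per
this header's macro conventions):

	/*
	 * PAGEFLAG(Private2, private_2)      -> PagePrivate2(),
	 *                                       SetPagePrivate2(),
	 *                                       ClearPagePrivate2()
	 * TESTSCFLAG(Private2, private_2)    -> TestSetPagePrivate2(),
	 *                                       TestClearPagePrivate2()
	 * TESTCLEARFLAG(OwnerPriv1, owner_priv_1)
	 *                                    -> TestClearPageOwnerPriv1()
	 */
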
@@ -384,9 +399,10 @@
  * these flags set.  If they are, there is a problem.
  */
 #define PAGE_FLAGS_CHECK_AT_FREE \
-	(1 << PG_lru	 | 1 << PG_private   | 1 << PG_locked | \
-	 1 << PG_buddy	 | 1 << PG_writeback | 1 << PG_reserved | \
-	 1 << PG_slab	 | 1 << PG_swapcache | 1 << PG_active | \
+	(1 << PG_lru	 | 1 << PG_locked    | \
+	 1 << PG_private | 1 << PG_private_2 | \
+	 1 << PG_buddy	 | 1 << PG_writeback | 1 << PG_reserved | \
+	 1 << PG_slab	 | 1 << PG_swapcache | 1 << PG_active | \
	 __PG_UNEVICTABLE | __PG_MLOCKED)
 
 /*
@@ -397,5 +413,17 @@
 #define PAGE_FLAGS_CHECK_AT_PREP	((1 << NR_PAGEFLAGS) - 1)
 
 #endif /* !__GENERATING_BOUNDS_H */
+
+/**
+ * page_has_private - Determine if page has private stuff
+ * @page: The page to be checked
+ *
+ * Determine if a page has private stuff, indicating that release routines
+ * should be invoked upon it.
+ */
+#define page_has_private(page)			\
+	((page)->flags & ((1 << PG_private) |	\
+			  (1 << PG_private_2)))
+
 #endif /* PAGE_FLAGS_H */
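
The page_has_private() macro tests both bits with a single mask; an equivalent
open-coded form (a restatement for clarity, not in the patch) is:

	static inline int example_page_has_private(struct page *page)
	{
		return PagePrivate(page) || PagePrivate2(page);
	}
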
mm/filemap.c
@@ -2463,6 +2463,9 @@
  * (presumably at page->private). If the release was successful, return `1'.
  * Otherwise return zero.
  *
+ * This may also be called if PG_fscache is set on a page, indicating that the
+ * page is known to the local caching routines.
+ *
  * The @gfp_mask argument specifies whether I/O may be performed to release
  * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
  *
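
A netfs ->releasepage() honouring this contract might look roughly as follows
(a sketch; example_cache_forget_page() is a hypothetical helper, not an API
defined by this commit):

	/* Hypothetical ->releasepage() for a netfs whose pages may carry
	 * PG_fscache: drop the cache's interest before letting the VM decide
	 * whether the page can go. */
	static int example_netfs_releasepage(struct page *page, gfp_t gfp)
	{
		if (PagePrivate2(page)) {		/* PG_fscache */
			example_cache_forget_page(page);	/* hypothetical */
			ClearPagePrivate2(page);
		}
		return !PagePrivate(page);	/* releasable if nothing else pinned */
	}
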
mm/migrate.c
@@ -250,7 +250,7 @@
  * The number of remaining references must be:
  * 1 for anonymous pages without a mapping
  * 2 for pages with a mapping
- * 3 for pages with a mapping and PagePrivate set.
+ * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
  */
 static int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page)
@@ -270,7 +270,7 @@
	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));
 
-	expected_count = 2 + !!PagePrivate(page);
+	expected_count = 2 + !!page_has_private(page);
	if (page_count(page) != expected_count ||
			(struct page *)radix_tree_deref_slot(pslot) != page) {
		spin_unlock_irq(&mapping->tree_lock);
@@ -386,7 +386,7 @@
 
 /*
  * Common logic to directly migrate a single page suitable for
- * pages that do not use PagePrivate.
+ * pages that do not use PagePrivate/PagePrivate2.
  *
  * Pages are locked upon entry and exit.
  */
@@ -522,7 +522,7 @@
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
-	if (PagePrivate(page) &&
+	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;
 
@@ -655,7 +655,7 @@
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
-		if (!PageAnon(page) && PagePrivate(page)) {
+		if (!PageAnon(page) && page_has_private(page)) {
			/*
			 * Go direct to try_to_free_buffers() here because
			 * a) that's what try_to_release_page() would do anyway
mm/readahead.c
@@ -33,14 +33,15 @@
 
 /*
  * see if a page needs releasing upon read_cache_pages() failure
- * - the caller of read_cache_pages() may have set PG_private before calling,
- *   such as the NFS fs marking pages that are cached locally on disk, thus we
- *   need to give the fs a chance to clean up in the event of an error
+ * - the caller of read_cache_pages() may have set PG_private or PG_fscache
+ *   before calling, such as the NFS fs marking pages that are cached locally
+ *   on disk, thus we need to give the fs a chance to clean up in the event of
+ *   an error
  */
 static void read_cache_pages_invalidate_page(struct address_space *mapping,
					     struct page *page)
 {
-	if (PagePrivate(page)) {
+	if (page_has_private(page)) {
		if (!trylock_page(page))
			BUG();
		page->mapping = mapping;
mm/swap.c
@@ -448,8 +448,8 @@
	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
 
-		if (PagePrivate(page) && trylock_page(page)) {
-			if (PagePrivate(page))
+		if (page_has_private(page) && trylock_page(page)) {
+			if (page_has_private(page))
				try_to_release_page(page, 0);
			unlock_page(page);
		}
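
The double test above is the usual check/lock/re-check pattern; an annotated
restatement of the same logic:

	if (page_has_private(page) && trylock_page(page)) {
		/* Re-check under the page lock: another task may have
		 * stripped the private data while we took the lock; only
		 * this locked test is authoritative. */
		if (page_has_private(page))
			try_to_release_page(page, 0);
		unlock_page(page);
	}
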
mm/truncate.c
@@ -50,7 +50,7 @@
 static inline void truncate_partial_page(struct page *page, unsigned partial)
 {
	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
-	if (PagePrivate(page))
+	if (page_has_private(page))
		do_invalidatepage(page, partial);
 }
 
@@ -99,7 +99,7 @@
	if (page->mapping != mapping)
		return;
 
-	if (PagePrivate(page))
+	if (page_has_private(page))
		do_invalidatepage(page, 0);
 
	cancel_dirty_page(page, PAGE_CACHE_SIZE);
@@ -126,7 +126,7 @@
	if (page->mapping != mapping)
		return 0;
 
-	if (PagePrivate(page) && !try_to_release_page(page, 0))
+	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;
 
	clear_page_mlock(page);
@@ -348,7 +348,7 @@
	if (page->mapping != mapping)
		return 0;
 
-	if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
+	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;
 
	spin_lock_irq(&mapping->tree_lock);
@@ -356,7 +356,7 @@
		goto failed;
 
	clear_page_mlock(page);
-	BUG_ON(PagePrivate(page));
+	BUG_ON(page_has_private(page));
	__remove_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	page_cache_release(page);	/* pagecache ref */
mm/vmscan.c
@@ -283,7 +283,7 @@
 
 static inline int is_page_cache_freeable(struct page *page)
 {
-	return page_count(page) - !!PagePrivate(page) == 2;
+	return page_count(page) - !!page_has_private(page) == 2;
 }
 
 static int may_write_to_queue(struct backing_dev_info *bdi)
@@ -367,7 +367,7 @@
	 * Some data journaling orphaned pages can have
	 * page->mapping == NULL while being dirty with clean buffers.
	 */
-	if (PagePrivate(page)) {
+	if (page_has_private(page)) {
		if (try_to_free_buffers(page)) {
			ClearPageDirty(page);
			printk("%s: orphaned page\n", __func__);
@@ -727,7 +727,7 @@
	 * process address space (page_count == 1) it can be freed.
	 * Otherwise, leave the page on the LRU so it is swappable.
	 */
-	if (PagePrivate(page)) {
+	if (page_has_private(page)) {
		if (!try_to_release_page(page, sc->gfp_mask))
			goto activate_locked;
		if (!mapping && page_count(page) == 1) {