Commit 03fb3d2af96c2783c3a5bc03f3d984cf422f0e69

Authored by David Howells
1 parent 8f0aa2f25b

FS-Cache: Release page->private after failed readahead

The attached patch causes read_cache_pages() to release page-private data on a
page for which add_to_page_cache() fails.  If the filler function fails, then
the problematic page is left attached to the pagecache (with appropriate flags
set, one presumes) and the remaining to-be-attached pages are invalidated and
discarded.  This permits pages with caching references associated with them to
be cleaned up.

The invalidatepage() address space op is called (indirectly) to do the honours.

Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Steve Dickson <steved@redhat.com>
Acked-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
Tested-by: Daire Byrne <Daire.Byrne@framestore.com>

Showing 2 changed files with 38 additions and 3 deletions Side-by-side Diff

include/linux/page-flags.h
... ... @@ -182,7 +182,7 @@
182 182  
183 183 struct page; /* forward declaration */
184 184  
185   -TESTPAGEFLAG(Locked, locked)
  185 +TESTPAGEFLAG(Locked, locked) TESTSETFLAG(Locked, locked)
186 186 PAGEFLAG(Error, error)
187 187 PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
188 188 PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
mm/readahead.c
... ... @@ -31,6 +31,41 @@
31 31  
32 32 #define list_to_page(head) (list_entry((head)->prev, struct page, lru))
33 33  
  34 +/*
  35 + * see if a page needs releasing upon read_cache_pages() failure
  36 + * - the caller of read_cache_pages() may have set PG_private before calling,
  37 + * such as the NFS fs marking pages that are cached locally on disk, thus we
  38 + * need to give the fs a chance to clean up in the event of an error
  39 + */
  40 +static void read_cache_pages_invalidate_page(struct address_space *mapping,
  41 + struct page *page)
  42 +{
  43 + if (PagePrivate(page)) {
  44 + if (!trylock_page(page))
  45 + BUG();
  46 + page->mapping = mapping;
  47 + do_invalidatepage(page, 0);
  48 + page->mapping = NULL;
  49 + unlock_page(page);
  50 + }
  51 + page_cache_release(page);
  52 +}
  53 +
  54 +/*
  55 + * release a list of pages, invalidating them first if need be
  56 + */
  57 +static void read_cache_pages_invalidate_pages(struct address_space *mapping,
  58 + struct list_head *pages)
  59 +{
  60 + struct page *victim;
  61 +
  62 + while (!list_empty(pages)) {
  63 + victim = list_to_page(pages);
  64 + list_del(&victim->lru);
  65 + read_cache_pages_invalidate_page(mapping, victim);
  66 + }
  67 +}
  68 +
34 69 /**
35 70 * read_cache_pages - populate an address space with some pages & start reads against them
36 71 * @mapping: the address_space
37 72  
... ... @@ -52,14 +87,14 @@
52 87 list_del(&page->lru);
53 88 if (add_to_page_cache_lru(page, mapping,
54 89 page->index, GFP_KERNEL)) {
55   - page_cache_release(page);
  90 + read_cache_pages_invalidate_page(mapping, page);
56 91 continue;
57 92 }
58 93 page_cache_release(page);
59 94  
60 95 ret = filler(data, page);
61 96 if (unlikely(ret)) {
62   - put_pages_list(pages);
  97 + read_cache_pages_invalidate_pages(mapping, pages);
63 98 break;
64 99 }
65 100 task_io_account_read(PAGE_CACHE_SIZE);