Commit c515e1fd361c2a08a9c2eb139396ec30a4f477dc

Authored by Dan Magenheimer
1 parent 077b1f83a6

mm/fs: add hooks to support cleancache

This fourth patch of eight in this cleancache series provides the
core hooks in VFS for: initializing cleancache per filesystem;
capturing clean pages as they are reclaimed from the page cache;
attempting to get pages from cleancache before a filesystem read;
and ensuring coherency between the page cache, disk, and cleancache.
Note that the placement of these hooks was stable from 2.6.18 to
2.6.38; a minor semantic change was required due to a patchset in
2.6.39.
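
For orientation, the hook surface this patch calls into is sketched
below. The prototypes are inferred from the call sites in the diff;
the authoritative declarations live in include/linux/cleancache.h,
added earlier in the series. Per-filesystem initialization appears in
this patch only as the cleancache_poolid = -1 default in alloc_super();
filesystems opt in later in the series.

    /* Sketch of the VFS-facing cleancache hooks used in this patch. */

    /* "get": before issuing a filesystem read, try to fill a
     * not-yet-uptodate page from cleancache; returns 0 on a hit
     * (see the do_mpage_readpage() hunk). */
    int cleancache_get_page(struct page *page);

    /* "put": capture a clean, uptodate page as it leaves the page
     * cache (see the __delete_from_page_cache() hunk). */
    void cleancache_put_page(struct page *page);

    /* "flush": invalidate cleancache copies at page, inode, and
     * filesystem granularity so stale data never outlives a truncate,
     * invalidate, or unmount; this is the coherency half. */
    void cleancache_flush_page(struct address_space *mapping,
                               struct page *page);
    void cleancache_flush_inode(struct address_space *mapping);
    void cleancache_flush_fs(struct super_block *sb);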

All hooks compile away to no-ops if CONFIG_CLEANCACHE is unset; if it
is set but no cleancache "backend" has claimed cleancache_ops, each
hook reduces to a check of a boolean global.
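
Concretely, each hook is a static inline wrapper; the following is a
condensed sketch of the include/linux/cleancache.h pattern for one
hook (simplified from the header added earlier in the series:
cleancache_enabled is the boolean global, and cleancache_fs_enabled()
is the per-filesystem opt-in test tied to the cleancache_poolid field
that alloc_super() below initializes to -1):

    #ifdef CONFIG_CLEANCACHE
    extern int cleancache_enabled;  /* set once a backend claims cleancache_ops */
    extern int __cleancache_get_page(struct page *);

    /* A filesystem is enabled once its pool id is non-negative;
     * alloc_super() starts every superblock at -1 (opted out). */
    static inline int cleancache_fs_enabled(struct page *page)
    {
            return page->mapping->host->i_sb->cleancache_poolid >= 0;
    }

    static inline int cleancache_get_page(struct page *page)
    {
            int ret = -1;

            /* with no backend registered, this costs one boolean test */
            if (cleancache_enabled && cleancache_fs_enabled(page))
                    ret = __cleancache_get_page(page);
            return ret;
    }
    #else
    /* CONFIG_CLEANCACHE unset: the hook compiles away to a no-op */
    static inline int cleancache_get_page(struct page *page)
    {
            return -1;
    }
    #endif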

Details and a FAQ can be found in Documentation/vm/cleancache.txt

[v8: minchan.kim@gmail.com: adapt to new remove_from_page_cache function]
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
Reviewed-by: Jeremy Fitzhardinge <jeremy@goop.org>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Al Viro <viro@ZenIV.linux.org.uk>
Cc: Matthew Wilcox <matthew@wil.cx>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Rik Van Riel <riel@redhat.com>
Cc: Jan Beulich <JBeulich@novell.com>
Cc: Andreas Dilger <adilger@sun.com>
Cc: Ted Ts'o <tytso@mit.edu>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <joel.becker@oracle.com>
Cc: Nitin Gupta <ngupta@vflare.org>

Showing 5 changed files with 32 additions and 0 deletions

fs/buffer.c
... ... @@ -41,6 +41,7 @@
41 41 #include <linux/bitops.h>
42 42 #include <linux/mpage.h>
43 43 #include <linux/bit_spinlock.h>
  44 +#include <linux/cleancache.h>
44 45  
45 46 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46 47  
... ... @@ -269,6 +270,10 @@
269 270 invalidate_bh_lrus();
270 271 lru_add_drain_all(); /* make sure all lru add caches are flushed */
271 272 invalidate_mapping_pages(mapping, 0, -1);
  273 + /* 99% of the time, we don't need to flush the cleancache on the bdev.
  274 + * But, for the strange corners, let's be cautious
  275 + */
  276 + cleancache_flush_inode(mapping);
272 277 }
273 278 EXPORT_SYMBOL(invalidate_bdev);
274 279  
fs/mpage.c
... ... @@ -27,6 +27,7 @@
27 27 #include <linux/writeback.h>
28 28 #include <linux/backing-dev.h>
29 29 #include <linux/pagevec.h>
  30 +#include <linux/cleancache.h>
30 31  
31 32 /*
32 33 * I/O completion handler for multipage BIOs.
... ... @@ -269,6 +270,12 @@
269 270 }
270 271 } else if (fully_mapped) {
271 272 SetPageMappedToDisk(page);
  273 + }
  274 +
  275 + if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
  276 + cleancache_get_page(page) == 0) {
  277 + SetPageUptodate(page);
  278 + goto confused;
272 279 }
273 280  
274 281 /*
fs/super.c
... ... @@ -31,6 +31,7 @@
31 31 #include <linux/mutex.h>
32 32 #include <linux/backing-dev.h>
33 33 #include <linux/rculist_bl.h>
  34 +#include <linux/cleancache.h>
34 35 #include "internal.h"
35 36  
36 37  
... ... @@ -112,6 +113,7 @@
112 113 s->s_maxbytes = MAX_NON_LFS;
113 114 s->s_op = &default_op;
114 115 s->s_time_gran = 1000000000;
  116 + s->cleancache_poolid = -1;
115 117 }
116 118 out:
117 119 return s;
... ... @@ -177,6 +179,7 @@
177 179 {
178 180 struct file_system_type *fs = s->s_type;
179 181 if (atomic_dec_and_test(&s->s_active)) {
  182 + cleancache_flush_fs(s);
180 183 fs->kill_sb(s);
181 184 /*
182 185 * We need to call rcu_barrier so all the delayed rcu free
mm/filemap.c
... ... @@ -34,6 +34,7 @@
34 34 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
35 35 #include <linux/memcontrol.h>
36 36 #include <linux/mm_inline.h> /* for page_is_file_cache() */
  37 +#include <linux/cleancache.h>
37 38 #include "internal.h"
38 39  
39 40 /*
... ... @@ -117,6 +118,16 @@
117 118 void __delete_from_page_cache(struct page *page)
118 119 {
119 120 struct address_space *mapping = page->mapping;
  121 +
  122 + /*
  123 + * if we're uptodate, flush out into the cleancache, otherwise
  124 + * invalidate any existing cleancache entries. We can't leave
  125 + * stale data around in the cleancache once our page is gone
  126 + */
  127 + if (PageUptodate(page) && PageMappedToDisk(page))
  128 + cleancache_put_page(page);
  129 + else
  130 + cleancache_flush_page(mapping, page);
120 131  
121 132 radix_tree_delete(&mapping->page_tree, page->index);
122 133 page->mapping = NULL;
mm/truncate.c
... ... @@ -19,6 +19,7 @@
19 19 #include <linux/task_io_accounting_ops.h>
20 20 #include <linux/buffer_head.h> /* grr. try_to_release_page,
21 21 do_invalidatepage */
  22 +#include <linux/cleancache.h>
22 23 #include "internal.h"
23 24  
24 25  
... ... @@ -51,6 +52,7 @@
51 52 static inline void truncate_partial_page(struct page *page, unsigned partial)
52 53 {
53 54 zero_user_segment(page, partial, PAGE_CACHE_SIZE);
  55 + cleancache_flush_page(page->mapping, page);
54 56 if (page_has_private(page))
55 57 do_invalidatepage(page, partial);
56 58 }
... ... @@ -214,6 +216,7 @@
214 216 pgoff_t next;
215 217 int i;
216 218  
  219 + cleancache_flush_inode(mapping);
217 220 if (mapping->nrpages == 0)
218 221 return;
219 222  
... ... @@ -291,6 +294,7 @@
291 294 pagevec_release(&pvec);
292 295 mem_cgroup_uncharge_end();
293 296 }
  297 + cleancache_flush_inode(mapping);
294 298 }
295 299 EXPORT_SYMBOL(truncate_inode_pages_range);
296 300  
... ... @@ -440,6 +444,7 @@
440 444 int did_range_unmap = 0;
441 445 int wrapped = 0;
442 446  
  447 + cleancache_flush_inode(mapping);
443 448 pagevec_init(&pvec, 0);
444 449 next = start;
445 450 while (next <= end && !wrapped &&
... ... @@ -498,6 +503,7 @@
498 503 mem_cgroup_uncharge_end();
499 504 cond_resched();
500 505 }
  506 + cleancache_flush_inode(mapping);
501 507 return ret;
502 508 }
503 509 EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);