Commit 31475dd611209413bace21651a400afb91d0bd9d

Authored by Hugh Dickins
Committed by Linus Torvalds
1 parent 69f07ec938

mm: a few small updates for radix-swap

Remove PageSwapBacked (!page_is_file_cache) cases from
add_to_page_cache_locked() and add_to_page_cache_lru(): those pages now
go through shmem_add_to_page_cache().
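
For reference (not part of this commit): page_is_file_cache(), whose <linux/mm_inline.h> include is dropped below, is at this point defined as the negation of PageSwapBacked, roughly (quoted from memory):

	static inline int page_is_file_cache(struct page *page)
	{
		return !PageSwapBacked(page);
	}

Since swap-backed pages no longer reach these paths, every page arriving there is file cache, which is why add_to_page_cache_lru() below can call lru_cache_add_file() unconditionally.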

Remove a comment on maximum tmpfs size from fsstack_copy_inode_size(),
and add a comment on swap entries to invalidate_mapping_pages().

And mincore_page() uses find_get_page() on what might be shmem or a
tmpfs file: allow for a radix_tree_exceptional_entry(), and proceed to
find_get_page() on swapper_space if so (oh, swapper_space needs #ifdef).
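
For reference (not part of this commit): radix_to_swp_entry() undoes the packing used elsewhere in this series to stash a swap entry directly in the page-cache radix tree as an exceptional entry. The <linux/shmem_fs.h> helpers look roughly like this (quoted from memory, treat as a sketch):

	static inline void *swp_to_radix_entry(swp_entry_t entry)
	{
		unsigned long value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT;

		return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY);
	}

	static inline swp_entry_t radix_to_swp_entry(void *arg)
	{
		swp_entry_t entry;

		entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
		return entry;
	}

radix_tree_exceptional_entry() just tests the RADIX_TREE_EXCEPTIONAL_ENTRY bit; a real struct page pointer is word-aligned, so its low bits are clear and it can never be mistaken for a packed swap entry.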

Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 4 changed files with 18 additions and 26 deletions

--- a/fs/stack.c
+++ b/fs/stack.c
@@ -29,10 +29,7 @@
	 *
	 * We don't actually know what locking is used at the lower level;
	 * but if it's a filesystem that supports quotas, it will be using
-	 * i_lock as in inode_add_bytes(). tmpfs uses other locking, and
-	 * its 32-bit is (just) able to exceed 2TB i_size with the aid of
-	 * holes; but its i_blocks cannot carry into the upper long without
-	 * almost 2TB swap - let's ignore that case.
+	 * i_lock as in inode_add_bytes().
	 */
	if (sizeof(i_blocks) > sizeof(long))
		spin_lock(&src->i_lock);

--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -33,7 +33,6 @@
 #include <linux/cpuset.h>
 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
 #include <linux/memcontrol.h>
-#include <linux/mm_inline.h> /* for page_is_file_cache() */
 #include <linux/cleancache.h>
 #include "internal.h"
 
@@ -462,6 +461,7 @@
	int error;
 
	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON(PageSwapBacked(page));
 
	error = mem_cgroup_cache_charge(page, current->mm,
					gfp_mask & GFP_RECLAIM_MASK);
@@ -479,8 +479,6 @@
	if (likely(!error)) {
		mapping->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
-		if (PageSwapBacked(page))
-			__inc_zone_page_state(page, NR_SHMEM);
		spin_unlock_irq(&mapping->tree_lock);
	} else {
		page->mapping = NULL;
 
@@ -502,22 +500,9 @@
 {
	int ret;
 
-	/*
-	 * Splice_read and readahead add shmem/tmpfs pages into the page cache
-	 * before shmem_readpage has a chance to mark them as SwapBacked: they
-	 * need to go on the anon lru below, and mem_cgroup_cache_charge
-	 * (called in add_to_page_cache) needs to know where they're going too.
-	 */
-	if (mapping_cap_swap_backed(mapping))
-		SetPageSwapBacked(page);
-
	ret = add_to_page_cache(page, mapping, offset, gfp_mask);
-	if (ret == 0) {
-		if (page_is_file_cache(page))
-			lru_cache_add_file(page);
-		else
-			lru_cache_add_anon(page);
-	}
+	if (ret == 0)
+		lru_cache_add_file(page);
	return ret;
 }
 EXPORT_SYMBOL_GPL(add_to_page_cache_lru);

--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -69,12 +69,14 @@
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (ie. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
-	 *
-	 * However when tmpfs moves the page from pagecache and into swapcache,
-	 * it is still in core, but the find_get_page below won't find it.
-	 * No big deal, but make a note of it.
	 */
	page = find_get_page(mapping, pgoff);
+#ifdef CONFIG_SWAP
+	if (radix_tree_exceptional_entry(page)) {
+		swp_entry_t swap = radix_to_swp_entry(page);
+		page = find_get_page(&swapper_space, swap.val);
+	}
+#endif
	if (page) {
		present = PageUptodate(page);
		page_cache_release(page);
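
As context for what this path serves, here is a minimal userspace sketch (my illustration, not part of the commit; the file name is hypothetical) that maps a file and uses mincore(2) to count how many of its pages are resident:

	/* mincore-resident.c: hypothetical test, not part of this commit.
	 * Usage: ./mincore-resident <file>
	 */
	#define _DEFAULT_SOURCE
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>
	#include <fcntl.h>
	#include <sys/mman.h>

	int main(int argc, char **argv)
	{
		long psize = sysconf(_SC_PAGESIZE);

		if (argc != 2) {
			fprintf(stderr, "usage: %s <file>\n", argv[0]);
			return 1;
		}

		int fd = open(argv[1], O_RDONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		off_t len = lseek(fd, 0, SEEK_END);
		if (len <= 0) {
			fprintf(stderr, "empty file\n");
			return 1;
		}

		size_t pages = ((size_t)len + psize - 1) / psize;
		void *addr = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
		if (addr == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		unsigned char *vec = malloc(pages);
		if (!vec || mincore(addr, len, vec) < 0) {
			perror("mincore");
			return 1;
		}

		size_t resident = 0;
		for (size_t i = 0; i < pages; i++)
			resident += vec[i] & 1;	/* low bit: page is in core */

		printf("%zu of %zu pages resident\n", resident, pages);
		return 0;
	}

Before this change, a tmpfs page that had been moved from the page cache into the swap cache, though still in core, was reported non-resident by such a probe (the comment removed above acknowledged exactly that); the new exceptional-entry branch follows the swap entry into swapper_space instead.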

--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -336,6 +336,14 @@
	unsigned long count = 0;
	int i;
 
+	/*
+	 * Note: this function may get called on a shmem/tmpfs mapping:
+	 * pagevec_lookup() might then return 0 prematurely (because it
+	 * got a gangful of swap entries); but it's hardly worth worrying
+	 * about - it can rarely have anything to free from such a mapping
+	 * (most pages are dirty), and already skips over any difficulties.
+	 */
+
	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {