Commit 28ecb60906e86e74e9ad4ac7e0218d8631e73a94

Authored by Nick Piggin
Committed by Chris Mason
1 parent 0cad8a1130

Btrfs: use add_to_page_cache_lru, use __page_cache_alloc

Pagecache pages should be allocated with __page_cache_alloc, so they
obey pagecache memory policies.
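
For illustration, a minimal sketch of the call-site pattern this moves to
(the wrapper function is hypothetical and only frames the call; the gfp
handling mirrors the compression.c hunk below):

#include <linux/pagemap.h>

/*
 * Hypothetical helper: allocate a page destined for a mapping's
 * pagecache. __page_cache_alloc() honours pagecache memory policies
 * (e.g. cpuset page spreading on NUMA kernels), which a bare
 * alloc_page() bypasses.
 */
static struct page *alloc_pagecache_page(struct address_space *mapping)
{
        /* Mask off __GFP_FS, as the patch does, to avoid fs recursion. */
        return __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
}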

add_to_page_cache_lru is exported, so it should be used. Benefits over
using a private pagevec: neater code, 128 bytes less stack used, percpu
LRU ordering is preserved, and the pagevec no longer needs to be flushed
before returning, so batching may be shared with other LRU insertions.
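
Condensed from the hunks below, the open-coded pattern being removed
versus the single exported call that replaces it:

        /* before: open-coded pagecache insert plus private-pagevec LRU add */
        if (add_to_page_cache(page, mapping, page->index, GFP_NOFS)) {
                page_cache_release(page);
                goto next;
        }
        page_cache_get(page);
        if (!pagevec_add(&pvec, page))
                __pagevec_lru_add_file(&pvec);
        /* ...plus a final flush before every return: */
        if (pagevec_count(&pvec))
                __pagevec_lru_add_file(&pvec);

        /* after: one call inserts the page and feeds the generic percpu
         * LRU-add batching, so no pagevec lives on this stack frame */
        if (add_to_page_cache_lru(page, mapping, page_index, GFP_NOFS)) {
                page_cache_release(page);
                goto next;
        }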

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Chris Mason <chris.mason@oracle.com>

Showing 2 changed files with 5 additions and 32 deletions

fs/btrfs/compression.c
@@ -31,7 +31,6 @@
 #include <linux/swap.h>
 #include <linux/writeback.h>
 #include <linux/bit_spinlock.h>
-#include <linux/pagevec.h>
 #include "compat.h"
 #include "ctree.h"
 #include "disk-io.h"
@@ -445,7 +444,6 @@
         unsigned long nr_pages = 0;
         struct extent_map *em;
         struct address_space *mapping = inode->i_mapping;
-        struct pagevec pvec;
         struct extent_map_tree *em_tree;
         struct extent_io_tree *tree;
         u64 end;
@@ -461,7 +459,6 @@
 
         end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
 
-        pagevec_init(&pvec, 0);
         while (last_offset < compressed_end) {
                 page_index = last_offset >> PAGE_CACHE_SHIFT;
 
@@ -478,26 +475,17 @@
                         goto next;
                 }
 
-                page = alloc_page(mapping_gfp_mask(mapping) & ~__GFP_FS);
+                page = __page_cache_alloc(mapping_gfp_mask(mapping) &
+                                          ~__GFP_FS);
                 if (!page)
                         break;
 
-                page->index = page_index;
-                /*
-                 * what we want to do here is call add_to_page_cache_lru,
-                 * but that isn't exported, so we reproduce it here
-                 */
-                if (add_to_page_cache(page, mapping,
-                                      page->index, GFP_NOFS)) {
+                if (add_to_page_cache_lru(page, mapping, page_index,
+                                          GFP_NOFS)) {
                         page_cache_release(page);
                         goto next;
                 }
 
-                /* open coding of lru_cache_add, also not exported */
-                page_cache_get(page);
-                if (!pagevec_add(&pvec, page))
-                        __pagevec_lru_add_file(&pvec);
-
                 end = last_offset + PAGE_CACHE_SIZE - 1;
                 /*
                  * at this point, we have a locked page in the page cache
@@ -551,8 +539,6 @@
 next:
                 last_offset += PAGE_CACHE_SIZE;
         }
-        if (pagevec_count(&pvec))
-                __pagevec_lru_add_file(&pvec);
         return 0;
 }
 
fs/btrfs/extent_io.c
@@ -2679,33 +2679,20 @@
 {
         struct bio *bio = NULL;
         unsigned page_idx;
-        struct pagevec pvec;
         unsigned long bio_flags = 0;
 
-        pagevec_init(&pvec, 0);
         for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                 struct page *page = list_entry(pages->prev, struct page, lru);
 
                 prefetchw(&page->flags);
                 list_del(&page->lru);
-                /*
-                 * what we want to do here is call add_to_page_cache_lru,
-                 * but that isn't exported, so we reproduce it here
-                 */
-                if (!add_to_page_cache(page, mapping,
+                if (!add_to_page_cache_lru(page, mapping,
                                         page->index, GFP_KERNEL)) {
-
-                        /* open coding of lru_cache_add, also not exported */
-                        page_cache_get(page);
-                        if (!pagevec_add(&pvec, page))
-                                __pagevec_lru_add_file(&pvec);
                         __extent_read_full_page(tree, page, get_extent,
                                                 &bio, 0, &bio_flags);
                 }
                 page_cache_release(page);
         }
-        if (pagevec_count(&pvec))
-                __pagevec_lru_add_file(&pvec);
         BUG_ON(!list_empty(pages));
         if (bio)
                 submit_one_bio(READ, bio, 0, bio_flags);