Commit 4a4ede23dd902513b3a17d3e61cef9baf650d33e

Authored by Mel Gorman
Committed by Jiri Slaby
1 parent b4fc580f75

mm: move zone->pages_scanned into a vmstat counter

commit 0d5d823ab4e608ec7b52ac4410de4cb74bbe0edd upstream.

zone->pages_scanned is a write-intensive cache line during page reclaim
and it's also updated during page free.  Move the counter into vmstat to
take advantage of the per-cpu updates and do not update it in the free
paths unless necessary.

On a small UMA machine running tiobench the difference is marginal.  On
a 4-node machine the overhead is more noticeable.  Note that automatic
NUMA balancing was disabled for this test as otherwise the system CPU
overhead is unpredictable.

          3.16.0-rc3  3.16.0-rc3  3.16.0-rc3
            vanilla rearrange-v5   vmstat-v5
User          746.94      759.78      774.56
System      65336.22    58350.98    32847.27
Elapsed     27553.52    27282.02    27415.04

Note that the overhead reduction will vary depending on where exactly
pages are allocated and freed.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
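
The saving comes from vmstat's per-cpu counter scheme: each CPU accumulates
small deltas locally and folds them into the zone-wide total only when a
threshold is crossed, rather than having every update dirty the same shared
cache line.  Below is a minimal userspace C11 sketch of that batching idea,
not the kernel implementation; THRESHOLD, fold_delta and local_delta are
illustrative names invented for this sketch.

/*
 * Userspace analogy of a per-cpu batched counter (C11 + pthreads).
 * Each thread plays the role of a CPU: it buffers updates in a local
 * delta and folds them into the shared counter only past a threshold.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define THRESHOLD 32            /* fold into the shared counter past this */
#define THREADS    4
#define UPDATES   100000

static atomic_long shared_count;        /* analogue of zone->vm_stat[item] */

static void fold_delta(long *local_delta)
{
        atomic_fetch_add(&shared_count, *local_delta);
        *local_delta = 0;
}

static void *worker(void *arg)
{
        long local_delta = 0;   /* analogue of a per-cpu vm_stat_diff */

        (void)arg;
        for (int i = 0; i < UPDATES; i++) {
                local_delta++;                    /* cheap, thread-local */
                if (local_delta >= THRESHOLD)
                        fold_delta(&local_delta); /* rare shared write */
        }
        fold_delta(&local_delta);                 /* drain the remainder */
        return NULL;
}

int main(void)
{
        pthread_t tid[THREADS];

        for (int i = 0; i < THREADS; i++)
                pthread_create(&tid[i], NULL, worker, NULL);
        for (int i = 0; i < THREADS; i++)
                pthread_join(tid[i], NULL);

        printf("shared_count = %ld (expected %d)\n",
               atomic_load(&shared_count), THREADS * UPDATES);
        return 0;
}

Folding only at the threshold trades a little precision in the global value
for far fewer cross-CPU cache-line transfers, which is consistent with the
large System-time drop on the 4-node machine above.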

Showing 4 changed files with 16 additions and 8 deletions

include/linux/mmzone.h
... ... @@ -143,6 +143,7 @@
143 143 NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */
144 144 NR_DIRTIED, /* page dirtyings since bootup */
145 145 NR_WRITTEN, /* page writings since bootup */
  146 + NR_PAGES_SCANNED, /* pages scanned since last reclaim */
146 147 #ifdef CONFIG_NUMA
147 148 NUMA_HIT, /* allocated in intended node */
148 149 NUMA_MISS, /* allocated in non intended node */
... ... @@ -478,7 +479,6 @@
478 479  
479 480 /* Fields commonly accessed by the page reclaim scanner */
480 481 spinlock_t lru_lock;
481   - unsigned long pages_scanned; /* since last reclaim */
482 482 struct lruvec lruvec;
483 483  
484 484 /*
mm/page_alloc.c
... ... @@ -664,9 +664,12 @@
664 664 int migratetype = 0;
665 665 int batch_free = 0;
666 666 int to_free = count;
  667 + unsigned long nr_scanned;
667 668  
668 669 spin_lock(&zone->lock);
669   - zone->pages_scanned = 0;
  670 + nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
  671 + if (nr_scanned)
  672 + __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
670 673  
671 674 while (to_free) {
672 675 struct page *page;
673 676  
... ... @@ -715,8 +718,11 @@
715 718 unsigned int order,
716 719 int migratetype)
717 720 {
  721 + unsigned long nr_scanned;
718 722 spin_lock(&zone->lock);
719   - zone->pages_scanned = 0;
  723 + nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
  724 + if (nr_scanned)
  725 + __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
720 726  
721 727 __free_one_page(page, pfn, zone, order, migratetype);
722 728 if (unlikely(!is_migrate_isolate(migratetype)))
... ... @@ -3218,7 +3224,7 @@
3218 3224 K(zone_page_state(zone, NR_BOUNCE)),
3219 3225 K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
3220 3226 K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
3221   - zone->pages_scanned,
  3227 + K(zone_page_state(zone, NR_PAGES_SCANNED)),
3222 3228 (!zone_reclaimable(zone) ? "yes" : "no")
3223 3229 );
3224 3230 printk("lowmem_reserve[]:");
mm/vmscan.c
... ... @@ -163,7 +163,8 @@
163 163  
164 164 bool zone_reclaimable(struct zone *zone)
165 165 {
166   - return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
  166 + return zone_page_state(zone, NR_PAGES_SCANNED) <
  167 + zone_reclaimable_pages(zone) * 6;
167 168 }
168 169  
169 170 static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
... ... @@ -1470,7 +1471,7 @@
1470 1471 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
1471 1472  
1472 1473 if (global_reclaim(sc)) {
1473   - zone->pages_scanned += nr_scanned;
  1474 + __mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
1474 1475 if (current_is_kswapd())
1475 1476 __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
1476 1477 else
... ... @@ -1659,7 +1660,7 @@
1659 1660 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
1660 1661 &nr_scanned, sc, isolate_mode, lru);
1661 1662 if (global_reclaim(sc))
1662   - zone->pages_scanned += nr_scanned;
  1663 + __mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
1663 1664  
1664 1665 reclaim_stat->recent_scanned[file] += nr_taken;
1665 1666  
mm/vmstat.c
... ... @@ -761,6 +761,7 @@
761 761 "nr_shmem",
762 762 "nr_dirtied",
763 763 "nr_written",
  764 + "nr_pages_scanned",
764 765  
765 766 #ifdef CONFIG_NUMA
766 767 "numa_hit",
... ... @@ -1055,7 +1056,7 @@
1055 1056 min_wmark_pages(zone),
1056 1057 low_wmark_pages(zone),
1057 1058 high_wmark_pages(zone),
1058   - zone->pages_scanned,
  1059 + zone_page_state(zone, NR_PAGES_SCANNED),
1059 1060 zone->spanned_pages,
1060 1061 zone->present_pages,
1061 1062 zone->managed_pages);
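
Tying the hunks together: both free paths (free_pcppages_bulk and
free_one_page) now read the counter first and write it back only when it is
nonzero, so freeing pages into a zone that has seen no reclaim activity never
dirties the counter's cache line -- the "do not update it in the free paths
unless necessary" part of the changelog.  The same pattern in the userspace
model from the earlier sketch; pages_scanned and reset_pages_scanned are
invented names standing in for NR_PAGES_SCANNED, not kernel code.

#include <stdatomic.h>

static atomic_long pages_scanned;       /* stand-in for the zone counter */

static void reset_pages_scanned(void)
{
        long nr_scanned = atomic_load(&pages_scanned);

        /*
         * Reads are cheap; skip the write when there is nothing to
         * clear, so frees on an idle zone never dirty this cache line.
         */
        if (nr_scanned)
                atomic_fetch_add(&pages_scanned, -nr_scanned);
}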