Commit 3e2f41f1f64744f7942980d93cc93dd3e5924560

Authored by KOSAKI Motohiro
Committed by Linus Torvalds
1 parent a3d8e0549d

memcg: add zone_reclaim_stat

Introduce the mem_cgroup_per_zone::reclaim_stat member and its
statistics-collecting function.

Now get_scan_ratio() can calculate the correct value for memcg reclaim.
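
For background: get_scan_ratio() balances anon vs. file scanning by comparing how many pages were recently scanned with how many were rotated back onto the active list. Below is a minimal userspace model of that idea, with illustrative numbers and names; it is not the kernel code itself.

/*
 * Simplified userspace model of the balancing that get_scan_ratio()
 * performs with recent_scanned/recent_rotated; the real kernel code
 * also folds in swappiness and the size of each LRU.  The numbers
 * below are illustrative only.
 */
#include <stdio.h>

struct zone_reclaim_stat {
	unsigned long recent_rotated[2];	/* [0] anon, [1] file */
	unsigned long recent_scanned[2];
};

int main(void)
{
	struct zone_reclaim_stat stat = {
		.recent_rotated = { 80, 10 },
		.recent_scanned = { 100, 100 },
	};
	int file;

	/*
	 * Pages "rotated" back to the active list were still in use;
	 * a high rotated/scanned ratio marks that LRU as hot, so it
	 * should be scanned less aggressively than the other one.
	 */
	for (file = 0; file <= 1; file++) {
		double rot = (double)stat.recent_rotated[file] /
			     (double)(stat.recent_scanned[file] + 1);
		printf("%s: %.0f%% of recently scanned pages rotated\n",
		       file ? "file" : "anon", rot * 100.0);
	}
	return 0;
}

Before this patch a memcg-limited reclaim pass had no per-cgroup counters of this kind, which is why the ratio could not be computed correctly for memcg reclaim.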

[hugh@veritas.com: avoid reclaim_stat oops when disabled]
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 4 changed files with 83 additions and 23 deletions

include/linux/memcontrol.h
@@ -105,6 +105,10 @@
 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
 				       struct zone *zone,
 				       enum lru_list lru);
+struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
+						      struct zone *zone);
+struct zone_reclaim_stat*
+mem_cgroup_get_reclaim_stat_from_page(struct page *page);
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 extern int do_swap_account;
@@ -270,6 +274,18 @@
 	return 0;
 }
 
+
+static inline struct zone_reclaim_stat*
+mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
+{
+	return NULL;
+}
+
+static inline struct zone_reclaim_stat*
+mem_cgroup_get_reclaim_stat_from_page(struct page *page)
+{
+	return NULL;
+}
 
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
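
A note on the stubs above: when the memory controller is compiled out, both accessors collapse to inline functions returning NULL, so callers keep a single code path and merely skip the memcg bookkeeping. A standalone sketch of that convention follows; CONFIG_MEMCG_MODEL is an invented stand-in for the real config option.

/*
 * Userspace model of the compile-out convention: with the controller
 * disabled, the accessor degenerates to an inline stub returning NULL
 * and the caller just skips the per-cgroup bookkeeping.
 */
#include <stdio.h>
#include <stddef.h>

struct zone_reclaim_stat { unsigned long recent_scanned[2]; };

#ifdef CONFIG_MEMCG_MODEL
static struct zone_reclaim_stat model_stat;
static struct zone_reclaim_stat *get_memcg_stat(void) { return &model_stat; }
#else
static struct zone_reclaim_stat *get_memcg_stat(void) { return NULL; }
#endif

int main(void)
{
	struct zone_reclaim_stat *stat = get_memcg_stat();

	if (stat)	/* only account when the controller is built in */
		stat->recent_scanned[0]++;
	printf("memcg stats %s\n", stat ? "updated" : "absent");
	return 0;
}
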
mm/memcontrol.c
@@ -103,6 +103,8 @@
 	 */
 	struct list_head	lists[NR_LRU_LISTS];
 	unsigned long		count[NR_LRU_LISTS];
+
+	struct zone_reclaim_stat reclaim_stat;
 };
 /* Macro for accessing counter */
 #define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])
@@ -456,6 +458,33 @@
 	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
 
 	return MEM_CGROUP_ZSTAT(mz, lru);
+}
+
+struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
+						      struct zone *zone)
+{
+	int nid = zone->zone_pgdat->node_id;
+	int zid = zone_idx(zone);
+	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
+
+	return &mz->reclaim_stat;
+}
+
+struct zone_reclaim_stat *
+mem_cgroup_get_reclaim_stat_from_page(struct page *page)
+{
+	struct page_cgroup *pc;
+	struct mem_cgroup_per_zone *mz;
+
+	if (mem_cgroup_disabled())
+		return NULL;
+
+	pc = lookup_page_cgroup(page);
+	mz = page_cgroup_zoneinfo(pc);
+	if (!mz)
+		return NULL;
+
+	return &mz->reclaim_stat;
 }
 
 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
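
mem_cgroup_get_reclaim_stat_from_page() walks page -> page_cgroup -> mem_cgroup_per_zone and can bail out at two points: the controller is disabled entirely (the oops fix credited to Hugh above) or the page has no zoneinfo behind it. A rough userspace model of those two NULL paths; every type and link below is a stand-in, not the kernel's real layout.

/*
 * Stand-in model of the two NULL returns in
 * mem_cgroup_get_reclaim_stat_from_page(); none of these types match
 * the kernel's real layout.
 */
#include <stdio.h>
#include <stddef.h>

struct zone_reclaim_stat { unsigned long recent_scanned[2]; };
struct mem_cgroup_per_zone { struct zone_reclaim_stat reclaim_stat; };
struct page_cgroup { struct mem_cgroup_per_zone *mz; };
struct page { struct page_cgroup pc; };

static int memcg_disabled;		/* models mem_cgroup_disabled() */
static struct mem_cgroup_per_zone mz;	/* zero-initialized */

static struct zone_reclaim_stat *reclaim_stat_from_page(struct page *page)
{
	if (memcg_disabled)
		return NULL;		/* controller off: no stats at all */
	if (!page->pc.mz)
		return NULL;		/* page not charged to any cgroup */
	return &page->pc.mz->reclaim_stat;
}

int main(void)
{
	struct page charged = { { &mz } };
	struct page uncharged = { { NULL } };

	printf("charged:   %s\n", reclaim_stat_from_page(&charged) ? "stat" : "NULL");
	printf("uncharged: %s\n", reclaim_stat_from_page(&uncharged) ? "stat" : "NULL");
	return 0;
}
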
mm/swap.c
@@ -151,13 +151,32 @@
 	}
 }
 
+static void update_page_reclaim_stat(struct zone *zone, struct page *page,
+				     int file, int rotated)
+{
+	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
+	struct zone_reclaim_stat *memcg_reclaim_stat;
+
+	memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
+
+	reclaim_stat->recent_scanned[file]++;
+	if (rotated)
+		reclaim_stat->recent_rotated[file]++;
+
+	if (!memcg_reclaim_stat)
+		return;
+
+	memcg_reclaim_stat->recent_scanned[file]++;
+	if (rotated)
+		memcg_reclaim_stat->recent_rotated[file]++;
+}
+
 /*
  * FIXME: speed this up?
  */
 void activate_page(struct page *page)
 {
 	struct zone *zone = page_zone(page);
-	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
 
 	spin_lock_irq(&zone->lru_lock);
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
@@ -170,8 +189,7 @@
 		add_page_to_lru_list(zone, page, lru);
 		__count_vm_event(PGACTIVATE);
 
-		reclaim_stat->recent_rotated[!!file]++;
-		reclaim_stat->recent_scanned[!!file]++;
+		update_page_reclaim_stat(zone, page, !!file, 1);
 	}
 	spin_unlock_irq(&zone->lru_lock);
 }
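
Both call sites now funnel through the new helper: activate_page() passes rotated=1 because the page is moving to the active list, while the pagevec-add path in the following hunks passes the target LRU's own active flag. A compact model of that mapping, with the zone/memcg plumbing deliberately elided:

/*
 * Model of the two update_page_reclaim_stat() call sites: "rotated"
 * means the page lands on (or returns to) the active list, which is
 * exactly what the ratio calculation later reads back.
 */
#include <stdio.h>

static unsigned long recent_scanned[2];	/* [0] anon, [1] file */
static unsigned long recent_rotated[2];

static void update_stat(int file, int rotated)
{
	recent_scanned[file]++;
	if (rotated)
		recent_rotated[file]++;
}

int main(void)
{
	update_stat(1, 1);	/* activate_page(): file page activated */
	update_stat(0, 0);	/* pagevec add to the inactive anon LRU */
	update_stat(0, 1);	/* pagevec add to the active anon LRU */

	printf("anon: scanned=%lu rotated=%lu\n",
	       recent_scanned[0], recent_rotated[0]);
	printf("file: scanned=%lu rotated=%lu\n",
	       recent_scanned[1], recent_rotated[1]);
	return 0;
}
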
@@ -386,7 +404,6 @@
 {
 	int i;
 	struct zone *zone = NULL;
-	struct zone_reclaim_stat *reclaim_stat = NULL;
 
 	VM_BUG_ON(is_unevictable_lru(lru));
 
@@ -394,24 +411,23 @@
 		struct page *page = pvec->pages[i];
 		struct zone *pagezone = page_zone(page);
 		int file;
+		int active;
 
 		if (pagezone != zone) {
 			if (zone)
 				spin_unlock_irq(&zone->lru_lock);
 			zone = pagezone;
-			reclaim_stat = &zone->reclaim_stat;
 			spin_lock_irq(&zone->lru_lock);
 		}
 		VM_BUG_ON(PageActive(page));
 		VM_BUG_ON(PageUnevictable(page));
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
+		active = is_active_lru(lru);
 		file = is_file_lru(lru);
-		reclaim_stat->recent_scanned[file]++;
-		if (is_active_lru(lru)) {
+		if (active)
 			SetPageActive(page);
-			reclaim_stat->recent_rotated[file]++;
-		}
+		update_page_reclaim_stat(zone, page, file, active);
 		add_page_to_lru_list(zone, page, lru);
 	}
 	if (zone)
mm/vmscan.c
@@ -133,6 +133,9 @@
 static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
 						  struct scan_control *sc)
 {
+	if (!scan_global_lru(sc))
+		return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
+
 	return &zone->reclaim_stat;
 }
 
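
These three added lines are the pivot of the patch: once reclaim_stat is obtained through this helper, the same accounting code serves both global and memcg-limited reclaim, which is why several scan_global_lru() guards can be dropped in the hunks that follow. A standalone model of the dispatch; the structures here are illustrative stand-ins, not the kernel's:

/*
 * Model of the get_reclaim_stat() dispatch: global reclaim uses the
 * zone's embedded stats, memcg-targeted reclaim uses the cgroup's own
 * per-zone stats.
 */
#include <stdio.h>
#include <stddef.h>

struct zone_reclaim_stat { unsigned long recent_scanned[2]; };

struct zone { struct zone_reclaim_stat reclaim_stat; };
struct mem_cgroup { struct zone_reclaim_stat per_zone_stat; };
struct scan_control { struct mem_cgroup *mem_cgroup; };	/* NULL => global */

static struct zone_reclaim_stat *get_stat(struct zone *zone,
					  struct scan_control *sc)
{
	if (sc->mem_cgroup)	/* models !scan_global_lru(sc) */
		return &sc->mem_cgroup->per_zone_stat;
	return &zone->reclaim_stat;
}

int main(void)
{
	static struct zone zone;
	static struct mem_cgroup memcg;
	struct scan_control global = { NULL };
	struct scan_control targeted = { &memcg };

	/* Identical accounting code; only the stat target differs. */
	get_stat(&zone, &global)->recent_scanned[0]++;
	get_stat(&zone, &targeted)->recent_scanned[0]++;

	printf("zone=%lu memcg=%lu\n",
	       zone.reclaim_stat.recent_scanned[0],
	       memcg.per_zone_stat.recent_scanned[0]);
	return 0;
}
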
@@ -1087,17 +1090,14 @@
 		__mod_zone_page_state(zone, NR_INACTIVE_ANON,
 						-count[LRU_INACTIVE_ANON]);
 
-		if (scan_global_lru(sc)) {
+		if (scan_global_lru(sc))
 			zone->pages_scanned += nr_scan;
-			reclaim_stat->recent_scanned[0] +=
-						count[LRU_INACTIVE_ANON];
-			reclaim_stat->recent_scanned[0] +=
-						count[LRU_ACTIVE_ANON];
-			reclaim_stat->recent_scanned[1] +=
-						count[LRU_INACTIVE_FILE];
-			reclaim_stat->recent_scanned[1] +=
-						count[LRU_ACTIVE_FILE];
-		}
+
+		reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
+		reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
+		reclaim_stat->recent_scanned[1] += count[LRU_INACTIVE_FILE];
+		reclaim_stat->recent_scanned[1] += count[LRU_ACTIVE_FILE];
+
 		spin_unlock_irq(&zone->lru_lock);
 
 		nr_scanned += nr_scan;
@@ -1155,7 +1155,7 @@
 			SetPageLRU(page);
 			lru = page_lru(page);
 			add_page_to_lru_list(zone, page, lru);
-			if (PageActive(page) && scan_global_lru(sc)) {
+			if (PageActive(page)) {
 				int file = !!page_is_file_cache(page);
 				reclaim_stat->recent_rotated[file]++;
 			}
@@ -1230,8 +1230,8 @@
 	 */
 	if (scan_global_lru(sc)) {
 		zone->pages_scanned += pgscanned;
-		reclaim_stat->recent_scanned[!!file] += pgmoved;
 	}
+	reclaim_stat->recent_scanned[!!file] += pgmoved;
 
 	if (file)
 		__mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
@@ -1272,8 +1272,7 @@
 	 * This helps balance scan pressure between file and anonymous
 	 * pages in get_scan_ratio.
 	 */
-	if (scan_global_lru(sc))
-		reclaim_stat->recent_rotated[!!file] += pgmoved;
+	reclaim_stat->recent_rotated[!!file] += pgmoved;
 
 	while (!list_empty(&l_inactive)) {
 		page = lru_to_page(&l_inactive);
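
One asymmetry worth noting in the mm/vmscan.c hunks: zone->pages_scanned is a zone-global counter, so it stays behind the scan_global_lru() check, while the reclaim_stat updates become unconditional because the pointer already resolves to the correct (zone or memcg) structure. A small model of that split, with invented names:

/*
 * Model of the split above: pages_scanned is zone-wide and updated for
 * global reclaim only, while the already-resolved reclaim_stat target
 * is updated on every reclaim pass.
 */
#include <stdio.h>

static void account(unsigned long *pages_scanned,
		    unsigned long *recent_scanned,
		    int global_reclaim, unsigned long nr)
{
	if (global_reclaim)
		*pages_scanned += nr;	/* zone-global counter only */
	*recent_scanned += nr;		/* target was resolved up front */
}

int main(void)
{
	unsigned long zone_pages_scanned = 0;
	unsigned long zone_recent_scanned = 0;
	unsigned long memcg_recent_scanned = 0;

	account(&zone_pages_scanned, &zone_recent_scanned, 1, 32);
	account(&zone_pages_scanned, &memcg_recent_scanned, 0, 32);

	printf("zone: pages_scanned=%lu recent_scanned=%lu\n",
	       zone_pages_scanned, zone_recent_scanned);
	printf("memcg: recent_scanned=%lu\n", memcg_recent_scanned);
	return 0;
}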