Commit b69408e88bd86b98feb7b9a38fd865e1ddb29827

Authored by Christoph Lameter
Committed by Linus Torvalds
1 parent 62695a84eb

vmscan: Use an indexed array for LRU variables

Currently we are defining explicit variables for the inactive and active
lists.  An indexed array can be more generic and avoid repeating similar
code in several places in the reclaim code.

We save a few bytes of code size:

Before:

   text    data     bss     dec     hex filename
4097753  573120 4092484 8763357  85b7dd vmlinux

After:

   text    data     bss     dec     hex filename
4097729  573120 4092484 8763333  85b7c5 vmlinux

Having an easy way to add new lru lists should also ease future work on
the reclaim code.

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
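
[Editor's note] To illustrate the pattern the patch introduces, here is a minimal
user-space sketch. It is not kernel code: the enum, for_each_lru() and the
nr_scan arithmetic are borrowed from the diff below, while the struct and main()
are simplified stand-ins for the per-zone state and shrink_zone().

#include <stdio.h>

enum lru_list {
	LRU_BASE,
	LRU_INACTIVE = LRU_BASE,	/* must match order of NR_[IN]ACTIVE */
	LRU_ACTIVE,
	NR_LRU_LISTS
};

#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)

/* Stand-in for the per-zone LRU state: one array entry per list. */
struct lru {
	unsigned long nr_pages;		/* stand-in for the list + vmstat counter */
	unsigned long nr_scan;
};

int main(void)
{
	struct lru lru[NR_LRU_LISTS] = {
		[LRU_INACTIVE] = { .nr_pages = 1000 },
		[LRU_ACTIVE]   = { .nr_pages =  400 },
	};
	int priority = 2;
	enum lru_list l;

	/*
	 * The old shrink_zone() repeated this block once for the active and
	 * once for the inactive list; with the array a single loop covers both.
	 */
	for_each_lru(l) {
		lru[l].nr_scan += (lru[l].nr_pages >> priority) + 1;
		printf("lru %d: nr_scan = %lu\n", (int)l, lru[l].nr_scan);
	}
	return 0;
}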

Showing 8 changed files with 171 additions and 170 deletions

include/linux/memcontrol.h
... ... @@ -69,10 +69,8 @@
69 69 extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
70 70 int priority);
71 71  
72   -extern long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
73   - struct zone *zone, int priority);
74   -extern long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
75   - struct zone *zone, int priority);
  72 +extern long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
  73 + int priority, enum lru_list lru);
76 74  
77 75 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
78 76 static inline void page_reset_bad_cgroup(struct page *page)
... ... @@ -159,14 +157,9 @@
159 157 {
160 158 }
161 159  
162   -static inline long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
163   - struct zone *zone, int priority)
164   -{
165   - return 0;
166   -}
167   -
168   -static inline long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
169   - struct zone *zone, int priority)
  160 +static inline long mem_cgroup_calc_reclaim(struct mem_cgroup *mem,
  161 + struct zone *zone, int priority,
  162 + enum lru_list lru)
170 163 {
171 164 return 0;
172 165 }
include/linux/mm_inline.h
1 1 static inline void
  2 +add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
  3 +{
  4 + list_add(&page->lru, &zone->lru[l].list);
  5 + __inc_zone_state(zone, NR_LRU_BASE + l);
  6 +}
  7 +
  8 +static inline void
  9 +del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
  10 +{
  11 + list_del(&page->lru);
  12 + __dec_zone_state(zone, NR_LRU_BASE + l);
  13 +}
  14 +
  15 +static inline void
2 16 add_page_to_active_list(struct zone *zone, struct page *page)
3 17 {
4   - list_add(&page->lru, &zone->active_list);
5   - __inc_zone_state(zone, NR_ACTIVE);
  18 + add_page_to_lru_list(zone, page, LRU_ACTIVE);
6 19 }
7 20  
8 21 static inline void
9 22 add_page_to_inactive_list(struct zone *zone, struct page *page)
10 23 {
11   - list_add(&page->lru, &zone->inactive_list);
12   - __inc_zone_state(zone, NR_INACTIVE);
  24 + add_page_to_lru_list(zone, page, LRU_INACTIVE);
13 25 }
14 26  
15 27 static inline void
16 28 del_page_from_active_list(struct zone *zone, struct page *page)
17 29 {
18   - list_del(&page->lru);
19   - __dec_zone_state(zone, NR_ACTIVE);
  30 + del_page_from_lru_list(zone, page, LRU_ACTIVE);
20 31 }
21 32  
22 33 static inline void
23 34 del_page_from_inactive_list(struct zone *zone, struct page *page)
24 35 {
25   - list_del(&page->lru);
26   - __dec_zone_state(zone, NR_INACTIVE);
  36 + del_page_from_lru_list(zone, page, LRU_INACTIVE);
27 37 }
28 38  
29 39 static inline void
30 40 del_page_from_lru(struct zone *zone, struct page *page)
31 41 {
  42 + enum lru_list l = LRU_INACTIVE;
  43 +
32 44 list_del(&page->lru);
33 45 if (PageActive(page)) {
34 46 __ClearPageActive(page);
35   - __dec_zone_state(zone, NR_ACTIVE);
36   - } else {
37   - __dec_zone_state(zone, NR_INACTIVE);
  47 + l = LRU_ACTIVE;
38 48 }
  49 + __dec_zone_state(zone, NR_LRU_BASE + l);
  50 +}
  51 +
  52 +/**
  53 + * page_lru - which LRU list should a page be on?
  54 + * @page: the page to test
  55 + *
  56 + * Returns the LRU list a page should be on, as an index
  57 + * into the array of LRU lists.
  58 + */
  59 +static inline enum lru_list page_lru(struct page *page)
  60 +{
  61 + enum lru_list lru = LRU_BASE;
  62 +
  63 + if (PageActive(page))
  64 + lru += LRU_ACTIVE;
  65 +
  66 + return lru;
39 67 }
include/linux/mmzone.h
... ... @@ -81,8 +81,9 @@
81 81 enum zone_stat_item {
82 82 /* First 128 byte cacheline (assuming 64 bit words) */
83 83 NR_FREE_PAGES,
84   - NR_INACTIVE,
85   - NR_ACTIVE,
  84 + NR_LRU_BASE,
  85 + NR_INACTIVE = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
  86 + NR_ACTIVE, /* " " " " " */
86 87 NR_ANON_PAGES, /* Mapped anonymous pages */
87 88 NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
88 89 only modified from process context */
... ... @@ -107,6 +108,19 @@
107 108 #endif
108 109 NR_VM_ZONE_STAT_ITEMS };
109 110  
  111 +enum lru_list {
  112 + LRU_BASE,
  113 + LRU_INACTIVE=LRU_BASE, /* must match order of NR_[IN]ACTIVE */
  114 + LRU_ACTIVE, /* " " " " " */
  115 + NR_LRU_LISTS };
  116 +
  117 +#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)
  118 +
  119 +static inline int is_active_lru(enum lru_list l)
  120 +{
  121 + return (l == LRU_ACTIVE);
  122 +}
  123 +
110 124 struct per_cpu_pages {
111 125 int count; /* number of pages in the list */
112 126 int high; /* high watermark, emptying needed */
... ... @@ -251,10 +265,10 @@
251 265  
252 266 /* Fields commonly accessed by the page reclaim scanner */
253 267 spinlock_t lru_lock;
254   - struct list_head active_list;
255   - struct list_head inactive_list;
256   - unsigned long nr_scan_active;
257   - unsigned long nr_scan_inactive;
  268 + struct {
  269 + struct list_head list;
  270 + unsigned long nr_scan;
  271 + } lru[NR_LRU_LISTS];
258 272 unsigned long pages_scanned; /* since last reclaim */
259 273 unsigned long flags; /* zone flags, see below */
260 274  
mm/memcontrol.c
... ... @@ -32,6 +32,7 @@
32 32 #include <linux/fs.h>
33 33 #include <linux/seq_file.h>
34 34 #include <linux/vmalloc.h>
  35 +#include <linux/mm_inline.h>
35 36  
36 37 #include <asm/uaccess.h>
37 38  
... ... @@ -85,22 +86,13 @@
85 86 /*
86 87 * per-zone information in memory controller.
87 88 */
88   -
89   -enum mem_cgroup_zstat_index {
90   - MEM_CGROUP_ZSTAT_ACTIVE,
91   - MEM_CGROUP_ZSTAT_INACTIVE,
92   -
93   - NR_MEM_CGROUP_ZSTAT,
94   -};
95   -
96 89 struct mem_cgroup_per_zone {
97 90 /*
98 91 * spin_lock to protect the per cgroup LRU
99 92 */
100 93 spinlock_t lru_lock;
101   - struct list_head active_list;
102   - struct list_head inactive_list;
103   - unsigned long count[NR_MEM_CGROUP_ZSTAT];
  94 + struct list_head lists[NR_LRU_LISTS];
  95 + unsigned long count[NR_LRU_LISTS];
104 96 };
105 97 /* Macro for accessing counter */
106 98 #define MEM_CGROUP_ZSTAT(mz, idx) ((mz)->count[(idx)])
... ... @@ -227,7 +219,7 @@
227 219 }
228 220  
229 221 static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
230   - enum mem_cgroup_zstat_index idx)
  222 + enum lru_list idx)
231 223 {
232 224 int nid, zid;
233 225 struct mem_cgroup_per_zone *mz;
234 226  
... ... @@ -297,11 +289,9 @@
297 289 struct page_cgroup *pc)
298 290 {
299 291 int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
  292 + int lru = !!from;
300 293  
301   - if (from)
302   - MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
303   - else
304   - MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;
  294 + MEM_CGROUP_ZSTAT(mz, lru) -= 1;
305 295  
306 296 mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
307 297 list_del(&pc->lru);
... ... @@ -310,37 +300,35 @@
310 300 static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
311 301 struct page_cgroup *pc)
312 302 {
313   - int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
  303 + int lru = LRU_INACTIVE;
314 304  
315   - if (!to) {
316   - MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
317   - list_add(&pc->lru, &mz->inactive_list);
318   - } else {
319   - MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
320   - list_add(&pc->lru, &mz->active_list);
321   - }
  305 + if (pc->flags & PAGE_CGROUP_FLAG_ACTIVE)
  306 + lru += LRU_ACTIVE;
  307 +
  308 + MEM_CGROUP_ZSTAT(mz, lru) += 1;
  309 + list_add(&pc->lru, &mz->lists[lru]);
  310 +
322 311 mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
323 312 }
324 313  
325 314 static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
326 315 {
327   - int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
328 316 struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
  317 + int lru = LRU_INACTIVE;
329 318  
330   - if (from)
331   - MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
332   - else
333   - MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;
  319 + if (pc->flags & PAGE_CGROUP_FLAG_ACTIVE)
  320 + lru += LRU_ACTIVE;
334 321  
335   - if (active) {
336   - MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
  322 + MEM_CGROUP_ZSTAT(mz, lru) -= 1;
  323 +
  324 + if (active)
337 325 pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
338   - list_move(&pc->lru, &mz->active_list);
339   - } else {
340   - MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
  326 + else
341 327 pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
342   - list_move(&pc->lru, &mz->inactive_list);
343   - }
  328 +
  329 + lru = !!active;
  330 + MEM_CGROUP_ZSTAT(mz, lru) += 1;
  331 + list_move(&pc->lru, &mz->lists[lru]);
344 332 }
345 333  
346 334 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
... ... @@ -412,8 +400,8 @@
412 400 {
413 401 unsigned long active, inactive;
414 402 /* active and inactive are the number of pages. 'long' is ok.*/
415   - active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
416   - inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
  403 + active = mem_cgroup_get_all_zonestat(mem, LRU_ACTIVE);
  404 + inactive = mem_cgroup_get_all_zonestat(mem, LRU_INACTIVE);
417 405 return (long) (active / (inactive + 1));
418 406 }
419 407  
... ... @@ -444,28 +432,17 @@
444 432 * (see include/linux/mmzone.h)
445 433 */
446 434  
447   -long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
448   - struct zone *zone, int priority)
  435 +long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
  436 + int priority, enum lru_list lru)
449 437 {
450   - long nr_active;
  438 + long nr_pages;
451 439 int nid = zone->zone_pgdat->node_id;
452 440 int zid = zone_idx(zone);
453 441 struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
454 442  
455   - nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
456   - return (nr_active >> priority);
457   -}
  443 + nr_pages = MEM_CGROUP_ZSTAT(mz, lru);
458 444  
459   -long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
460   - struct zone *zone, int priority)
461   -{
462   - long nr_inactive;
463   - int nid = zone->zone_pgdat->node_id;
464   - int zid = zone_idx(zone);
465   - struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
466   -
467   - nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
468   - return (nr_inactive >> priority);
  445 + return (nr_pages >> priority);
469 446 }
470 447  
471 448 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
... ... @@ -484,15 +461,12 @@
484 461 int nid = z->zone_pgdat->node_id;
485 462 int zid = zone_idx(z);
486 463 struct mem_cgroup_per_zone *mz;
  464 + int lru = !!active;
487 465  
488 466 BUG_ON(!mem_cont);
489 467 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
490   - if (active)
491   - src = &mz->active_list;
492   - else
493   - src = &mz->inactive_list;
  468 + src = &mz->lists[lru];
494 469  
495   -
496 470 spin_lock(&mz->lru_lock);
497 471 scan = 0;
498 472 list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
... ... @@ -863,7 +837,7 @@
863 837 #define FORCE_UNCHARGE_BATCH (128)
864 838 static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
865 839 struct mem_cgroup_per_zone *mz,
866   - int active)
  840 + enum lru_list lru)
867 841 {
868 842 struct page_cgroup *pc;
869 843 struct page *page;
... ... @@ -871,10 +845,7 @@
871 845 unsigned long flags;
872 846 struct list_head *list;
873 847  
874   - if (active)
875   - list = &mz->active_list;
876   - else
877   - list = &mz->inactive_list;
  848 + list = &mz->lists[lru];
878 849  
879 850 spin_lock_irqsave(&mz->lru_lock, flags);
880 851 while (!list_empty(list)) {
... ... @@ -922,11 +893,10 @@
922 893 for_each_node_state(node, N_POSSIBLE)
923 894 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
924 895 struct mem_cgroup_per_zone *mz;
  896 + enum lru_list l;
925 897 mz = mem_cgroup_zoneinfo(mem, node, zid);
926   - /* drop all page_cgroup in active_list */
927   - mem_cgroup_force_empty_list(mem, mz, 1);
928   - /* drop all page_cgroup in inactive_list */
929   - mem_cgroup_force_empty_list(mem, mz, 0);
  898 + for_each_lru(l)
  899 + mem_cgroup_force_empty_list(mem, mz, l);
930 900 }
931 901 }
932 902 ret = 0;
... ... @@ -1015,9 +985,9 @@
1015 985 unsigned long active, inactive;
1016 986  
1017 987 inactive = mem_cgroup_get_all_zonestat(mem_cont,
1018   - MEM_CGROUP_ZSTAT_INACTIVE);
  988 + LRU_INACTIVE);
1019 989 active = mem_cgroup_get_all_zonestat(mem_cont,
1020   - MEM_CGROUP_ZSTAT_ACTIVE);
  990 + LRU_ACTIVE);
1021 991 cb->fill(cb, "active", (active) * PAGE_SIZE);
1022 992 cb->fill(cb, "inactive", (inactive) * PAGE_SIZE);
1023 993 }
... ... @@ -1062,6 +1032,7 @@
1062 1032 {
1063 1033 struct mem_cgroup_per_node *pn;
1064 1034 struct mem_cgroup_per_zone *mz;
  1035 + enum lru_list l;
1065 1036 int zone, tmp = node;
1066 1037 /*
1067 1038 * This routine is called against possible nodes.
... ... @@ -1082,9 +1053,9 @@
1082 1053  
1083 1054 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
1084 1055 mz = &pn->zoneinfo[zone];
1085   - INIT_LIST_HEAD(&mz->active_list);
1086   - INIT_LIST_HEAD(&mz->inactive_list);
1087 1056 spin_lock_init(&mz->lru_lock);
  1057 + for_each_lru(l)
  1058 + INIT_LIST_HEAD(&mz->lists[l]);
1088 1059 }
1089 1060 return 0;
1090 1061 }
mm/page_alloc.c
... ... @@ -3414,6 +3414,7 @@
3414 3414 for (j = 0; j < MAX_NR_ZONES; j++) {
3415 3415 struct zone *zone = pgdat->node_zones + j;
3416 3416 unsigned long size, realsize, memmap_pages;
  3417 + enum lru_list l;
3417 3418  
3418 3419 size = zone_spanned_pages_in_node(nid, j, zones_size);
3419 3420 realsize = size - zone_absent_pages_in_node(nid, j,
... ... @@ -3465,10 +3466,10 @@
3465 3466 zone->prev_priority = DEF_PRIORITY;
3466 3467  
3467 3468 zone_pcp_init(zone);
3468   - INIT_LIST_HEAD(&zone->active_list);
3469   - INIT_LIST_HEAD(&zone->inactive_list);
3470   - zone->nr_scan_active = 0;
3471   - zone->nr_scan_inactive = 0;
  3469 + for_each_lru(l) {
  3470 + INIT_LIST_HEAD(&zone->lru[l].list);
  3471 + zone->lru[l].nr_scan = 0;
  3472 + }
3472 3473 zap_zone_vm_stats(zone);
3473 3474 zone->flags = 0;
3474 3475 if (!size)
mm/swap.c
... ... @@ -117,7 +117,7 @@
117 117 spin_lock(&zone->lru_lock);
118 118 }
119 119 if (PageLRU(page) && !PageActive(page)) {
120   - list_move_tail(&page->lru, &zone->inactive_list);
  120 + list_move_tail(&page->lru, &zone->lru[LRU_INACTIVE].list);
121 121 pgmoved++;
122 122 }
123 123 }
mm/vmscan.c
... ... @@ -819,10 +819,10 @@
819 819 int active)
820 820 {
821 821 if (active)
822   - return isolate_lru_pages(nr, &z->active_list, dst,
  822 + return isolate_lru_pages(nr, &z->lru[LRU_ACTIVE].list, dst,
823 823 scanned, order, mode);
824 824 else
825   - return isolate_lru_pages(nr, &z->inactive_list, dst,
  825 + return isolate_lru_pages(nr, &z->lru[LRU_INACTIVE].list, dst,
826 826 scanned, order, mode);
827 827 }
828 828  
... ... @@ -973,10 +973,7 @@
973 973 VM_BUG_ON(PageLRU(page));
974 974 SetPageLRU(page);
975 975 list_del(&page->lru);
976   - if (PageActive(page))
977   - add_page_to_active_list(zone, page);
978   - else
979   - add_page_to_inactive_list(zone, page);
  976 + add_page_to_lru_list(zone, page, page_lru(page));
980 977 if (!pagevec_add(&pvec, page)) {
981 978 spin_unlock_irq(&zone->lru_lock);
982 979 __pagevec_release(&pvec);
... ... @@ -1144,8 +1141,8 @@
1144 1141 int pgdeactivate = 0;
1145 1142 unsigned long pgscanned;
1146 1143 LIST_HEAD(l_hold); /* The pages which were snipped off */
1147   - LIST_HEAD(l_inactive); /* Pages to go onto the inactive_list */
1148   - LIST_HEAD(l_active); /* Pages to go onto the active_list */
  1144 + LIST_HEAD(l_active);
  1145 + LIST_HEAD(l_inactive);
1149 1146 struct page *page;
1150 1147 struct pagevec pvec;
1151 1148 int reclaim_mapped = 0;
... ... @@ -1194,7 +1191,7 @@
1194 1191 VM_BUG_ON(!PageActive(page));
1195 1192 ClearPageActive(page);
1196 1193  
1197   - list_move(&page->lru, &zone->inactive_list);
  1194 + list_move(&page->lru, &zone->lru[LRU_INACTIVE].list);
1198 1195 mem_cgroup_move_lists(page, false);
1199 1196 pgmoved++;
1200 1197 if (!pagevec_add(&pvec, page)) {
... ... @@ -1224,7 +1221,7 @@
1224 1221 SetPageLRU(page);
1225 1222 VM_BUG_ON(!PageActive(page));
1226 1223  
1227   - list_move(&page->lru, &zone->active_list);
  1224 + list_move(&page->lru, &zone->lru[LRU_ACTIVE].list);
1228 1225 mem_cgroup_move_lists(page, true);
1229 1226 pgmoved++;
1230 1227 if (!pagevec_add(&pvec, page)) {
... ... @@ -1244,65 +1241,64 @@
1244 1241 pagevec_release(&pvec);
1245 1242 }
1246 1243  
  1244 +static unsigned long shrink_list(enum lru_list l, unsigned long nr_to_scan,
  1245 + struct zone *zone, struct scan_control *sc, int priority)
  1246 +{
  1247 + if (l == LRU_ACTIVE) {
  1248 + shrink_active_list(nr_to_scan, zone, sc, priority);
  1249 + return 0;
  1250 + }
  1251 + return shrink_inactive_list(nr_to_scan, zone, sc);
  1252 +}
  1253 +
1247 1254 /*
1248 1255 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
1249 1256 */
1250 1257 static unsigned long shrink_zone(int priority, struct zone *zone,
1251 1258 struct scan_control *sc)
1252 1259 {
1253   - unsigned long nr_active;
1254   - unsigned long nr_inactive;
  1260 + unsigned long nr[NR_LRU_LISTS];
1255 1261 unsigned long nr_to_scan;
1256 1262 unsigned long nr_reclaimed = 0;
  1263 + enum lru_list l;
1257 1264  
1258 1265 if (scan_global_lru(sc)) {
1259 1266 /*
1260 1267 * Add one to nr_to_scan just to make sure that the kernel
1261 1268 * will slowly sift through the active list.
1262 1269 */
1263   - zone->nr_scan_active +=
1264   - (zone_page_state(zone, NR_ACTIVE) >> priority) + 1;
1265   - nr_active = zone->nr_scan_active;
1266   - zone->nr_scan_inactive +=
1267   - (zone_page_state(zone, NR_INACTIVE) >> priority) + 1;
1268   - nr_inactive = zone->nr_scan_inactive;
1269   - if (nr_inactive >= sc->swap_cluster_max)
1270   - zone->nr_scan_inactive = 0;
1271   - else
1272   - nr_inactive = 0;
1273   -
1274   - if (nr_active >= sc->swap_cluster_max)
1275   - zone->nr_scan_active = 0;
1276   - else
1277   - nr_active = 0;
  1270 + for_each_lru(l) {
  1271 + zone->lru[l].nr_scan += (zone_page_state(zone,
  1272 + NR_LRU_BASE + l) >> priority) + 1;
  1273 + nr[l] = zone->lru[l].nr_scan;
  1274 + if (nr[l] >= sc->swap_cluster_max)
  1275 + zone->lru[l].nr_scan = 0;
  1276 + else
  1277 + nr[l] = 0;
  1278 + }
1278 1279 } else {
1279 1280 /*
1280 1281 * This reclaim occurs not because zone memory shortage but
1281 1282 * because memory controller hits its limit.
1282 1283 * Then, don't modify zone reclaim related data.
1283 1284 */
1284   - nr_active = mem_cgroup_calc_reclaim_active(sc->mem_cgroup,
1285   - zone, priority);
  1285 + nr[LRU_ACTIVE] = mem_cgroup_calc_reclaim(sc->mem_cgroup,
  1286 + zone, priority, LRU_ACTIVE);
1286 1287  
1287   - nr_inactive = mem_cgroup_calc_reclaim_inactive(sc->mem_cgroup,
1288   - zone, priority);
  1288 + nr[LRU_INACTIVE] = mem_cgroup_calc_reclaim(sc->mem_cgroup,
  1289 + zone, priority, LRU_INACTIVE);
1289 1290 }
1290 1291  
1291   -
1292   - while (nr_active || nr_inactive) {
1293   - if (nr_active) {
1294   - nr_to_scan = min(nr_active,
  1292 + while (nr[LRU_ACTIVE] || nr[LRU_INACTIVE]) {
  1293 + for_each_lru(l) {
  1294 + if (nr[l]) {
  1295 + nr_to_scan = min(nr[l],
1295 1296 (unsigned long)sc->swap_cluster_max);
1296   - nr_active -= nr_to_scan;
1297   - shrink_active_list(nr_to_scan, zone, sc, priority);
1298   - }
  1297 + nr[l] -= nr_to_scan;
1299 1298  
1300   - if (nr_inactive) {
1301   - nr_to_scan = min(nr_inactive,
1302   - (unsigned long)sc->swap_cluster_max);
1303   - nr_inactive -= nr_to_scan;
1304   - nr_reclaimed += shrink_inactive_list(nr_to_scan, zone,
1305   - sc);
  1299 + nr_reclaimed += shrink_list(l, nr_to_scan,
  1300 + zone, sc, priority);
  1301 + }
1306 1302 }
1307 1303 }
1308 1304  
... ... @@ -1819,6 +1815,7 @@
1819 1815 {
1820 1816 struct zone *zone;
1821 1817 unsigned long nr_to_scan, ret = 0;
  1818 + enum lru_list l;
1822 1819  
1823 1820 for_each_zone(zone) {
1824 1821  
... ... @@ -1828,27 +1825,24 @@
1828 1825 if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
1829 1826 continue;
1830 1827  
1831   - /* For pass = 0 we don't shrink the active list */
1832   - if (pass > 0) {
1833   - zone->nr_scan_active +=
1834   - (zone_page_state(zone, NR_ACTIVE) >> prio) + 1;
1835   - if (zone->nr_scan_active >= nr_pages || pass > 3) {
1836   - zone->nr_scan_active = 0;
  1828 + for_each_lru(l) {
  1829 + /* For pass = 0 we don't shrink the active list */
  1830 + if (pass == 0 && l == LRU_ACTIVE)
  1831 + continue;
  1832 +
  1833 + zone->lru[l].nr_scan +=
  1834 + (zone_page_state(zone, NR_LRU_BASE + l)
  1835 + >> prio) + 1;
  1836 + if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
  1837 + zone->lru[l].nr_scan = 0;
1837 1838 nr_to_scan = min(nr_pages,
1838   - zone_page_state(zone, NR_ACTIVE));
1839   - shrink_active_list(nr_to_scan, zone, sc, prio);
  1839 + zone_page_state(zone,
  1840 + NR_LRU_BASE + l));
  1841 + ret += shrink_list(l, nr_to_scan, zone,
  1842 + sc, prio);
  1843 + if (ret >= nr_pages)
  1844 + return ret;
1840 1845 }
1841   - }
1842   -
1843   - zone->nr_scan_inactive +=
1844   - (zone_page_state(zone, NR_INACTIVE) >> prio) + 1;
1845   - if (zone->nr_scan_inactive >= nr_pages || pass > 3) {
1846   - zone->nr_scan_inactive = 0;
1847   - nr_to_scan = min(nr_pages,
1848   - zone_page_state(zone, NR_INACTIVE));
1849   - ret += shrink_inactive_list(nr_to_scan, zone, sc);
1850   - if (ret >= nr_pages)
1851   - return ret;
1852 1846 }
1853 1847 }
1854 1848  
mm/vmstat.c
... ... @@ -696,7 +696,8 @@
696 696 zone->pages_low,
697 697 zone->pages_high,
698 698 zone->pages_scanned,
699   - zone->nr_scan_active, zone->nr_scan_inactive,
  699 + zone->lru[LRU_ACTIVE].nr_scan,
  700 + zone->lru[LRU_INACTIVE].nr_scan,
700 701 zone->spanned_pages,
701 702 zone->present_pages);
702 703