Commit e815af95f94914993bbad279c71cf5fef9f4eaac
Committed by
Linus Torvalds
1 parent
70e24bdf6d
Exists in
master
and in
7 other branches
oom: change all_unreclaimable zone member to flags
Convert the int all_unreclaimable member of struct zone to unsigned long flags. This can now be used to specify several different zone flags such as all_unreclaimable and reclaim_in_progress, which can now be removed and converted to a per-zone flag.

Flags are set and cleared as follows:

	zone_set_flag(struct zone *zone, zone_flags_t flag)
	zone_clear_flag(struct zone *zone, zone_flags_t flag)

Defines the first zone flags, ZONE_ALL_UNRECLAIMABLE and ZONE_RECLAIM_LOCKED, which have the same semantics as the old zone->all_unreclaimable and zone->reclaim_in_progress, respectively. Also converts all current users that set or clear either flag to use the new interface.

Helper functions are defined to test the flags:

	int zone_is_all_unreclaimable(const struct zone *zone)
	int zone_is_reclaim_locked(const struct zone *zone)

All flag operators are of the atomic variety because there are currently readers that are implemented that do not take zone->lock.

[akpm@linux-foundation.org: add needed include]
Cc: Andrea Arcangeli <andrea@suse.de>
Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 4 changed files with 43 additions and 21 deletions Side-by-side Diff
include/linux/mmzone.h
... | ... | @@ -7,6 +7,7 @@ |
7 | 7 | #include <linux/spinlock.h> |
8 | 8 | #include <linux/list.h> |
9 | 9 | #include <linux/wait.h> |
10 | +#include <linux/bitops.h> | |
10 | 11 | #include <linux/cache.h> |
11 | 12 | #include <linux/threads.h> |
12 | 13 | #include <linux/numa.h> |
13 | 14 | |
... | ... | @@ -262,11 +263,8 @@ |
262 | 263 | unsigned long nr_scan_active; |
263 | 264 | unsigned long nr_scan_inactive; |
264 | 265 | unsigned long pages_scanned; /* since last reclaim */ |
265 | - int all_unreclaimable; /* All pages pinned */ | |
266 | + unsigned long flags; /* zone flags, see below */ | |
266 | 267 | |
267 | - /* A count of how many reclaimers are scanning this zone */ | |
268 | - atomic_t reclaim_in_progress; | |
269 | - | |
270 | 268 | /* Zone statistics */ |
271 | 269 | atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; |
272 | 270 | |
... | ... | @@ -342,6 +340,29 @@ |
342 | 340 | */ |
343 | 341 | const char *name; |
344 | 342 | } ____cacheline_internodealigned_in_smp; |
343 | + | |
344 | +typedef enum { | |
345 | + ZONE_ALL_UNRECLAIMABLE, /* all pages pinned */ | |
346 | + ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */ | |
347 | +} zone_flags_t; | |
348 | + | |
349 | +static inline void zone_set_flag(struct zone *zone, zone_flags_t flag) | |
350 | +{ | |
351 | + set_bit(flag, &zone->flags); | |
352 | +} | |
353 | +static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag) | |
354 | +{ | |
355 | + clear_bit(flag, &zone->flags); | |
356 | +} | |
357 | + | |
358 | +static inline int zone_is_all_unreclaimable(const struct zone *zone) | |
359 | +{ | |
360 | + return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags); | |
361 | +} | |
362 | +static inline int zone_is_reclaim_locked(const struct zone *zone) | |
363 | +{ | |
364 | + return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags); | |
365 | +} | |
345 | 366 | |
346 | 367 | /* |
347 | 368 | * The "priority" of VM scanning is how much of the queues we will scan in one |
mm/page_alloc.c
... | ... | @@ -490,7 +490,7 @@ |
490 | 490 | struct list_head *list, int order) |
491 | 491 | { |
492 | 492 | spin_lock(&zone->lock); |
493 | - zone->all_unreclaimable = 0; | |
493 | + zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE); | |
494 | 494 | zone->pages_scanned = 0; |
495 | 495 | while (count--) { |
496 | 496 | struct page *page; |
... | ... | @@ -507,7 +507,7 @@ |
507 | 507 | static void free_one_page(struct zone *zone, struct page *page, int order) |
508 | 508 | { |
509 | 509 | spin_lock(&zone->lock); |
510 | - zone->all_unreclaimable = 0; | |
510 | + zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE); | |
511 | 511 | zone->pages_scanned = 0; |
512 | 512 | __free_one_page(page, zone, order); |
513 | 513 | spin_unlock(&zone->lock); |
... | ... | @@ -1851,7 +1851,7 @@ |
1851 | 1851 | K(zone_page_state(zone, NR_INACTIVE)), |
1852 | 1852 | K(zone->present_pages), |
1853 | 1853 | zone->pages_scanned, |
1854 | - (zone->all_unreclaimable ? "yes" : "no") | |
1854 | + (zone_is_all_unreclaimable(zone) ? "yes" : "no") | |
1855 | 1855 | ); |
1856 | 1856 | printk("lowmem_reserve[]:"); |
1857 | 1857 | for (i = 0; i < MAX_NR_ZONES; i++) |
... | ... | @@ -3372,7 +3372,7 @@ |
3372 | 3372 | zone->nr_scan_active = 0; |
3373 | 3373 | zone->nr_scan_inactive = 0; |
3374 | 3374 | zap_zone_vm_stats(zone); |
3375 | - atomic_set(&zone->reclaim_in_progress, 0); | |
3375 | + zone->flags = 0; | |
3376 | 3376 | if (!size) |
3377 | 3377 | continue; |
3378 | 3378 |
mm/vmscan.c
... | ... | @@ -1108,7 +1108,7 @@ |
1108 | 1108 | unsigned long nr_to_scan; |
1109 | 1109 | unsigned long nr_reclaimed = 0; |
1110 | 1110 | |
1111 | - atomic_inc(&zone->reclaim_in_progress); | |
1111 | + zone_set_flag(zone, ZONE_RECLAIM_LOCKED); | |
1112 | 1112 | |
1113 | 1113 | /* |
1114 | 1114 | * Add one to `nr_to_scan' just to make sure that the kernel will |
... | ... | @@ -1149,7 +1149,7 @@ |
1149 | 1149 | |
1150 | 1150 | throttle_vm_writeout(sc->gfp_mask); |
1151 | 1151 | |
1152 | - atomic_dec(&zone->reclaim_in_progress); | |
1152 | + zone_clear_flag(zone, ZONE_RECLAIM_LOCKED); | |
1153 | 1153 | return nr_reclaimed; |
1154 | 1154 | } |
1155 | 1155 | |
... | ... | @@ -1187,7 +1187,7 @@ |
1187 | 1187 | |
1188 | 1188 | note_zone_scanning_priority(zone, priority); |
1189 | 1189 | |
1190 | - if (zone->all_unreclaimable && priority != DEF_PRIORITY) | |
1190 | + if (zone_is_all_unreclaimable(zone) && priority != DEF_PRIORITY) | |
1191 | 1191 | continue; /* Let kswapd poll it */ |
1192 | 1192 | |
1193 | 1193 | sc->all_unreclaimable = 0; |
... | ... | @@ -1368,7 +1368,8 @@ |
1368 | 1368 | if (!populated_zone(zone)) |
1369 | 1369 | continue; |
1370 | 1370 | |
1371 | - if (zone->all_unreclaimable && priority != DEF_PRIORITY) | |
1371 | + if (zone_is_all_unreclaimable(zone) && | |
1372 | + priority != DEF_PRIORITY) | |
1372 | 1373 | continue; |
1373 | 1374 | |
1374 | 1375 | if (!zone_watermark_ok(zone, order, zone->pages_high, |
... | ... | @@ -1403,7 +1404,8 @@ |
1403 | 1404 | if (!populated_zone(zone)) |
1404 | 1405 | continue; |
1405 | 1406 | |
1406 | - if (zone->all_unreclaimable && priority != DEF_PRIORITY) | |
1407 | + if (zone_is_all_unreclaimable(zone) && | |
1408 | + priority != DEF_PRIORITY) | |
1407 | 1409 | continue; |
1408 | 1410 | |
1409 | 1411 | if (!zone_watermark_ok(zone, order, zone->pages_high, |
1410 | 1412 | |
... | ... | @@ -1424,12 +1426,13 @@ |
1424 | 1426 | lru_pages); |
1425 | 1427 | nr_reclaimed += reclaim_state->reclaimed_slab; |
1426 | 1428 | total_scanned += sc.nr_scanned; |
1427 | - if (zone->all_unreclaimable) | |
1429 | + if (zone_is_all_unreclaimable(zone)) | |
1428 | 1430 | continue; |
1429 | 1431 | if (nr_slab == 0 && zone->pages_scanned >= |
1430 | 1432 | (zone_page_state(zone, NR_ACTIVE) |
1431 | 1433 | + zone_page_state(zone, NR_INACTIVE)) * 6) |
1432 | - zone->all_unreclaimable = 1; | |
1434 | + zone_set_flag(zone, | |
1435 | + ZONE_ALL_UNRECLAIMABLE); | |
1433 | 1436 | /* |
1434 | 1437 | * If we've done a decent amount of scanning and |
1435 | 1438 | * the reclaim ratio is low, start doing writepage |
... | ... | @@ -1595,7 +1598,7 @@ |
1595 | 1598 | if (!populated_zone(zone)) |
1596 | 1599 | continue; |
1597 | 1600 | |
1598 | - if (zone->all_unreclaimable && prio != DEF_PRIORITY) | |
1601 | + if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY) | |
1599 | 1602 | continue; |
1600 | 1603 | |
1601 | 1604 | /* For pass = 0 we don't shrink the active list */ |
... | ... | @@ -1919,10 +1922,8 @@ |
1919 | 1922 | * not have reclaimable pages and if we should not delay the allocation |
1920 | 1923 | * then do not scan. |
1921 | 1924 | */ |
1922 | - if (!(gfp_mask & __GFP_WAIT) || | |
1923 | - zone->all_unreclaimable || | |
1924 | - atomic_read(&zone->reclaim_in_progress) > 0 || | |
1925 | - (current->flags & PF_MEMALLOC)) | |
1925 | + if (!(gfp_mask & __GFP_WAIT) || zone_is_all_unreclaimable(zone) || | |
1926 | + zone_is_reclaim_locked(zone) || (current->flags & PF_MEMALLOC)) | |
1926 | 1927 | return 0; |
1927 | 1928 | |
1928 | 1929 | /* |
mm/vmstat.c