Commit 1b79acc91115ba47e744b70bb166b77bd94f5855
Committed by
Linus Torvalds
1 parent
839a4fcc8a
Exists in
master
and in
20 other branches
mm, mem-hotplug: recalculate lowmem_reserve when memory hotplug occurs
Currently, memory hotplug calls setup_per_zone_wmarks() and calculate_zone_inactive_ratio(), but doesn't call setup_per_zone_lowmem_reserve(). It means the number of reserved pages isn't updated even if memory hotplug occurs. This patch fixes it. Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Acked-by: Mel Gorman <mel@csn.ul.ie> Reviewed-by: Minchan Kim <minchan.kim@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 3 changed files with 8 additions and 7 deletions Side-by-side Diff
include/linux/mm.h
... | ... | @@ -1381,7 +1381,7 @@ |
1381 | 1381 | extern void memmap_init_zone(unsigned long, int, unsigned long, |
1382 | 1382 | unsigned long, enum memmap_context); |
1383 | 1383 | extern void setup_per_zone_wmarks(void); |
1384 | -extern void calculate_zone_inactive_ratio(struct zone *zone); | |
1384 | +extern int __meminit init_per_zone_wmark_min(void); | |
1385 | 1385 | extern void mem_init(void); |
1386 | 1386 | extern void __init mmap_init(void); |
1387 | 1387 | extern void show_mem(unsigned int flags); |
mm/memory_hotplug.c
... | ... | @@ -459,8 +459,9 @@ |
459 | 459 | zone_pcp_update(zone); |
460 | 460 | |
461 | 461 | mutex_unlock(&zonelists_mutex); |
462 | - setup_per_zone_wmarks(); | |
463 | - calculate_zone_inactive_ratio(zone); | |
462 | + | |
463 | + init_per_zone_wmark_min(); | |
464 | + | |
464 | 465 | if (onlined_pages) { |
465 | 466 | kswapd_run(zone_to_nid(zone)); |
466 | 467 | node_set_state(zone_to_nid(zone), N_HIGH_MEMORY); |
... | ... | @@ -893,8 +894,8 @@ |
893 | 894 | zone->zone_pgdat->node_present_pages -= offlined_pages; |
894 | 895 | totalram_pages -= offlined_pages; |
895 | 896 | |
896 | - setup_per_zone_wmarks(); | |
897 | - calculate_zone_inactive_ratio(zone); | |
897 | + init_per_zone_wmark_min(); | |
898 | + | |
898 | 899 | if (!node_present_pages(node)) { |
899 | 900 | node_clear_state(node, N_HIGH_MEMORY); |
900 | 901 | kswapd_stop(node); |
mm/page_alloc.c
... | ... | @@ -5094,7 +5094,7 @@ |
5094 | 5094 | * 1TB 101 10GB |
5095 | 5095 | * 10TB 320 32GB |
5096 | 5096 | */ |
5097 | -void __meminit calculate_zone_inactive_ratio(struct zone *zone) | |
5097 | +static void __meminit calculate_zone_inactive_ratio(struct zone *zone) | |
5098 | 5098 | { |
5099 | 5099 | unsigned int gb, ratio; |
5100 | 5100 | |
... | ... | @@ -5140,7 +5140,7 @@ |
5140 | 5140 | * 8192MB: 11584k |
5141 | 5141 | * 16384MB: 16384k |
5142 | 5142 | */ |
5143 | -static int __init init_per_zone_wmark_min(void) | |
5143 | +int __meminit init_per_zone_wmark_min(void) | |
5144 | 5144 | { |
5145 | 5145 | unsigned long lowmem_kbytes; |
5146 | 5146 |