Commit 3dcc0571cd64816309765b7c7e4691a4cadf2ee7

Authored by Jiang Liu
Committed by Linus Torvalds
1 parent 170a5a7eb2

mm: correctly update zone->managed_pages

Enhance adjust_managed_page_count() to also adjust totalhigh_pages for
highmem pages, and convert code that directly modifies totalram_pages to
use adjust_managed_page_count() instead, because it adjusts totalram_pages,
totalhigh_pages and zone->managed_pages together in a safe way.
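
For reference, after this patch the helper looks roughly like this (a
sketch paraphrasing the mm/page_alloc.c hunk at the end of this diff;
see that hunk for the authoritative change):

    void adjust_managed_page_count(struct page *page, long count)
    {
            spin_lock(&managed_page_count_lock);
            page_zone(page)->managed_pages += count;
            totalram_pages += count;
    #ifdef CONFIG_HIGHMEM
            if (PageHighMem(page))
                    totalhigh_pages += count;  /* new: keep highmem total in sync */
    #endif
            spin_unlock(&managed_page_count_lock);
    }
    EXPORT_SYMBOL(adjust_managed_page_count);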

Remove inc_totalhigh_pages() and dec_totalhigh_pages() from the xen/balloon
driver because adjust_managed_page_count() already adjusts
totalhigh_pages.
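
With the macros gone, the xen balloon paths reduce to a single call; for
example, balloon_append() becomes (per the drivers/xen/balloon.c hunk
below):

    static void balloon_append(struct page *page)
    {
            __balloon_append(page);
            /* replaces the open-coded totalram_pages--/dec_totalhigh_pages() */
            adjust_managed_page_count(page, -1);
    }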

This patch also fixes two bugs:

1) enhance the virtio_balloon driver to adjust totalhigh_pages when
   reserving/unreserving pages (illustrated below);
2) enhance memory_hotplug.c to adjust totalhigh_pages when hot-removing
   memory.
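
To illustrate the virtio_balloon fix, the page-release loop now looks
roughly like this (paraphrasing the drivers/virtio/virtio_balloon.c hunk
below; only the accounting call changes):

    for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
            struct page *page = balloon_pfn_to_page(pfns[i]);

            balloon_page_free(page);
            /* updates totalram_pages, totalhigh_pages and
             * zone->managed_pages together under the lock */
            adjust_managed_page_count(page, 1);
    }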

We still need to deal with the modifications of totalram_pages in
arch/powerpc/platforms/pseries/cmm.c, but that requires help from PPC
experts.

[akpm@linux-foundation.org: remove ifdef, per Wanpeng Li, virtio_balloon.c cleanup, per Sergei]
[akpm@linux-foundation.org: export adjust_managed_page_count() to modules, for drivers/virtio/virtio_balloon.c]
Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Cc: Tang Chen <tangchen@cn.fujitsu.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Minchan Kim <minchan@kernel.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: <sworddragon2@aol.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jianguo Wu <wujianguo@huawei.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

5 changed files with 19 additions and 40 deletions

drivers/virtio/virtio_balloon.c
... ... @@ -148,7 +148,7 @@
148 148 }
149 149 set_page_pfns(vb->pfns + vb->num_pfns, page);
150 150 vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
151   - totalram_pages--;
  151 + adjust_managed_page_count(page, -1);
152 152 }
153 153  
154 154 /* Did we get any? */
... ... @@ -163,8 +163,9 @@
163 163  
164 164 /* Find pfns pointing at start of each page, get pages and free them. */
165 165 for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
166   - balloon_page_free(balloon_pfn_to_page(pfns[i]));
167   - totalram_pages++;
  166 + struct page *page = balloon_pfn_to_page(pfns[i]);
  167 + balloon_page_free(page);
  168 + adjust_managed_page_count(page, 1);
168 169 }
169 170 }
170 171  
drivers/xen/balloon.c
... ... @@ -89,14 +89,6 @@
89 89 /* We increase/decrease in batches which fit in a page */
90 90 static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)];
91 91  
92   -#ifdef CONFIG_HIGHMEM
93   -#define inc_totalhigh_pages() (totalhigh_pages++)
94   -#define dec_totalhigh_pages() (totalhigh_pages--)
95   -#else
96   -#define inc_totalhigh_pages() do {} while (0)
97   -#define dec_totalhigh_pages() do {} while (0)
98   -#endif
99   -
100 92 /* List of ballooned pages, threaded through the mem_map array. */
101 93 static LIST_HEAD(ballooned_pages);
102 94  
... ... @@ -132,9 +124,7 @@
132 124 static void balloon_append(struct page *page)
133 125 {
134 126 __balloon_append(page);
135   - if (PageHighMem(page))
136   - dec_totalhigh_pages();
137   - totalram_pages--;
  127 + adjust_managed_page_count(page, -1);
138 128 }
139 129  
140 130 /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
... ... @@ -151,13 +141,12 @@
151 141 page = list_entry(ballooned_pages.next, struct page, lru);
152 142 list_del(&page->lru);
153 143  
154   - if (PageHighMem(page)) {
  144 + if (PageHighMem(page))
155 145 balloon_stats.balloon_high--;
156   - inc_totalhigh_pages();
157   - } else
  146 + else
158 147 balloon_stats.balloon_low--;
159 148  
160   - totalram_pages++;
  149 + adjust_managed_page_count(page, 1);
161 150  
162 151 return page;
163 152 }
... ... @@ -372,9 +361,7 @@
372 361 #endif
373 362  
374 363 /* Relinquish the page back to the allocator. */
375   - ClearPageReserved(page);
376   - init_page_count(page);
377   - __free_page(page);
  364 + __free_reserved_page(page);
378 365 }
379 366  
380 367 balloon_stats.current_pages += rc;
mm/hugetlb.c
... ... @@ -1263,7 +1263,7 @@
1263 1263 * side-effects, like CommitLimit going negative.
1264 1264 */
1265 1265 if (h->order > (MAX_ORDER - 1))
1266   - totalram_pages += 1 << h->order;
  1266 + adjust_managed_page_count(page, 1 << h->order);
1267 1267 }
1268 1268 }
1269 1269  
mm/memory_hotplug.c
... ... @@ -772,20 +772,13 @@
772 772  
773 773 void __online_page_increment_counters(struct page *page)
774 774 {
775   - totalram_pages++;
776   -
777   -#ifdef CONFIG_HIGHMEM
778   - if (PageHighMem(page))
779   - totalhigh_pages++;
780   -#endif
  775 + adjust_managed_page_count(page, 1);
781 776 }
782 777 EXPORT_SYMBOL_GPL(__online_page_increment_counters);
783 778  
784 779 void __online_page_free(struct page *page)
785 780 {
786   - ClearPageReserved(page);
787   - init_page_count(page);
788   - __free_page(page);
  781 + __free_reserved_page(page);
789 782 }
790 783 EXPORT_SYMBOL_GPL(__online_page_free);
791 784  
... ... @@ -983,7 +976,6 @@
983 976 return ret;
984 977 }
985 978  
986   - zone->managed_pages += onlined_pages;
987 979 zone->present_pages += onlined_pages;
988 980  
989 981 pgdat_resize_lock(zone->zone_pgdat, &flags);
... ... @@ -1572,14 +1564,12 @@
1572 1564 /* reset pagetype flags and makes migrate type to be MOVABLE */
1573 1565 undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1574 1566 /* removal success */
1575   - zone->managed_pages -= offlined_pages;
  1567 + adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
1576 1568 zone->present_pages -= offlined_pages;
1577 1569  
1578 1570 pgdat_resize_lock(zone->zone_pgdat, &flags);
1579 1571 zone->zone_pgdat->node_present_pages -= offlined_pages;
1580 1572 pgdat_resize_unlock(zone->zone_pgdat, &flags);
1581   -
1582   - totalram_pages -= offlined_pages;
1583 1573  
1584 1574 init_per_zone_wmark_min();
1585 1575  
mm/page_alloc.c
... ... @@ -780,11 +780,7 @@
780 780 set_page_refcounted(page);
781 781 set_pageblock_migratetype(page, MIGRATE_CMA);
782 782 __free_pages(page, pageblock_order);
783   - totalram_pages += pageblock_nr_pages;
784   -#ifdef CONFIG_HIGHMEM
785   - if (PageHighMem(page))
786   - totalhigh_pages += pageblock_nr_pages;
787   -#endif
  783 + adjust_managed_page_count(page, pageblock_nr_pages);
788 784 }
789 785 #endif
790 786  
... ... @@ -5207,8 +5203,13 @@
5207 5203 spin_lock(&managed_page_count_lock);
5208 5204 page_zone(page)->managed_pages += count;
5209 5205 totalram_pages += count;
  5206 +#ifdef CONFIG_HIGHMEM
  5207 + if (PageHighMem(page))
  5208 + totalhigh_pages += count;
  5209 +#endif
5210 5210 spin_unlock(&managed_page_count_lock);
5211 5211 }
  5212 +EXPORT_SYMBOL(adjust_managed_page_count);
5212 5213  
5213 5214 unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
5214 5215 {