Commit 0c0a4a517a31e05efb38304668198a873bfec6ca

Authored by Yasunori Goto
Committed by Linus Torvalds
Parent: 86f6dae137

memory hotplug: free memmaps allocated by bootmem

This patch frees the memmaps that were allocated by bootmem.

Freeing the usemap is not necessary; the pages that hold usemaps may still be
needed by other sections.

If the section being removed is the last section on its node, that section is
the final user of the usemap page.  (Usemaps are allocated on the pgdat's
section by the previous patch.)  But it must not be freed either, because the
section is in a logically offline state in which all of its pages are isolated
from the page allocator.  If the page were freed, the page allocator might
hand it out again even though it is about to be removed physically, which
would be a disaster.  So this patch keeps it as it is.

Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Cc: Badari Pulavarty <pbadari@us.ibm.com>
Cc: Yinghai Lu <yhlu.kernel@gmail.com>
Cc: Yasunori Goto <y-goto@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
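
For context, the memmap and usemap pages handled here were tagged earlier in
this series by the "register section/node id to free" patch, so that
removal-time code can recognize them. A minimal sketch of that tagging helper,
reconstructed from memory rather than quoted from this diff:

	/*
	 * Sketch of the helper added by the earlier patch in this series
	 * (names and details from memory, not part of this commit).
	 */
	static void get_page_bootmem(unsigned long info, struct page *page,
				     int type)
	{
		/* Record what the page backs (SECTION_INFO, NODE_INFO, ...)
		 * in _mapcount, which is otherwise unused for such pages. */
		atomic_set(&page->_mapcount, type);
		SetPagePrivate(page);
		/* Stash the owning section (or node) number in ->private;
		 * free_map_bootmem() below reads it back. */
		set_page_private(page, info);
		/* Take a reference while the memmap/usemap is live. */
		atomic_inc(&page->_count);
	}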

Showing 4 changed files with 60 additions and 7 deletions

mm/internal.h
@@ -34,8 +34,7 @@
34 34 atomic_dec(&page->_count);
35 35 }
36 36  
37   -extern void __init __free_pages_bootmem(struct page *page,
38   - unsigned int order);
  37 +extern void __free_pages_bootmem(struct page *page, unsigned int order);
39 38  
40 39 /*
41 40 * function for dealing with page's order in buddy system.
mm/memory_hotplug.c
@@ -198,8 +198,18 @@
198 198 return register_new_memory(__pfn_to_section(phys_start_pfn));
199 199 }
200 200  
  201 +#ifdef CONFIG_SPARSEMEM_VMEMMAP
201 202 static int __remove_section(struct zone *zone, struct mem_section *ms)
202 203 {
  204 + /*
  205 + * XXX: Freeing memmap with vmemmap is not implemented yet.
  206 + * This should be removed later.
  207 + */
  208 + return -EBUSY;
  209 +}
  210 +#else
  211 +static int __remove_section(struct zone *zone, struct mem_section *ms)
  212 +{
203 213 unsigned long flags;
204 214 struct pglist_data *pgdat = zone->zone_pgdat;
205 215 int ret = -EINVAL;
... ... @@ -216,6 +226,7 @@
216 226 pgdat_resize_unlock(pgdat, &flags);
217 227 return 0;
218 228 }
  229 +#endif
219 230  
220 231 /*
221 232 * Reasonably generic function for adding memory. It is
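
For reference, __remove_section() is called once per section by the removal
path; a rough sketch of the caller from the companion hotplug-remove patches
(reconstructed from memory, not part of this diff) shows how the -EBUSY from
the vmemmap stub above aborts the whole removal:

	int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
			   unsigned long nr_pages)
	{
		unsigned long i;
		int sections_to_remove, ret = 0;

		/* Only whole sections can be removed. */
		BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
		BUG_ON(nr_pages % PAGES_PER_SECTION);

		sections_to_remove = nr_pages / PAGES_PER_SECTION;
		for (i = 0; i < sections_to_remove; i++) {
			unsigned long pfn = phys_start_pfn +
						i * PAGES_PER_SECTION;
			/* The first failure (e.g. -EBUSY on vmemmap
			 * kernels) stops the loop and fails the removal. */
			ret = __remove_section(zone, __pfn_to_section(pfn));
			if (ret)
				break;
		}
		return ret;
	}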
mm/page_alloc.c
@@ -546,7 +546,7 @@
546 546 /*
547 547 * permit the bootmem allocator to evade page validation on high-order frees
548 548 */
549   -void __init __free_pages_bootmem(struct page *page, unsigned int order)
  549 +void __free_pages_bootmem(struct page *page, unsigned int order)
550 550 {
551 551 if (order == 0) {
552 552 __ClearPageReserved(page);
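
Dropping __init above is what makes this safe: __free_pages_bootmem() is now
also reached from the memory-hotplug path, long after init text has been
discarded. The caller on that path is put_page_bootmem() from the earlier
patch in this series; roughly, as a sketch from memory rather than part of
this diff:

	void put_page_bootmem(struct page *page)
	{
		/* Drop the reference taken by get_page_bootmem(); on the
		 * last put, return the page to the buddy allocator. */
		if (atomic_dec_return(&page->_count) == 1) {
			ClearPagePrivate(page);
			set_page_private(page, 0);
			/* Back to the usual -1 for an unmapped page. */
			reset_page_mapcount(page);
			/* Runs at hotplug time, hence no __init allowed
			 * on the callee. */
			__free_pages_bootmem(page, 0);
		}
	}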
mm/sparse.c
@@ -8,6 +8,7 @@
8 8 #include <linux/module.h>
9 9 #include <linux/spinlock.h>
10 10 #include <linux/vmalloc.h>
  11 +#include "internal.h"
11 12 #include <asm/dma.h>
12 13 #include <asm/pgalloc.h>
13 14 #include <asm/pgtable.h>
@@ -376,6 +377,9 @@
376 377 {
377 378 return; /* XXX: Not implemented yet */
378 379 }
  380 +static void free_map_bootmem(struct page *page, unsigned long nr_pages)
  381 +{
  382 +}
379 383 #else
380 384 static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
381 385 {
382 386  
383 387  
384 388  
@@ -413,17 +417,47 @@
413 417 free_pages((unsigned long)memmap,
414 418 get_order(sizeof(struct page) * nr_pages));
415 419 }
  420 +
  421 +static void free_map_bootmem(struct page *page, unsigned long nr_pages)
  422 +{
  423 + unsigned long maps_section_nr, removing_section_nr, i;
  424 + int magic;
  425 +
  426 + for (i = 0; i < nr_pages; i++, page++) {
  427 + magic = atomic_read(&page->_mapcount);
  428 +
  429 + BUG_ON(magic == NODE_INFO);
  430 +
  431 + maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
  432 + removing_section_nr = page->private;
  433 +
  434 + /*
  435 + * When this function is called, the section being removed is in a
  436 + * logically offline state: all of its pages are isolated from the
  437 + * page allocator. If the removed section's memmap is placed on
  438 + * that same section, it must not be freed.
  439 + * If it were freed, the page allocator could hand it out even
  440 + * though it is about to be removed physically.
  441 + */
  442 + if (maps_section_nr != removing_section_nr)
  443 + put_page_bootmem(page);
  444 + }
  445 +}
416 446 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
417 447  
418 448 static void free_section_usemap(struct page *memmap, unsigned long *usemap)
419 449 {
  450 + struct page *usemap_page;
  451 + unsigned long nr_pages;
  452 +
420 453 if (!usemap)
421 454 return;
422 455  
  456 + usemap_page = virt_to_page(usemap);
423 457 /*
424 458 * Check to see if allocation came from hot-plug-add
425 459 */
426   - if (PageSlab(virt_to_page(usemap))) {
  460 + if (PageSlab(usemap_page)) {
427 461 kfree(usemap);
428 462 if (memmap)
429 463 __kfree_section_memmap(memmap, PAGES_PER_SECTION);
430 464  
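
The nr_pages value that free_map_bootmem() receives (computed in the next
hunk) is the number of pages backing one section's memmap. A worked example,
assuming typical x86_64 values that are not stated in this diff:

	/*
	 * Assuming 4 KiB pages, 128 MiB sections and a 64-byte struct page:
	 *
	 *   PAGES_PER_SECTION = 128 MiB / 4 KiB                 = 32768
	 *   memmap size       = 32768 * sizeof(struct page)     = 2 MiB
	 *   nr_pages          = PAGE_ALIGN(2 MiB) >> PAGE_SHIFT = 512
	 *
	 * free_map_bootmem() then walks those 512 pages, calling
	 * put_page_bootmem() on each one that lives outside the section
	 * being removed.
	 */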
@@ -431,10 +465,19 @@
431 465 }
432 466  
433 467 /*
434   - * TODO: Allocations came from bootmem - how do I free up ?
  468 + * The usemap came from bootmem. It is packed with the other usemaps
  469 + * on the section that holds the pgdat at boot time; keep it as is for now.
435 470 */
436   - printk(KERN_WARNING "Not freeing up allocations from bootmem "
437   - "- leaking memory\n");
  471 +
  472 + if (memmap) {
  473 + struct page *memmap_page;
  474 + memmap_page = virt_to_page(memmap);
  475 +
  476 + nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
  477 + >> PAGE_SHIFT;
  478 +
  479 + free_map_bootmem(memmap_page, nr_pages);
  480 + }
438 481 }
439 482  
440 483 /*
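
For completeness, free_section_usemap() is reached from
sparse_remove_one_section() after the section's mem_map encoding has been torn
down; a sketch of that caller as it looked in this era (from memory, not part
of this diff):

	void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
	{
		struct page *memmap = NULL;
		unsigned long *usemap = NULL;

		if (ms->section_mem_map) {
			usemap = ms->pageblock_flags;
			memmap = sparse_decode_mem_map(ms->section_mem_map,
							__section_nr(ms));
			/* Unhook the section before freeing its backing. */
			ms->section_mem_map = 0;
			ms->pageblock_flags = NULL;
		}

		free_section_usemap(memmap, usemap);
	}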