Commit 4edd7ceff0662afde195da6f6c43e7cbe1ed2dc4

Authored by David Rientjes
Committed by Linus Torvalds
1 parent fe74ebb106

mm, hotplug: avoid compiling memory hotremove functions when disabled

__remove_pages() is only necessary for CONFIG_MEMORY_HOTREMOVE.  PowerPC
pseries will return -EOPNOTSUPP if unsupported.

Adding an #ifdef causes several other functions it depends on to also
become unnecessary, which saves in .text when disabled (it's disabled in
most defconfigs besides powerpc, including x86).  remove_memory_block()
becomes static since it is not referenced outside of
drivers/base/memory.c.

Build tested on x86 and powerpc with CONFIG_MEMORY_HOTREMOVE both enabled
and disabled.

Signed-off-by: David Rientjes <rientjes@google.com>
Acked-by: Toshi Kani <toshi.kani@hp.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Cc: Tang Chen <tangchen@cn.fujitsu.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 6 changed files with 113 additions and 90 deletions Side-by-side Diff

arch/powerpc/platforms/pseries/hotplug-memory.c
... ... @@ -72,6 +72,7 @@
72 72 return get_memblock_size();
73 73 }
74 74  
  75 +#ifdef CONFIG_MEMORY_HOTREMOVE
75 76 static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
76 77 {
77 78 unsigned long start, start_pfn;
... ... @@ -153,6 +154,17 @@
153 154 ret = pseries_remove_memblock(base, lmb_size);
154 155 return ret;
155 156 }
  157 +#else
  158 +static inline int pseries_remove_memblock(unsigned long base,
  159 + unsigned int memblock_size)
  160 +{
  161 + return -EOPNOTSUPP;
  162 +}
  163 +static inline int pseries_remove_memory(struct device_node *np)
  164 +{
  165 + return -EOPNOTSUPP;
  166 +}
  167 +#endif /* CONFIG_MEMORY_HOTREMOVE */
156 168  
157 169 static int pseries_add_memory(struct device_node *np)
158 170 {
drivers/base/memory.c
... ... @@ -93,16 +93,6 @@
93 93 return error;
94 94 }
95 95  
96   -static void
97   -unregister_memory(struct memory_block *memory)
98   -{
99   - BUG_ON(memory->dev.bus != &memory_subsys);
100   -
101   - /* drop the ref. we got in remove_memory_block() */
102   - kobject_put(&memory->dev.kobj);
103   - device_unregister(&memory->dev);
104   -}
105   -
106 96 unsigned long __weak memory_block_size_bytes(void)
107 97 {
108 98 return MIN_MEMORY_BLOCK_SIZE;
109 99  
... ... @@ -637,9 +627,29 @@
637 627 return ret;
638 628 }
639 629  
640   -int remove_memory_block(unsigned long node_id, struct mem_section *section,
641   - int phys_device)
  630 +/*
  631 + * need an interface for the VM to add new memory regions,
  632 + * but without onlining it.
  633 + */
  634 +int register_new_memory(int nid, struct mem_section *section)
642 635 {
  636 + return add_memory_section(nid, section, NULL, MEM_OFFLINE, HOTPLUG);
  637 +}
  638 +
  639 +#ifdef CONFIG_MEMORY_HOTREMOVE
  640 +static void
  641 +unregister_memory(struct memory_block *memory)
  642 +{
  643 + BUG_ON(memory->dev.bus != &memory_subsys);
  644 +
  645 + /* drop the ref. we got in remove_memory_block() */
  646 + kobject_put(&memory->dev.kobj);
  647 + device_unregister(&memory->dev);
  648 +}
  649 +
  650 +static int remove_memory_block(unsigned long node_id,
  651 + struct mem_section *section, int phys_device)
  652 +{
643 653 struct memory_block *mem;
644 654  
645 655 mutex_lock(&mem_sysfs_mutex);
... ... @@ -661,15 +671,6 @@
661 671 return 0;
662 672 }
663 673  
664   -/*
665   - * need an interface for the VM to add new memory regions,
666   - * but without onlining it.
667   - */
668   -int register_new_memory(int nid, struct mem_section *section)
669   -{
670   - return add_memory_section(nid, section, NULL, MEM_OFFLINE, HOTPLUG);
671   -}
672   -
673 674 int unregister_memory_section(struct mem_section *section)
674 675 {
675 676 if (!present_section(section))
... ... @@ -677,6 +678,7 @@
677 678  
678 679 return remove_memory_block(0, section, 0);
679 680 }
  681 +#endif /* CONFIG_MEMORY_HOTREMOVE */
680 682  
681 683 /*
682 684 * offline one memory block. If the memory block has been offlined, do nothing.
include/linux/memory.h
... ... @@ -115,9 +115,10 @@
115 115 extern int register_memory_isolate_notifier(struct notifier_block *nb);
116 116 extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
117 117 extern int register_new_memory(int, struct mem_section *);
  118 +#ifdef CONFIG_MEMORY_HOTREMOVE
118 119 extern int unregister_memory_section(struct mem_section *);
  120 +#endif
119 121 extern int memory_dev_init(void);
120   -extern int remove_memory_block(unsigned long, struct mem_section *, int);
121 122 extern int memory_notify(unsigned long val, void *v);
122 123 extern int memory_isolate_notify(unsigned long val, void *v);
123 124 extern struct memory_block *find_memory_block_hinted(struct mem_section *,
include/linux/memory_hotplug.h
... ... @@ -97,12 +97,12 @@
97 97 #ifdef CONFIG_MEMORY_HOTREMOVE
98 98 extern bool is_pageblock_removable_nolock(struct page *page);
99 99 extern int arch_remove_memory(u64 start, u64 size);
  100 +extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
  101 + unsigned long nr_pages);
100 102 #endif /* CONFIG_MEMORY_HOTREMOVE */
101 103  
102 104 /* reasonably generic interface to expand the physical pages in a zone */
103 105 extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn,
104   - unsigned long nr_pages);
105   -extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
106 106 unsigned long nr_pages);
107 107  
108 108 #ifdef CONFIG_NUMA
mm/memory_hotplug.c
... ... @@ -436,6 +436,40 @@
436 436 return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
437 437 }
438 438  
  439 +/*
  440 + * Reasonably generic function for adding memory. It is
  441 + * expected that archs that support memory hotplug will
  442 + * call this function after deciding the zone to which to
  443 + * add the new pages.
  444 + */
  445 +int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
  446 + unsigned long nr_pages)
  447 +{
  448 + unsigned long i;
  449 + int err = 0;
  450 + int start_sec, end_sec;
  451 + /* during initialize mem_map, align hot-added range to section */
  452 + start_sec = pfn_to_section_nr(phys_start_pfn);
  453 + end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
  454 +
  455 + for (i = start_sec; i <= end_sec; i++) {
  456 + err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);
  457 +
  458 + /*
  459 + * EEXIST is finally dealt with by ioresource collision
  460 + * check. see add_memory() => register_memory_resource()
  461 + * Warning will be printed if there is collision.
  462 + */
  463 + if (err && (err != -EEXIST))
  464 + break;
  465 + err = 0;
  466 + }
  467 +
  468 + return err;
  469 +}
  470 +EXPORT_SYMBOL_GPL(__add_pages);
  471 +
  472 +#ifdef CONFIG_MEMORY_HOTREMOVE
439 473 /* find the smallest valid pfn in the range [start_pfn, end_pfn) */
440 474 static int find_smallest_section_pfn(int nid, struct zone *zone,
441 475 unsigned long start_pfn,
... ... @@ -658,39 +692,6 @@
658 692 return 0;
659 693 }
660 694  
661   -/*
662   - * Reasonably generic function for adding memory. It is
663   - * expected that archs that support memory hotplug will
664   - * call this function after deciding the zone to which to
665   - * add the new pages.
666   - */
667   -int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
668   - unsigned long nr_pages)
669   -{
670   - unsigned long i;
671   - int err = 0;
672   - int start_sec, end_sec;
673   - /* during initialize mem_map, align hot-added range to section */
674   - start_sec = pfn_to_section_nr(phys_start_pfn);
675   - end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
676   -
677   - for (i = start_sec; i <= end_sec; i++) {
678   - err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);
679   -
680   - /*
681   - * EEXIST is finally dealt with by ioresource collision
682   - * check. see add_memory() => register_memory_resource()
683   - * Warning will be printed if there is collision.
684   - */
685   - if (err && (err != -EEXIST))
686   - break;
687   - err = 0;
688   - }
689   -
690   - return err;
691   -}
692   -EXPORT_SYMBOL_GPL(__add_pages);
693   -
694 695 /**
695 696 * __remove_pages() - remove sections of pages from a zone
696 697 * @zone: zone from which pages need to be removed
... ... @@ -733,6 +734,7 @@
733 734 return ret;
734 735 }
735 736 EXPORT_SYMBOL_GPL(__remove_pages);
  737 +#endif /* CONFIG_MEMORY_HOTREMOVE */
736 738  
737 739 int set_online_page_callback(online_page_callback_t callback)
738 740 {
mm/sparse.c
... ... @@ -620,6 +620,7 @@
620 620  
621 621 vmemmap_free(start, end);
622 622 }
  623 +#ifdef CONFIG_MEMORY_HOTREMOVE
623 624 static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
624 625 {
625 626 unsigned long start = (unsigned long)memmap;
... ... @@ -627,6 +628,7 @@
627 628  
628 629 vmemmap_free(start, end);
629 630 }
  631 +#endif /* CONFIG_MEMORY_HOTREMOVE */
630 632 #else
631 633 static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
632 634 {
... ... @@ -664,6 +666,7 @@
664 666 get_order(sizeof(struct page) * nr_pages));
665 667 }
666 668  
  669 +#ifdef CONFIG_MEMORY_HOTREMOVE
667 670 static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
668 671 {
669 672 unsigned long maps_section_nr, removing_section_nr, i;
670 673  
... ... @@ -690,40 +693,9 @@
690 693 put_page_bootmem(page);
691 694 }
692 695 }
  696 +#endif /* CONFIG_MEMORY_HOTREMOVE */
693 697 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
694 698  
695   -static void free_section_usemap(struct page *memmap, unsigned long *usemap)
696   -{
697   - struct page *usemap_page;
698   - unsigned long nr_pages;
699   -
700   - if (!usemap)
701   - return;
702   -
703   - usemap_page = virt_to_page(usemap);
704   - /*
705   - * Check to see if allocation came from hot-plug-add
706   - */
707   - if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
708   - kfree(usemap);
709   - if (memmap)
710   - __kfree_section_memmap(memmap, PAGES_PER_SECTION);
711   - return;
712   - }
713   -
714   - /*
715   - * The usemap came from bootmem. This is packed with other usemaps
716   - * on the section which has pgdat at boot time. Just keep it as is now.
717   - */
718   -
719   - if (memmap) {
720   - nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
721   - >> PAGE_SHIFT;
722   -
723   - free_map_bootmem(memmap, nr_pages);
724   - }
725   -}
726   -
727 699 /*
728 700 * returns the number of sections whose mem_maps were properly
729 701 * set. If this is <=0, then that means that the passed-in
... ... @@ -800,6 +772,39 @@
800 772 }
801 773 #endif
802 774  
  775 +#ifdef CONFIG_MEMORY_HOTREMOVE
  776 +static void free_section_usemap(struct page *memmap, unsigned long *usemap)
  777 +{
  778 + struct page *usemap_page;
  779 + unsigned long nr_pages;
  780 +
  781 + if (!usemap)
  782 + return;
  783 +
  784 + usemap_page = virt_to_page(usemap);
  785 + /*
  786 + * Check to see if allocation came from hot-plug-add
  787 + */
  788 + if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
  789 + kfree(usemap);
  790 + if (memmap)
  791 + __kfree_section_memmap(memmap, PAGES_PER_SECTION);
  792 + return;
  793 + }
  794 +
  795 + /*
  796 + * The usemap came from bootmem. This is packed with other usemaps
  797 + * on the section which has pgdat at boot time. Just keep it as is now.
  798 + */
  799 +
  800 + if (memmap) {
  801 + nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
  802 + >> PAGE_SHIFT;
  803 +
  804 + free_map_bootmem(memmap, nr_pages);
  805 + }
  806 +}
  807 +
803 808 void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
804 809 {
805 810 struct page *memmap = NULL;
... ... @@ -819,5 +824,6 @@
819 824 clear_hwpoisoned_pages(memmap, PAGES_PER_SECTION);
820 825 free_section_usemap(memmap, usemap);
821 826 }
822   -#endif
  827 +#endif /* CONFIG_MEMORY_HOTREMOVE */
  828 +#endif /* CONFIG_MEMORY_HOTPLUG */