Commit 170a5a7eb2bf10161197e5490fbc29ca4561aedb
Committed by
Linus Torvalds
1 parent
c3d5f5f0c2
Exists in
master
and in
20 other branches
mm: make __free_pages_bootmem() only available at boot time
In order to simplify management of totalram_pages and zone->managed_pages, make __free_pages_bootmem() only available at boot time. With this change applied, __free_pages_bootmem() will only be used by bootmem.c and nobootmem.c at boot time, so mark it as __init. Other callers of __free_pages_bootmem() have been converted to use free_reserved_page(), which handles totalram_pages and zone->managed_pages in a safer way. This patch also fixes a bug in free_pagetable() for x86_64, which should increase zone->managed_pages instead of zone->present_pages when freeing reserved pages. And now we have managed_pages_count_lock to protect totalram_pages and zone->managed_pages, so remove the redundant ppb_lock lock in put_page_bootmem(). This greatly simplifies the locking rules. Signed-off-by: Jiang Liu <jiang.liu@huawei.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Ingo Molnar <mingo@redhat.com> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Yinghai Lu <yinghai@kernel.org> Cc: Wen Congyang <wency@cn.fujitsu.com> Cc: Tang Chen <tangchen@cn.fujitsu.com> Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com> Cc: Mel Gorman <mel@csn.ul.ie> Cc: Minchan Kim <minchan@kernel.org> Cc: "Michael S. 
Tsirkin" <mst@redhat.com> Cc: <sworddragon2@aol.com> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Chris Metcalf <cmetcalf@tilera.com> Cc: David Howells <dhowells@redhat.com> Cc: Geert Uytterhoeven <geert@linux-m68k.org> Cc: Jeremy Fitzhardinge <jeremy@goop.org> Cc: Jianguo Wu <wujianguo@huawei.com> Cc: Joonsoo Kim <js1304@gmail.com> Cc: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> Cc: Marek Szyprowski <m.szyprowski@samsung.com> Cc: Michel Lespinasse <walken@google.com> Cc: Rik van Riel <riel@redhat.com> Cc: Rusty Russell <rusty@rustcorp.com.au> Cc: Tejun Heo <tj@kernel.org> Cc: Will Deacon <will.deacon@arm.com> Cc: Russell King <rmk@arm.linux.org.uk> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 3 changed files with 5 additions and 38 deletions Side-by-side Diff
arch/x86/mm/init_64.c
... | ... | @@ -712,36 +712,22 @@ |
712 | 712 | |
713 | 713 | static void __meminit free_pagetable(struct page *page, int order) |
714 | 714 | { |
715 | - struct zone *zone; | |
716 | - bool bootmem = false; | |
717 | 715 | unsigned long magic; |
718 | 716 | unsigned int nr_pages = 1 << order; |
719 | 717 | |
720 | 718 | /* bootmem page has reserved flag */ |
721 | 719 | if (PageReserved(page)) { |
722 | 720 | __ClearPageReserved(page); |
723 | - bootmem = true; | |
724 | 721 | |
725 | 722 | magic = (unsigned long)page->lru.next; |
726 | 723 | if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) { |
727 | 724 | while (nr_pages--) |
728 | 725 | put_page_bootmem(page++); |
729 | 726 | } else |
730 | - __free_pages_bootmem(page, order); | |
727 | + while (nr_pages--) | |
728 | + free_reserved_page(page++); | |
731 | 729 | } else |
732 | 730 | free_pages((unsigned long)page_address(page), order); |
733 | - | |
734 | - /* | |
735 | - * SECTION_INFO pages and MIX_SECTION_INFO pages | |
736 | - * are all allocated by bootmem. | |
737 | - */ | |
738 | - if (bootmem) { | |
739 | - zone = page_zone(page); | |
740 | - zone_span_writelock(zone); | |
741 | - zone->present_pages += nr_pages; | |
742 | - zone_span_writeunlock(zone); | |
743 | - totalram_pages += nr_pages; | |
744 | - } | |
745 | 731 | } |
746 | 732 | |
747 | 733 | static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd) |
mm/memory_hotplug.c
... | ... | @@ -101,12 +101,9 @@ |
101 | 101 | atomic_inc(&page->_count); |
102 | 102 | } |
103 | 103 | |
104 | -/* reference to __meminit __free_pages_bootmem is valid | |
105 | - * so use __ref to tell modpost not to generate a warning */ | |
106 | -void __ref put_page_bootmem(struct page *page) | |
104 | +void put_page_bootmem(struct page *page) | |
107 | 105 | { |
108 | 106 | unsigned long type; |
109 | - static DEFINE_MUTEX(ppb_lock); | |
110 | 107 | |
111 | 108 | type = (unsigned long) page->lru.next; |
112 | 109 | BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE || |
113 | 110 | |
... | ... | @@ -116,17 +113,8 @@ |
116 | 113 | ClearPagePrivate(page); |
117 | 114 | set_page_private(page, 0); |
118 | 115 | INIT_LIST_HEAD(&page->lru); |
119 | - | |
120 | - /* | |
121 | - * Please refer to comment for __free_pages_bootmem() | |
122 | - * for why we serialize here. | |
123 | - */ | |
124 | - mutex_lock(&ppb_lock); | |
125 | - __free_pages_bootmem(page, 0); | |
126 | - mutex_unlock(&ppb_lock); | |
127 | - totalram_pages++; | |
116 | + free_reserved_page(page); | |
128 | 117 | } |
129 | - | |
130 | 118 | } |
131 | 119 | |
132 | 120 | #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE |
mm/page_alloc.c
... | ... | @@ -745,14 +745,7 @@ |
745 | 745 | local_irq_restore(flags); |
746 | 746 | } |
747 | 747 | |
748 | -/* | |
749 | - * Read access to zone->managed_pages is safe because it's unsigned long, | |
750 | - * but we still need to serialize writers. Currently all callers of | |
751 | - * __free_pages_bootmem() except put_page_bootmem() should only be used | |
752 | - * at boot time. So for shorter boot time, we shift the burden to | |
753 | - * put_page_bootmem() to serialize writers. | |
754 | - */ | |
755 | -void __meminit __free_pages_bootmem(struct page *page, unsigned int order) | |
748 | +void __init __free_pages_bootmem(struct page *page, unsigned int order) | |
756 | 749 | { |
757 | 750 | unsigned int nr_pages = 1 << order; |
758 | 751 | unsigned int loop; |