Commit a3142c8e1dd57ff48040bdb3478cff9312543dc3
Committed by: Linus Torvalds
Parent: 0ceb331433
Exists in: master and 7 other branches
Fix section mismatch of memory hotplug related code.
This fixes many section mismatches in memory-hotplug-related code. I checked that it compiles with memory hotplug on and off on ia64 and x86-64 boxes.

Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
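Background on the annotations this patch touches: __init/__initdata code and data are always discarded after boot, while __meminit/__meminitdata are discarded only when CONFIG_MEMORY_HOTPLUG is disabled and stay resident otherwise, so the memory hot-add paths can keep using them. modpost reports a section mismatch when longer-lived code references a symbol in a shorter-lived init section. The sketch below shows the pattern the patch enforces; hypothetical_table and hypothetical_register_range() are illustrative names, not code from the kernel or from this commit.

#include <linux/init.h>
#include <linux/errno.h>

/* Illustrative bound, not a kernel constant. */
#define HYPOTHETICAL_MAX_RANGES	16

/*
 * Discarded after boot when CONFIG_MEMORY_HOTPLUG=n (it degrades to
 * __initdata), kept resident when hotplug is enabled.
 */
static unsigned long __meminitdata hypothetical_table[HYPOTHETICAL_MAX_RANGES];

/*
 * Code that touches the table needs a matching annotation. If the table
 * were left __initdata, this __meminit function could run at hot-add time
 * after the data had already been freed, and modpost would flag the
 * reference; that is the class of warning this commit silences by
 * promoting __initdata/__init to __meminitdata/__meminit wherever the
 * hotplug paths reach.
 */
int __meminit hypothetical_register_range(int index, unsigned long start_pfn)
{
	if (index < 0 || index >= HYPOTHETICAL_MAX_RANGES)
		return -EINVAL;
	hypothetical_table[index] = start_pfn;
	return 0;
}

Code that is only ever needed with hotplug enabled, such as the node-data allocation helpers on ia64 and the section-add path in mm/sparse.c, is instead wrapped in #ifdef CONFIG_MEMORY_HOTPLUG so it is not built at all otherwise.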
Showing 5 changed files with 26 additions and 22 deletions
arch/ia64/mm/discontig.c
@@ -693,6 +693,7 @@
 	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
 }
 
+#ifdef CONFIG_MEMORY_HOTPLUG
 pg_data_t *arch_alloc_nodedata(int nid)
 {
 	unsigned long size = compute_pernodesize(nid);
@@ -710,4 +711,5 @@
 	pgdat_list[update_node] = update_pgdat;
 	scatter_node_data();
 }
+#endif
arch/x86_64/mm/init.c
@@ -172,7 +172,7 @@
 	set_pte_phys(address, phys, prot);
 }
 
-unsigned long __initdata table_start, table_end;
+unsigned long __meminitdata table_start, table_end;
 
 static __meminit void *alloc_low_page(unsigned long *phys)
 {
@@ -204,7 +204,7 @@
 }
 
 /* Must run before zap_low_mappings */
-__init void *early_ioremap(unsigned long addr, unsigned long size)
+__meminit void *early_ioremap(unsigned long addr, unsigned long size)
 {
 	unsigned long vaddr;
 	pmd_t *pmd, *last_pmd;
@@ -233,7 +233,7 @@
 }
 
 /* To avoid virtual aliases later */
-__init void early_iounmap(void *addr, unsigned long size)
+__meminit void early_iounmap(void *addr, unsigned long size)
 {
 	unsigned long vaddr;
 	pmd_t *pmd;
drivers/acpi/numa.c
@@ -228,7 +228,7 @@
 	return 0;
 }
 
-int acpi_get_pxm(acpi_handle h)
+int __meminit acpi_get_pxm(acpi_handle h)
 {
 	unsigned long pxm;
 	acpi_status status;
@@ -246,7 +246,7 @@
 }
 EXPORT_SYMBOL(acpi_get_pxm);
 
-int acpi_get_node(acpi_handle *handle)
+int __meminit acpi_get_node(acpi_handle *handle)
 {
 	int pxm, node = -1;
 
mm/page_alloc.c
@@ -103,7 +103,7 @@
 
 unsigned long __meminitdata nr_kernel_pages;
 unsigned long __meminitdata nr_all_pages;
-static unsigned long __initdata dma_reserve;
+static unsigned long __meminitdata dma_reserve;
 
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
 /*
@@ -126,10 +126,10 @@
 #endif
 #endif
 
-struct node_active_region __initdata early_node_map[MAX_ACTIVE_REGIONS];
-int __initdata nr_nodemap_entries;
-unsigned long __initdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
-unsigned long __initdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
+struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
+int __meminitdata nr_nodemap_entries;
+unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
+unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
 #ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
 unsigned long __initdata node_boundary_start_pfn[MAX_NUMNODES];
 unsigned long __initdata node_boundary_end_pfn[MAX_NUMNODES];
@@ -2267,7 +2267,7 @@
  * Basic iterator support. Return the first range of PFNs for a node
  * Note: nid == MAX_NUMNODES returns first region regardless of node
  */
-static int __init first_active_region_index_in_nid(int nid)
+static int __meminit first_active_region_index_in_nid(int nid)
 {
 	int i;
 
@@ -2282,7 +2282,7 @@
  * Basic iterator support. Return the next active range of PFNs for a node
  * Note: nid == MAX_NUMNODES returns next region regardles of node
  */
-static int __init next_active_region_index_in_nid(int index, int nid)
+static int __meminit next_active_region_index_in_nid(int index, int nid)
 {
 	for (index = index + 1; index < nr_nodemap_entries; index++)
 		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
@@ -2435,7 +2435,7 @@
  * with no available memory, a warning is printed and the start and end
 * PFNs will be 0.
 */
-void __init get_pfn_range_for_nid(unsigned int nid,
+void __meminit get_pfn_range_for_nid(unsigned int nid,
 			unsigned long *start_pfn, unsigned long *end_pfn)
 {
 	int i;
@@ -2460,7 +2460,7 @@
 * Return the number of pages a zone spans in a node, including holes
 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 */
-unsigned long __init zone_spanned_pages_in_node(int nid,
+unsigned long __meminit zone_spanned_pages_in_node(int nid,
 			unsigned long zone_type,
 			unsigned long *ignored)
 {
@@ -2488,7 +2488,7 @@
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
-unsigned long __init __absent_pages_in_range(int nid,
+unsigned long __meminit __absent_pages_in_range(int nid,
 			unsigned long range_start_pfn,
 			unsigned long range_end_pfn)
 {
@@ -2548,7 +2548,7 @@
 }
 
 /* Return the number of page frames in holes in a zone on a node */
-unsigned long __init zone_absent_pages_in_node(int nid,
+unsigned long __meminit zone_absent_pages_in_node(int nid,
 			unsigned long zone_type,
 			unsigned long *ignored)
 {
@@ -2584,7 +2584,7 @@
 
 #endif
 
-static void __init calculate_node_totalpages(struct pglist_data *pgdat,
+static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
 		unsigned long *zones_size, unsigned long *zholes_size)
 {
 	unsigned long realtotalpages, totalpages = 0;
@@ -2692,7 +2692,7 @@
 	}
 }
 
-static void __init alloc_node_mem_map(struct pglist_data *pgdat)
+static void __meminit alloc_node_mem_map(struct pglist_data *pgdat)
 {
 	/* Skip empty nodes */
 	if (!pgdat->node_spanned_pages)
mm/sparse.c
@@ -61,7 +61,7 @@
 	return section;
 }
 
-static int sparse_index_init(unsigned long section_nr, int nid)
+static int __meminit sparse_index_init(unsigned long section_nr, int nid)
 {
 	static DEFINE_SPINLOCK(index_init_lock);
 	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
@@ -138,7 +138,7 @@
 }
 
 /* Record a memory area against a node. */
-void memory_present(int nid, unsigned long start, unsigned long end)
+void __init memory_present(int nid, unsigned long start, unsigned long end)
 {
 	unsigned long pfn;
 
@@ -197,7 +197,7 @@
 	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
 }
 
-static int sparse_init_one_section(struct mem_section *ms,
+static int __meminit sparse_init_one_section(struct mem_section *ms,
 		unsigned long pnum, struct page *mem_map)
 {
 	if (!valid_section(ms))
@@ -209,7 +209,7 @@
 	return 1;
 }
 
-static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
+static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 {
 	struct page *map;
 	struct mem_section *ms = __nr_to_section(pnum);
@@ -288,6 +288,7 @@
 	}
 }
 
+#ifdef CONFIG_MEMORY_HOTPLUG
 /*
  * returns the number of sections whose mem_maps were properly
  * set. If this is <=0, then that means that the passed-in
@@ -327,4 +328,5 @@
 	__kfree_section_memmap(memmap, nr_pages);
 	return ret;
 }
+#endif