Commit 0b0acbec1bed75ec1e1daa7f7006323a2a2b2844

Authored by Dave Hansen
Committed by Linus Torvalds
1 parent 3947be1969

[PATCH] memory hotplug: move section_mem_map alloc to sparse.c

This basically keeps us from having to extern __kmalloc_section_memmap().

The vaddr_in_vmalloc_area() helper could go in a vmalloc header, but that
header gets hard to work with, because it needs some arch-specific macros.
Just stick it in here for now, instead of creating another header.

Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Lion Vollnhals <webmaster@schiggl.de>
Signed-off-by: Jiri Slaby <xslaby@fi.muni.cz>
Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
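
For context, keeping the allocator in mm/memory_hotplug.c would have required exposing it to sparse.c with a declaration along these lines (a sketch of the extern the message says is now avoided, using the signature visible in the diff below; not part of the patch):

    extern struct page *__kmalloc_section_memmap(unsigned long nr_pages);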

Showing 4 changed files with 75 additions and 57 deletions

drivers/acpi/acpi_memhotplug.c
@@ -200,8 +200,7 @@
          * Note: Assume that this function returns zero on success
          */
         result = add_memory(mem_device->start_addr,
-                        (mem_device->end_addr - mem_device->start_addr) + 1,
-                        mem_device->read_write_attribute);
+                        (mem_device->end_addr - mem_device->start_addr) + 1);
         if (result) {
                 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "\nadd_memory failed\n"));
                 mem_device->state = MEMORY_INVALID_STATE;
@@ -259,7 +258,7 @@
          * Ask the VM to offline this memory range.
          * Note: Assume that this function returns zero on success
          */
-        result = remove_memory(start, len, attr);
+        result = remove_memory(start, len);
         if (result) {
                 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Hot-Remove failed.\n"));
                 return_VALUE(result);
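
The two hunks above track an interface change: the read/write attribute argument to add_memory() and remove_memory() is gone, leaving just a physical start address and a length. A minimal sketch of the post-patch calling convention, with the u64 types assumed from the ACPI driver's fields rather than taken from this diff:

    /* sketch: hotplug an ACPI-reported range with the new two-argument API */
    u64 start = mem_device->start_addr;
    u64 size  = (mem_device->end_addr - mem_device->start_addr) + 1;

    result = add_memory(start, size);   /* attribute argument dropped */
    if (result)
            mem_device->state = MEMORY_INVALID_STATE;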
drivers/base/memory.c
... ... @@ -340,14 +340,11 @@
340 340 static int add_memory_block(unsigned long node_id, struct mem_section *section,
341 341 unsigned long state, int phys_device)
342 342 {
343   - size_t size = sizeof(struct memory_block);
344   - struct memory_block *mem = kmalloc(size, GFP_KERNEL);
  343 + struct memory_block *mem = kzalloc(sizeof(*mem), GFP_KERNEL);
345 344 int ret = 0;
346 345  
347 346 if (!mem)
348 347 return -ENOMEM;
349   -
350   - memset(mem, 0, size);
351 348  
352 349 mem->phys_index = __section_nr(section);
353 350 mem->state = state;
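
The drivers/base/memory.c change is a small cleanup folded into the patch: kzalloc() returns already-zeroed memory, so the explicit memset() goes away. The two forms are equivalent; a sketch of the idiom (not patch code):

    /* before: allocate, then zero by hand */
    mem = kmalloc(sizeof(*mem), GFP_KERNEL);
    if (mem)
            memset(mem, 0, sizeof(*mem));

    /* after: one call allocates and zeroes */
    mem = kzalloc(sizeof(*mem), GFP_KERNEL);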
mm/memory_hotplug.c
@@ -24,28 +24,6 @@
 
 #include <asm/tlbflush.h>
 
-static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
-{
-        struct page *page, *ret;
-        unsigned long memmap_size = sizeof(struct page) * nr_pages;
-
-        page = alloc_pages(GFP_KERNEL, get_order(memmap_size));
-        if (page)
-                goto got_map_page;
-
-        ret = vmalloc(memmap_size);
-        if (ret)
-                goto got_map_ptr;
-
-        return NULL;
-got_map_page:
-        ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
-got_map_ptr:
-        memset(ret, 0, memmap_size);
-
-        return ret;
-}
-
 extern void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
                         unsigned long size);
 static void __add_zone(struct zone *zone, unsigned long phys_start_pfn)
@@ -60,35 +38,15 @@
         zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages);
 }
 
-extern int sparse_add_one_section(struct zone *, unsigned long,
-                        struct page *mem_map);
+extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
+                        int nr_pages);
 static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
 {
         struct pglist_data *pgdat = zone->zone_pgdat;
         int nr_pages = PAGES_PER_SECTION;
-        struct page *memmap;
         int ret;
 
-        /*
-         * This can potentially allocate memory, and does its own
-         * internal locking.
-         */
-        sparse_index_init(pfn_to_section_nr(phys_start_pfn), pgdat->node_id);
-
-        pgdat_resize_lock(pgdat, &flags);
-        memmap = __kmalloc_section_memmap(nr_pages);
-        ret = sparse_add_one_section(zone, phys_start_pfn, memmap);
-        pgdat_resize_unlock(pgdat, &flags);
-
-        if (ret <= 0) {
-                /* the mem_map didn't get used */
-                if (memmap >= (struct page *)VMALLOC_START &&
-                    memmap < (struct page *)VMALLOC_END)
-                        vfree(memmap);
-                else
-                        free_pages((unsigned long)memmap,
-                                get_order(sizeof(struct page) * nr_pages));
-        }
+        ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
 
         if (ret < 0)
                 return ret;
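
Worth noting is the ordering the move enables: pgdat_resize_lock() takes a spinlock, and neither alloc_pages(GFP_KERNEL, ...) nor vmalloc() may be called with a spinlock held, since both can sleep. The old code above allocated the memmap inside the locked region; the new sparse_add_one_section() (below) allocates first and only does section bookkeeping under the lock. Sketched as an annotation, not patch code:

    memmap = __kmalloc_section_memmap(nr_pages); /* may sleep: allocate first */
    pgdat_resize_lock(pgdat, &flags);            /* spinlock: no sleeping now */
    /* ... mark the section present, install the memmap ... */
    pgdat_resize_unlock(pgdat, &flags);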
mm/sparse.c
@@ -5,8 +5,10 @@
 #include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <linux/bootmem.h>
+#include <linux/highmem.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
+#include <linux/vmalloc.h>
 #include <asm/dma.h>
 
 /*
@@ -187,6 +189,45 @@
         return NULL;
 }
 
+static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
+{
+        struct page *page, *ret;
+        unsigned long memmap_size = sizeof(struct page) * nr_pages;
+
+        page = alloc_pages(GFP_KERNEL, get_order(memmap_size));
+        if (page)
+                goto got_map_page;
+
+        ret = vmalloc(memmap_size);
+        if (ret)
+                goto got_map_ptr;
+
+        return NULL;
+got_map_page:
+        ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
+got_map_ptr:
+        memset(ret, 0, memmap_size);
+
+        return ret;
+}
+
+static int vaddr_in_vmalloc_area(void *addr)
+{
+        if (addr >= (void *)VMALLOC_START &&
+            addr < (void *)VMALLOC_END)
+                return 1;
+        return 0;
+}
+
+static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+{
+        if (vaddr_in_vmalloc_area(memmap))
+                vfree(memmap);
+        else
+                free_pages((unsigned long)memmap,
+                        get_order(sizeof(struct page) * nr_pages));
+}
+
 /*
  * Allocate the accumulated non-linear sections, allocate a mem_map
  * for each and record the physical to section mapping.
@@ -212,15 +253,38 @@
  * set. If this is <=0, then that means that the passed-in
  * map was not consumed and must be freed.
  */
-int sparse_add_one_section(unsigned long start_pfn, int nr_pages, struct page *map)
+int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
+                           int nr_pages)
 {
-        struct mem_section *ms = __pfn_to_section(start_pfn);
+        unsigned long section_nr = pfn_to_section_nr(start_pfn);
+        struct pglist_data *pgdat = zone->zone_pgdat;
+        struct mem_section *ms;
+        struct page *memmap;
+        unsigned long flags;
+        int ret;
 
-        if (ms->section_mem_map & SECTION_MARKED_PRESENT)
-                return -EEXIST;
+        /*
+         * no locking for this, because it does its own
+         * plus, it does a kmalloc
+         */
+        sparse_index_init(section_nr, pgdat->node_id);
+        memmap = __kmalloc_section_memmap(nr_pages);
 
+        pgdat_resize_lock(pgdat, &flags);
+
+        ms = __pfn_to_section(start_pfn);
+        if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
+                ret = -EEXIST;
+                goto out;
+        }
         ms->section_mem_map |= SECTION_MARKED_PRESENT;
 
-        return sparse_init_one_section(ms, pfn_to_section_nr(start_pfn), map);
+        ret = sparse_init_one_section(ms, section_nr, memmap);
+
+        if (ret <= 0)
+                __kfree_section_memmap(memmap, nr_pages);
+out:
+        pgdat_resize_unlock(pgdat, &flags);
+        return ret;
 }
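
With the allocation and the error-path free both internal to sparse_add_one_section(), a caller only checks the return value, which is exactly what the slimmed-down __add_section() earlier in the diff does. A sketch of the resulting contract:

    /* sketch: callers no longer see the memmap at all */
    ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
    if (ret < 0)            /* e.g. -EEXIST if the section is already present */
            return ret;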