mm/sparse.c
  /*
   * sparse memory mappings.
   */
  #include <linux/mm.h>
  #include <linux/mmzone.h>
  #include <linux/bootmem.h>
  #include <linux/highmem.h>
  #include <linux/module.h>
  #include <linux/spinlock.h>
  #include <linux/vmalloc.h>
  #include "internal.h"
  #include <asm/dma.h>
  #include <asm/pgalloc.h>
  #include <asm/pgtable.h>
  
  /*
   * Permanent SPARSEMEM data:
   *
   * 1) mem_section	- memory sections, mem_map's for valid memory
   */
  #ifdef CONFIG_SPARSEMEM_EXTREME
  struct mem_section *mem_section[NR_SECTION_ROOTS]
  	____cacheline_internodealigned_in_smp;
  #else
  struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
  	____cacheline_internodealigned_in_smp;
  #endif
  EXPORT_SYMBOL(mem_section);
  #ifdef NODE_NOT_IN_PAGE_FLAGS
  /*
   * If we did not store the node number in the page then we have to
   * do a lookup in the section_to_node_table in order to find which
   * node the page belongs to.
   */
  #if MAX_NUMNODES <= 256
  static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
  #else
  static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
  #endif
  int page_to_nid(struct page *page)
  {
  	return section_to_node_table[page_to_section(page)];
  }
  EXPORT_SYMBOL(page_to_nid);
  
  static void set_section_nid(unsigned long section_nr, int nid)
  {
  	section_to_node_table[section_nr] = nid;
  }
  #else /* !NODE_NOT_IN_PAGE_FLAGS */
  static inline void set_section_nid(unsigned long section_nr, int nid)
  {
  }
  #endif
  #ifdef CONFIG_SPARSEMEM_EXTREME
  static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
  {
  	struct mem_section *section = NULL;
  	unsigned long array_size = SECTIONS_PER_ROOT *
  				   sizeof(struct mem_section);
  	if (slab_is_available())
  		section = kmalloc_node(array_size, GFP_KERNEL, nid);
  	else
  		section = alloc_bootmem_node(NODE_DATA(nid), array_size);
  
  	if (section)
  		memset(section, 0, array_size);
  
  	return section;
  }

  static int __meminit sparse_index_init(unsigned long section_nr, int nid)
  {
  	static DEFINE_SPINLOCK(index_init_lock);
  	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
  	struct mem_section *section;
  	int ret = 0;
  
  	if (mem_section[root])
  		return -EEXIST;

  	section = sparse_index_alloc(nid);
  	if (!section)
  		return -ENOMEM;
	/*
	 * This lock keeps two different callers from
	 * allocating the same root index twice.
	 */
  	spin_lock(&index_init_lock);

  	if (mem_section[root]) {
  		ret = -EEXIST;
  		goto out;
  	}
  
  	mem_section[root] = section;
  out:
  	spin_unlock(&index_init_lock);
  	return ret;
  }
  #else /* !SPARSEMEM_EXTREME */
  static inline int sparse_index_init(unsigned long section_nr, int nid)
  {
  	return 0;
  }
  #endif
  /*
   * Although written for the SPARSEMEM_EXTREME case, this happens
   * to also work for the flat array case because
   * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
   */
  int __section_nr(struct mem_section* ms)
  {
  	unsigned long root_nr;
  	struct mem_section* root;
  	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
  		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
  		if (!root)
  			continue;
  
		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
  	}
  
  	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
  }
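
/*
 * Worked example (illustrative; assumes SECTIONS_PER_ROOT == 256, i.e.
 * PAGE_SIZE / sizeof(struct mem_section) on a 64-bit box with 4K pages):
 * the entry at mem_section[2][5] lives in root 2, so the loop above
 * breaks with root_nr == 2 and __section_nr() returns 2 * 256 + 5 == 517.
 */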
  /*
   * During early boot, before section_mem_map is used for an actual
   * mem_map, we use section_mem_map to store the section's NUMA
   * node.  This keeps us from having to use another data structure.  The
   * node information is cleared just before we store the real mem_map.
   */
  static inline unsigned long sparse_encode_early_nid(int nid)
  {
  	return (nid << SECTION_NID_SHIFT);
  }
  
  static inline int sparse_early_nid(struct mem_section *section)
  {
  	return (section->section_mem_map >> SECTION_NID_SHIFT);
  }
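
/*
 * A quick sketch of the encoding (illustrative; assumes this era's
 * mmzone.h values SECTION_NID_SHIFT == 2 and SECTION_MARKED_PRESENT ==
 * 1UL << 0): for nid 3, sparse_encode_early_nid(3) == 0xc, OR-ing in
 * SECTION_MARKED_PRESENT gives 0xd, and sparse_early_nid() recovers
 * 0xd >> 2 == 3.  The flag bits live below SECTION_NID_SHIFT, so the
 * node id survives them.
 */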
  /* Record a memory area against a node. */
  void __init memory_present(int nid, unsigned long start, unsigned long end)
  {
  	unsigned long max_arch_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
  	unsigned long pfn;
  	/*
  	 * Sanity checks - do not allow an architecture to pass
  	 * in larger pfns than the maximum scope of sparsemem:
  	 */
  	if (start >= max_arch_pfn)
  		return;
  	if (end >= max_arch_pfn)
  		end = max_arch_pfn;
  	start &= PAGE_SECTION_MASK;
  	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
  		unsigned long section = pfn_to_section_nr(pfn);
  		struct mem_section *ms;
  
  		sparse_index_init(section, nid);
  		set_section_nid(section, nid);
  
  		ms = __nr_to_section(section);
  		if (!ms->section_mem_map)
  			ms->section_mem_map = sparse_encode_early_nid(nid) |
  							SECTION_MARKED_PRESENT;
  	}
  }
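
/*
 * A sketch of a typical caller (illustrative only; the real callers are
 * in arch boot code, and node_start_pfn[]/node_end_pfn[] here are
 * hypothetical arch-side arrays, not something this file provides):
 *
 *	for_each_online_node(nid)
 *		memory_present(nid, node_start_pfn[nid], node_end_pfn[nid]);
 */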
  
  /*
 * Only used by the i386 NUMA architectures, but relatively
   * generic code.
   */
  unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
  						     unsigned long end_pfn)
  {
  	unsigned long pfn;
  	unsigned long nr_pages = 0;
  
  	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
  		if (nid != early_pfn_to_nid(pfn))
  			continue;
  		if (pfn_present(pfn))
  			nr_pages += PAGES_PER_SECTION;
  	}
  
  	return nr_pages * sizeof(struct page);
  }
  
  /*
 * Subtle: we encode the real pfn into the mem_map such that, for any
 * page in the section, the identity "page - section_mem_map" returns
 * that page's actual physical page frame number.
   */
  static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
  {
  	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
  }
  
  /*
   * Decode mem_map from the coded memmap
   */
  struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
  {
  	/* mask off the extra low bits of information */
  	coded_mem_map &= SECTION_MAP_MASK;
  	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
  }
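
/*
 * Round-trip sketch (illustrative, made-up numbers): if section pnum
 * starts at pfn 0x8000 and its mem_map lives at address map, then
 * sparse_encode_mem_map(map, pnum) == map - 0x8000 (in struct page
 * units).  For any page in the section, coded + pfn == &map[pfn -
 * 0x8000], so "page - coded" recovers the real pfn, and
 * sparse_decode_mem_map(coded, pnum) == coded + 0x8000 == map again
 * (after SECTION_MAP_MASK strips the low flag bits).
 */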
  static int __meminit sparse_init_one_section(struct mem_section *ms,
  		unsigned long pnum, struct page *mem_map,
  		unsigned long *pageblock_bitmap)
  {
  	if (!present_section(ms))
  		return -EINVAL;
  	ms->section_mem_map &= ~SECTION_MAP_MASK;
  	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
  							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;
  
  	return 1;
  }
  unsigned long usemap_size(void)
  {
  	unsigned long size_bytes;
  	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
  	size_bytes = roundup(size_bytes, sizeof(unsigned long));
  	return size_bytes;
  }
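
/*
 * For scale (illustrative; assumes an x86_64 config of this era where
 * SECTION_BLOCKFLAGS_BITS == 192): roundup(192, 8) / 8 == 24 bytes,
 * already a multiple of sizeof(unsigned long), so usemap_size() returns
 * 24 -- the "24 bytes" the sparse_init() comment below refers to.
 */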
  
  #ifdef CONFIG_MEMORY_HOTPLUG
  static unsigned long *__kmalloc_section_usemap(void)
  {
  	return kmalloc(usemap_size(), GFP_KERNEL);
  }
  #endif /* CONFIG_MEMORY_HOTPLUG */
  static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
  {
  	unsigned long *usemap;
  	struct mem_section *ms = __nr_to_section(pnum);
  	int nid = sparse_early_nid(ms);
  	usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
  	if (usemap)
  		return usemap;
  
  	/* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
  	nid = 0;
	printk(KERN_WARNING "%s: allocation failed\n", __func__);
  	return NULL;
  }
  #ifndef CONFIG_SPARSEMEM_VMEMMAP
  struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
  {
  	struct page *map;
  
  	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
  	if (map)
  		return map;
  	map = alloc_bootmem_pages_node(NODE_DATA(nid),
  		       PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
  	return map;
  }
  #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
  
  struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
  {
  	struct page *map;
  	struct mem_section *ms = __nr_to_section(pnum);
  	int nid = sparse_early_nid(ms);
  	map = sparse_mem_map_populate(pnum, nid);
  	if (map)
  		return map;
  	printk(KERN_ERR "%s: sparsemem memory map backing failed "
  			"some memory will not be available.
  ", __func__);
  	ms->section_mem_map = 0;
  	return NULL;
  }
  void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
  {
  }
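
/*
 * The empty weak definition above is a hook: an architecture can
 * override vmemmap_populate_print_last() to report on its vmemmap
 * allocations once sparse_init() has populated every section (x86_64
 * overrides it to print the last contiguous allocation range); with no
 * override it is a no-op.
 */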
  /*
   * Allocate the accumulated non-linear sections, allocate a mem_map
   * for each and record the physical to section mapping.
   */
  void __init sparse_init(void)
  {
  	unsigned long pnum;
  	struct page *map;
  	unsigned long *usemap;
  	unsigned long **usemap_map;
  	int size;
  
	/*
	 * The mem_map is allocated from big pages (2M on 64-bit x86),
	 * while a usemap is far smaller than a page (24 bytes there).
	 * Alternating a 2M-aligned 2M allocation with a 24-byte one
	 * would push every following 2M allocation out to the next 2M
	 * boundary, leaving a big system's memory full of holes.  So
	 * try to allocate the 2M pages contiguously instead.
	 *
	 * powerpc needs to call sparse_init_one_section right after each
	 * sparse_early_mem_map_alloc, so allocate usemap_map first.
	 */
  	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
  	usemap_map = alloc_bootmem(size);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");
  
  	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
  		if (!present_section_nr(pnum))
  			continue;
  		usemap_map[pnum] = sparse_early_usemap_alloc(pnum);
  	}

  	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
  		if (!present_section_nr(pnum))
  			continue;

  		usemap = usemap_map[pnum];
  		if (!usemap)
  			continue;
  		map = sparse_early_mem_map_alloc(pnum);
  		if (!map)
  			continue;
  		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
  								usemap);
  	}

  	vmemmap_populate_print_last();
  	free_bootmem(__pa(usemap_map), size);
  }
  
  #ifdef CONFIG_MEMORY_HOTPLUG
  #ifdef CONFIG_SPARSEMEM_VMEMMAP
  static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
  						 unsigned long nr_pages)
  {
  	/* This will make the necessary allocations eventually. */
  	return sparse_mem_map_populate(pnum, nid);
  }
  static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
  {
  	return; /* XXX: Not implemented yet */
  }
  static void free_map_bootmem(struct page *page, unsigned long nr_pages)
  {
  }
  #else
  static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
  {
  	struct page *page, *ret;
  	unsigned long memmap_size = sizeof(struct page) * nr_pages;
  	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
  	if (page)
  		goto got_map_page;
  
  	ret = vmalloc(memmap_size);
  	if (ret)
  		goto got_map_ptr;
  
  	return NULL;
  got_map_page:
  	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
  got_map_ptr:
  	memset(ret, 0, memmap_size);
  
  	return ret;
  }
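
/*
 * Note the fallback order above: try physically contiguous pages first
 * (GFP_KERNEL | __GFP_NOWARN keeps a failed high-order attempt quiet),
 * then fall back to vmalloc; __kfree_section_memmap() below mirrors
 * this by using is_vmalloc_addr() to pick vfree() or free_pages().
 */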
  static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
  						  unsigned long nr_pages)
  {
  	return __kmalloc_section_memmap(nr_pages);
  }
  static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
  {
  	if (is_vmalloc_addr(memmap))
  		vfree(memmap);
  	else
  		free_pages((unsigned long)memmap,
  			   get_order(sizeof(struct page) * nr_pages));
  }
  
  static void free_map_bootmem(struct page *page, unsigned long nr_pages)
  {
  	unsigned long maps_section_nr, removing_section_nr, i;
  	int magic;
  
  	for (i = 0; i < nr_pages; i++, page++) {
  		magic = atomic_read(&page->_mapcount);
  
  		BUG_ON(magic == NODE_INFO);
  
  		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
  		removing_section_nr = page->private;
  
		/*
		 * When this function is called, the section being
		 * removed has already been logically offlined, so all
		 * of its pages are isolated from the page allocator.
		 * If the memmap of the section being removed lives on
		 * that same section, it must not be freed: the page
		 * allocator could hand it out again even though the
		 * memory is about to be removed physically.
		 */
  		if (maps_section_nr != removing_section_nr)
  			put_page_bootmem(page);
  	}
  }
  #endif /* CONFIG_SPARSEMEM_VMEMMAP */

  static void free_section_usemap(struct page *memmap, unsigned long *usemap)
  {
  	struct page *usemap_page;
  	unsigned long nr_pages;
  	if (!usemap)
  		return;
  	usemap_page = virt_to_page(usemap);
  	/*
  	 * Check to see if allocation came from hot-plug-add
  	 */
  	if (PageSlab(usemap_page)) {
  		kfree(usemap);
  		if (memmap)
  			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
  		return;
  	}
  
  	/*
	 * The usemap came from bootmem.  At boot it was packed in with
	 * the other usemaps on the section holding the pgdat, so just
	 * leave it as it is for now.
  	 */
  
  	if (memmap) {
  		struct page *memmap_page;
  		memmap_page = virt_to_page(memmap);
  
  		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
  			>> PAGE_SHIFT;
  
  		free_map_bootmem(memmap_page, nr_pages);
  	}
  }
  /*
 * returns the number of sections whose mem_maps were properly
 * set.  If this is <= 0, the passed-in map was not consumed and
 * must be freed.
   */
  int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
  			   int nr_pages)
  {
  	unsigned long section_nr = pfn_to_section_nr(start_pfn);
  	struct pglist_data *pgdat = zone->zone_pgdat;
  	struct mem_section *ms;
  	struct page *memmap;
  	unsigned long *usemap;
  	unsigned long flags;
  	int ret;

	/*
	 * No locking for this: sparse_index_init() does its own,
	 * and besides, it does a kmalloc (which may sleep).
	 */
  	ret = sparse_index_init(section_nr, pgdat->node_id);
  	if (ret < 0 && ret != -EEXIST)
  		return ret;
  	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
  	if (!memmap)
  		return -ENOMEM;
  	usemap = __kmalloc_section_usemap();
  	if (!usemap) {
  		__kfree_section_memmap(memmap, nr_pages);
  		return -ENOMEM;
  	}
  
  	pgdat_resize_lock(pgdat, &flags);

  	ms = __pfn_to_section(start_pfn);
  	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
  		ret = -EEXIST;
  		goto out;
  	}

  	ms->section_mem_map |= SECTION_MARKED_PRESENT;
  	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

  out:
  	pgdat_resize_unlock(pgdat, &flags);
  	if (ret <= 0) {
  		kfree(usemap);
  		__kfree_section_memmap(memmap, nr_pages);
  	}
  	return ret;
  }
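
/*
 * For orientation (a sketch of the call path; the callers live in
 * mm/memory_hotplug.c, not here): the hotplug code walks a newly added
 * range one section at a time and calls sparse_add_one_section(zone,
 * section_start_pfn, PAGES_PER_SECTION), treating -EEXIST from an
 * already-present section as non-fatal.
 */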
  
  void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
  {
  	struct page *memmap = NULL;
  	unsigned long *usemap = NULL;
  
  	if (ms->section_mem_map) {
  		usemap = ms->pageblock_flags;
  		memmap = sparse_decode_mem_map(ms->section_mem_map,
  						__section_nr(ms));
  		ms->section_mem_map = 0;
  		ms->pageblock_flags = NULL;
  	}
  
  	free_section_usemap(memmap, usemap);
  }
  #endif