mm/sparse.c
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
  
/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		if (node_state(nid, N_HIGH_MEMORY))
			section = kzalloc_node(array_size, GFP_KERNEL, nid);
		else
			section = kzalloc(array_size, GFP_KERNEL);
	} else {
		section = memblock_virt_alloc_node(array_size, nid);
	}

	return section;
}
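
/*
 * Allocate and install the per-root mem_section array covering
 * @section_nr. Returns 0 on success, -EEXIST if the root is already
 * populated and -ENOMEM if the allocation fails.
 */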
static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section* ms)
{
	unsigned long root_nr;
	struct mem_section* root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	VM_BUG_ON(root_nr == NR_SECTION_ROOTS);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
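
/*
 * Usage sketch: memory_present() stores the early node id with
 * sparse_encode_early_nid(nid) | SECTION_MARKED_PRESENT, and
 * sparse_early_nid() reads it back until sparse_init_one_section()
 * replaces the encoding with the real mem_map.
 */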

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}
  
/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}
  
/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						     unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}
  
/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}
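
/*
 * The encoded value is biased so that, for any pfn inside section
 * pnum, ((struct page *)coded_mem_map) + pfn yields the page for
 * that pfn; no per-section base has to be subtracted at lookup time.
 */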
  
/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

unsigned long usemap_size(void)
{
	unsigned long size_bytes;
	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
	size_bytes = roundup(size_bytes, sizeof(unsigned long));
	return size_bytes;
}
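
/*
 * This is the source of the "24 bytes" figure quoted in the comment
 * in sparse_init(); the exact value depends on the configured
 * SECTION_BLOCKFLAGS_BITS.
 */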
  
#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	unsigned long goal, limit;
	unsigned long *p;
	int nid;
	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	p = memblock_virt_alloc_try_nid_nopanic(size,
						SMP_CACHE_BYTES, goal, limit,
						nid);
	if (!p && limit) {
		limit = 0;
		goto again;
	}
	return p;
}
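
/*
 * Warn when a section's usemap and its node's pgdat land in different
 * sections; as described above, that makes the two sections
 * inter-dependent for hot-remove.
 */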
  
static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
	static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		printk(KERN_INFO
		       "node %d must be removed before removing section %ld\n",
		       nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable sections because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just report the un-removable section's number here.
	 */
	printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr,
	       pgdat_snr, nid);
	printk(KERN_CONT
	       " have a circular dependency on usemap and pgdat allocations\n");
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static void __init sparse_early_usemaps_alloc_node(void *data,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long usemap_count, int nodeid)
{
	void *usemap;
	unsigned long pnum;
	unsigned long **usemap_map = (unsigned long **)data;
	int size = usemap_size();

	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
							  size * usemap_count);
	if (!usemap) {
		printk(KERN_WARNING "%s: allocation failed\n", __func__);
		return;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = usemap;
		usemap += size;
		check_usemap_section_nr(nodeid, usemap_map[pnum]);
	}
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;
	unsigned long size;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
	map = memblock_virt_alloc_try_nid(size,
					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
					  BOOTMEM_ALLOC_ACCESSIBLE, nid);
	return map;
}
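
/*
 * Allocate the mem_maps for all present sections of one node in a
 * single contiguous block where possible, falling back to per-section
 * sparse_mem_map_populate() calls when the block allocation fails.
 */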
void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	void *map;
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

	map = alloc_remap(nodeid, size * map_count);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	size = PAGE_ALIGN(size);
	map = memblock_virt_alloc_try_nid(size * map_count,
					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
					  BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	/* fallback */
	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		printk(KERN_ERR "%s: sparsemem memory map backing failed "
			"some memory will not be available.\n", __func__);
		ms->section_mem_map = 0;
	}
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(void *data,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long map_count, int nodeid)
{
	struct page **map_map = (struct page **)data;
	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
					 map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	printk(KERN_ERR "%s: sparsemem memory map backing failed "
			"some memory will not be available.\n", __func__);
	ms->section_mem_map = 0;
	return NULL;
}
#endif

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/**
 * alloc_usemap_and_memmap - memory allocation for pageblock flags and vmemmap
 * @alloc_func: function called once for each run of present sections
 *	belonging to the same node
 * @data: usemap_map for pageblock flags or map_map for vmemmap
 */
static void __init alloc_usemap_and_memmap(void (*alloc_func)
					(void *, unsigned long, unsigned long,
					unsigned long, int), void *data)
{
	unsigned long pnum;
	unsigned long map_count;
	int nodeid_begin = 0;
	unsigned long pnum_begin = 0;

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	map_count = 1;
	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;
		int nodeid;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			map_count++;
			continue;
		}
		/* ok, we need to take care of from pnum_begin to pnum - 1 */
		alloc_func(data, pnum_begin, pnum,
						map_count, nodeid_begin);
		/* new start, update count etc. */
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		map_count = 1;
	}
	/* ok, last chunk */
	alloc_func(data, pnum_begin, NR_MEM_SECTIONS,
						map_count, nodeid_begin);
}
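
/*
 * Both callers hand in an NR_MEM_SECTIONS-sized pointer array as
 * @data: sparse_init() uses this helper with
 * sparse_early_usemaps_alloc_node() for the usemaps and, under
 * CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER, with
 * sparse_early_mem_maps_alloc_node() for the mem_maps.
 */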

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	int size2;
	struct page **map_map;
#endif

	/* see include/linux/mmzone.h 'struct mem_section' definition */
	BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	/*
	 * Each mem_map uses a big page (2M on 64-bit x86), while a
	 * usemap is far less than one page (24 bytes). Allocating 2M
	 * (2M-aligned) and then 24 bytes in turn makes the next 2M
	 * slip one more 2M later, so on a big system the memory ends
	 * up with a lot of holes. Try to allocate the 2M pages
	 * contiguously here instead.
	 *
	 * powerpc needs to call sparse_init_one_section right after each
	 * sparse_early_mem_map_alloc, so allocate usemap_map first.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = memblock_virt_alloc(size, 0);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");
	alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
							(void *)usemap_map);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
	map_map = memblock_virt_alloc(size2, 0);
	if (!map_map)
		panic("can not allocate map_map\n");
	alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
							(void *)map_map);
#endif

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
		map = map_map[pnum];
#else
		map = sparse_early_mem_map_alloc(pnum);
#endif
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}

	vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	memblock_free_early(__pa(map_map), size2);
#endif
	memblock_free_early(__pa(usemap_map), size);
}
  
#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#else
static struct page *__kmalloc_section_memmap(void)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:

	return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
	return __kmalloc_section_memmap();
}

static void __kfree_section_memmap(struct page *memmap)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * PAGES_PER_SECTION));
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->lru.next;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page->private;

		/*
		 * When this function is called, the section being removed
		 * is in a logically offlined state: all of its pages have
		 * been isolated from the page allocator. If the removed
		 * section's memmap is placed on that same section, it must
		 * not be freed; otherwise the page allocator could hand it
		 * out again just before the memory is removed physically.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * returns the number of sections whose mem_maps were properly
 * set.  If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking needed for this: sparse_index_init() does its own
	 * locking, and it may sleep in kmalloc().
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION);

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap);
	}
	return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	if (!memmap)
		return;

	for (i = 0; i < PAGES_PER_SECTION; i++) {
		if (PageHWPoison(&memmap[i])) {
			atomic_long_sub(1, &num_poisoned_pages);
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	struct page *usemap_page;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if allocation came from hot-plug-add
	 */
	if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap);
		return;
	}

	/*
	 * The usemap came from bootmem. This is packed with other usemaps
	 * on the section which has pgdat at boot time. Just keep it as is now.
	 */
	if (memmap)
		free_map_bootmem(memmap);
}
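
/*
 * Detach a section's mem_map and usemap under the pgdat resize lock,
 * drop any HWPoison accounting for its pages, then free the memmap
 * and usemap via free_section_usemap().
 */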
void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL, flags;
	struct pglist_data *pgdat = zone->zone_pgdat;

	pgdat_resize_lock(pgdat, &flags);
	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}
	pgdat_resize_unlock(pgdat, &flags);

	clear_hwpoisoned_pages(memmap, PAGES_PER_SECTION);
	free_section_usemap(memmap, usemap);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */