mm/sparse.c
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
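
/*
 * With SPARSEMEM_EXTREME the table above holds only root pointers; each
 * root covering SECTIONS_PER_ROOT sections is allocated on demand by
 * sparse_index_alloc()/sparse_index_init() below, so space is consumed
 * only for roots that contain present sections.  The flat variant sizes
 * the whole table statically.
 */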

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available())
		section = kmalloc_node(array_size, GFP_KERNEL, nid);
	else
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);

	if (section)
		memset(section, 0, array_size);

	return section;
}
802f192e4
|
71 |
|
a3142c8e1
|
72 |
static int __meminit sparse_index_init(unsigned long section_nr, int nid) |
802f192e4
|
73 |
{ |
34af946a2
|
74 |
static DEFINE_SPINLOCK(index_init_lock); |
28ae55c98
|
75 76 77 |
unsigned long root = SECTION_NR_TO_ROOT(section_nr); struct mem_section *section; int ret = 0; |
802f192e4
|
78 79 |
if (mem_section[root]) |
28ae55c98
|
80 |
return -EEXIST; |
3e347261a
|
81 |
|
28ae55c98
|
82 |
section = sparse_index_alloc(nid); |
af0cd5a7c
|
83 84 |
if (!section) return -ENOMEM; |
28ae55c98
|
85 86 87 88 89 |
/* * This lock keeps two different sections from * reallocating for the same index */ spin_lock(&index_init_lock); |
3e347261a
|
90 |
|
28ae55c98
|
91 92 93 94 95 96 97 98 99 100 101 102 103 104 |
if (mem_section[root]) { ret = -EEXIST; goto out; } mem_section[root] = section; out: spin_unlock(&index_init_lock); return ret; } #else /* !SPARSEMEM_EXTREME */ static inline int sparse_index_init(unsigned long section_nr, int nid) { return 0; |
802f192e4
|
105 |
} |
28ae55c98
|
106 |
#endif |

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
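
/*
 * In the flat-array case SECTIONS_PER_ROOT is 1 and every "root" is a
 * single struct mem_section, so the loop above simply walks the array
 * until it finds ms, and (ms - root) is always 0.
 */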

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
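
/*
 * Round trip of the early encoding, as set up by memory_present() below:
 * section_mem_map holds (nid << SECTION_NID_SHIFT) | SECTION_MARKED_PRESENT,
 * and because the flag bits live below SECTION_NID_SHIFT, the shift in
 * sparse_early_nid() discards them and yields the original nid.
 */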

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long max_arch_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
	unsigned long pfn;

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (start >= max_arch_pfn)
		return;
	if (end >= max_arch_pfn)
		end = max_arch_pfn;

	start &= PAGE_SECTION_MASK;
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
					    unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
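
/*
 * Example of the encode/decode pair above: if a section starts at pfn S
 * and its mem_map begins at address M, the stored value is M - S (in
 * units of struct page).  Decoding adds S back to recover M, and
 * page_to_pfn() can compute page - (M - S) = pfn directly from the
 * stored base.
 */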

static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

unsigned long usemap_size(void)
{
	unsigned long size_bytes;
	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
	size_bytes = roundup(size_bytes, sizeof(unsigned long));
	return size_bytes;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
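
/*
 * usemap_size() converts SECTION_BLOCKFLAGS_BITS (one NR_PAGEBLOCK_BITS-wide
 * field per pageblock in the section) into bytes, rounded up to whole
 * unsigned longs.  Per the allocation comment in sparse_init() below, this
 * works out to 24 bytes per section with the usual x86_64 geometry.
 */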

static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
{
	unsigned long *usemap;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
	if (usemap)
		return usemap;

	/* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
	nid = 0;

	printk(KERN_WARNING "%s: allocation failed\n", __func__);
	return NULL;
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	map = alloc_bootmem_pages_node(NODE_DATA(nid),
		       PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	printk(KERN_ERR "%s: sparsemem memory map backing failed "
			"some memory will not be available.\n", __func__);
	ms->section_mem_map = 0;
	return NULL;
}

void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;

	/*
	 * Each mem_map comes from a big page (e.g. 2M on 64-bit x86),
	 * while a usemap is much smaller than a page (e.g. 24 bytes).
	 * Allocating a 2M-aligned map and then a usemap in turn would
	 * push each following map to the next 2M boundary, leaving the
	 * memory of a big system full of holes.  So allocate all the
	 * usemaps up front, letting the 2M maps be allocated
	 * contiguously.
	 *
	 * powerpc needs to call sparse_init_one_section right after each
	 * sparse_early_mem_map_alloc, so usemap_map is allocated first.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = alloc_bootmem(size);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = sparse_early_usemap_alloc(pnum);
	}

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

		map = sparse_early_mem_map_alloc(pnum);
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}

	vmemmap_populate_print_last();

	free_bootmem(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	return; /* XXX: Not implemented yet */
}
static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
}
#else
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}
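
/*
 * Note the fallback order above: physically contiguous pages are tried
 * first (with __GFP_NOWARN, since failure is expected under fragmentation),
 * then vmalloc.  __kfree_section_memmap() below undoes whichever one
 * succeeded by checking is_vmalloc_addr().
 */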

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	return __kmalloc_section_memmap(nr_pages);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}

static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	int magic;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = atomic_read(&page->_mapcount);

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page->private;

		/*
		 * When this function is called, the removing section is
		 * in a logically offlined state, meaning all of its pages
		 * have been isolated from the page allocator.  If the
		 * section's memmap is placed on the section being removed,
		 * it must not be freed: the page allocator could otherwise
		 * hand it out again just before the memory is physically
		 * removed.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	struct page *usemap_page;
	unsigned long nr_pages;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if allocation came from hot-plug-add
	 */
	if (PageSlab(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
		return;
	}

	/*
	 * The usemap came from bootmem. This is packed with other usemaps
	 * on the section which has pgdat at boot time. Just keep it as is now.
	 */

	if (memmap) {
		struct page *memmap_page;
		memmap_page = virt_to_page(memmap);

		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
			>> PAGE_SHIFT;

		free_map_bootmem(memmap_page, nr_pages);
	}
}

/*
 * returns the number of sections whose mem_maps were properly
 * set.  If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
			   int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking needed for this: sparse_index_init() does its own
	 * locking, and it may kmalloc, so call it before taking the
	 * resize lock.
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap, nr_pages);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap, nr_pages);
	}
	return ret;
}

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL;

	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}

	free_section_usemap(memmap, usemap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */