Blame view
mm/bootmem.c
20.8 KB
1da177e4c
|
1 |
/* |
57cfc29ef
|
2 |
* bootmem - A boot-time physical memory allocator and configurator |
1da177e4c
|
3 4 |
* * Copyright (C) 1999 Ingo Molnar |
57cfc29ef
|
5 6 |
* 1999 Kanoj Sarcar, SGI * 2008 Johannes Weiner |
1da177e4c
|
7 |
* |
57cfc29ef
|
8 9 |
* Access to this subsystem has to be serialized externally (which is true * for the boot process anyway). |
1da177e4c
|
10 |
*/ |
1da177e4c
|
11 |
#include <linux/init.h> |
bbc7b92e3
|
12 |
#include <linux/pfn.h> |
5a0e3ad6a
|
13 |
#include <linux/slab.h> |
1da177e4c
|
14 |
#include <linux/bootmem.h> |
b95f1b31b
|
15 |
#include <linux/export.h> |
ec3a354bd
|
16 |
#include <linux/kmemleak.h> |
08677214e
|
17 |
#include <linux/range.h> |
72d7c3b33
|
18 |
#include <linux/memblock.h> |
e786e86a5
|
19 20 |
#include <asm/bug.h> |
1da177e4c
|
21 |
#include <asm/io.h> |
dfd54cbcc
|
22 |
#include <asm/processor.h> |
e786e86a5
|
23 |
|
1da177e4c
|
24 |
#include "internal.h" |
#ifndef CONFIG_NEED_MULTIPLE_NODES
/*
 * On UMA builds there is a single, statically allocated node descriptor;
 * its bootmem state lives in bootmem_node_data[0].
 */
struct pglist_data __refdata contig_page_data = {
	.bdata = &bootmem_node_data[0]
};
EXPORT_SYMBOL(contig_page_data);
#endif

/* Boot-time PFN limits, filled in by init_bootmem()/architecture code. */
unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

/* Per-node bootmem bookkeeping; discarded after boot (__initdata). */
bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

/* All registered bootmem_data_t, kept sorted by ascending node_min_pfn. */
static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);

static int bootmem_debug;

/* Enable verbose bootmem logging via the "bootmem_debug" kernel parameter. */
static int __init bootmem_debug_setup(char *buf)
{
	bootmem_debug = 1;
	return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

/*
 * Debug printout helper; the unlikely() keeps the disabled case cheap.
 * Callers supply the trailing newline in @fmt.
 */
#define bdebug(fmt, args...) ({				\
	if (unlikely(bootmem_debug))			\
		printk(KERN_INFO			\
			"bootmem::%s " fmt,		\
			__func__, ## args);		\
})
df049a5f4
|
51 |
static unsigned long __init bootmap_bytes(unsigned long pages) |
223e8dc92
|
52 |
{ |
df049a5f4
|
53 |
unsigned long bytes = (pages + 7) / 8; |
223e8dc92
|
54 |
|
df049a5f4
|
55 |
return ALIGN(bytes, sizeof(long)); |
223e8dc92
|
56 |
} |
a66fd7dae
|
57 58 59 60 |
/** * bootmem_bootmap_pages - calculate bitmap size in pages * @pages: number of pages the bitmap has to represent */ |
f71bf0cac
|
61 |
unsigned long __init bootmem_bootmap_pages(unsigned long pages) |
1da177e4c
|
62 |
{ |
df049a5f4
|
63 |
unsigned long bytes = bootmap_bytes(pages); |
1da177e4c
|
64 |
|
df049a5f4
|
65 |
return PAGE_ALIGN(bytes) >> PAGE_SHIFT; |
1da177e4c
|
66 |
} |
f71bf0cac
|
67 |
|
/*
 * link_bootmem - insert @bdata into bdata_list, keeping the list
 * sorted by ascending node_min_pfn.
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
	struct list_head *iter;

	list_for_each(iter, &bdata_list) {
		bootmem_data_t *ent;

		ent = list_entry(iter, bootmem_data_t, list);
		/* Stop at the first node that starts above this one. */
		if (bdata->node_min_pfn < ent->node_min_pfn)
			break;
	}
	/* Insert before @iter (or at the tail if no larger node exists). */
	list_add_tail(&bdata->list, iter);
}
/*
 * Called once to set up the allocator itself.
 *
 * @mapstart is the PFN holding the bitmap; [@start, @end) is the node's
 * PFN range.  All pages start out reserved; setup_arch() must register
 * free RAM explicitly.  Returns the bitmap size in bytes.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	unsigned long mapsize;

	mminit_validate_memmodel_limits(&start, &end);
	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
	bdata->node_min_pfn = start;
	bdata->node_low_pfn = end;
	link_bootmem(bdata);

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	mapsize = bootmap_bytes(end - start);
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
		bdata - bootmem_node_data, start, mapstart, end, mapsize);

	return mapsize;
}
/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
				unsigned long startpfn, unsigned long endpfn)
{
	return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}

/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
	/* UMA setup: node 0 spans [0, pages). */
	max_low_pfn = pages;
	min_low_pfn = start;
	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
093258732
|
136 |
|
/*
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
	unsigned long cursor, end;

	/* Tell kmemleak this range no longer belongs to bootmem. */
	kmemleak_free_part(__va(addr), size);

	/* Round inward: only whole pages can be handed back. */
	cursor = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}
/*
 * free_all_bootmem_core - hand all unreserved pages of @bdata to the buddy
 * allocator, then release the pages holding the bootmem bitmap itself.
 *
 * Returns the number of pages released.
 */
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
	int aligned;
	struct page *page;
	unsigned long start, end, pages, count = 0;

	if (!bdata->node_bootmem_map)
		return 0;

	start = bdata->node_min_pfn;
	end = bdata->node_low_pfn;

	/*
	 * If the start is aligned to the machines wordsize, we might
	 * be able to free pages in bulks of that order.
	 */
	aligned = !(start & (BITS_PER_LONG - 1));

	bdebug("nid=%td start=%lx end=%lx aligned=%d\n",
		bdata - bootmem_node_data, start, end, aligned);

	while (start < end) {
		unsigned long *map, idx, vec;

		map = bdata->node_bootmem_map;
		idx = start - bdata->node_min_pfn;
		/* Invert the word: a set bit in @vec now means "free page". */
		vec = ~map[idx / BITS_PER_LONG];

		if (aligned && vec == ~0UL && start + BITS_PER_LONG < end) {
			/* Whole word free and aligned: free one big block. */
			int order = ilog2(BITS_PER_LONG);

			__free_pages_bootmem(pfn_to_page(start), order);
			count += BITS_PER_LONG;
		} else {
			unsigned long off = 0;

			/* Otherwise free the pages one by one. */
			while (vec && off < BITS_PER_LONG) {
				if (vec & 1) {
					page = pfn_to_page(start + off);
					__free_pages_bootmem(page, 0);
					count++;
				}
				vec >>= 1;
				off++;
			}
		}
		start += BITS_PER_LONG;
	}

	/* Finally release the bitmap's own pages; metadata is gone now. */
	page = virt_to_page(bdata->node_bootmem_map);
	pages = bdata->node_low_pfn - bdata->node_min_pfn;
	pages = bootmem_bootmap_pages(pages);
	count += pages;
	while (pages--)
		__free_pages_bootmem(page++, 0);

	bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

	return count;
}
/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	register_page_bootmem_info_node(pgdat);
	return free_all_bootmem_core(pgdat->bdata);
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	unsigned long total_pages = 0;
	bootmem_data_t *bdata;

	/* Release every registered node in turn. */
	list_for_each_entry(bdata, &bdata_list, list)
		total_pages += free_all_bootmem_core(bdata);

	return total_pages;
}
/*
 * __free - clear the "reserved" bits for indices [sidx, eidx) in @bdata's
 * bitmap.  BUGs on a double free (bit already clear).
 */
static void __init __free(bootmem_data_t *bdata,
			unsigned long sidx, unsigned long eidx)
{
	unsigned long idx;

	bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn);

	/* Memory freed below the search hint may be reused by the allocator. */
	if (bdata->hint_idx > sidx)
		bdata->hint_idx = sidx;

	for (idx = sidx; idx < eidx; idx++)
		if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
			BUG();
}

/*
 * __reserve - set the "reserved" bits for indices [sidx, eidx) in @bdata's
 * bitmap.
 *
 * With BOOTMEM_EXCLUSIVE in @flags, hitting an already-reserved page rolls
 * back the partial reservation and returns -EBUSY; without it, double
 * reservations are tolerated (logged only when debugging).  Returns 0 on
 * success.
 */
static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
			unsigned long eidx, int flags)
{
	unsigned long idx;
	int exclusive = flags & BOOTMEM_EXCLUSIVE;

	bdebug("nid=%td start=%lx end=%lx flags=%x\n",
		bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn,
		flags);

	for (idx = sidx; idx < eidx; idx++)
		if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
			if (exclusive) {
				/* Undo what we reserved so far. */
				__free(bdata, sidx, idx);
				return -EBUSY;
			}
			bdebug("silent double reserve of PFN %lx\n",
				idx + bdata->node_min_pfn);
		}
	return 0;
}
/*
 * mark_bootmem_node - reserve (@reserve != 0) or free the PFN range
 * [start, end) on a single node.
 *
 * The range must lie entirely within @bdata's node.  Returns 0, or the
 * error from __reserve().
 */
static int __init mark_bootmem_node(bootmem_data_t *bdata,
				unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long sidx, eidx;

	bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
		bdata - bootmem_node_data, start, end, reserve, flags);

	BUG_ON(start < bdata->node_min_pfn);
	BUG_ON(end > bdata->node_low_pfn);

	/* Convert PFNs to bitmap indices relative to the node start. */
	sidx = start - bdata->node_min_pfn;
	eidx = end - bdata->node_min_pfn;

	if (reserve)
		return __reserve(bdata, sidx, eidx, flags);
	else
		__free(bdata, sidx, eidx);
	return 0;
}

/*
 * mark_bootmem - reserve or free a contiguous PFN range that may span
 * several nodes.
 *
 * On a failed (exclusive) reservation, everything reserved on earlier
 * nodes is rolled back.  BUGs if part of the range falls outside every
 * registered node.
 */
static int __init mark_bootmem(unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long pos;
	bootmem_data_t *bdata;

	pos = start;
	list_for_each_entry(bdata, &bdata_list, list) {
		int err;
		unsigned long max;

		if (pos < bdata->node_min_pfn ||
		    pos >= bdata->node_low_pfn) {
			/* Only the very start may lie outside all nodes. */
			BUG_ON(pos != start);
			continue;
		}

		/* Clamp to this node's end. */
		max = min(bdata->node_low_pfn, end);

		err = mark_bootmem_node(bdata, pos, max, reserve, flags);
		if (reserve && err) {
			/* Roll back reservations made on earlier nodes. */
			mark_bootmem(start, pos, 0, 0);
			return err;
		}

		if (max == end)
			return 0;
		/* Continue on the next node. */
		pos = bdata->node_low_pfn;
	}
	BUG();
}
/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	unsigned long start, end;

	kmemleak_free_part(__va(physaddr), size);

	/* Round inward so partial pages stay reserved. */
	start = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

	mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
	unsigned long start, end;

	kmemleak_free_part(__va(addr), size);

	/* Round inward so partial pages stay reserved. */
	start = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	mark_bootmem(start, end, 0, 0);
}
/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must reside completely on the specified node.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
				 unsigned long size, int flags)
{
	unsigned long start, end;

	/* Round outward so partial pages get reserved too. */
	start = PFN_DOWN(physaddr);
	end = PFN_UP(physaddr + size);

	return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
}

/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must be contiguous but may span node boundaries.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
			    int flags)
{
	unsigned long start, end;

	/* Round outward so partial pages get reserved too. */
	start = PFN_DOWN(addr);
	end = PFN_UP(addr + size);

	return mark_bootmem(start, end, 1, flags);
}

/*
 * Default implementation; weak so architectures can override it with
 * additional constraints.
 */
int __weak __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
				   int flags)
{
	return reserve_bootmem(phys, len, flags);
}
/*
 * align_idx - align a node-relative bitmap index so that the absolute PFN
 * (node_min_pfn + idx) is a multiple of @step.
 */
static unsigned long __init align_idx(struct bootmem_data *bdata,
				      unsigned long idx, unsigned long step)
{
	unsigned long base = bdata->node_min_pfn;

	/*
	 * Align the index with respect to the node start so that the
	 * combination of both satisfies the requested alignment.
	 */

	return ALIGN(base + idx, step) - base;
}

/*
 * align_off - align a node-relative byte offset so the absolute physical
 * address satisfies @align.
 */
static unsigned long __init align_off(struct bootmem_data *bdata,
				      unsigned long off, unsigned long align)
{
	unsigned long base = PFN_PHYS(bdata->node_min_pfn);

	/* Same as align_idx for byte offsets */

	return ALIGN(base + off, align) - base;
}
/*
 * alloc_bootmem_core - try to satisfy one allocation from a single node.
 * @size:  request in bytes (sub-page sizes are merged with the previous
 *         allocation's partial page when possible)
 * @align: power-of-two alignment in bytes
 * @goal:  preferred physical start address, 0 for none
 * @limit: physical upper bound, 0 for none
 *
 * Returns a zeroed virtual region, or NULL if the node cannot satisfy
 * the request.
 */
static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
					unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	unsigned long fallback = 0;
	unsigned long min, max, start, sidx, midx, step;

	bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
		bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
		align, goal, limit);

	BUG_ON(!size);
	BUG_ON(align & (align - 1));
	BUG_ON(limit && goal + size > limit);

	if (!bdata->node_bootmem_map)
		return NULL;

	min = bdata->node_min_pfn;
	max = bdata->node_low_pfn;

	/* Work in PFNs from here on. */
	goal >>= PAGE_SHIFT;
	limit >>= PAGE_SHIFT;

	if (limit && max > limit)
		max = limit;
	if (max <= min)
		return NULL;

	step = max(align >> PAGE_SHIFT, 1UL);

	if (goal && min < goal && goal < max)
		start = ALIGN(goal, step);
	else
		start = ALIGN(min, step);

	sidx = start - bdata->node_min_pfn;
	midx = max - bdata->node_min_pfn;

	if (bdata->hint_idx > sidx) {
		/*
		 * Handle the valid case of sidx being zero and still
		 * catch the fallback below.
		 */
		fallback = sidx + 1;
		sidx = align_idx(bdata, bdata->hint_idx, step);
	}

	while (1) {
		int merge;
		void *region;
		unsigned long eidx, i, start_off, end_off;
find_block:
		/* Find the next free, suitably aligned candidate window. */
		sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
		sidx = align_idx(bdata, sidx, step);
		eidx = sidx + PFN_UP(size);

		if (sidx >= midx || eidx > midx)
			break;

		/* Verify the whole window is free; skip past any used page. */
		for (i = sidx; i < eidx; i++)
			if (test_bit(i, bdata->node_bootmem_map)) {
				sidx = align_idx(bdata, i, step);
				if (sidx == i)
					sidx += step;
				goto find_block;
			}

		/*
		 * If the previous allocation left a partial page and it
		 * borders this window, pack the new region into that page.
		 */
		if (bdata->last_end_off & (PAGE_SIZE - 1) &&
				PFN_DOWN(bdata->last_end_off) + 1 == sidx)
			start_off = align_off(bdata, bdata->last_end_off, align);
		else
			start_off = PFN_PHYS(sidx);

		merge = PFN_DOWN(start_off) < sidx;
		end_off = start_off + size;

		/* Remember where we ended for future merge attempts. */
		bdata->last_end_off = end_off;
		bdata->hint_idx = PFN_UP(end_off);

		/*
		 * Reserve the area now:
		 */
		if (__reserve(bdata, PFN_DOWN(start_off) + merge,
				PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
			BUG();

		region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
				start_off);
		memset(region, 0, size);
		/*
		 * The min_count is set to 0 so that bootmem allocated blocks
		 * are never reported as leaks.
		 */
		kmemleak_alloc(region, size, 0, 0);
		return region;
	}

	if (fallback) {
		/* Retry once from the original start, ignoring the hint. */
		sidx = align_idx(bdata, fallback - 1, step);
		fallback = 0;
		goto find_block;
	}

	return NULL;
}
/*
 * alloc_arch_preferred_bootmem - let the architecture satisfy the request
 * from a preferred node first.
 *
 * If the slab allocator is already up, the caller is using bootmem too
 * late: warn once and fall back to kzalloc.
 */
static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
					unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

#ifdef CONFIG_HAVE_ARCH_BOOTMEM
	{
		bootmem_data_t *p_bdata;

		p_bdata = bootmem_arch_preferred_node(bdata, size, align,
							goal, limit);
		if (p_bdata)
			return alloc_bootmem_core(p_bdata, size, align,
							goal, limit);
	}
#endif
	return NULL;
}
/*
 * ___alloc_bootmem_nopanic - walk all registered nodes looking for a
 * fitting block; the goal is dropped and the search restarted if no node
 * can honour it.  Returns NULL on complete failure.
 */
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	bootmem_data_t *bdata;
	void *region;

restart:
	/* Give the architecture first pick. */
	region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
	if (region)
		return region;

	list_for_each_entry(bdata, &bdata_list, list) {
		/* Node entirely below the goal: skip it. */
		if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
			continue;
		/* Nodes are sorted; everything from here is above the limit. */
		if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
			break;

		region = alloc_bootmem_core(bdata, size, align, goal, limit);
		if (region)
			return region;
	}

	if (goal) {
		/* Retry without the goal constraint. */
		goal = 0;
		goto restart;
	}

	return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	unsigned long limit = 0;

	return ___alloc_bootmem_nopanic(size, align, goal, limit);
}
1da177e4c
|
616 |
|
0f3caba21
|
617 618 619 620 621 622 623 624 625 626 627 628 629 |
static void * __init ___alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal, unsigned long limit) { void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit); if (mem) return mem; /* * Whoops, we cannot satisfy the allocation request. */ printk(KERN_ALERT "bootmem alloc of %lu bytes failed! ", size); panic("Out of memory"); |
a8062231d
|
630 631 |
return NULL; } |
1da177e4c
|
632 |
|
a66fd7dae
|
633 634 635 636 637 638 639 640 641 642 643 644 645 |
/** * __alloc_bootmem - allocate boot memory * @size: size of the request in bytes * @align: alignment of the region * @goal: preferred starting address of the region * * The goal is dropped if it can not be satisfied and the allocation will * fall back to memory below @goal. * * Allocation may happen on any node in the system. * * The function panics if the request can not be satisfied. */ |
bb0923a66
|
646 647 |
void * __init __alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal) |
a8062231d
|
648 |
{ |
08677214e
|
649 |
unsigned long limit = 0; |
08677214e
|
650 |
return ___alloc_bootmem(size, align, goal, limit); |
1da177e4c
|
651 |
} |
/*
 * ___alloc_bootmem_node - node-preferred allocation: try the architecture
 * hook, then @bdata's own node, then any node (which panics on failure).
 */
static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
				unsigned long size, unsigned long align,
				unsigned long goal, unsigned long limit)
{
	void *ptr;

	ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit);
	if (ptr)
		return ptr;

	ptr = alloc_bootmem_core(bdata, size, align, goal, limit);
	if (ptr)
		return ptr;

	/* Fall back to any node; panics if nothing fits. */
	return ___alloc_bootmem(size, align, goal, limit);
}

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
}

/*
 * __alloc_bootmem_node_high - like __alloc_bootmem_node(), but on
 * configurations with a DMA32 zone, first try to allocate above
 * MAX_DMA32_PFN when the node extends well beyond it, preserving low
 * memory for callers that need it.
 */
void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
#ifdef MAX_DMA32_PFN
	unsigned long end_pfn;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	/* update goal according ...MAX_DMA32_PFN */
	end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;

	/* Only retarget if the node reaches at least 128MB past DMA32. */
	if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
	    (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
		void *ptr;
		unsigned long new_goal;

		new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
		ptr = alloc_bootmem_core(pgdat->bdata, size, align,
						 new_goal, 0);
		if (ptr)
			return ptr;
	}
#endif

	return __alloc_bootmem_node(pgdat, size, align, goal);
}
e70260aab
|
716 |
#ifdef CONFIG_SPARSEMEM
/**
 * alloc_bootmem_section - allocate boot memory from a specific section
 * @size: size of the request in bytes
 * @section_nr: sparse map section to allocate from
 *
 * Return NULL on failure.
 */
void * __init alloc_bootmem_section(unsigned long size,
				    unsigned long section_nr)
{
	bootmem_data_t *bdata;
	unsigned long pfn, goal, limit;

	pfn = section_nr_to_pfn(section_nr);
	/* Constrain the allocation to this section's physical span. */
	goal = pfn << PAGE_SHIFT;
	limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
	bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];

	return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
}
#endif
/*
 * __alloc_bootmem_node_nopanic - node-preferred allocation that returns
 * NULL instead of panicking when nothing fits anywhere.
 */
void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	/* Arch hook first, then the requested node ... */
	ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;

	ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;

	/* ... and finally any node, without panicking. */
	return __alloc_bootmem_nopanic(size, align, goal);
}
/* Default upper bound for "low" allocations; architectures may override. */
#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat->bdata, size, align,
				     goal, ARCH_LOW_ADDRESS_LIMIT);
}