Blame view
mm/bootmem.c
19.7 KB
1da177e4c
|
1 |
/* |
57cfc29ef
|
2 |
* bootmem - A boot-time physical memory allocator and configurator |
1da177e4c
|
3 4 |
* * Copyright (C) 1999 Ingo Molnar |
57cfc29ef
|
5 6 |
* 1999 Kanoj Sarcar, SGI * 2008 Johannes Weiner |
1da177e4c
|
7 |
* |
57cfc29ef
|
8 9 |
* Access to this subsystem has to be serialized externally (which is true * for the boot process anyway). |
1da177e4c
|
10 |
*/ |
1da177e4c
|
11 |
#include <linux/init.h> |
bbc7b92e3
|
12 |
#include <linux/pfn.h> |
1da177e4c
|
13 |
#include <linux/bootmem.h> |
1da177e4c
|
14 |
#include <linux/module.h> |
ec3a354bd
|
15 |
#include <linux/kmemleak.h> |
e786e86a5
|
16 17 |
#include <asm/bug.h> |
1da177e4c
|
18 |
#include <asm/io.h> |
dfd54cbcc
|
19 |
#include <asm/processor.h> |
e786e86a5
|
20 |
|
1da177e4c
|
21 |
#include "internal.h" |
1da177e4c
|
22 23 24 |
unsigned long max_low_pfn; unsigned long min_low_pfn; unsigned long max_pfn; |
92aa63a5a
|
25 26 27 28 29 30 31 |
#ifdef CONFIG_CRASH_DUMP /* * If we have booted due to a crash, max_pfn will be a very low value. We need * to know the amount of memory that the previous kernel used. */ unsigned long saved_max_pfn; #endif |
b61bfa3c4
|
32 |
bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata; |
636cc40cb
|
33 |
static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list); |
/* Set by the "bootmem_debug" kernel parameter; enables bdebug() output. */
static int bootmem_debug;

static int __init bootmem_debug_setup(char *buf)
{
	bootmem_debug = 1;
	return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

/*
 * Debug printout prefixed with the calling function's name; compiles to
 * almost nothing when the parameter is not given (unlikely() branch).
 */
#define bdebug(fmt, args...) ({				\
	if (unlikely(bootmem_debug))			\
		printk(KERN_INFO			\
			"bootmem::%s " fmt,		\
			__func__, ## args);		\
})
df049a5f4
|
49 |
static unsigned long __init bootmap_bytes(unsigned long pages) |
223e8dc92
|
50 |
{ |
df049a5f4
|
51 |
unsigned long bytes = (pages + 7) / 8; |
223e8dc92
|
52 |
|
df049a5f4
|
53 |
return ALIGN(bytes, sizeof(long)); |
223e8dc92
|
54 |
} |
a66fd7dae
|
55 56 57 58 |
/** * bootmem_bootmap_pages - calculate bitmap size in pages * @pages: number of pages the bitmap has to represent */ |
f71bf0cac
|
59 |
unsigned long __init bootmem_bootmap_pages(unsigned long pages) |
1da177e4c
|
60 |
{ |
df049a5f4
|
61 |
unsigned long bytes = bootmap_bytes(pages); |
1da177e4c
|
62 |
|
df049a5f4
|
63 |
return PAGE_ALIGN(bytes) >> PAGE_SHIFT; |
1da177e4c
|
64 |
} |
f71bf0cac
|
65 |
|
679bc9fbb
|
66 67 68 |
/* * link bdata in order */ |
69d49e681
|
69 |
static void __init link_bootmem(bootmem_data_t *bdata) |
679bc9fbb
|
70 |
{ |
636cc40cb
|
71 |
struct list_head *iter; |
f71bf0cac
|
72 |
|
636cc40cb
|
73 74 75 76 |
list_for_each(iter, &bdata_list) { bootmem_data_t *ent; ent = list_entry(iter, bootmem_data_t, list); |
3560e249a
|
77 |
if (bdata->node_min_pfn < ent->node_min_pfn) |
636cc40cb
|
78 |
break; |
679bc9fbb
|
79 |
} |
636cc40cb
|
80 |
list_add_tail(&bdata->list, iter); |
679bc9fbb
|
81 |
} |
/*
 * Called once to set up the allocator itself.
 *
 * @mapstart is the page frame number where the bitmap lives; @start and
 * @end delimit the node in pfns.  Returns the bitmap size in bytes.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	unsigned long mapsize;

	/* Clamp the range to what the memory model can represent. */
	mminit_validate_memmodel_limits(&start, &end);
	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
	bdata->node_min_pfn = start;
	bdata->node_low_pfn = end;
	link_bootmem(bdata);

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	mapsize = bootmap_bytes(end - start);
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
		bdata - bootmem_node_data, start, mapstart, end, mapsize);

	return mapsize;
}
a66fd7dae
|
107 108 109 110 111 112 113 114 115 |
/** * init_bootmem_node - register a node as boot memory * @pgdat: node to register * @freepfn: pfn where the bitmap for this node is to be placed * @startpfn: first pfn on the node * @endpfn: first pfn after the node * * Returns the number of bytes needed to hold the bitmap for this node. */ |
223e8dc92
|
116 117 118 119 120 |
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn, unsigned long startpfn, unsigned long endpfn) { return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn); } |
a66fd7dae
|
121 122 123 124 125 126 127 |
/** * init_bootmem - register boot memory * @start: pfn where the bitmap is to be placed * @pages: number of available physical pages * * Returns the number of bytes needed to hold the bitmap. */ |
223e8dc92
|
128 129 130 131 132 133 |
unsigned long __init init_bootmem(unsigned long start, unsigned long pages) { max_low_pfn = pages; min_low_pfn = start; return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages); } |
9f993ac3f
|
134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 |
/* * free_bootmem_late - free bootmem pages directly to page allocator * @addr: starting address of the range * @size: size of the range in bytes * * This is only useful when the bootmem allocator has already been torn * down, but we are still initializing the system. Pages are given directly * to the page allocator, no bootmem metadata is updated because it is gone. */ void __init free_bootmem_late(unsigned long addr, unsigned long size) { unsigned long cursor, end; kmemleak_free_part(__va(addr), size); cursor = PFN_UP(addr); end = PFN_DOWN(addr + size); for (; cursor < end; cursor++) { __free_pages_bootmem(pfn_to_page(cursor), 0); totalram_pages++; } } |
/*
 * Hand every free page of @bdata's node (and finally the bitmap pages
 * themselves) over to the buddy allocator.  Returns the page count.
 */
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
	int aligned;
	struct page *page;
	unsigned long start, end, pages, count = 0;

	/* Nothing to do for a node that never got a bitmap. */
	if (!bdata->node_bootmem_map)
		return 0;

	start = bdata->node_min_pfn;
	end = bdata->node_low_pfn;

	/*
	 * If the start is aligned to the machines wordsize, we might
	 * be able to free pages in bulks of that order.
	 */
	aligned = !(start & (BITS_PER_LONG - 1));

	bdebug("nid=%td start=%lx end=%lx aligned=%d\n",
		bdata - bootmem_node_data, start, end, aligned);

	while (start < end) {
		unsigned long *map, idx, vec;

		map = bdata->node_bootmem_map;
		idx = start - bdata->node_min_pfn;
		/* Invert: a set bit in vec now means "page is free". */
		vec = ~map[idx / BITS_PER_LONG];

		if (aligned && vec == ~0UL && start + BITS_PER_LONG < end) {
			/* Whole word free: release it as one high-order block. */
			int order = ilog2(BITS_PER_LONG);

			__free_pages_bootmem(pfn_to_page(start), order);
			count += BITS_PER_LONG;
		} else {
			/* Walk the word bit by bit, freeing single pages. */
			unsigned long off = 0;

			while (vec && off < BITS_PER_LONG) {
				if (vec & 1) {
					page = pfn_to_page(start + off);
					__free_pages_bootmem(page, 0);
					count++;
				}
				vec >>= 1;
				off++;
			}
		}
		start += BITS_PER_LONG;
	}

	/* The bitmap itself is no longer needed: free its backing pages. */
	page = virt_to_page(bdata->node_bootmem_map);
	pages = bdata->node_low_pfn - bdata->node_min_pfn;
	pages = bootmem_bootmap_pages(pages);
	count += pages;
	while (pages--)
		__free_pages_bootmem(page++, 0);

	bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

	return count;
}
a66fd7dae
|
215 216 217 218 219 220 |
/** * free_all_bootmem_node - release a node's free pages to the buddy allocator * @pgdat: node to be released * * Returns the number of pages actually released. */ |
223e8dc92
|
221 222 223 224 225 |
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat) { register_page_bootmem_info_node(pgdat); return free_all_bootmem_core(pgdat->bdata); } |
a66fd7dae
|
226 227 228 229 230 |
/** * free_all_bootmem - release free pages to the buddy allocator * * Returns the number of pages actually released. */ |
223e8dc92
|
231 232 233 234 |
unsigned long __init free_all_bootmem(void) { return free_all_bootmem_core(NODE_DATA(0)->bdata); } |
/*
 * Clear the bitmap bits [sidx, eidx) of @bdata.  Indices are relative to
 * node_min_pfn.  BUGs on a double free.
 */
static void __init __free(bootmem_data_t *bdata,
			unsigned long sidx, unsigned long eidx)
{
	unsigned long idx;

	bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn);

	/* Freed space below the allocation hint: move the hint back down. */
	if (bdata->hint_idx > sidx)
		bdata->hint_idx = sidx;

	for (idx = sidx; idx < eidx; idx++)
		if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
			BUG();
}

/*
 * Set the bitmap bits [sidx, eidx) of @bdata.  With BOOTMEM_EXCLUSIVE an
 * already-set bit aborts the operation: the bits set so far are rolled
 * back and -EBUSY is returned.  Otherwise double reservations are only
 * logged.  Returns 0 on success.
 */
static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
			unsigned long eidx, int flags)
{
	unsigned long idx;
	int exclusive = flags & BOOTMEM_EXCLUSIVE;

	bdebug("nid=%td start=%lx end=%lx flags=%x\n",
		bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn,
		flags);

	for (idx = sidx; idx < eidx; idx++)
		if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
			if (exclusive) {
				/* Undo the partial reservation [sidx, idx). */
				__free(bdata, sidx, idx);
				return -EBUSY;
			}
			bdebug("silent double reserve of PFN %lx\n",
				idx + bdata->node_min_pfn);
		}
	return 0;
}
/*
 * Reserve (@reserve != 0) or free the pfn range [start, end) which must
 * lie entirely within @bdata's node.  Returns 0 or __reserve()'s error.
 */
static int __init mark_bootmem_node(bootmem_data_t *bdata,
				unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long sidx, eidx;

	bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
		bdata - bootmem_node_data, start, end, reserve, flags);

	/* Callers must not pass a range that leaks out of the node. */
	BUG_ON(start < bdata->node_min_pfn);
	BUG_ON(end > bdata->node_low_pfn);

	/* Translate pfns into bitmap indices relative to the node start. */
	sidx = start - bdata->node_min_pfn;
	eidx = end - bdata->node_min_pfn;

	if (reserve)
		return __reserve(bdata, sidx, eidx, flags);
	else
		__free(bdata, sidx, eidx);
	return 0;
}

/*
 * Like mark_bootmem_node() but the contiguous pfn range may span node
 * boundaries; it is split along the (sorted) bdata_list.  On a failed
 * reservation everything marked so far is freed again.  BUGs if part of
 * the range belongs to no registered node.
 */
static int __init mark_bootmem(unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long pos;
	bootmem_data_t *bdata;

	pos = start;
	list_for_each_entry(bdata, &bdata_list, list) {
		int err;
		unsigned long max;

		if (pos < bdata->node_min_pfn ||
		    pos >= bdata->node_low_pfn) {
			/* Only the very start may fall between nodes. */
			BUG_ON(pos != start);
			continue;
		}

		/* Clamp to this node; later nodes handle the remainder. */
		max = min(bdata->node_low_pfn, end);

		err = mark_bootmem_node(bdata, pos, max, reserve, flags);
		if (reserve && err) {
			/* Roll back the nodes already reserved. */
			mark_bootmem(start, pos, 0, 0);
			return err;
		}

		if (max == end)
			return 0;
		pos = bdata->node_low_pfn;
	}
	BUG();
}
a66fd7dae
|
330 331 332 333 334 335 336 337 |
/** * free_bootmem_node - mark a page range as usable * @pgdat: node the range resides on * @physaddr: starting address of the range * @size: size of the range in bytes * * Partial pages will be considered reserved and left as they are. * |
e2bf3cae5
|
338 |
* The range must reside completely on the specified node. |
a66fd7dae
|
339 |
*/ |
223e8dc92
|
340 341 342 |
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, unsigned long size) { |
e2bf3cae5
|
343 |
unsigned long start, end; |
ec3a354bd
|
344 |
kmemleak_free_part(__va(physaddr), size); |
e2bf3cae5
|
345 346 347 348 |
start = PFN_UP(physaddr); end = PFN_DOWN(physaddr + size); mark_bootmem_node(pgdat->bdata, start, end, 0, 0); |
223e8dc92
|
349 |
} |
a66fd7dae
|
350 351 352 353 354 355 356 |
/** * free_bootmem - mark a page range as usable * @addr: starting address of the range * @size: size of the range in bytes * * Partial pages will be considered reserved and left as they are. * |
e2bf3cae5
|
357 |
* The range must be contiguous but may span node boundaries. |
a66fd7dae
|
358 |
*/ |
223e8dc92
|
359 360 |
void __init free_bootmem(unsigned long addr, unsigned long size) { |
e2bf3cae5
|
361 |
unsigned long start, end; |
a5645a61b
|
362 |
|
ec3a354bd
|
363 |
kmemleak_free_part(__va(addr), size); |
e2bf3cae5
|
364 365 |
start = PFN_UP(addr); end = PFN_DOWN(addr + size); |
1da177e4c
|
366 |
|
e2bf3cae5
|
367 |
mark_bootmem(start, end, 0, 0); |
1da177e4c
|
368 |
} |
a66fd7dae
|
369 370 371 372 373 374 375 376 377 |
/** * reserve_bootmem_node - mark a page range as reserved * @pgdat: node the range resides on * @physaddr: starting address of the range * @size: size of the range in bytes * @flags: reservation flags (see linux/bootmem.h) * * Partial pages will be reserved. * |
e2bf3cae5
|
378 |
* The range must reside completely on the specified node. |
a66fd7dae
|
379 |
*/ |
223e8dc92
|
380 381 |
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, unsigned long size, int flags) |
1da177e4c
|
382 |
{ |
e2bf3cae5
|
383 |
unsigned long start, end; |
1da177e4c
|
384 |
|
e2bf3cae5
|
385 386 387 388 |
start = PFN_DOWN(physaddr); end = PFN_UP(physaddr + size); return mark_bootmem_node(pgdat->bdata, start, end, 1, flags); |
223e8dc92
|
389 |
} |
5a982cbc7
|
390 |
|
a66fd7dae
|
391 392 393 394 395 396 397 398 |
/** * reserve_bootmem - mark a page range as usable * @addr: starting address of the range * @size: size of the range in bytes * @flags: reservation flags (see linux/bootmem.h) * * Partial pages will be reserved. * |
e2bf3cae5
|
399 |
* The range must be contiguous but may span node boundaries. |
a66fd7dae
|
400 |
*/ |
223e8dc92
|
401 402 403 |
int __init reserve_bootmem(unsigned long addr, unsigned long size, int flags) { |
e2bf3cae5
|
404 |
unsigned long start, end; |
1da177e4c
|
405 |
|
e2bf3cae5
|
406 407 |
start = PFN_DOWN(addr); end = PFN_UP(addr + size); |
223e8dc92
|
408 |
|
e2bf3cae5
|
409 |
return mark_bootmem(start, end, 1, flags); |
1da177e4c
|
410 |
} |
8aa043d74
|
411 412 |
static unsigned long __init align_idx(struct bootmem_data *bdata, unsigned long idx, unsigned long step) |
481ebd0d7
|
413 414 415 416 417 418 419 420 421 422 |
{ unsigned long base = bdata->node_min_pfn; /* * Align the index with respect to the node start so that the * combination of both satisfies the requested alignment. */ return ALIGN(base + idx, step) - base; } |
8aa043d74
|
423 424 |
static unsigned long __init align_off(struct bootmem_data *bdata, unsigned long off, unsigned long align) |
481ebd0d7
|
425 426 427 428 429 430 431 |
{ unsigned long base = PFN_PHYS(bdata->node_min_pfn); /* Same as align_idx for byte offsets */ return ALIGN(base + off, align) - base; } |
/*
 * The workhorse: scan @bdata's bitmap for a free, suitably aligned run
 * of pages between @goal and @limit (both physical addresses, 0 = don't
 * care), reserve it, zero it and return its virtual address.  Small
 * requests may be merged into the tail of the previous allocation via
 * last_end_off.  Returns NULL when nothing fits.
 */
static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
					unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	unsigned long fallback = 0;
	unsigned long min, max, start, sidx, midx, step;

	bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
		bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
		align, goal, limit);

	BUG_ON(!size);
	BUG_ON(align & (align - 1));	/* alignment must be a power of two */
	BUG_ON(limit && goal + size > limit);

	if (!bdata->node_bootmem_map)
		return NULL;

	min = bdata->node_min_pfn;
	max = bdata->node_low_pfn;

	/* From here on, work in page frame numbers. */
	goal >>= PAGE_SHIFT;
	limit >>= PAGE_SHIFT;

	if (limit && max > limit)
		max = limit;
	if (max <= min)
		return NULL;

	step = max(align >> PAGE_SHIFT, 1UL);

	if (goal && min < goal && goal < max)
		start = ALIGN(goal, step);
	else
		start = ALIGN(min, step);

	sidx = start - bdata->node_min_pfn;
	midx = max - bdata->node_min_pfn;

	if (bdata->hint_idx > sidx) {
		/*
		 * Handle the valid case of sidx being zero and still
		 * catch the fallback below.
		 */
		fallback = sidx + 1;
		sidx = align_idx(bdata, bdata->hint_idx, step);
	}

	while (1) {
		int merge;
		void *region;
		unsigned long eidx, i, start_off, end_off;
find_block:
		sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
		sidx = align_idx(bdata, sidx, step);
		eidx = sidx + PFN_UP(size);

		if (sidx >= midx || eidx > midx)
			break;

		/* Verify every page of the candidate run is still free. */
		for (i = sidx; i < eidx; i++)
			if (test_bit(i, bdata->node_bootmem_map)) {
				sidx = align_idx(bdata, i, step);
				if (sidx == i)
					sidx += step;
				goto find_block;
			}

		/*
		 * If the previous allocation ended mid-page right below
		 * this run, pack the new one into that partial page.
		 */
		if (bdata->last_end_off & (PAGE_SIZE - 1) &&
				PFN_DOWN(bdata->last_end_off) + 1 == sidx)
			start_off = align_off(bdata, bdata->last_end_off,
						align);
		else
			start_off = PFN_PHYS(sidx);

		/* merge == the first page is shared with the previous alloc */
		merge = PFN_DOWN(start_off) < sidx;
		end_off = start_off + size;

		bdata->last_end_off = end_off;
		bdata->hint_idx = PFN_UP(end_off);

		/*
		 * Reserve the area now:
		 */
		if (__reserve(bdata, PFN_DOWN(start_off) + merge,
				PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
			BUG();

		region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
				start_off);
		memset(region, 0, size);
		/*
		 * The min_count is set to 0 so that bootmem allocated blocks
		 * are never reported as leaks.
		 */
		kmemleak_alloc(region, size, 0, 0);
		return region;
	}

	if (fallback) {
		/* Hint-based scan failed: retry from the original start. */
		sidx = align_idx(bdata, fallback - 1, step);
		fallback = 0;
		goto find_block;
	}

	return NULL;
}
/*
 * Give the architecture a chance to satisfy the request from a node it
 * prefers.  Falls back to kzalloc() if bootmem is used too late (after
 * the slab allocator is up), NULL when there is no arch preference.
 */
static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
					unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	/* Bootmem must not be used once slab is available. */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

#ifdef CONFIG_HAVE_ARCH_BOOTMEM
	{
		bootmem_data_t *p_bdata;

		p_bdata = bootmem_arch_preferred_node(bdata, size, align,
							goal, limit);
		if (p_bdata)
			return alloc_bootmem_core(p_bdata, size, align,
							goal, limit);
	}
#endif
	return NULL;
}
/*
 * Try every registered node in pfn order, respecting @goal and @limit;
 * if nothing fits, retry once with the goal dropped.  Returns NULL on
 * complete failure.
 */
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	bootmem_data_t *bdata;
	void *region;

restart:
	/* Arch-preferred placement gets the first shot. */
	region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
	if (region)
		return region;

	list_for_each_entry(bdata, &bdata_list, list) {
		/* Node lies entirely below the goal: skip it. */
		if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
			continue;
		/* List is sorted: all further nodes start above the limit. */
		if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
			break;

		region = alloc_bootmem_core(bdata, size, align, goal, limit);
		if (region)
			return region;
	}

	if (goal) {
		/* Could not honour the goal - retry anywhere below it. */
		goal = 0;
		goto restart;
	}

	return NULL;
}
a66fd7dae
|
583 584 585 586 587 588 589 590 591 592 593 594 595 |
/** * __alloc_bootmem_nopanic - allocate boot memory without panicking * @size: size of the request in bytes * @align: alignment of the region * @goal: preferred starting address of the region * * The goal is dropped if it can not be satisfied and the allocation will * fall back to memory below @goal. * * Allocation may happen on any node in the system. * * Returns NULL on failure. */ |
bb0923a66
|
596 |
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align, |
0f3caba21
|
597 |
unsigned long goal) |
1da177e4c
|
598 |
{ |
0f3caba21
|
599 600 |
return ___alloc_bootmem_nopanic(size, align, goal, 0); } |
1da177e4c
|
601 |
|
0f3caba21
|
602 603 604 605 606 607 608 609 610 611 612 613 614 |
static void * __init ___alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal, unsigned long limit) { void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit); if (mem) return mem; /* * Whoops, we cannot satisfy the allocation request. */ printk(KERN_ALERT "bootmem alloc of %lu bytes failed! ", size); panic("Out of memory"); |
a8062231d
|
615 616 |
return NULL; } |
1da177e4c
|
617 |
|
a66fd7dae
|
618 619 620 621 622 623 624 625 626 627 628 629 630 |
/** * __alloc_bootmem - allocate boot memory * @size: size of the request in bytes * @align: alignment of the region * @goal: preferred starting address of the region * * The goal is dropped if it can not be satisfied and the allocation will * fall back to memory below @goal. * * Allocation may happen on any node in the system. * * The function panics if the request can not be satisfied. */ |
bb0923a66
|
631 632 |
void * __init __alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal) |
a8062231d
|
633 |
{ |
0f3caba21
|
634 |
return ___alloc_bootmem(size, align, goal, 0); |
1da177e4c
|
635 |
} |
4cc278b72
|
636 637 638 639 640 |
static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata, unsigned long size, unsigned long align, unsigned long goal, unsigned long limit) { void *ptr; |
d0c4f5702
|
641 642 643 |
ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit); if (ptr) return ptr; |
4cc278b72
|
644 645 646 647 648 649 |
ptr = alloc_bootmem_core(bdata, size, align, goal, limit); if (ptr) return ptr; return ___alloc_bootmem(size, align, goal, limit); } |
a66fd7dae
|
650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 |
/** * __alloc_bootmem_node - allocate boot memory from a specific node * @pgdat: node to allocate from * @size: size of the request in bytes * @align: alignment of the region * @goal: preferred starting address of the region * * The goal is dropped if it can not be satisfied and the allocation will * fall back to memory below @goal. * * Allocation may fall back to any node in the system if the specified node * can not hold the requested memory. * * The function panics if the request can not be satisfied. */ |
bb0923a66
|
665 666 |
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal) |
1da177e4c
|
667 |
{ |
c91c4773b
|
668 669 |
if (WARN_ON_ONCE(slab_is_available())) return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id); |
4cc278b72
|
670 |
return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0); |
1da177e4c
|
671 |
} |
#ifdef CONFIG_SPARSEMEM
/**
 * alloc_bootmem_section - allocate boot memory from a specific section
 * @size: size of the request in bytes
 * @section_nr: sparse map section to allocate from
 *
 * Return NULL on failure.
 */
void * __init alloc_bootmem_section(unsigned long size,
				    unsigned long section_nr)
{
	bootmem_data_t *bdata;
	unsigned long pfn, goal, limit;

	pfn = section_nr_to_pfn(section_nr);
	/* Constrain the allocation to [section start, next section start). */
	goal = pfn << PAGE_SHIFT;
	limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
	/* Allocate from the node the section belongs to. */
	bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];

	return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
}
#endif
b54bbf7b8
|
694 695 696 697 |
void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal) { void *ptr; |
c91c4773b
|
698 699 |
if (WARN_ON_ONCE(slab_is_available())) return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id); |
d0c4f5702
|
700 701 702 |
ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0); if (ptr) return ptr; |
b54bbf7b8
|
703 704 705 706 707 708 |
ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0); if (ptr) return ptr; return __alloc_bootmem_nopanic(size, align, goal); } |
dfd54cbcc
|
709 710 711 |
#ifndef ARCH_LOW_ADDRESS_LIMIT #define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL #endif |
008857c1a
|
712 |
|
a66fd7dae
|
713 714 715 716 717 718 719 720 721 722 723 724 725 |
/** * __alloc_bootmem_low - allocate low boot memory * @size: size of the request in bytes * @align: alignment of the region * @goal: preferred starting address of the region * * The goal is dropped if it can not be satisfied and the allocation will * fall back to memory below @goal. * * Allocation may happen on any node in the system. * * The function panics if the request can not be satisfied. */ |
bb0923a66
|
726 727 |
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align, unsigned long goal) |
008857c1a
|
728 |
{ |
0f3caba21
|
729 |
return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT); |
008857c1a
|
730 |
} |
a66fd7dae
|
731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 |
/** * __alloc_bootmem_low_node - allocate low boot memory from a specific node * @pgdat: node to allocate from * @size: size of the request in bytes * @align: alignment of the region * @goal: preferred starting address of the region * * The goal is dropped if it can not be satisfied and the allocation will * fall back to memory below @goal. * * Allocation may fall back to any node in the system if the specified node * can not hold the requested memory. * * The function panics if the request can not be satisfied. */ |
008857c1a
|
746 747 748 |
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal) { |
c91c4773b
|
749 750 |
if (WARN_ON_ONCE(slab_is_available())) return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id); |
4cc278b72
|
751 752 |
return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, ARCH_LOW_ADDRESS_LIMIT); |
008857c1a
|
753 |
} |