Blame view
mm/bootmem.c
20.8 KB
1da177e4c
|
1 |
/* |
57cfc29ef
|
2 |
* bootmem - A boot-time physical memory allocator and configurator |
1da177e4c
|
3 4 |
* * Copyright (C) 1999 Ingo Molnar |
57cfc29ef
|
5 6 |
* 1999 Kanoj Sarcar, SGI * 2008 Johannes Weiner |
1da177e4c
|
7 |
* |
57cfc29ef
|
8 9 |
* Access to this subsystem has to be serialized externally (which is true * for the boot process anyway). |
1da177e4c
|
10 |
*/ |
1da177e4c
|
11 |
#include <linux/init.h> |
bbc7b92e3
|
12 |
#include <linux/pfn.h> |
5a0e3ad6a
|
13 |
#include <linux/slab.h> |
b95f1b31b
|
14 |
#include <linux/export.h> |
ec3a354bd
|
15 |
#include <linux/kmemleak.h> |
08677214e
|
16 |
#include <linux/range.h> |
d85fbee89
|
17 18 |
#include <linux/bug.h> #include <linux/io.h> |
1d8bf926f
|
19 |
#include <linux/bootmem.h> |
e786e86a5
|
20 |
|
1da177e4c
|
21 |
#include "internal.h" |
e782ab421
|
22 23 24 25 26 27 |
#ifndef CONFIG_NEED_MULTIPLE_NODES
/* Single node descriptor for UMA builds; node 0 covers all memory. */
struct pglist_data __refdata contig_page_data = {
	.bdata = &bootmem_node_data[0]
};
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

/* Per-node bootmem bookkeeping; discarded after boot (__initdata). */
bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

/* All registered bdatas, kept sorted by ascending node_min_pfn. */
static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);
2e5237daf
|
35 36 37 38 39 40 41 42 43 44 45 |
static int bootmem_debug;

/* Enable verbose bootmem logging via the "bootmem_debug" kernel parameter. */
static int __init bootmem_debug_setup(char *buf)
{
	bootmem_debug = 1;
	return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

/* Debug printout helper; compiles to nothing unless bootmem_debug is set. */
#define bdebug(fmt, args...) ({				\
	if (unlikely(bootmem_debug))			\
		pr_info("bootmem::%s " fmt,		\
			__func__, ## args);		\
})
df049a5f4
|
49 |
/*
 * Number of bytes needed for a bitmap representing @pages pages,
 * one bit per page, rounded up to a whole number of longs.
 */
static unsigned long __init bootmap_bytes(unsigned long pages)
{
	unsigned long nbytes;

	nbytes = DIV_ROUND_UP(pages, 8);
	return ALIGN(nbytes, sizeof(long));
}
a66fd7dae
|
55 56 57 58 |
/** * bootmem_bootmap_pages - calculate bitmap size in pages * @pages: number of pages the bitmap has to represent */ |
f71bf0cac
|
59 |
unsigned long __init bootmem_bootmap_pages(unsigned long pages) |
1da177e4c
|
60 |
{ |
df049a5f4
|
61 |
unsigned long bytes = bootmap_bytes(pages); |
1da177e4c
|
62 |
|
df049a5f4
|
63 |
return PAGE_ALIGN(bytes) >> PAGE_SHIFT; |
1da177e4c
|
64 |
} |
f71bf0cac
|
65 |
|
679bc9fbb
|
66 67 68 |
/* * link bdata in order */ |
69d49e681
|
69 |
static void __init link_bootmem(bootmem_data_t *bdata) |
679bc9fbb
|
70 |
{ |
5c2b8a162
|
71 |
bootmem_data_t *ent; |
f71bf0cac
|
72 |
|
5c2b8a162
|
73 74 75 76 77 |
list_for_each_entry(ent, &bdata_list, list) { if (bdata->node_min_pfn < ent->node_min_pfn) { list_add_tail(&bdata->list, &ent->list); return; } |
679bc9fbb
|
78 |
} |
5c2b8a162
|
79 80 |
list_add_tail(&bdata->list, &bdata_list); |
679bc9fbb
|
81 |
} |
bbc7b92e3
|
82 |
/*
 * Called once to set up the allocator itself.
 *
 * @bdata:    node descriptor to initialize
 * @mapstart: pfn of the page(s) holding this node's bitmap
 * @start:    first pfn covered by the node
 * @end:      first pfn after the node
 *
 * Returns the bitmap size in bytes.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	unsigned long mapsize;

	/* Clamp [start, end) to what the memory model can represent. */
	mminit_validate_memmodel_limits(&start, &end);
	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
	bdata->node_min_pfn = start;
	bdata->node_low_pfn = end;
	link_bootmem(bdata);

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	mapsize = bootmap_bytes(end - start);
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
		bdata - bootmem_node_data, start, mapstart, end, mapsize);

	return mapsize;
}
a66fd7dae
|
107 108 109 110 111 112 113 114 115 |
/** * init_bootmem_node - register a node as boot memory * @pgdat: node to register * @freepfn: pfn where the bitmap for this node is to be placed * @startpfn: first pfn on the node * @endpfn: first pfn after the node * * Returns the number of bytes needed to hold the bitmap for this node. */ |
223e8dc92
|
116 117 118 119 120 |
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn, unsigned long startpfn, unsigned long endpfn) { return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn); } |
a66fd7dae
|
121 122 123 124 125 126 127 |
/** * init_bootmem - register boot memory * @start: pfn where the bitmap is to be placed * @pages: number of available physical pages * * Returns the number of bytes needed to hold the bitmap. */ |
223e8dc92
|
128 129 130 131 132 133 |
unsigned long __init init_bootmem(unsigned long start, unsigned long pages) { max_low_pfn = pages; min_low_pfn = start; return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages); } |
093258732
|
134 |
|
9f993ac3f
|
135 136 |
/* * free_bootmem_late - free bootmem pages directly to page allocator |
81df9bff2
|
137 |
* @addr: starting physical address of the range |
9f993ac3f
|
138 139 140 141 142 143 |
* @size: size of the range in bytes * * This is only useful when the bootmem allocator has already been torn * down, but we are still initializing the system. Pages are given directly * to the page allocator, no bootmem metadata is updated because it is gone. */ |
81df9bff2
|
144 |
void __init free_bootmem_late(unsigned long physaddr, unsigned long size) |
9f993ac3f
|
145 146 |
{ unsigned long cursor, end; |
9099daed9
|
147 |
kmemleak_free_part_phys(physaddr, size); |
9f993ac3f
|
148 |
|
81df9bff2
|
149 150 |
cursor = PFN_UP(physaddr); end = PFN_DOWN(physaddr + size); |
9f993ac3f
|
151 152 |
for (; cursor < end; cursor++) { |
d70ddd7a5
|
153 |
__free_pages_bootmem(pfn_to_page(cursor), cursor, 0); |
9f993ac3f
|
154 155 156 |
totalram_pages++; } } |
223e8dc92
|
157 158 159 |
/*
 * Release every unreserved page of @bdata to the page allocator, then
 * release the bitmap pages themselves.  Returns the number of pages freed.
 */
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
	struct page *page;
	unsigned long *map, start, end, pages, cur, count = 0;

	if (!bdata->node_bootmem_map)
		return 0;

	map = bdata->node_bootmem_map;
	start = bdata->node_min_pfn;
	end = bdata->node_low_pfn;

	bdebug("nid=%td start=%lx end=%lx\n",
		bdata - bootmem_node_data, start, end);

	while (start < end) {
		unsigned long idx, vec;
		unsigned shift;

		idx = start - bdata->node_min_pfn;
		shift = idx & (BITS_PER_LONG - 1);
		/*
		 * vec holds at most BITS_PER_LONG map bits,
		 * bit 0 corresponds to start.
		 */
		vec = ~map[idx / BITS_PER_LONG];

		if (shift) {
			/* start is mid-word: splice bits from the next word. */
			vec >>= shift;
			if (end - start >= BITS_PER_LONG)
				vec |= ~map[idx / BITS_PER_LONG + 1] <<
					(BITS_PER_LONG - shift);
		}
		/*
		 * If we have a properly aligned and fully unreserved
		 * BITS_PER_LONG block of pages in front of us, free
		 * it in one go.
		 */
		if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
			int order = ilog2(BITS_PER_LONG);

			__free_pages_bootmem(pfn_to_page(start), start, order);
			count += BITS_PER_LONG;
			start += BITS_PER_LONG;
		} else {
			/* Mixed word: free page by page up to the next
			 * BITS_PER_LONG boundary. */
			cur = start;

			start = ALIGN(start + 1, BITS_PER_LONG);
			while (vec && cur != start) {
				if (vec & 1) {
					page = pfn_to_page(cur);
					__free_pages_bootmem(page, cur, 0);
					count++;
				}
				vec >>= 1;
				++cur;
			}
		}
	}

	/* Finally free the pages occupied by the bitmap itself. */
	cur = bdata->node_min_pfn;
	page = virt_to_page(bdata->node_bootmem_map);
	pages = bdata->node_low_pfn - bdata->node_min_pfn;
	pages = bootmem_bootmap_pages(pages);
	count += pages;
	while (pages--)
		__free_pages_bootmem(page++, cur++, 0);
	bdata->node_bootmem_map = NULL;

	bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

	return count;
}
7b4b2a0d6
|
227 |
static int reset_managed_pages_done __initdata;

/* Zero the managed_pages counter of every zone on @pgdat; the counters
 * are rebuilt as pages are released to the buddy allocator. */
void reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		z->managed_pages = 0;
}

/* Reset managed_pages on all online nodes, at most once per boot. */
void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	if (reset_managed_pages_done)
		return;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);

	reset_managed_pages_done = 1;
}
a66fd7dae
|
245 |
/** |
a66fd7dae
|
246 247 248 249 |
* free_all_bootmem - release free pages to the buddy allocator * * Returns the number of pages actually released. */ |
223e8dc92
|
250 251 |
unsigned long __init free_all_bootmem(void) { |
aa235fc71
|
252 253 |
unsigned long total_pages = 0; bootmem_data_t *bdata; |
9feedc9d8
|
254 |
|
7b4b2a0d6
|
255 |
reset_all_zones_managed_pages(); |
aa235fc71
|
256 257 258 |
list_for_each_entry(bdata, &bdata_list, list) total_pages += free_all_bootmem_core(bdata); |
0c9885347
|
259 |
totalram_pages += total_pages; |
aa235fc71
|
260 |
return total_pages; |
223e8dc92
|
261 |
} |
d747fa4bc
|
262 263 264 265 266 267 268 |
/*
 * Clear the bitmap bits [sidx, eidx) of @bdata, i.e. mark those pages
 * free.  BUGs if any page in the range was already free.
 */
static void __init __free(bootmem_data_t *bdata,
			unsigned long sidx, unsigned long eidx)
{
	unsigned long idx;

	bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn);

	if (WARN_ON(bdata->node_bootmem_map == NULL))
		return;

	/* Freed memory below the hint becomes the new search start. */
	if (bdata->hint_idx > sidx)
		bdata->hint_idx = sidx;

	for (idx = sidx; idx < eidx; idx++)
		if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
			BUG();
}

/*
 * Set the bitmap bits [sidx, eidx) of @bdata, i.e. mark those pages
 * reserved.  With BOOTMEM_EXCLUSIVE, an already-reserved page rolls the
 * partial reservation back and returns -EBUSY; otherwise double reserves
 * are tolerated silently.  Returns 0 on success.
 */
static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
			unsigned long eidx, int flags)
{
	unsigned long idx;
	int exclusive = flags & BOOTMEM_EXCLUSIVE;

	bdebug("nid=%td start=%lx end=%lx flags=%x\n",
		bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn,
		flags);

	if (WARN_ON(bdata->node_bootmem_map == NULL))
		return 0;

	for (idx = sidx; idx < eidx; idx++)
		if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
			if (exclusive) {
				__free(bdata, sidx, idx);
				return -EBUSY;
			}
			bdebug("silent double reserve of PFN %lx\n",
				idx + bdata->node_min_pfn);
		}
	return 0;
}
e2bf3cae5
|
307 308 309 |
/*
 * Reserve (@reserve != 0) or free the pfn range [start, end) on a single
 * node.  The range must lie entirely within the node.
 */
static int __init mark_bootmem_node(bootmem_data_t *bdata,
				unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long sidx, eidx;

	bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
		bdata - bootmem_node_data, start, end, reserve, flags);

	BUG_ON(start < bdata->node_min_pfn);
	BUG_ON(end > bdata->node_low_pfn);

	/* Convert absolute pfns to bitmap indices relative to the node. */
	sidx = start - bdata->node_min_pfn;
	eidx = end - bdata->node_min_pfn;

	if (reserve)
		return __reserve(bdata, sidx, eidx, flags);
	else
		__free(bdata, sidx, eidx);
	return 0;
}

/*
 * Reserve or free a contiguous pfn range that may span several nodes,
 * applying mark_bootmem_node() per node.  A failed exclusive reservation
 * is rolled back by freeing what was already marked.  BUGs if the range
 * is not fully covered by registered nodes.
 */
static int __init mark_bootmem(unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long pos;
	bootmem_data_t *bdata;

	pos = start;
	list_for_each_entry(bdata, &bdata_list, list) {
		int err;
		unsigned long max;

		if (pos < bdata->node_min_pfn ||
		    pos >= bdata->node_low_pfn) {
			/* The list is sorted: only the first node may miss. */
			BUG_ON(pos != start);
			continue;
		}

		max = min(bdata->node_low_pfn, end);

		err = mark_bootmem_node(bdata, pos, max, reserve, flags);
		if (reserve && err) {
			/* Undo the partial reservation made so far. */
			mark_bootmem(start, pos, 0, 0);
			return err;
		}

		if (max == end)
			return 0;
		pos = bdata->node_low_pfn;
	}
	BUG();
}
a66fd7dae
|
360 361 362 363 364 365 366 367 |
/** * free_bootmem_node - mark a page range as usable * @pgdat: node the range resides on * @physaddr: starting address of the range * @size: size of the range in bytes * * Partial pages will be considered reserved and left as they are. * |
e2bf3cae5
|
368 |
* The range must reside completely on the specified node. |
a66fd7dae
|
369 |
*/ |
223e8dc92
|
370 371 372 |
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, unsigned long size) { |
e2bf3cae5
|
373 |
unsigned long start, end; |
9099daed9
|
374 |
kmemleak_free_part_phys(physaddr, size); |
ec3a354bd
|
375 |
|
e2bf3cae5
|
376 377 378 379 |
start = PFN_UP(physaddr); end = PFN_DOWN(physaddr + size); mark_bootmem_node(pgdat->bdata, start, end, 0, 0); |
223e8dc92
|
380 |
} |
a66fd7dae
|
381 382 |
/** * free_bootmem - mark a page range as usable |
81df9bff2
|
383 |
* @addr: starting physical address of the range |
a66fd7dae
|
384 385 386 387 |
* @size: size of the range in bytes * * Partial pages will be considered reserved and left as they are. * |
e2bf3cae5
|
388 |
* The range must be contiguous but may span node boundaries. |
a66fd7dae
|
389 |
*/ |
81df9bff2
|
390 |
void __init free_bootmem(unsigned long physaddr, unsigned long size) |
223e8dc92
|
391 |
{ |
e2bf3cae5
|
392 |
unsigned long start, end; |
a5645a61b
|
393 |
|
9099daed9
|
394 |
kmemleak_free_part_phys(physaddr, size); |
ec3a354bd
|
395 |
|
81df9bff2
|
396 397 |
start = PFN_UP(physaddr); end = PFN_DOWN(physaddr + size); |
1da177e4c
|
398 |
|
e2bf3cae5
|
399 |
mark_bootmem(start, end, 0, 0); |
1da177e4c
|
400 |
} |
a66fd7dae
|
401 402 403 404 405 406 407 408 409 |
/** * reserve_bootmem_node - mark a page range as reserved * @pgdat: node the range resides on * @physaddr: starting address of the range * @size: size of the range in bytes * @flags: reservation flags (see linux/bootmem.h) * * Partial pages will be reserved. * |
e2bf3cae5
|
410 |
* The range must reside completely on the specified node. |
a66fd7dae
|
411 |
*/ |
223e8dc92
|
412 413 |
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, unsigned long size, int flags) |
1da177e4c
|
414 |
{ |
e2bf3cae5
|
415 |
unsigned long start, end; |
1da177e4c
|
416 |
|
e2bf3cae5
|
417 418 419 420 |
start = PFN_DOWN(physaddr); end = PFN_UP(physaddr + size); return mark_bootmem_node(pgdat->bdata, start, end, 1, flags); |
223e8dc92
|
421 |
} |
5a982cbc7
|
422 |
|
a66fd7dae
|
423 |
/** |
0d4ba4d7b
|
424 |
* reserve_bootmem - mark a page range as reserved |
a66fd7dae
|
425 426 427 428 429 430 |
* @addr: starting address of the range * @size: size of the range in bytes * @flags: reservation flags (see linux/bootmem.h) * * Partial pages will be reserved. * |
e2bf3cae5
|
431 |
* The range must be contiguous but may span node boundaries. |
a66fd7dae
|
432 |
*/ |
223e8dc92
|
433 434 435 |
int __init reserve_bootmem(unsigned long addr, unsigned long size, int flags) { |
e2bf3cae5
|
436 |
unsigned long start, end; |
1da177e4c
|
437 |
|
e2bf3cae5
|
438 439 |
start = PFN_DOWN(addr); end = PFN_UP(addr + size); |
223e8dc92
|
440 |
|
e2bf3cae5
|
441 |
return mark_bootmem(start, end, 1, flags); |
1da177e4c
|
442 |
} |
8aa043d74
|
443 444 |
/*
 * Align a bitmap index to @step with respect to the node's start pfn,
 * so that the resulting absolute pfn satisfies the requested alignment.
 */
static unsigned long __init align_idx(struct bootmem_data *bdata,
				      unsigned long idx, unsigned long step)
{
	unsigned long base = bdata->node_min_pfn;
	unsigned long aligned = ALIGN(base + idx, step);

	return aligned - base;
}
8aa043d74
|
455 456 |
/*
 * Byte-offset counterpart of align_idx(): align @off to @align relative
 * to the node's physical start address.
 */
static unsigned long __init align_off(struct bootmem_data *bdata,
				      unsigned long off, unsigned long align)
{
	unsigned long base = PFN_PHYS(bdata->node_min_pfn);
	unsigned long aligned = ALIGN(base + off, align);

	return aligned - base;
}
c6785b6bf
|
464 |
/*
 * Core single-node allocator: find a free, suitably aligned run of pages
 * in @bdata's bitmap, reserve it, zero it, and return its virtual address.
 * @goal and @limit are physical addresses (0 = no preference / no limit).
 * Returns NULL if the node cannot satisfy the request.
 */
static void * __init alloc_bootmem_bdata(struct bootmem_data *bdata,
					unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	unsigned long fallback = 0;
	unsigned long min, max, start, sidx, midx, step;

	bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
		bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
		align, goal, limit);

	BUG_ON(!size);
	BUG_ON(align & (align - 1));
	BUG_ON(limit && goal + size > limit);

	if (!bdata->node_bootmem_map)
		return NULL;

	min = bdata->node_min_pfn;
	max = bdata->node_low_pfn;

	/* From here on goal/limit are in pfns, not bytes. */
	goal >>= PAGE_SHIFT;
	limit >>= PAGE_SHIFT;

	if (limit && max > limit)
		max = limit;
	if (max <= min)
		return NULL;

	step = max(align >> PAGE_SHIFT, 1UL);

	if (goal && min < goal && goal < max)
		start = ALIGN(goal, step);
	else
		start = ALIGN(min, step);

	sidx = start - bdata->node_min_pfn;
	midx = max - bdata->node_min_pfn;

	if (bdata->hint_idx > sidx) {
		/*
		 * Handle the valid case of sidx being zero and still
		 * catch the fallback below.
		 */
		fallback = sidx + 1;
		sidx = align_idx(bdata, bdata->hint_idx, step);
	}

	while (1) {
		int merge;
		void *region;
		unsigned long eidx, i, start_off, end_off;
find_block:
		sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
		sidx = align_idx(bdata, sidx, step);
		eidx = sidx + PFN_UP(size);

		if (sidx >= midx || eidx > midx)
			break;

		/* Verify the whole candidate run is free. */
		for (i = sidx; i < eidx; i++)
			if (test_bit(i, bdata->node_bootmem_map)) {
				sidx = align_idx(bdata, i, step);
				if (sidx == i)
					sidx += step;
				goto find_block;
			}

		/*
		 * If the previous allocation ended mid-page and we start
		 * right behind it, share that partial page.
		 */
		if (bdata->last_end_off & (PAGE_SIZE - 1) &&
				PFN_DOWN(bdata->last_end_off) + 1 == sidx)
			start_off = align_off(bdata, bdata->last_end_off,
						align);
		else
			start_off = PFN_PHYS(sidx);

		merge = PFN_DOWN(start_off) < sidx;
		end_off = start_off + size;

		bdata->last_end_off = end_off;
		bdata->hint_idx = PFN_UP(end_off);

		/*
		 * Reserve the area now:
		 */
		if (__reserve(bdata, PFN_DOWN(start_off) + merge,
				PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
			BUG();

		region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
				start_off);
		memset(region, 0, size);
		/*
		 * The min_count is set to 0 so that bootmem allocated blocks
		 * are never reported as leaks.
		 */
		kmemleak_alloc(region, size, 0, 0);
		return region;
	}

	/* Nothing above the hint: retry once from the original start. */
	if (fallback) {
		sidx = align_idx(bdata, fallback - 1, step);
		fallback = 0;
		goto find_block;
	}

	return NULL;
}
c12ab504a
|
566 |
/*
 * Try every registered node (in ascending pfn order) that can intersect
 * [goal, limit); falls back to kzalloc() if slab is already up.
 */
static void * __init alloc_bootmem_core(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	bootmem_data_t *bdata;
	void *region;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

	list_for_each_entry(bdata, &bdata_list, list) {
		/* Node entirely below the goal: skip it. */
		if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
			continue;
		/* Node starts at/above the limit: no later node can fit. */
		if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
			break;

		region = alloc_bootmem_bdata(bdata, size, align, goal, limit);
		if (region)
			return region;
	}

	return NULL;
}

/*
 * Like alloc_bootmem_core() but retries once with the goal dropped
 * before giving up.  Never panics.
 */
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	void *ptr;

restart:
	ptr = alloc_bootmem_core(size, align, goal, limit);
	if (ptr)
		return ptr;
	if (goal) {
		goal = 0;
		goto restart;
	}

	return NULL;
}
a66fd7dae
|
607 608 609 610 611 612 613 614 615 616 617 618 619 |
/** * __alloc_bootmem_nopanic - allocate boot memory without panicking * @size: size of the request in bytes * @align: alignment of the region * @goal: preferred starting address of the region * * The goal is dropped if it can not be satisfied and the allocation will * fall back to memory below @goal. * * Allocation may happen on any node in the system. * * Returns NULL on failure. */ |
bb0923a66
|
620 |
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align, |
0f3caba21
|
621 |
unsigned long goal) |
1da177e4c
|
622 |
{ |
08677214e
|
623 |
unsigned long limit = 0; |
08677214e
|
624 |
return ___alloc_bootmem_nopanic(size, align, goal, limit); |
0f3caba21
|
625 |
} |
1da177e4c
|
626 |
|
0f3caba21
|
627 628 629 630 631 632 633 634 635 636 |
/*
 * Panicking wrapper around ___alloc_bootmem_nopanic(): boot cannot
 * proceed without this memory.
 */
static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	void *ptr = ___alloc_bootmem_nopanic(size, align, goal, limit);

	if (!ptr) {
		/* Whoops, we cannot satisfy the allocation request. */
		pr_alert("bootmem alloc of %lu bytes failed!\n", size);
		panic("Out of memory");
	}
	return ptr;
}
1da177e4c
|
642 |
|
a66fd7dae
|
643 644 645 646 647 648 649 650 651 652 653 654 655 |
/** * __alloc_bootmem - allocate boot memory * @size: size of the request in bytes * @align: alignment of the region * @goal: preferred starting address of the region * * The goal is dropped if it can not be satisfied and the allocation will * fall back to memory below @goal. * * Allocation may happen on any node in the system. * * The function panics if the request can not be satisfied. */ |
bb0923a66
|
656 657 |
void * __init __alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal) |
a8062231d
|
658 |
{ |
08677214e
|
659 |
unsigned long limit = 0; |
08677214e
|
660 |
return ___alloc_bootmem(size, align, goal, limit); |
1da177e4c
|
661 |
} |
99ab7b194
|
662 |
/*
 * Node-preferring allocation without panic: try @pgdat first, then any
 * node; retry once with the goal dropped before returning NULL.
 */
void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
				unsigned long size, unsigned long align,
				unsigned long goal, unsigned long limit)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
again:

	/* do not panic in alloc_bootmem_bdata() */
	if (limit && goal + size > limit)
		limit = 0;

	/* Prefer the requested node ... */
	ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
	if (ptr)
		return ptr;

	/* ... but fall back to any node that can satisfy the request. */
	ptr = alloc_bootmem_core(size, align, goal, limit);
	if (ptr)
		return ptr;

	if (goal) {
		goal = 0;
		goto again;
	}

	return NULL;
}

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}
e9079911e
|
693 |
/*
 * Panicking wrapper around ___alloc_bootmem_node_nopanic().
 *
 * NOTE(review): @limit is accepted but 0 is passed down, so the limit is
 * not enforced here — looks deliberate upstream, but confirm.
 */
void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				    unsigned long align, unsigned long goal,
				    unsigned long limit)
{
	void *ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);

	if (!ptr) {
		pr_alert("bootmem alloc of %lu bytes failed!\n", size);
		panic("Out of memory");
	}
	return ptr;
}
a66fd7dae
|
706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 |
/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
}

/*
 * Like __alloc_bootmem_node(), but on configurations with a DMA32 zone
 * first try to place the allocation above MAX_DMA32_PFN (when the node
 * extends sufficiently far beyond it) to preserve low memory.
 */
void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
#ifdef MAX_DMA32_PFN
	unsigned long end_pfn;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	/* update goal according ...MAX_DMA32_PFN */
	end_pfn = pgdat_end_pfn(pgdat);

	/* Only bother if the node reaches well (128MB) past DMA32. */
	if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
	    (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
		void *ptr;
		unsigned long new_goal;

		new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
		ptr = alloc_bootmem_bdata(pgdat->bdata, size, align,
						 new_goal, 0);
		if (ptr)
			return ptr;
	}
#endif

	return __alloc_bootmem_node(pgdat, size, align, goal);
}
a66fd7dae
|
756 757 758 759 760 761 762 763 764 765 766 767 768 |
/** * __alloc_bootmem_low - allocate low boot memory * @size: size of the request in bytes * @align: alignment of the region * @goal: preferred starting address of the region * * The goal is dropped if it can not be satisfied and the allocation will * fall back to memory below @goal. * * Allocation may happen on any node in the system. * * The function panics if the request can not be satisfied. */ |
bb0923a66
|
769 770 |
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align, unsigned long goal) |
008857c1a
|
771 |
{ |
0f3caba21
|
772 |
return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT); |
008857c1a
|
773 |
} |
38fa4175e
|
774 775 776 777 778 779 780 |
/*
 * Non-panicking variant of __alloc_bootmem_low(); returns NULL on failure.
 */
void * __init __alloc_bootmem_low_nopanic(unsigned long size,
					  unsigned long align,
					  unsigned long goal)
{
	return ___alloc_bootmem_nopanic(size, align, goal,
					ARCH_LOW_ADDRESS_LIMIT);
}
a66fd7dae
|
781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 |
/** * __alloc_bootmem_low_node - allocate low boot memory from a specific node * @pgdat: node to allocate from * @size: size of the request in bytes * @align: alignment of the region * @goal: preferred starting address of the region * * The goal is dropped if it can not be satisfied and the allocation will * fall back to memory below @goal. * * Allocation may fall back to any node in the system if the specified node * can not hold the requested memory. * * The function panics if the request can not be satisfied. */ |
008857c1a
|
796 797 798 |
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal) { |
c91c4773b
|
799 800 |
if (WARN_ON_ONCE(slab_is_available())) return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id); |
e9079911e
|
801 802 |
return ___alloc_bootmem_node(pgdat, size, align, goal, ARCH_LOW_ADDRESS_LIMIT); |
008857c1a
|
803 |
} |