Commit c3f72b5706716ada7923def513486ab7bb3a5301
1 parent: 35a1f0bd07.
Exists in master and in 20 other branches.
memblock: Factor the lowest level alloc function
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Showing 1 changed file with 27 additions and 32 deletions (side-by-side diff).
mm/memblock.c
... | ... | @@ -294,8 +294,8 @@ |
294 | 294 | return (addr + (size - 1)) & ~(size - 1); |
295 | 295 | } |
296 | 296 | |
297 | -static u64 __init memblock_alloc_nid_unreserved(u64 start, u64 end, | |
298 | - u64 size, u64 align) | |
297 | +static u64 __init memblock_alloc_region(u64 start, u64 end, | |
298 | + u64 size, u64 align) | |
299 | 299 | { |
300 | 300 | u64 base, res_base; |
301 | 301 | long j; |
... | ... | @@ -318,6 +318,13 @@ |
318 | 318 | return ~(u64)0; |
319 | 319 | } |
320 | 320 | |
321 | +u64 __weak __init memblock_nid_range(u64 start, u64 end, int *nid) | |
322 | +{ | |
323 | + *nid = 0; | |
324 | + | |
325 | + return end; | |
326 | +} | |
327 | + | |
321 | 328 | static u64 __init memblock_alloc_nid_region(struct memblock_region *mp, |
322 | 329 | u64 size, u64 align, int nid) |
323 | 330 | { |
... | ... | @@ -333,8 +340,7 @@ |
333 | 340 | |
334 | 341 | this_end = memblock_nid_range(start, end, &this_nid); |
335 | 342 | if (this_nid == nid) { |
336 | - u64 ret = memblock_alloc_nid_unreserved(start, this_end, | |
337 | - size, align); | |
343 | + u64 ret = memblock_alloc_region(start, this_end, size, align); | |
338 | 344 | if (ret != ~(u64)0) |
339 | 345 | return ret; |
340 | 346 | } |
... | ... | @@ -351,6 +357,10 @@ |
351 | 357 | |
352 | 358 | BUG_ON(0 == size); |
353 | 359 | |
360 | + /* We do a bottom-up search for a region with the right | |
361 | + * nid since that's easier considering how memblock_nid_range() | |
362 | + * works | |
363 | + */ | |
354 | 364 | size = memblock_align_up(size, align); |
355 | 365 | |
356 | 366 | for (i = 0; i < mem->cnt; i++) { |
... | ... | @@ -383,7 +393,7 @@ |
383 | 393 | |
384 | 394 | u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr) |
385 | 395 | { |
386 | - long i, j; | |
396 | + long i; | |
387 | 397 | u64 base = 0; |
388 | 398 | u64 res_base; |
389 | 399 | |
390 | 400 | |
... | ... | @@ -396,33 +406,24 @@ |
396 | 406 | if (max_addr == MEMBLOCK_ALLOC_ANYWHERE) |
397 | 407 | max_addr = MEMBLOCK_REAL_LIMIT; |
398 | 408 | |
409 | + /* Pump up max_addr */ | |
410 | + if (max_addr == MEMBLOCK_ALLOC_ANYWHERE) | |
411 | + max_addr = ~(u64)0; | |
412 | + | |
413 | + /* We do a top-down search, this tends to limit memory | |
414 | + * fragmentation by keeping early boot allocs near the | |
415 | + * top of memory | |
416 | + */ | |
399 | 417 | for (i = memblock.memory.cnt - 1; i >= 0; i--) { |
400 | 418 | u64 memblockbase = memblock.memory.regions[i].base; |
401 | 419 | u64 memblocksize = memblock.memory.regions[i].size; |
402 | 420 | |
403 | 421 | if (memblocksize < size) |
404 | 422 | continue; |
405 | - if (max_addr == MEMBLOCK_ALLOC_ANYWHERE) | |
406 | - base = memblock_align_down(memblockbase + memblocksize - size, align); | |
407 | - else if (memblockbase < max_addr) { | |
408 | - base = min(memblockbase + memblocksize, max_addr); | |
409 | - base = memblock_align_down(base - size, align); | |
410 | - } else | |
411 | - continue; | |
412 | - | |
413 | - while (base && memblockbase <= base) { | |
414 | - j = memblock_overlaps_region(&memblock.reserved, base, size); | |
415 | - if (j < 0) { | |
416 | - /* this area isn't reserved, take it */ | |
417 | - if (memblock_add_region(&memblock.reserved, base, size) < 0) | |
418 | - return 0; | |
419 | - return base; | |
420 | - } | |
421 | - res_base = memblock.reserved.regions[j].base; | |
422 | - if (res_base < size) | |
423 | - break; | |
424 | - base = memblock_align_down(res_base - size, align); | |
425 | - } | |
423 | + base = min(memblockbase + memblocksize, max_addr); | |
424 | + res_base = memblock_alloc_region(memblockbase, base, size, align); | |
425 | + if (res_base != ~(u64)0) | |
426 | + return res_base; | |
426 | 427 | } |
427 | 428 | return 0; |
428 | 429 | } |
... | ... | @@ -526,12 +527,5 @@ |
526 | 527 | int memblock_is_region_reserved(u64 base, u64 size) |
527 | 528 | { |
528 | 529 | return memblock_overlaps_region(&memblock.reserved, base, size) >= 0; |
529 | -} | |
530 | - | |
531 | -u64 __weak memblock_nid_range(u64 start, u64 end, int *nid) | |
532 | -{ | |
533 | - *nid = 0; | |
534 | - | |
535 | - return end; | |
536 | 530 | } |