Commit 2876d2923afe42ccdfb65a8bb286271fce78b626

Authored by Ye Li
1 parent 108a69331a

MLK-21885 lmb: Handle the overlap case for lmb reserve

lmb reserve is used to reserve memory so that when images (kernel, dtb, initrd)
are loaded, they are not placed into the reserved regions.

The problem with the current lmb code is that it does not handle overlaps. When
adding a new reserved region, the reservation fails if it overlaps a region that
has already been added to lmb. One example: reserved memory described in the DTB
may overlap the U-Boot relocation address. lmb reserves the relocation region
first, so adding the reserved memory from the DTB afterwards fails.

If the overlap case is handled instead, the conflict can be resolved by reserving
the maximal region covering both overlapping regions: the parts of the new region
that fall outside the existing reservation are reserved separately, so this case
no longer fails (a standalone sketch of this splitting follows the diff below).

Signed-off-by: Ye Li <ye.li@nxp.com>
Reviewed-by: Peng Fan <peng.fan@nxp.com>
(cherry picked from commit 37d86c68816dffde3dc8dcda5b9d67a195b2f9c2)

Showing 1 changed file with 36 additions and 8 deletions

@@ -173,7 +173,7 @@
 			break;
 		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
 			/* regions overlap */
-			return -1;
+			return -2;
 		}
 	}
 
@@ -266,13 +266,6 @@
 	return lmb_add_region(rgn, end + 1, rgnend - end);
 }
 
-long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
-{
-	struct lmb_region *_rgn = &(lmb->reserved);
-
-	return lmb_add_region(_rgn, base, size);
-}
-
 static long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base,
 				phys_size_t size)
 {
@@ -286,6 +279,41 @@
 	}
 
 	return (i < rgn->cnt) ? i : -1;
+}
+
+long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
+{
+	struct lmb_region *_rgn = &(lmb->reserved);
+	long ret = lmb_add_region(_rgn, base, size);
+	long overlap_rgn;
+	phys_addr_t res_base;
+	phys_size_t res_size;
+
+	/* Handle the overlap */
+	if (ret == -2) {
+		overlap_rgn = lmb_overlaps_region(_rgn, base, size);
+		res_base = lmb->reserved.region[overlap_rgn].base;
+		res_size = lmb->reserved.region[overlap_rgn].size;
+
+		if ((base >= res_base) && ((base + size) <= (res_base + res_size))) {
+			/* new region is inside reserved region, so it is already reserved */
+			return 0;
+		} else {
+			if (base < res_base) {
+				ret = lmb_reserve(lmb, base, res_base - base);
+				if (ret < 0)
+					return ret;
+			}
+
+			if ((base + size) > (res_base + res_size)) {
+				ret = lmb_reserve(lmb, res_base + res_size, (base + size) - (res_base + res_size));
+				if (ret < 0)
+					return ret;
+			}
+		}
+	}
+
+	return ret;
 }
 
 phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align)
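
For illustration, here is a minimal standalone sketch in plain C. It is not the
U-Boot lmb code: the single pre-existing reservation, the helper names, and the
addresses below are all hypothetical. It only models the splitting that the new
lmb_reserve() performs when lmb_add_region() reports an overlap (-2): the part of
the new region already covered by the existing reservation is skipped, and the
pieces before and after it are reserved separately.

	#include <stdio.h>
	#include <stdint.h>

	typedef uint64_t phys_addr_t;
	typedef uint64_t phys_size_t;

	/* One existing reserved region, standing in for lmb->reserved.region[i] */
	static const phys_addr_t res_base = 0x80000000;
	static const phys_size_t res_size = 0x00100000;	/* 1 MiB */

	/* Pretend to reserve [base, base + size); just print what would be reserved */
	static void do_reserve(phys_addr_t base, phys_size_t size)
	{
		printf("reserve [0x%llx, 0x%llx)\n",
		       (unsigned long long)base,
		       (unsigned long long)(base + size));
	}

	/*
	 * Models only the overlap branch (ret == -2): the new region is assumed
	 * to overlap the existing one. The covered part is skipped; the parts
	 * before and after the existing reservation are reserved separately.
	 */
	static void reserve_with_overlap(phys_addr_t base, phys_size_t size)
	{
		if (base >= res_base && base + size <= res_base + res_size) {
			/* Fully inside the existing reservation: nothing to do */
			printf("already reserved\n");
			return;
		}

		if (base < res_base)
			do_reserve(base, res_base - base);	/* piece before */

		if (base + size > res_base + res_size)
			do_reserve(res_base + res_size,
				   (base + size) - (res_base + res_size));	/* piece after */
	}

	int main(void)
	{
		/*
		 * Example: a 2 MiB region (think DTB reserved-memory) overlapping
		 * the 1 MiB region reserved above. With the old code the second
		 * reservation would simply fail; with overlap handling only the
		 * uncovered tail past 0x80100000 is reserved.
		 */
		reserve_with_overlap(0x80080000, 0x00200000);
		return 0;
	}

In the real patch the two partial reservations go back through lmb_add_region(),
which coalesces adjacent regions, so the reserved list ends up holding the
maximal region covering both the old and the new reservation, which is why the
DTB/relocation overlap no longer fails.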