Commit 4ed6552f715983bfc7d212c1199a1f796f1144ad

Authored by Kumar Gala
Committed by Marian Balakowicz
1 parent 4648c2e7a1

[new uImage] Introduce lmb from linux kernel for memory mgmt of boot images

Introduce the LMB (logical memory block) library, used by the PowerPC
Linux kernel, as a clean way to manage the memory regions occupied by the
various boot images and structures. This code will allow us to simplify
bootm and its support functions.

Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
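
For orientation, a minimal sketch of how the new API fits together. The
calls are the ones declared in include/lmb.h below; the base/size values
are made up for illustration:

    #include <lmb.h>

    /* Illustrative only: register one DRAM bank, carve out space that is
     * already occupied, then place a new blob below a chosen ceiling. */
    void lmb_usage_sketch(void)
    {
            struct lmb lmb;
            ulong addr;

            lmb_init(&lmb);                          /* empty region tables */
            lmb_add(&lmb, 0x00000000, 64 << 20);     /* 64 MiB of DRAM at 0 */
            lmb_reserve(&lmb, 0x00100000, 2 << 20);  /* e.g. an image at 1 MiB */

            /* top-down: 1 MiB, 4 KiB aligned, below 16 MiB; 0 on failure */
            addr = lmb_alloc_base(&lmb, 1 << 20, 4096, 16 << 20);
    }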

Showing 5 changed files with 351 additions and 0 deletions

common/cmd_bootm.c

@@ -34,6 +34,7 @@
 #include <zlib.h>
 #include <bzlib.h>
 #include <environment.h>
+#include <lmb.h>
 #include <asm/byteorder.h>

 #ifdef CFG_HUSH_PARSER

@@ -118,9 +119,20 @@
         ulong image_start, image_end;
         ulong load_start, load_end;

+        struct lmb lmb;
+
         memset ((void *)&images, 0, sizeof (images));
         images.verify = getenv_verify();
+        images.lmb = &lmb;

+        lmb_init(&lmb);
+
+#ifdef CFG_SDRAM_BASE
+        lmb_add(&lmb, CFG_SDRAM_BASE, gd->bd->bi_memsize);
+#else
+        lmb_add(&lmb, 0, gd->bd->bi_memsize);
+#endif
+
         /* get kernel image header, start address and length */
         os_hdr = get_kernel (cmdtp, flag, argc, argv,
                         &images, &os_data, &os_len);

@@ -236,6 +248,8 @@
         }

         show_boot_progress (8);
+
+        lmb_reserve(&lmb, load_start, (load_end - load_start));

         switch (os) {
         default:        /* handled by (original) Linux case */

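The bootm hunks above seed the allocator with the board's DRAM
(CFG_SDRAM_BASE when defined, otherwise 0) and reserve the region the
loaded OS image occupies. Since bootm_headers_t carries the handle (see
the image.h hunk below), follow-up code can keep other blobs out of
claimed memory; the helper below is a hypothetical sketch, not part of
this commit:

    #include <image.h>

    /* Hypothetical: claim a ramdisk's memory so later lmb_alloc_base()
     * calls cannot hand the same range out again. */
    static int reserve_ramdisk(bootm_headers_t *images, ulong rd_start, ulong rd_len)
    {
            if (lmb_reserve(images->lmb, rd_start, rd_len) < 0)
                    return -1;      /* region table full (MAX_LMB_REGIONS) */
            return 0;
    }
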
include/image.h

@@ -37,6 +37,7 @@
 #include <command.h>

 #ifndef USE_HOSTCC
+#include <lmb.h>
 #include <linux/string.h>
 #include <asm/u-boot.h>

@@ -203,6 +204,7 @@
         char *fit_uname_fdt;    /* FDT blob node unit name */
 #endif
         int verify;             /* getenv("verify")[0] != 'n' */
+        struct lmb *lmb;        /* for memory mgmt */
 #endif
 } bootm_headers_t;

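Note the lifetime: the struct lmb itself lives on do_bootm()'s stack
(see the cmd_bootm.c hunk above); bootm_headers_t only carries a
pointer, valid for a single bootm invocation. A minimal consumer, with
a hypothetical function name:

    /* Hypothetical: pick a 4 KiB-aligned home for a blob of 'len' bytes
     * below 'limit', using the lmb seeded by do_bootm(). */
    static ulong place_blob(bootm_headers_t *images, ulong len, ulong limit)
    {
            return lmb_alloc_base(images->lmb, len, 4096, limit);
    }
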
include/lmb.h (new file)

+#ifndef _LINUX_LMB_H
+#define _LINUX_LMB_H
+#ifdef __KERNEL__
+
+#include <asm/types.h>
+/*
+ * Logical memory blocks.
+ *
+ * Copyright (C) 2001 Peter Bergner, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define MAX_LMB_REGIONS 8
+
+struct lmb_property {
+        ulong base;
+        ulong size;
+};
+
+struct lmb_region {
+        unsigned long cnt;
+        ulong size;
+        struct lmb_property region[MAX_LMB_REGIONS+1];
+};
+
+struct lmb {
+        struct lmb_region memory;
+        struct lmb_region reserved;
+};
+
+extern struct lmb lmb;
+
+extern void lmb_init(struct lmb *lmb);
+extern long lmb_add(struct lmb *lmb, ulong base, ulong size);
+extern long lmb_reserve(struct lmb *lmb, ulong base, ulong size);
+extern ulong lmb_alloc(struct lmb *lmb, ulong size, ulong align);
+extern ulong lmb_alloc_base(struct lmb *lmb, ulong size, ulong align, ulong max_addr);
+extern ulong __lmb_alloc_base(struct lmb *lmb, ulong size, ulong align, ulong max_addr);
+extern int lmb_is_reserved(struct lmb *lmb, ulong addr);
+
+extern void lmb_dump_all(struct lmb *lmb);
+
+static inline ulong
+lmb_size_bytes(struct lmb_region *type, unsigned long region_nr)
+{
+        return type->region[region_nr].size;
+}
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_LMB_H */

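The header fixes each table at MAX_LMB_REGIONS (8) usable entries, with
one spare slot consumed during insertion. Duplicate and adjacent ranges
are merged by lmb_add_region() in lib_generic/lmb.c below, so the count
only grows for disjoint ranges. A small sketch of the expected
semantics; the commented values follow this commit's code, not a
documented contract:

    #include <common.h>
    #include <lmb.h>

    void coalesce_sketch(void)
    {
            struct lmb lmb;

            lmb_init(&lmb);
            lmb_add(&lmb, 0x0000, 0x1000);    /* first range */
            lmb_add(&lmb, 0x1000, 0x1000);    /* adjacent: merged, cnt stays 1 */
            lmb_add(&lmb, 0x100000, 0x1000);  /* disjoint: new entry, cnt = 2 */

            /* lmb_size_bytes() reads one entry; region 0 now spans 0x2000 */
            debug("region 0: 0x%lx bytes\n", lmb_size_bytes(&lmb.memory, 0));
    }
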
lib_generic/Makefile

@@ -34,6 +34,7 @@
 COBJS-y += ctype.o
 COBJS-y += display_options.o
 COBJS-y += div64.o
+COBJS-y += lmb.o
 COBJS-y += ldiv.o
 COBJS-y += sha1.o
 COBJS-y += string.o

lib_generic/lmb.c (new file)

+/*
+ * Procedures for maintaining information about logical memory blocks.
+ *
+ * Peter Bergner, IBM Corp.  June 2001.
+ * Copyright (C) 2001 Peter Bergner.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <common.h>
+#include <lmb.h>
+
+#define LMB_ALLOC_ANYWHERE 0
+
+void lmb_dump_all(struct lmb *lmb)
+{
+#ifdef DEBUG
+        unsigned long i;
+
+        debug("lmb_dump_all:\n");
+        debug("    memory.cnt = 0x%lx\n", lmb->memory.cnt);
+        debug("    memory.size = 0x%08x\n", lmb->memory.size);
+        for (i=0; i < lmb->memory.cnt; i++) {
+                debug("    memory.reg[0x%x].base = 0x%08x\n", i,
+                        lmb->memory.region[i].base);
+                debug("            .size = 0x%08x\n",
+                        lmb->memory.region[i].size);
+        }
+
+        debug("\n    reserved.cnt = 0x%lx\n", lmb->reserved.cnt);
+        debug("    reserved.size = 0x%08x\n", lmb->reserved.size);
+        for (i=0; i < lmb->reserved.cnt; i++) {
+                debug("    reserved.reg[0x%x].base = 0x%08x\n", i,
+                        lmb->reserved.region[i].base);
+                debug("              .size = 0x%08x\n",
+                        lmb->reserved.region[i].size);
+        }
+#endif /* DEBUG */
+}
+
+static unsigned long lmb_addrs_overlap(ulong base1,
+                ulong size1, ulong base2, ulong size2)
+{
+        return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
+}
+
+static long lmb_addrs_adjacent(ulong base1, ulong size1,
+                ulong base2, ulong size2)
+{
+        if (base2 == base1 + size1)
+                return 1;
+        else if (base1 == base2 + size2)
+                return -1;
+
+        return 0;
+}
+
+static long lmb_regions_adjacent(struct lmb_region *rgn,
+                unsigned long r1, unsigned long r2)
+{
+        ulong base1 = rgn->region[r1].base;
+        ulong size1 = rgn->region[r1].size;
+        ulong base2 = rgn->region[r2].base;
+        ulong size2 = rgn->region[r2].size;
+
+        return lmb_addrs_adjacent(base1, size1, base2, size2);
+}
+
+static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
+{
+        unsigned long i;
+
+        for (i = r; i < rgn->cnt - 1; i++) {
+                rgn->region[i].base = rgn->region[i + 1].base;
+                rgn->region[i].size = rgn->region[i + 1].size;
+        }
+        rgn->cnt--;
+}
+
+/* Assumption: base addr of region 1 < base addr of region 2 */
+static void lmb_coalesce_regions(struct lmb_region *rgn,
+                unsigned long r1, unsigned long r2)
+{
+        rgn->region[r1].size += rgn->region[r2].size;
+        lmb_remove_region(rgn, r2);
+}
+
+void lmb_init(struct lmb *lmb)
+{
+        /* Create a dummy zero size LMB which will get coalesced away later.
+         * This simplifies the lmb_add() code below...
+         */
+        lmb->memory.region[0].base = 0;
+        lmb->memory.region[0].size = 0;
+        lmb->memory.cnt = 1;
+        lmb->memory.size = 0;
+
+        /* Ditto. */
+        lmb->reserved.region[0].base = 0;
+        lmb->reserved.region[0].size = 0;
+        lmb->reserved.cnt = 1;
+        lmb->reserved.size = 0;
+}
+
+/* This routine is called with relocation disabled. */
+static long lmb_add_region(struct lmb_region *rgn, ulong base, ulong size)
+{
+        unsigned long coalesced = 0;
+        long adjacent, i;
+
+        if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
+                rgn->region[0].base = base;
+                rgn->region[0].size = size;
+                return 0;
+        }
+
+        /* First try and coalesce this LMB with another. */
+        for (i=0; i < rgn->cnt; i++) {
+                ulong rgnbase = rgn->region[i].base;
+                ulong rgnsize = rgn->region[i].size;
+
+                if ((rgnbase == base) && (rgnsize == size))
+                        /* Already have this region, so we're done */
+                        return 0;
+
+                adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
+                if (adjacent > 0) {
+                        rgn->region[i].base -= size;
+                        rgn->region[i].size += size;
+                        coalesced++;
+                        break;
+                } else if (adjacent < 0) {
+                        rgn->region[i].size += size;
+                        coalesced++;
+                        break;
+                }
+        }
+
+        if ((i < rgn->cnt-1) && lmb_regions_adjacent(rgn, i, i+1)) {
+                lmb_coalesce_regions(rgn, i, i+1);
+                coalesced++;
+        }
+
+        if (coalesced)
+                return coalesced;
+        if (rgn->cnt >= MAX_LMB_REGIONS)
+                return -1;
+
+        /* Couldn't coalesce the LMB, so add it to the sorted table. */
+        for (i = rgn->cnt-1; i >= 0; i--) {
+                if (base < rgn->region[i].base) {
+                        rgn->region[i+1].base = rgn->region[i].base;
+                        rgn->region[i+1].size = rgn->region[i].size;
+                } else {
+                        rgn->region[i+1].base = base;
+                        rgn->region[i+1].size = size;
+                        break;
+                }
+        }
+
+        if (base < rgn->region[0].base) {
+                rgn->region[0].base = base;
+                rgn->region[0].size = size;
+        }
+
+        rgn->cnt++;
+
+        return 0;
+}
+
+/* This routine may be called with relocation disabled. */
+long lmb_add(struct lmb *lmb, ulong base, ulong size)
+{
+        struct lmb_region *_rgn = &(lmb->memory);
+
+        return lmb_add_region(_rgn, base, size);
+}
+
+long lmb_reserve(struct lmb *lmb, ulong base, ulong size)
+{
+        struct lmb_region *_rgn = &(lmb->reserved);
+
+        return lmb_add_region(_rgn, base, size);
+}
+
+long lmb_overlaps_region(struct lmb_region *rgn, ulong base,
+                ulong size)
+{
+        unsigned long i;
+
+        for (i=0; i < rgn->cnt; i++) {
+                ulong rgnbase = rgn->region[i].base;
+                ulong rgnsize = rgn->region[i].size;
+                if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
+                        break;
+                }
+        }
+
+        return (i < rgn->cnt) ? i : -1;
+}
+
+ulong lmb_alloc(struct lmb *lmb, ulong size, ulong align)
+{
+        return lmb_alloc_base(lmb, size, align, LMB_ALLOC_ANYWHERE);
+}
+
+ulong lmb_alloc_base(struct lmb *lmb, ulong size, ulong align, ulong max_addr)
+{
+        ulong alloc;
+
+        alloc = __lmb_alloc_base(lmb, size, align, max_addr);
+
+        if (alloc == 0)
+                printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
+                        size, max_addr);
+
+        return alloc;
+}
+
+static ulong lmb_align_down(ulong addr, ulong size)
+{
+        return addr & ~(size - 1);
+}
+
+static ulong lmb_align_up(ulong addr, ulong size)
+{
+        return (addr + (size - 1)) & ~(size - 1);
+}
+
+ulong __lmb_alloc_base(struct lmb *lmb, ulong size, ulong align, ulong max_addr)
+{
+        long i, j;
+        ulong base = 0;
+
+        for (i = lmb->memory.cnt-1; i >= 0; i--) {
+                ulong lmbbase = lmb->memory.region[i].base;
+                ulong lmbsize = lmb->memory.region[i].size;
+
+                if (max_addr == LMB_ALLOC_ANYWHERE)
+                        base = lmb_align_down(lmbbase + lmbsize - size, align);
+                else if (lmbbase < max_addr) {
+                        base = min(lmbbase + lmbsize, max_addr);
+                        base = lmb_align_down(base - size, align);
+                } else
+                        continue;
+
+                while ((lmbbase <= base) &&
+                       ((j = lmb_overlaps_region(&(lmb->reserved), base, size)) >= 0))
+                        base = lmb_align_down(lmb->reserved.region[j].base - size,
+                                              align);
+
+                if ((base != 0) && (lmbbase <= base))
+                        break;
+        }
+
+        if (i < 0)
+                return 0;
+
+        if (lmb_add_region(&(lmb->reserved), base, lmb_align_up(size, align)) < 0)
+                return 0;
+
+        return base;
+}
+
+int lmb_is_reserved(struct lmb *lmb, ulong addr)
+{
+        int i;
+
+        for (i = 0; i < lmb->reserved.cnt; i++) {
+                ulong upper = lmb->reserved.region[i].base +
+                        lmb->reserved.region[i].size - 1;
+                if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
+                        return 1;
+        }
+        return 0;
+}
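
To see the allocator end to end, a short exercise of the code above
(addresses made up; a sketch, not a test from this commit):

    #include <common.h>
    #include <lmb.h>

    void alloc_sketch(void)
    {
            struct lmb lmb;
            ulong a, b;

            lmb_init(&lmb);
            lmb_add(&lmb, 0, 0x2000000);    /* 32 MiB of memory at 0 */

            /* __lmb_alloc_base() scans memory top-down, aligning down and
             * stepping below any reserved range it collides with. */
            a = lmb_alloc(&lmb, 0x100000, 0x1000);  /* 0x1F00000 */

            /* the first allocation reserved itself, so the next one lands
             * immediately below it (and the two reservations coalesce) */
            b = lmb_alloc(&lmb, 0x100000, 0x1000);  /* 0x1E00000 */

            lmb_dump_all(&lmb);     /* prints the tables when DEBUG is set */
    }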