  // SPDX-License-Identifier: GPL-2.0+
  /*
   * Procedures for maintaining information about logical memory blocks.
   *
   * Peter Bergner, IBM Corp.	June 2001.
   * Copyright (C) 2001 Peter Bergner.
   */
  
  #include <common.h>
  #include <lmb.h>
  #include <malloc.h>
  
  #define LMB_ALLOC_ANYWHERE	0
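
/*
 * In outline (a summary of what this file implements, not new behaviour):
 * a struct lmb carries two region tables, lmb->memory for RAM that may be
 * used and lmb->reserved for ranges that must not be handed out.
 * Allocations walk the memory table top-down and record the winning range
 * in lmb->reserved.
 */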
  
  void lmb_dump_all(struct lmb *lmb)
  {
  #ifdef DEBUG
  	unsigned long i;
  
  	debug("lmb_dump_all:
  ");
  	debug("    memory.cnt		   = 0x%lx
  ", lmb->memory.cnt);
  	debug("    memory.size		   = 0x%llx
  ",
  	      (unsigned long long)lmb->memory.size);
  	for (i = 0; i < lmb->memory.cnt; i++) {
  		debug("    memory.reg[0x%lx].base   = 0x%llx
  ", i,
  		      (unsigned long long)lmb->memory.region[i].base);
  		debug("		   .size   = 0x%llx
  ",
  		      (unsigned long long)lmb->memory.region[i].size);
  	}
  	debug("
      reserved.cnt	   = 0x%lx
  ",
  		lmb->reserved.cnt);
  	debug("    reserved.size	   = 0x%llx
  ",
  		(unsigned long long)lmb->reserved.size);
  	for (i = 0; i < lmb->reserved.cnt; i++) {
  		debug("    reserved.reg[0x%lx].base = 0x%llx
  ", i,
  		      (unsigned long long)lmb->reserved.region[i].base);
  		debug("		     .size = 0x%llx
  ",
  		      (unsigned long long)lmb->reserved.region[i].size);
  	}
  #endif /* DEBUG */
  }
  static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
  			      phys_addr_t base2, phys_size_t size2)
  {
  	const phys_addr_t base1_end = base1 + size1 - 1;
  	const phys_addr_t base2_end = base2 + size2 - 1;
  
  	return ((base1 <= base2_end) && (base2 <= base1_end));
  }
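
/*
 * Return convention (as implemented below): 1 if region 2 immediately
 * follows region 1, -1 if region 1 immediately follows region 2, and 0 if
 * the two regions are not adjacent.
 */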
  static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
  			       phys_addr_t base2, phys_size_t size2)
  {
  	if (base2 == base1 + size1)
  		return 1;
  	else if (base1 == base2 + size2)
  		return -1;
  
  	return 0;
  }
  static long lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1,
  				 unsigned long r2)
  {
  	phys_addr_t base1 = rgn->region[r1].base;
  	phys_size_t size1 = rgn->region[r1].size;
  	phys_addr_t base2 = rgn->region[r2].base;
  	phys_size_t size2 = rgn->region[r2].size;
  
  	return lmb_addrs_adjacent(base1, size1, base2, size2);
  }
  
  static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
  {
  	unsigned long i;
  
  	for (i = r; i < rgn->cnt - 1; i++) {
  		rgn->region[i].base = rgn->region[i + 1].base;
  		rgn->region[i].size = rgn->region[i + 1].size;
  	}
  	rgn->cnt--;
  }
  
  /* Assumption: base addr of region 1 < base addr of region 2 */
  static void lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1,
  				 unsigned long r2)
  {
  	rgn->region[r1].size += rgn->region[r2].size;
  	lmb_remove_region(rgn, r2);
  }
  
  void lmb_init(struct lmb *lmb)
  {
  	lmb->memory.cnt = 0;
  	lmb->memory.size = 0;
  	lmb->reserved.cnt = 0;
  	lmb->reserved.size = 0;
  }
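
/*
 * Common reservation step: give the architecture and board hooks a chance
 * to carve out their ranges, then, when libfdt support is enabled and a
 * blob is supplied, honour the device tree's memory reserve map as well.
 */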
  static void lmb_reserve_common(struct lmb *lmb, void *fdt_blob)
  {
  	arch_lmb_reserve(lmb);
  	board_lmb_reserve(lmb);
  
  	if (IMAGE_ENABLE_OF_LIBFDT && fdt_blob)
  		boot_fdt_add_mem_rsv_regions(lmb, fdt_blob);
  }
  /* Initialize the struct, add memory and call arch/board reserve functions */
  void lmb_init_and_reserve(struct lmb *lmb, bd_t *bd, void *fdt_blob)
  {
  #ifdef CONFIG_NR_DRAM_BANKS
  	int i;
  #endif
  
  	lmb_init(lmb);
  #ifdef CONFIG_NR_DRAM_BANKS
  	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
  		if (bd->bi_dram[i].size) {
  			lmb_add(lmb, bd->bi_dram[i].start,
  				bd->bi_dram[i].size);
  		}
  	}
  #else
  	if (bd->bi_memsize)
  		lmb_add(lmb, bd->bi_memstart, bd->bi_memsize);
  #endif
  	lmb_reserve_common(lmb, fdt_blob);
  }
  
  /* Initialize the struct, add memory and call arch/board reserve functions */
  void lmb_init_and_reserve_range(struct lmb *lmb, phys_addr_t base,
  				phys_size_t size, void *fdt_blob)
  {
  	lmb_init(lmb);
  	lmb_add(lmb, base, size);
  	lmb_reserve_common(lmb, fdt_blob);
  }
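
/*
 * Typical usage, e.g. from image-loading code (an illustrative sketch; the
 * error handling, size and alignment below are assumptions, not anything
 * this file mandates):
 *
 *	struct lmb lmb;
 *	phys_addr_t buf;
 *
 *	lmb_init_and_reserve(&lmb, gd->bd, (void *)gd->fdt_blob);
 *	buf = lmb_alloc(&lmb, 0x100000, 0x1000);
 *	if (!buf)
 *		return -ENOMEM;
 */
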
/*
 * This routine is called with relocation disabled.
 *
 * Returns 0 when the region was inserted or was already present, a positive
 * count when it was coalesced with a neighbour, -1 when the region table is
 * full, and -2 when the new region overlaps an existing one.
 */
  static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t size)
  {
  	unsigned long coalesced = 0;
  	long adjacent, i;
  	if (rgn->cnt == 0) {
  		rgn->region[0].base = base;
  		rgn->region[0].size = size;
  		rgn->cnt = 1;
  		return 0;
  	}
  
  	/* First try and coalesce this LMB with another. */
  	for (i = 0; i < rgn->cnt; i++) {
  		phys_addr_t rgnbase = rgn->region[i].base;
  		phys_size_t rgnsize = rgn->region[i].size;
  
  		if ((rgnbase == base) && (rgnsize == size))
  			/* Already have this region, so we're done */
  			return 0;
  		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
  		if (adjacent > 0) {
  			rgn->region[i].base -= size;
  			rgn->region[i].size += size;
  			coalesced++;
  			break;
  		} else if (adjacent < 0) {
  			rgn->region[i].size += size;
  			coalesced++;
  			break;
  		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
  			/* regions overlap */
  			return -2;
  		}
  	}
  	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
  		lmb_coalesce_regions(rgn, i, i + 1);
  		coalesced++;
  	}
  
  	if (coalesced)
  		return coalesced;
  	if (rgn->cnt >= MAX_LMB_REGIONS)
  		return -1;
  
  	/* Couldn't coalesce the LMB, so add it to the sorted table. */
  	for (i = rgn->cnt-1; i >= 0; i--) {
  		if (base < rgn->region[i].base) {
  			rgn->region[i + 1].base = rgn->region[i].base;
  			rgn->region[i + 1].size = rgn->region[i].size;
  		} else {
  			rgn->region[i + 1].base = base;
  			rgn->region[i + 1].size = size;
  			break;
  		}
  	}
  
  	if (base < rgn->region[0].base) {
  		rgn->region[0].base = base;
  		rgn->region[0].size = size;
  	}
  
  	rgn->cnt++;
  
  	return 0;
  }
  
  /* This routine may be called with relocation disabled. */
  long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size)
  {
  	struct lmb_region *_rgn = &(lmb->memory);
  
  	return lmb_add_region(_rgn, base, size);
  }
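
/*
 * Remove [base, base + size - 1] from the reserved table. The range may
 * cover a whole reserved region or sit at its front, its end, or in the
 * middle (which splits the region in two). Returns 0 on success and -1 if
 * no single reserved region contains the whole range.
 */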
  long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
  {
  	struct lmb_region *rgn = &(lmb->reserved);
  	phys_addr_t rgnbegin, rgnend;
  	phys_addr_t end = base + size - 1;
  	int i;
  
	rgnbegin = rgnend = 0; /* suppress gcc warnings */
  
	/* Find the region that (base, size) belongs to */
  	for (i = 0; i < rgn->cnt; i++) {
  		rgnbegin = rgn->region[i].base;
  		rgnend = rgnbegin + rgn->region[i].size - 1;
  
  		if ((rgnbegin <= base) && (end <= rgnend))
  			break;
  	}
  
  	/* Didn't find the region */
  	if (i == rgn->cnt)
  		return -1;
  
	/* Check to see if we are removing the entire region */
  	if ((rgnbegin == base) && (rgnend == end)) {
  		lmb_remove_region(rgn, i);
  		return 0;
  	}
  
  	/* Check to see if region is matching at the front */
  	if (rgnbegin == base) {
  		rgn->region[i].base = end + 1;
  		rgn->region[i].size -= size;
  		return 0;
  	}
  
  	/* Check to see if the region is matching at the end */
  	if (rgnend == end) {
  		rgn->region[i].size -= size;
  		return 0;
  	}
  
	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
  	rgn->region[i].size = base - rgn->region[i].base;
  	return lmb_add_region(rgn, end + 1, rgnend - end);
  }
  long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
  {
  	struct lmb_region *_rgn = &(lmb->reserved);
  
  	return lmb_add_region(_rgn, base, size);
  }
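
/*
 * Find the first region in @rgn that overlaps [base, base + size - 1];
 * returns its index, or -1 if nothing overlaps.
 */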
  static long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base,
  				phys_size_t size)
  {
  	unsigned long i;
  	for (i = 0; i < rgn->cnt; i++) {
  		phys_addr_t rgnbase = rgn->region[i].base;
  		phys_size_t rgnsize = rgn->region[i].size;
  		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
  			break;
  	}
  
  	return (i < rgn->cnt) ? i : -1;
  }
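
/*
 * Like lmb_reserve(), but overlapping an existing reservation is not an
 * error: the part of the new range that is already covered is left alone
 * and only the uncovered head and/or tail is reserved.
 */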
  long lmb_reserve_overlap(struct lmb *lmb, phys_addr_t base, phys_size_t size)
  {
  	struct lmb_region *_rgn = &(lmb->reserved);
  	long ret = lmb_add_region(_rgn, base, size);
  	long overlap_rgn;
  	phys_addr_t res_base;
  	phys_size_t res_size;
  
  	/* Handle the overlap */
  	if (ret == -2) {
  		overlap_rgn = lmb_overlaps_region(_rgn, base, size);
  		res_base = lmb->reserved.region[overlap_rgn].base;
  		res_size = lmb->reserved.region[overlap_rgn].size;
  
  		if ((base >= res_base) && ((base + size) <= (res_base + res_size))) {
  			/* new region is inside reserved region, so it is already reserved */
  			return 0;
  		} else {
  			if (base < res_base) {
  				ret = lmb_reserve(lmb, base, res_base - base);
  				if (ret < 0)
  					return ret;
  			}
  
  			if ((base + size) > (res_base + res_size)) {
  				ret = lmb_reserve(lmb, res_base + res_size, (base + size) - (res_base + res_size));
  				if (ret < 0)
  					return ret;
  			}
  		}
  	}
  
  	return ret;
  }
  phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align)
  {
  	return lmb_alloc_base(lmb, size, align, LMB_ALLOC_ANYWHERE);
  }
  phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
  {
  	phys_addr_t alloc;
  
  	alloc = __lmb_alloc_base(lmb, size, align, max_addr);
  
  	if (alloc == 0)
  		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.
  ",
  		       (ulong)size, (ulong)max_addr);
  
  	return alloc;
  }
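
/* Round addr down to a multiple of size; assumes size is a power of two. */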
  static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
  {
  	return addr & ~(size - 1);
  }
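
/*
 * Top-down first fit: walk the memory regions from the highest downwards,
 * start at the aligned top of each candidate (capped at max_addr) and step
 * down past any reserved region that overlaps, reserving and returning the
 * first free spot. Returns 0 on failure, so address 0 is never handed out.
 */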
  phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
  {
  	long i, rgn;
  	phys_addr_t base = 0;
  	phys_addr_t res_base;

  	for (i = lmb->memory.cnt - 1; i >= 0; i--) {
  		phys_addr_t lmbbase = lmb->memory.region[i].base;
  		phys_size_t lmbsize = lmb->memory.region[i].size;

  		if (lmbsize < size)
  			continue;
  		if (max_addr == LMB_ALLOC_ANYWHERE)
  			base = lmb_align_down(lmbbase + lmbsize - size, align);
  		else if (lmbbase < max_addr) {
  			base = lmbbase + lmbsize;
  			if (base < lmbbase)
  				base = -1;
  			base = min(base, max_addr);
  			base = lmb_align_down(base - size, align);
  		} else
  			continue;
  		while (base && lmbbase <= base) {
  			rgn = lmb_overlaps_region(&lmb->reserved, base, size);
  			if (rgn < 0) {
  				/* This area isn't reserved, take it */
  				if (lmb_add_region(&lmb->reserved, base,
  						   size) < 0)
  					return 0;
  				return base;
  			}
  			res_base = lmb->reserved.region[rgn].base;
  			if (res_base < size)
  				break;
  			base = lmb_align_down(res_base - size, align);
  		}
  	}
  	return 0;
  }
  /*
   * Try to allocate a specific address range: must be in defined memory but not
   * reserved
   */
  phys_addr_t lmb_alloc_addr(struct lmb *lmb, phys_addr_t base, phys_size_t size)
  {
  	long rgn;
  
  	/* Check if the requested address is in one of the memory regions */
  	rgn = lmb_overlaps_region(&lmb->memory, base, size);
  	if (rgn >= 0) {
  		/*
  		 * Check if the requested end address is in the same memory
  		 * region we found.
  		 */
  		if (lmb_addrs_overlap(lmb->memory.region[rgn].base,
  				      lmb->memory.region[rgn].size,
  				      base + size - 1, 1)) {
  			/* ok, reserve the memory */
  			if (lmb_reserve(lmb, base, size) >= 0)
  				return base;
  		}
  	}
  	return 0;
  }
  
  /* Return number of bytes from a given address that are free */
  phys_size_t lmb_get_free_size(struct lmb *lmb, phys_addr_t addr)
  {
  	int i;
  	long rgn;
  
  	/* check if the requested address is in the memory regions */
  	rgn = lmb_overlaps_region(&lmb->memory, addr, 1);
  	if (rgn >= 0) {
  		for (i = 0; i < lmb->reserved.cnt; i++) {
  			if (addr < lmb->reserved.region[i].base) {
  				/* first reserved range > requested address */
  				return lmb->reserved.region[i].base - addr;
  			}
  			if (lmb->reserved.region[i].base +
  			    lmb->reserved.region[i].size > addr) {
  				/* requested addr is in this reserved range */
  				return 0;
  			}
  		}
  		/* if we come here: no reserved ranges above requested addr */
  		return lmb->memory.region[lmb->memory.cnt - 1].base +
  		       lmb->memory.region[lmb->memory.cnt - 1].size - addr;
  	}
  	return 0;
  }
  int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
  {
  	int i;
  
  	for (i = 0; i < lmb->reserved.cnt; i++) {
  		phys_addr_t upper = lmb->reserved.region[i].base +
  			lmb->reserved.region[i].size - 1;
  		if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
  			return 1;
  	}
  	return 0;
  }

  __weak void board_lmb_reserve(struct lmb *lmb)
  {
  	/* please define platform specific board_lmb_reserve() */
  }

  __weak void arch_lmb_reserve(struct lmb *lmb)
  {
  	/* please define platform specific arch_lmb_reserve() */
  }
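
/*
 * A board or architecture can supply a strong version of the weak hooks
 * above to keep ranges away from the allocator. A minimal sketch for a
 * board file (the 64 KiB size and the reason for reserving it - e.g. a
 * vector page at address 0 - are illustrative assumptions):
 *
 *	void board_lmb_reserve(struct lmb *lmb)
 *	{
 *		lmb_reserve(lmb, 0, 0x10000);
 *	}
 */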