lib/lmb.c

  // SPDX-License-Identifier: GPL-2.0+
  /*
   * Procedures for maintaining information about logical memory blocks.
   *
   * Peter Bergner, IBM Corp.	June 2001.
   * Copyright (C) 2001 Peter Bergner.
   */
  
  #include <common.h>
  #include <lmb.h>
  
  #define LMB_ALLOC_ANYWHERE	0
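
/*
 * Typical call sequence (an illustrative sketch; the addresses and sizes
 * below are made up, not taken from any particular board):
 *
 *	struct lmb lmb;
 *	phys_addr_t addr;
 *
 *	lmb_init(&lmb);
 *	lmb_add(&lmb, 0x80000000, 0x10000000);		(256 MiB of RAM)
 *	lmb_reserve(&lmb, 0x80000000, 0x100000);	(keep the first 1 MiB)
 *	addr = lmb_alloc(&lmb, 0x2000, 0x1000);		(8 KiB, 4 KiB aligned)
 *	if (!addr)
 *		(allocation failed)
 */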
  
  void lmb_dump_all(struct lmb *lmb)
  {
  #ifdef DEBUG
  	unsigned long i;
  
  	debug("lmb_dump_all:
  ");
  	debug("    memory.cnt		   = 0x%lx
  ", lmb->memory.cnt);
	debug("    memory.size		   = 0x%llx\n",
	      (unsigned long long)lmb->memory.size);
  	for (i = 0; i < lmb->memory.cnt; i++) {
		debug("    memory.reg[0x%lx].base   = 0x%llx\n", i,
		      (unsigned long long)lmb->memory.region[i].base);
		debug("		   .size   = 0x%llx\n",
		      (unsigned long long)lmb->memory.region[i].size);
  	}

	debug("\n    reserved.cnt	   = 0x%lx\n",
		lmb->reserved.cnt);
	debug("    reserved.size	   = 0x%llx\n",
		(unsigned long long)lmb->reserved.size);
  	for (i = 0; i < lmb->reserved.cnt; i++) {
		debug("    reserved.reg[0x%lx].base = 0x%llx\n", i,
		      (unsigned long long)lmb->reserved.region[i].base);
		debug("		     .size = 0x%llx\n",
		      (unsigned long long)lmb->reserved.region[i].size);
  	}
  #endif /* DEBUG */
  }
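
/*
 * Check whether two address ranges overlap.  End addresses are computed
 * inclusively, so ranges reaching the top of the address space do not
 * overflow.
 */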
  static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
  			      phys_addr_t base2, phys_size_t size2)
  {
  	const phys_addr_t base1_end = base1 + size1 - 1;
  	const phys_addr_t base2_end = base2 + size2 - 1;
  
  	return ((base1 <= base2_end) && (base2 <= base1_end));
  }
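
/*
 * Return 1 if the second range starts right after the first, -1 if the
 * first starts right after the second, and 0 if they are not adjacent.
 */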
  static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
  			       phys_addr_t base2, phys_size_t size2)
  {
  	if (base2 == base1 + size1)
  		return 1;
  	else if (base1 == base2 + size2)
  		return -1;
  
  	return 0;
  }
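
/* As lmb_addrs_adjacent(), for two entries of the same region table. */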
  static long lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1,
  				 unsigned long r2)
  {
  	phys_addr_t base1 = rgn->region[r1].base;
  	phys_size_t size1 = rgn->region[r1].size;
  	phys_addr_t base2 = rgn->region[r2].base;
  	phys_size_t size2 = rgn->region[r2].size;
  
  	return lmb_addrs_adjacent(base1, size1, base2, size2);
  }
  
  static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
  {
  	unsigned long i;
  
  	for (i = r; i < rgn->cnt - 1; i++) {
  		rgn->region[i].base = rgn->region[i + 1].base;
  		rgn->region[i].size = rgn->region[i + 1].size;
  	}
  	rgn->cnt--;
  }
  
  /* Assumption: base addr of region 1 < base addr of region 2 */
  static void lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1,
  				 unsigned long r2)
  {
  	rgn->region[r1].size += rgn->region[r2].size;
  	lmb_remove_region(rgn, r2);
  }
  
  void lmb_init(struct lmb *lmb)
  {
  	lmb->memory.cnt = 0;
  	lmb->memory.size = 0;
  	lmb->reserved.cnt = 0;
  	lmb->reserved.size = 0;
  }
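
/* Apply the reservations requested by the arch, the board and the FDT. */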
  static void lmb_reserve_common(struct lmb *lmb, void *fdt_blob)
  {
  	arch_lmb_reserve(lmb);
  	board_lmb_reserve(lmb);
  
  	if (IMAGE_ENABLE_OF_LIBFDT && fdt_blob)
  		boot_fdt_add_mem_rsv_regions(lmb, fdt_blob);
  }
  /* Initialize the struct, add memory and call arch/board reserve functions */
  void lmb_init_and_reserve(struct lmb *lmb, bd_t *bd, void *fdt_blob)
  {
  #ifdef CONFIG_NR_DRAM_BANKS
  	int i;
  #endif
  
  	lmb_init(lmb);
  #ifdef CONFIG_NR_DRAM_BANKS
  	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
  		if (bd->bi_dram[i].size) {
  			lmb_add(lmb, bd->bi_dram[i].start,
  				bd->bi_dram[i].size);
  		}
  	}
  #else
  	if (bd->bi_memsize)
  		lmb_add(lmb, bd->bi_memstart, bd->bi_memsize);
  #endif
  	lmb_reserve_common(lmb, fdt_blob);
  }
  
  /* Initialize the struct, add memory and call arch/board reserve functions */
  void lmb_init_and_reserve_range(struct lmb *lmb, phys_addr_t base,
  				phys_size_t size, void *fdt_blob)
  {
  	lmb_init(lmb);
  	lmb_add(lmb, base, size);
  	lmb_reserve_common(lmb, fdt_blob);
  }
/* This routine is called with relocation disabled. */
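/*
 * Return 0 when the region is newly added or exactly matches an existing
 * entry, a positive count when it is coalesced with adjacent entries, -1
 * when the region table is full, and -2 when the range overlaps an
 * existing entry.
 */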
  static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t size)
  {
  	unsigned long coalesced = 0;
  	long adjacent, i;
  	if (rgn->cnt == 0) {
  		rgn->region[0].base = base;
  		rgn->region[0].size = size;
  		rgn->cnt = 1;
  		return 0;
  	}
  
  	/* First try and coalesce this LMB with another. */
  	for (i = 0; i < rgn->cnt; i++) {
  		phys_addr_t rgnbase = rgn->region[i].base;
  		phys_size_t rgnsize = rgn->region[i].size;
  
  		if ((rgnbase == base) && (rgnsize == size))
  			/* Already have this region, so we're done */
  			return 0;
  		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
  		if (adjacent > 0) {
  			rgn->region[i].base -= size;
  			rgn->region[i].size += size;
  			coalesced++;
  			break;
  		} else if (adjacent < 0) {
  			rgn->region[i].size += size;
  			coalesced++;
  			break;
  		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
  			/* regions overlap */
  			return -2;
  		}
  	}
  	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
  		lmb_coalesce_regions(rgn, i, i + 1);
  		coalesced++;
  	}
  
  	if (coalesced)
  		return coalesced;
  	if (rgn->cnt >= MAX_LMB_REGIONS)
  		return -1;
  
  	/* Couldn't coalesce the LMB, so add it to the sorted table. */
  	for (i = rgn->cnt-1; i >= 0; i--) {
  		if (base < rgn->region[i].base) {
  			rgn->region[i + 1].base = rgn->region[i].base;
  			rgn->region[i + 1].size = rgn->region[i].size;
  		} else {
  			rgn->region[i + 1].base = base;
  			rgn->region[i + 1].size = size;
  			break;
  		}
  	}
  
  	if (base < rgn->region[0].base) {
  		rgn->region[0].base = base;
  		rgn->region[0].size = size;
  	}
  
  	rgn->cnt++;
  
  	return 0;
  }
  
  /* This routine may be called with relocation disabled. */
  long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size)
  {
  	struct lmb_region *_rgn = &(lmb->memory);
  
  	return lmb_add_region(_rgn, base, size);
  }
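
/*
 * Free (part of) a reserved range.  The range must lie entirely within a
 * single reserved region, which is then removed, trimmed or split.
 */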
  long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
  {
  	struct lmb_region *rgn = &(lmb->reserved);
  	phys_addr_t rgnbegin, rgnend;
  	phys_addr_t end = base + size - 1;
  	int i;
  
	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs */
  	for (i = 0; i < rgn->cnt; i++) {
  		rgnbegin = rgn->region[i].base;
  		rgnend = rgnbegin + rgn->region[i].size - 1;
  
  		if ((rgnbegin <= base) && (end <= rgnend))
  			break;
  	}
  
  	/* Didn't find the region */
  	if (i == rgn->cnt)
  		return -1;
  
  	/* Check to see if we are removing entire region */
  	if ((rgnbegin == base) && (rgnend == end)) {
  		lmb_remove_region(rgn, i);
  		return 0;
  	}
  
  	/* Check to see if region is matching at the front */
  	if (rgnbegin == base) {
  		rgn->region[i].base = end + 1;
  		rgn->region[i].size -= size;
  		return 0;
  	}
  
  	/* Check to see if the region is matching at the end */
  	if (rgnend == end) {
  		rgn->region[i].size -= size;
  		return 0;
  	}
  
	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
  	rgn->region[i].size = base - rgn->region[i].base;
  	return lmb_add_region(rgn, end + 1, rgnend - end);
  }
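
/* Mark a range as reserved so that future allocations will avoid it. */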
  long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
  {
  	struct lmb_region *_rgn = &(lmb->reserved);
  
  	return lmb_add_region(_rgn, base, size);
  }
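
/* Return the index of the first region overlapping the range, or -1. */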
  static long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base,
  				phys_size_t size)
  {
  	unsigned long i;
  	for (i = 0; i < rgn->cnt; i++) {
  		phys_addr_t rgnbase = rgn->region[i].base;
  		phys_size_t rgnsize = rgn->region[i].size;
  		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
  			break;
  	}
  
  	return (i < rgn->cnt) ? i : -1;
  }
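
/*
 * Like lmb_reserve(), but tolerate overlap with an already reserved
 * region: only the parts of the range that are not yet reserved are
 * added to the reserved table.
 */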
  long lmb_reserve_overlap(struct lmb *lmb, phys_addr_t base, phys_size_t size)
  {
  	struct lmb_region *_rgn = &(lmb->reserved);
  	long ret = lmb_add_region(_rgn, base, size);
  	long overlap_rgn;
  	phys_addr_t res_base;
  	phys_size_t res_size;
  
  	/* Handle the overlap */
  	if (ret == -2) {
  		overlap_rgn = lmb_overlaps_region(_rgn, base, size);
  		res_base = lmb->reserved.region[overlap_rgn].base;
  		res_size = lmb->reserved.region[overlap_rgn].size;
  
  		if ((base >= res_base) && ((base + size) <= (res_base + res_size))) {
  			/* new region is inside reserved region, so it is already reserved */
  			return 0;
  		} else {
  			if (base < res_base) {
  				ret = lmb_reserve(lmb, base, res_base - base);
  				if (ret < 0)
  					return ret;
  			}
  
  			if ((base + size) > (res_base + res_size)) {
  				ret = lmb_reserve(lmb, res_base + res_size, (base + size) - (res_base + res_size));
  				if (ret < 0)
  					return ret;
  			}
  		}
  	}
  
  	return ret;
  }
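
/* Allocate @size bytes with the given alignment anywhere in memory. */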
  phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align)
  {
  	return lmb_alloc_base(lmb, size, align, LMB_ALLOC_ANYWHERE);
  }
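
/* As __lmb_alloc_base(), but log an error when the allocation fails. */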
  phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
  {
  	phys_addr_t alloc;
  
  	alloc = __lmb_alloc_base(lmb, size, align, max_addr);
  
  	if (alloc == 0)
  		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.
  ",
  		       (ulong)size, (ulong)max_addr);
  
  	return alloc;
  }
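
/* Round @addr down to an @size boundary; @size must be a power of two. */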
  static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
  {
  	return addr & ~(size - 1);
  }
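
/*
 * Walk the memory regions from the top down and pick the highest suitably
 * aligned base below @max_addr that does not collide with an existing
 * reservation.  The chosen range is reserved before its base is returned;
 * 0 means failure.
 */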
  phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
  {
  	long i, rgn;
  	phys_addr_t base = 0;
  	phys_addr_t res_base;

  	for (i = lmb->memory.cnt - 1; i >= 0; i--) {
  		phys_addr_t lmbbase = lmb->memory.region[i].base;
  		phys_size_t lmbsize = lmb->memory.region[i].size;

  		if (lmbsize < size)
  			continue;
  		if (max_addr == LMB_ALLOC_ANYWHERE)
  			base = lmb_align_down(lmbbase + lmbsize - size, align);
  		else if (lmbbase < max_addr) {
  			base = lmbbase + lmbsize;
  			if (base < lmbbase)
  				base = -1;
  			base = min(base, max_addr);
  			base = lmb_align_down(base - size, align);
  		} else
  			continue;
  		while (base && lmbbase <= base) {
  			rgn = lmb_overlaps_region(&lmb->reserved, base, size);
  			if (rgn < 0) {
  				/* This area isn't reserved, take it */
  				if (lmb_add_region(&lmb->reserved, base,
  						   size) < 0)
  					return 0;
  				return base;
  			}
  			res_base = lmb->reserved.region[rgn].base;
  			if (res_base < size)
  				break;
  			base = lmb_align_down(res_base - size, align);
  		}
  	}
  	return 0;
  }
  /*
   * Try to allocate a specific address range: must be in defined memory but not
   * reserved
   */
  phys_addr_t lmb_alloc_addr(struct lmb *lmb, phys_addr_t base, phys_size_t size)
  {
  	long rgn;
  
  	/* Check if the requested address is in one of the memory regions */
  	rgn = lmb_overlaps_region(&lmb->memory, base, size);
  	if (rgn >= 0) {
  		/*
  		 * Check if the requested end address is in the same memory
  		 * region we found.
  		 */
  		if (lmb_addrs_overlap(lmb->memory.region[rgn].base,
  				      lmb->memory.region[rgn].size,
  				      base + size - 1, 1)) {
  			/* ok, reserve the memory */
  			if (lmb_reserve(lmb, base, size) >= 0)
  				return base;
  		}
  	}
  	return 0;
  }
  
  /* Return number of bytes from a given address that are free */
  phys_size_t lmb_get_free_size(struct lmb *lmb, phys_addr_t addr)
  {
  	int i;
  	long rgn;
  
  	/* check if the requested address is in the memory regions */
  	rgn = lmb_overlaps_region(&lmb->memory, addr, 1);
  	if (rgn >= 0) {
  		for (i = 0; i < lmb->reserved.cnt; i++) {
  			if (addr < lmb->reserved.region[i].base) {
  				/* first reserved range > requested address */
  				return lmb->reserved.region[i].base - addr;
  			}
  			if (lmb->reserved.region[i].base +
  			    lmb->reserved.region[i].size > addr) {
  				/* requested addr is in this reserved range */
  				return 0;
  			}
  		}
  		/* if we come here: no reserved ranges above requested addr */
  		return lmb->memory.region[lmb->memory.cnt - 1].base +
  		       lmb->memory.region[lmb->memory.cnt - 1].size - addr;
  	}
  	return 0;
  }
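
/* Return 1 if @addr lies inside any reserved region, else 0. */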
  int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
  {
  	int i;
  
  	for (i = 0; i < lmb->reserved.cnt; i++) {
  		phys_addr_t upper = lmb->reserved.region[i].base +
  			lmb->reserved.region[i].size - 1;
  		if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
  			return 1;
  	}
  	return 0;
  }
  __weak void board_lmb_reserve(struct lmb *lmb)
  {
  	/* please define platform specific board_lmb_reserve() */
  }
  __weak void arch_lmb_reserve(struct lmb *lmb)
  {
  	/* please define platform specific arch_lmb_reserve() */
  }