lib/lmb.c

  /*
   * Procedures for maintaining information about logical memory blocks.
   *
   * Peter Bergner, IBM Corp.	June 2001.
   * Copyright (C) 2001 Peter Bergner.
   *
   * SPDX-License-Identifier:	GPL-2.0+
   */
  
  #include <common.h>
  #include <lmb.h>
  
  #define LMB_ALLOC_ANYWHERE	0
  
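/* Print the memory and reserved region lists when DEBUG is enabled. */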
  void lmb_dump_all(struct lmb *lmb)
  {
  #ifdef DEBUG
  	unsigned long i;
  
  	debug("lmb_dump_all:
  ");
  	debug("    memory.cnt		   = 0x%lx
  ", lmb->memory.cnt);
  	debug("    memory.size		   = 0x%llx
  ",
  	      (unsigned long long)lmb->memory.size);
	for (i = 0; i < lmb->memory.cnt; i++) {
  		debug("    memory.reg[0x%lx].base   = 0x%llx
  ", i,
  			(long long unsigned)lmb->memory.region[i].base);
  		debug("		   .size   = 0x%llx
  ",
9b55a2536   Wolfgang Denk   Fix some more pri...
33
  			(long long unsigned)lmb->memory.region[i].size);
  	}
  	debug("
      reserved.cnt	   = 0x%lx
  ",
  		lmb->reserved.cnt);
  	debug("    reserved.size	   = 0x%llx
  ",
  		(long long unsigned)lmb->reserved.size);
	for (i = 0; i < lmb->reserved.cnt; i++) {
  		debug("    reserved.reg[0x%lx].base = 0x%llx
  ", i,
  			(long long unsigned)lmb->reserved.region[i].base);
  		debug("		     .size = 0x%llx
  ",
9b55a2536   Wolfgang Denk   Fix some more pri...
48
  			(long long unsigned)lmb->reserved.region[i].size);
  	}
  #endif /* DEBUG */
  }
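/* Return non-zero if the two address ranges overlap at any point. */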
  static long lmb_addrs_overlap(phys_addr_t base1,
  		phys_size_t size1, phys_addr_t base2, phys_size_t size2)
  {
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
  }
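/*
 * Return 1 if the second range immediately follows the first, -1 if the
 * first immediately follows the second, and 0 if they are not adjacent.
 */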
  static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
  		phys_addr_t base2, phys_size_t size2)
4ed6552f7   Kumar Gala   [new uImage] Intr...
59
60
61
62
63
64
65
66
67
68
69
70
  {
  	if (base2 == base1 + size1)
  		return 1;
  	else if (base1 == base2 + size2)
  		return -1;
  
  	return 0;
  }
  
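/* Check whether regions r1 and r2 of the list are adjacent in memory. */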
  static long lmb_regions_adjacent(struct lmb_region *rgn,
  		unsigned long r1, unsigned long r2)
  {
  	phys_addr_t base1 = rgn->region[r1].base;
  	phys_size_t size1 = rgn->region[r1].size;
  	phys_addr_t base2 = rgn->region[r2].base;
  	phys_size_t size2 = rgn->region[r2].size;
  
  	return lmb_addrs_adjacent(base1, size1, base2, size2);
  }
  
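/* Remove region r by shifting all later entries down one slot. */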
  static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
  {
  	unsigned long i;
  
  	for (i = r; i < rgn->cnt - 1; i++) {
  		rgn->region[i].base = rgn->region[i + 1].base;
  		rgn->region[i].size = rgn->region[i + 1].size;
  	}
  	rgn->cnt--;
  }
  
  /* Assumption: base addr of region 1 < base addr of region 2 */
  static void lmb_coalesce_regions(struct lmb_region *rgn,
  		unsigned long r1, unsigned long r2)
  {
  	rgn->region[r1].size += rgn->region[r2].size;
  	lmb_remove_region(rgn, r2);
  }
  
  void lmb_init(struct lmb *lmb)
  {
  	/* Create a dummy zero size LMB which will get coalesced away later.
  	 * This simplifies the lmb_add() code below...
  	 */
  	lmb->memory.region[0].base = 0;
  	lmb->memory.region[0].size = 0;
  	lmb->memory.cnt = 1;
  	lmb->memory.size = 0;
  
  	/* Ditto. */
  	lmb->reserved.region[0].base = 0;
  	lmb->reserved.region[0].size = 0;
  	lmb->reserved.cnt = 1;
  	lmb->reserved.size = 0;
  }
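
/*
 * Typical call sequence (a minimal sketch; ram_base, ram_size and
 * image_size are placeholder names, and the reservation hooks vary
 * by board and architecture):
 *
 *	struct lmb lmb;
 *	phys_addr_t load_addr;
 *
 *	lmb_init(&lmb);
 *	lmb_add(&lmb, ram_base, ram_size);
 *	arch_lmb_reserve(&lmb);
 *	board_lmb_reserve(&lmb);
 *	load_addr = lmb_alloc(&lmb, image_size, 4096);
 */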
  
/*
 * Add (base, size) to the region list, merging with existing regions
 * where possible. This routine is called with relocation disabled.
 */
static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base,
			   phys_size_t size)
  {
  	unsigned long coalesced = 0;
  	long adjacent, i;
  
  	if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
  		rgn->region[0].base = base;
  		rgn->region[0].size = size;
  		return 0;
  	}
  
  	/* First try and coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
  		phys_addr_t rgnbase = rgn->region[i].base;
  		phys_size_t rgnsize = rgn->region[i].size;
  
  		if ((rgnbase == base) && (rgnsize == size))
  			/* Already have this region, so we're done */
  			return 0;
  
		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			rgn->region[i].size += size;
			coalesced++;
			break;
		}
  	}
  
	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
		lmb_coalesce_regions(rgn, i, i + 1);
  		coalesced++;
  	}
  
  	if (coalesced)
  		return coalesced;
  	if (rgn->cnt >= MAX_LMB_REGIONS)
  		return -1;
  
  	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i + 1].base = rgn->region[i].base;
			rgn->region[i + 1].size = rgn->region[i].size;
  		} else {
  			rgn->region[i+1].base = base;
  			rgn->region[i+1].size = size;
  			break;
  		}
  	}
  
  	if (base < rgn->region[0].base) {
  		rgn->region[0].base = base;
  		rgn->region[0].size = size;
  	}
  
  	rgn->cnt++;
  
  	return 0;
  }
  
/*
 * Add a region to the list of available memory.
 * This routine may be called with relocation disabled.
 */
  long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size)
  {
  	struct lmb_region *_rgn = &(lmb->memory);
  
  	return lmb_add_region(_rgn, base, size);
  }
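/*
 * Remove (base, size) from the reserved list, splitting an existing
 * region in two when the freed range falls in the middle of it.
 */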
  long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
  {
  	struct lmb_region *rgn = &(lmb->reserved);
  	phys_addr_t rgnbegin, rgnend;
  	phys_addr_t end = base + size;
  	int i;
  
	rgnbegin = rgnend = 0; /* suppress gcc warnings */
  
	/* Find the region that (base, size) belongs to */
	for (i = 0; i < rgn->cnt; i++) {
  		rgnbegin = rgn->region[i].base;
  		rgnend = rgnbegin + rgn->region[i].size;
  
  		if ((rgnbegin <= base) && (end <= rgnend))
  			break;
  	}
  
  	/* Didn't find the region */
  	if (i == rgn->cnt)
  		return -1;
  
	/* Check to see if we are removing the entire region */
  	if ((rgnbegin == base) && (rgnend == end)) {
  		lmb_remove_region(rgn, i);
  		return 0;
  	}
  
	/* Check to see if the region matches at the front */
  	if (rgnbegin == base) {
  		rgn->region[i].base = end;
  		rgn->region[i].size -= size;
  		return 0;
  	}
  
	/* Check to see if the region matches at the end */
  	if (rgnend == end) {
  		rgn->region[i].size -= size;
  		return 0;
  	}
  
	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
  	rgn->region[i].size = base - rgn->region[i].base;
  	return lmb_add_region(rgn, end, rgnend - end);
  }
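/* Mark (base, size) as reserved so the allocator will not hand it out. */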
  long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
  {
  	struct lmb_region *_rgn = &(lmb->reserved);
  
  	return lmb_add_region(_rgn, base, size);
  }
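/*
 * Return the index of the first region in the list that overlaps
 * (base, size), or -1 if nothing overlaps.
 */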
  long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base,
  				phys_size_t size)
  {
  	unsigned long i;
  
	for (i = 0; i < rgn->cnt; i++) {
  		phys_addr_t rgnbase = rgn->region[i].base;
  		phys_size_t rgnsize = rgn->region[i].size;
		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
  	}
  
  	return (i < rgn->cnt) ? i : -1;
  }
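/* Allocate size bytes at the given alignment from anywhere in memory. */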
  phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align)
  {
  	return lmb_alloc_base(lmb, size, align, LMB_ALLOC_ANYWHERE);
  }
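/*
 * Allocate size bytes below max_addr, printing an error on failure.
 * Returns the allocated address, or 0 if no suitable range was found.
 */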
phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align,
			   phys_addr_t max_addr)
  {
  	phys_addr_t alloc;
  
  	alloc = __lmb_alloc_base(lmb, size, align, max_addr);
  
  	if (alloc == 0)
  		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.
  ",
9b55a2536   Wolfgang Denk   Fix some more pri...
271
  		      (ulong)size, (ulong)max_addr);
  
  	return alloc;
  }
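/* Round addr down to a multiple of size; size must be a power of two. */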
  static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
  {
  	return addr & ~(size - 1);
  }
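/* Round addr up to a multiple of size; size must be a power of two. */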
  static phys_addr_t lmb_align_up(phys_addr_t addr, ulong size)
  {
  	return (addr + (size - 1)) & ~(size - 1);
  }
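/*
 * Worker for the allocators: walk the memory regions from the top down,
 * dropping below each reserved region that gets in the way, and reserve
 * the first free range that satisfies size, align and max_addr.
 */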
phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align,
			     phys_addr_t max_addr)
  {
  	long i, j;
  	phys_addr_t base = 0;
  	phys_addr_t res_base;
  
	for (i = lmb->memory.cnt - 1; i >= 0; i--) {
  		phys_addr_t lmbbase = lmb->memory.region[i].base;
  		phys_size_t lmbsize = lmb->memory.region[i].size;
  		if (lmbsize < size)
  			continue;
  		if (max_addr == LMB_ALLOC_ANYWHERE)
  			base = lmb_align_down(lmbbase + lmbsize - size, align);
  		else if (lmbbase < max_addr) {
  			base = min(lmbbase + lmbsize, max_addr);
  			base = lmb_align_down(base - size, align);
  		} else
  			continue;
  		while (base && lmbbase <= base) {
  			j = lmb_overlaps_region(&lmb->reserved, base, size);
  			if (j < 0) {
  				/* This area isn't reserved, take it */
  				if (lmb_add_region(&lmb->reserved, base,
  							lmb_align_up(size,
  								align)) < 0)
  					return 0;
  				return base;
  			}
  			res_base = lmb->reserved.region[j].base;
  			if (res_base < size)
  				break;
  			base = lmb_align_down(res_base - size, align);
  		}
  	}
  	return 0;
  }
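/* Return 1 if addr lies within any reserved region, 0 otherwise. */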
  int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
  {
  	int i;
  
  	for (i = 0; i < lmb->reserved.cnt; i++) {
  		phys_addr_t upper = lmb->reserved.region[i].base +
  			lmb->reserved.region[i].size - 1;
  		if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
  			return 1;
  	}
  	return 0;
  }
  
  void __board_lmb_reserve(struct lmb *lmb)
  {
	/* please define platform-specific board_lmb_reserve() */
  }
  void board_lmb_reserve(struct lmb *lmb) __attribute__((weak, alias("__board_lmb_reserve")));
  
  void __arch_lmb_reserve(struct lmb *lmb)
  {
	/* please define platform-specific arch_lmb_reserve() */
  }
  void arch_lmb_reserve(struct lmb *lmb) __attribute__((weak, alias("__arch_lmb_reserve")));