Blame view

lib/lmb.c 11.4 KB
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
1
2
3
4
5
  /*
   * Procedures for maintaining information about logical memory blocks.
   *
   * Peter Bergner, IBM Corp.	June 2001.
   * Copyright (C) 2001 Peter Bergner.
d9b2b2a27   David S. Miller   [LIB]: Make Power...
6
   *
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
7
8
9
10
11
   *      This program is free software; you can redistribute it and/or
   *      modify it under the terms of the GNU General Public License
   *      as published by the Free Software Foundation; either version
   *      2 of the License, or (at your option) any later version.
   */
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
12
13
14
  #include <linux/kernel.h>
  #include <linux/init.h>
  #include <linux/bitops.h>
d9b2b2a27   David S. Miller   [LIB]: Make Power...
15
  #include <linux/lmb.h>
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
16

3b9331dac   Michael Ellerman   [PATCH] powerpc: ...
17
/* Passed as max_addr to mean "no upper bound on the allocation". */
#define LMB_ALLOC_ANYWHERE	0

/* The single global LMB descriptor: available memory plus reservations. */
struct lmb lmb;
faa6cfde7   David S. Miller   lmb: Make lmb deb...
19
20
21
22
23
24
25
26
27
  static int lmb_debug;
  
  static int __init early_lmb(char *p)
  {
  	if (p && strstr(p, "debug"))
  		lmb_debug = 1;
  	return 0;
  }
  early_param("lmb", early_lmb);
c37682d90   Michael Ellerman   lmb: Rework lmb_d...
28
  static void lmb_dump(struct lmb_region *region, char *name)
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
29
  {
c37682d90   Michael Ellerman   lmb: Rework lmb_d...
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
  	unsigned long long base, size;
  	int i;
  
  	pr_info(" %s.cnt  = 0x%lx
  ", name, region->cnt);
  
  	for (i = 0; i < region->cnt; i++) {
  		base = region->region[i].base;
  		size = region->region[i].size;
  
  		pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes
  ",
  		    name, i, base, base + size - 1, size);
  	}
  }
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
45

c37682d90   Michael Ellerman   lmb: Rework lmb_d...
46
47
/* Dump the whole LMB configuration (RMO size, total memory, and both
 * region tables).  No-op unless booted with "lmb=debug".
 */
void lmb_dump_all(void)
{
	if (!lmb_debug)
		return;

	pr_info("LMB configuration:\n");
	pr_info(" rmo_size    = 0x%llx\n", (unsigned long long)lmb.rmo_size);
	pr_info(" memory.size = 0x%llx\n", (unsigned long long)lmb.memory.size);

	lmb_dump(&lmb.memory, "memory");
	lmb_dump(&lmb.reserved, "reserved");
}
98d5c21c8   Badari Pulavarty   [POWERPC] Update ...
60
61
  static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
  					u64 size2)
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
62
  {
300613e52   Paul Mackerras   [LMB] Fix some wh...
63
  	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
64
  }
98d5c21c8   Badari Pulavarty   [POWERPC] Update ...
65
  static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
66
67
68
69
70
71
72
73
  {
  	if (base2 == base1 + size1)
  		return 1;
  	else if (base1 == base2 + size2)
  		return -1;
  
  	return 0;
  }
98d5c21c8   Badari Pulavarty   [POWERPC] Update ...
74
  static long lmb_regions_adjacent(struct lmb_region *rgn,
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
75
76
  		unsigned long r1, unsigned long r2)
  {
e5f270954   Becky Bruce   [LMB]: Make lmb s...
77
78
79
80
  	u64 base1 = rgn->region[r1].base;
  	u64 size1 = rgn->region[r1].size;
  	u64 base2 = rgn->region[r2].base;
  	u64 size2 = rgn->region[r2].size;
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
81
82
83
  
  	return lmb_addrs_adjacent(base1, size1, base2, size2);
  }
98d5c21c8   Badari Pulavarty   [POWERPC] Update ...
84
  static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
85
86
  {
  	unsigned long i;
2babf5c2e   Michael Ellerman   [PATCH] powerpc: ...
87
88
89
  	for (i = r; i < rgn->cnt - 1; i++) {
  		rgn->region[i].base = rgn->region[i + 1].base;
  		rgn->region[i].size = rgn->region[i + 1].size;
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
90
91
92
  	}
  	rgn->cnt--;
  }
2babf5c2e   Michael Ellerman   [PATCH] powerpc: ...
93
  /* Assumption: base addr of region 1 < base addr of region 2 */
98d5c21c8   Badari Pulavarty   [POWERPC] Update ...
94
  static void lmb_coalesce_regions(struct lmb_region *rgn,
2babf5c2e   Michael Ellerman   [PATCH] powerpc: ...
95
96
97
98
99
  		unsigned long r1, unsigned long r2)
  {
  	rgn->region[r1].size += rgn->region[r2].size;
  	lmb_remove_region(rgn, r2);
  }
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
100
101
102
103
104
105
106
107
108
109
110
111
112
113
  void __init lmb_init(void)
  {
  	/* Create a dummy zero size LMB which will get coalesced away later.
  	 * This simplifies the lmb_add() code below...
  	 */
  	lmb.memory.region[0].base = 0;
  	lmb.memory.region[0].size = 0;
  	lmb.memory.cnt = 1;
  
  	/* Ditto. */
  	lmb.reserved.region[0].base = 0;
  	lmb.reserved.region[0].size = 0;
  	lmb.reserved.cnt = 1;
  }
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
114
115
116
117
118
119
120
121
122
  void __init lmb_analyze(void)
  {
  	int i;
  
  	lmb.memory.size = 0;
  
  	for (i = 0; i < lmb.memory.cnt; i++)
  		lmb.memory.size += lmb.memory.region[i].size;
  }
98d5c21c8   Badari Pulavarty   [POWERPC] Update ...
123
/* Insert [base, base+size) into a sorted region table, coalescing with
 * adjacent entries where possible.
 *
 * Returns 0 when the range was inserted (or already present), a positive
 * count when it was merged into existing entries, and -1 when the table
 * is full (MAX_LMB_REGIONS) and no merge was possible.
 */
static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	/* The dummy zero-size entry left by lmb_init() is simply
	 * overwritten by the first real region.
	 */
	if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		return 0;
	}

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		u64 rgnbase = rgn->region[i].base;
		u64 rgnsize = rgn->region[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			/* New range ends exactly where entry i begins:
			 * extend entry i downwards.
			 */
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			/* New range begins exactly where entry i ends:
			 * extend entry i upwards.
			 */
			rgn->region[i].size += size;
			coalesced++;
			break;
		}
	}

	/* The just-extended entry i may now butt up against entry i+1;
	 * merge the two if so.
	 */
	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) {
		lmb_coalesce_regions(rgn, i, i+1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= MAX_LMB_REGIONS)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			/* Shift entry i up one slot to make room. */
			rgn->region[i+1].base = rgn->region[i].base;
			rgn->region[i+1].size = rgn->region[i].size;
		} else {
			rgn->region[i+1].base = base;
			rgn->region[i+1].size = size;
			break;
		}
	}

	/* If the loop ran off the front of the table, the new range is
	 * the lowest of all and belongs in slot 0 (everything else was
	 * already shifted up).
	 */
	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
	}
	rgn->cnt++;

	return 0;
}
98d5c21c8   Badari Pulavarty   [POWERPC] Update ...
183
  long lmb_add(u64 base, u64 size)
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
184
  {
300613e52   Paul Mackerras   [LMB] Fix some wh...
185
  	struct lmb_region *_rgn = &lmb.memory;
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
186
187
188
189
190
191
192
193
  
  	/* On pSeries LPAR systems, the first LMB is our RMO region. */
  	if (base == 0)
  		lmb.rmo_size = size;
  
  	return lmb_add_region(_rgn, base, size);
  
  }
98d5c21c8   Badari Pulavarty   [POWERPC] Update ...
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
/* Remove [base, base+size) from the memory table.  The range must lie
 * entirely within one existing region; that region is deleted, trimmed,
 * or split as needed.  Returns 0 on success, -1 when no region contains
 * the range or when splitting fails because the table is full.
 */
long lmb_remove(u64 base, u64 size)
{
	struct lmb_region *rgn = &(lmb.memory);
	u64 rgnbegin, rgnend;
	u64 end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i=0; i < rgn->cnt; i++) {
		rgnbegin = rgn->region[i].base;
		rgnend = rgnbegin + rgn->region[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == rgn->cnt)
		return -1;

	/* Check to see if we are removing entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(rgn, i);
		return 0;
	}

	/* Check to see if region is matching at the front */
	if (rgnbegin == base) {
		rgn->region[i].base = end;
		rgn->region[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		rgn->region[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	rgn->region[i].size = base - rgn->region[i].base;
	return lmb_add_region(rgn, end, rgnend - end);
}
e5f270954   Becky Bruce   [LMB]: Make lmb s...
242
  long __init lmb_reserve(u64 base, u64 size)
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
243
  {
300613e52   Paul Mackerras   [LMB] Fix some wh...
244
  	struct lmb_region *_rgn = &lmb.reserved;
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
245

8c20fafa8   Michael Ellerman   [PATCH] powerpc: ...
246
  	BUG_ON(0 == size);
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
247
248
  	return lmb_add_region(_rgn, base, size);
  }
300613e52   Paul Mackerras   [LMB] Fix some wh...
249
  long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
250
251
  {
  	unsigned long i;
300613e52   Paul Mackerras   [LMB] Fix some wh...
252
  	for (i = 0; i < rgn->cnt; i++) {
e5f270954   Becky Bruce   [LMB]: Make lmb s...
253
254
  		u64 rgnbase = rgn->region[i].base;
  		u64 rgnsize = rgn->region[i].size;
300613e52   Paul Mackerras   [LMB] Fix some wh...
255
  		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
256
  			break;
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
257
258
259
260
  	}
  
  	return (i < rgn->cnt) ? i : -1;
  }
c50f68c8a   David S. Miller   [LMB] Add lmb_all...
261
262
263
264
265
266
267
268
269
270
271
272
273
  static u64 lmb_align_down(u64 addr, u64 size)
  {
  	return addr & ~(size - 1);
  }
  
  static u64 lmb_align_up(u64 addr, u64 size)
  {
  	return (addr + (size - 1)) & ~(size - 1);
  }
  
/* Try to carve `size` bytes aligned to `align` out of the unreserved
 * space inside [start, end), searching from the top down.  On success
 * the chosen base is recorded in lmb.reserved and returned; on failure
 * (no room, or the reserved table is full) returns ~(u64)0.
 */
static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
					   u64 size, u64 align)
{
	u64 base, res_base;
	long j;

	/* Highest aligned candidate that still fits below 'end'. */
	base = lmb_align_down((end - size), align);
	while (start <= base) {
		j = lmb_overlaps_region(&lmb.reserved, base, size);
		if (j < 0) {
			/* this area isn't reserved, take it */
			if (lmb_add_region(&lmb.reserved, base, size) < 0)
				base = ~(u64)0;
			return base;
		}
		/* Candidate overlaps reservation j: retry just below it. */
		res_base = lmb.reserved.region[j].base;
		if (res_base < size)
			break;
		base = lmb_align_down(res_base - size, align);
	}

	return ~(u64)0;
}
  
  static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
  				       u64 (*nid_range)(u64, u64, int *),
  				       u64 size, u64 align, int nid)
  {
  	u64 start, end;
  
  	start = mp->base;
  	end = start + mp->size;
  
  	start = lmb_align_up(start, align);
  	while (start < end) {
  		u64 this_end;
  		int this_nid;
  
  		this_end = nid_range(start, end, &this_nid);
  		if (this_nid == nid) {
  			u64 ret = lmb_alloc_nid_unreserved(start, this_end,
  							   size, align);
  			if (ret != ~(u64)0)
  				return ret;
  		}
  		start = this_end;
  	}
  
  	return ~(u64)0;
  }
  
  u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
  			 u64 (*nid_range)(u64 start, u64 end, int *nid))
  {
  	struct lmb_region *mem = &lmb.memory;
  	int i;
4978db5bd   David S. Miller   lmb: Fix inconsis...
327
328
329
  	BUG_ON(0 == size);
  
  	size = lmb_align_up(size, align);
c50f68c8a   David S. Miller   [LMB] Add lmb_all...
330
331
332
333
334
335
336
337
338
339
  	for (i = 0; i < mem->cnt; i++) {
  		u64 ret = lmb_alloc_nid_region(&mem->region[i],
  					       nid_range,
  					       size, align, nid);
  		if (ret != ~(u64)0)
  			return ret;
  	}
  
  	return lmb_alloc(size, align);
  }
e5f270954   Becky Bruce   [LMB]: Make lmb s...
340
  u64 __init lmb_alloc(u64 size, u64 align)
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
341
342
343
  {
  	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
  }
e5f270954   Becky Bruce   [LMB]: Make lmb s...
344
  u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
345
  {
e5f270954   Becky Bruce   [LMB]: Make lmb s...
346
  	u64 alloc;
d7a5b2ffa   Michael Ellerman   [PATCH] powerpc: ...
347
348
  
  	alloc = __lmb_alloc_base(size, align, max_addr);
2c276603c   Michael Ellerman   [PATCH] powerpc: ...
349
  	if (alloc == 0)
e5f270954   Becky Bruce   [LMB]: Make lmb s...
350
351
352
  		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.
  ",
  		      (unsigned long long) size, (unsigned long long) max_addr);
d7a5b2ffa   Michael Ellerman   [PATCH] powerpc: ...
353
354
355
  
  	return alloc;
  }
e5f270954   Becky Bruce   [LMB]: Make lmb s...
356
/* Allocate `size` bytes aligned to `align` at the highest available
 * address at or below `max_addr`.  Returns the base address on success,
 * 0 on failure.  The winning range is recorded in lmb.reserved.
 */
u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
	long i, j;
	u64 base = 0;
	u64 res_base;

	BUG_ON(0 == size);

	size = lmb_align_up(size, align);

	/* On some platforms, make sure we allocate lowmem */
	/* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
	if (max_addr == LMB_ALLOC_ANYWHERE)
		max_addr = LMB_REAL_LIMIT;

	/* Walk the memory regions from the top of the table downwards. */
	for (i = lmb.memory.cnt - 1; i >= 0; i--) {
		u64 lmbbase = lmb.memory.region[i].base;
		u64 lmbsize = lmb.memory.region[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			/* Clamp the candidate so it ends at or below max_addr. */
			base = min(lmbbase + lmbsize, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		/* Slide the candidate down past reservations until it
		 * either fits or falls out of this memory region.
		 */
		while (base && lmbbase <= base) {
			j = lmb_overlaps_region(&lmb.reserved, base, size);
			if (j < 0) {
				/* this area isn't reserved, take it */
				if (lmb_add_region(&lmb.reserved, base, size) < 0)
					return 0;
				return base;
			}
			res_base = lmb.reserved.region[j].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}
  
  /* You must call lmb_analyze() before this. */
e5f270954   Becky Bruce   [LMB]: Make lmb s...
399
  u64 __init lmb_phys_mem_size(void)
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
400
401
402
  {
  	return lmb.memory.size;
  }
e5f270954   Becky Bruce   [LMB]: Make lmb s...
403
  u64 __init lmb_end_of_DRAM(void)
7c8c6b977   Paul Mackerras   powerpc: Merge lm...
404
405
406
407
408
  {
  	int idx = lmb.memory.cnt - 1;
  
  	return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
  }
2babf5c2e   Michael Ellerman   [PATCH] powerpc: ...
409
/* You must call lmb_analyze() after this. */
/* Clamp the memory map so neither memory nor reservations extend past
 * memory_limit bytes of RAM; also shrinks rmo_size if region 0 was cut.
 */
void __init lmb_enforce_memory_limit(u64 memory_limit)
{
	unsigned long i;
	u64 limit;
	struct lmb_property *p;

	if (!memory_limit)
		return;

	/* Truncate the lmb regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < lmb.memory.cnt; i++) {
		if (limit > lmb.memory.region[i].size) {
			limit -= lmb.memory.region[i].size;
			continue;
		}

		/* This region crosses the limit: trim it and drop the rest. */
		lmb.memory.region[i].size = limit;
		lmb.memory.cnt = i + 1;
		break;
	}

	if (lmb.memory.region[0].size < lmb.rmo_size)
		lmb.rmo_size = lmb.memory.region[0].size;

	/* Reuse the parameter as the new end-of-DRAM address. */
	memory_limit = lmb_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		p = &lmb.reserved.region[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			/* Entry removed; step back so we don't skip the
			 * entry that just slid into this slot.
			 */
			lmb_remove_region(&lmb.reserved, i);
			i--;
		}
	}
}
f98eeb4eb   Kumar Gala   [POWERPC] Fix han...
450

e5f270954   Becky Bruce   [LMB]: Make lmb s...
451
  int __init lmb_is_reserved(u64 addr)
f98eeb4eb   Kumar Gala   [POWERPC] Fix han...
452
453
454
455
  {
  	int i;
  
  	for (i = 0; i < lmb.reserved.cnt; i++) {
e5f270954   Becky Bruce   [LMB]: Make lmb s...
456
457
  		u64 upper = lmb.reserved.region[i].base +
  			lmb.reserved.region[i].size - 1;
f98eeb4eb   Kumar Gala   [POWERPC] Fix han...
458
459
460
461
462
  		if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
  			return 1;
  	}
  	return 0;
  }
9d88a2eb6   Badari Pulavarty   [POWERPC] Provide...
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
  
/*
 * Given a <base, len>, find which memory regions belong to this range.
 * Adjust the request and return a contiguous chunk.
 */
/* Returns 0 with *res clamped to the first overlapping memory region,
 * or -1 when no region overlaps.  Assumes the memory table is sorted
 * by ascending base address.
 */
int lmb_find(struct lmb_property *res)
{
	int i;
	u64 rstart, rend;

	rstart = res->base;
	rend = rstart + res->size - 1;

	for (i = 0; i < lmb.memory.cnt; i++) {
		u64 start = lmb.memory.region[i].base;
		u64 end = start + lmb.memory.region[i].size - 1;

		/* Sorted table: once a region starts past the request,
		 * nothing later can overlap it.
		 */
		if (start > rend)
			return -1;

		/* NOTE(review): the overlap test uses (start < rend), so a
		 * region touching only the request's final byte is skipped;
		 * looks like it should be (start <= rend) — confirm intent.
		 */
		if ((end >= rstart) && (start < rend)) {
			/* adjust the request */
			if (rstart < start)
				rstart = start;
			if (rend > end)
				rend = end;
			res->base = rstart;
			res->size = rend - rstart + 1;
			return 0;
		}
	}
	return -1;
}