lib/lmb.c

  /*
   * Procedures for maintaining information about logical memory blocks.
   *
   * Peter Bergner, IBM Corp.	June 2001.
   * Copyright (C) 2001 Peter Bergner.
   *
   *      This program is free software; you can redistribute it and/or
   *      modify it under the terms of the GNU General Public License
   *      as published by the Free Software Foundation; either version
   *      2 of the License, or (at your option) any later version.
   */
  #include <linux/kernel.h>
  #include <linux/init.h>
  #include <linux/bitops.h>
  #include <linux/lmb.h>

  #define LMB_ALLOC_ANYWHERE	0
  struct lmb lmb;
  static int lmb_debug;
  
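  /* Booting with "lmb=debug" on the kernel command line sets lmb_debug. */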
  static int __init early_lmb(char *p)
  {
  	if (p && strstr(p, "debug"))
  		lmb_debug = 1;
  	return 0;
  }
  early_param("lmb", early_lmb);
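  /* Print the entries of one region table (memory or reserved). */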
  static void lmb_dump(struct lmb_region *region, char *name)
  {
  	unsigned long long base, size;
  	int i;
  
  	pr_info(" %s.cnt  = 0x%lx
  ", name, region->cnt);
  
  	for (i = 0; i < region->cnt; i++) {
  		base = region->region[i].base;
  		size = region->region[i].size;
  
  		pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes
  ",
  		    name, i, base, base + size - 1, size);
  	}
  }

  void lmb_dump_all(void)
  {
  	if (!lmb_debug)
  		return;
  	pr_info("LMB configuration:
  ");
  	pr_info(" rmo_size    = 0x%llx
  ", (unsigned long long)lmb.rmo_size);
  	pr_info(" memory.size = 0x%llx
  ", (unsigned long long)lmb.memory.size);

  	lmb_dump(&lmb.memory, "memory");
  	lmb_dump(&lmb.reserved, "reserved");
  }
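  /*
   * Return non-zero if the ranges [base1, base1+size1) and
   * [base2, base2+size2) overlap.
   */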
  static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
  					u64 size2)
  {
  	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
  }
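  /*
   * Return 1 if the second range starts exactly where the first ends,
   * -1 for the mirror case, and 0 if the ranges are not adjacent.
   */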
  static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
  {
  	if (base2 == base1 + size1)
  		return 1;
  	else if (base1 == base2 + size2)
  		return -1;
  
  	return 0;
  }
  static long lmb_regions_adjacent(struct lmb_region *rgn,
  		unsigned long r1, unsigned long r2)
  {
  	u64 base1 = rgn->region[r1].base;
  	u64 size1 = rgn->region[r1].size;
  	u64 base2 = rgn->region[r2].base;
  	u64 size2 = rgn->region[r2].size;
  
  	return lmb_addrs_adjacent(base1, size1, base2, size2);
  }
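  /* Remove entry r from a region table by shifting later entries down. */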
  static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
  {
  	unsigned long i;
  	for (i = r; i < rgn->cnt - 1; i++) {
  		rgn->region[i].base = rgn->region[i + 1].base;
  		rgn->region[i].size = rgn->region[i + 1].size;
  	}
  	rgn->cnt--;
  }
  /* Assumption: base addr of region 1 < base addr of region 2 */
  static void lmb_coalesce_regions(struct lmb_region *rgn,
  		unsigned long r1, unsigned long r2)
  {
  	rgn->region[r1].size += rgn->region[r2].size;
  	lmb_remove_region(rgn, r2);
  }
  void __init lmb_init(void)
  {
  	/* Create a dummy zero size LMB which will get coalesced away later.
  	 * This simplifies the lmb_add() code below...
  	 */
  	lmb.memory.region[0].base = 0;
  	lmb.memory.region[0].size = 0;
  	lmb.memory.cnt = 1;
  
  	/* Ditto. */
  	lmb.reserved.region[0].base = 0;
  	lmb.reserved.region[0].size = 0;
  	lmb.reserved.cnt = 1;
  }
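  /* Recompute lmb.memory.size as the sum of all memory region sizes. */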
  void __init lmb_analyze(void)
  {
  	int i;
  
  	lmb.memory.size = 0;
  
  	for (i = 0; i < lmb.memory.cnt; i++)
  		lmb.memory.size += lmb.memory.region[i].size;
  }
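  /*
   * Insert [base, base+size) into a region table: coalesce it with an
   * adjacent entry when possible, otherwise add a new entry, keeping the
   * table sorted by base address.  Returns a negative value if the table
   * is full.
   */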
  static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
  {
  	unsigned long coalesced = 0;
  	long adjacent, i;

  	if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
  		rgn->region[0].base = base;
  		rgn->region[0].size = size;
  		return 0;
  	}
  	/* First try and coalesce this LMB with another. */
  	for (i = 0; i < rgn->cnt; i++) {
  		u64 rgnbase = rgn->region[i].base;
  		u64 rgnsize = rgn->region[i].size;

  		if ((rgnbase == base) && (rgnsize == size))
  			/* Already have this region, so we're done */
  			return 0;
  		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
  		if (adjacent > 0) {
  			rgn->region[i].base -= size;
  			rgn->region[i].size += size;
  			coalesced++;
  			break;
  		} else if (adjacent < 0) {
  			rgn->region[i].size += size;
  			coalesced++;
  			break;
  		}
  	}
  	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) {
  		lmb_coalesce_regions(rgn, i, i+1);
  		coalesced++;
  	}
  
  	if (coalesced)
  		return coalesced;
  	if (rgn->cnt >= MAX_LMB_REGIONS)
  		return -1;
  
  	/* Couldn't coalesce the LMB, so add it to the sorted table. */
  	for (i = rgn->cnt - 1; i >= 0; i--) {
  		if (base < rgn->region[i].base) {
  			rgn->region[i+1].base = rgn->region[i].base;
  			rgn->region[i+1].size = rgn->region[i].size;
  		} else {
  			rgn->region[i+1].base = base;
  			rgn->region[i+1].size = size;
  			break;
  		}
  	}
  
  	if (base < rgn->region[0].base) {
  		rgn->region[0].base = base;
  		rgn->region[0].size = size;
  	}
  	rgn->cnt++;
  
  	return 0;
  }
  long lmb_add(u64 base, u64 size)
  {
  	struct lmb_region *_rgn = &lmb.memory;
  
  	/* On pSeries LPAR systems, the first LMB is our RMO region. */
  	if (base == 0)
  		lmb.rmo_size = size;
  
  	return lmb_add_region(_rgn, base, size);
  }
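  /*
   * Remove [base, base+size) from a region table.  The range must fall
   * within a single region, which is trimmed or split as needed.
   */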
  static long __lmb_remove(struct lmb_region *rgn, u64 base, u64 size)
  {
  {
  	u64 rgnbegin, rgnend;
  	u64 end = base + size;
  	int i;
  
  	rgnbegin = rgnend = 0; /* suppress gcc warnings */
  
  	/* Find the region where (base, size) belongs to */
  	for (i = 0; i < rgn->cnt; i++) {
  		rgnbegin = rgn->region[i].base;
  		rgnend = rgnbegin + rgn->region[i].size;
  
  		if ((rgnbegin <= base) && (end <= rgnend))
  			break;
  	}
  
  	/* Didn't find the region */
  	if (i == rgn->cnt)
  		return -1;
  
  	/* Check to see if we are removing entire region */
  	if ((rgnbegin == base) && (rgnend == end)) {
  		lmb_remove_region(rgn, i);
  		return 0;
  	}
  
  	/* Check to see if region is matching at the front */
  	if (rgnbegin == base) {
  		rgn->region[i].base = end;
  		rgn->region[i].size -= size;
  		return 0;
  	}
  
  	/* Check to see if the region is matching at the end */
  	if (rgnend == end) {
  		rgn->region[i].size -= size;
  		return 0;
  	}
  
  	/*
  	 * We need to split the entry - adjust the current one to the
  	 * beginning of the hole and add the region after the hole.
  	 */
  	rgn->region[i].size = base - rgn->region[i].base;
  	return lmb_add_region(rgn, end, rgnend - end);
  }
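  /*
   * lmb_remove() carves a range out of the memory table; lmb_free()
   * returns a previously reserved range to the allocator.
   */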
  long lmb_remove(u64 base, u64 size)
  {
  	return __lmb_remove(&lmb.memory, base, size);
  }
  
  long __init lmb_free(u64 base, u64 size)
  {
  	return __lmb_remove(&lmb.reserved, base, size);
  }
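  /* Mark [base, base+size) as reserved so later allocations avoid it. */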
  long __init lmb_reserve(u64 base, u64 size)
  {
  	struct lmb_region *_rgn = &lmb.reserved;

  	BUG_ON(0 == size);

  	return lmb_add_region(_rgn, base, size);
  }
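  /*
   * Return the index of the first region in 'rgn' that overlaps
   * [base, base+size), or -1 if there is no overlap.
   */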
  long lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
  {
  	unsigned long i;

  	for (i = 0; i < rgn->cnt; i++) {
  		u64 rgnbase = rgn->region[i].base;
  		u64 rgnsize = rgn->region[i].size;
  		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
  			break;
  	}
  
  	return (i < rgn->cnt) ? i : -1;
  }
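  /* Alignment helpers; 'size' must be a power of two. */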
  static u64 lmb_align_down(u64 addr, u64 size)
  {
  	return addr & ~(size - 1);
  }
  
  static u64 lmb_align_up(u64 addr, u64 size)
  {
  	return (addr + (size - 1)) & ~(size - 1);
  }
  
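  /*
   * Search [start, end) from the top down for 'size' free bytes, reserve
   * them, and return the base address; returns ~(u64)0 on failure.
   */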
  static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
  					   u64 size, u64 align)
  {
  	u64 base, res_base;
  	long j;

  	base = lmb_align_down((end - size), align);
  	while (start <= base) {
  		j = lmb_overlaps_region(&lmb.reserved, base, size);
  		if (j < 0) {
  			/* this area isn't reserved, take it */
  			if (lmb_add_region(&lmb.reserved, base, size) < 0)
  				base = ~(u64)0;
  			return base;
  		}
  		res_base = lmb.reserved.region[j].base;
  		if (res_base < size)
  			break;
  		base = lmb_align_down(res_base - size, align);
  	}
  
  	return ~(u64)0;
  }
  
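  /*
   * Try to allocate from one memory region, restricting the search to
   * the chunks that nid_range() attributes to node 'nid'.
   */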
  static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
  				       u64 (*nid_range)(u64, u64, int *),
  				       u64 size, u64 align, int nid)
  {
  	u64 start, end;
  
  	start = mp->base;
  	end = start + mp->size;
  
  	start = lmb_align_up(start, align);
  	while (start < end) {
  		u64 this_end;
  		int this_nid;
  
  		this_end = nid_range(start, end, &this_nid);
  		if (this_nid == nid) {
  			u64 ret = lmb_alloc_nid_unreserved(start, this_end,
  							   size, align);
  			if (ret != ~(u64)0)
  				return ret;
  		}
  		start = this_end;
  	}
  
  	return ~(u64)0;
  }
  
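  /*
   * Allocate 'size' bytes aligned to 'align', preferring memory on NUMA
   * node 'nid'.  The caller-supplied nid_range() callback reports how far
   * a range stays on a single node.  Falls back to an ordinary
   * lmb_alloc() when no node-local memory can be found.
   */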
  u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
  			 u64 (*nid_range)(u64 start, u64 end, int *nid))
  {
  	struct lmb_region *mem = &lmb.memory;
  	int i;
  	BUG_ON(0 == size);
  
  	size = lmb_align_up(size, align);
  	for (i = 0; i < mem->cnt; i++) {
  		u64 ret = lmb_alloc_nid_region(&mem->region[i],
  					       nid_range,
  					       size, align, nid);
  		if (ret != ~(u64)0)
  			return ret;
  	}
  
  	return lmb_alloc(size, align);
  }
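  /* Allocate 'size' bytes with the given alignment, anywhere in memory. */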
  u64 __init lmb_alloc(u64 size, u64 align)
  {
  	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
  }
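  /* Like __lmb_alloc_base(), but panics when the allocation fails. */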
  u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
  {
  	u64 alloc;
  
  	alloc = __lmb_alloc_base(size, align, max_addr);
  	if (alloc == 0)
  		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
  		      (unsigned long long) size, (unsigned long long) max_addr);
  
  	return alloc;
  }
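  /*
   * Allocate top-down from the highest suitable memory region, skipping
   * reserved blocks.  Returns the allocated base, or 0 on failure.
   */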
  u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
  {
  	long i, j;
  	u64 base = 0;
  	u64 res_base;

  	BUG_ON(0 == size);
  	size = lmb_align_up(size, align);
  	/* On some platforms, make sure we allocate lowmem */
  	/* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
  	if (max_addr == LMB_ALLOC_ANYWHERE)
  		max_addr = LMB_REAL_LIMIT;
  	for (i = lmb.memory.cnt - 1; i >= 0; i--) {
  		u64 lmbbase = lmb.memory.region[i].base;
  		u64 lmbsize = lmb.memory.region[i].size;

  		if (lmbsize < size)
  			continue;
  		if (max_addr == LMB_ALLOC_ANYWHERE)
  			base = lmb_align_down(lmbbase + lmbsize - size, align);
  		else if (lmbbase < max_addr) {
  			base = min(lmbbase + lmbsize, max_addr);
  			base = lmb_align_down(base - size, align);
  		} else
  			continue;
  		while (base && lmbbase <= base) {
  			j = lmb_overlaps_region(&lmb.reserved, base, size);
  			if (j < 0) {
  				/* this area isn't reserved, take it */
  				if (lmb_add_region(&lmb.reserved, base, size) < 0)
  					return 0;
  				return base;
  			}
  			res_base = lmb.reserved.region[j].base;
  			if (res_base < size)
  				break;
  			base = lmb_align_down(res_base - size, align);
  		}
  	}

  	return 0;
  }
  
  /* You must call lmb_analyze() before this. */
  u64 __init lmb_phys_mem_size(void)
  {
  	return lmb.memory.size;
  }
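  /* The region table is kept sorted, so the last entry ends DRAM. */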
  u64 lmb_end_of_DRAM(void)
  {
  	int idx = lmb.memory.cnt - 1;
  
  	return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
  }
  /* You must call lmb_analyze() after this. */
  void __init lmb_enforce_memory_limit(u64 memory_limit)
  {
  	unsigned long i;
  	u64 limit;
  	struct lmb_property *p;

  	if (!memory_limit)
  		return;
  	/* Truncate the lmb regions to satisfy the memory limit. */
  	limit = memory_limit;
  	for (i = 0; i < lmb.memory.cnt; i++) {
  		if (limit > lmb.memory.region[i].size) {
  			limit -= lmb.memory.region[i].size;
  			continue;
  		}
  
  		lmb.memory.region[i].size = limit;
  		lmb.memory.cnt = i + 1;
  		break;
  	}

  	if (lmb.memory.region[0].size < lmb.rmo_size)
  		lmb.rmo_size = lmb.memory.region[0].size;

  	memory_limit = lmb_end_of_DRAM();
  	/* And truncate any reserves above the limit also. */
  	for (i = 0; i < lmb.reserved.cnt; i++) {
  		p = &lmb.reserved.region[i];
  
  		if (p->base > memory_limit)
  			p->size = 0;
  		else if ((p->base + p->size) > memory_limit)
  			p->size = memory_limit - p->base;
  
  		if (p->size == 0) {
  			lmb_remove_region(&lmb.reserved, i);
  			i--;
  		}
  	}
  }
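  /* Return 1 if 'addr' falls inside any reserved region. */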
  int __init lmb_is_reserved(u64 addr)
  {
  	int i;
  
  	for (i = 0; i < lmb.reserved.cnt; i++) {
  		u64 upper = lmb.reserved.region[i].base +
  			lmb.reserved.region[i].size - 1;
  		if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
  			return 1;
  	}
  	return 0;
  }
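  /*
   * Return the index of a reserved region overlapping [base, base+size),
   * or -1 if the range is completely unreserved.
   */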
  int lmb_is_region_reserved(u64 base, u64 size)
  {
  	return lmb_overlaps_region(&lmb.reserved, base, size);
  }
  /*
   * Given a <base, len>, find which memory regions belong to this range.
   * Adjust the request and return a contiguous chunk.
   */
  int lmb_find(struct lmb_property *res)
  {
  	int i;
  	u64 rstart, rend;
  
  	rstart = res->base;
  	rend = rstart + res->size - 1;
  
  	for (i = 0; i < lmb.memory.cnt; i++) {
  		u64 start = lmb.memory.region[i].base;
  		u64 end = start + lmb.memory.region[i].size - 1;
  
  		if (start > rend)
  			return -1;
  
  		if ((end >= rstart) && (start < rend)) {
  			/* adjust the request */
  			if (rstart < start)
  				rstart = start;
  			if (rend > end)
  				rend = end;
  			res->base = rstart;
  			res->size = rend - rstart + 1;
  			return 0;
  		}
  	}
  	return -1;
  }