lib/lmb.c

  /*
   * Procedures for maintaining information about logical memory blocks.
   *
   * Peter Bergner, IBM Corp.	June 2001.
   * Copyright (C) 2001 Peter Bergner.
   *
   *      This program is free software; you can redistribute it and/or
   *      modify it under the terms of the GNU General Public License
   *      as published by the Free Software Foundation; either version
   *      2 of the License, or (at your option) any later version.
   */

#include <linux/kernel.h>
  #include <linux/init.h>
  #include <linux/bitops.h>
  #include <linux/lmb.h>

  #define LMB_ALLOC_ANYWHERE	0

struct lmb lmb;

static int lmb_debug;
  
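/* Enable debug output when "lmb=debug" appears on the kernel command line. */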
  static int __init early_lmb(char *p)
  {
  	if (p && strstr(p, "debug"))
  		lmb_debug = 1;
  	return 0;
  }
  early_param("lmb", early_lmb);
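
/*
 * Dump the memory and reserved region tables to the kernel log.
 * Output is produced only when the kernel was booted with "lmb=debug".
 */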
  void lmb_dump_all(void)
  {
  	unsigned long i;

	if (!lmb_debug)
  		return;
  
  	pr_info("lmb_dump_all:
  ");
  	pr_info("    memory.cnt		  = 0x%lx
  ", lmb.memory.cnt);
  	pr_info("    memory.size		  = 0x%llx
  ",
e5f270954   Becky Bruce   [LMB]: Make lmb s...
40
  	    (unsigned long long)lmb.memory.size);
	for (i = 0; i < lmb.memory.cnt; i++) {
  		pr_info("    memory.region[0x%lx].base       = 0x%llx
  ",
e5f270954   Becky Bruce   [LMB]: Make lmb s...
44
  		    i, (unsigned long long)lmb.memory.region[i].base);
faa6cfde7   David S. Miller   lmb: Make lmb deb...
45
46
  		pr_info("		      .size     = 0x%llx
  ",
e5f270954   Becky Bruce   [LMB]: Make lmb s...
47
  		    (unsigned long long)lmb.memory.region[i].size);
  	}
  	pr_info("    reserved.cnt	  = 0x%lx
  ", lmb.reserved.cnt);
f9ebcd9d4   Kumar Gala   lmb: Fix compile ...
51
52
53
  	pr_info("    reserved.size	  = 0x%llx
  ",
  	    (unsigned long long)lmb.memory.size);
	for (i = 0; i < lmb.reserved.cnt; i++) {
  		pr_info("    reserved.region[0x%lx].base       = 0x%llx
  ",
e5f270954   Becky Bruce   [LMB]: Make lmb s...
57
  		    i, (unsigned long long)lmb.reserved.region[i].base);
faa6cfde7   David S. Miller   lmb: Make lmb deb...
58
59
  		pr_info("		      .size     = 0x%llx
  ",
e5f270954   Becky Bruce   [LMB]: Make lmb s...
60
  		    (unsigned long long)lmb.reserved.region[i].size);
  	}
  }
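
/*
 * Return non-zero if the ranges [base1, base1+size1) and
 * [base2, base2+size2) overlap.
 */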
  static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
  					u64 size2)
  {
  	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
  }
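
/*
 * Return 1 if the second range starts exactly where the first ends,
 * -1 if the first starts exactly where the second ends, 0 otherwise.
 */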
  static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
  {
  	if (base2 == base1 + size1)
  		return 1;
  	else if (base1 == base2 + size2)
  		return -1;
  
  	return 0;
  }
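
/* As lmb_addrs_adjacent(), for two entries already in the table. */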
  static long lmb_regions_adjacent(struct lmb_region *rgn,
  		unsigned long r1, unsigned long r2)
  {
  	u64 base1 = rgn->region[r1].base;
  	u64 size1 = rgn->region[r1].size;
  	u64 base2 = rgn->region[r2].base;
  	u64 size2 = rgn->region[r2].size;
  
  	return lmb_addrs_adjacent(base1, size1, base2, size2);
  }
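
/* Delete entry r from the table by sliding the later entries down. */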
  static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
  {
  	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
  		rgn->region[i].base = rgn->region[i + 1].base;
  		rgn->region[i].size = rgn->region[i + 1].size;
  	}
  	rgn->cnt--;
  }

/* Assumption: base addr of region 1 < base addr of region 2 */
  static void lmb_coalesce_regions(struct lmb_region *rgn,
  		unsigned long r1, unsigned long r2)
  {
  	rgn->region[r1].size += rgn->region[r2].size;
  	lmb_remove_region(rgn, r2);
  }
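
/*
 * Set up the initial (dummy) region tables.  Typical early-boot usage is
 * lmb_init(), then lmb_add()/lmb_reserve() calls, then lmb_analyze().
 */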
  void __init lmb_init(void)
  {
  	/* Create a dummy zero size LMB which will get coalesced away later.
  	 * This simplifies the lmb_add() code below...
  	 */
  	lmb.memory.region[0].base = 0;
  	lmb.memory.region[0].size = 0;
  	lmb.memory.cnt = 1;
  
  	/* Ditto. */
  	lmb.reserved.region[0].base = 0;
  	lmb.reserved.region[0].size = 0;
  	lmb.reserved.cnt = 1;
  }
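
/* Recompute lmb.memory.size as the sum of all memory region sizes. */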
  void __init lmb_analyze(void)
  {
  	int i;
  
  	lmb.memory.size = 0;
  
  	for (i = 0; i < lmb.memory.cnt; i++)
  		lmb.memory.size += lmb.memory.region[i].size;
  }
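
/*
 * Insert [base, base+size) into a region table, keeping the table sorted
 * by base address and merging with existing entries where possible.
 * Returns a positive count if the range was coalesced, 0 if it was
 * inserted as a new entry (or already present), and -1 if the table is full.
 */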
  static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
  {
  	unsigned long coalesced = 0;
  	long adjacent, i;

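	/* If the table holds only the initial dummy entry, simply replace it. */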
  	if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
  		rgn->region[0].base = base;
  		rgn->region[0].size = size;
  		return 0;
  	}
	/* First try to coalesce this LMB with an existing region. */
  	for (i = 0; i < rgn->cnt; i++) {
  		u64 rgnbase = rgn->region[i].base;
  		u64 rgnsize = rgn->region[i].size;

  		if ((rgnbase == base) && (rgnsize == size))
  			/* Already have this region, so we're done */
  			return 0;

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
  		if (adjacent > 0) {
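			/* The new block ends where region i begins: extend it downward. */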
  			rgn->region[i].base -= size;
  			rgn->region[i].size += size;
  			coalesced++;
  			break;
  		} else if (adjacent < 0) {
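			/* The new block starts where region i ends: extend it upward. */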
  			rgn->region[i].size += size;
  			coalesced++;
  			break;
  		}
  	}
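
	/* Extending entry i may have closed the gap to entry i+1: merge them. */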
  	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) {
  		lmb_coalesce_regions(rgn, i, i+1);
  		coalesced++;
  	}
  
  	if (coalesced)
  		return coalesced;
  	if (rgn->cnt >= MAX_LMB_REGIONS)
  		return -1;
  
  	/* Couldn't coalesce the LMB, so add it to the sorted table. */
  	for (i = rgn->cnt - 1; i >= 0; i--) {
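		/* Either shift this entry up one slot, or insert here and stop. */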
  		if (base < rgn->region[i].base) {
  			rgn->region[i+1].base = rgn->region[i].base;
  			rgn->region[i+1].size = rgn->region[i].size;
  		} else {
  			rgn->region[i+1].base = base;
  			rgn->region[i+1].size = size;
  			break;
  		}
  	}
  
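	/* The new region has the lowest base; it belongs in slot 0. */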
  	if (base < rgn->region[0].base) {
  		rgn->region[0].base = base;
  		rgn->region[0].size = size;
  	}

	rgn->cnt++;
  
  	return 0;
  }
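
/* Add [base, base+size) to the table of available memory. */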
  long lmb_add(u64 base, u64 size)
  {
  	struct lmb_region *_rgn = &lmb.memory;
  
  	/* On pSeries LPAR systems, the first LMB is our RMO region. */
  	if (base == 0)
  		lmb.rmo_size = size;
  
	return lmb_add_region(_rgn, base, size);
}
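
/*
 * Remove [base, base+size) from the available memory table.  The range
 * must fall within a single region, which is trimmed or split as needed.
 * Returns 0 on success and -1 if no region contains the range.
 */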
  long lmb_remove(u64 base, u64 size)
  {
  	struct lmb_region *rgn = &(lmb.memory);
  	u64 rgnbegin, rgnend;
  	u64 end = base + size;
  	int i;
  
	rgnbegin = rgnend = 0; /* suppress gcc warnings */
  
	/* Find the region containing (base, size) */
	for (i = 0; i < rgn->cnt; i++) {
  		rgnbegin = rgn->region[i].base;
  		rgnend = rgnbegin + rgn->region[i].size;
  
  		if ((rgnbegin <= base) && (end <= rgnend))
  			break;
  	}
  
  	/* Didn't find the region */
  	if (i == rgn->cnt)
  		return -1;
  
  	/* Check to see if we are removing entire region */
  	if ((rgnbegin == base) && (rgnend == end)) {
  		lmb_remove_region(rgn, i);
  		return 0;
  	}
  
  	/* Check to see if region is matching at the front */
  	if (rgnbegin == base) {
  		rgn->region[i].base = end;
  		rgn->region[i].size -= size;
  		return 0;
  	}
  
  	/* Check to see if the region is matching at the end */
  	if (rgnend == end) {
  		rgn->region[i].size -= size;
  		return 0;
  	}
  
	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
  	rgn->region[i].size = base - rgn->region[i].base;
  	return lmb_add_region(rgn, end, rgnend - end);
  }
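
/* Mark [base, base+size) as reserved so that allocations avoid it. */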
  long __init lmb_reserve(u64 base, u64 size)
  {
  	struct lmb_region *_rgn = &lmb.reserved;

  	BUG_ON(0 == size);

	return lmb_add_region(_rgn, base, size);
  }
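
/*
 * Return the index of the first table entry that overlaps
 * [base, base+size), or -1 if none does.
 */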
  long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
  {
  	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
  		u64 rgnbase = rgn->region[i].base;
  		u64 rgnsize = rgn->region[i].size;
  		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
  			break;
  	}
  
  	return (i < rgn->cnt) ? i : -1;
  }
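
/* Alignment helpers: "size" must be a power of two. */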
  static u64 lmb_align_down(u64 addr, u64 size)
  {
  	return addr & ~(size - 1);
  }
  
  static u64 lmb_align_up(u64 addr, u64 size)
  {
  	return (addr + (size - 1)) & ~(size - 1);
  }
  
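/*
 * Try to place "size" bytes at the highest aligned address within
 * [start, end) that does not overlap any reserved region.  Returns the
 * base of the new reservation, or ~(u64)0 on failure.
 */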
  static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
  					   u64 size, u64 align)
  {
  	u64 base, res_base;
  	long j;
  
  	base = lmb_align_down((end - size), align);
  	while (start <= base) {
  		j = lmb_overlaps_region(&lmb.reserved, base, size);
  		if (j < 0) {
  			/* this area isn't reserved, take it */
  			if (lmb_add_region(&lmb.reserved, base, size) < 0)
  				base = ~(u64)0;
  			return base;
  		}
  		res_base = lmb.reserved.region[j].base;
  		if (res_base < size)
  			break;
  		base = lmb_align_down(res_base - size, align);
  	}
  
  	return ~(u64)0;
  }
  
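/*
 * Split one memory region into per-node subranges using the nid_range()
 * callback, and try to allocate from those subranges that lie on the
 * requested node.
 */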
  static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
  				       u64 (*nid_range)(u64, u64, int *),
  				       u64 size, u64 align, int nid)
  {
  	u64 start, end;
  
  	start = mp->base;
  	end = start + mp->size;
  
  	start = lmb_align_up(start, align);
  	while (start < end) {
  		u64 this_end;
  		int this_nid;
  
  		this_end = nid_range(start, end, &this_nid);
  		if (this_nid == nid) {
  			u64 ret = lmb_alloc_nid_unreserved(start, this_end,
  							   size, align);
  			if (ret != ~(u64)0)
  				return ret;
  		}
  		start = this_end;
  	}
  
  	return ~(u64)0;
  }
  
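/*
 * Allocate "size" bytes from memory on NUMA node "nid" if possible,
 * falling back to an ordinary lmb_alloc() anywhere in memory.  The
 * nid_range(start, end, &nid) callback must return the end of the
 * leading subrange of [start, end) that lies on a single node, and
 * store that node's id in *nid.
 *
 * A minimal, hypothetical callback for a machine with all memory on
 * node 0 might look like:
 *
 *	static u64 one_node_range(u64 start, u64 end, int *nid)
 *	{
 *		*nid = 0;	// everything is on node 0
 *		return end;	// so the whole range is one subrange
 *	}
 *
 *	addr = lmb_alloc_nid(size, align, 0, one_node_range);
 */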
  u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
  			 u64 (*nid_range)(u64 start, u64 end, int *nid))
  {
  	struct lmb_region *mem = &lmb.memory;
  	int i;
  	BUG_ON(0 == size);
  
  	size = lmb_align_up(size, align);
  	for (i = 0; i < mem->cnt; i++) {
  		u64 ret = lmb_alloc_nid_region(&mem->region[i],
  					       nid_range,
  					       size, align, nid);
  		if (ret != ~(u64)0)
  			return ret;
  	}
  
  	return lmb_alloc(size, align);
  }
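
/* Allocate "size" bytes with no explicit placement limit; panics on failure. */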
  u64 __init lmb_alloc(u64 size, u64 align)
  {
  	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
  }
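
/*
 * Allocate "size" bytes below max_addr, panicking if no suitable block
 * can be found.  max_addr == LMB_ALLOC_ANYWHERE lifts the limit to
 * LMB_REAL_LIMIT.
 */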
  u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
  {
  	u64 alloc;
  
  	alloc = __lmb_alloc_base(size, align, max_addr);
  	if (alloc == 0)
  		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.
  ",
  		      (unsigned long long) size, (unsigned long long) max_addr);
  
  	return alloc;
  }
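
/*
 * Worker for the allocators: scan the memory regions from top to bottom
 * for a free, aligned block below max_addr.  Returns the allocated base,
 * or 0 on failure (rather than panicking).
 */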
  u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
  {
  	long i, j;
  	u64 base = 0;
  	u64 res_base;

  	BUG_ON(0 == size);
  	size = lmb_align_up(size, align);
  	/* On some platforms, make sure we allocate lowmem */
  	/* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
  	if (max_addr == LMB_ALLOC_ANYWHERE)
  		max_addr = LMB_REAL_LIMIT;

	for (i = lmb.memory.cnt - 1; i >= 0; i--) {
  		u64 lmbbase = lmb.memory.region[i].base;
  		u64 lmbsize = lmb.memory.region[i].size;

  		if (lmbsize < size)
  			continue;
  		if (max_addr == LMB_ALLOC_ANYWHERE)
  			base = lmb_align_down(lmbbase + lmbsize - size, align);
  		else if (lmbbase < max_addr) {
  			base = min(lmbbase + lmbsize, max_addr);
  			base = lmb_align_down(base - size, align);
  		} else
  			continue;
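
		/* Step downward past reservations until a free block is found. */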
  		while (base && lmbbase <= base) {
  			j = lmb_overlaps_region(&lmb.reserved, base, size);
  			if (j < 0) {
  				/* this area isn't reserved, take it */
  				if (lmb_add_region(&lmb.reserved, base, size) < 0)
  					return 0;
  				return base;
  			}
  			res_base = lmb.reserved.region[j].base;
  			if (res_base < size)
  				break;
  			base = lmb_align_down(res_base - size, align);
  		}
  	}
  	return 0;
  }
  
  /* You must call lmb_analyze() before this. */
  u64 __init lmb_phys_mem_size(void)
  {
  	return lmb.memory.size;
  }
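
/* Return the first address past the highest lmb memory region. */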
  u64 __init lmb_end_of_DRAM(void)
  {
  	int idx = lmb.memory.cnt - 1;
  
  	return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
  }

/* You must call lmb_analyze() after this. */
  void __init lmb_enforce_memory_limit(u64 memory_limit)
  {
  	unsigned long i;
  	u64 limit;
  	struct lmb_property *p;

  	if (!memory_limit)
  		return;

	/* Truncate the lmb regions to satisfy the memory limit. */
  	limit = memory_limit;
  	for (i = 0; i < lmb.memory.cnt; i++) {
  		if (limit > lmb.memory.region[i].size) {
  			limit -= lmb.memory.region[i].size;
  			continue;
  		}
  
  		lmb.memory.region[i].size = limit;
  		lmb.memory.cnt = i + 1;
  		break;
  	}

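	/* The RMO region cannot be larger than the (possibly truncated) first region. */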
  	if (lmb.memory.region[0].size < lmb.rmo_size)
  		lmb.rmo_size = lmb.memory.region[0].size;

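	/* From here on, memory_limit is the new end of DRAM. */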
  	memory_limit = lmb_end_of_DRAM();
  	/* And truncate any reserves above the limit also. */
  	for (i = 0; i < lmb.reserved.cnt; i++) {
  		p = &lmb.reserved.region[i];
  
  		if (p->base > memory_limit)
  			p->size = 0;
  		else if ((p->base + p->size) > memory_limit)
  			p->size = memory_limit - p->base;
  
  		if (p->size == 0) {
  			lmb_remove_region(&lmb.reserved, i);
  			i--;
  		}
  	}
  }

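/* Return 1 if addr lies inside any reserved region, 0 otherwise. */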
  int __init lmb_is_reserved(u64 addr)
  {
  	int i;
  
  	for (i = 0; i < lmb.reserved.cnt; i++) {
  		u64 upper = lmb.reserved.region[i].base +
  			lmb.reserved.region[i].size - 1;
  		if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
  			return 1;
  	}
  	return 0;
  }
  
/*
 * Given a <base, len> request in *res, find the memory region that
 * overlaps it, clamp the request to that region, and return the
 * adjusted contiguous chunk in *res.  Returns 0 on success and -1
 * when no memory region overlaps the request.
 */
  int lmb_find(struct lmb_property *res)
  {
  	int i;
  	u64 rstart, rend;
  
  	rstart = res->base;
  	rend = rstart + res->size - 1;
  
  	for (i = 0; i < lmb.memory.cnt; i++) {
  		u64 start = lmb.memory.region[i].base;
  		u64 end = start + lmb.memory.region[i].size - 1;
  
  		if (start > rend)
  			return -1;
  
  		if ((end >= rstart) && (start < rend)) {
  			/* adjust the request */
  			if (rstart < start)
  				rstart = start;
  			if (rend > end)
  				rend = end;
  			res->base = rstart;
  			res->size = rend - rstart + 1;
  			return 0;
  		}
  	}
  	return -1;
  }