Commit d9b2b2a277219d4812311d995054ce4f95067725

Authored by David S. Miller
1 parent e760e716d4

[LIB]: Make PowerPC LMB code generic so sparc64 can use it too.

Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 34 changed files with 500 additions and 456 deletions Side-by-side Diff

arch/powerpc/Kconfig
... ... @@ -90,6 +90,7 @@
90 90 select HAVE_IDE
91 91 select HAVE_OPROFILE
92 92 select HAVE_KPROBES
  93 + select HAVE_LMB
93 94  
94 95 config EARLY_PRINTK
95 96 bool
arch/powerpc/kernel/btext.c
... ... @@ -7,6 +7,7 @@
7 7 #include <linux/string.h>
8 8 #include <linux/init.h>
9 9 #include <linux/module.h>
  10 +#include <linux/lmb.h>
10 11  
11 12 #include <asm/sections.h>
12 13 #include <asm/prom.h>
... ... @@ -15,7 +16,7 @@
15 16 #include <asm/mmu.h>
16 17 #include <asm/pgtable.h>
17 18 #include <asm/io.h>
18   -#include <asm/lmb.h>
  19 +#include <asm/prom.h>
19 20 #include <asm/processor.h>
20 21 #include <asm/udbg.h>
21 22  
arch/powerpc/kernel/crash.c
... ... @@ -24,12 +24,13 @@
24 24 #include <linux/init.h>
25 25 #include <linux/irq.h>
26 26 #include <linux/types.h>
  27 +#include <linux/lmb.h>
27 28  
28 29 #include <asm/processor.h>
29 30 #include <asm/machdep.h>
30 31 #include <asm/kexec.h>
31 32 #include <asm/kdump.h>
32   -#include <asm/lmb.h>
  33 +#include <asm/prom.h>
33 34 #include <asm/firmware.h>
34 35 #include <asm/smp.h>
35 36 #include <asm/system.h>
arch/powerpc/kernel/crash_dump.c
... ... @@ -13,8 +13,9 @@
13 13  
14 14 #include <linux/crash_dump.h>
15 15 #include <linux/bootmem.h>
  16 +#include <linux/lmb.h>
16 17 #include <asm/kdump.h>
17   -#include <asm/lmb.h>
  18 +#include <asm/prom.h>
18 19 #include <asm/firmware.h>
19 20 #include <asm/uaccess.h>
20 21  
arch/powerpc/kernel/machine_kexec.c
... ... @@ -12,8 +12,9 @@
12 12 #include <linux/kexec.h>
13 13 #include <linux/reboot.h>
14 14 #include <linux/threads.h>
  15 +#include <linux/lmb.h>
15 16 #include <asm/machdep.h>
16   -#include <asm/lmb.h>
  17 +#include <asm/prom.h>
17 18  
18 19 void machine_crash_shutdown(struct pt_regs *regs)
19 20 {
arch/powerpc/kernel/prom.c
... ... @@ -31,10 +31,10 @@
31 31 #include <linux/kexec.h>
32 32 #include <linux/debugfs.h>
33 33 #include <linux/irq.h>
  34 +#include <linux/lmb.h>
34 35  
35 36 #include <asm/prom.h>
36 37 #include <asm/rtas.h>
37   -#include <asm/lmb.h>
38 38 #include <asm/page.h>
39 39 #include <asm/processor.h>
40 40 #include <asm/irq.h>
arch/powerpc/kernel/rtas.c
... ... @@ -22,6 +22,7 @@
22 22 #include <linux/smp.h>
23 23 #include <linux/completion.h>
24 24 #include <linux/cpumask.h>
  25 +#include <linux/lmb.h>
25 26  
26 27 #include <asm/prom.h>
27 28 #include <asm/rtas.h>
... ... @@ -34,7 +35,6 @@
34 35 #include <asm/system.h>
35 36 #include <asm/delay.h>
36 37 #include <asm/uaccess.h>
37   -#include <asm/lmb.h>
38 38 #include <asm/udbg.h>
39 39 #include <asm/syscalls.h>
40 40 #include <asm/smp.h>
arch/powerpc/kernel/setup-common.c
... ... @@ -34,6 +34,7 @@
34 34 #include <linux/serial_8250.h>
35 35 #include <linux/debugfs.h>
36 36 #include <linux/percpu.h>
  37 +#include <linux/lmb.h>
37 38 #include <asm/io.h>
38 39 #include <asm/prom.h>
39 40 #include <asm/processor.h>
... ... @@ -56,7 +57,6 @@
56 57 #include <asm/cache.h>
57 58 #include <asm/page.h>
58 59 #include <asm/mmu.h>
59   -#include <asm/lmb.h>
60 60 #include <asm/xmon.h>
61 61 #include <asm/cputhreads.h>
62 62  
arch/powerpc/kernel/setup_64.c
... ... @@ -33,6 +33,7 @@
33 33 #include <linux/serial_8250.h>
34 34 #include <linux/bootmem.h>
35 35 #include <linux/pci.h>
  36 +#include <linux/lmb.h>
36 37 #include <asm/io.h>
37 38 #include <asm/kdump.h>
38 39 #include <asm/prom.h>
... ... @@ -55,7 +56,6 @@
55 56 #include <asm/cache.h>
56 57 #include <asm/page.h>
57 58 #include <asm/mmu.h>
58   -#include <asm/lmb.h>
59 59 #include <asm/firmware.h>
60 60 #include <asm/xmon.h>
61 61 #include <asm/udbg.h>
arch/powerpc/kernel/vdso.c
... ... @@ -21,13 +21,14 @@
21 21 #include <linux/elf.h>
22 22 #include <linux/security.h>
23 23 #include <linux/bootmem.h>
  24 +#include <linux/lmb.h>
24 25  
25 26 #include <asm/pgtable.h>
26 27 #include <asm/system.h>
27 28 #include <asm/processor.h>
28 29 #include <asm/mmu.h>
29 30 #include <asm/mmu_context.h>
30   -#include <asm/lmb.h>
  31 +#include <asm/prom.h>
31 32 #include <asm/machdep.h>
32 33 #include <asm/cputable.h>
33 34 #include <asm/sections.h>
arch/powerpc/mm/Makefile
... ... @@ -6,7 +6,7 @@
6 6 EXTRA_CFLAGS += -mno-minimal-toc
7 7 endif
8 8  
9   -obj-y := fault.o mem.o lmb.o \
  9 +obj-y := fault.o mem.o \
10 10 init_$(CONFIG_WORD_SIZE).o \
11 11 pgtable_$(CONFIG_WORD_SIZE).o \
12 12 mmu_context_$(CONFIG_WORD_SIZE).o
arch/powerpc/mm/hash_utils_64.c
... ... @@ -31,6 +31,7 @@
31 31 #include <linux/cache.h>
32 32 #include <linux/init.h>
33 33 #include <linux/signal.h>
  34 +#include <linux/lmb.h>
34 35  
35 36 #include <asm/processor.h>
36 37 #include <asm/pgtable.h>
... ... @@ -41,7 +42,7 @@
41 42 #include <asm/system.h>
42 43 #include <asm/uaccess.h>
43 44 #include <asm/machdep.h>
44   -#include <asm/lmb.h>
  45 +#include <asm/prom.h>
45 46 #include <asm/abs_addr.h>
46 47 #include <asm/tlbflush.h>
47 48 #include <asm/io.h>
arch/powerpc/mm/init_32.c
... ... @@ -30,6 +30,7 @@
30 30 #include <linux/highmem.h>
31 31 #include <linux/initrd.h>
32 32 #include <linux/pagemap.h>
  33 +#include <linux/lmb.h>
33 34  
34 35 #include <asm/pgalloc.h>
35 36 #include <asm/prom.h>
... ... @@ -41,7 +42,6 @@
41 42 #include <asm/machdep.h>
42 43 #include <asm/btext.h>
43 44 #include <asm/tlb.h>
44   -#include <asm/lmb.h>
45 45 #include <asm/sections.h>
46 46  
47 47 #include "mmu_decl.h"
arch/powerpc/mm/init_64.c
... ... @@ -38,11 +38,11 @@
38 38 #include <linux/nodemask.h>
39 39 #include <linux/module.h>
40 40 #include <linux/poison.h>
  41 +#include <linux/lmb.h>
41 42  
42 43 #include <asm/pgalloc.h>
43 44 #include <asm/page.h>
44 45 #include <asm/prom.h>
45   -#include <asm/lmb.h>
46 46 #include <asm/rtas.h>
47 47 #include <asm/io.h>
48 48 #include <asm/mmu_context.h>
arch/powerpc/mm/lmb.c
1   -/*
2   - * Procedures for maintaining information about logical memory blocks.
3   - *
4   - * Peter Bergner, IBM Corp. June 2001.
5   - * Copyright (C) 2001 Peter Bergner.
6   - *
7   - * This program is free software; you can redistribute it and/or
8   - * modify it under the terms of the GNU General Public License
9   - * as published by the Free Software Foundation; either version
10   - * 2 of the License, or (at your option) any later version.
11   - */
12   -
13   -#include <linux/kernel.h>
14   -#include <linux/init.h>
15   -#include <linux/bitops.h>
16   -#include <asm/types.h>
17   -#include <asm/page.h>
18   -#include <asm/prom.h>
19   -#include <asm/lmb.h>
20   -#ifdef CONFIG_PPC32
21   -#include "mmu_decl.h" /* for __max_low_memory */
22   -#endif
23   -
24   -#undef DEBUG
25   -
26   -#ifdef DEBUG
27   -#include <asm/udbg.h>
28   -#define DBG(fmt...) udbg_printf(fmt)
29   -#else
30   -#define DBG(fmt...)
31   -#endif
32   -
33   -#define LMB_ALLOC_ANYWHERE 0
34   -
35   -struct lmb lmb;
36   -
37   -void lmb_dump_all(void)
38   -{
39   -#ifdef DEBUG
40   - unsigned long i;
41   -
42   - DBG("lmb_dump_all:\n");
43   - DBG(" memory.cnt = 0x%lx\n", lmb.memory.cnt);
44   - DBG(" memory.size = 0x%lx\n", lmb.memory.size);
45   - for (i=0; i < lmb.memory.cnt ;i++) {
46   - DBG(" memory.region[0x%x].base = 0x%lx\n",
47   - i, lmb.memory.region[i].base);
48   - DBG(" .size = 0x%lx\n",
49   - lmb.memory.region[i].size);
50   - }
51   -
52   - DBG("\n reserved.cnt = 0x%lx\n", lmb.reserved.cnt);
53   - DBG(" reserved.size = 0x%lx\n", lmb.reserved.size);
54   - for (i=0; i < lmb.reserved.cnt ;i++) {
55   - DBG(" reserved.region[0x%x].base = 0x%lx\n",
56   - i, lmb.reserved.region[i].base);
57   - DBG(" .size = 0x%lx\n",
58   - lmb.reserved.region[i].size);
59   - }
60   -#endif /* DEBUG */
61   -}
62   -
63   -static unsigned long __init lmb_addrs_overlap(unsigned long base1,
64   - unsigned long size1, unsigned long base2, unsigned long size2)
65   -{
66   - return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
67   -}
68   -
69   -static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
70   - unsigned long base2, unsigned long size2)
71   -{
72   - if (base2 == base1 + size1)
73   - return 1;
74   - else if (base1 == base2 + size2)
75   - return -1;
76   -
77   - return 0;
78   -}
79   -
80   -static long __init lmb_regions_adjacent(struct lmb_region *rgn,
81   - unsigned long r1, unsigned long r2)
82   -{
83   - unsigned long base1 = rgn->region[r1].base;
84   - unsigned long size1 = rgn->region[r1].size;
85   - unsigned long base2 = rgn->region[r2].base;
86   - unsigned long size2 = rgn->region[r2].size;
87   -
88   - return lmb_addrs_adjacent(base1, size1, base2, size2);
89   -}
90   -
91   -static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
92   -{
93   - unsigned long i;
94   -
95   - for (i = r; i < rgn->cnt - 1; i++) {
96   - rgn->region[i].base = rgn->region[i + 1].base;
97   - rgn->region[i].size = rgn->region[i + 1].size;
98   - }
99   - rgn->cnt--;
100   -}
101   -
102   -/* Assumption: base addr of region 1 < base addr of region 2 */
103   -static void __init lmb_coalesce_regions(struct lmb_region *rgn,
104   - unsigned long r1, unsigned long r2)
105   -{
106   - rgn->region[r1].size += rgn->region[r2].size;
107   - lmb_remove_region(rgn, r2);
108   -}
109   -
110   -/* This routine called with relocation disabled. */
111   -void __init lmb_init(void)
112   -{
113   - /* Create a dummy zero size LMB which will get coalesced away later.
114   - * This simplifies the lmb_add() code below...
115   - */
116   - lmb.memory.region[0].base = 0;
117   - lmb.memory.region[0].size = 0;
118   - lmb.memory.cnt = 1;
119   -
120   - /* Ditto. */
121   - lmb.reserved.region[0].base = 0;
122   - lmb.reserved.region[0].size = 0;
123   - lmb.reserved.cnt = 1;
124   -}
125   -
126   -/* This routine may be called with relocation disabled. */
127   -void __init lmb_analyze(void)
128   -{
129   - int i;
130   -
131   - lmb.memory.size = 0;
132   -
133   - for (i = 0; i < lmb.memory.cnt; i++)
134   - lmb.memory.size += lmb.memory.region[i].size;
135   -}
136   -
137   -/* This routine called with relocation disabled. */
138   -static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
139   - unsigned long size)
140   -{
141   - unsigned long coalesced = 0;
142   - long adjacent, i;
143   -
144   - /* First try and coalesce this LMB with another. */
145   - for (i=0; i < rgn->cnt; i++) {
146   - unsigned long rgnbase = rgn->region[i].base;
147   - unsigned long rgnsize = rgn->region[i].size;
148   -
149   - if ((rgnbase == base) && (rgnsize == size))
150   - /* Already have this region, so we're done */
151   - return 0;
152   -
153   - adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize);
154   - if ( adjacent > 0 ) {
155   - rgn->region[i].base -= size;
156   - rgn->region[i].size += size;
157   - coalesced++;
158   - break;
159   - }
160   - else if ( adjacent < 0 ) {
161   - rgn->region[i].size += size;
162   - coalesced++;
163   - break;
164   - }
165   - }
166   -
167   - if ((i < rgn->cnt-1) && lmb_regions_adjacent(rgn, i, i+1) ) {
168   - lmb_coalesce_regions(rgn, i, i+1);
169   - coalesced++;
170   - }
171   -
172   - if (coalesced)
173   - return coalesced;
174   - if (rgn->cnt >= MAX_LMB_REGIONS)
175   - return -1;
176   -
177   - /* Couldn't coalesce the LMB, so add it to the sorted table. */
178   - for (i = rgn->cnt-1; i >= 0; i--) {
179   - if (base < rgn->region[i].base) {
180   - rgn->region[i+1].base = rgn->region[i].base;
181   - rgn->region[i+1].size = rgn->region[i].size;
182   - } else {
183   - rgn->region[i+1].base = base;
184   - rgn->region[i+1].size = size;
185   - break;
186   - }
187   - }
188   - rgn->cnt++;
189   -
190   - return 0;
191   -}
192   -
193   -/* This routine may be called with relocation disabled. */
194   -long __init lmb_add(unsigned long base, unsigned long size)
195   -{
196   - struct lmb_region *_rgn = &(lmb.memory);
197   -
198   - /* On pSeries LPAR systems, the first LMB is our RMO region. */
199   - if (base == 0)
200   - lmb.rmo_size = size;
201   -
202   - return lmb_add_region(_rgn, base, size);
203   -
204   -}
205   -
206   -long __init lmb_reserve(unsigned long base, unsigned long size)
207   -{
208   - struct lmb_region *_rgn = &(lmb.reserved);
209   -
210   - BUG_ON(0 == size);
211   -
212   - return lmb_add_region(_rgn, base, size);
213   -}
214   -
215   -long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base,
216   - unsigned long size)
217   -{
218   - unsigned long i;
219   -
220   - for (i=0; i < rgn->cnt; i++) {
221   - unsigned long rgnbase = rgn->region[i].base;
222   - unsigned long rgnsize = rgn->region[i].size;
223   - if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) {
224   - break;
225   - }
226   - }
227   -
228   - return (i < rgn->cnt) ? i : -1;
229   -}
230   -
231   -unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
232   -{
233   - return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
234   -}
235   -
236   -unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
237   - unsigned long max_addr)
238   -{
239   - unsigned long alloc;
240   -
241   - alloc = __lmb_alloc_base(size, align, max_addr);
242   -
243   - if (alloc == 0)
244   - panic("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
245   - size, max_addr);
246   -
247   - return alloc;
248   -}
249   -
250   -unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
251   - unsigned long max_addr)
252   -{
253   - long i, j;
254   - unsigned long base = 0;
255   -
256   - BUG_ON(0 == size);
257   -
258   -#ifdef CONFIG_PPC32
259   - /* On 32-bit, make sure we allocate lowmem */
260   - if (max_addr == LMB_ALLOC_ANYWHERE)
261   - max_addr = __max_low_memory;
262   -#endif
263   - for (i = lmb.memory.cnt-1; i >= 0; i--) {
264   - unsigned long lmbbase = lmb.memory.region[i].base;
265   - unsigned long lmbsize = lmb.memory.region[i].size;
266   -
267   - if (max_addr == LMB_ALLOC_ANYWHERE)
268   - base = _ALIGN_DOWN(lmbbase + lmbsize - size, align);
269   - else if (lmbbase < max_addr) {
270   - base = min(lmbbase + lmbsize, max_addr);
271   - base = _ALIGN_DOWN(base - size, align);
272   - } else
273   - continue;
274   -
275   - while ((lmbbase <= base) &&
276   - ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0) )
277   - base = _ALIGN_DOWN(lmb.reserved.region[j].base - size,
278   - align);
279   -
280   - if ((base != 0) && (lmbbase <= base))
281   - break;
282   - }
283   -
284   - if (i < 0)
285   - return 0;
286   -
287   - lmb_add_region(&lmb.reserved, base, size);
288   -
289   - return base;
290   -}
291   -
292   -/* You must call lmb_analyze() before this. */
293   -unsigned long __init lmb_phys_mem_size(void)
294   -{
295   - return lmb.memory.size;
296   -}
297   -
298   -unsigned long __init lmb_end_of_DRAM(void)
299   -{
300   - int idx = lmb.memory.cnt - 1;
301   -
302   - return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
303   -}
304   -
305   -/* You must call lmb_analyze() after this. */
306   -void __init lmb_enforce_memory_limit(unsigned long memory_limit)
307   -{
308   - unsigned long i, limit;
309   - struct lmb_property *p;
310   -
311   - if (! memory_limit)
312   - return;
313   -
314   - /* Truncate the lmb regions to satisfy the memory limit. */
315   - limit = memory_limit;
316   - for (i = 0; i < lmb.memory.cnt; i++) {
317   - if (limit > lmb.memory.region[i].size) {
318   - limit -= lmb.memory.region[i].size;
319   - continue;
320   - }
321   -
322   - lmb.memory.region[i].size = limit;
323   - lmb.memory.cnt = i + 1;
324   - break;
325   - }
326   -
327   - if (lmb.memory.region[0].size < lmb.rmo_size)
328   - lmb.rmo_size = lmb.memory.region[0].size;
329   -
330   - /* And truncate any reserves above the limit also. */
331   - for (i = 0; i < lmb.reserved.cnt; i++) {
332   - p = &lmb.reserved.region[i];
333   -
334   - if (p->base > memory_limit)
335   - p->size = 0;
336   - else if ((p->base + p->size) > memory_limit)
337   - p->size = memory_limit - p->base;
338   -
339   - if (p->size == 0) {
340   - lmb_remove_region(&lmb.reserved, i);
341   - i--;
342   - }
343   - }
344   -}
345   -
346   -int __init lmb_is_reserved(unsigned long addr)
347   -{
348   - int i;
349   -
350   - for (i = 0; i < lmb.reserved.cnt; i++) {
351   - unsigned long upper = lmb.reserved.region[i].base +
352   - lmb.reserved.region[i].size - 1;
353   - if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
354   - return 1;
355   - }
356   - return 0;
357   -}
arch/powerpc/mm/mem.c
... ... @@ -31,6 +31,7 @@
31 31 #include <linux/initrd.h>
32 32 #include <linux/pagemap.h>
33 33 #include <linux/suspend.h>
  34 +#include <linux/lmb.h>
34 35  
35 36 #include <asm/pgalloc.h>
36 37 #include <asm/prom.h>
... ... @@ -42,7 +43,6 @@
42 43 #include <asm/machdep.h>
43 44 #include <asm/btext.h>
44 45 #include <asm/tlb.h>
45   -#include <asm/lmb.h>
46 46 #include <asm/sections.h>
47 47 #include <asm/vdso.h>
48 48  
arch/powerpc/mm/numa.c
... ... @@ -17,8 +17,9 @@
17 17 #include <linux/nodemask.h>
18 18 #include <linux/cpu.h>
19 19 #include <linux/notifier.h>
  20 +#include <linux/lmb.h>
20 21 #include <asm/sparsemem.h>
21   -#include <asm/lmb.h>
  22 +#include <asm/prom.h>
22 23 #include <asm/system.h>
23 24 #include <asm/smp.h>
24 25  
arch/powerpc/mm/ppc_mmu_32.c
... ... @@ -26,11 +26,11 @@
26 26 #include <linux/mm.h>
27 27 #include <linux/init.h>
28 28 #include <linux/highmem.h>
  29 +#include <linux/lmb.h>
29 30  
30 31 #include <asm/prom.h>
31 32 #include <asm/mmu.h>
32 33 #include <asm/machdep.h>
33   -#include <asm/lmb.h>
34 34  
35 35 #include "mmu_decl.h"
36 36  
arch/powerpc/mm/stab.c
... ... @@ -12,12 +12,14 @@
12 12 * 2 of the License, or (at your option) any later version.
13 13 */
14 14  
  15 +#include <linux/lmb.h>
  16 +
15 17 #include <asm/pgtable.h>
16 18 #include <asm/mmu.h>
17 19 #include <asm/mmu_context.h>
18 20 #include <asm/paca.h>
19 21 #include <asm/cputable.h>
20   -#include <asm/lmb.h>
  22 +#include <asm/prom.h>
21 23 #include <asm/abs_addr.h>
22 24 #include <asm/firmware.h>
23 25 #include <asm/iseries/hv_call.h>
arch/powerpc/platforms/cell/iommu.c
... ... @@ -28,13 +28,13 @@
28 28 #include <linux/notifier.h>
29 29 #include <linux/of.h>
30 30 #include <linux/of_platform.h>
  31 +#include <linux/lmb.h>
31 32  
32 33 #include <asm/prom.h>
33 34 #include <asm/iommu.h>
34 35 #include <asm/machdep.h>
35 36 #include <asm/pci-bridge.h>
36 37 #include <asm/udbg.h>
37   -#include <asm/lmb.h>
38 38 #include <asm/firmware.h>
39 39 #include <asm/cell-regs.h>
40 40  
arch/powerpc/platforms/maple/setup.c
... ... @@ -43,6 +43,7 @@
43 43 #include <linux/smp.h>
44 44 #include <linux/bitops.h>
45 45 #include <linux/of_device.h>
  46 +#include <linux/lmb.h>
46 47  
47 48 #include <asm/processor.h>
48 49 #include <asm/sections.h>
... ... @@ -57,7 +58,6 @@
57 58 #include <asm/dma.h>
58 59 #include <asm/cputable.h>
59 60 #include <asm/time.h>
60   -#include <asm/lmb.h>
61 61 #include <asm/mpic.h>
62 62 #include <asm/rtas.h>
63 63 #include <asm/udbg.h>
arch/powerpc/platforms/powermac/setup.c
... ... @@ -53,6 +53,7 @@
53 53 #include <linux/suspend.h>
54 54 #include <linux/of_device.h>
55 55 #include <linux/of_platform.h>
  56 +#include <linux/lmb.h>
56 57  
57 58 #include <asm/reg.h>
58 59 #include <asm/sections.h>
... ... @@ -74,7 +75,6 @@
74 75 #include <asm/iommu.h>
75 76 #include <asm/smu.h>
76 77 #include <asm/pmc.h>
77   -#include <asm/lmb.h>
78 78 #include <asm/udbg.h>
79 79  
80 80 #include "pmac.h"
arch/powerpc/platforms/ps3/htab.c
... ... @@ -19,9 +19,10 @@
19 19 */
20 20  
21 21 #include <linux/kernel.h>
  22 +#include <linux/lmb.h>
22 23  
23 24 #include <asm/machdep.h>
24   -#include <asm/lmb.h>
  25 +#include <asm/prom.h>
25 26 #include <asm/udbg.h>
26 27 #include <asm/lv1call.h>
27 28 #include <asm/ps3fb.h>
arch/powerpc/platforms/ps3/mm.c
... ... @@ -21,9 +21,10 @@
21 21 #include <linux/kernel.h>
22 22 #include <linux/module.h>
23 23 #include <linux/memory_hotplug.h>
  24 +#include <linux/lmb.h>
24 25  
25 26 #include <asm/firmware.h>
26   -#include <asm/lmb.h>
  27 +#include <asm/prom.h>
27 28 #include <asm/udbg.h>
28 29 #include <asm/lv1call.h>
29 30  
arch/powerpc/platforms/ps3/os-area.c
... ... @@ -24,8 +24,9 @@
24 24 #include <linux/fs.h>
25 25 #include <linux/syscalls.h>
26 26 #include <linux/ctype.h>
  27 +#include <linux/lmb.h>
27 28  
28   -#include <asm/lmb.h>
  29 +#include <asm/prom.h>
29 30  
30 31 #include "platform.h"
31 32  
arch/powerpc/sysdev/dart_iommu.c
... ... @@ -37,6 +37,7 @@
37 37 #include <linux/dma-mapping.h>
38 38 #include <linux/vmalloc.h>
39 39 #include <linux/suspend.h>
  40 +#include <linux/lmb.h>
40 41 #include <asm/io.h>
41 42 #include <asm/prom.h>
42 43 #include <asm/iommu.h>
... ... @@ -44,7 +45,6 @@
44 45 #include <asm/machdep.h>
45 46 #include <asm/abs_addr.h>
46 47 #include <asm/cacheflush.h>
47   -#include <asm/lmb.h>
48 48 #include <asm/ppc-pci.h>
49 49  
50 50 #include "dart.h"
arch/sparc64/Kconfig
... ... @@ -15,6 +15,7 @@
15 15 bool
16 16 default y
17 17 select HAVE_IDE
  18 + select HAVE_LMB
18 19 help
19 20 SPARC is a family of RISC microprocessors designed and marketed by
20 21 Sun Microsystems, incorporated. This port covers the newer 64-bit
include/asm-powerpc/abs_addr.h
... ... @@ -12,10 +12,11 @@
12 12 * 2 of the License, or (at your option) any later version.
13 13 */
14 14  
  15 +#include <linux/lmb.h>
  16 +
15 17 #include <asm/types.h>
16 18 #include <asm/page.h>
17 19 #include <asm/prom.h>
18   -#include <asm/lmb.h>
19 20 #include <asm/firmware.h>
20 21  
21 22 struct mschunks_map {
include/asm-powerpc/lmb.h
1 1 #ifndef _ASM_POWERPC_LMB_H
2 2 #define _ASM_POWERPC_LMB_H
3   -#ifdef __KERNEL__
4 3  
5   -/*
6   - * Definitions for talking to the Open Firmware PROM on
7   - * Power Macintosh computers.
8   - *
9   - * Copyright (C) 2001 Peter Bergner, IBM Corp.
10   - *
11   - * This program is free software; you can redistribute it and/or
12   - * modify it under the terms of the GNU General Public License
13   - * as published by the Free Software Foundation; either version
14   - * 2 of the License, or (at your option) any later version.
15   - */
  4 +#include <asm/udbg.h>
16 5  
17   -#include <linux/init.h>
18   -#include <asm/prom.h>
  6 +#define LMB_DBG(fmt...) udbg_printf(fmt)
19 7  
20   -#define MAX_LMB_REGIONS 128
  8 +#ifdef CONFIG_PPC32
  9 +extern unsigned long __max_low_memory;
  10 +#define LMB_REAL_LIMIT __max_low_memory
  11 +#else
  12 +#define LMB_REAL_LIMIT 0
  13 +#endif
21 14  
22   -struct lmb_property {
23   - unsigned long base;
24   - unsigned long size;
25   -};
26   -
27   -struct lmb_region {
28   - unsigned long cnt;
29   - unsigned long size;
30   - struct lmb_property region[MAX_LMB_REGIONS+1];
31   -};
32   -
33   -struct lmb {
34   - unsigned long debug;
35   - unsigned long rmo_size;
36   - struct lmb_region memory;
37   - struct lmb_region reserved;
38   -};
39   -
40   -extern struct lmb lmb;
41   -
42   -extern void __init lmb_init(void);
43   -extern void __init lmb_analyze(void);
44   -extern long __init lmb_add(unsigned long base, unsigned long size);
45   -extern long __init lmb_reserve(unsigned long base, unsigned long size);
46   -extern unsigned long __init lmb_alloc(unsigned long size, unsigned long align);
47   -extern unsigned long __init lmb_alloc_base(unsigned long size,
48   - unsigned long align, unsigned long max_addr);
49   -extern unsigned long __init __lmb_alloc_base(unsigned long size,
50   - unsigned long align, unsigned long max_addr);
51   -extern unsigned long __init lmb_phys_mem_size(void);
52   -extern unsigned long __init lmb_end_of_DRAM(void);
53   -extern void __init lmb_enforce_memory_limit(unsigned long memory_limit);
54   -extern int __init lmb_is_reserved(unsigned long addr);
55   -
56   -extern void lmb_dump_all(void);
57   -
58   -static inline unsigned long
59   -lmb_size_bytes(struct lmb_region *type, unsigned long region_nr)
60   -{
61   - return type->region[region_nr].size;
62   -}
63   -static inline unsigned long
64   -lmb_size_pages(struct lmb_region *type, unsigned long region_nr)
65   -{
66   - return lmb_size_bytes(type, region_nr) >> PAGE_SHIFT;
67   -}
68   -static inline unsigned long
69   -lmb_start_pfn(struct lmb_region *type, unsigned long region_nr)
70   -{
71   - return type->region[region_nr].base >> PAGE_SHIFT;
72   -}
73   -static inline unsigned long
74   -lmb_end_pfn(struct lmb_region *type, unsigned long region_nr)
75   -{
76   - return lmb_start_pfn(type, region_nr) +
77   - lmb_size_pages(type, region_nr);
78   -}
79   -
80   -#endif /* __KERNEL__ */
81 15 #endif /* _ASM_POWERPC_LMB_H */
include/asm-sparc64/lmb.h
  1 +#ifndef _SPARC64_LMB_H
  2 +#define _SPARC64_LMB_H
  3 +
  4 +#include <asm/oplib.h>
  5 +
  6 +#define LMB_DBG(fmt...) prom_printf(fmt)
  7 +
  8 +#define LMB_REAL_LIMIT 0
  9 +
  10 +#endif /* !(_SPARC64_LMB_H) */
include/linux/lmb.h
  1 +#ifndef _LINUX_LMB_H
  2 +#define _LINUX_LMB_H
  3 +#ifdef __KERNEL__
  4 +
  5 +/*
  6 + * Logical memory blocks.
  7 + *
  8 + * Copyright (C) 2001 Peter Bergner, IBM Corp.
  9 + *
  10 + * This program is free software; you can redistribute it and/or
  11 + * modify it under the terms of the GNU General Public License
  12 + * as published by the Free Software Foundation; either version
  13 + * 2 of the License, or (at your option) any later version.
  14 + */
  15 +
  16 +#include <linux/init.h>
  17 +#include <linux/mm.h>
  18 +
  19 +#define MAX_LMB_REGIONS 128
  20 +
  21 +struct lmb_property {
  22 + unsigned long base;
  23 + unsigned long size;
  24 +};
  25 +
  26 +struct lmb_region {
  27 + unsigned long cnt;
  28 + unsigned long size;
  29 + struct lmb_property region[MAX_LMB_REGIONS+1];
  30 +};
  31 +
  32 +struct lmb {
  33 + unsigned long debug;
  34 + unsigned long rmo_size;
  35 + struct lmb_region memory;
  36 + struct lmb_region reserved;
  37 +};
  38 +
  39 +extern struct lmb lmb;
  40 +
  41 +extern void __init lmb_init(void);
  42 +extern void __init lmb_analyze(void);
  43 +extern long __init lmb_add(unsigned long base, unsigned long size);
  44 +extern long __init lmb_reserve(unsigned long base, unsigned long size);
  45 +extern unsigned long __init lmb_alloc(unsigned long size, unsigned long align);
  46 +extern unsigned long __init lmb_alloc_base(unsigned long size,
  47 + unsigned long align, unsigned long max_addr);
  48 +extern unsigned long __init __lmb_alloc_base(unsigned long size,
  49 + unsigned long align, unsigned long max_addr);
  50 +extern unsigned long __init lmb_phys_mem_size(void);
  51 +extern unsigned long __init lmb_end_of_DRAM(void);
  52 +extern void __init lmb_enforce_memory_limit(unsigned long memory_limit);
  53 +extern int __init lmb_is_reserved(unsigned long addr);
  54 +
  55 +extern void lmb_dump_all(void);
  56 +
  57 +static inline unsigned long
  58 +lmb_size_bytes(struct lmb_region *type, unsigned long region_nr)
  59 +{
  60 + return type->region[region_nr].size;
  61 +}
  62 +static inline unsigned long
  63 +lmb_size_pages(struct lmb_region *type, unsigned long region_nr)
  64 +{
  65 + return lmb_size_bytes(type, region_nr) >> PAGE_SHIFT;
  66 +}
  67 +static inline unsigned long
  68 +lmb_start_pfn(struct lmb_region *type, unsigned long region_nr)
  69 +{
  70 + return type->region[region_nr].base >> PAGE_SHIFT;
  71 +}
  72 +static inline unsigned long
  73 +lmb_end_pfn(struct lmb_region *type, unsigned long region_nr)
  74 +{
  75 + return lmb_start_pfn(type, region_nr) +
  76 + lmb_size_pages(type, region_nr);
  77 +}
  78 +
  79 +#include <asm/lmb.h>
  80 +
  81 +#endif /* __KERNEL__ */
  82 +
  83 +#endif /* _LINUX_LMB_H */
lib/Kconfig
... ... @@ -141,5 +141,8 @@
141 141 config CHECK_SIGNATURE
142 142 bool
143 143  
  144 +config HAVE_LMB
  145 + boolean
  146 +
144 147 endmenu
lib/Makefile
... ... @@ -70,6 +70,8 @@
70 70  
71 71 lib-$(CONFIG_GENERIC_BUG) += bug.o
72 72  
  73 +obj-$(CONFIG_HAVE_LMB) += lmb.o
  74 +
73 75 hostprogs-y := gen_crc32table
74 76 clean-files := crc32table.h
75 77  
lib/lmb.c
  1 +/*
  2 + * Procedures for maintaining information about logical memory blocks.
  3 + *
  4 + * Peter Bergner, IBM Corp. June 2001.
  5 + * Copyright (C) 2001 Peter Bergner.
  6 + *
  7 + * This program is free software; you can redistribute it and/or
  8 + * modify it under the terms of the GNU General Public License
  9 + * as published by the Free Software Foundation; either version
  10 + * 2 of the License, or (at your option) any later version.
  11 + */
  12 +
  13 +#include <linux/kernel.h>
  14 +#include <linux/init.h>
  15 +#include <linux/bitops.h>
  16 +#include <linux/lmb.h>
  17 +
  18 +#undef DEBUG
  19 +
  20 +#ifdef DEBUG
  21 +#define DBG(fmt...) LMB_DBG(fmt)
  22 +#else
  23 +#define DBG(fmt...)
  24 +#endif
  25 +
  26 +#define LMB_ALLOC_ANYWHERE 0
  27 +
  28 +struct lmb lmb;
  29 +
  30 +void lmb_dump_all(void)
  31 +{
  32 +#ifdef DEBUG
  33 + unsigned long i;
  34 +
  35 + DBG("lmb_dump_all:\n");
  36 + DBG(" memory.cnt = 0x%lx\n", lmb.memory.cnt);
  37 + DBG(" memory.size = 0x%lx\n", lmb.memory.size);
  38 + for (i=0; i < lmb.memory.cnt ;i++) {
  39 + DBG(" memory.region[0x%x].base = 0x%lx\n",
  40 + i, lmb.memory.region[i].base);
  41 + DBG(" .size = 0x%lx\n",
  42 + lmb.memory.region[i].size);
  43 + }
  44 +
  45 + DBG("\n reserved.cnt = 0x%lx\n", lmb.reserved.cnt);
  46 + DBG(" reserved.size = 0x%lx\n", lmb.reserved.size);
  47 + for (i=0; i < lmb.reserved.cnt ;i++) {
  48 + DBG(" reserved.region[0x%x].base = 0x%lx\n",
  49 + i, lmb.reserved.region[i].base);
  50 + DBG(" .size = 0x%lx\n",
  51 + lmb.reserved.region[i].size);
  52 + }
  53 +#endif /* DEBUG */
  54 +}
  55 +
  56 +static unsigned long __init lmb_addrs_overlap(unsigned long base1,
  57 + unsigned long size1, unsigned long base2, unsigned long size2)
  58 +{
  59 + return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
  60 +}
  61 +
  62 +static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
  63 + unsigned long base2, unsigned long size2)
  64 +{
  65 + if (base2 == base1 + size1)
  66 + return 1;
  67 + else if (base1 == base2 + size2)
  68 + return -1;
  69 +
  70 + return 0;
  71 +}
  72 +
  73 +static long __init lmb_regions_adjacent(struct lmb_region *rgn,
  74 + unsigned long r1, unsigned long r2)
  75 +{
  76 + unsigned long base1 = rgn->region[r1].base;
  77 + unsigned long size1 = rgn->region[r1].size;
  78 + unsigned long base2 = rgn->region[r2].base;
  79 + unsigned long size2 = rgn->region[r2].size;
  80 +
  81 + return lmb_addrs_adjacent(base1, size1, base2, size2);
  82 +}
  83 +
  84 +static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
  85 +{
  86 + unsigned long i;
  87 +
  88 + for (i = r; i < rgn->cnt - 1; i++) {
  89 + rgn->region[i].base = rgn->region[i + 1].base;
  90 + rgn->region[i].size = rgn->region[i + 1].size;
  91 + }
  92 + rgn->cnt--;
  93 +}
  94 +
  95 +/* Assumption: base addr of region 1 < base addr of region 2 */
  96 +static void __init lmb_coalesce_regions(struct lmb_region *rgn,
  97 + unsigned long r1, unsigned long r2)
  98 +{
  99 + rgn->region[r1].size += rgn->region[r2].size;
  100 + lmb_remove_region(rgn, r2);
  101 +}
  102 +
  103 +/* This routine called with relocation disabled. */
  104 +void __init lmb_init(void)
  105 +{
  106 + /* Create a dummy zero size LMB which will get coalesced away later.
  107 + * This simplifies the lmb_add() code below...
  108 + */
  109 + lmb.memory.region[0].base = 0;
  110 + lmb.memory.region[0].size = 0;
  111 + lmb.memory.cnt = 1;
  112 +
  113 + /* Ditto. */
  114 + lmb.reserved.region[0].base = 0;
  115 + lmb.reserved.region[0].size = 0;
  116 + lmb.reserved.cnt = 1;
  117 +}
  118 +
  119 +/* This routine may be called with relocation disabled. */
  120 +void __init lmb_analyze(void)
  121 +{
  122 + int i;
  123 +
  124 + lmb.memory.size = 0;
  125 +
  126 + for (i = 0; i < lmb.memory.cnt; i++)
  127 + lmb.memory.size += lmb.memory.region[i].size;
  128 +}
  129 +
  130 +/* This routine called with relocation disabled. */
  131 +static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
  132 + unsigned long size)
  133 +{
  134 + unsigned long coalesced = 0;
  135 + long adjacent, i;
  136 +
  137 + /* First try and coalesce this LMB with another. */
  138 + for (i=0; i < rgn->cnt; i++) {
  139 + unsigned long rgnbase = rgn->region[i].base;
  140 + unsigned long rgnsize = rgn->region[i].size;
  141 +
  142 + if ((rgnbase == base) && (rgnsize == size))
  143 + /* Already have this region, so we're done */
  144 + return 0;
  145 +
  146 + adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize);
  147 + if ( adjacent > 0 ) {
  148 + rgn->region[i].base -= size;
  149 + rgn->region[i].size += size;
  150 + coalesced++;
  151 + break;
  152 + }
  153 + else if ( adjacent < 0 ) {
  154 + rgn->region[i].size += size;
  155 + coalesced++;
  156 + break;
  157 + }
  158 + }
  159 +
  160 + if ((i < rgn->cnt-1) && lmb_regions_adjacent(rgn, i, i+1) ) {
  161 + lmb_coalesce_regions(rgn, i, i+1);
  162 + coalesced++;
  163 + }
  164 +
  165 + if (coalesced)
  166 + return coalesced;
  167 + if (rgn->cnt >= MAX_LMB_REGIONS)
  168 + return -1;
  169 +
  170 + /* Couldn't coalesce the LMB, so add it to the sorted table. */
  171 + for (i = rgn->cnt-1; i >= 0; i--) {
  172 + if (base < rgn->region[i].base) {
  173 + rgn->region[i+1].base = rgn->region[i].base;
  174 + rgn->region[i+1].size = rgn->region[i].size;
  175 + } else {
  176 + rgn->region[i+1].base = base;
  177 + rgn->region[i+1].size = size;
  178 + break;
  179 + }
  180 + }
  181 + rgn->cnt++;
  182 +
  183 + return 0;
  184 +}
  185 +
  186 +/* This routine may be called with relocation disabled. */
  187 +long __init lmb_add(unsigned long base, unsigned long size)
  188 +{
  189 + struct lmb_region *_rgn = &(lmb.memory);
  190 +
  191 + /* On pSeries LPAR systems, the first LMB is our RMO region. */
  192 + if (base == 0)
  193 + lmb.rmo_size = size;
  194 +
  195 + return lmb_add_region(_rgn, base, size);
  196 +
  197 +}
  198 +
  199 +long __init lmb_reserve(unsigned long base, unsigned long size)
  200 +{
  201 + struct lmb_region *_rgn = &(lmb.reserved);
  202 +
  203 + BUG_ON(0 == size);
  204 +
  205 + return lmb_add_region(_rgn, base, size);
  206 +}
  207 +
  208 +long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base,
  209 + unsigned long size)
  210 +{
  211 + unsigned long i;
  212 +
  213 + for (i=0; i < rgn->cnt; i++) {
  214 + unsigned long rgnbase = rgn->region[i].base;
  215 + unsigned long rgnsize = rgn->region[i].size;
  216 + if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) {
  217 + break;
  218 + }
  219 + }
  220 +
  221 + return (i < rgn->cnt) ? i : -1;
  222 +}
  223 +
  224 +unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
  225 +{
  226 + return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
  227 +}
  228 +
  229 +unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
  230 + unsigned long max_addr)
  231 +{
  232 + unsigned long alloc;
  233 +
  234 + alloc = __lmb_alloc_base(size, align, max_addr);
  235 +
  236 + if (alloc == 0)
  237 + panic("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
  238 + size, max_addr);
  239 +
  240 + return alloc;
  241 +}
  242 +
/* Round @addr down to a multiple of @size (@size must be a power of two). */
static unsigned long lmb_align_down(unsigned long addr, unsigned long size)
{
	unsigned long mask = size - 1;

	return addr & ~mask;
}
  247 +
  248 +unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
  249 + unsigned long max_addr)
  250 +{
  251 + long i, j;
  252 + unsigned long base = 0;
  253 +
  254 + BUG_ON(0 == size);
  255 +
  256 + /* On some platforms, make sure we allocate lowmem */
  257 + if (max_addr == LMB_ALLOC_ANYWHERE)
  258 + max_addr = LMB_REAL_LIMIT;
  259 +
  260 + for (i = lmb.memory.cnt-1; i >= 0; i--) {
  261 + unsigned long lmbbase = lmb.memory.region[i].base;
  262 + unsigned long lmbsize = lmb.memory.region[i].size;
  263 +
  264 + if (max_addr == LMB_ALLOC_ANYWHERE)
  265 + base = lmb_align_down(lmbbase + lmbsize - size, align);
  266 + else if (lmbbase < max_addr) {
  267 + base = min(lmbbase + lmbsize, max_addr);
  268 + base = lmb_align_down(base - size, align);
  269 + } else
  270 + continue;
  271 +
  272 + while ((lmbbase <= base) &&
  273 + ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0) )
  274 + base = lmb_align_down(lmb.reserved.region[j].base - size,
  275 + align);
  276 +
  277 + if ((base != 0) && (lmbbase <= base))
  278 + break;
  279 + }
  280 +
  281 + if (i < 0)
  282 + return 0;
  283 +
  284 + lmb_add_region(&lmb.reserved, base, size);
  285 +
  286 + return base;
  287 +}
  288 +
  289 +/* You must call lmb_analyze() before this. */
  290 +unsigned long __init lmb_phys_mem_size(void)
  291 +{
  292 + return lmb.memory.size;
  293 +}
  294 +
  295 +unsigned long __init lmb_end_of_DRAM(void)
  296 +{
  297 + int idx = lmb.memory.cnt - 1;
  298 +
  299 + return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
  300 +}
  301 +
  302 +/* You must call lmb_analyze() after this. */
  303 +void __init lmb_enforce_memory_limit(unsigned long memory_limit)
  304 +{
  305 + unsigned long i, limit;
  306 + struct lmb_property *p;
  307 +
  308 + if (! memory_limit)
  309 + return;
  310 +
  311 + /* Truncate the lmb regions to satisfy the memory limit. */
  312 + limit = memory_limit;
  313 + for (i = 0; i < lmb.memory.cnt; i++) {
  314 + if (limit > lmb.memory.region[i].size) {
  315 + limit -= lmb.memory.region[i].size;
  316 + continue;
  317 + }
  318 +
  319 + lmb.memory.region[i].size = limit;
  320 + lmb.memory.cnt = i + 1;
  321 + break;
  322 + }
  323 +
  324 + if (lmb.memory.region[0].size < lmb.rmo_size)
  325 + lmb.rmo_size = lmb.memory.region[0].size;
  326 +
  327 + /* And truncate any reserves above the limit also. */
  328 + for (i = 0; i < lmb.reserved.cnt; i++) {
  329 + p = &lmb.reserved.region[i];
  330 +
  331 + if (p->base > memory_limit)
  332 + p->size = 0;
  333 + else if ((p->base + p->size) > memory_limit)
  334 + p->size = memory_limit - p->base;
  335 +
  336 + if (p->size == 0) {
  337 + lmb_remove_region(&lmb.reserved, i);
  338 + i--;
  339 + }
  340 + }
  341 +}
  342 +
  343 +int __init lmb_is_reserved(unsigned long addr)
  344 +{
  345 + int i;
  346 +
  347 + for (i = 0; i < lmb.reserved.cnt; i++) {
  348 + unsigned long upper = lmb.reserved.region[i].base +
  349 + lmb.reserved.region[i].size - 1;
  350 + if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
  351 + return 1;
  352 + }
  353 + return 0;
  354 +}