Commit 6bd55f0bbaebb79b39e147aa864401fd0c94db82

Authored by Michal Simek
Committed by Michal Simek
Parent: 5b3084b582

microblaze: Fix coding style issues

Fix coding style issues reported by checkpatch.pl.

Signed-off-by: Michal Simek <monstr@monstr.eu>

Showing 32 changed files with 259 additions and 294 deletions
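
The diff below is large but mechanical; most hunks fall into three checkpatch.pl categories: inline asm rewritten from backslash line continuations into one quoted string literal per instruction, do { } while (0) macro bodies losing their trailing semicolon, and the local INFO() printk wrapper in cache.c replaced by pr_info(). The trailing-semicolon fix is the subtlest of the three; a minimal standalone sketch (not code from this commit) shows what it protects against:

	#include <stdio.h>

	/* Before (as in the old CACHE_LOOP_LIMITS and friends): the
	 * macro supplies its own semicolon, so the expansion is two
	 * statements and an if/else around it fails to compile. */
	#define OLD_STEP(x) do { printf("%d\n", (x)); } while (0);

	/* After: the caller supplies the semicolon, so the macro
	 * behaves like a single statement everywhere. */
	#define NEW_STEP(x) do { printf("%d\n", (x)); } while (0)

	int main(void)
	{
		int flag = 1;

		if (flag)
			NEW_STEP(1);	/* caller's ';' closes the statement */
		else
			NEW_STEP(2);

		/* Using OLD_STEP above would expand to "};", ending the
		 * if early and leaving the else with no matching if. */
		return 0;
	}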

arch/microblaze/include/asm/io.h
1 /* 1 /*
2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> 2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2007-2009 PetaLogix 3 * Copyright (C) 2007-2009 PetaLogix
4 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
5 * 5 *
6 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive 7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details. 8 * for more details.
9 */ 9 */
10 10
11 #ifndef _ASM_MICROBLAZE_IO_H 11 #ifndef _ASM_MICROBLAZE_IO_H
12 #define _ASM_MICROBLAZE_IO_H 12 #define _ASM_MICROBLAZE_IO_H
13 13
14 #include <asm/byteorder.h> 14 #include <asm/byteorder.h>
15 #include <asm/page.h> 15 #include <asm/page.h>
16 #include <linux/types.h> 16 #include <linux/types.h>
17 #include <linux/mm.h> /* Get struct page {...} */ 17 #include <linux/mm.h> /* Get struct page {...} */
18 #include <asm-generic/iomap.h> 18 #include <asm-generic/iomap.h>
19 19
20 #ifndef CONFIG_PCI 20 #ifndef CONFIG_PCI
21 #define _IO_BASE 0 21 #define _IO_BASE 0
22 #define _ISA_MEM_BASE 0 22 #define _ISA_MEM_BASE 0
23 #define PCI_DRAM_OFFSET 0 23 #define PCI_DRAM_OFFSET 0
24 #else 24 #else
25 #define _IO_BASE isa_io_base 25 #define _IO_BASE isa_io_base
26 #define _ISA_MEM_BASE isa_mem_base 26 #define _ISA_MEM_BASE isa_mem_base
27 #define PCI_DRAM_OFFSET pci_dram_offset 27 #define PCI_DRAM_OFFSET pci_dram_offset
28 #endif 28 #endif
29 29
30 extern unsigned long isa_io_base; 30 extern unsigned long isa_io_base;
31 extern unsigned long pci_io_base; 31 extern unsigned long pci_io_base;
32 extern unsigned long pci_dram_offset; 32 extern unsigned long pci_dram_offset;
33 33
34 extern resource_size_t isa_mem_base; 34 extern resource_size_t isa_mem_base;
35 35
36 #define IO_SPACE_LIMIT (0xFFFFFFFF) 36 #define IO_SPACE_LIMIT (0xFFFFFFFF)
37 37
38 /* the following is needed to support PCI with some drivers */ 38 /* the following is needed to support PCI with some drivers */
39 39
40 #define mmiowb() 40 #define mmiowb()
41 41
42 static inline unsigned char __raw_readb(const volatile void __iomem *addr) 42 static inline unsigned char __raw_readb(const volatile void __iomem *addr)
43 { 43 {
44 return *(volatile unsigned char __force *)addr; 44 return *(volatile unsigned char __force *)addr;
45 } 45 }
46 static inline unsigned short __raw_readw(const volatile void __iomem *addr) 46 static inline unsigned short __raw_readw(const volatile void __iomem *addr)
47 { 47 {
48 return *(volatile unsigned short __force *)addr; 48 return *(volatile unsigned short __force *)addr;
49 } 49 }
50 static inline unsigned int __raw_readl(const volatile void __iomem *addr) 50 static inline unsigned int __raw_readl(const volatile void __iomem *addr)
51 { 51 {
52 return *(volatile unsigned int __force *)addr; 52 return *(volatile unsigned int __force *)addr;
53 } 53 }
54 static inline unsigned long __raw_readq(const volatile void __iomem *addr) 54 static inline unsigned long __raw_readq(const volatile void __iomem *addr)
55 { 55 {
56 return *(volatile unsigned long __force *)addr; 56 return *(volatile unsigned long __force *)addr;
57 } 57 }
58 static inline void __raw_writeb(unsigned char v, volatile void __iomem *addr) 58 static inline void __raw_writeb(unsigned char v, volatile void __iomem *addr)
59 { 59 {
60 *(volatile unsigned char __force *)addr = v; 60 *(volatile unsigned char __force *)addr = v;
61 } 61 }
62 static inline void __raw_writew(unsigned short v, volatile void __iomem *addr) 62 static inline void __raw_writew(unsigned short v, volatile void __iomem *addr)
63 { 63 {
64 *(volatile unsigned short __force *)addr = v; 64 *(volatile unsigned short __force *)addr = v;
65 } 65 }
66 static inline void __raw_writel(unsigned int v, volatile void __iomem *addr) 66 static inline void __raw_writel(unsigned int v, volatile void __iomem *addr)
67 { 67 {
68 *(volatile unsigned int __force *)addr = v; 68 *(volatile unsigned int __force *)addr = v;
69 } 69 }
70 static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr) 70 static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr)
71 { 71 {
72 *(volatile unsigned long __force *)addr = v; 72 *(volatile unsigned long __force *)addr = v;
73 } 73 }
74 74
75 /* 75 /*
76 * read (readb, readw, readl, readq) and write (writeb, writew, 76 * read (readb, readw, readl, readq) and write (writeb, writew,
77 * writel, writeq) accessors are for PCI and thus little endian. 77 * writel, writeq) accessors are for PCI and thus little endian.
78 * Linux 2.4 for Microblaze had this wrong. 78 * Linux 2.4 for Microblaze had this wrong.
79 */ 79 */
80 static inline unsigned char readb(const volatile void __iomem *addr) 80 static inline unsigned char readb(const volatile void __iomem *addr)
81 { 81 {
82 return *(volatile unsigned char __force *)addr; 82 return *(volatile unsigned char __force *)addr;
83 } 83 }
84 static inline unsigned short readw(const volatile void __iomem *addr) 84 static inline unsigned short readw(const volatile void __iomem *addr)
85 { 85 {
86 return le16_to_cpu(*(volatile unsigned short __force *)addr); 86 return le16_to_cpu(*(volatile unsigned short __force *)addr);
87 } 87 }
88 static inline unsigned int readl(const volatile void __iomem *addr) 88 static inline unsigned int readl(const volatile void __iomem *addr)
89 { 89 {
90 return le32_to_cpu(*(volatile unsigned int __force *)addr); 90 return le32_to_cpu(*(volatile unsigned int __force *)addr);
91 } 91 }
92 static inline void writeb(unsigned char v, volatile void __iomem *addr) 92 static inline void writeb(unsigned char v, volatile void __iomem *addr)
93 { 93 {
94 *(volatile unsigned char __force *)addr = v; 94 *(volatile unsigned char __force *)addr = v;
95 } 95 }
96 static inline void writew(unsigned short v, volatile void __iomem *addr) 96 static inline void writew(unsigned short v, volatile void __iomem *addr)
97 { 97 {
98 *(volatile unsigned short __force *)addr = cpu_to_le16(v); 98 *(volatile unsigned short __force *)addr = cpu_to_le16(v);
99 } 99 }
100 static inline void writel(unsigned int v, volatile void __iomem *addr) 100 static inline void writel(unsigned int v, volatile void __iomem *addr)
101 { 101 {
102 *(volatile unsigned int __force *)addr = cpu_to_le32(v); 102 *(volatile unsigned int __force *)addr = cpu_to_le32(v);
103 } 103 }
104 104
105 /* ioread and iowrite variants. thease are for now same as __raw_ 105 /* ioread and iowrite variants. thease are for now same as __raw_
106 * variants of accessors. we might check for endianess in the feature 106 * variants of accessors. we might check for endianess in the feature
107 */ 107 */
108 #define ioread8(addr) __raw_readb((u8 *)(addr)) 108 #define ioread8(addr) __raw_readb((u8 *)(addr))
109 #define ioread16(addr) __raw_readw((u16 *)(addr)) 109 #define ioread16(addr) __raw_readw((u16 *)(addr))
110 #define ioread32(addr) __raw_readl((u32 *)(addr)) 110 #define ioread32(addr) __raw_readl((u32 *)(addr))
111 #define iowrite8(v, addr) __raw_writeb((u8)(v), (u8 *)(addr)) 111 #define iowrite8(v, addr) __raw_writeb((u8)(v), (u8 *)(addr))
112 #define iowrite16(v, addr) __raw_writew((u16)(v), (u16 *)(addr)) 112 #define iowrite16(v, addr) __raw_writew((u16)(v), (u16 *)(addr))
113 #define iowrite32(v, addr) __raw_writel((u32)(v), (u32 *)(addr)) 113 #define iowrite32(v, addr) __raw_writel((u32)(v), (u32 *)(addr))
114 114
115 #define ioread16be(addr) __raw_readw((u16 *)(addr)) 115 #define ioread16be(addr) __raw_readw((u16 *)(addr))
116 #define ioread32be(addr) __raw_readl((u32 *)(addr)) 116 #define ioread32be(addr) __raw_readl((u32 *)(addr))
117 #define iowrite16be(v, addr) __raw_writew((u16)(v), (u16 *)(addr)) 117 #define iowrite16be(v, addr) __raw_writew((u16)(v), (u16 *)(addr))
118 #define iowrite32be(v, addr) __raw_writel((u32)(v), (u32 *)(addr)) 118 #define iowrite32be(v, addr) __raw_writel((u32)(v), (u32 *)(addr))
119 119
120 /* These are the definitions for the x86 IO instructions 120 /* These are the definitions for the x86 IO instructions
121 * inb/inw/inl/outb/outw/outl, the "string" versions 121 * inb/inw/inl/outb/outw/outl, the "string" versions
122 * insb/insw/insl/outsb/outsw/outsl, and the "pausing" versions 122 * insb/insw/insl/outsb/outsw/outsl, and the "pausing" versions
123 * inb_p/inw_p/... 123 * inb_p/inw_p/...
124 * The macros don't do byte-swapping. 124 * The macros don't do byte-swapping.
125 */ 125 */
126 #define inb(port) readb((u8 *)((port))) 126 #define inb(port) readb((u8 *)((port)))
127 #define outb(val, port) writeb((val), (u8 *)((unsigned long)(port))) 127 #define outb(val, port) writeb((val), (u8 *)((unsigned long)(port)))
128 #define inw(port) readw((u16 *)((port))) 128 #define inw(port) readw((u16 *)((port)))
129 #define outw(val, port) writew((val), (u16 *)((unsigned long)(port))) 129 #define outw(val, port) writew((val), (u16 *)((unsigned long)(port)))
130 #define inl(port) readl((u32 *)((port))) 130 #define inl(port) readl((u32 *)((port)))
131 #define outl(val, port) writel((val), (u32 *)((unsigned long)(port))) 131 #define outl(val, port) writel((val), (u32 *)((unsigned long)(port)))
132 132
133 #define inb_p(port) inb((port)) 133 #define inb_p(port) inb((port))
134 #define outb_p(val, port) outb((val), (port)) 134 #define outb_p(val, port) outb((val), (port))
135 #define inw_p(port) inw((port)) 135 #define inw_p(port) inw((port))
136 #define outw_p(val, port) outw((val), (port)) 136 #define outw_p(val, port) outw((val), (port))
137 #define inl_p(port) inl((port)) 137 #define inl_p(port) inl((port))
138 #define outl_p(val, port) outl((val), (port)) 138 #define outl_p(val, port) outl((val), (port))
139 139
140 #define memset_io(a, b, c) memset((void *)(a), (b), (c)) 140 #define memset_io(a, b, c) memset((void *)(a), (b), (c))
141 #define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c)) 141 #define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
142 #define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c)) 142 #define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
143 143
144 #ifdef CONFIG_MMU 144 #ifdef CONFIG_MMU
145 145
146 #define phys_to_virt(addr) ((void *)__phys_to_virt(addr)) 146 #define phys_to_virt(addr) ((void *)__phys_to_virt(addr))
147 #define virt_to_phys(addr) ((unsigned long)__virt_to_phys(addr)) 147 #define virt_to_phys(addr) ((unsigned long)__virt_to_phys(addr))
148 #define virt_to_bus(addr) ((unsigned long)__virt_to_phys(addr)) 148 #define virt_to_bus(addr) ((unsigned long)__virt_to_phys(addr))
149 149
150 #define page_to_bus(page) (page_to_phys(page)) 150 #define page_to_bus(page) (page_to_phys(page))
151 #define bus_to_virt(addr) (phys_to_virt(addr)) 151 #define bus_to_virt(addr) (phys_to_virt(addr))
152 152
153 extern void iounmap(void *addr); 153 extern void iounmap(void __iomem *addr);
154 /*extern void *__ioremap(phys_addr_t address, unsigned long size, 154 /*extern void *__ioremap(phys_addr_t address, unsigned long size,
155 unsigned long flags);*/ 155 unsigned long flags);*/
156 extern void __iomem *ioremap(phys_addr_t address, unsigned long size); 156 extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
157 #define ioremap_writethrough(addr, size) ioremap((addr), (size)) 157 #define ioremap_writethrough(addr, size) ioremap((addr), (size))
158 #define ioremap_nocache(addr, size) ioremap((addr), (size)) 158 #define ioremap_nocache(addr, size) ioremap((addr), (size))
159 #define ioremap_fullcache(addr, size) ioremap((addr), (size)) 159 #define ioremap_fullcache(addr, size) ioremap((addr), (size))
160 160
161 #else /* CONFIG_MMU */ 161 #else /* CONFIG_MMU */
162 162
163 /** 163 /**
164 * virt_to_phys - map virtual addresses to physical 164 * virt_to_phys - map virtual addresses to physical
165 * @address: address to remap 165 * @address: address to remap
166 * 166 *
167 * The returned physical address is the physical (CPU) mapping for 167 * The returned physical address is the physical (CPU) mapping for
168 * the memory address given. It is only valid to use this function on 168 * the memory address given. It is only valid to use this function on
169 * addresses directly mapped or allocated via kmalloc. 169 * addresses directly mapped or allocated via kmalloc.
170 * 170 *
171 * This function does not give bus mappings for DMA transfers. In 171 * This function does not give bus mappings for DMA transfers. In
172 * almost all conceivable cases a device driver should not be using 172 * almost all conceivable cases a device driver should not be using
173 * this function 173 * this function
174 */ 174 */
175 static inline unsigned long __iomem virt_to_phys(volatile void *address) 175 static inline unsigned long __iomem virt_to_phys(volatile void *address)
176 { 176 {
177 return __pa((unsigned long)address); 177 return __pa((unsigned long)address);
178 } 178 }
179 179
180 #define virt_to_bus virt_to_phys 180 #define virt_to_bus virt_to_phys
181 181
182 /** 182 /**
183 * phys_to_virt - map physical address to virtual 183 * phys_to_virt - map physical address to virtual
184 * @address: address to remap 184 * @address: address to remap
185 * 185 *
186 * The returned virtual address is a current CPU mapping for 186 * The returned virtual address is a current CPU mapping for
187 * the memory address given. It is only valid to use this function on 187 * the memory address given. It is only valid to use this function on
188 * addresses that have a kernel mapping 188 * addresses that have a kernel mapping
189 * 189 *
190 * This function does not handle bus mappings for DMA transfers. In 190 * This function does not handle bus mappings for DMA transfers. In
191 * almost all conceivable cases a device driver should not be using 191 * almost all conceivable cases a device driver should not be using
192 * this function 192 * this function
193 */ 193 */
194 static inline void *phys_to_virt(unsigned long address) 194 static inline void *phys_to_virt(unsigned long address)
195 { 195 {
196 return (void *)__va(address); 196 return (void *)__va(address);
197 } 197 }
198 198
199 #define bus_to_virt(a) phys_to_virt(a) 199 #define bus_to_virt(a) phys_to_virt(a)
200 200
201 static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size, 201 static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
202 unsigned long flags) 202 unsigned long flags)
203 { 203 {
204 return (void *)address; 204 return (void *)address;
205 } 205 }
206 206
207 #define ioremap(physaddr, size) ((void __iomem *)(unsigned long)(physaddr)) 207 #define ioremap(physaddr, size) ((void __iomem *)(unsigned long)(physaddr))
208 #define iounmap(addr) ((void)0) 208 #define iounmap(addr) ((void)0)
209 #define ioremap_nocache(physaddr, size) ioremap(physaddr, size) 209 #define ioremap_nocache(physaddr, size) ioremap(physaddr, size)
210 210
211 #endif /* CONFIG_MMU */ 211 #endif /* CONFIG_MMU */
212 212
213 /* 213 /*
214 * Convert a physical pointer to a virtual kernel pointer for /dev/mem 214 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
215 * access 215 * access
216 */ 216 */
217 #define xlate_dev_mem_ptr(p) __va(p) 217 #define xlate_dev_mem_ptr(p) __va(p)
218 218
219 /* 219 /*
220 * Convert a virtual cached pointer to an uncached pointer 220 * Convert a virtual cached pointer to an uncached pointer
221 */ 221 */
222 #define xlate_dev_kmem_ptr(p) p 222 #define xlate_dev_kmem_ptr(p) p
223 223
224 /* 224 /*
225 * Big Endian 225 * Big Endian
226 */ 226 */
227 #define out_be32(a, v) __raw_writel((v), (void __iomem __force *)(a)) 227 #define out_be32(a, v) __raw_writel((v), (void __iomem __force *)(a))
228 #define out_be16(a, v) __raw_writew((v), (a)) 228 #define out_be16(a, v) __raw_writew((v), (a))
229 229
230 #define in_be32(a) __raw_readl((const void __iomem __force *)(a)) 230 #define in_be32(a) __raw_readl((const void __iomem __force *)(a))
231 #define in_be16(a) __raw_readw(a) 231 #define in_be16(a) __raw_readw(a)
232 232
233 #define writel_be(v, a) out_be32((__force unsigned *)a, v) 233 #define writel_be(v, a) out_be32((__force unsigned *)a, v)
234 #define readl_be(a) in_be32((__force unsigned *)a) 234 #define readl_be(a) in_be32((__force unsigned *)a)
235 235
236 /* 236 /*
237 * Little endian 237 * Little endian
238 */ 238 */
239 239
240 #define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a)) 240 #define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a))
241 #define out_le16(a, v) __raw_writew(__cpu_to_le16(v), (a)) 241 #define out_le16(a, v) __raw_writew(__cpu_to_le16(v), (a))
242 242
243 #define in_le32(a) __le32_to_cpu(__raw_readl(a)) 243 #define in_le32(a) __le32_to_cpu(__raw_readl(a))
244 #define in_le16(a) __le16_to_cpu(__raw_readw(a)) 244 #define in_le16(a) __le16_to_cpu(__raw_readw(a))
245 245
246 /* Byte ops */ 246 /* Byte ops */
247 #define out_8(a, v) __raw_writeb((v), (a)) 247 #define out_8(a, v) __raw_writeb((v), (a))
248 #define in_8(a) __raw_readb(a) 248 #define in_8(a) __raw_readb(a)
249 249
250 #define mmiowb() 250 #define mmiowb()
251 251
252 #define ioport_map(port, nr) ((void __iomem *)(port)) 252 #define ioport_map(port, nr) ((void __iomem *)(port))
253 #define ioport_unmap(addr) 253 #define ioport_unmap(addr)
254 254
255 /* from asm-generic/io.h */ 255 /* from asm-generic/io.h */
256 #ifndef insb 256 #ifndef insb
257 static inline void insb(unsigned long addr, void *buffer, int count) 257 static inline void insb(unsigned long addr, void *buffer, int count)
258 { 258 {
259 if (count) { 259 if (count) {
260 u8 *buf = buffer; 260 u8 *buf = buffer;
261 do { 261 do {
262 u8 x = inb(addr); 262 u8 x = inb(addr);
263 *buf++ = x; 263 *buf++ = x;
264 } while (--count); 264 } while (--count);
265 } 265 }
266 } 266 }
267 #endif 267 #endif
268 268
269 #ifndef insw 269 #ifndef insw
270 static inline void insw(unsigned long addr, void *buffer, int count) 270 static inline void insw(unsigned long addr, void *buffer, int count)
271 { 271 {
272 if (count) { 272 if (count) {
273 u16 *buf = buffer; 273 u16 *buf = buffer;
274 do { 274 do {
275 u16 x = inw(addr); 275 u16 x = inw(addr);
276 *buf++ = x; 276 *buf++ = x;
277 } while (--count); 277 } while (--count);
278 } 278 }
279 } 279 }
280 #endif 280 #endif
281 281
282 #ifndef insl 282 #ifndef insl
283 static inline void insl(unsigned long addr, void *buffer, int count) 283 static inline void insl(unsigned long addr, void *buffer, int count)
284 { 284 {
285 if (count) { 285 if (count) {
286 u32 *buf = buffer; 286 u32 *buf = buffer;
287 do { 287 do {
288 u32 x = inl(addr); 288 u32 x = inl(addr);
289 *buf++ = x; 289 *buf++ = x;
290 } while (--count); 290 } while (--count);
291 } 291 }
292 } 292 }
293 #endif 293 #endif
294 294
295 #ifndef outsb 295 #ifndef outsb
296 static inline void outsb(unsigned long addr, const void *buffer, int count) 296 static inline void outsb(unsigned long addr, const void *buffer, int count)
297 { 297 {
298 if (count) { 298 if (count) {
299 const u8 *buf = buffer; 299 const u8 *buf = buffer;
300 do { 300 do {
301 outb(*buf++, addr); 301 outb(*buf++, addr);
302 } while (--count); 302 } while (--count);
303 } 303 }
304 } 304 }
305 #endif 305 #endif
306 306
307 #ifndef outsw 307 #ifndef outsw
308 static inline void outsw(unsigned long addr, const void *buffer, int count) 308 static inline void outsw(unsigned long addr, const void *buffer, int count)
309 { 309 {
310 if (count) { 310 if (count) {
311 const u16 *buf = buffer; 311 const u16 *buf = buffer;
312 do { 312 do {
313 outw(*buf++, addr); 313 outw(*buf++, addr);
314 } while (--count); 314 } while (--count);
315 } 315 }
316 } 316 }
317 #endif 317 #endif
318 318
319 #ifndef outsl 319 #ifndef outsl
320 static inline void outsl(unsigned long addr, const void *buffer, int count) 320 static inline void outsl(unsigned long addr, const void *buffer, int count)
321 { 321 {
322 if (count) { 322 if (count) {
323 const u32 *buf = buffer; 323 const u32 *buf = buffer;
324 do { 324 do {
325 outl(*buf++, addr); 325 outl(*buf++, addr);
326 } while (--count); 326 } while (--count);
327 } 327 }
328 } 328 }
329 #endif 329 #endif
330 330
331 #define ioread8_rep(p, dst, count) \ 331 #define ioread8_rep(p, dst, count) \
332 insb((unsigned long) (p), (dst), (count)) 332 insb((unsigned long) (p), (dst), (count))
333 #define ioread16_rep(p, dst, count) \ 333 #define ioread16_rep(p, dst, count) \
334 insw((unsigned long) (p), (dst), (count)) 334 insw((unsigned long) (p), (dst), (count))
335 #define ioread32_rep(p, dst, count) \ 335 #define ioread32_rep(p, dst, count) \
336 insl((unsigned long) (p), (dst), (count)) 336 insl((unsigned long) (p), (dst), (count))
337 337
338 #define iowrite8_rep(p, src, count) \ 338 #define iowrite8_rep(p, src, count) \
339 outsb((unsigned long) (p), (src), (count)) 339 outsb((unsigned long) (p), (src), (count))
340 #define iowrite16_rep(p, src, count) \ 340 #define iowrite16_rep(p, src, count) \
341 outsw((unsigned long) (p), (src), (count)) 341 outsw((unsigned long) (p), (src), (count))
342 #define iowrite32_rep(p, src, count) \ 342 #define iowrite32_rep(p, src, count) \
343 outsl((unsigned long) (p), (src), (count)) 343 outsl((unsigned long) (p), (src), (count))
344 344
345 #endif /* _ASM_MICROBLAZE_IO_H */ 345 #endif /* _ASM_MICROBLAZE_IO_H */
346 346
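
A note on the accessor split that the io.h comments above describe: MicroBlaze is big-endian, so readw()/readl() byte-swap through le16_to_cpu()/le32_to_cpu() because PCI devices are little-endian, while in_be16()/in_be32() map to the raw __raw_ accessors for native big-endian peripherals. A hedged usage sketch follows; REG_STATUS and mydev_status() are hypothetical names for illustration, not part of this commit:

	#include <linux/types.h>
	#include <linux/io.h>

	#define REG_STATUS 0x04	/* assumed device register offset */

	static u32 mydev_status(void __iomem *base, bool little_endian_dev)
	{
		if (little_endian_dev)
			/* PCI-style device: swaps via le32_to_cpu() */
			return readl(base + REG_STATUS);

		/* Native big-endian IP core: raw __raw_readl(), no swap */
		return in_be32(base + REG_STATUS);
	}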
arch/microblaze/kernel/cpu/cache.c
1 /* 1 /*
2 * Cache control for MicroBlaze cache memories 2 * Cache control for MicroBlaze cache memories
3 * 3 *
4 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> 4 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2007-2009 PetaLogix 5 * Copyright (C) 2007-2009 PetaLogix
6 * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com> 6 * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
7 * 7 *
8 * This file is subject to the terms and conditions of the GNU General 8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this 9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details. 10 * archive for more details.
11 */ 11 */
12 12
13 #include <asm/cacheflush.h> 13 #include <asm/cacheflush.h>
14 #include <linux/cache.h> 14 #include <linux/cache.h>
15 #include <asm/cpuinfo.h> 15 #include <asm/cpuinfo.h>
16 #include <asm/pvr.h> 16 #include <asm/pvr.h>
17 17
18 static inline void __enable_icache_msr(void) 18 static inline void __enable_icache_msr(void)
19 { 19 {
20 __asm__ __volatile__ (" msrset r0, %0; \ 20 __asm__ __volatile__ (" msrset r0, %0;" \
21 nop; " \ 21 "nop;" \
22 : : "i" (MSR_ICE) : "memory"); 22 : : "i" (MSR_ICE) : "memory");
23 } 23 }
24 24
25 static inline void __disable_icache_msr(void) 25 static inline void __disable_icache_msr(void)
26 { 26 {
27 __asm__ __volatile__ (" msrclr r0, %0; \ 27 __asm__ __volatile__ (" msrclr r0, %0;" \
28 nop; " \ 28 "nop;" \
29 : : "i" (MSR_ICE) : "memory"); 29 : : "i" (MSR_ICE) : "memory");
30 } 30 }
31 31
32 static inline void __enable_dcache_msr(void) 32 static inline void __enable_dcache_msr(void)
33 { 33 {
34 __asm__ __volatile__ (" msrset r0, %0; \ 34 __asm__ __volatile__ (" msrset r0, %0;" \
35 nop; " \ 35 "nop;" \
36 : \ 36 : : "i" (MSR_DCE) : "memory");
37 : "i" (MSR_DCE) \
38 : "memory");
39 } 37 }
40 38
41 static inline void __disable_dcache_msr(void) 39 static inline void __disable_dcache_msr(void)
42 { 40 {
43 __asm__ __volatile__ (" msrclr r0, %0; \ 41 __asm__ __volatile__ (" msrclr r0, %0;" \
44 nop; " \ 42 "nop; " \
45 : \ 43 : : "i" (MSR_DCE) : "memory");
46 : "i" (MSR_DCE) \
47 : "memory");
48 } 44 }
49 45
50 static inline void __enable_icache_nomsr(void) 46 static inline void __enable_icache_nomsr(void)
51 { 47 {
52 __asm__ __volatile__ (" mfs r12, rmsr; \ 48 __asm__ __volatile__ (" mfs r12, rmsr;" \
53 nop; \ 49 "nop;" \
54 ori r12, r12, %0; \ 50 "ori r12, r12, %0;" \
55 mts rmsr, r12; \ 51 "mts rmsr, r12;" \
56 nop; " \ 52 "nop;" \
57 : \ 53 : : "i" (MSR_ICE) : "memory", "r12");
58 : "i" (MSR_ICE) \
59 : "memory", "r12");
60 } 54 }
61 55
62 static inline void __disable_icache_nomsr(void) 56 static inline void __disable_icache_nomsr(void)
63 { 57 {
64 __asm__ __volatile__ (" mfs r12, rmsr; \ 58 __asm__ __volatile__ (" mfs r12, rmsr;" \
65 nop; \ 59 "nop;" \
66 andi r12, r12, ~%0; \ 60 "andi r12, r12, ~%0;" \
67 mts rmsr, r12; \ 61 "mts rmsr, r12;" \
68 nop; " \ 62 "nop;" \
69 : \ 63 : : "i" (MSR_ICE) : "memory", "r12");
70 : "i" (MSR_ICE) \
71 : "memory", "r12");
72 } 64 }
73 65
74 static inline void __enable_dcache_nomsr(void) 66 static inline void __enable_dcache_nomsr(void)
75 { 67 {
76 __asm__ __volatile__ (" mfs r12, rmsr; \ 68 __asm__ __volatile__ (" mfs r12, rmsr;" \
77 nop; \ 69 "nop;" \
78 ori r12, r12, %0; \ 70 "ori r12, r12, %0;" \
79 mts rmsr, r12; \ 71 "mts rmsr, r12;" \
80 nop; " \ 72 "nop;" \
81 : \ 73 : : "i" (MSR_DCE) : "memory", "r12");
82 : "i" (MSR_DCE) \
83 : "memory", "r12");
84 } 74 }
85 75
86 static inline void __disable_dcache_nomsr(void) 76 static inline void __disable_dcache_nomsr(void)
87 { 77 {
88 __asm__ __volatile__ (" mfs r12, rmsr; \ 78 __asm__ __volatile__ (" mfs r12, rmsr;" \
89 nop; \ 79 "nop;" \
90 andi r12, r12, ~%0; \ 80 "andi r12, r12, ~%0;" \
91 mts rmsr, r12; \ 81 "mts rmsr, r12;" \
92 nop; " \ 82 "nop;" \
93 : \ 83 : : "i" (MSR_DCE) : "memory", "r12");
94 : "i" (MSR_DCE) \
95 : "memory", "r12");
96 } 84 }
97 85
98 86
99 /* Helper macro for computing the limits of cache range loops 87 /* Helper macro for computing the limits of cache range loops
100 * 88 *
101 * End address can be unaligned which is OK for C implementation. 89 * End address can be unaligned which is OK for C implementation.
102 * ASM implementation align it in ASM macros 90 * ASM implementation align it in ASM macros
103 */ 91 */
104 #define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \ 92 #define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \
105 do { \ 93 do { \
106 int align = ~(cache_line_length - 1); \ 94 int align = ~(cache_line_length - 1); \
107 end = min(start + cache_size, end); \ 95 end = min(start + cache_size, end); \
108 start &= align; \ 96 start &= align; \
109 } while (0); 97 } while (0)
110 98
111 /* 99 /*
112 * Helper macro to loop over the specified cache_size/line_length and 100 * Helper macro to loop over the specified cache_size/line_length and
113 * execute 'op' on that cacheline 101 * execute 'op' on that cacheline
114 */ 102 */
115 #define CACHE_ALL_LOOP(cache_size, line_length, op) \ 103 #define CACHE_ALL_LOOP(cache_size, line_length, op) \
116 do { \ 104 do { \
117 unsigned int len = cache_size - line_length; \ 105 unsigned int len = cache_size - line_length; \
118 int step = -line_length; \ 106 int step = -line_length; \
119 WARN_ON(step >= 0); \ 107 WARN_ON(step >= 0); \
120 \ 108 \
121 __asm__ __volatile__ (" 1: " #op " %0, r0; \ 109 __asm__ __volatile__ (" 1: " #op " %0, r0;" \
122 bgtid %0, 1b; \ 110 "bgtid %0, 1b;" \
123 addk %0, %0, %1; \ 111 "addk %0, %0, %1;" \
124 " : : "r" (len), "r" (step) \ 112 : : "r" (len), "r" (step) \
125 : "memory"); \ 113 : "memory"); \
126 } while (0); 114 } while (0)
127 115
128 /* Used for wdc.flush/clear which can use rB for offset which is not possible 116 /* Used for wdc.flush/clear which can use rB for offset which is not possible
129 * to use for simple wdc or wic. 117 * to use for simple wdc or wic.
130 * 118 *
131 * start address is cache aligned 119 * start address is cache aligned
132 * end address is not aligned, if end is aligned then I have to subtract 120 * end address is not aligned, if end is aligned then I have to subtract
133 * cacheline length because I can't flush/invalidate the next cacheline. 121 * cacheline length because I can't flush/invalidate the next cacheline.
134 * If is not, I align it because I will flush/invalidate whole line. 122 * If is not, I align it because I will flush/invalidate whole line.
135 */ 123 */
136 #define CACHE_RANGE_LOOP_2(start, end, line_length, op) \ 124 #define CACHE_RANGE_LOOP_2(start, end, line_length, op) \
137 do { \ 125 do { \
138 int step = -line_length; \ 126 int step = -line_length; \
139 int align = ~(line_length - 1); \ 127 int align = ~(line_length - 1); \
140 int count; \ 128 int count; \
141 end = ((end & align) == end) ? end - line_length : end & align; \ 129 end = ((end & align) == end) ? end - line_length : end & align; \
142 count = end - start; \ 130 count = end - start; \
143 WARN_ON(count < 0); \ 131 WARN_ON(count < 0); \
144 \ 132 \
145 __asm__ __volatile__ (" 1: " #op " %0, %1; \ 133 __asm__ __volatile__ (" 1: " #op " %0, %1;" \
146 bgtid %1, 1b; \ 134 "bgtid %1, 1b;" \
147 addk %1, %1, %2; \ 135 "addk %1, %1, %2;" \
148 " : : "r" (start), "r" (count), \ 136 : : "r" (start), "r" (count), \
149 "r" (step) : "memory"); \ 137 "r" (step) : "memory"); \
150 } while (0); 138 } while (0)
151 139
152 /* It is used only first parameter for OP - for wic, wdc */ 140 /* It is used only first parameter for OP - for wic, wdc */
153 #define CACHE_RANGE_LOOP_1(start, end, line_length, op) \ 141 #define CACHE_RANGE_LOOP_1(start, end, line_length, op) \
154 do { \ 142 do { \
155 int volatile temp; \ 143 int volatile temp; \
156 int align = ~(line_length - 1); \ 144 int align = ~(line_length - 1); \
157 end = ((end & align) == end) ? end - line_length : end & align; \ 145 end = ((end & align) == end) ? end - line_length : end & align; \
158 WARN_ON(end - start < 0); \ 146 WARN_ON(end - start < 0); \
159 \ 147 \
160 __asm__ __volatile__ (" 1: " #op " %1, r0; \ 148 __asm__ __volatile__ (" 1: " #op " %1, r0;" \
161 cmpu %0, %1, %2; \ 149 "cmpu %0, %1, %2;" \
162 bgtid %0, 1b; \ 150 "bgtid %0, 1b;" \
163 addk %1, %1, %3; \ 151 "addk %1, %1, %3;" \
164 " : : "r" (temp), "r" (start), "r" (end),\ 152 : : "r" (temp), "r" (start), "r" (end), \
165 "r" (line_length) : "memory"); \ 153 "r" (line_length) : "memory"); \
166 } while (0); 154 } while (0)
167 155
168 #define ASM_LOOP 156 #define ASM_LOOP
169 157
170 static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end) 158 static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
171 { 159 {
172 unsigned long flags; 160 unsigned long flags;
173 #ifndef ASM_LOOP 161 #ifndef ASM_LOOP
174 int i; 162 int i;
175 #endif 163 #endif
176 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 164 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
177 (unsigned int)start, (unsigned int) end); 165 (unsigned int)start, (unsigned int) end);
178 166
179 CACHE_LOOP_LIMITS(start, end, 167 CACHE_LOOP_LIMITS(start, end,
180 cpuinfo.icache_line_length, cpuinfo.icache_size); 168 cpuinfo.icache_line_length, cpuinfo.icache_size);
181 169
182 local_irq_save(flags); 170 local_irq_save(flags);
183 __disable_icache_msr(); 171 __disable_icache_msr();
184 172
185 #ifdef ASM_LOOP 173 #ifdef ASM_LOOP
186 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); 174 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
187 #else 175 #else
188 for (i = start; i < end; i += cpuinfo.icache_line_length) 176 for (i = start; i < end; i += cpuinfo.icache_line_length)
189 __asm__ __volatile__ ("wic %0, r0;" \ 177 __asm__ __volatile__ ("wic %0, r0;" \
190 : : "r" (i)); 178 : : "r" (i));
191 #endif 179 #endif
192 __enable_icache_msr(); 180 __enable_icache_msr();
193 local_irq_restore(flags); 181 local_irq_restore(flags);
194 } 182 }
195 183
196 static void __flush_icache_range_nomsr_irq(unsigned long start, 184 static void __flush_icache_range_nomsr_irq(unsigned long start,
197 unsigned long end) 185 unsigned long end)
198 { 186 {
199 unsigned long flags; 187 unsigned long flags;
200 #ifndef ASM_LOOP 188 #ifndef ASM_LOOP
201 int i; 189 int i;
202 #endif 190 #endif
203 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 191 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
204 (unsigned int)start, (unsigned int) end); 192 (unsigned int)start, (unsigned int) end);
205 193
206 CACHE_LOOP_LIMITS(start, end, 194 CACHE_LOOP_LIMITS(start, end,
207 cpuinfo.icache_line_length, cpuinfo.icache_size); 195 cpuinfo.icache_line_length, cpuinfo.icache_size);
208 196
209 local_irq_save(flags); 197 local_irq_save(flags);
210 __disable_icache_nomsr(); 198 __disable_icache_nomsr();
211 199
212 #ifdef ASM_LOOP 200 #ifdef ASM_LOOP
213 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); 201 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
214 #else 202 #else
215 for (i = start; i < end; i += cpuinfo.icache_line_length) 203 for (i = start; i < end; i += cpuinfo.icache_line_length)
216 __asm__ __volatile__ ("wic %0, r0;" \ 204 __asm__ __volatile__ ("wic %0, r0;" \
217 : : "r" (i)); 205 : : "r" (i));
218 #endif 206 #endif
219 207
220 __enable_icache_nomsr(); 208 __enable_icache_nomsr();
221 local_irq_restore(flags); 209 local_irq_restore(flags);
222 } 210 }
223 211
224 static void __flush_icache_range_noirq(unsigned long start, 212 static void __flush_icache_range_noirq(unsigned long start,
225 unsigned long end) 213 unsigned long end)
226 { 214 {
227 #ifndef ASM_LOOP 215 #ifndef ASM_LOOP
228 int i; 216 int i;
229 #endif 217 #endif
230 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 218 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
231 (unsigned int)start, (unsigned int) end); 219 (unsigned int)start, (unsigned int) end);
232 220
233 CACHE_LOOP_LIMITS(start, end, 221 CACHE_LOOP_LIMITS(start, end,
234 cpuinfo.icache_line_length, cpuinfo.icache_size); 222 cpuinfo.icache_line_length, cpuinfo.icache_size);
235 #ifdef ASM_LOOP 223 #ifdef ASM_LOOP
236 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); 224 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
237 #else 225 #else
238 for (i = start; i < end; i += cpuinfo.icache_line_length) 226 for (i = start; i < end; i += cpuinfo.icache_line_length)
239 __asm__ __volatile__ ("wic %0, r0;" \ 227 __asm__ __volatile__ ("wic %0, r0;" \
240 : : "r" (i)); 228 : : "r" (i));
241 #endif 229 #endif
242 } 230 }
243 231
244 static void __flush_icache_all_msr_irq(void) 232 static void __flush_icache_all_msr_irq(void)
245 { 233 {
246 unsigned long flags; 234 unsigned long flags;
247 #ifndef ASM_LOOP 235 #ifndef ASM_LOOP
248 int i; 236 int i;
249 #endif 237 #endif
250 pr_debug("%s\n", __func__); 238 pr_debug("%s\n", __func__);
251 239
252 local_irq_save(flags); 240 local_irq_save(flags);
253 __disable_icache_msr(); 241 __disable_icache_msr();
254 #ifdef ASM_LOOP 242 #ifdef ASM_LOOP
255 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); 243 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
256 #else 244 #else
257 for (i = 0; i < cpuinfo.icache_size; 245 for (i = 0; i < cpuinfo.icache_size;
258 i += cpuinfo.icache_line_length) 246 i += cpuinfo.icache_line_length)
259 __asm__ __volatile__ ("wic %0, r0;" \ 247 __asm__ __volatile__ ("wic %0, r0;" \
260 : : "r" (i)); 248 : : "r" (i));
261 #endif 249 #endif
262 __enable_icache_msr(); 250 __enable_icache_msr();
263 local_irq_restore(flags); 251 local_irq_restore(flags);
264 } 252 }
265 253
266 static void __flush_icache_all_nomsr_irq(void) 254 static void __flush_icache_all_nomsr_irq(void)
267 { 255 {
268 unsigned long flags; 256 unsigned long flags;
269 #ifndef ASM_LOOP 257 #ifndef ASM_LOOP
270 int i; 258 int i;
271 #endif 259 #endif
272 pr_debug("%s\n", __func__); 260 pr_debug("%s\n", __func__);
273 261
274 local_irq_save(flags); 262 local_irq_save(flags);
275 __disable_icache_nomsr(); 263 __disable_icache_nomsr();
276 #ifdef ASM_LOOP 264 #ifdef ASM_LOOP
277 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); 265 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
278 #else 266 #else
279 for (i = 0; i < cpuinfo.icache_size; 267 for (i = 0; i < cpuinfo.icache_size;
280 i += cpuinfo.icache_line_length) 268 i += cpuinfo.icache_line_length)
281 __asm__ __volatile__ ("wic %0, r0;" \ 269 __asm__ __volatile__ ("wic %0, r0;" \
282 : : "r" (i)); 270 : : "r" (i));
283 #endif 271 #endif
284 __enable_icache_nomsr(); 272 __enable_icache_nomsr();
285 local_irq_restore(flags); 273 local_irq_restore(flags);
286 } 274 }
287 275
288 static void __flush_icache_all_noirq(void) 276 static void __flush_icache_all_noirq(void)
289 { 277 {
290 #ifndef ASM_LOOP 278 #ifndef ASM_LOOP
291 int i; 279 int i;
292 #endif 280 #endif
293 pr_debug("%s\n", __func__); 281 pr_debug("%s\n", __func__);
294 #ifdef ASM_LOOP 282 #ifdef ASM_LOOP
295 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); 283 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
296 #else 284 #else
297 for (i = 0; i < cpuinfo.icache_size; 285 for (i = 0; i < cpuinfo.icache_size;
298 i += cpuinfo.icache_line_length) 286 i += cpuinfo.icache_line_length)
299 __asm__ __volatile__ ("wic %0, r0;" \ 287 __asm__ __volatile__ ("wic %0, r0;" \
300 : : "r" (i)); 288 : : "r" (i));
301 #endif 289 #endif
302 } 290 }
303 291
304 static void __invalidate_dcache_all_msr_irq(void) 292 static void __invalidate_dcache_all_msr_irq(void)
305 { 293 {
306 unsigned long flags; 294 unsigned long flags;
307 #ifndef ASM_LOOP 295 #ifndef ASM_LOOP
308 int i; 296 int i;
309 #endif 297 #endif
310 pr_debug("%s\n", __func__); 298 pr_debug("%s\n", __func__);
311 299
312 local_irq_save(flags); 300 local_irq_save(flags);
313 __disable_dcache_msr(); 301 __disable_dcache_msr();
314 #ifdef ASM_LOOP 302 #ifdef ASM_LOOP
315 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc); 303 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
316 #else 304 #else
317 for (i = 0; i < cpuinfo.dcache_size; 305 for (i = 0; i < cpuinfo.dcache_size;
318 i += cpuinfo.dcache_line_length) 306 i += cpuinfo.dcache_line_length)
319 __asm__ __volatile__ ("wdc %0, r0;" \ 307 __asm__ __volatile__ ("wdc %0, r0;" \
320 : : "r" (i)); 308 : : "r" (i));
321 #endif 309 #endif
322 __enable_dcache_msr(); 310 __enable_dcache_msr();
323 local_irq_restore(flags); 311 local_irq_restore(flags);
324 } 312 }
325 313
326 static void __invalidate_dcache_all_nomsr_irq(void) 314 static void __invalidate_dcache_all_nomsr_irq(void)
327 { 315 {
328 unsigned long flags; 316 unsigned long flags;
329 #ifndef ASM_LOOP 317 #ifndef ASM_LOOP
330 int i; 318 int i;
331 #endif 319 #endif
332 pr_debug("%s\n", __func__); 320 pr_debug("%s\n", __func__);
333 321
334 local_irq_save(flags); 322 local_irq_save(flags);
335 __disable_dcache_nomsr(); 323 __disable_dcache_nomsr();
336 #ifdef ASM_LOOP 324 #ifdef ASM_LOOP
337 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc); 325 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
338 #else 326 #else
339 for (i = 0; i < cpuinfo.dcache_size; 327 for (i = 0; i < cpuinfo.dcache_size;
340 i += cpuinfo.dcache_line_length) 328 i += cpuinfo.dcache_line_length)
341 __asm__ __volatile__ ("wdc %0, r0;" \ 329 __asm__ __volatile__ ("wdc %0, r0;" \
342 : : "r" (i)); 330 : : "r" (i));
343 #endif 331 #endif
344 __enable_dcache_nomsr(); 332 __enable_dcache_nomsr();
345 local_irq_restore(flags); 333 local_irq_restore(flags);
346 } 334 }
347 335
348 static void __invalidate_dcache_all_noirq_wt(void) 336 static void __invalidate_dcache_all_noirq_wt(void)
349 { 337 {
350 #ifndef ASM_LOOP 338 #ifndef ASM_LOOP
351 int i; 339 int i;
352 #endif 340 #endif
353 pr_debug("%s\n", __func__); 341 pr_debug("%s\n", __func__);
354 #ifdef ASM_LOOP 342 #ifdef ASM_LOOP
355 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc) 343 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
356 #else 344 #else
357 for (i = 0; i < cpuinfo.dcache_size; 345 for (i = 0; i < cpuinfo.dcache_size;
358 i += cpuinfo.dcache_line_length) 346 i += cpuinfo.dcache_line_length)
359 __asm__ __volatile__ ("wdc %0, r0;" \ 347 __asm__ __volatile__ ("wdc %0, r0;" \
360 : : "r" (i)); 348 : : "r" (i));
361 #endif 349 #endif
362 } 350 }
363 351
364 /* FIXME It is blindly invalidation as is expected 352 /*
353 * FIXME It is blindly invalidation as is expected
365 * but can't be called on noMMU in microblaze_cache_init below 354 * but can't be called on noMMU in microblaze_cache_init below
366 * 355 *
367 * MS: noMMU kernel won't boot if simple wdc is used 356 * MS: noMMU kernel won't boot if simple wdc is used
368 * The reason should be that there are discared data which kernel needs 357 * The reason should be that there are discared data which kernel needs
369 */ 358 */
370 static void __invalidate_dcache_all_wb(void) 359 static void __invalidate_dcache_all_wb(void)
371 { 360 {
372 #ifndef ASM_LOOP 361 #ifndef ASM_LOOP
373 int i; 362 int i;
374 #endif 363 #endif
375 pr_debug("%s\n", __func__); 364 pr_debug("%s\n", __func__);
376 #ifdef ASM_LOOP 365 #ifdef ASM_LOOP
377 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, 366 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
378 wdc) 367 wdc);
379 #else 368 #else
380 for (i = 0; i < cpuinfo.dcache_size; 369 for (i = 0; i < cpuinfo.dcache_size;
381 i += cpuinfo.dcache_line_length) 370 i += cpuinfo.dcache_line_length)
382 __asm__ __volatile__ ("wdc %0, r0;" \ 371 __asm__ __volatile__ ("wdc %0, r0;" \
383 : : "r" (i)); 372 : : "r" (i));
384 #endif 373 #endif
385 } 374 }
386 375
387 static void __invalidate_dcache_range_wb(unsigned long start, 376 static void __invalidate_dcache_range_wb(unsigned long start,
388 unsigned long end) 377 unsigned long end)
389 { 378 {
390 #ifndef ASM_LOOP 379 #ifndef ASM_LOOP
391 int i; 380 int i;
392 #endif 381 #endif
393 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 382 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
394 (unsigned int)start, (unsigned int) end); 383 (unsigned int)start, (unsigned int) end);
395 384
396 CACHE_LOOP_LIMITS(start, end, 385 CACHE_LOOP_LIMITS(start, end,
397 cpuinfo.dcache_line_length, cpuinfo.dcache_size); 386 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
398 #ifdef ASM_LOOP 387 #ifdef ASM_LOOP
399 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear); 388 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
400 #else 389 #else
401 for (i = start; i < end; i += cpuinfo.dcache_line_length) 390 for (i = start; i < end; i += cpuinfo.dcache_line_length)
402 __asm__ __volatile__ ("wdc.clear %0, r0;" \ 391 __asm__ __volatile__ ("wdc.clear %0, r0;" \
403 : : "r" (i)); 392 : : "r" (i));
404 #endif 393 #endif
405 } 394 }
406 395
407 static void __invalidate_dcache_range_nomsr_wt(unsigned long start, 396 static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
408 unsigned long end) 397 unsigned long end)
409 { 398 {
410 #ifndef ASM_LOOP 399 #ifndef ASM_LOOP
411 int i; 400 int i;
412 #endif 401 #endif
413 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 402 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
414 (unsigned int)start, (unsigned int) end); 403 (unsigned int)start, (unsigned int) end);
415 CACHE_LOOP_LIMITS(start, end, 404 CACHE_LOOP_LIMITS(start, end,
416 cpuinfo.dcache_line_length, cpuinfo.dcache_size); 405 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
417 406
418 #ifdef ASM_LOOP 407 #ifdef ASM_LOOP
419 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); 408 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
420 #else 409 #else
421 for (i = start; i < end; i += cpuinfo.dcache_line_length) 410 for (i = start; i < end; i += cpuinfo.dcache_line_length)
422 __asm__ __volatile__ ("wdc %0, r0;" \ 411 __asm__ __volatile__ ("wdc %0, r0;" \
423 : : "r" (i)); 412 : : "r" (i));
424 #endif 413 #endif
425 } 414 }
426 415
427 static void __invalidate_dcache_range_msr_irq_wt(unsigned long start, 416 static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
428 unsigned long end) 417 unsigned long end)
429 { 418 {
430 unsigned long flags; 419 unsigned long flags;
431 #ifndef ASM_LOOP 420 #ifndef ASM_LOOP
432 int i; 421 int i;
433 #endif 422 #endif
434 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 423 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
435 (unsigned int)start, (unsigned int) end); 424 (unsigned int)start, (unsigned int) end);
436 CACHE_LOOP_LIMITS(start, end, 425 CACHE_LOOP_LIMITS(start, end,
437 cpuinfo.dcache_line_length, cpuinfo.dcache_size); 426 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
438 427
439 local_irq_save(flags); 428 local_irq_save(flags);
440 __disable_dcache_msr(); 429 __disable_dcache_msr();
441 430
442 #ifdef ASM_LOOP 431 #ifdef ASM_LOOP
443 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); 432 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
444 #else 433 #else
445 for (i = start; i < end; i += cpuinfo.dcache_line_length) 434 for (i = start; i < end; i += cpuinfo.dcache_line_length)
446 __asm__ __volatile__ ("wdc %0, r0;" \ 435 __asm__ __volatile__ ("wdc %0, r0;" \
447 : : "r" (i)); 436 : : "r" (i));
448 #endif 437 #endif
449 438
450 __enable_dcache_msr(); 439 __enable_dcache_msr();
451 local_irq_restore(flags); 440 local_irq_restore(flags);
452 } 441 }
453 442
454 static void __invalidate_dcache_range_nomsr_irq(unsigned long start, 443 static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
455 unsigned long end) 444 unsigned long end)
456 { 445 {
457 unsigned long flags; 446 unsigned long flags;
458 #ifndef ASM_LOOP 447 #ifndef ASM_LOOP
459 int i; 448 int i;
460 #endif 449 #endif
461 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 450 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
462 (unsigned int)start, (unsigned int) end); 451 (unsigned int)start, (unsigned int) end);
463 452
464 CACHE_LOOP_LIMITS(start, end, 453 CACHE_LOOP_LIMITS(start, end,
465 cpuinfo.dcache_line_length, cpuinfo.dcache_size); 454 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
466 455
467 local_irq_save(flags); 456 local_irq_save(flags);
468 __disable_dcache_nomsr(); 457 __disable_dcache_nomsr();
469 458
470 #ifdef ASM_LOOP 459 #ifdef ASM_LOOP
471 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); 460 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
472 #else 461 #else
473 for (i = start; i < end; i += cpuinfo.dcache_line_length) 462 for (i = start; i < end; i += cpuinfo.dcache_line_length)
474 __asm__ __volatile__ ("wdc %0, r0;" \ 463 __asm__ __volatile__ ("wdc %0, r0;" \
475 : : "r" (i)); 464 : : "r" (i));
476 #endif 465 #endif
477 466
478 __enable_dcache_nomsr(); 467 __enable_dcache_nomsr();
479 local_irq_restore(flags); 468 local_irq_restore(flags);
480 } 469 }
481 470
482 static void __flush_dcache_all_wb(void) 471 static void __flush_dcache_all_wb(void)
483 { 472 {
484 #ifndef ASM_LOOP 473 #ifndef ASM_LOOP
485 int i; 474 int i;
486 #endif 475 #endif
487 pr_debug("%s\n", __func__); 476 pr_debug("%s\n", __func__);
488 #ifdef ASM_LOOP 477 #ifdef ASM_LOOP
489 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, 478 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
490 wdc.flush); 479 wdc.flush);
491 #else 480 #else
492 for (i = 0; i < cpuinfo.dcache_size; 481 for (i = 0; i < cpuinfo.dcache_size;
493 i += cpuinfo.dcache_line_length) 482 i += cpuinfo.dcache_line_length)
494 __asm__ __volatile__ ("wdc.flush %0, r0;" \ 483 __asm__ __volatile__ ("wdc.flush %0, r0;" \
495 : : "r" (i)); 484 : : "r" (i));
496 #endif 485 #endif
497 } 486 }
498 487
499 static void __flush_dcache_range_wb(unsigned long start, unsigned long end) 488 static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
500 { 489 {
501 #ifndef ASM_LOOP 490 #ifndef ASM_LOOP
502 int i; 491 int i;
503 #endif 492 #endif
504 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 493 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
505 (unsigned int)start, (unsigned int) end); 494 (unsigned int)start, (unsigned int) end);
506 495
507 CACHE_LOOP_LIMITS(start, end, 496 CACHE_LOOP_LIMITS(start, end,
508 cpuinfo.dcache_line_length, cpuinfo.dcache_size); 497 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
509 #ifdef ASM_LOOP 498 #ifdef ASM_LOOP
510 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush); 499 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
511 #else 500 #else
512 for (i = start; i < end; i += cpuinfo.dcache_line_length) 501 for (i = start; i < end; i += cpuinfo.dcache_line_length)
513 __asm__ __volatile__ ("wdc.flush %0, r0;" \ 502 __asm__ __volatile__ ("wdc.flush %0, r0;" \
514 : : "r" (i)); 503 : : "r" (i));
515 #endif 504 #endif
516 } 505 }
517 506
518 /* struct for wb caches and for wt caches */ 507 /* struct for wb caches and for wt caches */
519 struct scache *mbc; 508 struct scache *mbc;
520 509
521 /* new wb cache model */ 510 /* new wb cache model */
522 static const struct scache wb_msr = { 511 static const struct scache wb_msr = {
523 .ie = __enable_icache_msr, 512 .ie = __enable_icache_msr,
524 .id = __disable_icache_msr, 513 .id = __disable_icache_msr,
525 .ifl = __flush_icache_all_noirq, 514 .ifl = __flush_icache_all_noirq,
526 .iflr = __flush_icache_range_noirq, 515 .iflr = __flush_icache_range_noirq,
527 .iin = __flush_icache_all_noirq, 516 .iin = __flush_icache_all_noirq,
528 .iinr = __flush_icache_range_noirq, 517 .iinr = __flush_icache_range_noirq,
529 .de = __enable_dcache_msr, 518 .de = __enable_dcache_msr,
530 .dd = __disable_dcache_msr, 519 .dd = __disable_dcache_msr,
531 .dfl = __flush_dcache_all_wb, 520 .dfl = __flush_dcache_all_wb,
532 .dflr = __flush_dcache_range_wb, 521 .dflr = __flush_dcache_range_wb,
533 .din = __invalidate_dcache_all_wb, 522 .din = __invalidate_dcache_all_wb,
534 .dinr = __invalidate_dcache_range_wb, 523 .dinr = __invalidate_dcache_range_wb,
535 }; 524 };
536 525
537 /* There is only difference in ie, id, de, dd functions */ 526 /* There is only difference in ie, id, de, dd functions */
538 static const struct scache wb_nomsr = { 527 static const struct scache wb_nomsr = {
539 .ie = __enable_icache_nomsr, 528 .ie = __enable_icache_nomsr,
540 .id = __disable_icache_nomsr, 529 .id = __disable_icache_nomsr,
541 .ifl = __flush_icache_all_noirq, 530 .ifl = __flush_icache_all_noirq,
542 .iflr = __flush_icache_range_noirq, 531 .iflr = __flush_icache_range_noirq,
543 .iin = __flush_icache_all_noirq, 532 .iin = __flush_icache_all_noirq,
544 .iinr = __flush_icache_range_noirq, 533 .iinr = __flush_icache_range_noirq,
545 .de = __enable_dcache_nomsr, 534 .de = __enable_dcache_nomsr,
546 .dd = __disable_dcache_nomsr, 535 .dd = __disable_dcache_nomsr,
547 .dfl = __flush_dcache_all_wb, 536 .dfl = __flush_dcache_all_wb,
548 .dflr = __flush_dcache_range_wb, 537 .dflr = __flush_dcache_range_wb,
549 .din = __invalidate_dcache_all_wb, 538 .din = __invalidate_dcache_all_wb,
550 .dinr = __invalidate_dcache_range_wb, 539 .dinr = __invalidate_dcache_range_wb,
551 }; 540 };
552 541
553 /* Old wt cache model with disabling irq and turn off cache */ 542 /* Old wt cache model with disabling irq and turn off cache */
554 static const struct scache wt_msr = { 543 static const struct scache wt_msr = {
555 .ie = __enable_icache_msr, 544 .ie = __enable_icache_msr,
556 .id = __disable_icache_msr, 545 .id = __disable_icache_msr,
557 .ifl = __flush_icache_all_msr_irq, 546 .ifl = __flush_icache_all_msr_irq,
558 .iflr = __flush_icache_range_msr_irq, 547 .iflr = __flush_icache_range_msr_irq,
559 .iin = __flush_icache_all_msr_irq, 548 .iin = __flush_icache_all_msr_irq,
560 .iinr = __flush_icache_range_msr_irq, 549 .iinr = __flush_icache_range_msr_irq,
561 .de = __enable_dcache_msr, 550 .de = __enable_dcache_msr,
562 .dd = __disable_dcache_msr, 551 .dd = __disable_dcache_msr,
563 .dfl = __invalidate_dcache_all_msr_irq, 552 .dfl = __invalidate_dcache_all_msr_irq,
564 .dflr = __invalidate_dcache_range_msr_irq_wt, 553 .dflr = __invalidate_dcache_range_msr_irq_wt,
565 .din = __invalidate_dcache_all_msr_irq, 554 .din = __invalidate_dcache_all_msr_irq,
566 .dinr = __invalidate_dcache_range_msr_irq_wt, 555 .dinr = __invalidate_dcache_range_msr_irq_wt,
567 }; 556 };
568 557
569 static const struct scache wt_nomsr = { 558 static const struct scache wt_nomsr = {
570 .ie = __enable_icache_nomsr, 559 .ie = __enable_icache_nomsr,
571 .id = __disable_icache_nomsr, 560 .id = __disable_icache_nomsr,
572 .ifl = __flush_icache_all_nomsr_irq, 561 .ifl = __flush_icache_all_nomsr_irq,
573 .iflr = __flush_icache_range_nomsr_irq, 562 .iflr = __flush_icache_range_nomsr_irq,
574 .iin = __flush_icache_all_nomsr_irq, 563 .iin = __flush_icache_all_nomsr_irq,
575 .iinr = __flush_icache_range_nomsr_irq, 564 .iinr = __flush_icache_range_nomsr_irq,
576 .de = __enable_dcache_nomsr, 565 .de = __enable_dcache_nomsr,
577 .dd = __disable_dcache_nomsr, 566 .dd = __disable_dcache_nomsr,
578 .dfl = __invalidate_dcache_all_nomsr_irq, 567 .dfl = __invalidate_dcache_all_nomsr_irq,
579 .dflr = __invalidate_dcache_range_nomsr_irq, 568 .dflr = __invalidate_dcache_range_nomsr_irq,
580 .din = __invalidate_dcache_all_nomsr_irq, 569 .din = __invalidate_dcache_all_nomsr_irq,
581 .dinr = __invalidate_dcache_range_nomsr_irq, 570 .dinr = __invalidate_dcache_range_nomsr_irq,
582 }; 571 };
583 572
584 /* New wt cache model for newer Microblaze versions */ 573 /* New wt cache model for newer Microblaze versions */
585 static const struct scache wt_msr_noirq = { 574 static const struct scache wt_msr_noirq = {
586 .ie = __enable_icache_msr, 575 .ie = __enable_icache_msr,
587 .id = __disable_icache_msr, 576 .id = __disable_icache_msr,
588 .ifl = __flush_icache_all_noirq, 577 .ifl = __flush_icache_all_noirq,
589 .iflr = __flush_icache_range_noirq, 578 .iflr = __flush_icache_range_noirq,
590 .iin = __flush_icache_all_noirq, 579 .iin = __flush_icache_all_noirq,
591 .iinr = __flush_icache_range_noirq, 580 .iinr = __flush_icache_range_noirq,
592 .de = __enable_dcache_msr, 581 .de = __enable_dcache_msr,
593 .dd = __disable_dcache_msr, 582 .dd = __disable_dcache_msr,
594 .dfl = __invalidate_dcache_all_noirq_wt, 583 .dfl = __invalidate_dcache_all_noirq_wt,
595 .dflr = __invalidate_dcache_range_nomsr_wt, 584 .dflr = __invalidate_dcache_range_nomsr_wt,
596 .din = __invalidate_dcache_all_noirq_wt, 585 .din = __invalidate_dcache_all_noirq_wt,
597 .dinr = __invalidate_dcache_range_nomsr_wt, 586 .dinr = __invalidate_dcache_range_nomsr_wt,
598 }; 587 };
599 588
600 static const struct scache wt_nomsr_noirq = { 589 static const struct scache wt_nomsr_noirq = {
601 .ie = __enable_icache_nomsr, 590 .ie = __enable_icache_nomsr,
602 .id = __disable_icache_nomsr, 591 .id = __disable_icache_nomsr,
603 .ifl = __flush_icache_all_noirq, 592 .ifl = __flush_icache_all_noirq,
604 .iflr = __flush_icache_range_noirq, 593 .iflr = __flush_icache_range_noirq,
605 .iin = __flush_icache_all_noirq, 594 .iin = __flush_icache_all_noirq,
606 .iinr = __flush_icache_range_noirq, 595 .iinr = __flush_icache_range_noirq,
607 .de = __enable_dcache_nomsr, 596 .de = __enable_dcache_nomsr,
608 .dd = __disable_dcache_nomsr, 597 .dd = __disable_dcache_nomsr,
609 .dfl = __invalidate_dcache_all_noirq_wt, 598 .dfl = __invalidate_dcache_all_noirq_wt,
610 .dflr = __invalidate_dcache_range_nomsr_wt, 599 .dflr = __invalidate_dcache_range_nomsr_wt,
611 .din = __invalidate_dcache_all_noirq_wt, 600 .din = __invalidate_dcache_all_noirq_wt,
612 .dinr = __invalidate_dcache_range_nomsr_wt, 601 .dinr = __invalidate_dcache_range_nomsr_wt,
613 }; 602 };
614 603
615 /* CPU version code for 7.20.c - see arch/microblaze/kernel/cpu/cpuinfo.c */ 604 /* CPU version code for 7.20.c - see arch/microblaze/kernel/cpu/cpuinfo.c */
616 #define CPUVER_7_20_A 0x0c 605 #define CPUVER_7_20_A 0x0c
617 #define CPUVER_7_20_D 0x0f 606 #define CPUVER_7_20_D 0x0f
618 607
619 #define INFO(s) printk(KERN_INFO "cache: " s "\n");
620
621 void microblaze_cache_init(void) 608 void microblaze_cache_init(void)
622 { 609 {
623 if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) { 610 if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
624 if (cpuinfo.dcache_wb) { 611 if (cpuinfo.dcache_wb) {
625 INFO("wb_msr"); 612 pr_info("wb_msr\n");
626 mbc = (struct scache *)&wb_msr; 613 mbc = (struct scache *)&wb_msr;
627 if (cpuinfo.ver_code <= CPUVER_7_20_D) { 614 if (cpuinfo.ver_code <= CPUVER_7_20_D) {
628 /* MS: problem with signal handling - hw bug */ 615 /* MS: problem with signal handling - hw bug */
629 INFO("WB won't work properly"); 616 pr_info("WB won't work properly\n");
630 } 617 }
631 } else { 618 } else {
632 if (cpuinfo.ver_code >= CPUVER_7_20_A) { 619 if (cpuinfo.ver_code >= CPUVER_7_20_A) {
633 INFO("wt_msr_noirq"); 620 pr_info("wt_msr_noirq\n");
634 mbc = (struct scache *)&wt_msr_noirq; 621 mbc = (struct scache *)&wt_msr_noirq;
635 } else { 622 } else {
636 INFO("wt_msr"); 623 pr_info("wt_msr\n");
637 mbc = (struct scache *)&wt_msr; 624 mbc = (struct scache *)&wt_msr;
638 } 625 }
639 } 626 }
640 } else { 627 } else {
641 if (cpuinfo.dcache_wb) { 628 if (cpuinfo.dcache_wb) {
642 INFO("wb_nomsr"); 629 pr_info("wb_nomsr\n");
643 mbc = (struct scache *)&wb_nomsr; 630 mbc = (struct scache *)&wb_nomsr;
644 if (cpuinfo.ver_code <= CPUVER_7_20_D) { 631 if (cpuinfo.ver_code <= CPUVER_7_20_D) {
645 /* MS: problem with signal handling - hw bug */ 632 /* MS: problem with signal handling - hw bug */
646 INFO("WB won't work properly"); 633 pr_info("WB won't work properly\n");
647 } 634 }
648 } else { 635 } else {
649 if (cpuinfo.ver_code >= CPUVER_7_20_A) { 636 if (cpuinfo.ver_code >= CPUVER_7_20_A) {
650 INFO("wt_nomsr_noirq"); 637 pr_info("wt_nomsr_noirq\n");
651 mbc = (struct scache *)&wt_nomsr_noirq; 638 mbc = (struct scache *)&wt_nomsr_noirq;
652 } else { 639 } else {
653 INFO("wt_nomsr"); 640 pr_info("wt_nomsr\n");
654 mbc = (struct scache *)&wt_nomsr; 641 mbc = (struct scache *)&wt_nomsr;
655 } 642 }
656 } 643 }
657 } 644 }
658 /* FIXME Invalidation is done in U-BOOT 645 /*
659 * WT cache: Data is already written to main memory 646 * FIXME Invalidation is done in U-BOOT
660 * WB cache: Discarding data on noMMU caused the kernel not to boot 647 * WT cache: Data is already written to main memory
661 */ 648 * WB cache: Discarding data on noMMU caused the kernel not to boot
649 */
662 /* invalidate_dcache(); */ 650 /* invalidate_dcache(); */
663 enable_dcache(); 651 enable_dcache();
664 652
665 invalidate_icache(); 653 invalidate_icache();
666 enable_icache(); 654 enable_icache();
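
Note on the hunk above: microblaze_cache_init() only decides which scache ops table the global mbc pointer refers to; the enable_*/invalidate_* calls at the end reach the selected implementation through that pointer. A minimal sketch of the dispatch, assuming the wrapper-macro convention used elsewhere in the microblaze cache headers (illustrative, not part of this patch):

    /* Sketch: calls are dispatched through the ops table chosen above.
     * The macro names mirror the cacheflush wrappers but are illustrative. */
    extern struct scache *mbc;      /* set by microblaze_cache_init() */

    #define enable_icache()         mbc->ie()
    #define invalidate_icache()     mbc->iin()
    #define enable_dcache()         mbc->de()
    #define invalidate_dcache()     mbc->din()
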
arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
1 /* 1 /*
2 * Support for MicroBlaze PVR (processor version register) 2 * Support for MicroBlaze PVR (processor version register)
3 * 3 *
4 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> 4 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2007-2009 PetaLogix 5 * Copyright (C) 2007-2009 PetaLogix
6 * Copyright (C) 2007 John Williams <john.williams@petalogix.com> 6 * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
7 * 7 *
8 * This file is subject to the terms and conditions of the GNU General Public 8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive 9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details. 10 * for more details.
11 */ 11 */
12 12
13 #include <linux/init.h> 13 #include <linux/init.h>
14 #include <linux/string.h> 14 #include <linux/string.h>
15 #include <asm/pvr.h> 15 #include <asm/pvr.h>
16 #include <asm/cpuinfo.h> 16 #include <asm/cpuinfo.h>
17 17
18 /* 18 /*
19 * Helper macro to map between fields in our struct cpuinfo, and 19 * Helper macro to map between fields in our struct cpuinfo, and
20 * the PVR macros in pvr.h. 20 * the PVR macros in pvr.h.
21 */ 21 */
22 22
23 #define CI(c, p) { ci->c = PVR_##p(pvr); } 23 #define CI(c, p) { ci->c = PVR_##p(pvr); }
24 24
25 #if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE) 25 #if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE)
26 #define err_printk(x) \ 26 #define err_printk(x) \
27 early_printk("ERROR: Microblaze " x "-different for PVR and DTS\n"); 27 early_printk("ERROR: Microblaze " x "-different for PVR and DTS\n");
28 #else 28 #else
29 #define err_printk(x) \ 29 #define err_printk(x) \
30 printk(KERN_INFO "ERROR: Microblaze " x "-different for PVR and DTS\n"); 30 pr_info("ERROR: Microblaze " x "-different for PVR and DTS\n");
31 #endif 31 #endif
32 32
33 void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu) 33 void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
34 { 34 {
35 struct pvr_s pvr; 35 struct pvr_s pvr;
36 int temp; /* for saving temp value */ 36 int temp; /* for saving temp value */
37 get_pvr(&pvr); 37 get_pvr(&pvr);
38 38
39 CI(ver_code, VERSION); 39 CI(ver_code, VERSION);
40 if (!ci->ver_code) { 40 if (!ci->ver_code) {
41 printk(KERN_ERR "ERROR: MB has broken PVR regs " 41 pr_err("ERROR: MB has broken PVR regs -> use DTS setting\n");
42 "-> use DTS setting\n");
43 return; 42 return;
44 } 43 }
45 44
46 temp = PVR_USE_BARREL(pvr) | PVR_USE_MSR_INSTR(pvr) |\ 45 temp = PVR_USE_BARREL(pvr) | PVR_USE_MSR_INSTR(pvr) |
47 PVR_USE_PCMP_INSTR(pvr) | PVR_USE_DIV(pvr); 46 PVR_USE_PCMP_INSTR(pvr) | PVR_USE_DIV(pvr);
48 if (ci->use_instr != temp) 47 if (ci->use_instr != temp)
49 err_printk("BARREL, MSR, PCMP or DIV"); 48 err_printk("BARREL, MSR, PCMP or DIV");
50 ci->use_instr = temp; 49 ci->use_instr = temp;
51 50
52 temp = PVR_USE_HW_MUL(pvr) | PVR_USE_MUL64(pvr); 51 temp = PVR_USE_HW_MUL(pvr) | PVR_USE_MUL64(pvr);
53 if (ci->use_mult != temp) 52 if (ci->use_mult != temp)
54 err_printk("HW_MUL"); 53 err_printk("HW_MUL");
55 ci->use_mult = temp; 54 ci->use_mult = temp;
56 55
57 temp = PVR_USE_FPU(pvr) | PVR_USE_FPU2(pvr); 56 temp = PVR_USE_FPU(pvr) | PVR_USE_FPU2(pvr);
58 if (ci->use_fpu != temp) 57 if (ci->use_fpu != temp)
59 err_printk("HW_FPU"); 58 err_printk("HW_FPU");
60 ci->use_fpu = temp; 59 ci->use_fpu = temp;
61 60
62 ci->use_exc = PVR_OPCODE_0x0_ILLEGAL(pvr) |\ 61 ci->use_exc = PVR_OPCODE_0x0_ILLEGAL(pvr) |
63 PVR_UNALIGNED_EXCEPTION(pvr) |\ 62 PVR_UNALIGNED_EXCEPTION(pvr) |
64 PVR_ILL_OPCODE_EXCEPTION(pvr) |\ 63 PVR_ILL_OPCODE_EXCEPTION(pvr) |
65 PVR_IOPB_BUS_EXCEPTION(pvr) |\ 64 PVR_IOPB_BUS_EXCEPTION(pvr) |
66 PVR_DOPB_BUS_EXCEPTION(pvr) |\ 65 PVR_DOPB_BUS_EXCEPTION(pvr) |
67 PVR_DIV_ZERO_EXCEPTION(pvr) |\ 66 PVR_DIV_ZERO_EXCEPTION(pvr) |
68 PVR_FPU_EXCEPTION(pvr) |\ 67 PVR_FPU_EXCEPTION(pvr) |
69 PVR_FSL_EXCEPTION(pvr); 68 PVR_FSL_EXCEPTION(pvr);
70 69
71 CI(pvr_user1, USER1); 70 CI(pvr_user1, USER1);
72 CI(pvr_user2, USER2); 71 CI(pvr_user2, USER2);
73 72
74 CI(mmu, USE_MMU); 73 CI(mmu, USE_MMU);
75 CI(mmu_privins, MMU_PRIVINS); 74 CI(mmu_privins, MMU_PRIVINS);
76 CI(endian, ENDIAN); 75 CI(endian, ENDIAN);
77 76
78 CI(use_icache, USE_ICACHE); 77 CI(use_icache, USE_ICACHE);
79 CI(icache_tagbits, ICACHE_ADDR_TAG_BITS); 78 CI(icache_tagbits, ICACHE_ADDR_TAG_BITS);
80 CI(icache_write, ICACHE_ALLOW_WR); 79 CI(icache_write, ICACHE_ALLOW_WR);
81 ci->icache_line_length = PVR_ICACHE_LINE_LEN(pvr) << 2; 80 ci->icache_line_length = PVR_ICACHE_LINE_LEN(pvr) << 2;
82 CI(icache_size, ICACHE_BYTE_SIZE); 81 CI(icache_size, ICACHE_BYTE_SIZE);
83 CI(icache_base, ICACHE_BASEADDR); 82 CI(icache_base, ICACHE_BASEADDR);
84 CI(icache_high, ICACHE_HIGHADDR); 83 CI(icache_high, ICACHE_HIGHADDR);
85 84
86 CI(use_dcache, USE_DCACHE); 85 CI(use_dcache, USE_DCACHE);
87 CI(dcache_tagbits, DCACHE_ADDR_TAG_BITS); 86 CI(dcache_tagbits, DCACHE_ADDR_TAG_BITS);
88 CI(dcache_write, DCACHE_ALLOW_WR); 87 CI(dcache_write, DCACHE_ALLOW_WR);
89 ci->dcache_line_length = PVR_DCACHE_LINE_LEN(pvr) << 2; 88 ci->dcache_line_length = PVR_DCACHE_LINE_LEN(pvr) << 2;
90 CI(dcache_size, DCACHE_BYTE_SIZE); 89 CI(dcache_size, DCACHE_BYTE_SIZE);
91 CI(dcache_base, DCACHE_BASEADDR); 90 CI(dcache_base, DCACHE_BASEADDR);
92 CI(dcache_high, DCACHE_HIGHADDR); 91 CI(dcache_high, DCACHE_HIGHADDR);
93 92
94 temp = PVR_DCACHE_USE_WRITEBACK(pvr); 93 temp = PVR_DCACHE_USE_WRITEBACK(pvr);
95 if (ci->dcache_wb != temp) 94 if (ci->dcache_wb != temp)
96 err_printk("DCACHE WB"); 95 err_printk("DCACHE WB");
97 ci->dcache_wb = temp; 96 ci->dcache_wb = temp;
98 97
99 CI(use_dopb, D_OPB); 98 CI(use_dopb, D_OPB);
100 CI(use_iopb, I_OPB); 99 CI(use_iopb, I_OPB);
101 CI(use_dlmb, D_LMB); 100 CI(use_dlmb, D_LMB);
102 CI(use_ilmb, I_LMB); 101 CI(use_ilmb, I_LMB);
103 CI(num_fsl, FSL_LINKS); 102 CI(num_fsl, FSL_LINKS);
104 103
105 CI(irq_edge, INTERRUPT_IS_EDGE); 104 CI(irq_edge, INTERRUPT_IS_EDGE);
106 CI(irq_positive, EDGE_IS_POSITIVE); 105 CI(irq_positive, EDGE_IS_POSITIVE);
107 106
108 CI(area_optimised, AREA_OPTIMISED); 107 CI(area_optimised, AREA_OPTIMISED);
109 108
110 CI(hw_debug, DEBUG_ENABLED); 109 CI(hw_debug, DEBUG_ENABLED);
111 CI(num_pc_brk, NUMBER_OF_PC_BRK); 110 CI(num_pc_brk, NUMBER_OF_PC_BRK);
112 CI(num_rd_brk, NUMBER_OF_RD_ADDR_BRK); 111 CI(num_rd_brk, NUMBER_OF_RD_ADDR_BRK);
113 CI(num_wr_brk, NUMBER_OF_WR_ADDR_BRK); 112 CI(num_wr_brk, NUMBER_OF_WR_ADDR_BRK);
114 113
115 CI(fpga_family_code, TARGET_FAMILY); 114 CI(fpga_family_code, TARGET_FAMILY);
116 115
117 /* take timebase-frequency from DTS */ 116 /* take timebase-frequency from DTS */
118 ci->cpu_clock_freq = fcpu(cpu, "timebase-frequency"); 117 ci->cpu_clock_freq = fcpu(cpu, "timebase-frequency");
119 } 118 }
120 119
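
For readers unfamiliar with the CI() helper above: it pastes its second argument onto the PVR_ prefix, so each line copies one decoded PVR field into struct cpuinfo. One invocation from the function above, expanded by hand (illustration only):

    /* Illustration only: what one CI() call from above expands to. */
    CI(icache_size, ICACHE_BYTE_SIZE);
    /* after preprocessing: */
    { ci->icache_size = PVR_ICACHE_BYTE_SIZE(pvr); }
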
arch/microblaze/kernel/cpu/cpuinfo.c
1 /* 1 /*
2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> 2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2007-2009 PetaLogix 3 * Copyright (C) 2007-2009 PetaLogix
4 * Copyright (C) 2007 John Williams <john.williams@petalogix.com> 4 * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
5 * 5 *
6 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive 7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details. 8 * for more details.
9 */ 9 */
10 10
11 #include <linux/init.h> 11 #include <linux/init.h>
12 #include <asm/cpuinfo.h> 12 #include <asm/cpuinfo.h>
13 #include <asm/pvr.h> 13 #include <asm/pvr.h>
14 14
15 const struct cpu_ver_key cpu_ver_lookup[] = { 15 const struct cpu_ver_key cpu_ver_lookup[] = {
16 /* These key values are as per the MBV field in PVR0 */ 16 /* These key values are as per the MBV field in PVR0 */
17 {"5.00.a", 0x01}, 17 {"5.00.a", 0x01},
18 {"5.00.b", 0x02}, 18 {"5.00.b", 0x02},
19 {"5.00.c", 0x03}, 19 {"5.00.c", 0x03},
20 {"6.00.a", 0x04}, 20 {"6.00.a", 0x04},
21 {"6.00.b", 0x06}, 21 {"6.00.b", 0x06},
22 {"7.00.a", 0x05}, 22 {"7.00.a", 0x05},
23 {"7.00.b", 0x07}, 23 {"7.00.b", 0x07},
24 {"7.10.a", 0x08}, 24 {"7.10.a", 0x08},
25 {"7.10.b", 0x09}, 25 {"7.10.b", 0x09},
26 {"7.10.c", 0x0a}, 26 {"7.10.c", 0x0a},
27 {"7.10.d", 0x0b}, 27 {"7.10.d", 0x0b},
28 {"7.20.a", 0x0c}, 28 {"7.20.a", 0x0c},
29 {"7.20.b", 0x0d}, 29 {"7.20.b", 0x0d},
30 {"7.20.c", 0x0e}, 30 {"7.20.c", 0x0e},
31 {"7.20.d", 0x0f}, 31 {"7.20.d", 0x0f},
32 {"7.30.a", 0x10}, 32 {"7.30.a", 0x10},
33 {"7.30.b", 0x11}, 33 {"7.30.b", 0x11},
34 {"8.00.a", 0x12}, 34 {"8.00.a", 0x12},
35 {"8.00.b", 0x13}, 35 {"8.00.b", 0x13},
36 {"8.10.a", 0x14}, 36 {"8.10.a", 0x14},
37 {"8.20.a", 0x15}, 37 {"8.20.a", 0x15},
38 {"8.20.b", 0x16}, 38 {"8.20.b", 0x16},
39 {"8.30.a", 0x17}, 39 {"8.30.a", 0x17},
40 {NULL, 0}, 40 {NULL, 0},
41 }; 41 };
42 42
43 /* 43 /*
44 * FIXME Not sure if the actual key is defined by Xilinx in the PVR 44 * FIXME Not sure if the actual key is defined by Xilinx in the PVR
45 */ 45 */
46 const struct family_string_key family_string_lookup[] = { 46 const struct family_string_key family_string_lookup[] = {
47 {"virtex2", 0x4}, 47 {"virtex2", 0x4},
48 {"virtex2pro", 0x5}, 48 {"virtex2pro", 0x5},
49 {"spartan3", 0x6}, 49 {"spartan3", 0x6},
50 {"virtex4", 0x7}, 50 {"virtex4", 0x7},
51 {"virtex5", 0x8}, 51 {"virtex5", 0x8},
52 {"spartan3e", 0x9}, 52 {"spartan3e", 0x9},
53 {"spartan3a", 0xa}, 53 {"spartan3a", 0xa},
54 {"spartan3an", 0xb}, 54 {"spartan3an", 0xb},
55 {"spartan3adsp", 0xc}, 55 {"spartan3adsp", 0xc},
56 {"spartan6", 0xd}, 56 {"spartan6", 0xd},
57 {"virtex6", 0xe}, 57 {"virtex6", 0xe},
58 /* FIXME There is no key code defined for spartan2 */ 58 /* FIXME There is no key code defined for spartan2 */
59 {"spartan2", 0xf0}, 59 {"spartan2", 0xf0},
60 {NULL, 0}, 60 {NULL, 0},
61 }; 61 };
62 62
63 struct cpuinfo cpuinfo; 63 struct cpuinfo cpuinfo;
64 64
65 void __init setup_cpuinfo(void) 65 void __init setup_cpuinfo(void)
66 { 66 {
67 struct device_node *cpu = NULL; 67 struct device_node *cpu = NULL;
68 68
69 cpu = (struct device_node *) of_find_node_by_type(NULL, "cpu"); 69 cpu = (struct device_node *) of_find_node_by_type(NULL, "cpu");
70 if (!cpu) 70 if (!cpu)
71 printk(KERN_ERR "You don't have cpu!!!\n"); 71 pr_err("You don't have cpu!!!\n");
72 72
73 printk(KERN_INFO "%s: initialising\n", __func__); 73 pr_info("%s: initialising\n", __func__);
74 74
75 switch (cpu_has_pvr()) { 75 switch (cpu_has_pvr()) {
76 case 0: 76 case 0:
77 printk(KERN_WARNING 77 pr_warn("%s: No PVR support. Using static CPU info from FDT\n",
78 "%s: No PVR support. Using static CPU info from FDT\n",
79 __func__); 78 __func__);
80 set_cpuinfo_static(&cpuinfo, cpu); 79 set_cpuinfo_static(&cpuinfo, cpu);
81 break; 80 break;
82 /* FIXME Weird behavior was seen with MB 7.00.a/b and 7.10.a; 81 /* FIXME Weird behavior was seen with MB 7.00.a/b and 7.10.a;
83 * please do not use FULL PVR with the MMU */ 82 * please do not use FULL PVR with the MMU */
84 case 1: 83 case 1:
85 printk(KERN_INFO "%s: Using full CPU PVR support\n", 84 pr_info("%s: Using full CPU PVR support\n",
86 __func__); 85 __func__);
87 set_cpuinfo_static(&cpuinfo, cpu); 86 set_cpuinfo_static(&cpuinfo, cpu);
88 set_cpuinfo_pvr_full(&cpuinfo, cpu); 87 set_cpuinfo_pvr_full(&cpuinfo, cpu);
89 break; 88 break;
90 default: 89 default:
91 printk(KERN_WARNING "%s: Unsupported PVR setting\n", __func__); 90 pr_warn("%s: Unsupported PVR setting\n", __func__);
92 set_cpuinfo_static(&cpuinfo, cpu); 91 set_cpuinfo_static(&cpuinfo, cpu);
93 } 92 }
94 93
95 if (cpuinfo.mmu_privins) 94 if (cpuinfo.mmu_privins)
96 printk(KERN_WARNING "%s: Stream instructions enabled" 95 pr_warn("%s: Stream instructions enabled"
97 " - USERSPACE CAN LOCK THIS KERNEL!\n", __func__); 96 " - USERSPACE CAN LOCK THIS KERNEL!\n", __func__);
98 } 97 }
99 98
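
Since cpu_ver_lookup is NULL-terminated, decoding a version code elsewhere reduces to a linear scan. A hypothetical helper sketching that use (the s/k field names are assumed from the initializers above; this is not part of the patch):

    /* Hypothetical helper: map a PVR version code back to its string. */
    static const char *cpu_ver_string(unsigned int ver_code)
    {
            const struct cpu_ver_key *v;

            for (v = cpu_ver_lookup; v->s; v++)
                    if (v->k == ver_code)
                            return v->s;
            return "unknown";
    }
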
arch/microblaze/kernel/cpu/pvr.c
1 /* 1 /*
2 * Support for MicroBlaze PVR (processor version register) 2 * Support for MicroBlaze PVR (processor version register)
3 * 3 *
4 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> 4 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2007-2009 PetaLogix 5 * Copyright (C) 2007-2009 PetaLogix
6 * Copyright (C) 2007 John Williams <john.williams@petalogix.com> 6 * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
7 * 7 *
8 * This file is subject to the terms and conditions of the GNU General Public 8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive 9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details. 10 * for more details.
11 */ 11 */
12 12
13 #include <linux/kernel.h> 13 #include <linux/kernel.h>
14 #include <linux/compiler.h> 14 #include <linux/compiler.h>
15 #include <asm/exceptions.h> 15 #include <asm/exceptions.h>
16 #include <asm/pvr.h> 16 #include <asm/pvr.h>
17 17
18 /* 18 /*
19 * Until we get an assembler that knows about the pvr registers, 19 * Until we get an assembler that knows about the pvr registers,
20 * this horrible cruft will have to do. 20 * this horrible cruft will have to do.
21 * That hardcoded opcode is mfs r3, rpvrNN 21 * That hardcoded opcode is mfs r3, rpvrNN
22 */ 22 */
23 23
24 #define get_single_pvr(pvrid, val) \ 24 #define get_single_pvr(pvrid, val) \
25 { \ 25 { \
26 register unsigned tmp __asm__("r3"); \ 26 register unsigned tmp __asm__("r3"); \
27 tmp = 0x0; /* Prevent warning about unused */ \ 27 tmp = 0x0; /* Prevent warning about unused */ \
28 __asm__ __volatile__ ( \ 28 __asm__ __volatile__ ( \
29 "mfs %0, rpvr" #pvrid ";" \ 29 "mfs %0, rpvr" #pvrid ";" \
30 : "=r" (tmp) : : "memory"); \ 30 : "=r" (tmp) : : "memory"); \
31 val = tmp; \ 31 val = tmp; \
32 } 32 }
33 33
34 /* 34 /*
35 * Does the CPU support the PVR register? 35 * Does the CPU support the PVR register?
36 * return value: 36 * return value:
37 * 0: no PVR 37 * 0: no PVR
38 * 1: simple PVR 38 * 1: simple PVR
39 * 2: full PVR 39 * 2: full PVR
40 * 40 *
41 * This must work on all CPU versions, including those before the 41 * This must work on all CPU versions, including those before the
42 * PVR was even an option. 42 * PVR was even an option.
43 */ 43 */
44 44
45 int cpu_has_pvr(void) 45 int cpu_has_pvr(void)
46 { 46 {
47 unsigned long flags; 47 unsigned long flags;
48 unsigned pvr0; 48 unsigned pvr0;
49 49
50 local_save_flags(flags); 50 local_save_flags(flags);
51 51
52 /* PVR bit in MSR tells us if there is any support */ 52 /* PVR bit in MSR tells us if there is any support */
53 if (!(flags & PVR_MSR_BIT)) 53 if (!(flags & PVR_MSR_BIT))
54 return 0; 54 return 0;
55 55
56 get_single_pvr(0, pvr0); 56 get_single_pvr(0, pvr0);
57 pr_debug("%s: pvr0 is 0x%08x\n", __func__, pvr0); 57 pr_debug("%s: pvr0 is 0x%08x\n", __func__, pvr0);
58 58
59 if (pvr0 & PVR0_PVR_FULL_MASK) 59 if (pvr0 & PVR0_PVR_FULL_MASK)
60 return 1; 60 return 1;
61 61
62 /* for partial PVR use static cpuinfo */ 62 /* for partial PVR use static cpuinfo */
63 return 2; 63 return 2;
64 } 64 }
65 65
66 void get_pvr(struct pvr_s *p) 66 void get_pvr(struct pvr_s *p)
67 { 67 {
68 get_single_pvr(0, p->pvr[0]); 68 get_single_pvr(0, p->pvr[0]);
69 get_single_pvr(1, p->pvr[1]); 69 get_single_pvr(1, p->pvr[1]);
70 get_single_pvr(2, p->pvr[2]); 70 get_single_pvr(2, p->pvr[2]);
71 get_single_pvr(3, p->pvr[3]); 71 get_single_pvr(3, p->pvr[3]);
72 get_single_pvr(4, p->pvr[4]); 72 get_single_pvr(4, p->pvr[4]);
73 get_single_pvr(5, p->pvr[5]); 73 get_single_pvr(5, p->pvr[5]);
74 get_single_pvr(6, p->pvr[6]); 74 get_single_pvr(6, p->pvr[6]);
75 get_single_pvr(7, p->pvr[7]); 75 get_single_pvr(7, p->pvr[7]);
76 get_single_pvr(8, p->pvr[8]); 76 get_single_pvr(8, p->pvr[8]);
77 get_single_pvr(9, p->pvr[9]); 77 get_single_pvr(9, p->pvr[9]);
78 get_single_pvr(10, p->pvr[10]); 78 get_single_pvr(10, p->pvr[10]);
79 get_single_pvr(11, p->pvr[11]); 79 get_single_pvr(11, p->pvr[11]);
80 } 80 }
81 81
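
Putting the two routines together, a caller first probes for PVR support and only then reads the registers; a minimal sketch mirroring the shape of setup_cpuinfo() in cpuinfo.c (illustration only):

    struct pvr_s pvr;

    if (cpu_has_pvr()) {            /* non-zero: some PVR support */
            get_pvr(&pvr);          /* safe: the MSR PVR bit is set */
            pr_info("PVR0 = 0x%08x\n", pvr.pvr[0]);
    }
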
arch/microblaze/kernel/dma.c
1 /* 1 /*
2 * Copyright (C) 2009-2010 PetaLogix 2 * Copyright (C) 2009-2010 PetaLogix
3 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation 3 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
4 * 4 *
5 * Provide default implementations of the DMA mapping callbacks for 5 * Provide default implementations of the DMA mapping callbacks for
6 * directly mapped busses. 6 * directly mapped busses.
7 */ 7 */
8 8
9 #include <linux/device.h> 9 #include <linux/device.h>
10 #include <linux/dma-mapping.h> 10 #include <linux/dma-mapping.h>
11 #include <linux/gfp.h> 11 #include <linux/gfp.h>
12 #include <linux/dma-debug.h> 12 #include <linux/dma-debug.h>
13 #include <linux/export.h> 13 #include <linux/export.h>
14 #include <asm/bug.h> 14 #include <linux/bug.h>
15 15
16 /* 16 /*
17 * Generic direct DMA implementation 17 * Generic direct DMA implementation
18 * 18 *
19 * This implementation supports a per-device offset that can be applied if 19 * This implementation supports a per-device offset that can be applied if
20 * the address at which memory is visible to devices is not 0. Platform code 20 * the address at which memory is visible to devices is not 0. Platform code
21 * can set archdata.dma_data to an unsigned long holding the offset. By 21 * can set archdata.dma_data to an unsigned long holding the offset. By
22 * default the offset is PCI_DRAM_OFFSET. 22 * default the offset is PCI_DRAM_OFFSET.
23 */ 23 */
24 24
25 static unsigned long get_dma_direct_offset(struct device *dev) 25 static unsigned long get_dma_direct_offset(struct device *dev)
26 { 26 {
27 if (likely(dev)) 27 if (likely(dev))
28 return (unsigned long)dev->archdata.dma_data; 28 return (unsigned long)dev->archdata.dma_data;
29 29
30 return PCI_DRAM_OFFSET; /* FIXME Not sure if this is correct */ 30 return PCI_DRAM_OFFSET; /* FIXME Not sure if this is correct */
31 } 31 }
32 32
33 #define NOT_COHERENT_CACHE 33 #define NOT_COHERENT_CACHE
34 34
35 static void *dma_direct_alloc_coherent(struct device *dev, size_t size, 35 static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
36 dma_addr_t *dma_handle, gfp_t flag, 36 dma_addr_t *dma_handle, gfp_t flag,
37 struct dma_attrs *attrs) 37 struct dma_attrs *attrs)
38 { 38 {
39 #ifdef NOT_COHERENT_CACHE 39 #ifdef NOT_COHERENT_CACHE
40 return consistent_alloc(flag, size, dma_handle); 40 return consistent_alloc(flag, size, dma_handle);
41 #else 41 #else
42 void *ret; 42 void *ret;
43 struct page *page; 43 struct page *page;
44 int node = dev_to_node(dev); 44 int node = dev_to_node(dev);
45 45
46 /* ignore region specifiers */ 46 /* ignore region specifiers */
47 flag &= ~(__GFP_HIGHMEM); 47 flag &= ~(__GFP_HIGHMEM);
48 48
49 page = alloc_pages_node(node, flag, get_order(size)); 49 page = alloc_pages_node(node, flag, get_order(size));
50 if (page == NULL) 50 if (page == NULL)
51 return NULL; 51 return NULL;
52 ret = page_address(page); 52 ret = page_address(page);
53 memset(ret, 0, size); 53 memset(ret, 0, size);
54 *dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev); 54 *dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);
55 55
56 return ret; 56 return ret;
57 #endif 57 #endif
58 } 58 }
59 59
60 static void dma_direct_free_coherent(struct device *dev, size_t size, 60 static void dma_direct_free_coherent(struct device *dev, size_t size,
61 void *vaddr, dma_addr_t dma_handle, 61 void *vaddr, dma_addr_t dma_handle,
62 struct dma_attrs *attrs) 62 struct dma_attrs *attrs)
63 { 63 {
64 #ifdef NOT_COHERENT_CACHE 64 #ifdef NOT_COHERENT_CACHE
65 consistent_free(size, vaddr); 65 consistent_free(size, vaddr);
66 #else 66 #else
67 free_pages((unsigned long)vaddr, get_order(size)); 67 free_pages((unsigned long)vaddr, get_order(size));
68 #endif 68 #endif
69 } 69 }
70 70
71 static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, 71 static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
72 int nents, enum dma_data_direction direction, 72 int nents, enum dma_data_direction direction,
73 struct dma_attrs *attrs) 73 struct dma_attrs *attrs)
74 { 74 {
75 struct scatterlist *sg; 75 struct scatterlist *sg;
76 int i; 76 int i;
77 77
78 /* FIXME this part of the code is untested */ 78 /* FIXME this part of the code is untested */
79 for_each_sg(sgl, sg, nents, i) { 79 for_each_sg(sgl, sg, nents, i) {
80 sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev); 80 sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
81 __dma_sync(page_to_phys(sg_page(sg)) + sg->offset, 81 __dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
82 sg->length, direction); 82 sg->length, direction);
83 } 83 }
84 84
85 return nents; 85 return nents;
86 } 86 }
87 87
88 static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg, 88 static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
89 int nents, enum dma_data_direction direction, 89 int nents, enum dma_data_direction direction,
90 struct dma_attrs *attrs) 90 struct dma_attrs *attrs)
91 { 91 {
92 } 92 }
93 93
94 static int dma_direct_dma_supported(struct device *dev, u64 mask) 94 static int dma_direct_dma_supported(struct device *dev, u64 mask)
95 { 95 {
96 return 1; 96 return 1;
97 } 97 }
98 98
99 static inline dma_addr_t dma_direct_map_page(struct device *dev, 99 static inline dma_addr_t dma_direct_map_page(struct device *dev,
100 struct page *page, 100 struct page *page,
101 unsigned long offset, 101 unsigned long offset,
102 size_t size, 102 size_t size,
103 enum dma_data_direction direction, 103 enum dma_data_direction direction,
104 struct dma_attrs *attrs) 104 struct dma_attrs *attrs)
105 { 105 {
106 __dma_sync(page_to_phys(page) + offset, size, direction); 106 __dma_sync(page_to_phys(page) + offset, size, direction);
107 return page_to_phys(page) + offset + get_dma_direct_offset(dev); 107 return page_to_phys(page) + offset + get_dma_direct_offset(dev);
108 } 108 }
109 109
110 static inline void dma_direct_unmap_page(struct device *dev, 110 static inline void dma_direct_unmap_page(struct device *dev,
111 dma_addr_t dma_address, 111 dma_addr_t dma_address,
112 size_t size, 112 size_t size,
113 enum dma_data_direction direction, 113 enum dma_data_direction direction,
114 struct dma_attrs *attrs) 114 struct dma_attrs *attrs)
115 { 115 {
116 /* It is not necessary to do cache cleanup here 116 /* It is not necessary to do cache cleanup here
117 * 117 *
118 * phys_to_virt is here because __dma_sync_page uses __virt_to_phys and 118 * phys_to_virt is here because __dma_sync_page uses __virt_to_phys and
119 * dma_address is a physical address 119 * dma_address is a physical address
120 */ 120 */
121 __dma_sync(dma_address, size, direction); 121 __dma_sync(dma_address, size, direction);
122 } 122 }
123 123
124 static inline void 124 static inline void
125 dma_direct_sync_single_for_cpu(struct device *dev, 125 dma_direct_sync_single_for_cpu(struct device *dev,
126 dma_addr_t dma_handle, size_t size, 126 dma_addr_t dma_handle, size_t size,
127 enum dma_data_direction direction) 127 enum dma_data_direction direction)
128 { 128 {
129 /* 129 /*
130 * It's pointless to flush the cache as the memory segment 130 * It's pointless to flush the cache as the memory segment
131 * is given to the CPU 131 * is given to the CPU
132 */ 132 */
133 133
134 if (direction == DMA_FROM_DEVICE) 134 if (direction == DMA_FROM_DEVICE)
135 __dma_sync(dma_handle, size, direction); 135 __dma_sync(dma_handle, size, direction);
136 } 136 }
137 137
138 static inline void 138 static inline void
139 dma_direct_sync_single_for_device(struct device *dev, 139 dma_direct_sync_single_for_device(struct device *dev,
140 dma_addr_t dma_handle, size_t size, 140 dma_addr_t dma_handle, size_t size,
141 enum dma_data_direction direction) 141 enum dma_data_direction direction)
142 { 142 {
143 /* 143 /*
144 * It's pointless to invalidate the cache if the device isn't 144 * It's pointless to invalidate the cache if the device isn't
145 * supposed to write to the relevant region 145 * supposed to write to the relevant region
146 */ 146 */
147 147
148 if (direction == DMA_TO_DEVICE) 148 if (direction == DMA_TO_DEVICE)
149 __dma_sync(dma_handle, size, direction); 149 __dma_sync(dma_handle, size, direction);
150 } 150 }
151 151
152 static inline void 152 static inline void
153 dma_direct_sync_sg_for_cpu(struct device *dev, 153 dma_direct_sync_sg_for_cpu(struct device *dev,
154 struct scatterlist *sgl, int nents, 154 struct scatterlist *sgl, int nents,
155 enum dma_data_direction direction) 155 enum dma_data_direction direction)
156 { 156 {
157 struct scatterlist *sg; 157 struct scatterlist *sg;
158 int i; 158 int i;
159 159
160 /* FIXME this part of the code is untested */ 160 /* FIXME this part of the code is untested */
161 if (direction == DMA_FROM_DEVICE) 161 if (direction == DMA_FROM_DEVICE)
162 for_each_sg(sgl, sg, nents, i) 162 for_each_sg(sgl, sg, nents, i)
163 __dma_sync(sg->dma_address, sg->length, direction); 163 __dma_sync(sg->dma_address, sg->length, direction);
164 } 164 }
165 165
166 static inline void 166 static inline void
167 dma_direct_sync_sg_for_device(struct device *dev, 167 dma_direct_sync_sg_for_device(struct device *dev,
168 struct scatterlist *sgl, int nents, 168 struct scatterlist *sgl, int nents,
169 enum dma_data_direction direction) 169 enum dma_data_direction direction)
170 { 170 {
171 struct scatterlist *sg; 171 struct scatterlist *sg;
172 int i; 172 int i;
173 173
174 /* FIXME this part of the code is untested */ 174 /* FIXME this part of the code is untested */
175 if (direction == DMA_TO_DEVICE) 175 if (direction == DMA_TO_DEVICE)
176 for_each_sg(sgl, sg, nents, i) 176 for_each_sg(sgl, sg, nents, i)
177 __dma_sync(sg->dma_address, sg->length, direction); 177 __dma_sync(sg->dma_address, sg->length, direction);
178 } 178 }
179 179
180 struct dma_map_ops dma_direct_ops = { 180 struct dma_map_ops dma_direct_ops = {
181 .alloc = dma_direct_alloc_coherent, 181 .alloc = dma_direct_alloc_coherent,
182 .free = dma_direct_free_coherent, 182 .free = dma_direct_free_coherent,
183 .map_sg = dma_direct_map_sg, 183 .map_sg = dma_direct_map_sg,
184 .unmap_sg = dma_direct_unmap_sg, 184 .unmap_sg = dma_direct_unmap_sg,
185 .dma_supported = dma_direct_dma_supported, 185 .dma_supported = dma_direct_dma_supported,
186 .map_page = dma_direct_map_page, 186 .map_page = dma_direct_map_page,
187 .unmap_page = dma_direct_unmap_page, 187 .unmap_page = dma_direct_unmap_page,
188 .sync_single_for_cpu = dma_direct_sync_single_for_cpu, 188 .sync_single_for_cpu = dma_direct_sync_single_for_cpu,
189 .sync_single_for_device = dma_direct_sync_single_for_device, 189 .sync_single_for_device = dma_direct_sync_single_for_device,
190 .sync_sg_for_cpu = dma_direct_sync_sg_for_cpu, 190 .sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,
191 .sync_sg_for_device = dma_direct_sync_sg_for_device, 191 .sync_sg_for_device = dma_direct_sync_sg_for_device,
192 }; 192 };
193 EXPORT_SYMBOL(dma_direct_ops); 193 EXPORT_SYMBOL(dma_direct_ops);
194 194
195 /* Number of entries preallocated for DMA-API debugging */ 195 /* Number of entries preallocated for DMA-API debugging */
196 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) 196 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
197 197
198 static int __init dma_init(void) 198 static int __init dma_init(void)
199 { 199 {
200 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); 200 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
201 201
202 return 0; 202 return 0;
203 } 203 }
204 fs_initcall(dma_init); 204 fs_initcall(dma_init);
205 205
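
The per-device offset described in the header comment of dma.c is a plain addition on the physical address, applied symmetrically in both directions. A sketch with hypothetical helper names (not part of this patch):

    /* Illustration of the offset scheme described above: bus addresses
     * are CPU physical addresses plus a per-device constant. */
    static dma_addr_t phys_to_dma_direct(struct device *dev, phys_addr_t pa)
    {
            return (dma_addr_t)pa + get_dma_direct_offset(dev);
    }

    static phys_addr_t dma_to_phys_direct(struct device *dev, dma_addr_t da)
    {
            return (phys_addr_t)(da - get_dma_direct_offset(dev));
    }
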
arch/microblaze/kernel/early_printk.c
1 /* 1 /*
2 * Early printk support for Microblaze. 2 * Early printk support for Microblaze.
3 * 3 *
4 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> 4 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2007-2009 PetaLogix 5 * Copyright (C) 2007-2009 PetaLogix
6 * Copyright (C) 2003-2006 Yasushi SHOJI <yashi@atmark-techno.com> 6 * Copyright (C) 2003-2006 Yasushi SHOJI <yashi@atmark-techno.com>
7 * 7 *
8 * This file is subject to the terms and conditions of the GNU General Public 8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive 9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details. 10 * for more details.
11 */ 11 */
12 12
13 #include <linux/console.h> 13 #include <linux/console.h>
14 #include <linux/kernel.h> 14 #include <linux/kernel.h>
15 #include <linux/init.h> 15 #include <linux/init.h>
16 #include <linux/string.h> 16 #include <linux/string.h>
17 #include <linux/tty.h> 17 #include <linux/tty.h>
18 #include <linux/io.h> 18 #include <linux/io.h>
19 #include <asm/processor.h> 19 #include <asm/processor.h>
20 #include <linux/fcntl.h> 20 #include <linux/fcntl.h>
21 #include <asm/setup.h> 21 #include <asm/setup.h>
22 #include <asm/prom.h> 22 #include <asm/prom.h>
23 23
24 static u32 early_console_initialized; 24 static u32 early_console_initialized;
25 static u32 base_addr; 25 static u32 base_addr;
26 26
27 #ifdef CONFIG_SERIAL_UARTLITE_CONSOLE 27 #ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
28 static void early_printk_uartlite_putc(char c) 28 static void early_printk_uartlite_putc(char c)
29 { 29 {
30 /* 30 /*
31 * Limit how many times we'll spin waiting for TX FIFO status. 31 * Limit how many times we'll spin waiting for TX FIFO status.
32 * This will prevent lockups if the base address is incorrectly 32 * This will prevent lockups if the base address is incorrectly
33 * set, or any other issue on the UARTLITE. 33 * set, or any other issue on the UARTLITE.
34 * This limit is pretty arbitrary, unless we are at about 10 baud 34 * This limit is pretty arbitrary, unless we are at about 10 baud
35 * we'll never timeout on a working UART. 35 * we'll never timeout on a working UART.
36 */ 36 */
37 37
38 unsigned retries = 1000000; 38 unsigned retries = 1000000;
39 /* read status bit - 0x8 offset */ 39 /* read status bit - 0x8 offset */
40 while (--retries && (in_be32(base_addr + 8) & (1 << 3))) 40 while (--retries && (in_be32(base_addr + 8) & (1 << 3)))
41 ; 41 ;
42 42
43 /* Only attempt the iowrite if we didn't timeout */ 43 /* Only attempt the iowrite if we didn't timeout */
44 /* write to TX_FIFO - 0x4 offset */ 44 /* write to TX_FIFO - 0x4 offset */
45 if (retries) 45 if (retries)
46 out_be32(base_addr + 4, c & 0xff); 46 out_be32(base_addr + 4, c & 0xff);
47 } 47 }
48 48
49 static void early_printk_uartlite_write(struct console *unused, 49 static void early_printk_uartlite_write(struct console *unused,
50 const char *s, unsigned n) 50 const char *s, unsigned n)
51 { 51 {
52 while (*s && n-- > 0) { 52 while (*s && n-- > 0) {
53 if (*s == '\n') 53 if (*s == '\n')
54 early_printk_uartlite_putc('\r'); 54 early_printk_uartlite_putc('\r');
55 early_printk_uartlite_putc(*s); 55 early_printk_uartlite_putc(*s);
56 s++; 56 s++;
57 } 57 }
58 } 58 }
59 59
60 static struct console early_serial_uartlite_console = { 60 static struct console early_serial_uartlite_console = {
61 .name = "earlyser", 61 .name = "earlyser",
62 .write = early_printk_uartlite_write, 62 .write = early_printk_uartlite_write,
63 .flags = CON_PRINTBUFFER | CON_BOOT, 63 .flags = CON_PRINTBUFFER | CON_BOOT,
64 .index = -1, 64 .index = -1,
65 }; 65 };
66 #endif /* CONFIG_SERIAL_UARTLITE_CONSOLE */ 66 #endif /* CONFIG_SERIAL_UARTLITE_CONSOLE */
67 67
68 #ifdef CONFIG_SERIAL_8250_CONSOLE 68 #ifdef CONFIG_SERIAL_8250_CONSOLE
69 static void early_printk_uart16550_putc(char c) 69 static void early_printk_uart16550_putc(char c)
70 { 70 {
71 /* 71 /*
72 * Limit how many times we'll spin waiting for TX FIFO status. 72 * Limit how many times we'll spin waiting for TX FIFO status.
73 * This will prevent lockups if the base address is incorrectly 73 * This will prevent lockups if the base address is incorrectly
74 * set, or any other issue on the UART. 74 * set, or any other issue on the UART.
75 * This limit is pretty arbitrary, unless we are at about 10 baud 75 * This limit is pretty arbitrary, unless we are at about 10 baud
76 * we'll never timeout on a working UART. 76 * we'll never timeout on a working UART.
77 */ 77 */
78 78
79 #define UART_LSR_TEMT 0x40 /* Transmitter empty */ 79 #define UART_LSR_TEMT 0x40 /* Transmitter empty */
80 #define UART_LSR_THRE 0x20 /* Transmit-hold-register empty */ 80 #define UART_LSR_THRE 0x20 /* Transmit-hold-register empty */
81 #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) 81 #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
82 82
83 unsigned retries = 10000; 83 unsigned retries = 10000;
84 84
85 while (--retries && 85 while (--retries &&
86 !((in_be32(base_addr + 0x14) & BOTH_EMPTY) == BOTH_EMPTY)) 86 !((in_be32(base_addr + 0x14) & BOTH_EMPTY) == BOTH_EMPTY))
87 ; 87 ;
88 88
89 if (retries) 89 if (retries)
90 out_be32(base_addr, c & 0xff); 90 out_be32(base_addr, c & 0xff);
91 } 91 }
92 92
93 static void early_printk_uart16550_write(struct console *unused, 93 static void early_printk_uart16550_write(struct console *unused,
94 const char *s, unsigned n) 94 const char *s, unsigned n)
95 { 95 {
96 while (*s && n-- > 0) { 96 while (*s && n-- > 0) {
97 if (*s == '\n') 97 if (*s == '\n')
98 early_printk_uart16550_putc('\r'); 98 early_printk_uart16550_putc('\r');
99 early_printk_uart16550_putc(*s); 99 early_printk_uart16550_putc(*s);
100 s++; 100 s++;
101 } 101 }
102 } 102 }
103 103
104 static struct console early_serial_uart16550_console = { 104 static struct console early_serial_uart16550_console = {
105 .name = "earlyser", 105 .name = "earlyser",
106 .write = early_printk_uart16550_write, 106 .write = early_printk_uart16550_write,
107 .flags = CON_PRINTBUFFER | CON_BOOT, 107 .flags = CON_PRINTBUFFER | CON_BOOT,
108 .index = -1, 108 .index = -1,
109 }; 109 };
110 #endif /* CONFIG_SERIAL_8250_CONSOLE */ 110 #endif /* CONFIG_SERIAL_8250_CONSOLE */
111 111
112 static struct console *early_console; 112 static struct console *early_console;
113 113
114 void early_printk(const char *fmt, ...) 114 void early_printk(const char *fmt, ...)
115 { 115 {
116 char buf[512]; 116 char buf[512];
117 int n; 117 int n;
118 va_list ap; 118 va_list ap;
119 119
120 if (early_console_initialized) { 120 if (early_console_initialized) {
121 va_start(ap, fmt); 121 va_start(ap, fmt);
122 n = vscnprintf(buf, 512, fmt, ap); 122 n = vscnprintf(buf, 512, fmt, ap);
123 early_console->write(early_console, buf, n); 123 early_console->write(early_console, buf, n);
124 va_end(ap); 124 va_end(ap);
125 } 125 }
126 } 126 }
127 127
128 int __init setup_early_printk(char *opt) 128 int __init setup_early_printk(char *opt)
129 { 129 {
130 int version = 0; 130 int version = 0;
131 131
132 if (early_console_initialized) 132 if (early_console_initialized)
133 return 1; 133 return 1;
134 134
135 base_addr = of_early_console(&version); 135 base_addr = of_early_console(&version);
136 if (base_addr) { 136 if (base_addr) {
137 #ifdef CONFIG_MMU 137 #ifdef CONFIG_MMU
138 early_console_reg_tlb_alloc(base_addr); 138 early_console_reg_tlb_alloc(base_addr);
139 #endif 139 #endif
140 switch (version) { 140 switch (version) {
141 #ifdef CONFIG_SERIAL_UARTLITE_CONSOLE 141 #ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
142 case UARTLITE: 142 case UARTLITE:
143 printk(KERN_INFO "Early console on uartlite " 143 pr_info("Early console on uartlite at 0x%08x\n",
144 "at 0x%08x\n", base_addr); 144 base_addr);
145 early_console = &early_serial_uartlite_console; 145 early_console = &early_serial_uartlite_console;
146 break; 146 break;
147 #endif 147 #endif
148 #ifdef CONFIG_SERIAL_8250_CONSOLE 148 #ifdef CONFIG_SERIAL_8250_CONSOLE
149 case UART16550: 149 case UART16550:
150 printk(KERN_INFO "Early console on uart16650 " 150 pr_info("Early console on uart16650 at 0x%08x\n",
151 "at 0x%08x\n", base_addr); 151 base_addr);
152 early_console = &early_serial_uart16550_console; 152 early_console = &early_serial_uart16550_console;
153 break; 153 break;
154 #endif 154 #endif
155 default: 155 default:
156 printk(KERN_INFO "Unsupported early console %d\n", 156 pr_info("Unsupported early console %d\n",
157 version); 157 version);
158 return 1; 158 return 1;
159 } 159 }
160 160
161 register_console(early_console); 161 register_console(early_console);
162 early_console_initialized = 1; 162 early_console_initialized = 1;
163 return 0; 163 return 0;
164 } 164 }
165 return 1; 165 return 1;
166 } 166 }
167 167
168 /* Remap early console to virtual address and do not allocate one TLB 168 /* Remap early console to virtual address and do not allocate one TLB
169 * only for early console because of performance degradation */ 169 * only for early console because of performance degradation */
170 void __init remap_early_printk(void) 170 void __init remap_early_printk(void)
171 { 171 {
172 if (!early_console_initialized || !early_console) 172 if (!early_console_initialized || !early_console)
173 return; 173 return;
174 printk(KERN_INFO "early_printk_console remapping from 0x%x to ", 174 pr_info("early_printk_console remapping from 0x%x to ", base_addr);
175 base_addr);
176 base_addr = (u32) ioremap(base_addr, PAGE_SIZE); 175 base_addr = (u32) ioremap(base_addr, PAGE_SIZE);
177 printk(KERN_CONT "0x%x\n", base_addr); 176 pr_cont("0x%x\n", base_addr);
178 177
179 #ifdef CONFIG_MMU 178 #ifdef CONFIG_MMU
180 /* 179 /*
181 * Early console is on the top of skipped TLB entries 180 * Early console is on the top of skipped TLB entries
182 * decrease tlb_skip size to ensure that the hardcoded TLB entry will be 181 * decrease tlb_skip size to ensure that the hardcoded TLB entry will be
183 * used by the generic algorithm 182 * used by the generic algorithm
184 * FIXME check if early console mapping is on the top by rereading 183 * FIXME check if early console mapping is on the top by rereading
185 * TLB entry and compare baseaddr 184 * TLB entry and compare baseaddr
186 * mts rtlbx, (tlb_skip - 1) 185 * mts rtlbx, (tlb_skip - 1)
187 * nop 186 * nop
188 * mfs rX, rtlblo 187 * mfs rX, rtlblo
189 * nop 188 * nop
190 * cmp rX, orig_base_addr 189 * cmp rX, orig_base_addr
191 */ 190 */
192 tlb_skip -= 1; 191 tlb_skip -= 1;
193 #endif 192 #endif
194 } 193 }
195 194
196 void __init disable_early_printk(void) 195 void __init disable_early_printk(void)
197 { 196 {
198 if (!early_console_initialized || !early_console) 197 if (!early_console_initialized || !early_console)
199 return; 198 return;
200 printk(KERN_WARNING "disabling early console\n"); 199 pr_warn("disabling early console\n");
201 unregister_console(early_console); 200 unregister_console(early_console);
202 early_console_initialized = 0; 201 early_console_initialized = 0;
203 } 202 }
204 203
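
Both putc routines above share the same bounded-poll shape: spin on a status bit a limited number of times, then write only if the FIFO drained. Distilled into one hypothetical helper (tx_busy stands in for the device-specific status read, e.g. in_be32(base_addr + 8) & (1 << 3) on uartlite):

    static void bounded_putc(char c, int (*tx_busy)(void))
    {
            unsigned retries = 1000000;

            while (--retries && tx_busy())
                    ;               /* spin, but never forever */
            if (retries)            /* only write if the FIFO drained */
                    out_be32(base_addr + 4, c & 0xff);  /* uartlite TX_FIFO */
    }
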
arch/microblaze/kernel/exceptions.c
1 /* 1 /*
2 * HW exception handling 2 * HW exception handling
3 * 3 *
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> 4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008 PetaLogix 5 * Copyright (C) 2008 PetaLogix
6 * 6 *
7 * This file is subject to the terms and conditions of the GNU General 7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this 8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details. 9 * archive for more details.
10 */ 10 */
11 11
12 /* 12 /*
13 * This file handles the architecture-dependent parts of hardware exceptions 13 * This file handles the architecture-dependent parts of hardware exceptions
14 */ 14 */
15 15
16 #include <linux/kernel.h> 16 #include <linux/kernel.h>
17 #include <linux/signal.h> 17 #include <linux/signal.h>
18 #include <linux/sched.h> 18 #include <linux/sched.h>
19 #include <linux/kallsyms.h> 19 #include <linux/kallsyms.h>
20 #include <linux/module.h> 20 #include <linux/module.h>
21 21
22 #include <asm/exceptions.h> 22 #include <asm/exceptions.h>
23 #include <asm/entry.h> /* For KM CPU var */ 23 #include <asm/entry.h> /* For KM CPU var */
24 #include <linux/uaccess.h> 24 #include <linux/uaccess.h>
25 #include <linux/errno.h> 25 #include <linux/errno.h>
26 #include <linux/ptrace.h> 26 #include <linux/ptrace.h>
27 #include <asm/current.h> 27 #include <asm/current.h>
28 #include <asm/cacheflush.h> 28 #include <asm/cacheflush.h>
29 29
30 #define MICROBLAZE_ILL_OPCODE_EXCEPTION 0x02 30 #define MICROBLAZE_ILL_OPCODE_EXCEPTION 0x02
31 #define MICROBLAZE_IBUS_EXCEPTION 0x03 31 #define MICROBLAZE_IBUS_EXCEPTION 0x03
32 #define MICROBLAZE_DBUS_EXCEPTION 0x04 32 #define MICROBLAZE_DBUS_EXCEPTION 0x04
33 #define MICROBLAZE_DIV_ZERO_EXCEPTION 0x05 33 #define MICROBLAZE_DIV_ZERO_EXCEPTION 0x05
34 #define MICROBLAZE_FPU_EXCEPTION 0x06 34 #define MICROBLAZE_FPU_EXCEPTION 0x06
35 #define MICROBLAZE_PRIVILEGED_EXCEPTION 0x07 35 #define MICROBLAZE_PRIVILEGED_EXCEPTION 0x07
36 36
37 static DEFINE_SPINLOCK(die_lock); 37 static DEFINE_SPINLOCK(die_lock);
38 38
39 void die(const char *str, struct pt_regs *fp, long err) 39 void die(const char *str, struct pt_regs *fp, long err)
40 { 40 {
41 console_verbose(); 41 console_verbose();
42 spin_lock_irq(&die_lock); 42 spin_lock_irq(&die_lock);
43 printk(KERN_WARNING "Oops: %s, sig: %ld\n", str, err); 43 pr_warn("Oops: %s, sig: %ld\n", str, err);
44 show_regs(fp); 44 show_regs(fp);
45 spin_unlock_irq(&die_lock); 45 spin_unlock_irq(&die_lock);
46 /* do_exit() should take care of panic'ing from an interrupt 46 /* do_exit() should take care of panic'ing from an interrupt
47 * context so we don't handle it here 47 * context so we don't handle it here
48 */ 48 */
49 do_exit(err); 49 do_exit(err);
50 } 50 }
51 51
52 /* for user application debugging */ 52 /* for user application debugging */
53 asmlinkage void sw_exception(struct pt_regs *regs) 53 asmlinkage void sw_exception(struct pt_regs *regs)
54 { 54 {
55 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->r16); 55 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->r16);
56 flush_dcache_range(regs->r16, regs->r16 + 0x4); 56 flush_dcache_range(regs->r16, regs->r16 + 0x4);
57 flush_icache_range(regs->r16, regs->r16 + 0x4); 57 flush_icache_range(regs->r16, regs->r16 + 0x4);
58 } 58 }
59 59
60 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) 60 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
61 { 61 {
62 siginfo_t info; 62 siginfo_t info;
63 63
64 if (kernel_mode(regs)) { 64 if (kernel_mode(regs))
65 die("Exception in kernel mode", regs, signr); 65 die("Exception in kernel mode", regs, signr);
66 } 66
67 info.si_signo = signr; 67 info.si_signo = signr;
68 info.si_errno = 0; 68 info.si_errno = 0;
69 info.si_code = code; 69 info.si_code = code;
70 info.si_addr = (void __user *) addr; 70 info.si_addr = (void __user *) addr;
71 force_sig_info(signr, &info, current); 71 force_sig_info(signr, &info, current);
72 } 72 }
73 73
74 asmlinkage void full_exception(struct pt_regs *regs, unsigned int type, 74 asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
75 int fsr, int addr) 75 int fsr, int addr)
76 { 76 {
77 #ifdef CONFIG_MMU 77 #ifdef CONFIG_MMU
78 addr = regs->pc; 78 addr = regs->pc;
79 #endif 79 #endif
80 80
81 #if 0 81 #if 0
82 printk(KERN_WARNING "Exception %02x in %s mode, FSR=%08x PC=%08x " \ 82 pr_warn("Exception %02x in %s mode, FSR=%08x PC=%08x ESR=%08x\n",
83 "ESR=%08x\n",
84 type, user_mode(regs) ? "user" : "kernel", fsr, 83 type, user_mode(regs) ? "user" : "kernel", fsr,
85 (unsigned int) regs->pc, (unsigned int) regs->esr); 84 (unsigned int) regs->pc, (unsigned int) regs->esr);
86 #endif 85 #endif
87 86
88 switch (type & 0x1F) { 87 switch (type & 0x1F) {
89 case MICROBLAZE_ILL_OPCODE_EXCEPTION: 88 case MICROBLAZE_ILL_OPCODE_EXCEPTION:
90 if (user_mode(regs)) { 89 if (user_mode(regs)) {
91 pr_debug("Illegal opcode exception in user mode\n"); 90 pr_debug("Illegal opcode exception in user mode\n");
92 _exception(SIGILL, regs, ILL_ILLOPC, addr); 91 _exception(SIGILL, regs, ILL_ILLOPC, addr);
93 return; 92 return;
94 } 93 }
95 printk(KERN_WARNING "Illegal opcode exception " \ 94 pr_warn("Illegal opcode exception in kernel mode.\n");
96 "in kernel mode.\n");
97 die("opcode exception", regs, SIGBUS); 95 die("opcode exception", regs, SIGBUS);
98 break; 96 break;
99 case MICROBLAZE_IBUS_EXCEPTION: 97 case MICROBLAZE_IBUS_EXCEPTION:
100 if (user_mode(regs)) { 98 if (user_mode(regs)) {
101 pr_debug("Instruction bus error exception in user mode\n"); 99 pr_debug("Instruction bus error exception in user mode\n");
102 _exception(SIGBUS, regs, BUS_ADRERR, addr); 100 _exception(SIGBUS, regs, BUS_ADRERR, addr);
103 return; 101 return;
104 } 102 }
105 printk(KERN_WARNING "Instruction bus error exception " \ 103 pr_warn("Instruction bus error exception in kernel mode.\n");
106 "in kernel mode.\n");
107 die("bus exception", regs, SIGBUS); 104 die("bus exception", regs, SIGBUS);
108 break; 105 break;
109 case MICROBLAZE_DBUS_EXCEPTION: 106 case MICROBLAZE_DBUS_EXCEPTION:
110 if (user_mode(regs)) { 107 if (user_mode(regs)) {
111 pr_debug("Data bus error exception in user mode\n"); 108 pr_debug("Data bus error exception in user mode\n");
112 _exception(SIGBUS, regs, BUS_ADRERR, addr); 109 _exception(SIGBUS, regs, BUS_ADRERR, addr);
113 return; 110 return;
114 } 111 }
115 printk(KERN_WARNING "Data bus error exception " \ 112 pr_warn("Data bus error exception in kernel mode.\n");
116 "in kernel mode.\n");
117 die("bus exception", regs, SIGBUS); 113 die("bus exception", regs, SIGBUS);
118 break; 114 break;
119 case MICROBLAZE_DIV_ZERO_EXCEPTION: 115 case MICROBLAZE_DIV_ZERO_EXCEPTION:
120 if (user_mode(regs)) { 116 if (user_mode(regs)) {
121 pr_debug("Divide by zero exception in user mode\n"); 117 pr_debug("Divide by zero exception in user mode\n");
122 _exception(SIGFPE, regs, FPE_INTDIV, addr); 118 _exception(SIGFPE, regs, FPE_INTDIV, addr);
123 return; 119 return;
124 } 120 }
125 printk(KERN_WARNING "Divide by zero exception " \ 121 pr_warn("Divide by zero exception in kernel mode.\n");
126 "in kernel mode.\n");
127 die("Divide by zero exception", regs, SIGBUS); 122 die("Divide by zero exception", regs, SIGBUS);
128 break; 123 break;
129 case MICROBLAZE_FPU_EXCEPTION: 124 case MICROBLAZE_FPU_EXCEPTION:
130 pr_debug("FPU exception\n"); 125 pr_debug("FPU exception\n");
131 /* IEEE FP exception */ 126 /* IEEE FP exception */
132 /* I removed fsr variable and use code var for storing fsr */ 127 /* I removed fsr variable and use code var for storing fsr */
133 if (fsr & FSR_IO) 128 if (fsr & FSR_IO)
134 fsr = FPE_FLTINV; 129 fsr = FPE_FLTINV;
135 else if (fsr & FSR_OF) 130 else if (fsr & FSR_OF)
136 fsr = FPE_FLTOVF; 131 fsr = FPE_FLTOVF;
137 else if (fsr & FSR_UF) 132 else if (fsr & FSR_UF)
138 fsr = FPE_FLTUND; 133 fsr = FPE_FLTUND;
139 else if (fsr & FSR_DZ) 134 else if (fsr & FSR_DZ)
140 fsr = FPE_FLTDIV; 135 fsr = FPE_FLTDIV;
141 else if (fsr & FSR_DO) 136 else if (fsr & FSR_DO)
142 fsr = FPE_FLTRES; 137 fsr = FPE_FLTRES;
143 _exception(SIGFPE, regs, fsr, addr); 138 _exception(SIGFPE, regs, fsr, addr);
144 break; 139 break;
145 140
146 #ifdef CONFIG_MMU 141 #ifdef CONFIG_MMU
147 case MICROBLAZE_PRIVILEGED_EXCEPTION: 142 case MICROBLAZE_PRIVILEGED_EXCEPTION:
148 pr_debug("Privileged exception\n"); 143 pr_debug("Privileged exception\n");
149 _exception(SIGILL, regs, ILL_PRVOPC, addr); 144 _exception(SIGILL, regs, ILL_PRVOPC, addr);
150 break; 145 break;
151 #endif 146 #endif
152 default: 147 default:
153 /* FIXME what to do in unexpected exception */ 148 /* FIXME what to do in unexpected exception */
154 printk(KERN_WARNING "Unexpected exception %02x " 149 pr_warn("Unexpected exception %02x PC=%08x in %s mode\n",
155 "PC=%08x in %s mode\n", type, (unsigned int) addr, 150 type, (unsigned int) addr,
156 kernel_mode(regs) ? "kernel" : "user"); 151 kernel_mode(regs) ? "kernel" : "user");
157 } 152 }
158 return; 153 return;
159 } 154 }
160 155
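
The FPU branch above translates FSR status bits into siginfo FPE codes, first match winning. Restated as a standalone lookup for clarity (illustration only, not part of the patch):

    /* The FSR-to-si_code translation above, restated as a lookup. */
    static int fsr_to_fpe(int fsr)
    {
            if (fsr & FSR_IO) return FPE_FLTINV;   /* invalid operation */
            if (fsr & FSR_OF) return FPE_FLTOVF;   /* overflow */
            if (fsr & FSR_UF) return FPE_FLTUND;   /* underflow */
            if (fsr & FSR_DZ) return FPE_FLTDIV;   /* divide by zero */
            if (fsr & FSR_DO) return FPE_FLTRES;   /* denormalized operand */
            return 0;
    }
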
arch/microblaze/kernel/ftrace.c
1 /* 1 /*
2 * Ftrace support for Microblaze. 2 * Ftrace support for Microblaze.
3 * 3 *
4 * Copyright (C) 2009 Michal Simek <monstr@monstr.eu> 4 * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2009 PetaLogix 5 * Copyright (C) 2009 PetaLogix
6 * 6 *
7 * Based on MIPS and PowerPC ftrace code 7 * Based on MIPS and PowerPC ftrace code
8 * 8 *
9 * This file is subject to the terms and conditions of the GNU General Public 9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive 10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details. 11 * for more details.
12 */ 12 */
13 13
14 #include <asm/cacheflush.h> 14 #include <asm/cacheflush.h>
15 #include <linux/ftrace.h> 15 #include <linux/ftrace.h>
16 16
17 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 17 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
18 /* 18 /*
19 * Hook the return address and push it in the stack of return addrs 19 * Hook the return address and push it in the stack of return addrs
20 * in current thread info. 20 * in current thread info.
21 */ 21 */
22 void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) 22 void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
23 { 23 {
24 unsigned long old; 24 unsigned long old;
25 int faulted, err; 25 int faulted, err;
26 struct ftrace_graph_ent trace; 26 struct ftrace_graph_ent trace;
27 unsigned long return_hooker = (unsigned long) 27 unsigned long return_hooker = (unsigned long)
28 &return_to_handler; 28 &return_to_handler;
29 29
30 if (unlikely(atomic_read(&current->tracing_graph_pause))) 30 if (unlikely(atomic_read(&current->tracing_graph_pause)))
31 return; 31 return;
32 32
33 /* 33 /*
34 * Protect against fault, even if it shouldn't 34 * Protect against fault, even if it shouldn't
35 * happen. This tool is too intrusive to 35 * happen. This tool is too intrusive to
36 * ignore such a protection. 36 * ignore such a protection.
37 */ 37 */
38 asm volatile(" 1: lwi %0, %2, 0; \ 38 asm volatile(" 1: lwi %0, %2, 0;" \
39 2: swi %3, %2, 0; \ 39 "2: swi %3, %2, 0;" \
40 addik %1, r0, 0; \ 40 " addik %1, r0, 0;" \
41 3: \ 41 "3:" \
42 .section .fixup, \"ax\"; \ 42 " .section .fixup, \"ax\";" \
43 4: brid 3b; \ 43 "4: brid 3b;" \
44 addik %1, r0, 1; \ 44 " addik %1, r0, 1;" \
45 .previous; \ 45 " .previous;" \
46 .section __ex_table,\"a\"; \ 46 " .section __ex_table,\"a\";" \
47 .word 1b,4b; \ 47 " .word 1b,4b;" \
48 .word 2b,4b; \ 48 " .word 2b,4b;" \
49 .previous;" \ 49 " .previous;" \
50 : "=&r" (old), "=r" (faulted) 50 : "=&r" (old), "=r" (faulted)
51 : "r" (parent), "r" (return_hooker) 51 : "r" (parent), "r" (return_hooker)
52 ); 52 );
53 53
54 flush_dcache_range((u32)parent, (u32)parent + 4); 54 flush_dcache_range((u32)parent, (u32)parent + 4);
55 flush_icache_range((u32)parent, (u32)parent + 4); 55 flush_icache_range((u32)parent, (u32)parent + 4);
56 56
57 if (unlikely(faulted)) { 57 if (unlikely(faulted)) {
58 ftrace_graph_stop(); 58 ftrace_graph_stop();
59 WARN_ON(1); 59 WARN_ON(1);
60 return; 60 return;
61 } 61 }
62 62
63 err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0); 63 err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0);
64 if (err == -EBUSY) { 64 if (err == -EBUSY) {
65 *parent = old; 65 *parent = old;
66 return; 66 return;
67 } 67 }
68 68
69 trace.func = self_addr; 69 trace.func = self_addr;
70 /* Only trace if the calling function expects to */ 70 /* Only trace if the calling function expects to */
71 if (!ftrace_graph_entry(&trace)) { 71 if (!ftrace_graph_entry(&trace)) {
72 current->curr_ret_stack--; 72 current->curr_ret_stack--;
73 *parent = old; 73 *parent = old;
74 } 74 }
75 } 75 }
76 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 76 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
77 77
78 #ifdef CONFIG_DYNAMIC_FTRACE 78 #ifdef CONFIG_DYNAMIC_FTRACE
79 /* save value to addr - it is safe to do it in asm */ 79 /* save value to addr - it is safe to do it in asm */
80 static int ftrace_modify_code(unsigned long addr, unsigned int value) 80 static int ftrace_modify_code(unsigned long addr, unsigned int value)
81 { 81 {
82 int faulted = 0; 82 int faulted = 0;
83 83
84 __asm__ __volatile__(" 1: swi %2, %1, 0; \ 84 __asm__ __volatile__(" 1: swi %2, %1, 0;" \
85 addik %0, r0, 0; \ 85 " addik %0, r0, 0;" \
86 2: \ 86 "2:" \
87 .section .fixup, \"ax\"; \ 87 " .section .fixup, \"ax\";" \
88 3: brid 2b; \ 88 "3: brid 2b;" \
89 addik %0, r0, 1; \ 89 " addik %0, r0, 1;" \
90 .previous; \ 90 " .previous;" \
91 .section __ex_table,\"a\"; \ 91 " .section __ex_table,\"a\";" \
92 .word 1b,3b; \ 92 " .word 1b,3b;" \
93 .previous;" \ 93 " .previous;" \
94 : "=r" (faulted) 94 : "=r" (faulted)
95 : "r" (addr), "r" (value) 95 : "r" (addr), "r" (value)
96 ); 96 );
97 97
98 if (unlikely(faulted)) 98 if (unlikely(faulted))
99 return -EFAULT; 99 return -EFAULT;
100 100
101 flush_dcache_range(addr, addr + 4); 101 flush_dcache_range(addr, addr + 4);
102 flush_icache_range(addr, addr + 4); 102 flush_icache_range(addr, addr + 4);
103 103
104 return 0; 104 return 0;
105 } 105 }
106 106
107 #define MICROBLAZE_NOP 0x80000000 107 #define MICROBLAZE_NOP 0x80000000
108 #define MICROBLAZE_BRI 0xb800000C 108 #define MICROBLAZE_BRI 0xb800000C
109 109
110 static unsigned int recorded; /* whether the original imm was saved */ 110 static unsigned int recorded; /* whether the original imm was saved */
111 static unsigned int imm; /* saving whole imm instruction */ 111 static unsigned int imm; /* saving whole imm instruction */
112 112
113 /* There are two approaches to implementing the ftrace_make_nop function - see below */ 113 /* There are two approaches to implementing the ftrace_make_nop function - see below */
114 #undef USE_FTRACE_NOP 114 #undef USE_FTRACE_NOP
115 115
116 #ifdef USE_FTRACE_NOP 116 #ifdef USE_FTRACE_NOP
117 static unsigned int bralid; /* saving whole bralid instruction */ 117 static unsigned int bralid; /* saving whole bralid instruction */
118 #endif 118 #endif
119 119
120 int ftrace_make_nop(struct module *mod, 120 int ftrace_make_nop(struct module *mod,
121 struct dyn_ftrace *rec, unsigned long addr) 121 struct dyn_ftrace *rec, unsigned long addr)
122 { 122 {
123 /* This is the code sequence we are working with: 123 /* This is the code sequence we are working with:
124 * b000c000 imm -16384 124 * b000c000 imm -16384
125 * b9fc8e30 bralid r15, -29136 // c0008e30 <_mcount> 125 * b9fc8e30 bralid r15, -29136 // c0008e30 <_mcount>
126 * 80000000 or r0, r0, r0 126 * 80000000 or r0, r0, r0
127 * 127 *
128 * The first solution (!USE_FTRACE_NOP-could be called branch solution) 128 * The first solution (!USE_FTRACE_NOP-could be called branch solution)
129 * b000c000 bri 12 (0xC - jump to any other instruction) 129 * b000c000 bri 12 (0xC - jump to any other instruction)
130 * b9fc8e30 bralid r15, -29136 // c0008e30 <_mcount> 130 * b9fc8e30 bralid r15, -29136 // c0008e30 <_mcount>
131 * 80000000 or r0, r0, r0 131 * 80000000 or r0, r0, r0
132 * any other instruction 132 * any other instruction
133 * 133 *
134 * The second solution (USE_FTRACE_NOP) - no jump just nops 134 * The second solution (USE_FTRACE_NOP) - no jump just nops
135 * 80000000 or r0, r0, r0 135 * 80000000 or r0, r0, r0
136 * 80000000 or r0, r0, r0 136 * 80000000 or r0, r0, r0
137 * 80000000 or r0, r0, r0 137 * 80000000 or r0, r0, r0
138 */ 138 */
139 int ret = 0; 139 int ret = 0;
140 140
141 if (recorded == 0) { 141 if (recorded == 0) {
142 recorded = 1; 142 recorded = 1;
143 imm = *(unsigned int *)rec->ip; 143 imm = *(unsigned int *)rec->ip;
144 pr_debug("%s: imm:0x%x\n", __func__, imm); 144 pr_debug("%s: imm:0x%x\n", __func__, imm);
145 #ifdef USE_FTRACE_NOP 145 #ifdef USE_FTRACE_NOP
146 bralid = *(unsigned int *)(rec->ip + 4); 146 bralid = *(unsigned int *)(rec->ip + 4);
147 pr_debug("%s: bralid 0x%x\n", __func__, bralid); 147 pr_debug("%s: bralid 0x%x\n", __func__, bralid);
148 #endif /* USE_FTRACE_NOP */ 148 #endif /* USE_FTRACE_NOP */
149 } 149 }
150 150
151 #ifdef USE_FTRACE_NOP 151 #ifdef USE_FTRACE_NOP
152 ret = ftrace_modify_code(rec->ip, MICROBLAZE_NOP); 152 ret = ftrace_modify_code(rec->ip, MICROBLAZE_NOP);
153 ret += ftrace_modify_code(rec->ip + 4, MICROBLAZE_NOP); 153 ret += ftrace_modify_code(rec->ip + 4, MICROBLAZE_NOP);
154 #else /* USE_FTRACE_NOP */ 154 #else /* USE_FTRACE_NOP */
155 ret = ftrace_modify_code(rec->ip, MICROBLAZE_BRI); 155 ret = ftrace_modify_code(rec->ip, MICROBLAZE_BRI);
156 #endif /* USE_FTRACE_NOP */ 156 #endif /* USE_FTRACE_NOP */
157 return ret; 157 return ret;
158 } 158 }
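
A quick decoding of the two opcodes defined above, as a worked check:
MICROBLAZE_NOP (0x80000000) is "or r0, r0, r0", and MICROBLAZE_BRI
(0xb800000C) is "bri 12", an unconditional PC-relative branch whose low 16
bits are the byte displacement:

	unsigned int bri = 0xb800000C;
	unsigned int disp = bri & 0xFFFF;	/* 12 bytes */
	unsigned int skip = disp / 4;		/* 3 instructions: the imm,
						 * the bralid and its delay
						 * slot, per the comment in
						 * ftrace_make_nop() */
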
159 159
160 /* ftrace_make_nop is expected to be called before this function */ 160 /* ftrace_make_nop is expected to be called before this function */
161 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 161 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
162 { 162 {
163 int ret; 163 int ret;
164 pr_debug("%s: addr:0x%x, rec->ip: 0x%x, imm:0x%x\n", 164 pr_debug("%s: addr:0x%x, rec->ip: 0x%x, imm:0x%x\n",
165 __func__, (unsigned int)addr, (unsigned int)rec->ip, imm); 165 __func__, (unsigned int)addr, (unsigned int)rec->ip, imm);
166 ret = ftrace_modify_code(rec->ip, imm); 166 ret = ftrace_modify_code(rec->ip, imm);
167 #ifdef USE_FTRACE_NOP 167 #ifdef USE_FTRACE_NOP
168 pr_debug("%s: bralid:0x%x\n", __func__, bralid); 168 pr_debug("%s: bralid:0x%x\n", __func__, bralid);
169 ret += ftrace_modify_code(rec->ip + 4, bralid); 169 ret += ftrace_modify_code(rec->ip + 4, bralid);
170 #endif /* USE_FTRACE_NOP */ 170 #endif /* USE_FTRACE_NOP */
171 return ret; 171 return ret;
172 } 172 }
173 173
174 int __init ftrace_dyn_arch_init(void *data) 174 int __init ftrace_dyn_arch_init(void *data)
175 { 175 {
176 /* The return code is returned via data */ 176 /* The return code is returned via data */
177 *(unsigned long *)data = 0; 177 *(unsigned long *)data = 0;
178 178
179 return 0; 179 return 0;
180 } 180 }
181 181
182 int ftrace_update_ftrace_func(ftrace_func_t func) 182 int ftrace_update_ftrace_func(ftrace_func_t func)
183 { 183 {
184 unsigned long ip = (unsigned long)(&ftrace_call); 184 unsigned long ip = (unsigned long)(&ftrace_call);
185 unsigned int upper = (unsigned int)func; 185 unsigned int upper = (unsigned int)func;
186 unsigned int lower = (unsigned int)func; 186 unsigned int lower = (unsigned int)func;
187 int ret = 0; 187 int ret = 0;
188 188
189 /* create the proper encoding to patch into the ftrace_call site */ 189 /* create the proper encoding to patch into the ftrace_call site */
190 upper = 0xb0000000 + (upper >> 16); /* imm func_upper */ 190 upper = 0xb0000000 + (upper >> 16); /* imm func_upper */
191 lower = 0x32800000 + (lower & 0xFFFF); /* addik r20, r0, func_lower */ 191 lower = 0x32800000 + (lower & 0xFFFF); /* addik r20, r0, func_lower */
192 192
193 pr_debug("%s: func=0x%x, ip=0x%x, upper=0x%x, lower=0x%x\n", 193 pr_debug("%s: func=0x%x, ip=0x%x, upper=0x%x, lower=0x%x\n",
194 __func__, (unsigned int)func, (unsigned int)ip, upper, lower); 194 __func__, (unsigned int)func, (unsigned int)ip, upper, lower);
195 195
196 /* save upper and lower code */ 196 /* save upper and lower code */
197 ret = ftrace_modify_code(ip, upper); 197 ret = ftrace_modify_code(ip, upper);
198 ret += ftrace_modify_code(ip + 4, lower); 198 ret += ftrace_modify_code(ip + 4, lower);
199 199
200 /* We just need to replace the rtsd r15, 8 with NOP */ 200 /* We just need to replace the rtsd r15, 8 with NOP */
201 ret += ftrace_modify_code((unsigned long)&ftrace_caller, 201 ret += ftrace_modify_code((unsigned long)&ftrace_caller,
202 MICROBLAZE_NOP); 202 MICROBLAZE_NOP);
203 203
204 return ret; 204 return ret;
205 } 205 }
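
As a worked example of the encoding in ftrace_update_ftrace_func(), take
the _mcount address from the comment in ftrace_make_nop(); the address is
illustrative only:

	unsigned int func  = 0xc0008e30;
	unsigned int upper = 0xb0000000 + (func >> 16);	   /* 0xb000c000:
							    * imm 0xc000 */
	unsigned int lower = 0x32800000 + (func & 0xFFFF); /* 0x32808e30:
							    * addik r20, r0,
							    * 0x8e30 */

The imm instruction supplies the upper 16 bits that the CPU combines with
the 16-bit immediate of the following addik, so the patched pair loads the
full 32-bit address of func into r20.
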
206 206
207 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 207 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
208 unsigned int old_jump; /* saving place for jump instruction */ 208 unsigned int old_jump; /* saving place for jump instruction */
209 209
210 int ftrace_enable_ftrace_graph_caller(void) 210 int ftrace_enable_ftrace_graph_caller(void)
211 { 211 {
212 unsigned int ret; 212 unsigned int ret;
213 unsigned long ip = (unsigned long)(&ftrace_call_graph); 213 unsigned long ip = (unsigned long)(&ftrace_call_graph);
214 214
215 old_jump = *(unsigned int *)ip; /* save jump over instruction */ 215 old_jump = *(unsigned int *)ip; /* save jump over instruction */
216 ret = ftrace_modify_code(ip, MICROBLAZE_NOP); 216 ret = ftrace_modify_code(ip, MICROBLAZE_NOP);
217 217
218 pr_debug("%s: Replace instruction: 0x%x\n", __func__, old_jump); 218 pr_debug("%s: Replace instruction: 0x%x\n", __func__, old_jump);
219 return ret; 219 return ret;
220 } 220 }
221 221
222 int ftrace_disable_ftrace_graph_caller(void) 222 int ftrace_disable_ftrace_graph_caller(void)
223 { 223 {
224 unsigned int ret; 224 unsigned int ret;
225 unsigned long ip = (unsigned long)(&ftrace_call_graph); 225 unsigned long ip = (unsigned long)(&ftrace_call_graph);
226 226
227 ret = ftrace_modify_code(ip, old_jump); 227 ret = ftrace_modify_code(ip, old_jump);
228 228
229 pr_debug("%s\n", __func__); 229 pr_debug("%s\n", __func__);
230 return ret; 230 return ret;
231 } 231 }
232 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 232 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
233 #endif /* CONFIG_DYNAMIC_FTRACE */ 233 #endif /* CONFIG_DYNAMIC_FTRACE */
234 234
arch/microblaze/kernel/heartbeat.c
1 /* 1 /*
2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> 2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2007-2009 PetaLogix 3 * Copyright (C) 2007-2009 PetaLogix
4 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
5 * 5 *
6 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive 7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details. 8 * for more details.
9 */ 9 */
10 10
11 #include <linux/sched.h> 11 #include <linux/sched.h>
12 #include <linux/io.h> 12 #include <linux/io.h>
13 13
14 #include <asm/setup.h> 14 #include <asm/setup.h>
15 #include <asm/page.h> 15 #include <asm/page.h>
16 #include <asm/prom.h> 16 #include <asm/prom.h>
17 17
18 static unsigned int base_addr; 18 static unsigned int base_addr;
19 19
20 void heartbeat(void) 20 void heartbeat(void)
21 { 21 {
22 static unsigned int cnt, period, dist; 22 static unsigned int cnt, period, dist;
23 23
24 if (base_addr) { 24 if (base_addr) {
25 if (cnt == 0 || cnt == dist) 25 if (cnt == 0 || cnt == dist)
26 out_be32(base_addr, 1); 26 out_be32(base_addr, 1);
27 else if (cnt == 7 || cnt == dist + 7) 27 else if (cnt == 7 || cnt == dist + 7)
28 out_be32(base_addr, 0); 28 out_be32(base_addr, 0);
29 29
30 if (++cnt > period) { 30 if (++cnt > period) {
31 cnt = 0; 31 cnt = 0;
32 /* 32 /*
33 * The hyperbolic function below modifies the heartbeat 33 * The hyperbolic function below modifies the heartbeat
34 * period length depending on the current (5 min) 34 * period length depending on the current (5 min)
35 * load. It goes through the points f(0)=126, f(1)=86, 35 * load. It goes through the points f(0)=126, f(1)=86,
36 * f(5)=51, f(inf)->30. 36 * f(5)=51, f(inf)->30.
37 */ 37 */
38 period = ((672 << FSHIFT) / (5 * avenrun[0] + 38 period = ((672 << FSHIFT) / (5 * avenrun[0] +
39 (7 << FSHIFT))) + 30; 39 (7 << FSHIFT))) + 30;
40 dist = period / 4; 40 dist = period / 4;
41 } 41 }
42 } 42 }
43 } 43 }
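
As a sanity check of the constants in the comment: avenrun[0] is the 5-min
load average in fixed point with FSHIFT fractional bits, so with
avenrun[0] = load << FSHIFT the expression reduces to
period(load) = 672 / (5 * load + 7) + 30:

	load 0      ->  672 / 7  + 30 = 126
	load 1      ->  672 / 12 + 30 =  86
	load 5      ->  672 / 32 + 30 =  51
	load -> inf ->                   30

which matches the stated points f(0)=126, f(1)=86, f(5)=51, f(inf)->30.
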
44 44
45 void setup_heartbeat(void) 45 void setup_heartbeat(void)
46 { 46 {
47 struct device_node *gpio = NULL; 47 struct device_node *gpio = NULL;
48 int *prop; 48 int *prop;
49 int j; 49 int j;
50 const char * const gpio_list[] = { 50 const char * const gpio_list[] = {
51 "xlnx,xps-gpio-1.00.a", 51 "xlnx,xps-gpio-1.00.a",
52 NULL 52 NULL
53 }; 53 };
54 54
55 for (j = 0; gpio_list[j] != NULL; j++) { 55 for (j = 0; gpio_list[j] != NULL; j++) {
56 gpio = of_find_compatible_node(NULL, NULL, gpio_list[j]); 56 gpio = of_find_compatible_node(NULL, NULL, gpio_list[j]);
57 if (gpio) 57 if (gpio)
58 break; 58 break;
59 } 59 }
60 60
61 if (gpio) { 61 if (gpio) {
62 base_addr = be32_to_cpup(of_get_property(gpio, "reg", NULL)); 62 base_addr = be32_to_cpup(of_get_property(gpio, "reg", NULL));
63 base_addr = (unsigned long) ioremap(base_addr, PAGE_SIZE); 63 base_addr = (unsigned long) ioremap(base_addr, PAGE_SIZE);
64 printk(KERN_NOTICE "Heartbeat GPIO at 0x%x\n", base_addr); 64 pr_notice("Heartbeat GPIO at 0x%x\n", base_addr);
65 65
66 /* GPIO is configured as output */ 66 /* GPIO is configured as output */
67 prop = (int *) of_get_property(gpio, "xlnx,is-bidir", NULL); 67 prop = (int *) of_get_property(gpio, "xlnx,is-bidir", NULL);
68 if (prop) 68 if (prop)
69 out_be32(base_addr + 4, 0); 69 out_be32(base_addr + 4, 0);
70 } 70 }
71 } 71 }
72 72
arch/microblaze/kernel/intc.c
1 /* 1 /*
2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> 2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2007-2009 PetaLogix 3 * Copyright (C) 2007-2009 PetaLogix
4 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
5 * 5 *
6 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive 7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details. 8 * for more details.
9 */ 9 */
10 10
11 #include <linux/init.h> 11 #include <linux/init.h>
12 #include <linux/irqdomain.h> 12 #include <linux/irqdomain.h>
13 #include <linux/irq.h> 13 #include <linux/irq.h>
14 #include <asm/page.h> 14 #include <asm/page.h>
15 #include <linux/io.h> 15 #include <linux/io.h>
16 #include <linux/bug.h> 16 #include <linux/bug.h>
17 17
18 #include <asm/prom.h> 18 #include <asm/prom.h>
19 #include <asm/irq.h> 19 #include <asm/irq.h>
20 20
21 #ifdef CONFIG_SELFMOD_INTC 21 #ifdef CONFIG_SELFMOD_INTC
22 #include <asm/selfmod.h> 22 #include <asm/selfmod.h>
23 #define INTC_BASE BARRIER_BASE_ADDR 23 #define INTC_BASE BARRIER_BASE_ADDR
24 #else 24 #else
25 static unsigned int intc_baseaddr; 25 static unsigned int intc_baseaddr;
26 #define INTC_BASE intc_baseaddr 26 #define INTC_BASE intc_baseaddr
27 #endif 27 #endif
28 28
29 /* No one else should require these constants, so define them locally here. */ 29 /* No one else should require these constants, so define them locally here. */
30 #define ISR 0x00 /* Interrupt Status Register */ 30 #define ISR 0x00 /* Interrupt Status Register */
31 #define IPR 0x04 /* Interrupt Pending Register */ 31 #define IPR 0x04 /* Interrupt Pending Register */
32 #define IER 0x08 /* Interrupt Enable Register */ 32 #define IER 0x08 /* Interrupt Enable Register */
33 #define IAR 0x0c /* Interrupt Acknowledge Register */ 33 #define IAR 0x0c /* Interrupt Acknowledge Register */
34 #define SIE 0x10 /* Set Interrupt Enable bits */ 34 #define SIE 0x10 /* Set Interrupt Enable bits */
35 #define CIE 0x14 /* Clear Interrupt Enable bits */ 35 #define CIE 0x14 /* Clear Interrupt Enable bits */
36 #define IVR 0x18 /* Interrupt Vector Register */ 36 #define IVR 0x18 /* Interrupt Vector Register */
37 #define MER 0x1c /* Master Enable Register */ 37 #define MER 0x1c /* Master Enable Register */
38 38
39 #define MER_ME (1<<0) 39 #define MER_ME (1<<0)
40 #define MER_HIE (1<<1) 40 #define MER_HIE (1<<1)
41 41
42 static void intc_enable_or_unmask(struct irq_data *d) 42 static void intc_enable_or_unmask(struct irq_data *d)
43 { 43 {
44 unsigned long mask = 1 << d->hwirq; 44 unsigned long mask = 1 << d->hwirq;
45 45
46 pr_debug("enable_or_unmask: %ld\n", d->hwirq); 46 pr_debug("enable_or_unmask: %ld\n", d->hwirq);
47 47
48 /* ack level irqs here because they can't be acked in the 48 /* ack level irqs here because they can't be acked in the
49 * ack callback, since handle_level_irq acks the irq 49 * ack callback, since handle_level_irq acks the irq
50 * before calling the interrupt handler 50 * before calling the interrupt handler
51 */ 51 */
52 if (irqd_is_level_type(d)) 52 if (irqd_is_level_type(d))
53 out_be32(INTC_BASE + IAR, mask); 53 out_be32(INTC_BASE + IAR, mask);
54 54
55 out_be32(INTC_BASE + SIE, mask); 55 out_be32(INTC_BASE + SIE, mask);
56 } 56 }
57 57
58 static void intc_disable_or_mask(struct irq_data *d) 58 static void intc_disable_or_mask(struct irq_data *d)
59 { 59 {
60 pr_debug("disable: %ld\n", d->hwirq); 60 pr_debug("disable: %ld\n", d->hwirq);
61 out_be32(INTC_BASE + CIE, 1 << d->hwirq); 61 out_be32(INTC_BASE + CIE, 1 << d->hwirq);
62 } 62 }
63 63
64 static void intc_ack(struct irq_data *d) 64 static void intc_ack(struct irq_data *d)
65 { 65 {
66 pr_debug("ack: %ld\n", d->hwirq); 66 pr_debug("ack: %ld\n", d->hwirq);
67 out_be32(INTC_BASE + IAR, 1 << d->hwirq); 67 out_be32(INTC_BASE + IAR, 1 << d->hwirq);
68 } 68 }
69 69
70 static void intc_mask_ack(struct irq_data *d) 70 static void intc_mask_ack(struct irq_data *d)
71 { 71 {
72 unsigned long mask = 1 << d->hwirq; 72 unsigned long mask = 1 << d->hwirq;
73 73
74 pr_debug("disable_and_ack: %ld\n", d->hwirq); 74 pr_debug("disable_and_ack: %ld\n", d->hwirq);
75 out_be32(INTC_BASE + CIE, mask); 75 out_be32(INTC_BASE + CIE, mask);
76 out_be32(INTC_BASE + IAR, mask); 76 out_be32(INTC_BASE + IAR, mask);
77 } 77 }
78 78
79 static struct irq_chip intc_dev = { 79 static struct irq_chip intc_dev = {
80 .name = "Xilinx INTC", 80 .name = "Xilinx INTC",
81 .irq_unmask = intc_enable_or_unmask, 81 .irq_unmask = intc_enable_or_unmask,
82 .irq_mask = intc_disable_or_mask, 82 .irq_mask = intc_disable_or_mask,
83 .irq_ack = intc_ack, 83 .irq_ack = intc_ack,
84 .irq_mask_ack = intc_mask_ack, 84 .irq_mask_ack = intc_mask_ack,
85 }; 85 };
86 86
87 static struct irq_domain *root_domain; 87 static struct irq_domain *root_domain;
88 88
89 unsigned int get_irq(void) 89 unsigned int get_irq(void)
90 { 90 {
91 unsigned int hwirq, irq = -1; 91 unsigned int hwirq, irq = -1;
92 92
93 hwirq = in_be32(INTC_BASE + IVR); 93 hwirq = in_be32(INTC_BASE + IVR);
94 if (hwirq != -1U) 94 if (hwirq != -1U)
95 irq = irq_find_mapping(root_domain, hwirq); 95 irq = irq_find_mapping(root_domain, hwirq);
96 96
97 pr_debug("get_irq: hwirq=%d, irq=%d\n", hwirq, irq); 97 pr_debug("get_irq: hwirq=%d, irq=%d\n", hwirq, irq);
98 98
99 return irq; 99 return irq;
100 } 100 }
101 101
102 static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) 102 static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
103 { 103 {
104 u32 intr_mask = (u32)d->host_data; 104 u32 intr_mask = (u32)d->host_data;
105 105
106 if (intr_mask & (1 << hw)) { 106 if (intr_mask & (1 << hw)) {
107 irq_set_chip_and_handler_name(irq, &intc_dev, 107 irq_set_chip_and_handler_name(irq, &intc_dev,
108 handle_edge_irq, "edge"); 108 handle_edge_irq, "edge");
109 irq_clear_status_flags(irq, IRQ_LEVEL); 109 irq_clear_status_flags(irq, IRQ_LEVEL);
110 } else { 110 } else {
111 irq_set_chip_and_handler_name(irq, &intc_dev, 111 irq_set_chip_and_handler_name(irq, &intc_dev,
112 handle_level_irq, "level"); 112 handle_level_irq, "level");
113 irq_set_status_flags(irq, IRQ_LEVEL); 113 irq_set_status_flags(irq, IRQ_LEVEL);
114 } 114 }
115 return 0; 115 return 0;
116 } 116 }
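
The host_data mask tested in xintc_map() comes from the "xlnx,kind-of-intr"
device-tree property read in init_IRQ() below: bit i set means hwirq i is
edge-triggered. A small worked example, with the mask value assumed purely
for illustration:

	u32 intr_mask = 0x6;	/* binary 110: bits 1 and 2 set */
	/* hw 0 -> handle_level_irq, IRQ_LEVEL set   */
	/* hw 1 -> handle_edge_irq,  IRQ_LEVEL clear */
	/* hw 2 -> handle_edge_irq,  IRQ_LEVEL clear */
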
117 117
118 static const struct irq_domain_ops xintc_irq_domain_ops = { 118 static const struct irq_domain_ops xintc_irq_domain_ops = {
119 .xlate = irq_domain_xlate_onetwocell, 119 .xlate = irq_domain_xlate_onetwocell,
120 .map = xintc_map, 120 .map = xintc_map,
121 }; 121 };
122 122
123 void __init init_IRQ(void) 123 void __init init_IRQ(void)
124 { 124 {
125 u32 nr_irq, intr_mask; 125 u32 nr_irq, intr_mask;
126 struct device_node *intc = NULL; 126 struct device_node *intc = NULL;
127 #ifdef CONFIG_SELFMOD_INTC 127 #ifdef CONFIG_SELFMOD_INTC
128 unsigned int intc_baseaddr = 0; 128 unsigned int intc_baseaddr = 0;
129 static int arr_func[] = { 129 static int arr_func[] = {
130 (int)&get_irq, 130 (int)&get_irq,
131 (int)&intc_enable_or_unmask, 131 (int)&intc_enable_or_unmask,
132 (int)&intc_disable_or_mask, 132 (int)&intc_disable_or_mask,
133 (int)&intc_mask_ack, 133 (int)&intc_mask_ack,
134 (int)&intc_ack, 134 (int)&intc_ack,
135 (int)&intc_end, 135 (int)&intc_end,
136 0 136 0
137 }; 137 };
138 #endif 138 #endif
139 intc = of_find_compatible_node(NULL, NULL, "xlnx,xps-intc-1.00.a"); 139 intc = of_find_compatible_node(NULL, NULL, "xlnx,xps-intc-1.00.a");
140 BUG_ON(!intc); 140 BUG_ON(!intc);
141 141
142 intc_baseaddr = be32_to_cpup(of_get_property(intc, "reg", NULL)); 142 intc_baseaddr = be32_to_cpup(of_get_property(intc, "reg", NULL));
143 intc_baseaddr = (unsigned long) ioremap(intc_baseaddr, PAGE_SIZE); 143 intc_baseaddr = (unsigned long) ioremap(intc_baseaddr, PAGE_SIZE);
144 nr_irq = be32_to_cpup(of_get_property(intc, 144 nr_irq = be32_to_cpup(of_get_property(intc,
145 "xlnx,num-intr-inputs", NULL)); 145 "xlnx,num-intr-inputs", NULL));
146 146
147 intr_mask = 147 intr_mask =
148 be32_to_cpup(of_get_property(intc, "xlnx,kind-of-intr", NULL)); 148 be32_to_cpup(of_get_property(intc, "xlnx,kind-of-intr", NULL));
149 if (intr_mask > (u32)((1ULL << nr_irq) - 1)) 149 if (intr_mask > (u32)((1ULL << nr_irq) - 1))
150 printk(KERN_INFO " ERROR: Mismatch in kind-of-intr param\n"); 150 pr_info(" ERROR: Mismatch in kind-of-intr param\n");
151 151
152 #ifdef CONFIG_SELFMOD_INTC 152 #ifdef CONFIG_SELFMOD_INTC
153 selfmod_function((int *) arr_func, intc_baseaddr); 153 selfmod_function((int *) arr_func, intc_baseaddr);
154 #endif 154 #endif
155 printk(KERN_INFO "%s #0 at 0x%08x, num_irq=%d, edge=0x%x\n", 155 pr_info("%s #0 at 0x%08x, num_irq=%d, edge=0x%x\n",
156 intc->name, intc_baseaddr, nr_irq, intr_mask); 156 intc->name, intc_baseaddr, nr_irq, intr_mask);
157 157
158 /* 158 /*
159 * Disable all external interrupts until they are 159 * Disable all external interrupts until they are
160 * explicitly requested. 160 * explicitly requested.
161 */ 161 */
162 out_be32(intc_baseaddr + IER, 0); 162 out_be32(intc_baseaddr + IER, 0);
163 163
164 /* Acknowledge any pending interrupts just in case. */ 164 /* Acknowledge any pending interrupts just in case. */
165 out_be32(intc_baseaddr + IAR, 0xffffffff); 165 out_be32(intc_baseaddr + IAR, 0xffffffff);
166 166
167 /* Turn on the Master Enable. */ 167 /* Turn on the Master Enable. */
168 out_be32(intc_baseaddr + MER, MER_HIE | MER_ME); 168 out_be32(intc_baseaddr + MER, MER_HIE | MER_ME);
169 169
170 /* Yeah, okay, casting the intr_mask to a void* is butt-ugly, but I'm 170 /* Yeah, okay, casting the intr_mask to a void* is butt-ugly, but I'm
171 * lazy and Michal can clean it up to something nicer when he tests 171 * lazy and Michal can clean it up to something nicer when he tests
172 * and commits this patch. ~~gcl */ 172 * and commits this patch. ~~gcl */
173 root_domain = irq_domain_add_linear(intc, nr_irq, &xintc_irq_domain_ops, 173 root_domain = irq_domain_add_linear(intc, nr_irq, &xintc_irq_domain_ops,
174 (void *)intr_mask); 174 (void *)intr_mask);
175 } 175 }
176 176
arch/microblaze/kernel/kgdb.c
1 /* 1 /*
2 * Microblaze KGDB support 2 * Microblaze KGDB support
3 * 3 *
4 * This file is subject to the terms and conditions of the GNU General Public 4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive 5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details. 6 * for more details.
7 */ 7 */
8 8
9 #include <linux/kgdb.h> 9 #include <linux/kgdb.h>
10 #include <linux/kdebug.h> 10 #include <linux/kdebug.h>
11 #include <linux/irq.h> 11 #include <linux/irq.h>
12 #include <linux/io.h> 12 #include <linux/io.h>
13 #include <asm/cacheflush.h> 13 #include <asm/cacheflush.h>
14 #include <asm/asm-offsets.h> 14 #include <asm/asm-offsets.h>
15 #include <asm/pvr.h> 15 #include <asm/pvr.h>
16 16
17 #define GDB_REG 0 17 #define GDB_REG 0
18 #define GDB_PC 32 18 #define GDB_PC 32
19 #define GDB_MSR 33 19 #define GDB_MSR 33
20 #define GDB_EAR 34 20 #define GDB_EAR 34
21 #define GDB_ESR 35 21 #define GDB_ESR 35
22 #define GDB_FSR 36 22 #define GDB_FSR 36
23 #define GDB_BTR 37 23 #define GDB_BTR 37
24 #define GDB_PVR 38 24 #define GDB_PVR 38
25 #define GDB_REDR 50 25 #define GDB_REDR 50
26 #define GDB_RPID 51 26 #define GDB_RPID 51
27 #define GDB_RZPR 52 27 #define GDB_RZPR 52
28 #define GDB_RTLBX 53 28 #define GDB_RTLBX 53
29 #define GDB_RTLBSX 54 /* mfs can't read it */ 29 #define GDB_RTLBSX 54 /* mfs can't read it */
30 #define GDB_RTLBLO 55 30 #define GDB_RTLBLO 55
31 #define GDB_RTLBHI 56 31 #define GDB_RTLBHI 56
32 32
33 /* keep pvr separately because it is unchangeable */ 33 /* keep pvr separately because it is unchangeable */
34 struct pvr_s pvr; 34 struct pvr_s pvr;
35 35
36 void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) 36 void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
37 { 37 {
38 int i; 38 int i;
39 unsigned long *pt_regb = (unsigned long *)regs; 39 unsigned long *pt_regb = (unsigned long *)regs;
40 int temp; 40 int temp;
41 /* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */ 41 /* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */
42 for (i = 0; i < (sizeof(struct pt_regs) / 4) - 1; i++) 42 for (i = 0; i < (sizeof(struct pt_regs) / 4) - 1; i++)
43 gdb_regs[i] = pt_regb[i]; 43 gdb_regs[i] = pt_regb[i];
44 44
45 /* Branch target register can't be changed */ 45 /* Branch target register can't be changed */
46 __asm__ __volatile__ ("mfs %0, rbtr;" : "=r"(temp) : ); 46 __asm__ __volatile__ ("mfs %0, rbtr;" : "=r"(temp) : );
47 gdb_regs[GDB_BTR] = temp; 47 gdb_regs[GDB_BTR] = temp;
48 48
49 /* pvr part - we have 11 pvr regs */ 49 /* pvr part - we have 11 pvr regs */
50 for (i = 0; i < sizeof(struct pvr_s)/4; i++) 50 for (i = 0; i < sizeof(struct pvr_s)/4; i++)
51 gdb_regs[GDB_PVR + i] = pvr.pvr[i]; 51 gdb_regs[GDB_PVR + i] = pvr.pvr[i];
52 52
53 /* read special registers - can't be changed */ 53 /* read special registers - can't be changed */
54 __asm__ __volatile__ ("mfs %0, redr;" : "=r"(temp) : ); 54 __asm__ __volatile__ ("mfs %0, redr;" : "=r"(temp) : );
55 gdb_regs[GDB_REDR] = temp; 55 gdb_regs[GDB_REDR] = temp;
56 __asm__ __volatile__ ("mfs %0, rpid;" : "=r"(temp) : ); 56 __asm__ __volatile__ ("mfs %0, rpid;" : "=r"(temp) : );
57 gdb_regs[GDB_RPID] = temp; 57 gdb_regs[GDB_RPID] = temp;
58 __asm__ __volatile__ ("mfs %0, rzpr;" : "=r"(temp) : ); 58 __asm__ __volatile__ ("mfs %0, rzpr;" : "=r"(temp) : );
59 gdb_regs[GDB_RZPR] = temp; 59 gdb_regs[GDB_RZPR] = temp;
60 __asm__ __volatile__ ("mfs %0, rtlbx;" : "=r"(temp) : ); 60 __asm__ __volatile__ ("mfs %0, rtlbx;" : "=r"(temp) : );
61 gdb_regs[GDB_RTLBX] = temp; 61 gdb_regs[GDB_RTLBX] = temp;
62 __asm__ __volatile__ ("mfs %0, rtlblo;" : "=r"(temp) : ); 62 __asm__ __volatile__ ("mfs %0, rtlblo;" : "=r"(temp) : );
63 gdb_regs[GDB_RTLBLO] = temp; 63 gdb_regs[GDB_RTLBLO] = temp;
64 __asm__ __volatile__ ("mfs %0, rtlbhi;" : "=r"(temp) : ); 64 __asm__ __volatile__ ("mfs %0, rtlbhi;" : "=r"(temp) : );
65 gdb_regs[GDB_RTLBHI] = temp; 65 gdb_regs[GDB_RTLBHI] = temp;
66 } 66 }
67 67
68 void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) 68 void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
69 { 69 {
70 int i; 70 int i;
71 unsigned long *pt_regb = (unsigned long *)regs; 71 unsigned long *pt_regb = (unsigned long *)regs;
72 72
73 /* pt_regs and gdb_regs have the same 37 values. 73 /* pt_regs and gdb_regs have the same 37 values.
74 * The rest of gdb_regs are unused and can't be changed. 74 * The rest of gdb_regs are unused and can't be changed.
75 * The r0 register value can't be changed either. */ 75 * The r0 register value can't be changed either. */
76 for (i = 1; i < (sizeof(struct pt_regs) / 4) - 1; i++) 76 for (i = 1; i < (sizeof(struct pt_regs) / 4) - 1; i++)
77 pt_regb[i] = gdb_regs[i]; 77 pt_regb[i] = gdb_regs[i];
78 } 78 }
79 79
80 void microblaze_kgdb_break(struct pt_regs *regs) 80 void microblaze_kgdb_break(struct pt_regs *regs)
81 { 81 {
82 if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0) 82 if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
83 return; 83 return;
84 84
85 /* Jump over the first arch_kgdb_breakpoint, which is a barrier to 85 /* Jump over the first arch_kgdb_breakpoint, which is a barrier to
86 * get kgdb working. The same solution is used for powerpc */ 86 * get kgdb working. The same solution is used for powerpc */
87 if (*(u32 *) (regs->pc) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr)) 87 if (*(u32 *) (regs->pc) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
88 regs->pc += BREAK_INSTR_SIZE; 88 regs->pc += BREAK_INSTR_SIZE;
89 } 89 }
90 90
91 /* untested */ 91 /* untested */
92 void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) 92 void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
93 { 93 {
94 int i; 94 int i;
95 unsigned long *pt_regb = (unsigned long *)(p->thread.regs); 95 unsigned long *pt_regb = (unsigned long *)(p->thread.regs);
96 96
97 /* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */ 97 /* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */
98 for (i = 0; i < (sizeof(struct pt_regs) / 4) - 1; i++) 98 for (i = 0; i < (sizeof(struct pt_regs) / 4) - 1; i++)
99 gdb_regs[i] = pt_regb[i]; 99 gdb_regs[i] = pt_regb[i];
100 100
101 /* pvr part - we have 11 pvr regs */ 101 /* pvr part - we have 11 pvr regs */
102 for (i = 0; i < sizeof(struct pvr_s)/4; i++) 102 for (i = 0; i < sizeof(struct pvr_s)/4; i++)
103 gdb_regs[GDB_PVR + i] = pvr.pvr[i]; 103 gdb_regs[GDB_PVR + i] = pvr.pvr[i];
104 } 104 }
105 105
106 void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip) 106 void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
107 { 107 {
108 regs->pc = ip; 108 regs->pc = ip;
109 } 109 }
110 110
111 int kgdb_arch_handle_exception(int vector, int signo, int err_code, 111 int kgdb_arch_handle_exception(int vector, int signo, int err_code,
112 char *remcom_in_buffer, char *remcom_out_buffer, 112 char *remcom_in_buffer, char *remcom_out_buffer,
113 struct pt_regs *regs) 113 struct pt_regs *regs)
114 { 114 {
115 char *ptr; 115 char *ptr;
116 unsigned long address; 116 unsigned long address;
117 117
118 switch (remcom_in_buffer[0]) { 118 switch (remcom_in_buffer[0]) {
119 case 'c': 119 case 'c':
120 /* handle the optional parameter */ 120 /* handle the optional parameter */
121 ptr = &remcom_in_buffer[1]; 121 ptr = &remcom_in_buffer[1];
122 if (kgdb_hex2long(&ptr, &address)) 122 if (kgdb_hex2long(&ptr, &address))
123 regs->pc = address; 123 regs->pc = address;
124 124
125 return 0; 125 return 0;
126 } 126 }
127 return -1; /* this means that we do not want to exit from the handler */ 127 return -1; /* this means that we do not want to exit from the handler */
128 } 128 }
129 129
130 int kgdb_arch_init(void) 130 int kgdb_arch_init(void)
131 { 131 {
132 get_pvr(&pvr); /* Fill PVR structure */ 132 get_pvr(&pvr); /* Fill PVR structure */
133 return 0; 133 return 0;
134 } 134 }
135 135
136 void kgdb_arch_exit(void) 136 void kgdb_arch_exit(void)
137 { 137 {
138 /* Nothing to do */ 138 /* Nothing to do */
139 } 139 }
140 140
141 /* 141 /*
142 * Global data 142 * Global data
143 */ 143 */
144 struct kgdb_arch arch_kgdb_ops = { 144 const struct kgdb_arch arch_kgdb_ops = {
145 #ifdef __MICROBLAZEEL__ 145 #ifdef __MICROBLAZEEL__
146 .gdb_bpt_instr = {0x18, 0x00, 0x0c, 0xba}, /* brki r16, 0x18 */ 146 .gdb_bpt_instr = {0x18, 0x00, 0x0c, 0xba}, /* brki r16, 0x18 */
147 #else 147 #else
148 .gdb_bpt_instr = {0xba, 0x0c, 0x00, 0x18}, /* brki r16, 0x18 */ 148 .gdb_bpt_instr = {0xba, 0x0c, 0x00, 0x18}, /* brki r16, 0x18 */
149 #endif 149 #endif
150 }; 150 };
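
The two byte arrays above are the same 32-bit instruction word, 0xba0c0018
("brki r16, 0x18"), laid out for each endianness; a short sketch of the
equivalence:

	unsigned char be[4] = { 0xba, 0x0c, 0x00, 0x18 }; /* big-endian */
	unsigned char le[4] = { 0x18, 0x00, 0x0c, 0xba }; /* little-endian,
							   * __MICROBLAZEEL__ */
	/* both decode to 0xba0c0018 in the CPU's native byte order */
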
151 151
arch/microblaze/kernel/module.c
1 /* 1 /*
2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> 2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2007-2009 PetaLogix 3 * Copyright (C) 2007-2009 PetaLogix
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as 6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9 9
10 #include <linux/module.h> 10 #include <linux/module.h>
11 #include <linux/moduleloader.h> 11 #include <linux/moduleloader.h>
12 #include <linux/kernel.h> 12 #include <linux/kernel.h>
13 #include <linux/elf.h> 13 #include <linux/elf.h>
14 #include <linux/vmalloc.h> 14 #include <linux/vmalloc.h>
15 #include <linux/fs.h> 15 #include <linux/fs.h>
16 #include <linux/string.h> 16 #include <linux/string.h>
17 17
18 #include <asm/pgtable.h> 18 #include <asm/pgtable.h>
19 #include <asm/cacheflush.h> 19 #include <asm/cacheflush.h>
20 20
21 int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, 21 int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
22 unsigned int symindex, unsigned int relsec, struct module *module) 22 unsigned int symindex, unsigned int relsec, struct module *module)
23 { 23 {
24 24
25 unsigned int i; 25 unsigned int i;
26 Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr; 26 Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
27 Elf32_Sym *sym; 27 Elf32_Sym *sym;
28 unsigned long int *location; 28 unsigned long int *location;
29 unsigned long int value; 29 unsigned long int value;
30 #if __GNUC__ < 4 30 #if __GNUC__ < 4
31 unsigned long int old_value; 31 unsigned long int old_value;
32 #endif 32 #endif
33 33
34 pr_debug("Applying add relocation section %u to %u\n", 34 pr_debug("Applying add relocation section %u to %u\n",
35 relsec, sechdrs[relsec].sh_info); 35 relsec, sechdrs[relsec].sh_info);
36 36
37 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) { 37 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
38 38
39 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + 39 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr +
40 rela[i].r_offset; 40 rela[i].r_offset;
41 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr + 41 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr +
42 ELF32_R_SYM(rela[i].r_info); 42 ELF32_R_SYM(rela[i].r_info);
43 value = sym->st_value + rela[i].r_addend; 43 value = sym->st_value + rela[i].r_addend;
44 44
45 switch (ELF32_R_TYPE(rela[i].r_info)) { 45 switch (ELF32_R_TYPE(rela[i].r_info)) {
46 46
47 /* 47 /*
48 * Be careful! mb-gcc / mb-ld splits the relocs between the 48 * Be careful! mb-gcc / mb-ld splits the relocs between the
49 * text and the reloc table. In general this means we must 49 * text and the reloc table. In general this means we must
50 * read the current contents of (*location), add any offset 50 * read the current contents of (*location), add any offset
51 * then store the result back in 51 * then store the result back in
52 */ 52 */
53 53
54 case R_MICROBLAZE_32: 54 case R_MICROBLAZE_32:
55 #if __GNUC__ < 4 55 #if __GNUC__ < 4
56 old_value = *location; 56 old_value = *location;
57 *location = value + old_value; 57 *location = value + old_value;
58 58
59 pr_debug("R_MICROBLAZE_32 (%08lx->%08lx)\n", 59 pr_debug("R_MICROBLAZE_32 (%08lx->%08lx)\n",
60 old_value, value); 60 old_value, value);
61 #else 61 #else
62 *location = value; 62 *location = value;
63 #endif 63 #endif
64 break; 64 break;
65 65
66 case R_MICROBLAZE_64: 66 case R_MICROBLAZE_64:
67 #if __GNUC__ < 4 67 #if __GNUC__ < 4
68 /* Split relocs only required/used pre gcc4.1.1 */ 68 /* Split relocs only required/used pre gcc4.1.1 */
69 old_value = ((location[0] & 0x0000FFFF) << 16) | 69 old_value = ((location[0] & 0x0000FFFF) << 16) |
70 (location[1] & 0x0000FFFF); 70 (location[1] & 0x0000FFFF);
71 value += old_value; 71 value += old_value;
72 #endif 72 #endif
73 location[0] = (location[0] & 0xFFFF0000) | 73 location[0] = (location[0] & 0xFFFF0000) |
74 (value >> 16); 74 (value >> 16);
75 location[1] = (location[1] & 0xFFFF0000) | 75 location[1] = (location[1] & 0xFFFF0000) |
76 (value & 0xFFFF); 76 (value & 0xFFFF);
77 #if __GNUC__ < 4 77 #if __GNUC__ < 4
78 pr_debug("R_MICROBLAZE_64 (%08lx->%08lx)\n", 78 pr_debug("R_MICROBLAZE_64 (%08lx->%08lx)\n",
79 old_value, value); 79 old_value, value);
80 #endif 80 #endif
81 break; 81 break;
82 82
83 case R_MICROBLAZE_64_PCREL: 83 case R_MICROBLAZE_64_PCREL:
84 #if __GNUC__ < 4 84 #if __GNUC__ < 4
85 old_value = (location[0] & 0xFFFF) << 16 | 85 old_value = (location[0] & 0xFFFF) << 16 |
86 (location[1] & 0xFFFF); 86 (location[1] & 0xFFFF);
87 value -= old_value; 87 value -= old_value;
88 #endif 88 #endif
89 value -= (unsigned long int)(location) + 4; 89 value -= (unsigned long int)(location) + 4;
90 location[0] = (location[0] & 0xFFFF0000) | 90 location[0] = (location[0] & 0xFFFF0000) |
91 (value >> 16); 91 (value >> 16);
92 location[1] = (location[1] & 0xFFFF0000) | 92 location[1] = (location[1] & 0xFFFF0000) |
93 (value & 0xFFFF); 93 (value & 0xFFFF);
94 pr_debug("R_MICROBLAZE_64_PCREL (%08lx)\n", 94 pr_debug("R_MICROBLAZE_64_PCREL (%08lx)\n",
95 value); 95 value);
96 break; 96 break;
97 97
98 case R_MICROBLAZE_32_PCREL_LO: 98 case R_MICROBLAZE_32_PCREL_LO:
99 pr_debug("R_MICROBLAZE_32_PCREL_LO\n"); 99 pr_debug("R_MICROBLAZE_32_PCREL_LO\n");
100 break; 100 break;
101 101
102 case R_MICROBLAZE_64_NONE: 102 case R_MICROBLAZE_64_NONE:
103 pr_debug("R_MICROBLAZE_64_NONE\n"); 103 pr_debug("R_MICROBLAZE_64_NONE\n");
104 break; 104 break;
105 105
106 case R_MICROBLAZE_NONE: 106 case R_MICROBLAZE_NONE:
107 pr_debug("R_MICROBLAZE_NONE\n"); 107 pr_debug("R_MICROBLAZE_NONE\n");
108 break; 108 break;
109 109
110 default: 110 default:
111 printk(KERN_ERR "module %s: " 111 pr_err("module %s: Unknown relocation: %u\n",
112 "Unknown relocation: %u\n",
113 module->name, 112 module->name,
114 ELF32_R_TYPE(rela[i].r_info)); 113 ELF32_R_TYPE(rela[i].r_info));
115 return -ENOEXEC; 114 return -ENOEXEC;
116 } 115 }
117 } 116 }
118 return 0; 117 return 0;
119 } 118 }
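
As a worked example of the R_MICROBLAZE_64 case (symbol value assumed for
illustration): with value = 0xc0008e30 and no pre-gcc-4 split-reloc
adjustment, the two stores above become

	location[0] = (location[0] & 0xFFFF0000) | 0xc000; /* upper half */
	location[1] = (location[1] & 0xFFFF0000) | 0x8e30; /* lower half */

i.e. the 32-bit value is split across the 16-bit immediate fields of an
imm-prefixed instruction pair, mirroring the imm/addik encoding used by
ftrace_update_ftrace_func() earlier in this commit.
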
120 119
121 int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, 120 int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
122 struct module *module) 121 struct module *module)
123 { 122 {
124 flush_dcache(); 123 flush_dcache();
125 return 0; 124 return 0;
126 } 125 }
127 126
arch/microblaze/kernel/process.c
1 /* 1 /*
2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> 2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008-2009 PetaLogix 3 * Copyright (C) 2008-2009 PetaLogix
4 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
5 * 5 *
6 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive 7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details. 8 * for more details.
9 */ 9 */
10 10
11 #include <linux/module.h> 11 #include <linux/module.h>
12 #include <linux/sched.h> 12 #include <linux/sched.h>
13 #include <linux/pm.h> 13 #include <linux/pm.h>
14 #include <linux/tick.h> 14 #include <linux/tick.h>
15 #include <linux/bitops.h> 15 #include <linux/bitops.h>
16 #include <linux/ptrace.h> 16 #include <linux/ptrace.h>
17 #include <asm/pgalloc.h> 17 #include <asm/pgalloc.h>
18 #include <asm/uaccess.h> /* for USER_DS macros */ 18 #include <linux/uaccess.h> /* for USER_DS macros */
19 #include <asm/cacheflush.h> 19 #include <asm/cacheflush.h>
20 20
21 void show_regs(struct pt_regs *regs) 21 void show_regs(struct pt_regs *regs)
22 { 22 {
23 printk(KERN_INFO " Registers dump: mode=%X\r\n", regs->pt_mode); 23 pr_info(" Registers dump: mode=%X\r\n", regs->pt_mode);
24 printk(KERN_INFO " r1=%08lX, r2=%08lX, r3=%08lX, r4=%08lX\n", 24 pr_info(" r1=%08lX, r2=%08lX, r3=%08lX, r4=%08lX\n",
25 regs->r1, regs->r2, regs->r3, regs->r4); 25 regs->r1, regs->r2, regs->r3, regs->r4);
26 printk(KERN_INFO " r5=%08lX, r6=%08lX, r7=%08lX, r8=%08lX\n", 26 pr_info(" r5=%08lX, r6=%08lX, r7=%08lX, r8=%08lX\n",
27 regs->r5, regs->r6, regs->r7, regs->r8); 27 regs->r5, regs->r6, regs->r7, regs->r8);
28 printk(KERN_INFO " r9=%08lX, r10=%08lX, r11=%08lX, r12=%08lX\n", 28 pr_info(" r9=%08lX, r10=%08lX, r11=%08lX, r12=%08lX\n",
29 regs->r9, regs->r10, regs->r11, regs->r12); 29 regs->r9, regs->r10, regs->r11, regs->r12);
30 printk(KERN_INFO " r13=%08lX, r14=%08lX, r15=%08lX, r16=%08lX\n", 30 pr_info(" r13=%08lX, r14=%08lX, r15=%08lX, r16=%08lX\n",
31 regs->r13, regs->r14, regs->r15, regs->r16); 31 regs->r13, regs->r14, regs->r15, regs->r16);
32 printk(KERN_INFO " r17=%08lX, r18=%08lX, r19=%08lX, r20=%08lX\n", 32 pr_info(" r17=%08lX, r18=%08lX, r19=%08lX, r20=%08lX\n",
33 regs->r17, regs->r18, regs->r19, regs->r20); 33 regs->r17, regs->r18, regs->r19, regs->r20);
34 printk(KERN_INFO " r21=%08lX, r22=%08lX, r23=%08lX, r24=%08lX\n", 34 pr_info(" r21=%08lX, r22=%08lX, r23=%08lX, r24=%08lX\n",
35 regs->r21, regs->r22, regs->r23, regs->r24); 35 regs->r21, regs->r22, regs->r23, regs->r24);
36 printk(KERN_INFO " r25=%08lX, r26=%08lX, r27=%08lX, r28=%08lX\n", 36 pr_info(" r25=%08lX, r26=%08lX, r27=%08lX, r28=%08lX\n",
37 regs->r25, regs->r26, regs->r27, regs->r28); 37 regs->r25, regs->r26, regs->r27, regs->r28);
38 printk(KERN_INFO " r29=%08lX, r30=%08lX, r31=%08lX, rPC=%08lX\n", 38 pr_info(" r29=%08lX, r30=%08lX, r31=%08lX, rPC=%08lX\n",
39 regs->r29, regs->r30, regs->r31, regs->pc); 39 regs->r29, regs->r30, regs->r31, regs->pc);
40 printk(KERN_INFO " msr=%08lX, ear=%08lX, esr=%08lX, fsr=%08lX\n", 40 pr_info(" msr=%08lX, ear=%08lX, esr=%08lX, fsr=%08lX\n",
41 regs->msr, regs->ear, regs->esr, regs->fsr); 41 regs->msr, regs->ear, regs->esr, regs->fsr);
42 } 42 }
43 43
44 void (*pm_idle)(void); 44 void (*pm_idle)(void);
45 void (*pm_power_off)(void) = NULL; 45 void (*pm_power_off)(void) = NULL;
46 EXPORT_SYMBOL(pm_power_off); 46 EXPORT_SYMBOL(pm_power_off);
47 47
48 static int hlt_counter = 1; 48 static int hlt_counter = 1;
49 49
50 void disable_hlt(void) 50 void disable_hlt(void)
51 { 51 {
52 hlt_counter++; 52 hlt_counter++;
53 } 53 }
54 EXPORT_SYMBOL(disable_hlt); 54 EXPORT_SYMBOL(disable_hlt);
55 55
56 void enable_hlt(void) 56 void enable_hlt(void)
57 { 57 {
58 hlt_counter--; 58 hlt_counter--;
59 } 59 }
60 EXPORT_SYMBOL(enable_hlt); 60 EXPORT_SYMBOL(enable_hlt);
61 61
62 static int __init nohlt_setup(char *__unused) 62 static int __init nohlt_setup(char *__unused)
63 { 63 {
64 hlt_counter = 1; 64 hlt_counter = 1;
65 return 1; 65 return 1;
66 } 66 }
67 __setup("nohlt", nohlt_setup); 67 __setup("nohlt", nohlt_setup);
68 68
69 static int __init hlt_setup(char *__unused) 69 static int __init hlt_setup(char *__unused)
70 { 70 {
71 hlt_counter = 0; 71 hlt_counter = 0;
72 return 1; 72 return 1;
73 } 73 }
74 __setup("hlt", hlt_setup); 74 __setup("hlt", hlt_setup);
75 75
76 void default_idle(void) 76 void default_idle(void)
77 { 77 {
78 if (likely(hlt_counter)) { 78 if (likely(hlt_counter)) {
79 local_irq_disable(); 79 local_irq_disable();
80 stop_critical_timings(); 80 stop_critical_timings();
81 cpu_relax(); 81 cpu_relax();
82 start_critical_timings(); 82 start_critical_timings();
83 local_irq_enable(); 83 local_irq_enable();
84 } else { 84 } else {
85 clear_thread_flag(TIF_POLLING_NRFLAG); 85 clear_thread_flag(TIF_POLLING_NRFLAG);
86 smp_mb__after_clear_bit(); 86 smp_mb__after_clear_bit();
87 local_irq_disable(); 87 local_irq_disable();
88 while (!need_resched()) 88 while (!need_resched())
89 cpu_sleep(); 89 cpu_sleep();
90 local_irq_enable(); 90 local_irq_enable();
91 set_thread_flag(TIF_POLLING_NRFLAG); 91 set_thread_flag(TIF_POLLING_NRFLAG);
92 } 92 }
93 } 93 }
94 94
95 void cpu_idle(void) 95 void cpu_idle(void)
96 { 96 {
97 set_thread_flag(TIF_POLLING_NRFLAG); 97 set_thread_flag(TIF_POLLING_NRFLAG);
98 98
99 /* endless idle loop with no priority at all */ 99 /* endless idle loop with no priority at all */
100 while (1) { 100 while (1) {
101 void (*idle)(void) = pm_idle; 101 void (*idle)(void) = pm_idle;
102 102
103 if (!idle) 103 if (!idle)
104 idle = default_idle; 104 idle = default_idle;
105 105
106 tick_nohz_idle_enter(); 106 tick_nohz_idle_enter();
107 rcu_idle_enter(); 107 rcu_idle_enter();
108 while (!need_resched()) 108 while (!need_resched())
109 idle(); 109 idle();
110 rcu_idle_exit(); 110 rcu_idle_exit();
111 tick_nohz_idle_exit(); 111 tick_nohz_idle_exit();
112 112
113 schedule_preempt_disabled(); 113 schedule_preempt_disabled();
114 check_pgt_cache(); 114 check_pgt_cache();
115 } 115 }
116 } 116 }
117 117
118 void flush_thread(void) 118 void flush_thread(void)
119 { 119 {
120 } 120 }
121 121
122 int copy_thread(unsigned long clone_flags, unsigned long usp, 122 int copy_thread(unsigned long clone_flags, unsigned long usp,
123 unsigned long arg, struct task_struct *p) 123 unsigned long arg, struct task_struct *p)
124 { 124 {
125 struct pt_regs *childregs = task_pt_regs(p); 125 struct pt_regs *childregs = task_pt_regs(p);
126 struct thread_info *ti = task_thread_info(p); 126 struct thread_info *ti = task_thread_info(p);
127 127
128 if (unlikely(p->flags & PF_KTHREAD)) { 128 if (unlikely(p->flags & PF_KTHREAD)) {
129 /* if we're creating a new kernel thread then just zero all 129 /* if we're creating a new kernel thread then just zero all
130 * the registers. That's OK for a brand new thread. */ 130 * the registers. That's OK for a brand new thread. */
131 memset(childregs, 0, sizeof(struct pt_regs)); 131 memset(childregs, 0, sizeof(struct pt_regs));
132 memset(&ti->cpu_context, 0, sizeof(struct cpu_context)); 132 memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
133 ti->cpu_context.r1 = (unsigned long)childregs; 133 ti->cpu_context.r1 = (unsigned long)childregs;
134 ti->cpu_context.r20 = (unsigned long)usp; /* fn */ 134 ti->cpu_context.r20 = (unsigned long)usp; /* fn */
135 ti->cpu_context.r19 = (unsigned long)arg; 135 ti->cpu_context.r19 = (unsigned long)arg;
136 childregs->pt_mode = 1; 136 childregs->pt_mode = 1;
137 local_save_flags(childregs->msr); 137 local_save_flags(childregs->msr);
138 #ifdef CONFIG_MMU 138 #ifdef CONFIG_MMU
139 ti->cpu_context.msr = childregs->msr & ~MSR_IE; 139 ti->cpu_context.msr = childregs->msr & ~MSR_IE;
140 #endif 140 #endif
141 ti->cpu_context.r15 = (unsigned long)ret_from_kernel_thread - 8; 141 ti->cpu_context.r15 = (unsigned long)ret_from_kernel_thread - 8;
142 return 0; 142 return 0;
143 } 143 }
144 *childregs = *current_pt_regs(); 144 *childregs = *current_pt_regs();
145 if (usp) 145 if (usp)
146 childregs->r1 = usp; 146 childregs->r1 = usp;
147 147
148 memset(&ti->cpu_context, 0, sizeof(struct cpu_context)); 148 memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
149 ti->cpu_context.r1 = (unsigned long)childregs; 149 ti->cpu_context.r1 = (unsigned long)childregs;
150 #ifndef CONFIG_MMU 150 #ifndef CONFIG_MMU
151 ti->cpu_context.msr = (unsigned long)childregs->msr; 151 ti->cpu_context.msr = (unsigned long)childregs->msr;
152 #else 152 #else
153 childregs->msr |= MSR_UMS; 153 childregs->msr |= MSR_UMS;
154 154
155 /* we should consider the fact that childregs is a copy of the parent 155 /* we should consider the fact that childregs is a copy of the parent
156 * regs which were saved immediately after entering the kernel state 156 * regs which were saved immediately after entering the kernel state
157 * before enabling VM. This MSR will be restored in switch_to and 157 * before enabling VM. This MSR will be restored in switch_to and
158 * RETURN() and we want to have the right machine state there 158 * RETURN() and we want to have the right machine state there
159 * specifically this state must have INTs disabled before and enabled 159 * specifically this state must have INTs disabled before and enabled
160 * after performing rtbd 160 * after performing rtbd
161 * compose the right MSR for RETURN(). It will work for switch_to also 161 * compose the right MSR for RETURN(). It will work for switch_to also
162 * excepting for VM and UMS 162 * excepting for VM and UMS
163 * don't touch the UMS, CARRY and cache bits 163 * don't touch the UMS, CARRY and cache bits
164 * right now MSR is a copy of parent one */ 164 * right now MSR is a copy of parent one */
165 childregs->msr &= ~MSR_EIP; 165 childregs->msr &= ~MSR_EIP;
166 childregs->msr |= MSR_IE; 166 childregs->msr |= MSR_IE;
167 childregs->msr &= ~MSR_VM; 167 childregs->msr &= ~MSR_VM;
168 childregs->msr |= MSR_VMS; 168 childregs->msr |= MSR_VMS;
169 childregs->msr |= MSR_EE; /* exceptions will be enabled*/ 169 childregs->msr |= MSR_EE; /* exceptions will be enabled*/
170 170
171 ti->cpu_context.msr = (childregs->msr|MSR_VM); 171 ti->cpu_context.msr = (childregs->msr|MSR_VM);
172 ti->cpu_context.msr &= ~MSR_UMS; /* switch_to to kernel mode */ 172 ti->cpu_context.msr &= ~MSR_UMS; /* switch_to to kernel mode */
173 ti->cpu_context.msr &= ~MSR_IE; 173 ti->cpu_context.msr &= ~MSR_IE;
174 #endif 174 #endif
175 ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8; 175 ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8;
176 176
177 /* 177 /*
178 * r21 is the thread reg, r10 is 6th arg to clone 178 * r21 is the thread reg, r10 is 6th arg to clone
179 * which contains TLS area 179 * which contains TLS area
180 */ 180 */
181 if (clone_flags & CLONE_SETTLS) 181 if (clone_flags & CLONE_SETTLS)
182 childregs->r21 = childregs->r10; 182 childregs->r21 = childregs->r10;
183 183
184 return 0; 184 return 0;
185 } 185 }
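
For reference, the MSR bits juggled in copy_thread(), summarized from the
MicroBlaze reference manual rather than from this file:

	/* MSR bit semantics (assumed from the MicroBlaze reference manual):
	 *   MSR_IE  - interrupt enable       MSR_EE  - hw exception enable
	 *   MSR_EIP - exception in progress  MSR_VM  - virtual (MMU) mode
	 *   MSR_VMS - VM save bit            MSR_UMS - user mode save bit
	 * The return instructions (rtid/rtbd/rted) copy VMS->VM and UMS->UM,
	 * so the child resumes with the MMU back on and interrupts and
	 * exceptions enabled. */
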
186 186
187 #ifndef CONFIG_MMU 187 #ifndef CONFIG_MMU
188 /* 188 /*
189 * Return saved PC of a blocked thread. 189 * Return saved PC of a blocked thread.
190 */ 190 */
191 unsigned long thread_saved_pc(struct task_struct *tsk) 191 unsigned long thread_saved_pc(struct task_struct *tsk)
192 { 192 {
193 struct cpu_context *ctx = 193 struct cpu_context *ctx =
194 &(((struct thread_info *)(tsk->stack))->cpu_context); 194 &(((struct thread_info *)(tsk->stack))->cpu_context);
195 195
196 /* Check whether the thread is blocked in resume() */ 196 /* Check whether the thread is blocked in resume() */
197 if (in_sched_functions(ctx->r15)) 197 if (in_sched_functions(ctx->r15))
198 return (unsigned long)ctx->r15; 198 return (unsigned long)ctx->r15;
199 else 199 else
200 return ctx->r14; 200 return ctx->r14;
201 } 201 }
202 #endif 202 #endif
203 203
204 unsigned long get_wchan(struct task_struct *p) 204 unsigned long get_wchan(struct task_struct *p)
205 { 205 {
206 /* TBD (used by procfs) */ 206 /* TBD (used by procfs) */
207 return 0; 207 return 0;
208 } 208 }
209 209
210 /* Set up a thread for executing a new program */ 210 /* Set up a thread for executing a new program */
211 void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp) 211 void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
212 { 212 {
213 regs->pc = pc; 213 regs->pc = pc;
214 regs->r1 = usp; 214 regs->r1 = usp;
215 regs->pt_mode = 0; 215 regs->pt_mode = 0;
216 #ifdef CONFIG_MMU 216 #ifdef CONFIG_MMU
217 regs->msr |= MSR_UMS; 217 regs->msr |= MSR_UMS;
218 regs->msr &= ~MSR_VM; 218 regs->msr &= ~MSR_VM;
219 #endif 219 #endif
220 } 220 }
221 221
222 #ifdef CONFIG_MMU 222 #ifdef CONFIG_MMU
223 #include <linux/elfcore.h> 223 #include <linux/elfcore.h>
224 /* 224 /*
225 * Fill in the FPU state for a core dump 225 * Fill in the FPU state for a core dump
226 */ 226 */
227 int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs) 227 int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
228 { 228 {
229 return 0; /* MicroBlaze has no separate FPU registers */ 229 return 0; /* MicroBlaze has no separate FPU registers */
230 } 230 }
231 #endif /* CONFIG_MMU */ 231 #endif /* CONFIG_MMU */
232 232
arch/microblaze/kernel/ptrace.c
1 /* 1 /*
2 * `ptrace' system call 2 * `ptrace' system call
3 * 3 *
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> 4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2007-2009 PetaLogix 5 * Copyright (C) 2007-2009 PetaLogix
6 * Copyright (C) 2004-2007 John Williams <john.williams@petalogix.com> 6 * Copyright (C) 2004-2007 John Williams <john.williams@petalogix.com>
7 * 7 *
8 * derived from arch/v850/kernel/ptrace.c 8 * derived from arch/v850/kernel/ptrace.c
9 * 9 *
10 * Copyright (C) 2002,03 NEC Electronics Corporation 10 * Copyright (C) 2002,03 NEC Electronics Corporation
11 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org> 11 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org>
12 * 12 *
13 * Derived from arch/mips/kernel/ptrace.c: 13 * Derived from arch/mips/kernel/ptrace.c:
14 * 14 *
15 * Copyright (C) 1992 Ross Biro 15 * Copyright (C) 1992 Ross Biro
16 * Copyright (C) Linus Torvalds 16 * Copyright (C) Linus Torvalds
17 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle 17 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
18 * Copyright (C) 1996 David S. Miller 18 * Copyright (C) 1996 David S. Miller
19 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com 19 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
20 * Copyright (C) 1999 MIPS Technologies, Inc. 20 * Copyright (C) 1999 MIPS Technologies, Inc.
21 * 21 *
22 * This file is subject to the terms and conditions of the GNU General 22 * This file is subject to the terms and conditions of the GNU General
23 * Public License. See the file COPYING in the main directory of this 23 * Public License. See the file COPYING in the main directory of this
24 * archive for more details. 24 * archive for more details.
25 */ 25 */
26 26
27 #include <linux/kernel.h> 27 #include <linux/kernel.h>
28 #include <linux/mm.h> 28 #include <linux/mm.h>
29 #include <linux/sched.h> 29 #include <linux/sched.h>
30 #include <linux/ptrace.h> 30 #include <linux/ptrace.h>
31 #include <linux/signal.h> 31 #include <linux/signal.h>
32 #include <linux/elf.h> 32 #include <linux/elf.h>
33 #include <linux/audit.h> 33 #include <linux/audit.h>
34 #include <linux/seccomp.h> 34 #include <linux/seccomp.h>
35 #include <linux/tracehook.h> 35 #include <linux/tracehook.h>
36 36
37 #include <linux/errno.h> 37 #include <linux/errno.h>
38 #include <asm/processor.h> 38 #include <asm/processor.h>
39 #include <linux/uaccess.h> 39 #include <linux/uaccess.h>
40 #include <asm/asm-offsets.h> 40 #include <asm/asm-offsets.h>
41 #include <asm/cacheflush.h> 41 #include <asm/cacheflush.h>
42 #include <asm/syscall.h> 42 #include <asm/syscall.h>
43 #include <asm/io.h> 43 #include <linux/io.h>
44 44
45 /* Returns the address where the register at REG_OFFS in P is stashed away. */ 45 /* Returns the address where the register at REG_OFFS in P is stashed away. */
46 static microblaze_reg_t *reg_save_addr(unsigned reg_offs, 46 static microblaze_reg_t *reg_save_addr(unsigned reg_offs,
47 struct task_struct *t) 47 struct task_struct *t)
48 { 48 {
49 struct pt_regs *regs; 49 struct pt_regs *regs;
50 50
51 /* 51 /*
52 * Three basic cases: 52 * Three basic cases:
53 * 53 *
54 * (1) A register normally saved before calling the scheduler, is 54 * (1) A register normally saved before calling the scheduler, is
55 * available in the kernel entry pt_regs structure at the top 55 * available in the kernel entry pt_regs structure at the top
56 * of the kernel stack. The kernel trap/irq exit path takes 56 * of the kernel stack. The kernel trap/irq exit path takes
57 * care to save/restore almost all registers for ptrace'd 57 * care to save/restore almost all registers for ptrace'd
58 * processes. 58 * processes.
59 * 59 *
60 * (2) A call-clobbered register, where the process P entered the 60 * (2) A call-clobbered register, where the process P entered the
61 * kernel via [syscall] trap, is not stored anywhere; that's 61 * kernel via [syscall] trap, is not stored anywhere; that's
62 * OK, because such registers are not expected to be preserved 62 * OK, because such registers are not expected to be preserved
63 * when the trap returns anyway (so we don't actually bother to 63 * when the trap returns anyway (so we don't actually bother to
64 * test for this case). 64 * test for this case).
65 * 65 *
66 * (3) A few registers not used at all by the kernel, and so 66 * (3) A few registers not used at all by the kernel, and so
67 * normally never saved except by context-switches, are in the 67 * normally never saved except by context-switches, are in the
68 * context switch state. 68 * context switch state.
69 */ 69 */
70 70
71 /* Register saved during kernel entry (or not available). */ 71 /* Register saved during kernel entry (or not available). */
72 regs = task_pt_regs(t); 72 regs = task_pt_regs(t);
73 73
74 return (microblaze_reg_t *)((char *)regs + reg_offs); 74 return (microblaze_reg_t *)((char *)regs + reg_offs);
75 } 75 }
76 76
77 long arch_ptrace(struct task_struct *child, long request, 77 long arch_ptrace(struct task_struct *child, long request,
78 unsigned long addr, unsigned long data) 78 unsigned long addr, unsigned long data)
79 { 79 {
80 int rval; 80 int rval;
81 unsigned long val = 0; 81 unsigned long val = 0;
82 82
83 switch (request) { 83 switch (request) {
84 /* Read/write the word at location ADDR in the registers. */ 84 /* Read/write the word at location ADDR in the registers. */
85 case PTRACE_PEEKUSR: 85 case PTRACE_PEEKUSR:
86 case PTRACE_POKEUSR: 86 case PTRACE_POKEUSR:
87 pr_debug("PEEKUSR/POKEUSR : 0x%08lx\n", addr); 87 pr_debug("PEEKUSR/POKEUSR : 0x%08lx\n", addr);
88 rval = 0; 88 rval = 0;
89 if (addr >= PT_SIZE && request == PTRACE_PEEKUSR) { 89 if (addr >= PT_SIZE && request == PTRACE_PEEKUSR) {
90 /* 90 /*
91 * Special requests that don't actually correspond 91 * Special requests that don't actually correspond
92 * to offsets in struct pt_regs. 92 * to offsets in struct pt_regs.
93 */ 93 */
94 if (addr == PT_TEXT_ADDR) { 94 if (addr == PT_TEXT_ADDR) {
95 val = child->mm->start_code; 95 val = child->mm->start_code;
96 } else if (addr == PT_DATA_ADDR) { 96 } else if (addr == PT_DATA_ADDR) {
97 val = child->mm->start_data; 97 val = child->mm->start_data;
98 } else if (addr == PT_TEXT_LEN) { 98 } else if (addr == PT_TEXT_LEN) {
99 val = child->mm->end_code 99 val = child->mm->end_code
100 - child->mm->start_code; 100 - child->mm->start_code;
101 } else { 101 } else {
102 rval = -EIO; 102 rval = -EIO;
103 } 103 }
104 } else if (addr < PT_SIZE && (addr & 0x3) == 0) { 104 } else if (addr < PT_SIZE && (addr & 0x3) == 0) {
105 microblaze_reg_t *reg_addr = reg_save_addr(addr, child); 105 microblaze_reg_t *reg_addr = reg_save_addr(addr, child);
106 if (request == PTRACE_PEEKUSR) 106 if (request == PTRACE_PEEKUSR)
107 val = *reg_addr; 107 val = *reg_addr;
108 else { 108 else {
109 #if 1 109 #if 1
110 *reg_addr = data; 110 *reg_addr = data;
111 #else 111 #else
112 /* MS potential problem on WB system 112 /* MS potential problem on WB system
113 * Be aware that reg_addr is virtual address 113 * Be aware that reg_addr is virtual address
114 * virt_to_phys conversion is necessary. 114 * virt_to_phys conversion is necessary.
115 * This could be sensible solution. 115 * This could be sensible solution.
116 */ 116 */
117 u32 paddr = virt_to_phys((u32)reg_addr); 117 u32 paddr = virt_to_phys((u32)reg_addr);
118 invalidate_icache_range(paddr, paddr + 4); 118 invalidate_icache_range(paddr, paddr + 4);
119 *reg_addr = data; 119 *reg_addr = data;
120 flush_dcache_range(paddr, paddr + 4); 120 flush_dcache_range(paddr, paddr + 4);
121 #endif 121 #endif
122 } 122 }
123 } else 123 } else
124 rval = -EIO; 124 rval = -EIO;
125 125
126 if (rval == 0 && request == PTRACE_PEEKUSR) 126 if (rval == 0 && request == PTRACE_PEEKUSR)
127 rval = put_user(val, (unsigned long __user *)data); 127 rval = put_user(val, (unsigned long __user *)data);
128 break; 128 break;
129 default: 129 default:
130 rval = ptrace_request(child, request, addr, data); 130 rval = ptrace_request(child, request, addr, data);
131 } 131 }
132 return rval; 132 return rval;
133 } 133 }
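
A hypothetical tracer-side sketch of the special PEEKUSR offsets handled
above (userspace, glibc; the helper name is invented here, pid is an
already-traced child, and PT_TEXT_ADDR comes from the microblaze
asm/ptrace.h). Note that glibc spells the request PTRACE_PEEKUSER and
returns the peeked word itself; the kernel side above delivers it via
put_user():

	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <errno.h>
	#include <stdio.h>
	#include <asm/ptrace.h>		/* PT_TEXT_ADDR */

	static long peek_text_start(pid_t pid)
	{
		errno = 0;		/* -1 can be a valid peeked word */
		long text = ptrace(PTRACE_PEEKUSER, pid,
				   (void *)PT_TEXT_ADDR, NULL);
		if (text == -1 && errno)
			perror("ptrace");
		return text;
	}
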
134 134
135 asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) 135 asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
136 { 136 {
137 long ret = 0; 137 long ret = 0;
138 138
139 secure_computing_strict(regs->r12); 139 secure_computing_strict(regs->r12);
140 140
141 if (test_thread_flag(TIF_SYSCALL_TRACE) && 141 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
142 tracehook_report_syscall_entry(regs)) 142 tracehook_report_syscall_entry(regs))
143 /* 143 /*
144 * Tracing decided this syscall should not happen. 144 * Tracing decided this syscall should not happen.
145 * We'll return a bogus call number to get an ENOSYS 145 * We'll return a bogus call number to get an ENOSYS
146 * error, but leave the original number in regs->r12. 146 * error, but leave the original number in regs->r12.
147 */ 147 */
148 ret = -1L; 148 ret = -1L;
149 149
150 audit_syscall_entry(EM_MICROBLAZE, regs->r12, regs->r5, regs->r6, 150 audit_syscall_entry(EM_MICROBLAZE, regs->r12, regs->r5, regs->r6,
151 regs->r7, regs->r8); 151 regs->r7, regs->r8);
152 152
153 return ret ?: regs->r12; 153 return ret ?: regs->r12;
154 } 154 }
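One clarification on the return statement above: "ret ?: regs->r12" uses the GNU conditional-with-omitted-middle-operand extension, i.e. it is shorthand for:

	if (ret)
		return ret;	/* -1L: tracer vetoed the syscall -> ENOSYS */
	return regs->r12;	/* otherwise run the original syscall number */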
155 155
156 asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) 156 asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
157 { 157 {
158 int step; 158 int step;
159 159
160 audit_syscall_exit(regs); 160 audit_syscall_exit(regs);
161 161
162 step = test_thread_flag(TIF_SINGLESTEP); 162 step = test_thread_flag(TIF_SINGLESTEP);
163 if (step || test_thread_flag(TIF_SYSCALL_TRACE)) 163 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
164 tracehook_report_syscall_exit(regs, step); 164 tracehook_report_syscall_exit(regs, step);
165 } 165 }
166 166
167 #if 0 167 #if 0
168 static asmlinkage void syscall_trace(void) 168 static asmlinkage void syscall_trace(void)
169 { 169 {
170 if (!test_thread_flag(TIF_SYSCALL_TRACE)) 170 if (!test_thread_flag(TIF_SYSCALL_TRACE))
171 return; 171 return;
172 if (!(current->ptrace & PT_PTRACED)) 172 if (!(current->ptrace & PT_PTRACED))
173 return; 173 return;
174 /* The 0x80 provides a way for the tracing parent to distinguish 174 /* The 0x80 provides a way for the tracing parent to distinguish
175 between a syscall stop and SIGTRAP delivery */ 175 between a syscall stop and SIGTRAP delivery */
176 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) 176 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
177 ? 0x80 : 0)); 177 ? 0x80 : 0));
178 /* 178 /*
179 * this isn't the same as continuing with a signal, but it will do 179 * this isn't the same as continuing with a signal, but it will do
180 * for normal use. strace only continues with a signal if the 180 * for normal use. strace only continues with a signal if the
181 * stopping signal is not SIGTRAP. -brl 181 * stopping signal is not SIGTRAP. -brl
182 */ 182 */
183 if (current->exit_code) { 183 if (current->exit_code) {
184 send_sig(current->exit_code, current, 1); 184 send_sig(current->exit_code, current, 1);
185 current->exit_code = 0; 185 current->exit_code = 0;
186 } 186 }
187 } 187 }
188 #endif 188 #endif
189 189
190 void ptrace_disable(struct task_struct *child) 190 void ptrace_disable(struct task_struct *child)
191 { 191 {
192 /* nothing to do */ 192 /* nothing to do */
193 } 193 }
194 194
arch/microblaze/kernel/setup.c
1 /* 1 /*
2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> 2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2007-2009 PetaLogix 3 * Copyright (C) 2007-2009 PetaLogix
4 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
5 * 5 *
6 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive 7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details. 8 * for more details.
9 */ 9 */
10 10
11 #include <linux/init.h> 11 #include <linux/init.h>
12 #include <linux/string.h> 12 #include <linux/string.h>
13 #include <linux/seq_file.h> 13 #include <linux/seq_file.h>
14 #include <linux/cpu.h> 14 #include <linux/cpu.h>
15 #include <linux/initrd.h> 15 #include <linux/initrd.h>
16 #include <linux/console.h> 16 #include <linux/console.h>
17 #include <linux/debugfs.h> 17 #include <linux/debugfs.h>
18 18
19 #include <asm/setup.h> 19 #include <asm/setup.h>
20 #include <asm/sections.h> 20 #include <asm/sections.h>
21 #include <asm/page.h> 21 #include <asm/page.h>
22 #include <linux/io.h> 22 #include <linux/io.h>
23 #include <linux/bug.h> 23 #include <linux/bug.h>
24 #include <linux/param.h> 24 #include <linux/param.h>
25 #include <linux/pci.h> 25 #include <linux/pci.h>
26 #include <linux/cache.h> 26 #include <linux/cache.h>
27 #include <linux/of_platform.h> 27 #include <linux/of_platform.h>
28 #include <linux/dma-mapping.h> 28 #include <linux/dma-mapping.h>
29 #include <asm/cacheflush.h> 29 #include <asm/cacheflush.h>
30 #include <asm/entry.h> 30 #include <asm/entry.h>
31 #include <asm/cpuinfo.h> 31 #include <asm/cpuinfo.h>
32 32
33 #include <asm/prom.h> 33 #include <asm/prom.h>
34 #include <asm/pgtable.h> 34 #include <asm/pgtable.h>
35 35
36 DEFINE_PER_CPU(unsigned int, KSP); /* Saved kernel stack pointer */ 36 DEFINE_PER_CPU(unsigned int, KSP); /* Saved kernel stack pointer */
37 DEFINE_PER_CPU(unsigned int, KM); /* Kernel/user mode */ 37 DEFINE_PER_CPU(unsigned int, KM); /* Kernel/user mode */
38 DEFINE_PER_CPU(unsigned int, ENTRY_SP); /* Saved SP on kernel entry */ 38 DEFINE_PER_CPU(unsigned int, ENTRY_SP); /* Saved SP on kernel entry */
39 DEFINE_PER_CPU(unsigned int, R11_SAVE); /* Temp variable for entry */ 39 DEFINE_PER_CPU(unsigned int, R11_SAVE); /* Temp variable for entry */
40 DEFINE_PER_CPU(unsigned int, CURRENT_SAVE); /* Saved current pointer */ 40 DEFINE_PER_CPU(unsigned int, CURRENT_SAVE); /* Saved current pointer */
41 41
42 unsigned int boot_cpuid; 42 unsigned int boot_cpuid;
43 /* 43 /*
44 * Placed cmd_line to .data section because can be initialized from 44 * Placed cmd_line to .data section because can be initialized from
45 * ASM code. Default position is BSS section which is cleared 45 * ASM code. Default position is BSS section which is cleared
46 * in machine_early_init(). 46 * in machine_early_init().
47 */ 47 */
48 char cmd_line[COMMAND_LINE_SIZE] __attribute__ ((section(".data"))); 48 char cmd_line[COMMAND_LINE_SIZE] __attribute__ ((section(".data")));
49 49
50 void __init setup_arch(char **cmdline_p) 50 void __init setup_arch(char **cmdline_p)
51 { 51 {
52 *cmdline_p = cmd_line; 52 *cmdline_p = cmd_line;
53 53
54 console_verbose(); 54 console_verbose();
55 55
56 unflatten_device_tree(); 56 unflatten_device_tree();
57 57
58 setup_cpuinfo(); 58 setup_cpuinfo();
59 59
60 microblaze_cache_init(); 60 microblaze_cache_init();
61 61
62 setup_memory(); 62 setup_memory();
63 63
64 #ifdef CONFIG_EARLY_PRINTK 64 #ifdef CONFIG_EARLY_PRINTK
65 /* remap early console to virtual address */ 65 /* remap early console to virtual address */
66 remap_early_printk(); 66 remap_early_printk();
67 #endif 67 #endif
68 68
69 xilinx_pci_init(); 69 xilinx_pci_init();
70 70
71 #if defined(CONFIG_SELFMOD_INTC) || defined(CONFIG_SELFMOD_TIMER) 71 #if defined(CONFIG_SELFMOD_INTC) || defined(CONFIG_SELFMOD_TIMER)
72 pr_notice("Self modified code enable\n"); 72 pr_notice("Self modified code enable\n");
73 #endif 73 #endif
74 74
75 #ifdef CONFIG_VT 75 #ifdef CONFIG_VT
76 #if defined(CONFIG_XILINX_CONSOLE) 76 #if defined(CONFIG_XILINX_CONSOLE)
77 conswitchp = &xil_con; 77 conswitchp = &xil_con;
78 #elif defined(CONFIG_DUMMY_CONSOLE) 78 #elif defined(CONFIG_DUMMY_CONSOLE)
79 conswitchp = &dummy_con; 79 conswitchp = &dummy_con;
80 #endif 80 #endif
81 #endif 81 #endif
82 } 82 }
83 83
84 #ifdef CONFIG_MTD_UCLINUX 84 #ifdef CONFIG_MTD_UCLINUX
85 /* Handle both romfs and cramfs types, without generating unnecessary 85 /* Handle both romfs and cramfs types, without generating unnecessary
86 code (i.e. no point checking for CRAMFS if it's not even enabled) */ 86 code (i.e. no point checking for CRAMFS if it's not even enabled) */
87 inline unsigned get_romfs_len(unsigned *addr) 87 inline unsigned get_romfs_len(unsigned *addr)
88 { 88 {
89 #ifdef CONFIG_ROMFS_FS 89 #ifdef CONFIG_ROMFS_FS
90 if (memcmp(&addr[0], "-rom1fs-", 8) == 0) /* romfs */ 90 if (memcmp(&addr[0], "-rom1fs-", 8) == 0) /* romfs */
91 return be32_to_cpu(addr[2]); 91 return be32_to_cpu(addr[2]);
92 #endif 92 #endif
93 93
94 #ifdef CONFIG_CRAMFS 94 #ifdef CONFIG_CRAMFS
95 if (addr[0] == le32_to_cpu(0x28cd3d45)) /* cramfs */ 95 if (addr[0] == le32_to_cpu(0x28cd3d45)) /* cramfs */
96 return le32_to_cpu(addr[1]); 96 return le32_to_cpu(addr[1]);
97 #endif 97 #endif
98 return 0; 98 return 0;
99 } 99 }
100 #endif /* CONFIG_MTD_UCLINUX */ 100 #endif /* CONFIG_MTD_UCLINUX */
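For context on the magic words get_romfs_len() peeks at, an illustration-only view of the superblock prefixes (this struct is hypothetical, not a kernel type):

	struct fs_image_peek {		/* hypothetical, for illustration */
		char	magic[8];	/* romfs: addr[0..1] hold "-rom1fs-" */
		__be32	full_size;	/* romfs: addr[2], size, big-endian */
	};
	/* cramfs instead keeps its 0x28cd3d45 magic in addr[0] and the
	 * little-endian image size in addr[1]. */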
101 101
102 unsigned long kernel_tlb; 102 unsigned long kernel_tlb;
103 103
104 void __init machine_early_init(const char *cmdline, unsigned int ram, 104 void __init machine_early_init(const char *cmdline, unsigned int ram,
105 unsigned int fdt, unsigned int msr, unsigned int tlb0, 105 unsigned int fdt, unsigned int msr, unsigned int tlb0,
106 unsigned int tlb1) 106 unsigned int tlb1)
107 { 107 {
108 unsigned long *src, *dst; 108 unsigned long *src, *dst;
109 unsigned int offset = 0; 109 unsigned int offset = 0;
110 110
111 /* If CONFIG_MTD_UCLINUX is defined, assume ROMFS is at the 111 /* If CONFIG_MTD_UCLINUX is defined, assume ROMFS is at the
112 * end of the kernel. There are two positions which we want to check. 112 * end of the kernel. There are two positions which we want to check.
113 * The first is __init_end and the second __bss_start. 113 * The first is __init_end and the second __bss_start.
114 */ 114 */
115 #ifdef CONFIG_MTD_UCLINUX 115 #ifdef CONFIG_MTD_UCLINUX
116 int romfs_size; 116 int romfs_size;
117 unsigned int romfs_base; 117 unsigned int romfs_base;
118 char *old_klimit = klimit; 118 char *old_klimit = klimit;
119 119
120 romfs_base = (ram ? ram : (unsigned int)&__init_end); 120 romfs_base = (ram ? ram : (unsigned int)&__init_end);
121 romfs_size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base)); 121 romfs_size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base));
122 if (!romfs_size) { 122 if (!romfs_size) {
123 romfs_base = (unsigned int)&__bss_start; 123 romfs_base = (unsigned int)&__bss_start;
124 romfs_size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base)); 124 romfs_size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base));
125 } 125 }
126 126
127 /* Move ROMFS out of BSS before clearing it */ 127 /* Move ROMFS out of BSS before clearing it */
128 if (romfs_size > 0) { 128 if (romfs_size > 0) {
129 memmove(&__bss_stop, (int *)romfs_base, romfs_size); 129 memmove(&__bss_stop, (int *)romfs_base, romfs_size);
130 klimit += romfs_size; 130 klimit += romfs_size;
131 } 131 }
132 #endif 132 #endif
133 133
134 /* clearing bss section */ 134 /* clearing bss section */
135 memset(__bss_start, 0, __bss_stop-__bss_start); 135 memset(__bss_start, 0, __bss_stop-__bss_start);
136 memset(_ssbss, 0, _esbss-_ssbss); 136 memset(_ssbss, 0, _esbss-_ssbss);
137 137
138 lockdep_init(); 138 lockdep_init();
139 139
140 /* initialize device tree for usage in early_printk */ 140 /* initialize device tree for usage in early_printk */
141 early_init_devtree((void *)_fdt_start); 141 early_init_devtree((void *)_fdt_start);
142 142
143 #ifdef CONFIG_EARLY_PRINTK 143 #ifdef CONFIG_EARLY_PRINTK
144 setup_early_printk(NULL); 144 setup_early_printk(NULL);
145 #endif 145 #endif
146 146
147 /* setup kernel_tlb after BSS cleaning 147 /* setup kernel_tlb after BSS cleaning
148 * Maybe worth moving to asm code */ 148 * Maybe worth moving to asm code */
149 kernel_tlb = tlb0 + tlb1; 149 kernel_tlb = tlb0 + tlb1;
150 /* printk("TLB1 0x%08x, TLB0 0x%08x, tlb 0x%x\n", tlb0, 150 /* printk("TLB1 0x%08x, TLB0 0x%08x, tlb 0x%x\n", tlb0,
151 tlb1, kernel_tlb); */ 151 tlb1, kernel_tlb); */
152 152
153 printk("Ramdisk addr 0x%08x, ", ram); 153 pr_info("Ramdisk addr 0x%08x, ", ram);
154 if (fdt) 154 if (fdt)
155 printk("FDT at 0x%08x\n", fdt); 155 pr_info("FDT at 0x%08x\n", fdt);
156 else 156 else
157 printk("Compiled-in FDT at 0x%08x\n", 157 pr_info("Compiled-in FDT at 0x%08x\n",
158 (unsigned int)_fdt_start); 158 (unsigned int)_fdt_start);
159 159
160 #ifdef CONFIG_MTD_UCLINUX 160 #ifdef CONFIG_MTD_UCLINUX
161 printk("Found romfs @ 0x%08x (0x%08x)\n", 161 pr_info("Found romfs @ 0x%08x (0x%08x)\n",
162 romfs_base, romfs_size); 162 romfs_base, romfs_size);
163 printk("#### klimit %p ####\n", old_klimit); 163 pr_info("#### klimit %p ####\n", old_klimit);
164 BUG_ON(romfs_size < 0); /* What else can we do? */ 164 BUG_ON(romfs_size < 0); /* What else can we do? */
165 165
166 printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n", 166 pr_info("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
167 romfs_size, romfs_base, (unsigned)&__bss_stop); 167 romfs_size, romfs_base, (unsigned)&__bss_stop);
168 168
169 printk("New klimit: 0x%08x\n", (unsigned)klimit); 169 pr_info("New klimit: 0x%08x\n", (unsigned)klimit);
170 #endif 170 #endif
171 171
172 #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR 172 #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
173 if (msr) 173 if (msr) {
174 printk("!!!Your kernel has setup MSR instruction but " 174 pr_info("!!!Your kernel has setup MSR instruction but ");
175 "CPU don't have it %x\n", msr); 175 pr_cont("CPU don't have it %x\n", msr);
176 }
176 #else 177 #else
177 if (!msr) 178 if (!msr) {
178 printk("!!!Your kernel not setup MSR instruction but " 179 pr_info("!!!Your kernel not setup MSR instruction but ");
179 "CPU have it %x\n", msr); 180 pr_cont"CPU have it %x\n", msr);
181 }
180 #endif 182 #endif
181 183
182 /* Do not copy reset vectors. offset = 0x2 means skip the first 184 /* Do not copy reset vectors. offset = 0x2 means skip the first
183 * two instructions. dst is a pointer to the MB vectors, which are placed 185 * two instructions. dst is a pointer to the MB vectors, which are placed
184 * in block ram. If you want to copy the reset vector, set offset to 0x0 */ 186 * in block ram. If you want to copy the reset vector, set offset to 0x0 */
185 #if !CONFIG_MANUAL_RESET_VECTOR 187 #if !CONFIG_MANUAL_RESET_VECTOR
186 offset = 0x2; 188 offset = 0x2;
187 #endif 189 #endif
188 dst = (unsigned long *) (offset * sizeof(u32)); 190 dst = (unsigned long *) (offset * sizeof(u32));
189 for (src = __ivt_start + offset; src < __ivt_end; src++, dst++) 191 for (src = __ivt_start + offset; src < __ivt_end; src++, dst++)
190 *dst = *src; 192 *dst = *src;
191 193
192 /* Initialize global data */ 194 /* Initialize global data */
193 per_cpu(KM, 0) = 0x1; /* We start in kernel mode */ 195 per_cpu(KM, 0) = 0x1; /* We start in kernel mode */
194 per_cpu(CURRENT_SAVE, 0) = (unsigned long)current; 196 per_cpu(CURRENT_SAVE, 0) = (unsigned long)current;
195 } 197 }
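A quick check of the vector-copy arithmetic above, under the default (non-manual) reset vector configuration:

	/* offset = 0x2, so dst = (unsigned long *)(2 * sizeof(u32)) = 0x8;
	 * the loop copies __ivt_start[2..] to 0x8 upward, leaving the
	 * two-instruction reset vector at address 0x0 untouched. */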
196 198
197 #ifdef CONFIG_DEBUG_FS 199 #ifdef CONFIG_DEBUG_FS
198 struct dentry *of_debugfs_root; 200 struct dentry *of_debugfs_root;
199 201
200 static int microblaze_debugfs_init(void) 202 static int microblaze_debugfs_init(void)
201 { 203 {
202 of_debugfs_root = debugfs_create_dir("microblaze", NULL); 204 of_debugfs_root = debugfs_create_dir("microblaze", NULL);
203 205
204 return of_debugfs_root == NULL; 206 return of_debugfs_root == NULL;
205 } 207 }
206 arch_initcall(microblaze_debugfs_init); 208 arch_initcall(microblaze_debugfs_init);
207 209
208 # ifdef CONFIG_MMU 210 # ifdef CONFIG_MMU
209 static int __init debugfs_tlb(void) 211 static int __init debugfs_tlb(void)
210 { 212 {
211 struct dentry *d; 213 struct dentry *d;
212 214
213 if (!of_debugfs_root) 215 if (!of_debugfs_root)
214 return -ENODEV; 216 return -ENODEV;
215 217
216 d = debugfs_create_u32("tlb_skip", S_IRUGO, of_debugfs_root, &tlb_skip); 218 d = debugfs_create_u32("tlb_skip", S_IRUGO, of_debugfs_root, &tlb_skip);
217 if (!d) 219 if (!d)
218 return -ENOMEM; 220 return -ENOMEM;
219 221
220 return 0; 222 return 0;
221 } 223 }
222 device_initcall(debugfs_tlb); 224 device_initcall(debugfs_tlb);
223 # endif 225 # endif
224 #endif 226 #endif
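The u32 created above is readable from userspace once debugfs is mounted; a minimal sketch, assuming the conventional /sys/kernel/debug mount point and decimal output:

	#include <stdio.h>

	static int read_tlb_skip(unsigned int *out)
	{
		FILE *f = fopen("/sys/kernel/debug/microblaze/tlb_skip", "r");
		int ok;

		if (!f)
			return -1;
		ok = (fscanf(f, "%u", out) == 1);
		fclose(f);
		return ok ? 0 : -1;
	}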
225 227
226 static int dflt_bus_notify(struct notifier_block *nb, 228 static int dflt_bus_notify(struct notifier_block *nb,
227 unsigned long action, void *data) 229 unsigned long action, void *data)
228 { 230 {
229 struct device *dev = data; 231 struct device *dev = data;
230 232
231 /* We are only interested in device addition */ 233 /* We are only interested in device addition */
232 if (action != BUS_NOTIFY_ADD_DEVICE) 234 if (action != BUS_NOTIFY_ADD_DEVICE)
233 return 0; 235 return 0;
234 236
235 set_dma_ops(dev, &dma_direct_ops); 237 set_dma_ops(dev, &dma_direct_ops);
236 238
237 return NOTIFY_DONE; 239 return NOTIFY_DONE;
238 } 240 }
239 241
240 static struct notifier_block dflt_plat_bus_notifier = { 242 static struct notifier_block dflt_plat_bus_notifier = {
241 .notifier_call = dflt_bus_notify, 243 .notifier_call = dflt_bus_notify,
242 .priority = INT_MAX, 244 .priority = INT_MAX,
243 }; 245 };
244 246
245 static int __init setup_bus_notifier(void) 247 static int __init setup_bus_notifier(void)
246 { 248 {
247 bus_register_notifier(&platform_bus_type, &dflt_plat_bus_notifier); 249 bus_register_notifier(&platform_bus_type, &dflt_plat_bus_notifier);
248 250
249 return 0; 251 return 0;
250 } 252 }
251 253
252 arch_initcall(setup_bus_notifier); 254 arch_initcall(setup_bus_notifier);
253 255
arch/microblaze/kernel/signal.c
1 /* 1 /*
2 * Signal handling 2 * Signal handling
3 * 3 *
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> 4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix 5 * Copyright (C) 2008-2009 PetaLogix
6 * Copyright (C) 2003,2004 John Williams <jwilliams@itee.uq.edu.au> 6 * Copyright (C) 2003,2004 John Williams <jwilliams@itee.uq.edu.au>
7 * Copyright (C) 2001 NEC Corporation 7 * Copyright (C) 2001 NEC Corporation
8 * Copyright (C) 2001 Miles Bader <miles@gnu.org> 8 * Copyright (C) 2001 Miles Bader <miles@gnu.org>
9 * Copyright (C) 1999,2000 Niibe Yutaka & Kaz Kojima 9 * Copyright (C) 1999,2000 Niibe Yutaka & Kaz Kojima
10 * Copyright (C) 1991,1992 Linus Torvalds 10 * Copyright (C) 1991,1992 Linus Torvalds
11 * 11 *
12 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson 12 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
13 * 13 *
14 * This file was derived from the sh version, arch/sh/kernel/signal.c 14 * This file was derived from the sh version, arch/sh/kernel/signal.c
15 * 15 *
16 * This file is subject to the terms and conditions of the GNU General 16 * This file is subject to the terms and conditions of the GNU General
17 * Public License. See the file COPYING in the main directory of this 17 * Public License. See the file COPYING in the main directory of this
18 * archive for more details. 18 * archive for more details.
19 */ 19 */
20 20
21 #include <linux/sched.h> 21 #include <linux/sched.h>
22 #include <linux/mm.h> 22 #include <linux/mm.h>
23 #include <linux/smp.h> 23 #include <linux/smp.h>
24 #include <linux/kernel.h> 24 #include <linux/kernel.h>
25 #include <linux/signal.h> 25 #include <linux/signal.h>
26 #include <linux/errno.h> 26 #include <linux/errno.h>
27 #include <linux/wait.h> 27 #include <linux/wait.h>
28 #include <linux/ptrace.h> 28 #include <linux/ptrace.h>
29 #include <linux/unistd.h> 29 #include <linux/unistd.h>
30 #include <linux/stddef.h> 30 #include <linux/stddef.h>
31 #include <linux/personality.h> 31 #include <linux/personality.h>
32 #include <linux/percpu.h> 32 #include <linux/percpu.h>
33 #include <linux/linkage.h> 33 #include <linux/linkage.h>
34 #include <linux/tracehook.h> 34 #include <linux/tracehook.h>
35 #include <asm/entry.h> 35 #include <asm/entry.h>
36 #include <asm/ucontext.h> 36 #include <asm/ucontext.h>
37 #include <linux/uaccess.h> 37 #include <linux/uaccess.h>
38 #include <asm/pgtable.h> 38 #include <asm/pgtable.h>
39 #include <asm/pgalloc.h> 39 #include <asm/pgalloc.h>
40 #include <linux/syscalls.h> 40 #include <linux/syscalls.h>
41 #include <asm/cacheflush.h> 41 #include <asm/cacheflush.h>
42 #include <asm/syscalls.h> 42 #include <asm/syscalls.h>
43 43
44 asmlinkage long 44 asmlinkage long
45 sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, 45 sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
46 struct pt_regs *regs) 46 struct pt_regs *regs)
47 { 47 {
48 return do_sigaltstack(uss, uoss, regs->r1); 48 return do_sigaltstack(uss, uoss, regs->r1);
49 } 49 }
50 50
51 /* 51 /*
52 * Do a signal return; undo the signal stack. 52 * Do a signal return; undo the signal stack.
53 */ 53 */
54 struct sigframe { 54 struct sigframe {
55 struct sigcontext sc; 55 struct sigcontext sc;
56 unsigned long extramask[_NSIG_WORDS-1]; 56 unsigned long extramask[_NSIG_WORDS-1];
57 unsigned long tramp[2]; /* signal trampoline */ 57 unsigned long tramp[2]; /* signal trampoline */
58 }; 58 };
59 59
60 struct rt_sigframe { 60 struct rt_sigframe {
61 struct siginfo info; 61 struct siginfo info;
62 struct ucontext uc; 62 struct ucontext uc;
63 unsigned long tramp[2]; /* signal trampoline */ 63 unsigned long tramp[2]; /* signal trampoline */
64 }; 64 };
65 65
66 static int restore_sigcontext(struct pt_regs *regs, 66 static int restore_sigcontext(struct pt_regs *regs,
67 struct sigcontext __user *sc, int *rval_p) 67 struct sigcontext __user *sc, int *rval_p)
68 { 68 {
69 unsigned int err = 0; 69 unsigned int err = 0;
70 70
71 #define COPY(x) {err |= __get_user(regs->x, &sc->regs.x); } 71 #define COPY(x) {err |= __get_user(regs->x, &sc->regs.x); }
72 COPY(r0); 72 COPY(r0);
73 COPY(r1); 73 COPY(r1);
74 COPY(r2); COPY(r3); COPY(r4); COPY(r5); 74 COPY(r2); COPY(r3); COPY(r4); COPY(r5);
75 COPY(r6); COPY(r7); COPY(r8); COPY(r9); 75 COPY(r6); COPY(r7); COPY(r8); COPY(r9);
76 COPY(r10); COPY(r11); COPY(r12); COPY(r13); 76 COPY(r10); COPY(r11); COPY(r12); COPY(r13);
77 COPY(r14); COPY(r15); COPY(r16); COPY(r17); 77 COPY(r14); COPY(r15); COPY(r16); COPY(r17);
78 COPY(r18); COPY(r19); COPY(r20); COPY(r21); 78 COPY(r18); COPY(r19); COPY(r20); COPY(r21);
79 COPY(r22); COPY(r23); COPY(r24); COPY(r25); 79 COPY(r22); COPY(r23); COPY(r24); COPY(r25);
80 COPY(r26); COPY(r27); COPY(r28); COPY(r29); 80 COPY(r26); COPY(r27); COPY(r28); COPY(r29);
81 COPY(r30); COPY(r31); 81 COPY(r30); COPY(r31);
82 COPY(pc); COPY(ear); COPY(esr); COPY(fsr); 82 COPY(pc); COPY(ear); COPY(esr); COPY(fsr);
83 #undef COPY 83 #undef COPY
84 84
85 *rval_p = regs->r3; 85 *rval_p = regs->r3;
86 86
87 return err; 87 return err;
88 } 88 }
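For clarity, each COPY() above expands to an accumulated __get_user(); e.g. COPY(r5) becomes:

	err |= __get_user(regs->r5, &sc->regs.r5);
	/* fault results are OR-ed into err and checked once by the caller,
	 * rather than branching after every field */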
89 89
90 asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) 90 asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
91 { 91 {
92 struct rt_sigframe __user *frame = 92 struct rt_sigframe __user *frame =
93 (struct rt_sigframe __user *)(regs->r1); 93 (struct rt_sigframe __user *)(regs->r1);
94 94
95 sigset_t set; 95 sigset_t set;
96 int rval; 96 int rval;
97 97
98 /* Always make any pending restarted system calls return -EINTR */ 98 /* Always make any pending restarted system calls return -EINTR */
99 current_thread_info()->restart_block.fn = do_no_restart_syscall; 99 current_thread_info()->restart_block.fn = do_no_restart_syscall;
100 100
101 if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 101 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
102 goto badframe; 102 goto badframe;
103 103
104 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 104 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
105 goto badframe; 105 goto badframe;
106 106
107 set_current_blocked(&set); 107 set_current_blocked(&set);
108 108
109 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &rval)) 109 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &rval))
110 goto badframe; 110 goto badframe;
111 111
112 /* It is more difficult to avoid calling this function than to 112 /* It is more difficult to avoid calling this function than to
113 call it and ignore errors. */ 113 call it and ignore errors. */
114 if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->r1) == -EFAULT) 114 if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->r1) == -EFAULT)
115 goto badframe; 115 goto badframe;
116 116
117 return rval; 117 return rval;
118 118
119 badframe: 119 badframe:
120 force_sig(SIGSEGV, current); 120 force_sig(SIGSEGV, current);
121 return 0; 121 return 0;
122 } 122 }
123 123
124 /* 124 /*
125 * Set up a signal frame. 125 * Set up a signal frame.
126 */ 126 */
127 127
128 static int 128 static int
129 setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, 129 setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
130 unsigned long mask) 130 unsigned long mask)
131 { 131 {
132 int err = 0; 132 int err = 0;
133 133
134 #define COPY(x) {err |= __put_user(regs->x, &sc->regs.x); } 134 #define COPY(x) {err |= __put_user(regs->x, &sc->regs.x); }
135 COPY(r0); 135 COPY(r0);
136 COPY(r1); 136 COPY(r1);
137 COPY(r2); COPY(r3); COPY(r4); COPY(r5); 137 COPY(r2); COPY(r3); COPY(r4); COPY(r5);
138 COPY(r6); COPY(r7); COPY(r8); COPY(r9); 138 COPY(r6); COPY(r7); COPY(r8); COPY(r9);
139 COPY(r10); COPY(r11); COPY(r12); COPY(r13); 139 COPY(r10); COPY(r11); COPY(r12); COPY(r13);
140 COPY(r14); COPY(r15); COPY(r16); COPY(r17); 140 COPY(r14); COPY(r15); COPY(r16); COPY(r17);
141 COPY(r18); COPY(r19); COPY(r20); COPY(r21); 141 COPY(r18); COPY(r19); COPY(r20); COPY(r21);
142 COPY(r22); COPY(r23); COPY(r24); COPY(r25); 142 COPY(r22); COPY(r23); COPY(r24); COPY(r25);
143 COPY(r26); COPY(r27); COPY(r28); COPY(r29); 143 COPY(r26); COPY(r27); COPY(r28); COPY(r29);
144 COPY(r30); COPY(r31); 144 COPY(r30); COPY(r31);
145 COPY(pc); COPY(ear); COPY(esr); COPY(fsr); 145 COPY(pc); COPY(ear); COPY(esr); COPY(fsr);
146 #undef COPY 146 #undef COPY
147 147
148 err |= __put_user(mask, &sc->oldmask); 148 err |= __put_user(mask, &sc->oldmask);
149 149
150 return err; 150 return err;
151 } 151 }
152 152
153 /* 153 /*
154 * Determine which stack to use.. 154 * Determine which stack to use..
155 */ 155 */
156 static inline void __user * 156 static inline void __user *
157 get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) 157 get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
158 { 158 {
159 /* Default to using normal stack */ 159 /* Default to using normal stack */
160 unsigned long sp = regs->r1; 160 unsigned long sp = regs->r1;
161 161
162 if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && !on_sig_stack(sp)) 162 if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && !on_sig_stack(sp))
163 sp = current->sas_ss_sp + current->sas_ss_size; 163 sp = current->sas_ss_sp + current->sas_ss_size;
164 164
165 return (void __user *)((sp - frame_size) & -8UL); 165 return (void __user *)((sp - frame_size) & -8UL);
166 } 166 }
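A worked instance of the rounding in get_sigframe(): -8UL is ...fff8, so the mask aligns the frame start down to 8 bytes (numbers illustrative):

	unsigned long sp = 0x4001fff7, frame_size = 0x200;
	void __user *frame = (void __user *)((sp - frame_size) & -8UL);
	/* (0x4001fff7 - 0x200) = 0x4001fdf7; & -8UL -> 0x4001fdf0 */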
167 167
168 static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 168 static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
169 sigset_t *set, struct pt_regs *regs) 169 sigset_t *set, struct pt_regs *regs)
170 { 170 {
171 struct rt_sigframe __user *frame; 171 struct rt_sigframe __user *frame;
172 int err = 0; 172 int err = 0;
173 int signal; 173 int signal;
174 unsigned long address = 0; 174 unsigned long address = 0;
175 #ifdef CONFIG_MMU 175 #ifdef CONFIG_MMU
176 pmd_t *pmdp; 176 pmd_t *pmdp;
177 pte_t *ptep; 177 pte_t *ptep;
178 #endif 178 #endif
179 179
180 frame = get_sigframe(ka, regs, sizeof(*frame)); 180 frame = get_sigframe(ka, regs, sizeof(*frame));
181 181
182 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 182 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
183 goto give_sigsegv; 183 goto give_sigsegv;
184 184
185 signal = current_thread_info()->exec_domain 185 signal = current_thread_info()->exec_domain
186 && current_thread_info()->exec_domain->signal_invmap 186 && current_thread_info()->exec_domain->signal_invmap
187 && sig < 32 187 && sig < 32
188 ? current_thread_info()->exec_domain->signal_invmap[sig] 188 ? current_thread_info()->exec_domain->signal_invmap[sig]
189 : sig; 189 : sig;
190 190
191 if (info) 191 if (info)
192 err |= copy_siginfo_to_user(&frame->info, info); 192 err |= copy_siginfo_to_user(&frame->info, info);
193 193
194 /* Create the ucontext. */ 194 /* Create the ucontext. */
195 err |= __put_user(0, &frame->uc.uc_flags); 195 err |= __put_user(0, &frame->uc.uc_flags);
196 err |= __put_user(NULL, &frame->uc.uc_link); 196 err |= __put_user(NULL, &frame->uc.uc_link);
197 err |= __put_user((void __user *)current->sas_ss_sp, 197 err |= __put_user((void __user *)current->sas_ss_sp,
198 &frame->uc.uc_stack.ss_sp); 198 &frame->uc.uc_stack.ss_sp);
199 err |= __put_user(sas_ss_flags(regs->r1), 199 err |= __put_user(sas_ss_flags(regs->r1),
200 &frame->uc.uc_stack.ss_flags); 200 &frame->uc.uc_stack.ss_flags);
201 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); 201 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
202 err |= setup_sigcontext(&frame->uc.uc_mcontext, 202 err |= setup_sigcontext(&frame->uc.uc_mcontext,
203 regs, set->sig[0]); 203 regs, set->sig[0]);
204 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); 204 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
205 205
206 /* Set up to return from userspace. If provided, use a stub 206 /* Set up to return from userspace. If provided, use a stub
207 already in userspace. */ 207 already in userspace. */
208 /* minus 8 is offset to cater for "rtsd r15,8" */ 208 /* minus 8 is offset to cater for "rtsd r15,8" */
209 /* addi r12, r0, __NR_sigreturn */ 209 /* addi r12, r0, __NR_sigreturn */
210 err |= __put_user(0x31800000 | __NR_rt_sigreturn, 210 err |= __put_user(0x31800000 | __NR_rt_sigreturn,
211 frame->tramp + 0); 211 frame->tramp + 0);
212 /* brki r14, 0x8 */ 212 /* brki r14, 0x8 */
213 err |= __put_user(0xb9cc0008, frame->tramp + 1); 213 err |= __put_user(0xb9cc0008, frame->tramp + 1);
214 214
215 /* Return from sighandler will jump to the tramp. 215 /* Return from sighandler will jump to the tramp.
216 Negative 8 offset because return is rtsd r15, 8 */ 216 Negative 8 offset because return is rtsd r15, 8 */
217 regs->r15 = ((unsigned long)frame->tramp)-8; 217 regs->r15 = ((unsigned long)frame->tramp)-8;
218 218
219 address = ((unsigned long)frame->tramp); 219 address = ((unsigned long)frame->tramp);
220 #ifdef CONFIG_MMU 220 #ifdef CONFIG_MMU
221 pmdp = pmd_offset(pud_offset( 221 pmdp = pmd_offset(pud_offset(
222 pgd_offset(current->mm, address), 222 pgd_offset(current->mm, address),
223 address), address); 223 address), address);
224 224
225 preempt_disable(); 225 preempt_disable();
226 ptep = pte_offset_map(pmdp, address); 226 ptep = pte_offset_map(pmdp, address);
227 if (pte_present(*ptep)) { 227 if (pte_present(*ptep)) {
228 address = (unsigned long) page_address(pte_page(*ptep)); 228 address = (unsigned long) page_address(pte_page(*ptep));
229 /* MS: I need to add the offset within the page */ 229 /* MS: I need to add the offset within the page */
230 address += ((unsigned long)frame->tramp) & ~PAGE_MASK; 230 address += ((unsigned long)frame->tramp) & ~PAGE_MASK;
231 /* MS address is virtual */ 231 /* MS address is virtual */
232 address = virt_to_phys(address); 232 address = virt_to_phys(address);
233 invalidate_icache_range(address, address + 8); 233 invalidate_icache_range(address, address + 8);
234 flush_dcache_range(address, address + 8); 234 flush_dcache_range(address, address + 8);
235 } 235 }
236 pte_unmap(ptep); 236 pte_unmap(ptep);
237 preempt_enable(); 237 preempt_enable();
238 #else 238 #else
239 flush_icache_range(address, address + 8); 239 flush_icache_range(address, address + 8);
240 flush_dcache_range(address, address + 8); 240 flush_dcache_range(address, address + 8);
241 #endif 241 #endif
242 if (err) 242 if (err)
243 goto give_sigsegv; 243 goto give_sigsegv;
244 244
245 /* Set up registers for signal handler */ 245 /* Set up registers for signal handler */
246 regs->r1 = (unsigned long) frame; 246 regs->r1 = (unsigned long) frame;
247 247
248 /* Signal handler args: */ 248 /* Signal handler args: */
249 regs->r5 = signal; /* arg 0: signum */ 249 regs->r5 = signal; /* arg 0: signum */
250 regs->r6 = (unsigned long) &frame->info; /* arg 1: siginfo */ 250 regs->r6 = (unsigned long) &frame->info; /* arg 1: siginfo */
251 regs->r7 = (unsigned long) &frame->uc; /* arg2: ucontext */ 251 regs->r7 = (unsigned long) &frame->uc; /* arg2: ucontext */
252 /* Offset to handle microblaze rtid r14, 0 */ 252 /* Offset to handle microblaze rtid r14, 0 */
253 regs->pc = (unsigned long)ka->sa.sa_handler; 253 regs->pc = (unsigned long)ka->sa.sa_handler;
254 254
255 set_fs(USER_DS); 255 set_fs(USER_DS);
256 256
257 #ifdef DEBUG_SIG 257 #ifdef DEBUG_SIG
258 printk(KERN_INFO "SIG deliver (%s:%d): sp=%p pc=%08lx\n", 258 pr_info("SIG deliver (%s:%d): sp=%p pc=%08lx\n",
259 current->comm, current->pid, frame, regs->pc); 259 current->comm, current->pid, frame, regs->pc);
260 #endif 260 #endif
261 261
262 return 0; 262 return 0;
263 263
264 give_sigsegv: 264 give_sigsegv:
265 force_sigsegv(sig, current); 265 force_sigsegv(sig, current);
266 return -EFAULT; 266 return -EFAULT;
267 } 267 }
268 268
269 /* Handle restarting system calls */ 269 /* Handle restarting system calls */
270 static inline void 270 static inline void
271 handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler) 271 handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
272 { 272 {
273 switch (regs->r3) { 273 switch (regs->r3) {
274 case -ERESTART_RESTARTBLOCK: 274 case -ERESTART_RESTARTBLOCK:
275 case -ERESTARTNOHAND: 275 case -ERESTARTNOHAND:
276 if (!has_handler) 276 if (!has_handler)
277 goto do_restart; 277 goto do_restart;
278 regs->r3 = -EINTR; 278 regs->r3 = -EINTR;
279 break; 279 break;
280 case -ERESTARTSYS: 280 case -ERESTARTSYS:
281 if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) { 281 if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
282 regs->r3 = -EINTR; 282 regs->r3 = -EINTR;
283 break; 283 break;
284 } 284 }
285 /* fallthrough */ 285 /* fallthrough */
286 case -ERESTARTNOINTR: 286 case -ERESTARTNOINTR:
287 do_restart: 287 do_restart:
288 /* offset of 4 bytes to re-execute trap (brki) instruction */ 288 /* offset of 4 bytes to re-execute trap (brki) instruction */
289 regs->pc -= 4; 289 regs->pc -= 4;
290 break; 290 break;
291 } 291 }
292 } 292 }
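handle_restart() is what makes the usual SA_RESTART contract hold here: rewinding pc by 4 re-executes the brki trap instruction. A userspace illustration (editor's sketch, not from this file):

	#include <signal.h>
	#include <unistd.h>

	static void on_alrm(int sig) { (void)sig; }

	static ssize_t read_interruptible(int fd, void *buf, size_t len,
					  int restart)
	{
		struct sigaction sa = { .sa_handler = on_alrm };

		sa.sa_flags = restart ? SA_RESTART : 0;
		sigaction(SIGALRM, &sa, NULL);
		alarm(1);
		/* blocked read hit by SIGALRM: without SA_RESTART it fails
		 * with -EINTR (the -ERESTARTSYS branch above); with it,
		 * do_restart silently re-issues the syscall */
		return read(fd, buf, len);
	}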
293 293
294 /* 294 /*
295 * OK, we're invoking a handler 295 * OK, we're invoking a handler
296 */ 296 */
297 297
298 static void 298 static void
299 handle_signal(unsigned long sig, struct k_sigaction *ka, 299 handle_signal(unsigned long sig, struct k_sigaction *ka,
300 siginfo_t *info, struct pt_regs *regs) 300 siginfo_t *info, struct pt_regs *regs)
301 { 301 {
302 sigset_t *oldset = sigmask_to_save(); 302 sigset_t *oldset = sigmask_to_save();
303 int ret; 303 int ret;
304 304
305 /* Set up the stack frame */ 305 /* Set up the stack frame */
306 if (ka->sa.sa_flags & SA_SIGINFO) 306 if (ka->sa.sa_flags & SA_SIGINFO)
307 ret = setup_rt_frame(sig, ka, info, oldset, regs); 307 ret = setup_rt_frame(sig, ka, info, oldset, regs);
308 else 308 else
309 ret = setup_rt_frame(sig, ka, NULL, oldset, regs); 309 ret = setup_rt_frame(sig, ka, NULL, oldset, regs);
310 310
311 if (ret) 311 if (ret)
312 return; 312 return;
313 313
314 signal_delivered(sig, info, ka, regs, 314 signal_delivered(sig, info, ka, regs,
315 test_thread_flag(TIF_SINGLESTEP)); 315 test_thread_flag(TIF_SINGLESTEP));
316 } 316 }
317 317
318 /* 318 /*
319 * Note that 'init' is a special process: it doesn't get signals it doesn't 319 * Note that 'init' is a special process: it doesn't get signals it doesn't
320 * want to handle. Thus you cannot kill init even with a SIGKILL even by 320 * want to handle. Thus you cannot kill init even with a SIGKILL even by
321 * mistake. 321 * mistake.
322 * 322 *
323 * Note that we go through the signals twice: once to check the signals that 323 * Note that we go through the signals twice: once to check the signals that
324 * the kernel can handle, and then we build all the user-level signal handling 324 * the kernel can handle, and then we build all the user-level signal handling
325 * stack-frames in one go after that. 325 * stack-frames in one go after that.
326 */ 326 */
327 static void do_signal(struct pt_regs *regs, int in_syscall) 327 static void do_signal(struct pt_regs *regs, int in_syscall)
328 { 328 {
329 siginfo_t info; 329 siginfo_t info;
330 int signr; 330 int signr;
331 struct k_sigaction ka; 331 struct k_sigaction ka;
332 #ifdef DEBUG_SIG 332 #ifdef DEBUG_SIG
333 printk(KERN_INFO "do signal: %p %d\n", regs, in_syscall); 333 pr_info("do signal: %p %d\n", regs, in_syscall);
334 printk(KERN_INFO "do signal2: %lx %lx %ld [%lx]\n", regs->pc, regs->r1, 334 pr_info("do signal2: %lx %lx %ld [%lx]\n", regs->pc, regs->r1,
335 regs->r12, current_thread_info()->flags); 335 regs->r12, current_thread_info()->flags);
336 #endif 336 #endif
337 337
338 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 338 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
339 if (signr > 0) { 339 if (signr > 0) {
340 /* Whee! Actually deliver the signal. */ 340 /* Whee! Actually deliver the signal. */
341 if (in_syscall) 341 if (in_syscall)
342 handle_restart(regs, &ka, 1); 342 handle_restart(regs, &ka, 1);
343 handle_signal(signr, &ka, &info, regs); 343 handle_signal(signr, &ka, &info, regs);
344 return; 344 return;
345 } 345 }
346 346
347 if (in_syscall) 347 if (in_syscall)
348 handle_restart(regs, NULL, 0); 348 handle_restart(regs, NULL, 0);
349 349
350 /* 350 /*
351 * If there's no signal to deliver, we just put the saved sigmask 351 * If there's no signal to deliver, we just put the saved sigmask
352 * back. 352 * back.
353 */ 353 */
354 restore_saved_sigmask(); 354 restore_saved_sigmask();
355 } 355 }
356 356
357 asmlinkage void do_notify_resume(struct pt_regs *regs, int in_syscall) 357 asmlinkage void do_notify_resume(struct pt_regs *regs, int in_syscall)
358 { 358 {
359 /* 359 /*
360 * We want the common case to go fast, which 360 * We want the common case to go fast, which
361 * is why we may in certain cases get here from 361 * is why we may in certain cases get here from
362 * kernel mode. Just return without doing anything 362 * kernel mode. Just return without doing anything
363 * if so. 363 * if so.
364 */ 364 */
365 if (kernel_mode(regs)) 365 if (kernel_mode(regs))
366 return; 366 return;
367 367
368 if (test_thread_flag(TIF_SIGPENDING)) 368 if (test_thread_flag(TIF_SIGPENDING))
369 do_signal(regs, in_syscall); 369 do_signal(regs, in_syscall);
370 370
371 if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) 371 if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
372 tracehook_notify_resume(regs); 372 tracehook_notify_resume(regs);
373 } 373 }
374 374
arch/microblaze/kernel/sys_microblaze.c
1 /* 1 /*
2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> 2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2007-2009 PetaLogix 3 * Copyright (C) 2007-2009 PetaLogix
4 * Copyright (C) 2007 John Williams <john.williams@petalogix.com> 4 * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
5 * 5 *
6 * Copyright (C) 2006 Atmark Techno, Inc. 6 * Copyright (C) 2006 Atmark Techno, Inc.
7 * Yasushi SHOJI <yashi@atmark-techno.com> 7 * Yasushi SHOJI <yashi@atmark-techno.com>
8 * Tetsuya OHKAWA <tetsuya@atmark-techno.com> 8 * Tetsuya OHKAWA <tetsuya@atmark-techno.com>
9 * 9 *
10 * This file is subject to the terms and conditions of the GNU General Public 10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file "COPYING" in the main directory of this archive 11 * License. See the file "COPYING" in the main directory of this archive
12 * for more details. 12 * for more details.
13 */ 13 */
14 14
15 #include <linux/errno.h> 15 #include <linux/errno.h>
16 #include <linux/mm.h> 16 #include <linux/mm.h>
17 #include <linux/smp.h> 17 #include <linux/smp.h>
18 #include <linux/syscalls.h> 18 #include <linux/syscalls.h>
19 #include <linux/sem.h> 19 #include <linux/sem.h>
20 #include <linux/msg.h> 20 #include <linux/msg.h>
21 #include <linux/shm.h> 21 #include <linux/shm.h>
22 #include <linux/stat.h> 22 #include <linux/stat.h>
23 #include <linux/mman.h> 23 #include <linux/mman.h>
24 #include <linux/sys.h> 24 #include <linux/sys.h>
25 #include <linux/ipc.h> 25 #include <linux/ipc.h>
26 #include <linux/file.h> 26 #include <linux/file.h>
27 #include <linux/module.h> 27 #include <linux/module.h>
28 #include <linux/err.h> 28 #include <linux/err.h>
29 #include <linux/fs.h> 29 #include <linux/fs.h>
30 #include <linux/semaphore.h> 30 #include <linux/semaphore.h>
31 #include <linux/uaccess.h> 31 #include <linux/uaccess.h>
32 #include <linux/unistd.h> 32 #include <linux/unistd.h>
33 #include <linux/slab.h> 33 #include <linux/slab.h>
34
35 #include <asm/syscalls.h> 34 #include <asm/syscalls.h>
36 35
37 asmlinkage long sys_mmap(unsigned long addr, unsigned long len, 36 asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
38 unsigned long prot, unsigned long flags, 37 unsigned long prot, unsigned long flags,
39 unsigned long fd, off_t pgoff) 38 unsigned long fd, off_t pgoff)
40 { 39 {
41 if (pgoff & ~PAGE_MASK) 40 if (pgoff & ~PAGE_MASK)
42 return -EINVAL; 41 return -EINVAL;
43 42
44 return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT); 43 return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT);
45 } 44 }
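The checks above in numbers, assuming 4 KiB pages (PAGE_SHIFT = 12):

	/* pgoff = 0x3000: 0x3000 & ~PAGE_MASK == 0, passed on as page 3
	 * pgoff = 0x3200: 0x3200 & ~PAGE_MASK == 0x200 -> -EINVAL */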
46 45
arch/microblaze/kernel/traps.c
1 /* 1 /*
2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> 2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2007-2009 PetaLogix 3 * Copyright (C) 2007-2009 PetaLogix
4 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
5 * 5 *
6 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive 7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details. 8 * for more details.
9 */ 9 */
10 10
11 #include <linux/kernel.h> 11 #include <linux/kernel.h>
12 #include <linux/kallsyms.h> 12 #include <linux/kallsyms.h>
13 #include <linux/module.h> 13 #include <linux/module.h>
14 #include <linux/sched.h> 14 #include <linux/sched.h>
15 #include <linux/debug_locks.h> 15 #include <linux/debug_locks.h>
16 16
17 #include <asm/exceptions.h> 17 #include <asm/exceptions.h>
18 #include <asm/unwind.h> 18 #include <asm/unwind.h>
19 19
20 void trap_init(void) 20 void trap_init(void)
21 { 21 {
22 __enable_hw_exceptions(); 22 __enable_hw_exceptions();
23 } 23 }
24 24
25 static unsigned long kstack_depth_to_print; /* 0 == entire stack */ 25 static unsigned long kstack_depth_to_print; /* 0 == entire stack */
26 26
27 static int __init kstack_setup(char *s) 27 static int __init kstack_setup(char *s)
28 { 28 {
29 return !strict_strtoul(s, 0, &kstack_depth_to_print); 29 return !kstrtoul(s, 0, &kstack_depth_to_print);
30 } 30 }
31 __setup("kstack=", kstack_setup); 31 __setup("kstack=", kstack_setup);
32 32
33 void show_stack(struct task_struct *task, unsigned long *sp) 33 void show_stack(struct task_struct *task, unsigned long *sp)
34 { 34 {
35 unsigned long words_to_show; 35 unsigned long words_to_show;
36 u32 fp = (u32) sp; 36 u32 fp = (u32) sp;
37 37
38 if (fp == 0) { 38 if (fp == 0) {
39 if (task) { 39 if (task) {
40 fp = ((struct thread_info *) 40 fp = ((struct thread_info *)
41 (task->stack))->cpu_context.r1; 41 (task->stack))->cpu_context.r1;
42 } else { 42 } else {
43 /* Pick up caller of dump_stack() */ 43 /* Pick up caller of dump_stack() */
44 fp = (u32)&sp - 8; 44 fp = (u32)&sp - 8;
45 } 45 }
46 } 46 }
47 47
48 words_to_show = (THREAD_SIZE - (fp & (THREAD_SIZE - 1))) >> 2; 48 words_to_show = (THREAD_SIZE - (fp & (THREAD_SIZE - 1))) >> 2;
49 if (kstack_depth_to_print && (words_to_show > kstack_depth_to_print)) 49 if (kstack_depth_to_print && (words_to_show > kstack_depth_to_print))
50 words_to_show = kstack_depth_to_print; 50 words_to_show = kstack_depth_to_print;
51 51
52 pr_info("Kernel Stack:\n"); 52 pr_info("Kernel Stack:\n");
53 53
54 /* 54 /*
55 * Make the first line an 'odd' size if necessary to get 55 * Make the first line an 'odd' size if necessary to get
56 * remaining lines to start at an address multiple of 0x10 56 * remaining lines to start at an address multiple of 0x10
57 */ 57 */
58 if (fp & 0xF) { 58 if (fp & 0xF) {
59 unsigned long line1_words = (0x10 - (fp & 0xF)) >> 2; 59 unsigned long line1_words = (0x10 - (fp & 0xF)) >> 2;
60 if (line1_words < words_to_show) { 60 if (line1_words < words_to_show) {
61 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 32, 61 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 32,
62 4, (void *)fp, line1_words << 2, 0); 62 4, (void *)fp, line1_words << 2, 0);
63 fp += line1_words << 2; 63 fp += line1_words << 2;
64 words_to_show -= line1_words; 64 words_to_show -= line1_words;
65 } 65 }
66 } 66 }
67 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 32, 4, (void *)fp, 67 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 32, 4, (void *)fp,
68 words_to_show << 2, 0); 68 words_to_show << 2, 0);
69 printk(KERN_INFO "\n\n"); 69 pr_info("\n\nCall Trace:\n");
70
71 pr_info("Call Trace:\n");
72 microblaze_unwind(task, NULL); 70 microblaze_unwind(task, NULL);
73 pr_info("\n"); 71 pr_info("\n");
74 72
75 if (!task) 73 if (!task)
76 task = current; 74 task = current;
77 75
78 debug_show_held_locks(task); 76 debug_show_held_locks(task);
79 } 77 }
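The sizing logic above in numbers, assuming THREAD_SIZE = 0x2000:

	/* fp = 0x41f40:
	 *   fp & (THREAD_SIZE - 1)                 = 0x1f40 (stack offset)
	 *   words_to_show = (0x2000 - 0x1f40) >> 2 = 48 words
	 * i.e. only the live region between fp and the top of the stack
	 * page is hex-dumped. */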
80 78
81 void dump_stack(void) 79 void dump_stack(void)
82 { 80 {
83 show_stack(NULL, NULL); 81 show_stack(NULL, NULL);
84 } 82 }
85 EXPORT_SYMBOL(dump_stack); 83 EXPORT_SYMBOL(dump_stack);
86 84
arch/microblaze/lib/ashldi3.c
1 #include <linux/module.h> 1 #include <linux/module.h>
2 2
3 #include "libgcc.h" 3 #include "libgcc.h"
4 4
5 long long __ashldi3(long long u, word_type b) 5 long long __ashldi3(long long u, word_type b)
6 { 6 {
7 DWunion uu, w; 7 DWunion uu, w;
8 word_type bm; 8 word_type bm;
9 9
10 if (b == 0) 10 if (b == 0)
11 return u; 11 return u;
12 12
13 uu.ll = u; 13 uu.ll = u;
14 bm = 32 - b; 14 bm = 32 - b;
15 15
16 if (bm <= 0) { 16 if (bm <= 0) {
17 w.s.low = 0; 17 w.s.low = 0;
18 w.s.high = (unsigned int) uu.s.low << -bm; 18 w.s.high = (unsigned int) uu.s.low << -bm;
19 } else { 19 } else {
20 const unsigned int carries = (unsigned int) uu.s.low >> bm; 20 const unsigned int carries = (unsigned int) uu.s.low >> bm;
21 21
22 w.s.low = (unsigned int) uu.s.low << b; 22 w.s.low = (unsigned int) uu.s.low << b;
23 w.s.high = ((unsigned int) uu.s.high << b) | carries; 23 w.s.high = ((unsigned int) uu.s.high << b) | carries;
24 } 24 }
25 25
26 return w.ll; 26 return w.ll;
27 } 27 }
28
29 EXPORT_SYMBOL(__ashldi3); 28 EXPORT_SYMBOL(__ashldi3);
30 29
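A worked example of the carry path above (b < 32), usable as a quick self-test; the extern declaration stands in for libgcc.h:

	#include <assert.h>

	extern long long __ashldi3(long long u, int b); /* word_type is int */

	static void ashldi3_example(void)
	{
		/* u = 0x0000000180000000, b = 8, bm = 24:
		 *   carries  = 0x80000000 >> 24   = 0x80
		 *   w.s.low  = 0x80000000 << 8    = 0
		 *   w.s.high = (0x1 << 8) | 0x80  = 0x180 */
		assert(__ashldi3(0x180000000LL, 8) == 0x18000000000LL);
	}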
arch/microblaze/lib/ashrdi3.c
1 #include <linux/module.h> 1 #include <linux/module.h>
2 2
3 #include "libgcc.h" 3 #include "libgcc.h"
4 4
5 long long __ashrdi3(long long u, word_type b) 5 long long __ashrdi3(long long u, word_type b)
6 { 6 {
7 DWunion uu, w; 7 DWunion uu, w;
8 word_type bm; 8 word_type bm;
9 9
10 if (b == 0) 10 if (b == 0)
11 return u; 11 return u;
12 12
13 uu.ll = u; 13 uu.ll = u;
14 bm = 32 - b; 14 bm = 32 - b;
15 15
16 if (bm <= 0) { 16 if (bm <= 0) {
17 /* w.s.high = 1..1 or 0..0 */ 17 /* w.s.high = 1..1 or 0..0 */
18 w.s.high = 18 w.s.high =
19 uu.s.high >> 31; 19 uu.s.high >> 31;
20 w.s.low = uu.s.high >> -bm; 20 w.s.low = uu.s.high >> -bm;
21 } else { 21 } else {
22 const unsigned int carries = (unsigned int) uu.s.high << bm; 22 const unsigned int carries = (unsigned int) uu.s.high << bm;
23 23
24 w.s.high = uu.s.high >> b; 24 w.s.high = uu.s.high >> b;
25 w.s.low = ((unsigned int) uu.s.low >> b) | carries; 25 w.s.low = ((unsigned int) uu.s.low >> b) | carries;
26 } 26 }
27 27
28 return w.ll; 28 return w.ll;
29 } 29 }
30
31 EXPORT_SYMBOL(__ashrdi3); 30 EXPORT_SYMBOL(__ashrdi3);
32 31
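The arithmetic variant differs only in sign handling: for b >= 32 the high word becomes the sign mask (uu.s.high >> 31) and the low word is the arithmetically shifted high word; __lshrdi3 below is identical except that it zero-fills. A worked sign case:

	/* u = 0xffffff8000000000 (= -(1LL << 39)), b = 40, bm = -8:
	 *   w.s.high = uu.s.high >> 31 = 0xffffffff (all sign bits)
	 *   w.s.low  = uu.s.high >> 8  = 0xffffffff (arithmetic shift)
	 *   result   = -1, matching u >> 40 */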
arch/microblaze/lib/lshrdi3.c
1 #include <linux/module.h> 1 #include <linux/module.h>
2 2
3 #include "libgcc.h" 3 #include "libgcc.h"
4 4
5 long long __lshrdi3(long long u, word_type b) 5 long long __lshrdi3(long long u, word_type b)
6 { 6 {
7 DWunion uu, w; 7 DWunion uu, w;
8 word_type bm; 8 word_type bm;
9 9
10 if (b == 0) 10 if (b == 0)
11 return u; 11 return u;
12 12
13 uu.ll = u; 13 uu.ll = u;
14 bm = 32 - b; 14 bm = 32 - b;
15 15
16 if (bm <= 0) { 16 if (bm <= 0) {
17 w.s.high = 0; 17 w.s.high = 0;
18 w.s.low = (unsigned int) uu.s.high >> -bm; 18 w.s.low = (unsigned int) uu.s.high >> -bm;
19 } else { 19 } else {
20 const unsigned int carries = (unsigned int) uu.s.high << bm; 20 const unsigned int carries = (unsigned int) uu.s.high << bm;
21 21
22 w.s.high = (unsigned int) uu.s.high >> b; 22 w.s.high = (unsigned int) uu.s.high >> b;
23 w.s.low = ((unsigned int) uu.s.low >> b) | carries; 23 w.s.low = ((unsigned int) uu.s.low >> b) | carries;
24 } 24 }
25 25
26 return w.ll; 26 return w.ll;
27 } 27 }
28
29 EXPORT_SYMBOL(__lshrdi3); 28 EXPORT_SYMBOL(__lshrdi3);
30 29
arch/microblaze/lib/memcpy.c
1 /* 1 /*
2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> 2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008-2009 PetaLogix 3 * Copyright (C) 2008-2009 PetaLogix
4 * Copyright (C) 2007 John Williams 4 * Copyright (C) 2007 John Williams
5 * 5 *
6 * Reasonably optimised generic C-code for memcpy on Microblaze 6 * Reasonably optimised generic C-code for memcpy on Microblaze
7 * This is generic C code to do efficient, alignment-aware memcpy. 7 * This is generic C code to do efficient, alignment-aware memcpy.
8 * 8 *
9 * It is based on demo code originally Copyright 2001 by Intel Corp, taken from 9 * It is based on demo code originally Copyright 2001 by Intel Corp, taken from
10 * http://www.embedded.com/showArticle.jhtml?articleID=19205567 10 * http://www.embedded.com/showArticle.jhtml?articleID=19205567
11 * 11 *
12 * Attempts were made, unsuccessfully, to contact the original 12 * Attempts were made, unsuccessfully, to contact the original
13 * author of this code (Michael Morrow, Intel). Below is the original 13 * author of this code (Michael Morrow, Intel). Below is the original
14 * copyright notice. 14 * copyright notice.
15 * 15 *
16 * This software has been developed by Intel Corporation. 16 * This software has been developed by Intel Corporation.
17 * Intel specifically disclaims all warranties, express or 17 * Intel specifically disclaims all warranties, express or
18 * implied, and all liability, including consequential and 18 * implied, and all liability, including consequential and
19 * other indirect damages, for the use of this program, including 19 * other indirect damages, for the use of this program, including
20 * liability for infringement of any proprietary rights, 20 * liability for infringement of any proprietary rights,
21 * and including the warranties of merchantability and fitness 21 * and including the warranties of merchantability and fitness
22 * for a particular purpose. Intel does not assume any 22 * for a particular purpose. Intel does not assume any
23 * responsibility for any errors which may appear in this program 23 * responsibility for any errors which may appear in this program
24 * nor any responsibility to update it. 24 * nor any responsibility to update it.
25 */ 25 */
26 26
27 #include <linux/types.h> 27 #include <linux/types.h>
28 #include <linux/stddef.h> 28 #include <linux/stddef.h>
29 #include <linux/compiler.h> 29 #include <linux/compiler.h>
30 #include <linux/module.h> 30 #include <linux/module.h>
31 31
32 #include <linux/string.h> 32 #include <linux/string.h>
33 33
34 #ifdef __HAVE_ARCH_MEMCPY 34 #ifdef __HAVE_ARCH_MEMCPY
35 #ifndef CONFIG_OPT_LIB_FUNCTION 35 #ifndef CONFIG_OPT_LIB_FUNCTION
36 void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c) 36 void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
37 { 37 {
38 const char *src = v_src; 38 const char *src = v_src;
39 char *dst = v_dst; 39 char *dst = v_dst;
40 40
41 /* Simple, byte oriented memcpy. */ 41 /* Simple, byte oriented memcpy. */
42 while (c--) 42 while (c--)
43 *dst++ = *src++; 43 *dst++ = *src++;
44 44
45 return v_dst; 45 return v_dst;
46 } 46 }
47 #else /* CONFIG_OPT_LIB_FUNCTION */ 47 #else /* CONFIG_OPT_LIB_FUNCTION */
48 void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c) 48 void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
49 { 49 {
50 const char *src = v_src; 50 const char *src = v_src;
51 char *dst = v_dst; 51 char *dst = v_dst;
52 52
53 /* The following code tries to optimize the copy by using unsigned 53 /* The following code tries to optimize the copy by using unsigned
54 * alignment. This will work fine if both source and destination are 54 * alignment. This will work fine if both source and destination are
55 * aligned on the same boundary. However, if they are aligned on 55 * aligned on the same boundary. However, if they are aligned on
56 * different boundaries shifts will be necessary. This might result in 56 * different boundaries shifts will be necessary. This might result in
57 * bad performance on MicroBlaze systems without a barrel shifter. 57 * bad performance on MicroBlaze systems without a barrel shifter.
58 */ 58 */
59 const uint32_t *i_src; 59 const uint32_t *i_src;
60 uint32_t *i_dst; 60 uint32_t *i_dst;
61 61
62 if (likely(c >= 4)) { 62 if (likely(c >= 4)) {
63 unsigned value, buf_hold; 63 unsigned value, buf_hold;
64 64
65 /* Align the destination to a word boundary. */ 65 /* Align the destination to a word boundary. */
66 /* This is done in an endian independent manner. */ 66 /* This is done in an endian independent manner. */
67 switch ((unsigned long)dst & 3) { 67 switch ((unsigned long)dst & 3) {
68 case 1: 68 case 1:
69 *dst++ = *src++; 69 *dst++ = *src++;
70 --c; 70 --c;
71 case 2: 71 case 2:
72 *dst++ = *src++; 72 *dst++ = *src++;
73 --c; 73 --c;
74 case 3: 74 case 3:
75 *dst++ = *src++; 75 *dst++ = *src++;
76 --c; 76 --c;
77 } 77 }
78 78
79 i_dst = (void *)dst; 79 i_dst = (void *)dst;
80 80
81 /* Choose a copy scheme based on the source */ 81 /* Choose a copy scheme based on the source */
82 /* alignment relative to destination. */ 82 /* alignment relative to destination. */
83 switch ((unsigned long)src & 3) { 83 switch ((unsigned long)src & 3) {
84 case 0x0: /* Both byte offsets are aligned */ 84 case 0x0: /* Both byte offsets are aligned */
85 i_src = (const void *)src; 85 i_src = (const void *)src;
86 86
87 for (; c >= 4; c -= 4) 87 for (; c >= 4; c -= 4)
88 *i_dst++ = *i_src++; 88 *i_dst++ = *i_src++;
89 89
90 src = (const void *)i_src; 90 src = (const void *)i_src;
91 break; 91 break;
92 case 0x1: /* Unaligned - Off by 1 */ 92 case 0x1: /* Unaligned - Off by 1 */
93 /* Word align the source */ 93 /* Word align the source */
94 i_src = (const void *) ((unsigned)src & ~3); 94 i_src = (const void *) ((unsigned)src & ~3);
95 #ifndef __MICROBLAZEEL__ 95 #ifndef __MICROBLAZEEL__
96 /* Load the holding buffer */ 96 /* Load the holding buffer */
97 buf_hold = *i_src++ << 8; 97 buf_hold = *i_src++ << 8;
98 98
99 for (; c >= 4; c -= 4) { 99 for (; c >= 4; c -= 4) {
100 value = *i_src++; 100 value = *i_src++;
101 *i_dst++ = buf_hold | value >> 24; 101 *i_dst++ = buf_hold | value >> 24;
102 buf_hold = value << 8; 102 buf_hold = value << 8;
103 } 103 }
104 #else 104 #else
105 /* Load the holding buffer */ 105 /* Load the holding buffer */
106 buf_hold = (*i_src++ & 0xFFFFFF00) >>8; 106 buf_hold = (*i_src++ & 0xFFFFFF00) >> 8;
107 107
108 for (; c >= 4; c -= 4) { 108 for (; c >= 4; c -= 4) {
109 value = *i_src++; 109 value = *i_src++;
110 *i_dst++ = buf_hold | ((value & 0xFF) << 24); 110 *i_dst++ = buf_hold | ((value & 0xFF) << 24);
111 buf_hold = (value & 0xFFFFFF00) >>8; 111 buf_hold = (value & 0xFFFFFF00) >> 8;
112 } 112 }
113 #endif 113 #endif
114 /* Realign the source */ 114 /* Realign the source */
115 src = (const void *)i_src; 115 src = (const void *)i_src;
116 src -= 3; 116 src -= 3;
117 break; 117 break;
118 case 0x2: /* Unaligned - Off by 2 */ 118 case 0x2: /* Unaligned - Off by 2 */
119 /* Word align the source */ 119 /* Word align the source */
120 i_src = (const void *) ((unsigned)src & ~3); 120 i_src = (const void *) ((unsigned)src & ~3);
121 #ifndef __MICROBLAZEEL__ 121 #ifndef __MICROBLAZEEL__
122 /* Load the holding buffer */ 122 /* Load the holding buffer */
123 buf_hold = *i_src++ << 16; 123 buf_hold = *i_src++ << 16;
124 124
125 for (; c >= 4; c -= 4) { 125 for (; c >= 4; c -= 4) {
126 value = *i_src++; 126 value = *i_src++;
127 *i_dst++ = buf_hold | value >> 16; 127 *i_dst++ = buf_hold | value >> 16;
128 buf_hold = value << 16; 128 buf_hold = value << 16;
129 } 129 }
130 #else 130 #else
131 /* Load the holding buffer */ 131 /* Load the holding buffer */
132 buf_hold = (*i_src++ & 0xFFFF0000 )>>16; 132 buf_hold = (*i_src++ & 0xFFFF0000) >> 16;
133 133
134 for (; c >= 4; c -= 4) { 134 for (; c >= 4; c -= 4) {
135 value = *i_src++; 135 value = *i_src++;
136 *i_dst++ = buf_hold | ((value & 0xFFFF)<<16); 136 *i_dst++ = buf_hold | ((value & 0xFFFF) << 16);
137 buf_hold = (value & 0xFFFF0000) >>16; 137 buf_hold = (value & 0xFFFF0000) >> 16;
138 } 138 }
139 #endif 139 #endif
140 /* Realign the source */ 140 /* Realign the source */
141 src = (const void *)i_src; 141 src = (const void *)i_src;
142 src -= 2; 142 src -= 2;
143 break; 143 break;
144 case 0x3: /* Unaligned - Off by 3 */ 144 case 0x3: /* Unaligned - Off by 3 */
145 /* Word align the source */ 145 /* Word align the source */
146 i_src = (const void *) ((unsigned)src & ~3); 146 i_src = (const void *) ((unsigned)src & ~3);
147 #ifndef __MICROBLAZEEL__ 147 #ifndef __MICROBLAZEEL__
148 /* Load the holding buffer */ 148 /* Load the holding buffer */
149 buf_hold = *i_src++ << 24; 149 buf_hold = *i_src++ << 24;
150 150
151 for (; c >= 4; c -= 4) { 151 for (; c >= 4; c -= 4) {
152 value = *i_src++; 152 value = *i_src++;
153 *i_dst++ = buf_hold | value >> 8; 153 *i_dst++ = buf_hold | value >> 8;
154 buf_hold = value << 24; 154 buf_hold = value << 24;
155 } 155 }
156 #else 156 #else
157 /* Load the holding buffer */ 157 /* Load the holding buffer */
158 buf_hold = (*i_src++ & 0xFF000000) >> 24; 158 buf_hold = (*i_src++ & 0xFF000000) >> 24;
159 159
160 for (; c >= 4; c -= 4) { 160 for (; c >= 4; c -= 4) {
161 value = *i_src++; 161 value = *i_src++;
162 *i_dst++ = buf_hold | ((value & 0xFFFFFF) << 8); 162 *i_dst++ = buf_hold | ((value & 0xFFFFFF) << 8);
163 buf_hold = (value & 0xFF000000) >> 24; 163 buf_hold = (value & 0xFF000000) >> 24;
164 } 164 }
165 #endif 165 #endif
166 /* Realign the source */ 166 /* Realign the source */
167 src = (const void *)i_src; 167 src = (const void *)i_src;
168 src -= 1; 168 src -= 1;
169 break; 169 break;
170 } 170 }
171 dst = (void *)i_dst; 171 dst = (void *)i_dst;
172 } 172 }
173 173
174 /* Finish off any remaining bytes */ 174 /* Finish off any remaining bytes */
175 /* simple fast copy, ... unless a cache boundary is crossed */ 175 /* simple fast copy, ... unless a cache boundary is crossed */
176 switch (c) { 176 switch (c) {
177 case 3: 177 case 3:
178 *dst++ = *src++; 178 *dst++ = *src++;
179 case 2: 179 case 2:
180 *dst++ = *src++; 180 *dst++ = *src++;
181 case 1: 181 case 1:
182 *dst++ = *src++; 182 *dst++ = *src++;
183 } 183 }
184 184
185 return v_dst; 185 return v_dst;
186 } 186 }
187 #endif /* CONFIG_OPT_LIB_FUNCTION */ 187 #endif /* CONFIG_OPT_LIB_FUNCTION */
188 EXPORT_SYMBOL(memcpy); 188 EXPORT_SYMBOL(memcpy);
189 #endif /* __HAVE_ARCH_MEMCPY */ 189 #endif /* __HAVE_ARCH_MEMCPY */
190 190
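The unaligned cases above all follow the same holding-buffer scheme: each aligned word load contributes its usable bytes, which are shifted and merged with the leftover bytes of the previous load. Below is a minimal, self-contained sketch of the off-by-1 little-endian (__MICROBLAZEEL__) path; it assumes a little-endian host, the function and buffer names are illustrative rather than kernel API, and the uint32_t punning is kernel-style rather than strictly conforming C.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy n bytes to a word-aligned dst from a src with (src & 3) == 1. */
static void copy_off_by_1(uint8_t *dst, const uint8_t *src, size_t n)
{
	uint32_t *i_dst = (uint32_t *)dst;
	const uint32_t *i_src =
		(const uint32_t *)((uintptr_t)src & ~(uintptr_t)3);
	uint32_t value, buf_hold;

	/* Preload the three usable bytes of the first aligned word. */
	buf_hold = (*i_src++ & 0xFFFFFF00) >> 8;

	for (; n >= 4; n -= 4) {
		value = *i_src++;
		*i_dst++ = buf_hold | ((value & 0xFF) << 24);
		buf_hold = (value & 0xFFFFFF00) >> 8;
	}

	/* Realign the source and finish the tail bytewise. */
	src = (const uint8_t *)i_src - 3;
	dst = (uint8_t *)i_dst;
	while (n--)
		*dst++ = *src++;
}

int main(void)
{
	static _Alignas(4) uint8_t src[64], dst[64];
	uint8_t ref[64];
	size_t i;

	for (i = 0; i < sizeof(src); i++)
		src[i] = (uint8_t)i;

	copy_off_by_1(dst, src + 1, 42);	/* source is off by 1 */
	memcpy(ref, src + 1, 42);
	printf("match: %d\n", memcmp(dst, ref, 42) == 0);
	return 0;
}

As in the kernel loop, the aligned word reads may extend up to three bytes past the last byte actually copied, so callers must tolerate that over-read within the final aligned word.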
arch/microblaze/lib/memmove.c
1 /* 1 /*
2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> 2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008-2009 PetaLogix 3 * Copyright (C) 2008-2009 PetaLogix
4 * Copyright (C) 2007 John Williams 4 * Copyright (C) 2007 John Williams
5 * 5 *
6 * Reasonably optimised generic C-code for memmove on Microblaze 6 * Reasonably optimised generic C-code for memmove on Microblaze
7 * This is generic C code to do efficient, alignment-aware memmove. 7 * This is generic C code to do efficient, alignment-aware memmove.
8 * 8 *
9 * It is based on demo code originally Copyright 2001 by Intel Corp, taken from 9 * It is based on demo code originally Copyright 2001 by Intel Corp, taken from
10 * http://www.embedded.com/showArticle.jhtml?articleID=19205567 10 * http://www.embedded.com/showArticle.jhtml?articleID=19205567
11 * 11 *
12 * Attempts were made, unsuccessfully, to contact the original 12 * Attempts were made, unsuccessfully, to contact the original
13 * author of this code (Michael Morrow, Intel). Below is the original 13 * author of this code (Michael Morrow, Intel). Below is the original
14 * copyright notice. 14 * copyright notice.
15 * 15 *
16 * This software has been developed by Intel Corporation. 16 * This software has been developed by Intel Corporation.
17 * Intel specifically disclaims all warranties, express or 17 * Intel specifically disclaims all warranties, express or
18 * implied, and all liability, including consequential and 18 * implied, and all liability, including consequential and
19 * other indirect damages, for the use of this program, including 19 * other indirect damages, for the use of this program, including
20 * liability for infringement of any proprietary rights, 20 * liability for infringement of any proprietary rights,
21 * and including the warranties of merchantability and fitness 21 * and including the warranties of merchantability and fitness
22 * for a particular purpose. Intel does not assume any 22 * for a particular purpose. Intel does not assume any
23 * responsibility for any errors which may appear in this program 23 * responsibility for any errors which may appear in this program
24 * nor any responsibility to update it. 24 * nor any responsibility to update it.
25 */ 25 */
26 26
27 #include <linux/types.h> 27 #include <linux/types.h>
28 #include <linux/stddef.h> 28 #include <linux/stddef.h>
29 #include <linux/compiler.h> 29 #include <linux/compiler.h>
30 #include <linux/module.h> 30 #include <linux/module.h>
31 #include <linux/string.h> 31 #include <linux/string.h>
32 32
33 #ifdef __HAVE_ARCH_MEMMOVE 33 #ifdef __HAVE_ARCH_MEMMOVE
34 #ifndef CONFIG_OPT_LIB_FUNCTION 34 #ifndef CONFIG_OPT_LIB_FUNCTION
35 void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) 35 void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
36 { 36 {
37 const char *src = v_src; 37 const char *src = v_src;
38 char *dst = v_dst; 38 char *dst = v_dst;
39 39
40 if (!c) 40 if (!c)
41 return v_dst; 41 return v_dst;
42 42
43 /* Use memcpy when source is higher than dest */ 43 /* Use memcpy when source is higher than dest */
44 if (v_dst <= v_src) 44 if (v_dst <= v_src)
45 return memcpy(v_dst, v_src, c); 45 return memcpy(v_dst, v_src, c);
46 46
47 /* copy backwards, from end to beginning */ 47 /* copy backwards, from end to beginning */
48 src += c; 48 src += c;
49 dst += c; 49 dst += c;
50 50
51 /* Simple, byte oriented memmove. */ 51 /* Simple, byte oriented memmove. */
52 while (c--) 52 while (c--)
53 *--dst = *--src; 53 *--dst = *--src;
54 54
55 return v_dst; 55 return v_dst;
56 } 56 }
57 #else /* CONFIG_OPT_LIB_FUNCTION */ 57 #else /* CONFIG_OPT_LIB_FUNCTION */
58 void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) 58 void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
59 { 59 {
60 const char *src = v_src; 60 const char *src = v_src;
61 char *dst = v_dst; 61 char *dst = v_dst;
62 const uint32_t *i_src; 62 const uint32_t *i_src;
63 uint32_t *i_dst; 63 uint32_t *i_dst;
64 64
65 if (!c) 65 if (!c)
66 return v_dst; 66 return v_dst;
67 67
68 /* Use memcpy when source is higher than dest */ 68 /* Use memcpy when source is higher than dest */
69 if (v_dst <= v_src) 69 if (v_dst <= v_src)
70 return memcpy(v_dst, v_src, c); 70 return memcpy(v_dst, v_src, c);
71 71
72 /* The following code tries to optimize the copy by using unsigned 72 /* The following code tries to optimize the copy by using unsigned
73 * alignment. This will work fine if both source and destination are 73 * alignment. This will work fine if both source and destination are
74 * aligned on the same boundary. However, if they are aligned on 74 * aligned on the same boundary. However, if they are aligned on
75 * different boundaries shifts will be necessary. This might result in 75 * different boundaries shifts will be necessary. This might result in
76 * bad performance on MicroBlaze systems without a barrel shifter. 76 * bad performance on MicroBlaze systems without a barrel shifter.
77 */ 77 */
78 /* FIXME this part needs more testing */ 78 /* FIXME this part needs more testing */
79 /* Do a descending copy - this is a bit trickier! */ 79 /* Do a descending copy - this is a bit trickier! */
80 dst += c; 80 dst += c;
81 src += c; 81 src += c;
82 82
83 if (c >= 4) { 83 if (c >= 4) {
84 unsigned value, buf_hold; 84 unsigned value, buf_hold;
85 85
86 /* Align the destination to a word boundary. */ 86 /* Align the destination to a word boundary. */
87 /* This is done in an endian independent manner. */ 87 /* This is done in an endian independent manner. */
88 88
89 switch ((unsigned long)dst & 3) { 89 switch ((unsigned long)dst & 3) {
90 case 3: 90 case 3:
91 *--dst = *--src; 91 *--dst = *--src;
92 --c; 92 --c;
93 case 2: 93 case 2:
94 *--dst = *--src; 94 *--dst = *--src;
95 --c; 95 --c;
96 case 1: 96 case 1:
97 *--dst = *--src; 97 *--dst = *--src;
98 --c; 98 --c;
99 } 99 }
100 100
101 i_dst = (void *)dst; 101 i_dst = (void *)dst;
102 /* Choose a copy scheme based on the source */ 102 /* Choose a copy scheme based on the source */
103 * alignment relative to destination. */ 103 * alignment relative to destination. */
104 switch ((unsigned long)src & 3) { 104 switch ((unsigned long)src & 3) {
105 case 0x0: /* Both byte offsets are aligned */ 105 case 0x0: /* Both byte offsets are aligned */
106 106
107 i_src = (const void *)src; 107 i_src = (const void *)src;
108 108
109 for (; c >= 4; c -= 4) 109 for (; c >= 4; c -= 4)
110 *--i_dst = *--i_src; 110 *--i_dst = *--i_src;
111 111
112 src = (const void *)i_src; 112 src = (const void *)i_src;
113 break; 113 break;
114 case 0x1: /* Unaligned - Off by 1 */ 114 case 0x1: /* Unaligned - Off by 1 */
115 /* Word align the source */ 115 /* Word align the source */
116 i_src = (const void *) (((unsigned)src + 4) & ~3); 116 i_src = (const void *) (((unsigned)src + 4) & ~3);
117 #ifndef __MICROBLAZEEL__ 117 #ifndef __MICROBLAZEEL__
118 /* Load the holding buffer */ 118 /* Load the holding buffer */
119 buf_hold = *--i_src >> 24; 119 buf_hold = *--i_src >> 24;
120 120
121 for (; c >= 4; c -= 4) { 121 for (; c >= 4; c -= 4) {
122 value = *--i_src; 122 value = *--i_src;
123 *--i_dst = buf_hold << 8 | value; 123 *--i_dst = buf_hold << 8 | value;
124 buf_hold = value >> 24; 124 buf_hold = value >> 24;
125 } 125 }
126 #else 126 #else
127 /* Load the holding buffer */ 127 /* Load the holding buffer */
128 buf_hold = (*--i_src & 0xFF) << 24; 128 buf_hold = (*--i_src & 0xFF) << 24;
129 129
130 for (; c >= 4; c -= 4) { 130 for (; c >= 4; c -= 4) {
131 value = *--i_src; 131 value = *--i_src;
132 *--i_dst = buf_hold | ((value & 0xFFFFFF00)>>8); 132 *--i_dst = buf_hold |
133 ((value & 0xFFFFFF00) >> 8);
133 buf_hold = (value & 0xFF) << 24; 134 buf_hold = (value & 0xFF) << 24;
134 } 135 }
135 #endif 136 #endif
136 /* Realign the source */ 137 /* Realign the source */
137 src = (const void *)i_src; 138 src = (const void *)i_src;
138 src += 1; 139 src += 1;
139 break; 140 break;
140 case 0x2: /* Unaligned - Off by 2 */ 141 case 0x2: /* Unaligned - Off by 2 */
141 /* Word align the source */ 142 /* Word align the source */
142 i_src = (const void *) (((unsigned)src + 4) & ~3); 143 i_src = (const void *) (((unsigned)src + 4) & ~3);
143 #ifndef __MICROBLAZEEL__ 144 #ifndef __MICROBLAZEEL__
144 /* Load the holding buffer */ 145 /* Load the holding buffer */
145 buf_hold = *--i_src >> 16; 146 buf_hold = *--i_src >> 16;
146 147
147 for (; c >= 4; c -= 4) { 148 for (; c >= 4; c -= 4) {
148 value = *--i_src; 149 value = *--i_src;
149 *--i_dst = buf_hold << 16 | value; 150 *--i_dst = buf_hold << 16 | value;
150 buf_hold = value >> 16; 151 buf_hold = value >> 16;
151 } 152 }
152 #else 153 #else
153 /* Load the holding buffer */ 154 /* Load the holding buffer */
154 buf_hold = (*--i_src & 0xFFFF) << 16; 155 buf_hold = (*--i_src & 0xFFFF) << 16;
155 156
156 for (; c >= 4; c -= 4) { 157 for (; c >= 4; c -= 4) {
157 value = *--i_src; 158 value = *--i_src;
158 *--i_dst = buf_hold | ((value & 0xFFFF0000)>>16); 159 *--i_dst = buf_hold |
160 ((value & 0xFFFF0000) >> 16);
159 buf_hold = (value & 0xFFFF) << 16; 161 buf_hold = (value & 0xFFFF) << 16;
160 } 162 }
161 #endif 163 #endif
162 /* Realign the source */ 164 /* Realign the source */
163 src = (const void *)i_src; 165 src = (const void *)i_src;
164 src += 2; 166 src += 2;
165 break; 167 break;
166 case 0x3: /* Unaligned - Off by 3 */ 168 case 0x3: /* Unaligned - Off by 3 */
167 /* Word align the source */ 169 /* Word align the source */
168 i_src = (const void *) (((unsigned)src + 4) & ~3); 170 i_src = (const void *) (((unsigned)src + 4) & ~3);
169 #ifndef __MICROBLAZEEL__ 171 #ifndef __MICROBLAZEEL__
170 /* Load the holding buffer */ 172 /* Load the holding buffer */
171 buf_hold = *--i_src >> 8; 173 buf_hold = *--i_src >> 8;
172 174
173 for (; c >= 4; c -= 4) { 175 for (; c >= 4; c -= 4) {
174 value = *--i_src; 176 value = *--i_src;
175 *--i_dst = buf_hold << 24 | value; 177 *--i_dst = buf_hold << 24 | value;
176 buf_hold = value >> 8; 178 buf_hold = value >> 8;
177 } 179 }
178 #else 180 #else
179 /* Load the holding buffer */ 181 /* Load the holding buffer */
180 buf_hold = (*--i_src & 0xFFFFFF) << 8; 182 buf_hold = (*--i_src & 0xFFFFFF) << 8;
181 183
182 for (; c >= 4; c -= 4) { 184 for (; c >= 4; c -= 4) {
183 value = *--i_src; 185 value = *--i_src;
184 *--i_dst = buf_hold | ((value & 0xFF000000)>> 24); 186 *--i_dst = buf_hold |
187 ((value & 0xFF000000) >> 24);
185 buf_hold = (value & 0xFFFFFF) << 8; 188 buf_hold = (value & 0xFFFFFF) << 8;
186 } 189 }
187 #endif 190 #endif
188 /* Realign the source */ 191 /* Realign the source */
189 src = (const void *)i_src; 192 src = (const void *)i_src;
190 src += 3; 193 src += 3;
191 break; 194 break;
192 } 195 }
193 dst = (void *)i_dst; 196 dst = (void *)i_dst;
194 } 197 }
195 198
196 /* simple fast copy, ... unless a cache boundary is crossed */ 199 /* simple fast copy, ... unless a cache boundary is crossed */
197 /* Finish off any remaining bytes */ 200 /* Finish off any remaining bytes */
198 switch (c) { 201 switch (c) {
199 case 4: 202 case 4:
200 *--dst = *--src; 203 *--dst = *--src;
201 case 3: 204 case 3:
202 *--dst = *--src; 205 *--dst = *--src;
203 case 2: 206 case 2:
204 *--dst = *--src; 207 *--dst = *--src;
205 case 1: 208 case 1:
206 *--dst = *--src; 209 *--dst = *--src;
207 } 210 }
208 return v_dst; 211 return v_dst;
209 } 212 }
210 #endif /* CONFIG_OPT_LIB_FUNCTION */ 213 #endif /* CONFIG_OPT_LIB_FUNCTION */
211 EXPORT_SYMBOL(memmove); 214 EXPORT_SYMBOL(memmove);
212 #endif /* __HAVE_ARCH_MEMMOVE */ 215 #endif /* __HAVE_ARCH_MEMMOVE */
213 216
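The v_dst <= v_src fast path above exists because a plain forward copy is only safe when the destination does not overlap the tail of the source; when the destination sits above an overlapping source, bytes must be copied from the end backwards, or the copy overwrites source bytes before it has read them. A small self-contained demonstration (copy_forward() is an illustrative name, not kernel API):

#include <stdio.h>
#include <string.h>

static void copy_forward(char *dst, const char *src, size_t n)
{
	while (n--)
		*dst++ = *src++;
}

int main(void)
{
	char a[] = "abcdef";
	char b[] = "abcdef";

	copy_forward(a + 2, a, 4);	/* overlapping, wrong direction */
	memmove(b + 2, b, 4);		/* handles overlap correctly */

	printf("forward: %s\n", a);	/* "ababab" - corrupted */
	printf("memmove: %s\n", b);	/* "ababcd" - correct */
	return 0;
}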
arch/microblaze/mm/consistent.c
1 /* 1 /*
2 * Microblaze support for cache consistent memory. 2 * Microblaze support for cache consistent memory.
3 * Copyright (C) 2010 Michal Simek <monstr@monstr.eu> 3 * Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
4 * Copyright (C) 2010 PetaLogix 4 * Copyright (C) 2010 PetaLogix
5 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au> 5 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
6 * 6 *
7 * Based on PowerPC version derived from arch/arm/mm/consistent.c 7 * Based on PowerPC version derived from arch/arm/mm/consistent.c
8 * Copyright (C) 2001 Dan Malek (dmalek@jlc.net) 8 * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
9 * Copyright (C) 2000 Russell King 9 * Copyright (C) 2000 Russell King
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as 12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation. 13 * published by the Free Software Foundation.
14 */ 14 */
15 15
16 #include <linux/module.h> 16 #include <linux/module.h>
17 #include <linux/signal.h> 17 #include <linux/signal.h>
18 #include <linux/sched.h> 18 #include <linux/sched.h>
19 #include <linux/kernel.h> 19 #include <linux/kernel.h>
20 #include <linux/errno.h> 20 #include <linux/errno.h>
21 #include <linux/string.h> 21 #include <linux/string.h>
22 #include <linux/types.h> 22 #include <linux/types.h>
23 #include <linux/ptrace.h> 23 #include <linux/ptrace.h>
24 #include <linux/mman.h> 24 #include <linux/mman.h>
25 #include <linux/mm.h> 25 #include <linux/mm.h>
26 #include <linux/swap.h> 26 #include <linux/swap.h>
27 #include <linux/stddef.h> 27 #include <linux/stddef.h>
28 #include <linux/vmalloc.h> 28 #include <linux/vmalloc.h>
29 #include <linux/init.h> 29 #include <linux/init.h>
30 #include <linux/delay.h> 30 #include <linux/delay.h>
31 #include <linux/bootmem.h> 31 #include <linux/bootmem.h>
32 #include <linux/highmem.h> 32 #include <linux/highmem.h>
33 #include <linux/pci.h> 33 #include <linux/pci.h>
34 #include <linux/interrupt.h> 34 #include <linux/interrupt.h>
35 #include <linux/gfp.h> 35 #include <linux/gfp.h>
36 36
37 #include <asm/pgalloc.h> 37 #include <asm/pgalloc.h>
38 #include <linux/io.h> 38 #include <linux/io.h>
39 #include <linux/hardirq.h> 39 #include <linux/hardirq.h>
40 #include <asm/mmu_context.h> 40 #include <linux/mmu_context.h>
41 #include <asm/mmu.h> 41 #include <asm/mmu.h>
42 #include <linux/uaccess.h> 42 #include <linux/uaccess.h>
43 #include <asm/pgtable.h> 43 #include <asm/pgtable.h>
44 #include <asm/cpuinfo.h> 44 #include <asm/cpuinfo.h>
45 #include <asm/tlbflush.h> 45 #include <asm/tlbflush.h>
46 46
47 #ifndef CONFIG_MMU 47 #ifndef CONFIG_MMU
48 /* I have to use dcache values because I can't rely on ram size */ 48 /* I have to use dcache values because I can't rely on ram size */
49 # define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1) 49 # define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
50 #endif 50 #endif
51 51
52 /* 52 /*
53 * Consistent memory allocators. Used for DMA devices that want to 53 * Consistent memory allocators. Used for DMA devices that want to
54 * share uncached memory with the processor core. 54 * share uncached memory with the processor core.
55 * My crufty no-MMU approach is simple. In the HW platform we can optionally 55 * My crufty no-MMU approach is simple. In the HW platform we can optionally
56 * mirror the DDR up above the processor cacheable region. So, memory accessed 56 * mirror the DDR up above the processor cacheable region. So, memory accessed
57 * in this mirror region will not be cached. It's allocated from the same 57 * in this mirror region will not be cached. It's allocated from the same
58 * pool as normal memory, but the handle we return is shifted up into the 58 * pool as normal memory, but the handle we return is shifted up into the
59 * uncached region. This will no doubt cause big problems if memory allocated 59 * uncached region. This will no doubt cause big problems if memory allocated
60 * here is not also freed properly. -- JW 60 * here is not also freed properly. -- JW
61 */ 61 */
62 void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle) 62 void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
63 { 63 {
64 unsigned long order, vaddr; 64 unsigned long order, vaddr;
65 void *ret; 65 void *ret;
66 unsigned int i, err = 0; 66 unsigned int i, err = 0;
67 struct page *page, *end; 67 struct page *page, *end;
68 68
69 #ifdef CONFIG_MMU 69 #ifdef CONFIG_MMU
70 phys_addr_t pa; 70 phys_addr_t pa;
71 struct vm_struct *area; 71 struct vm_struct *area;
72 unsigned long va; 72 unsigned long va;
73 #endif 73 #endif
74 74
75 if (in_interrupt()) 75 if (in_interrupt())
76 BUG(); 76 BUG();
77 77
78 /* Only allocate page size areas. */ 78 /* Only allocate page size areas. */
79 size = PAGE_ALIGN(size); 79 size = PAGE_ALIGN(size);
80 order = get_order(size); 80 order = get_order(size);
81 81
82 vaddr = __get_free_pages(gfp, order); 82 vaddr = __get_free_pages(gfp, order);
83 if (!vaddr) 83 if (!vaddr)
84 return NULL; 84 return NULL;
85 85
86 /* 86 /*
87 * we need to ensure that there are no cachelines in use, 87 * we need to ensure that there are no cachelines in use,
88 * or worse dirty in this area. 88 * or worse dirty in this area.
89 */ 89 */
90 flush_dcache_range(virt_to_phys((void *)vaddr), 90 flush_dcache_range(virt_to_phys((void *)vaddr),
91 virt_to_phys((void *)vaddr) + size); 91 virt_to_phys((void *)vaddr) + size);
92 92
93 #ifndef CONFIG_MMU 93 #ifndef CONFIG_MMU
94 ret = (void *)vaddr; 94 ret = (void *)vaddr;
95 /* 95 /*
96 * Here's the magic! Note if the uncached shadow is not implemented, 96 * Here's the magic! Note if the uncached shadow is not implemented,
97 * it's up to the calling code to also test that condition and make 97 * it's up to the calling code to also test that condition and make
98 * other arrangements, such as manually flushing the cache and so on. 98 * other arrangements, such as manually flushing the cache and so on.
99 */ 99 */
100 # ifdef CONFIG_XILINX_UNCACHED_SHADOW 100 # ifdef CONFIG_XILINX_UNCACHED_SHADOW
101 ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK); 101 ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
102 # endif 102 # endif
103 if ((unsigned int)ret > cpuinfo.dcache_base && 103 if ((unsigned int)ret > cpuinfo.dcache_base &&
104 (unsigned int)ret < cpuinfo.dcache_high) 104 (unsigned int)ret < cpuinfo.dcache_high)
105 printk(KERN_WARNING 105 pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
106 "ERROR: Your cache coherent area is CACHED!!!\n");
107 106
108 /* dma_handle is same as physical (shadowed) address */ 107 /* dma_handle is same as physical (shadowed) address */
109 *dma_handle = (dma_addr_t)ret; 108 *dma_handle = (dma_addr_t)ret;
110 #else 109 #else
111 /* Allocate some common virtual space to map the new pages. */ 110 /* Allocate some common virtual space to map the new pages. */
112 area = get_vm_area(size, VM_ALLOC); 111 area = get_vm_area(size, VM_ALLOC);
113 if (!area) { 112 if (!area) {
114 free_pages(vaddr, order); 113 free_pages(vaddr, order);
115 return NULL; 114 return NULL;
116 } 115 }
117 va = (unsigned long) area->addr; 116 va = (unsigned long) area->addr;
118 ret = (void *)va; 117 ret = (void *)va;
119 118
120 /* This gives us the real physical address of the first page. */ 119 /* This gives us the real physical address of the first page. */
121 *dma_handle = pa = virt_to_bus((void *)vaddr); 120 *dma_handle = pa = virt_to_bus((void *)vaddr);
122 #endif 121 #endif
123 122
124 /* 123 /*
125 * free wasted pages. We skip the first page since we know 124 * free wasted pages. We skip the first page since we know
126 * that it will have count = 1 and won't require freeing. 125 * that it will have count = 1 and won't require freeing.
127 * We also mark the pages in use as reserved so that 126 * We also mark the pages in use as reserved so that
128 * remap_page_range works. 127 * remap_page_range works.
129 */ 128 */
130 page = virt_to_page(vaddr); 129 page = virt_to_page(vaddr);
131 end = page + (1 << order); 130 end = page + (1 << order);
132 131
133 split_page(page, order); 132 split_page(page, order);
134 133
135 for (i = 0; i < size && err == 0; i += PAGE_SIZE) { 134 for (i = 0; i < size && err == 0; i += PAGE_SIZE) {
136 #ifdef CONFIG_MMU 135 #ifdef CONFIG_MMU
137 /* MS: This is the whole magic - use cache inhibit pages */ 136 /* MS: This is the whole magic - use cache inhibit pages */
138 err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE); 137 err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE);
139 #endif 138 #endif
140 139
141 SetPageReserved(page); 140 SetPageReserved(page);
142 page++; 141 page++;
143 } 142 }
144 143
145 /* Free the otherwise unused pages. */ 144 /* Free the otherwise unused pages. */
146 while (page < end) { 145 while (page < end) {
147 __free_page(page); 146 __free_page(page);
148 page++; 147 page++;
149 } 148 }
150 149
151 if (err) { 150 if (err) {
152 free_pages(vaddr, order); 151 free_pages(vaddr, order);
153 return NULL; 152 return NULL;
154 } 153 }
155 154
156 return ret; 155 return ret;
157 } 156 }
158 EXPORT_SYMBOL(consistent_alloc); 157 EXPORT_SYMBOL(consistent_alloc);
159 158
160 /* 159 /*
161 * free page(s) as defined by the above mapping. 160 * free page(s) as defined by the above mapping.
162 */ 161 */
163 void consistent_free(size_t size, void *vaddr) 162 void consistent_free(size_t size, void *vaddr)
164 { 163 {
165 struct page *page; 164 struct page *page;
166 165
167 if (in_interrupt()) 166 if (in_interrupt())
168 BUG(); 167 BUG();
169 168
170 size = PAGE_ALIGN(size); 169 size = PAGE_ALIGN(size);
171 170
172 #ifndef CONFIG_MMU 171 #ifndef CONFIG_MMU
173 /* Clear SHADOW_MASK bit in address, and free as per usual */ 172 /* Clear SHADOW_MASK bit in address, and free as per usual */
174 # ifdef CONFIG_XILINX_UNCACHED_SHADOW 173 # ifdef CONFIG_XILINX_UNCACHED_SHADOW
175 vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK); 174 vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
176 # endif 175 # endif
177 page = virt_to_page(vaddr); 176 page = virt_to_page(vaddr);
178 177
179 do { 178 do {
180 ClearPageReserved(page); 179 ClearPageReserved(page);
181 __free_page(page); 180 __free_page(page);
182 page++; 181 page++;
183 } while (size -= PAGE_SIZE); 182 } while (size -= PAGE_SIZE);
184 #else 183 #else
185 do { 184 do {
186 pte_t *ptep; 185 pte_t *ptep;
187 unsigned long pfn; 186 unsigned long pfn;
188 187
189 ptep = pte_offset_kernel(pmd_offset(pgd_offset_k( 188 ptep = pte_offset_kernel(pmd_offset(pgd_offset_k(
190 (unsigned int)vaddr), 189 (unsigned int)vaddr),
191 (unsigned int)vaddr), 190 (unsigned int)vaddr),
192 (unsigned int)vaddr); 191 (unsigned int)vaddr);
193 if (!pte_none(*ptep) && pte_present(*ptep)) { 192 if (!pte_none(*ptep) && pte_present(*ptep)) {
194 pfn = pte_pfn(*ptep); 193 pfn = pte_pfn(*ptep);
195 pte_clear(&init_mm, (unsigned int)vaddr, ptep); 194 pte_clear(&init_mm, (unsigned int)vaddr, ptep);
196 if (pfn_valid(pfn)) { 195 if (pfn_valid(pfn)) {
197 page = pfn_to_page(pfn); 196 page = pfn_to_page(pfn);
198 197
199 ClearPageReserved(page); 198 ClearPageReserved(page);
200 __free_page(page); 199 __free_page(page);
201 } 200 }
202 } 201 }
203 vaddr += PAGE_SIZE; 202 vaddr += PAGE_SIZE;
204 } while (size -= PAGE_SIZE); 203 } while (size -= PAGE_SIZE);
205 204
206 /* flush tlb */ 205 /* flush tlb */
207 flush_tlb_all(); 206 flush_tlb_all();
208 #endif 207 #endif
209 } 208 }
210 EXPORT_SYMBOL(consistent_free); 209 EXPORT_SYMBOL(consistent_free);
211 210
212 /* 211 /*
213 * make an area consistent. 212 * make an area consistent.
214 */ 213 */
215 void consistent_sync(void *vaddr, size_t size, int direction) 214 void consistent_sync(void *vaddr, size_t size, int direction)
216 { 215 {
217 unsigned long start; 216 unsigned long start;
218 unsigned long end; 217 unsigned long end;
219 218
220 start = (unsigned long)vaddr; 219 start = (unsigned long)vaddr;
221 220
222 /* Convert start address back down to unshadowed memory region */ 221 /* Convert start address back down to unshadowed memory region */
223 #ifdef CONFIG_XILINX_UNCACHED_SHADOW 222 #ifdef CONFIG_XILINX_UNCACHED_SHADOW
224 start &= ~UNCACHED_SHADOW_MASK; 223 start &= ~UNCACHED_SHADOW_MASK;
225 #endif 224 #endif
226 end = start + size; 225 end = start + size;
227 226
228 switch (direction) { 227 switch (direction) {
229 case PCI_DMA_NONE: 228 case PCI_DMA_NONE:
230 BUG(); 229 BUG();
231 case PCI_DMA_FROMDEVICE: /* invalidate only */ 230 case PCI_DMA_FROMDEVICE: /* invalidate only */
232 invalidate_dcache_range(start, end); 231 invalidate_dcache_range(start, end);
233 break; 232 break;
234 case PCI_DMA_TODEVICE: /* writeback only */ 233 case PCI_DMA_TODEVICE: /* writeback only */
235 flush_dcache_range(start, end); 234 flush_dcache_range(start, end);
236 break; 235 break;
237 case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */ 236 case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */
238 flush_dcache_range(start, end); 237 flush_dcache_range(start, end);
239 break; 238 break;
240 } 239 }
241 } 240 }
242 EXPORT_SYMBOL(consistent_sync); 241 EXPORT_SYMBOL(consistent_sync);
243 242
244 /* 243 /*
245 * consistent_sync_page makes memory consistent. Identical 244 * consistent_sync_page makes memory consistent. Identical
246 * to consistent_sync, but takes a struct page instead of a 245 * to consistent_sync, but takes a struct page instead of a
247 * virtual address 246 * virtual address
248 */ 247 */
249 void consistent_sync_page(struct page *page, unsigned long offset, 248 void consistent_sync_page(struct page *page, unsigned long offset,
250 size_t size, int direction) 249 size_t size, int direction)
251 { 250 {
252 unsigned long start = (unsigned long)page_address(page) + offset; 251 unsigned long start = (unsigned long)page_address(page) + offset;
253 consistent_sync((void *)start, size, direction); 252 consistent_sync((void *)start, size, direction);
254 } 253 }
255 EXPORT_SYMBOL(consistent_sync_page); 254 EXPORT_SYMBOL(consistent_sync_page);
256 255
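The no-MMU "uncached shadow" scheme above is pure address arithmetic: UNCACHED_SHADOW_MASK is the size of the cache-covered region, so OR-ing it into an allocation handle moves the handle into the uncached mirror, and masking it off again recovers the original address for freeing. A worked example with made-up platform values (on real hardware dcache_base/dcache_high come from cpuinfo):

#include <stdio.h>

int main(void)
{
	unsigned long dcache_base = 0x00000000;	/* hypothetical */
	unsigned long dcache_high = 0x0fffffff;	/* hypothetical */
	unsigned long mask = dcache_high - dcache_base + 1;	/* 0x10000000 */
	unsigned long cached = 0x00123000;	/* hypothetical allocation */

	unsigned long uncached = cached | mask;	/* handle in the mirror */
	unsigned long back = uncached & ~mask;	/* address to free again */

	printf("mask=%#lx uncached=%#lx back=%#lx\n", mask, uncached, back);
	return 0;
}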
arch/microblaze/mm/fault.c
1 /* 1 /*
2 * arch/microblaze/mm/fault.c 2 * arch/microblaze/mm/fault.c
3 * 3 *
4 * Copyright (C) 2007 Xilinx, Inc. All rights reserved. 4 * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
5 * 5 *
6 * Derived from "arch/ppc/mm/fault.c" 6 * Derived from "arch/ppc/mm/fault.c"
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
8 * 8 *
9 * Derived from "arch/i386/mm/fault.c" 9 * Derived from "arch/i386/mm/fault.c"
10 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 10 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
11 * 11 *
12 * Modified by Cort Dougan and Paul Mackerras. 12 * Modified by Cort Dougan and Paul Mackerras.
13 * 13 *
14 * This file is subject to the terms and conditions of the GNU General 14 * This file is subject to the terms and conditions of the GNU General
15 * Public License. See the file COPYING in the main directory of this 15 * Public License. See the file COPYING in the main directory of this
16 * archive for more details. 16 * archive for more details.
17 * 17 *
18 */ 18 */
19 19
20 #include <linux/module.h> 20 #include <linux/module.h>
21 #include <linux/signal.h> 21 #include <linux/signal.h>
22 #include <linux/sched.h> 22 #include <linux/sched.h>
23 #include <linux/kernel.h> 23 #include <linux/kernel.h>
24 #include <linux/errno.h> 24 #include <linux/errno.h>
25 #include <linux/string.h> 25 #include <linux/string.h>
26 #include <linux/types.h> 26 #include <linux/types.h>
27 #include <linux/ptrace.h> 27 #include <linux/ptrace.h>
28 #include <linux/mman.h> 28 #include <linux/mman.h>
29 #include <linux/mm.h> 29 #include <linux/mm.h>
30 #include <linux/interrupt.h> 30 #include <linux/interrupt.h>
31 31
32 #include <asm/page.h> 32 #include <asm/page.h>
33 #include <asm/pgtable.h> 33 #include <asm/pgtable.h>
34 #include <asm/mmu.h> 34 #include <asm/mmu.h>
35 #include <asm/mmu_context.h> 35 #include <linux/mmu_context.h>
36 #include <linux/uaccess.h> 36 #include <linux/uaccess.h>
37 #include <asm/exceptions.h> 37 #include <asm/exceptions.h>
38 38
39 static unsigned long pte_misses; /* updated by do_page_fault() */ 39 static unsigned long pte_misses; /* updated by do_page_fault() */
40 static unsigned long pte_errors; /* updated by do_page_fault() */ 40 static unsigned long pte_errors; /* updated by do_page_fault() */
41 41
42 /* 42 /*
43 * Check whether the instruction at regs->pc is a store using 43 * Check whether the instruction at regs->pc is a store using
44 * an update addressing form which will update r1. 44 * an update addressing form which will update r1.
45 */ 45 */
46 static int store_updates_sp(struct pt_regs *regs) 46 static int store_updates_sp(struct pt_regs *regs)
47 { 47 {
48 unsigned int inst; 48 unsigned int inst;
49 49
50 if (get_user(inst, (unsigned int __user *)regs->pc)) 50 if (get_user(inst, (unsigned int __user *)regs->pc))
51 return 0; 51 return 0;
52 /* check for 1 in the rD field */ 52 /* check for 1 in the rD field */
53 if (((inst >> 21) & 0x1f) != 1) 53 if (((inst >> 21) & 0x1f) != 1)
54 return 0; 54 return 0;
55 /* check for store opcodes */ 55 /* check for store opcodes */
56 if ((inst & 0xd0000000) == 0xd0000000) 56 if ((inst & 0xd0000000) == 0xd0000000)
57 return 1; 57 return 1;
58 return 0; 58 return 0;
59 } 59 }
60 60
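store_updates_sp() above decides from a raw instruction word whether the faulting store would itself update r1: bits 25:21 (the rD field) must name r1, and the opcode must fall into the store group matched by the 0xd0000000 mask. Here is a standalone sketch of the same predicate; the sample words are synthetic values built only to exercise the two tests, not claimed to be real MicroBlaze encodings:

#include <stdint.h>
#include <stdio.h>

static int updates_sp(uint32_t inst)
{
	if (((inst >> 21) & 0x1f) != 1)	/* rD field must be r1 */
		return 0;
	/* store opcode group, as matched in the fault handler */
	return (inst & 0xd0000000) == 0xd0000000;
}

int main(void)
{
	uint32_t hit = (0xdu << 28) | (1u << 21);	/* passes both tests */
	uint32_t miss = (0xdu << 28) | (2u << 21);	/* rD is not r1 */

	printf("hit=%d miss=%d\n", updates_sp(hit), updates_sp(miss));
	return 0;
}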
61 61
62 /* 62 /*
63 * bad_page_fault is called when we have a bad access from the kernel. 63 * bad_page_fault is called when we have a bad access from the kernel.
64 * It is called from do_page_fault above and from some of the procedures 64 * It is called from do_page_fault above and from some of the procedures
65 * in traps.c. 65 * in traps.c.
66 */ 66 */
67 void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) 67 void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
68 { 68 {
69 const struct exception_table_entry *fixup; 69 const struct exception_table_entry *fixup;
70 /* MS: no context */ 70 /* MS: no context */
71 /* Are we prepared to handle this fault? */ 71 /* Are we prepared to handle this fault? */
72 fixup = search_exception_tables(regs->pc); 72 fixup = search_exception_tables(regs->pc);
73 if (fixup) { 73 if (fixup) {
74 regs->pc = fixup->fixup; 74 regs->pc = fixup->fixup;
75 return; 75 return;
76 } 76 }
77 77
78 /* kernel has accessed a bad area */ 78 /* kernel has accessed a bad area */
79 die("kernel access of bad area", regs, sig); 79 die("kernel access of bad area", regs, sig);
80 } 80 }
81 81
82 /* 82 /*
83 * The error_code parameter is ESR for a data fault, 83 * The error_code parameter is ESR for a data fault,
84 * 0 for an instruction fault. 84 * 0 for an instruction fault.
85 */ 85 */
86 void do_page_fault(struct pt_regs *regs, unsigned long address, 86 void do_page_fault(struct pt_regs *regs, unsigned long address,
87 unsigned long error_code) 87 unsigned long error_code)
88 { 88 {
89 struct vm_area_struct *vma; 89 struct vm_area_struct *vma;
90 struct mm_struct *mm = current->mm; 90 struct mm_struct *mm = current->mm;
91 siginfo_t info; 91 siginfo_t info;
92 int code = SEGV_MAPERR; 92 int code = SEGV_MAPERR;
93 int is_write = error_code & ESR_S; 93 int is_write = error_code & ESR_S;
94 int fault; 94 int fault;
95 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | 95 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
96 (is_write ? FAULT_FLAG_WRITE : 0); 96 (is_write ? FAULT_FLAG_WRITE : 0);
97 97
98 regs->ear = address; 98 regs->ear = address;
99 regs->esr = error_code; 99 regs->esr = error_code;
100 100
101 /* On a kernel SLB miss we can only check for a valid exception entry */ 101 /* On a kernel SLB miss we can only check for a valid exception entry */
102 if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) { 102 if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) {
103 printk(KERN_WARNING "kernel task_size exceeded"); 103 pr_warn("kernel task_size exceeded");
104 _exception(SIGSEGV, regs, code, address); 104 _exception(SIGSEGV, regs, code, address);
105 } 105 }
106 106
107 /* for instr TLB miss and instr storage exception ESR_S is undefined */ 107 /* for instr TLB miss and instr storage exception ESR_S is undefined */
108 if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11) 108 if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
109 is_write = 0; 109 is_write = 0;
110 110
111 if (unlikely(in_atomic() || !mm)) { 111 if (unlikely(in_atomic() || !mm)) {
112 if (kernel_mode(regs)) 112 if (kernel_mode(regs))
113 goto bad_area_nosemaphore; 113 goto bad_area_nosemaphore;
114 114
115 /* in_atomic() in user mode is really bad, 115 /* in_atomic() in user mode is really bad,
116 as is current->mm == NULL. */ 116 as is current->mm == NULL. */
117 printk(KERN_EMERG "Page fault in user mode with " 117 pr_emerg("Page fault in user mode with in_atomic(), mm = %p\n",
118 "in_atomic(), mm = %p\n", mm); 118 mm);
119 printk(KERN_EMERG "r15 = %lx MSR = %lx\n", 119 pr_emerg("r15 = %lx MSR = %lx\n",
120 regs->r15, regs->msr); 120 regs->r15, regs->msr);
121 die("Weird page fault", regs, SIGSEGV); 121 die("Weird page fault", regs, SIGSEGV);
122 } 122 }
123 123
124 /* When running in the kernel we expect faults to occur only to 124 /* When running in the kernel we expect faults to occur only to
125 * addresses in user space. All other faults represent errors in the 125 * addresses in user space. All other faults represent errors in the
126 * kernel and should generate an OOPS. Unfortunately, in the case of an 126 * kernel and should generate an OOPS. Unfortunately, in the case of an
127 * erroneous fault occurring in a code path which already holds mmap_sem 127 * erroneous fault occurring in a code path which already holds mmap_sem
128 * we will deadlock attempting to validate the fault against the 128 * we will deadlock attempting to validate the fault against the
129 * address space. Luckily the kernel only validly references user 129 * address space. Luckily the kernel only validly references user
130 * space from well defined areas of code, which are listed in the 130 * space from well defined areas of code, which are listed in the
131 * exceptions table. 131 * exceptions table.
132 * 132 *
133 * As the vast majority of faults will be valid we will only perform 133 * As the vast majority of faults will be valid we will only perform
134 * the source reference check when there is a possibility of a deadlock. 134 * the source reference check when there is a possibility of a deadlock.
135 * Attempt to lock the address space, if we cannot we then validate the 135 * Attempt to lock the address space, if we cannot we then validate the
136 * source. If this is invalid we can skip the address space check, 136 * source. If this is invalid we can skip the address space check,
137 * thus avoiding the deadlock. 137 * thus avoiding the deadlock.
138 */ 138 */
139 if (unlikely(!down_read_trylock(&mm->mmap_sem))) { 139 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
140 if (kernel_mode(regs) && !search_exception_tables(regs->pc)) 140 if (kernel_mode(regs) && !search_exception_tables(regs->pc))
141 goto bad_area_nosemaphore; 141 goto bad_area_nosemaphore;
142 142
143 retry: 143 retry:
144 down_read(&mm->mmap_sem); 144 down_read(&mm->mmap_sem);
145 } 145 }
146 146
147 vma = find_vma(mm, address); 147 vma = find_vma(mm, address);
148 if (unlikely(!vma)) 148 if (unlikely(!vma))
149 goto bad_area; 149 goto bad_area;
150 150
151 if (vma->vm_start <= address) 151 if (vma->vm_start <= address)
152 goto good_area; 152 goto good_area;
153 153
154 if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) 154 if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
155 goto bad_area; 155 goto bad_area;
156 156
157 if (unlikely(!is_write)) 157 if (unlikely(!is_write))
158 goto bad_area; 158 goto bad_area;
159 159
160 /* 160 /*
161 * N.B. The ABI allows programs to access up to 161 * N.B. The ABI allows programs to access up to
162 * a few hundred bytes below the stack pointer (TBD). 162 * a few hundred bytes below the stack pointer (TBD).
163 * The kernel signal delivery code writes up to about 1.5kB 163 * The kernel signal delivery code writes up to about 1.5kB
164 * below the stack pointer (r1) before decrementing it. 164 * below the stack pointer (r1) before decrementing it.
165 * The exec code can write slightly over 640kB to the stack 165 * The exec code can write slightly over 640kB to the stack
166 * before setting the user r1. Thus we allow the stack to 166 * before setting the user r1. Thus we allow the stack to
167 * expand to 1MB without further checks. 167 * expand to 1MB without further checks.
168 */ 168 */
169 if (unlikely(address + 0x100000 < vma->vm_end)) { 169 if (unlikely(address + 0x100000 < vma->vm_end)) {
170 170
171 /* get user regs even if this fault is in kernel mode */ 171 /* get user regs even if this fault is in kernel mode */
172 struct pt_regs *uregs = current->thread.regs; 172 struct pt_regs *uregs = current->thread.regs;
173 if (uregs == NULL) 173 if (uregs == NULL)
174 goto bad_area; 174 goto bad_area;
175 175
176 /* 176 /*
177 * A user-mode access to an address a long way below 177 * A user-mode access to an address a long way below
178 * the stack pointer is only valid if the instruction 178 * the stack pointer is only valid if the instruction
179 * is one which would update the stack pointer to the 179 * is one which would update the stack pointer to the
180 * address accessed if the instruction completed, 180 * address accessed if the instruction completed,
181 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb 181 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
182 * (or the byte, halfword, float or double forms). 182 * (or the byte, halfword, float or double forms).
183 * 183 *
184 * If we don't check this then any write to the area 184 * If we don't check this then any write to the area
185 * between the last mapped region and the stack will 185 * between the last mapped region and the stack will
186 * expand the stack rather than segfaulting. 186 * expand the stack rather than segfaulting.
187 */ 187 */
188 if (address + 2048 < uregs->r1 188 if (address + 2048 < uregs->r1
189 && (kernel_mode(regs) || !store_updates_sp(regs))) 189 && (kernel_mode(regs) || !store_updates_sp(regs)))
190 goto bad_area; 190 goto bad_area;
191 } 191 }
192 if (expand_stack(vma, address)) 192 if (expand_stack(vma, address))
193 goto bad_area; 193 goto bad_area;
194 194
195 good_area: 195 good_area:
196 code = SEGV_ACCERR; 196 code = SEGV_ACCERR;
197 197
198 /* a write */ 198 /* a write */
199 if (unlikely(is_write)) { 199 if (unlikely(is_write)) {
200 if (unlikely(!(vma->vm_flags & VM_WRITE))) 200 if (unlikely(!(vma->vm_flags & VM_WRITE)))
201 goto bad_area; 201 goto bad_area;
202 /* a read */ 202 /* a read */
203 } else { 203 } else {
204 /* protection fault */ 204 /* protection fault */
205 if (unlikely(error_code & 0x08000000)) 205 if (unlikely(error_code & 0x08000000))
206 goto bad_area; 206 goto bad_area;
207 if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC)))) 207 if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC))))
208 goto bad_area; 208 goto bad_area;
209 } 209 }
210 210
211 /* 211 /*
212 * If for any reason at all we couldn't handle the fault, 212 * If for any reason at all we couldn't handle the fault,
213 * make sure we exit gracefully rather than endlessly redo 213 * make sure we exit gracefully rather than endlessly redo
214 * the fault. 214 * the fault.
215 */ 215 */
216 fault = handle_mm_fault(mm, vma, address, flags); 216 fault = handle_mm_fault(mm, vma, address, flags);
217 217
218 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) 218 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
219 return; 219 return;
220 220
221 if (unlikely(fault & VM_FAULT_ERROR)) { 221 if (unlikely(fault & VM_FAULT_ERROR)) {
222 if (fault & VM_FAULT_OOM) 222 if (fault & VM_FAULT_OOM)
223 goto out_of_memory; 223 goto out_of_memory;
224 else if (fault & VM_FAULT_SIGBUS) 224 else if (fault & VM_FAULT_SIGBUS)
225 goto do_sigbus; 225 goto do_sigbus;
226 BUG(); 226 BUG();
227 } 227 }
228 228
229 if (flags & FAULT_FLAG_ALLOW_RETRY) { 229 if (flags & FAULT_FLAG_ALLOW_RETRY) {
230 if (unlikely(fault & VM_FAULT_MAJOR)) 230 if (unlikely(fault & VM_FAULT_MAJOR))
231 current->maj_flt++; 231 current->maj_flt++;
232 else 232 else
233 current->min_flt++; 233 current->min_flt++;
234 if (fault & VM_FAULT_RETRY) { 234 if (fault & VM_FAULT_RETRY) {
235 flags &= ~FAULT_FLAG_ALLOW_RETRY; 235 flags &= ~FAULT_FLAG_ALLOW_RETRY;
236 flags |= FAULT_FLAG_TRIED; 236 flags |= FAULT_FLAG_TRIED;
237 237
238 /* 238 /*
239 * No need to up_read(&mm->mmap_sem) as we would 239 * No need to up_read(&mm->mmap_sem) as we would
240 * have already released it in __lock_page_or_retry 240 * have already released it in __lock_page_or_retry
241 * in mm/filemap.c. 241 * in mm/filemap.c.
242 */ 242 */
243 243
244 goto retry; 244 goto retry;
245 } 245 }
246 } 246 }
247 247
248 up_read(&mm->mmap_sem); 248 up_read(&mm->mmap_sem);
249 249
250 /* 250 /*
251 * keep track of tlb+htab misses that are good addrs but 251 * keep track of tlb+htab misses that are good addrs but
252 * just need pte's created via handle_mm_fault() 252 * just need pte's created via handle_mm_fault()
253 * -- Cort 253 * -- Cort
254 */ 254 */
255 pte_misses++; 255 pte_misses++;
256 return; 256 return;
257 257
258 bad_area: 258 bad_area:
259 up_read(&mm->mmap_sem); 259 up_read(&mm->mmap_sem);
260 260
261 bad_area_nosemaphore: 261 bad_area_nosemaphore:
262 pte_errors++; 262 pte_errors++;
263 263
264 /* User mode accesses cause a SIGSEGV */ 264 /* User mode accesses cause a SIGSEGV */
265 if (user_mode(regs)) { 265 if (user_mode(regs)) {
266 _exception(SIGSEGV, regs, code, address); 266 _exception(SIGSEGV, regs, code, address);
267 /* info.si_signo = SIGSEGV; 267 /* info.si_signo = SIGSEGV;
268 info.si_errno = 0; 268 info.si_errno = 0;
269 info.si_code = code; 269 info.si_code = code;
270 info.si_addr = (void *) address; 270 info.si_addr = (void *) address;
271 force_sig_info(SIGSEGV, &info, current);*/ 271 force_sig_info(SIGSEGV, &info, current);*/
272 return; 272 return;
273 } 273 }
274 274
275 bad_page_fault(regs, address, SIGSEGV); 275 bad_page_fault(regs, address, SIGSEGV);
276 return; 276 return;
277 277
278 /* 278 /*
279 * We ran out of memory, or some other thing happened to us that made 279 * We ran out of memory, or some other thing happened to us that made
280 * us unable to handle the page fault gracefully. 280 * us unable to handle the page fault gracefully.
281 */ 281 */
282 out_of_memory: 282 out_of_memory:
283 up_read(&mm->mmap_sem); 283 up_read(&mm->mmap_sem);
284 if (!user_mode(regs)) 284 if (!user_mode(regs))
285 bad_page_fault(regs, address, SIGKILL); 285 bad_page_fault(regs, address, SIGKILL);
286 else 286 else
287 pagefault_out_of_memory(); 287 pagefault_out_of_memory();
288 return; 288 return;
289 289
290 do_sigbus: 290 do_sigbus:
291 up_read(&mm->mmap_sem); 291 up_read(&mm->mmap_sem);
292 if (user_mode(regs)) { 292 if (user_mode(regs)) {
293 info.si_signo = SIGBUS; 293 info.si_signo = SIGBUS;
294 info.si_errno = 0; 294 info.si_errno = 0;
295 info.si_code = BUS_ADRERR; 295 info.si_code = BUS_ADRERR;
296 info.si_addr = (void __user *)address; 296 info.si_addr = (void __user *)address;
297 force_sig_info(SIGBUS, &info, current); 297 force_sig_info(SIGBUS, &info, current);
298 return; 298 return;
299 } 299 }
300 bad_page_fault(regs, address, SIGBUS); 300 bad_page_fault(regs, address, SIGBUS);
301 } 301 }
302 302
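The stack-expansion checks above combine two heuristics: a fault within roughly 1MB of the stack VMA end may grow the stack unconditionally, while a fault further below must either land within 2048 bytes below r1 or come from a store-with-update instruction (the store_updates_sp() decode). A worked example with made-up addresses:

#include <stdio.h>

int main(void)
{
	unsigned long vm_end = 0x40000000;	/* hypothetical stack VMA end */
	unsigned long r1 = 0x3fe00800;	/* hypothetical stack pointer */
	unsigned long addr = 0x3fe00000;	/* hypothetical faulting address */

	/* More than ~1MB below the VMA end: extra checks are required. */
	int far_below = addr + 0x100000 < vm_end;
	/* Within the 2048-byte redzone below r1: no decode needed. */
	int near_sp = addr + 2048 >= r1;

	printf("expand without decoding the instruction: %d\n",
	       !far_below || near_sp);
	return 0;
}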
arch/microblaze/mm/init.c
1 /* 1 /*
2 * Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu> 2 * Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2006 Atmark Techno, Inc. 3 * Copyright (C) 2006 Atmark Techno, Inc.
4 * 4 *
5 * This file is subject to the terms and conditions of the GNU General Public 5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive 6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details. 7 * for more details.
8 */ 8 */
9 9
10 #include <linux/bootmem.h> 10 #include <linux/bootmem.h>
11 #include <linux/init.h> 11 #include <linux/init.h>
12 #include <linux/kernel.h> 12 #include <linux/kernel.h>
13 #include <linux/memblock.h> 13 #include <linux/memblock.h>
14 #include <linux/mm.h> /* mem_init */ 14 #include <linux/mm.h> /* mem_init */
15 #include <linux/initrd.h> 15 #include <linux/initrd.h>
16 #include <linux/pagemap.h> 16 #include <linux/pagemap.h>
17 #include <linux/pfn.h> 17 #include <linux/pfn.h>
18 #include <linux/slab.h> 18 #include <linux/slab.h>
19 #include <linux/swap.h> 19 #include <linux/swap.h>
20 #include <linux/export.h> 20 #include <linux/export.h>
21 21
22 #include <asm/page.h> 22 #include <asm/page.h>
23 #include <asm/mmu_context.h> 23 #include <asm/mmu_context.h>
24 #include <asm/pgalloc.h> 24 #include <asm/pgalloc.h>
25 #include <asm/sections.h> 25 #include <asm/sections.h>
26 #include <asm/tlb.h> 26 #include <asm/tlb.h>
27 #include <asm/fixmap.h> 27 #include <asm/fixmap.h>
28 28
29 /* Use for MMU and noMMU because of PCI generic code */ 29 /* Use for MMU and noMMU because of PCI generic code */
30 int mem_init_done; 30 int mem_init_done;
31 31
32 #ifndef CONFIG_MMU 32 #ifndef CONFIG_MMU
33 unsigned int __page_offset; 33 unsigned int __page_offset;
34 EXPORT_SYMBOL(__page_offset); 34 EXPORT_SYMBOL(__page_offset);
35 35
36 #else 36 #else
37 static int init_bootmem_done; 37 static int init_bootmem_done;
38 #endif /* CONFIG_MMU */ 38 #endif /* CONFIG_MMU */
39 39
40 char *klimit = _end; 40 char *klimit = _end;
41 41
42 /* 42 /*
43 * Initialize the bootmem system and give it all the memory we 43 * Initialize the bootmem system and give it all the memory we
44 * have available. 44 * have available.
45 */ 45 */
46 unsigned long memory_start; 46 unsigned long memory_start;
47 EXPORT_SYMBOL(memory_start); 47 EXPORT_SYMBOL(memory_start);
48 unsigned long memory_size; 48 unsigned long memory_size;
49 EXPORT_SYMBOL(memory_size); 49 EXPORT_SYMBOL(memory_size);
50 unsigned long lowmem_size; 50 unsigned long lowmem_size;
51 51
52 #ifdef CONFIG_HIGHMEM 52 #ifdef CONFIG_HIGHMEM
53 pte_t *kmap_pte; 53 pte_t *kmap_pte;
54 EXPORT_SYMBOL(kmap_pte); 54 EXPORT_SYMBOL(kmap_pte);
55 pgprot_t kmap_prot; 55 pgprot_t kmap_prot;
56 EXPORT_SYMBOL(kmap_prot); 56 EXPORT_SYMBOL(kmap_prot);
57 57
58 static inline pte_t *virt_to_kpte(unsigned long vaddr) 58 static inline pte_t *virt_to_kpte(unsigned long vaddr)
59 { 59 {
60 return pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), 60 return pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr),
61 vaddr), vaddr); 61 vaddr), vaddr);
62 } 62 }
63 63
64 static void __init highmem_init(void) 64 static void __init highmem_init(void)
65 { 65 {
66 pr_debug("%x\n", (u32)PKMAP_BASE); 66 pr_debug("%x\n", (u32)PKMAP_BASE);
67 map_page(PKMAP_BASE, 0, 0); /* XXX gross */ 67 map_page(PKMAP_BASE, 0, 0); /* XXX gross */
68 pkmap_page_table = virt_to_kpte(PKMAP_BASE); 68 pkmap_page_table = virt_to_kpte(PKMAP_BASE);
69 69
70 kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN)); 70 kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
71 kmap_prot = PAGE_KERNEL; 71 kmap_prot = PAGE_KERNEL;
72 } 72 }
73 73
74 static unsigned long highmem_setup(void) 74 static unsigned long highmem_setup(void)
75 { 75 {
76 unsigned long pfn; 76 unsigned long pfn;
77 unsigned long reservedpages = 0; 77 unsigned long reservedpages = 0;
78 78
79 for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) { 79 for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) {
80 struct page *page = pfn_to_page(pfn); 80 struct page *page = pfn_to_page(pfn);
81 81
82 /* FIXME not sure about */ 82 /* FIXME not sure about */
83 if (memblock_is_reserved(pfn << PAGE_SHIFT)) 83 if (memblock_is_reserved(pfn << PAGE_SHIFT))
84 continue; 84 continue;
85 ClearPageReserved(page); 85 ClearPageReserved(page);
86 init_page_count(page); 86 init_page_count(page);
87 __free_page(page); 87 __free_page(page);
88 totalhigh_pages++; 88 totalhigh_pages++;
89 reservedpages++; 89 reservedpages++;
90 } 90 }
91 totalram_pages += totalhigh_pages; 91 totalram_pages += totalhigh_pages;
92 printk(KERN_INFO "High memory: %luk\n", 92 pr_info("High memory: %luk\n",
93 totalhigh_pages << (PAGE_SHIFT-10)); 93 totalhigh_pages << (PAGE_SHIFT-10));
94 94
95 return reservedpages; 95 return reservedpages;
96 } 96 }
97 #endif /* CONFIG_HIGHMEM */ 97 #endif /* CONFIG_HIGHMEM */
98 98
99 /* 99 /*
100 * paging_init() sets up the page tables - in fact we've already done this. 100 * paging_init() sets up the page tables - in fact we've already done this.
101 */ 101 */
102 static void __init paging_init(void) 102 static void __init paging_init(void)
103 { 103 {
104 unsigned long zones_size[MAX_NR_ZONES]; 104 unsigned long zones_size[MAX_NR_ZONES];
105 #ifdef CONFIG_MMU 105 #ifdef CONFIG_MMU
106 int idx; 106 int idx;
107 107
108 /* Setup fixmaps */ 108 /* Setup fixmaps */
109 for (idx = 0; idx < __end_of_fixed_addresses; idx++) 109 for (idx = 0; idx < __end_of_fixed_addresses; idx++)
110 clear_fixmap(idx); 110 clear_fixmap(idx);
111 #endif 111 #endif
112 112
113 /* Clear every zone */ 113 /* Clear every zone */
114 memset(zones_size, 0, sizeof(zones_size)); 114 memset(zones_size, 0, sizeof(zones_size));
115 115
116 #ifdef CONFIG_HIGHMEM 116 #ifdef CONFIG_HIGHMEM
117 highmem_init(); 117 highmem_init();
118 118
119 zones_size[ZONE_DMA] = max_low_pfn; 119 zones_size[ZONE_DMA] = max_low_pfn;
120 zones_size[ZONE_HIGHMEM] = max_pfn; 120 zones_size[ZONE_HIGHMEM] = max_pfn;
121 #else 121 #else
122 zones_size[ZONE_DMA] = max_pfn; 122 zones_size[ZONE_DMA] = max_pfn;
123 #endif 123 #endif
124 124
125 /* We don't have holes in the memory map */ 125 /* We don't have holes in the memory map */
126 free_area_init_nodes(zones_size); 126 free_area_init_nodes(zones_size);
127 } 127 }
128 128
129 void __init setup_memory(void) 129 void __init setup_memory(void)
130 { 130 {
131 unsigned long map_size; 131 unsigned long map_size;
132 struct memblock_region *reg; 132 struct memblock_region *reg;
133 133
134 #ifndef CONFIG_MMU 134 #ifndef CONFIG_MMU
135 u32 kernel_align_start, kernel_align_size; 135 u32 kernel_align_start, kernel_align_size;
136 136
137 /* Find the main memory region that contains the kernel */ 137 /* Find the main memory region that contains the kernel */
138 for_each_memblock(memory, reg) { 138 for_each_memblock(memory, reg) {
139 memory_start = (u32)reg->base; 139 memory_start = (u32)reg->base;
140 lowmem_size = reg->size; 140 lowmem_size = reg->size;
141 if ((memory_start <= (u32)_text) && 141 if ((memory_start <= (u32)_text) &&
142 ((u32)_text <= (memory_start + lowmem_size - 1))) { 142 ((u32)_text <= (memory_start + lowmem_size - 1))) {
143 memory_size = lowmem_size; 143 memory_size = lowmem_size;
144 PAGE_OFFSET = memory_start; 144 PAGE_OFFSET = memory_start;
145 printk(KERN_INFO "%s: Main mem: 0x%x, " 145 pr_info("%s: Main mem: 0x%x, size 0x%08x\n",
146 "size 0x%08x\n", __func__, (u32) memory_start, 146 __func__, (u32) memory_start,
147 (u32) memory_size); 147 (u32) memory_size);
148 break; 148 break;
149 } 149 }
150 } 150 }
151 151
152 if (!memory_start || !memory_size) { 152 if (!memory_start || !memory_size) {
153 panic("%s: Missing memory setting 0x%08x, size=0x%08x\n", 153 panic("%s: Missing memory setting 0x%08x, size=0x%08x\n",
154 __func__, (u32) memory_start, (u32) memory_size); 154 __func__, (u32) memory_start, (u32) memory_size);
155 } 155 }
156 156
157 /* reserve the region that contains the kernel */ 157 /* reserve the region that contains the kernel */
158 kernel_align_start = PAGE_DOWN((u32)_text); 158 kernel_align_start = PAGE_DOWN((u32)_text);
159 /* ALIGN can be removed because _end in vmlinux.lds.S is aligned */ 159 /* ALIGN can be removed because _end in vmlinux.lds.S is aligned */
160 kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start; 160 kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
161 printk(KERN_INFO "%s: kernel addr:0x%08x-0x%08x size=0x%08x\n", 161 pr_info("%s: kernel addr:0x%08x-0x%08x size=0x%08x\n",
162 __func__, kernel_align_start, kernel_align_start 162 __func__, kernel_align_start, kernel_align_start
163 + kernel_align_size, kernel_align_size); 163 + kernel_align_size, kernel_align_size);
164 memblock_reserve(kernel_align_start, kernel_align_size); 164 memblock_reserve(kernel_align_start, kernel_align_size);
165 #endif 165 #endif
166 /* 166 /*
167 * Kernel: 167 * Kernel:
168 * start: base phys address of kernel - page align 168 * start: base phys address of kernel - page align
169 * end: base phys address of kernel - page align 169 * end: base phys address of kernel - page align
170 * 170 *
171 * min_low_pfn - the first page (mm/bootmem.c - node_boot_start) 171 * min_low_pfn - the first page (mm/bootmem.c - node_boot_start)
172 * max_low_pfn 172 * max_low_pfn
173 * max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn) 173 * max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn)
174 * num_physpages - number of all pages 174 * num_physpages - number of all pages
175 */ 175 */
176 176
177 /* memory start is from the kernel end (aligned) to higher addr */ 177 /* memory start is from the kernel end (aligned) to higher addr */
178 min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */ 178 min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */
179 /* RAM is assumed contiguous */ 179 /* RAM is assumed contiguous */
180 num_physpages = max_mapnr = memory_size >> PAGE_SHIFT; 180 num_physpages = max_mapnr = memory_size >> PAGE_SHIFT;
181 max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT; 181 max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
182 max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT; 182 max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;
183 183
184 printk(KERN_INFO "%s: max_mapnr: %#lx\n", __func__, max_mapnr); 184 pr_info("%s: max_mapnr: %#lx\n", __func__, max_mapnr);
185 printk(KERN_INFO "%s: min_low_pfn: %#lx\n", __func__, min_low_pfn); 185 pr_info("%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
186 printk(KERN_INFO "%s: max_low_pfn: %#lx\n", __func__, max_low_pfn); 186 pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
187 printk(KERN_INFO "%s: max_pfn: %#lx\n", __func__, max_pfn); 187 pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);
188 188
189 /* 189 /*
190 * Find an area to use for the bootmem bitmap. 190 * Find an area to use for the bootmem bitmap.
191 * We look for the first area which is at least 191 * We look for the first area which is at least
192 * 128kB in length (128kB is enough for a bitmap 192 * 128kB in length (128kB is enough for a bitmap
193 * for 4GB of memory, using 4kB pages), plus 1 page 193 * for 4GB of memory, using 4kB pages), plus 1 page
194 * (in case the address isn't page-aligned). 194 * (in case the address isn't page-aligned).
195 */ 195 */
196 map_size = init_bootmem_node(NODE_DATA(0), 196 map_size = init_bootmem_node(NODE_DATA(0),
197 PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn); 197 PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn);
198 memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size); 198 memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);
199 199
200 /* Add active regions with valid PFNs */ 200 /* Add active regions with valid PFNs */
201 for_each_memblock(memory, reg) { 201 for_each_memblock(memory, reg) {
202 unsigned long start_pfn, end_pfn; 202 unsigned long start_pfn, end_pfn;
203 203
204 start_pfn = memblock_region_memory_base_pfn(reg); 204 start_pfn = memblock_region_memory_base_pfn(reg);
205 end_pfn = memblock_region_memory_end_pfn(reg); 205 end_pfn = memblock_region_memory_end_pfn(reg);
206 memblock_set_node(start_pfn << PAGE_SHIFT, 206 memblock_set_node(start_pfn << PAGE_SHIFT,
207 (end_pfn - start_pfn) << PAGE_SHIFT, 0); 207 (end_pfn - start_pfn) << PAGE_SHIFT, 0);
208 } 208 }
209 209
210 /* the free bootmem region covers the whole of main memory */ 210 /* the free bootmem region covers the whole of main memory */
211 free_bootmem_with_active_regions(0, max_low_pfn); 211 free_bootmem_with_active_regions(0, max_low_pfn);
212 212
213 /* reserve allocated blocks */ 213 /* reserve allocated blocks */
214 for_each_memblock(reserved, reg) { 214 for_each_memblock(reserved, reg) {
215 unsigned long top = reg->base + reg->size - 1; 215 unsigned long top = reg->base + reg->size - 1;
216 216
217 pr_debug("reserved - 0x%08x-0x%08x, %lx, %lx\n", 217 pr_debug("reserved - 0x%08x-0x%08x, %lx, %lx\n",
218 (u32) reg->base, (u32) reg->size, top, 218 (u32) reg->base, (u32) reg->size, top,
219 memory_start + lowmem_size - 1); 219 memory_start + lowmem_size - 1);
220 220
221 if (top <= (memory_start + lowmem_size - 1)) { 221 if (top <= (memory_start + lowmem_size - 1)) {
222 reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); 222 reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
223 } else if (reg->base < (memory_start + lowmem_size - 1)) { 223 } else if (reg->base < (memory_start + lowmem_size - 1)) {
224 unsigned long trunc_size = memory_start + lowmem_size - 224 unsigned long trunc_size = memory_start + lowmem_size -
225 reg->base; 225 reg->base;
226 reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT); 226 reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
227 } 227 }
228 } 228 }
229 229
230 /* XXX need to clip this if using highmem? */ 230 /* XXX need to clip this if using highmem? */
231 sparse_memory_present_with_active_regions(0); 231 sparse_memory_present_with_active_regions(0);
232 232
233 #ifdef CONFIG_MMU 233 #ifdef CONFIG_MMU
234 init_bootmem_done = 1; 234 init_bootmem_done = 1;
235 #endif 235 #endif
236 paging_init(); 236 paging_init();
237 } 237 }
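
The pfn arithmetic and the bitmap-size claim in the comments above can be checked in isolation. A minimal userspace sketch, assuming PAGE_SHIFT = 12 (4kB pages, as on MicroBlaze) and made-up sample addresses:

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4kB pages */

int main(void)
{
	unsigned long memory_start = 0x80000000UL; /* hypothetical base */
	unsigned long memory_size  = 0x20000000UL; /* hypothetical 512MB */
	unsigned long lowmem_size  = 0x18000000UL; /* hypothetical 384MB */

	printf("min_low_pfn: %#lx\n", memory_start >> PAGE_SHIFT);
	printf("max_low_pfn: %#lx\n",
	       (memory_start + lowmem_size) >> PAGE_SHIFT);
	printf("max_pfn:     %#lx\n",
	       (memory_start + memory_size) >> PAGE_SHIFT);

	/* The bootmem comment's claim: one bit per 4kB page of a 4GB
	 * space is (4GB / 4kB) / 8 bytes = 128kB of bitmap. */
	printf("bitmap for 4GB: %llukB\n",
	       ((4ULL << 30) >> PAGE_SHIFT) / 8 / 1024);
	return 0;
}
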
238 238
239 void free_init_pages(char *what, unsigned long begin, unsigned long end) 239 void free_init_pages(char *what, unsigned long begin, unsigned long end)
240 { 240 {
241 unsigned long addr; 241 unsigned long addr;
242 242
243 for (addr = begin; addr < end; addr += PAGE_SIZE) { 243 for (addr = begin; addr < end; addr += PAGE_SIZE) {
244 ClearPageReserved(virt_to_page(addr)); 244 ClearPageReserved(virt_to_page(addr));
245 init_page_count(virt_to_page(addr)); 245 init_page_count(virt_to_page(addr));
246 free_page(addr); 246 free_page(addr);
247 totalram_pages++; 247 totalram_pages++;
248 } 248 }
249 printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); 249 pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
250 } 250 }
251 251
252 #ifdef CONFIG_BLK_DEV_INITRD 252 #ifdef CONFIG_BLK_DEV_INITRD
253 void free_initrd_mem(unsigned long start, unsigned long end) 253 void free_initrd_mem(unsigned long start, unsigned long end)
254 { 254 {
255 int pages = 0; 255 int pages = 0;
256 for (; start < end; start += PAGE_SIZE) { 256 for (; start < end; start += PAGE_SIZE) {
257 ClearPageReserved(virt_to_page(start)); 257 ClearPageReserved(virt_to_page(start));
258 init_page_count(virt_to_page(start)); 258 init_page_count(virt_to_page(start));
259 free_page(start); 259 free_page(start);
260 totalram_pages++; 260 totalram_pages++;
261 pages++; 261 pages++;
262 } 262 }
263 printk(KERN_NOTICE "Freeing initrd memory: %dk freed\n", 263 pr_notice("Freeing initrd memory: %dk freed\n",
264 (int)(pages * (PAGE_SIZE / 1024))); 264 (int)(pages * (PAGE_SIZE / 1024)));
265 } 265 }
266 #endif 266 #endif
267 267
268 void free_initmem(void) 268 void free_initmem(void)
269 { 269 {
270 free_init_pages("unused kernel memory", 270 free_init_pages("unused kernel memory",
271 (unsigned long)(&__init_begin), 271 (unsigned long)(&__init_begin),
272 (unsigned long)(&__init_end)); 272 (unsigned long)(&__init_end));
273 } 273 }
274 274
275 void __init mem_init(void) 275 void __init mem_init(void)
276 { 276 {
277 pg_data_t *pgdat; 277 pg_data_t *pgdat;
278 unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize; 278 unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
279 279
280 high_memory = (void *)__va(memory_start + lowmem_size - 1); 280 high_memory = (void *)__va(memory_start + lowmem_size - 1);
281 281
282 /* this will put all memory onto the freelists */ 282 /* this will put all memory onto the freelists */
283 totalram_pages += free_all_bootmem(); 283 totalram_pages += free_all_bootmem();
284 284
285 for_each_online_pgdat(pgdat) { 285 for_each_online_pgdat(pgdat) {
286 unsigned long i; 286 unsigned long i;
287 struct page *page; 287 struct page *page;
288 288
289 for (i = 0; i < pgdat->node_spanned_pages; i++) { 289 for (i = 0; i < pgdat->node_spanned_pages; i++) {
290 if (!pfn_valid(pgdat->node_start_pfn + i)) 290 if (!pfn_valid(pgdat->node_start_pfn + i))
291 continue; 291 continue;
292 page = pgdat_page_nr(pgdat, i); 292 page = pgdat_page_nr(pgdat, i);
293 if (PageReserved(page)) 293 if (PageReserved(page))
294 reservedpages++; 294 reservedpages++;
295 } 295 }
296 } 296 }
297 297
298 #ifdef CONFIG_HIGHMEM 298 #ifdef CONFIG_HIGHMEM
299 reservedpages -= highmem_setup(); 299 reservedpages -= highmem_setup();
300 #endif 300 #endif
301 301
302 codesize = (unsigned long)&_sdata - (unsigned long)&_stext; 302 codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
303 datasize = (unsigned long)&_edata - (unsigned long)&_sdata; 303 datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
304 initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin; 304 initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
305 bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start; 305 bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
306 306
307 pr_info("Memory: %luk/%luk available (%luk kernel code, " 307 pr_info("Memory: %luk/%luk available (%luk kernel code, ",
308 "%luk reserved, %luk data, %luk bss, %luk init)\n",
309 nr_free_pages() << (PAGE_SHIFT-10), 308 nr_free_pages() << (PAGE_SHIFT-10),
310 num_physpages << (PAGE_SHIFT-10), 309 num_physpages << (PAGE_SHIFT-10),
311 codesize >> 10, 310 codesize >> 10);
311 pr_cont("%luk reserved, %luk data, %luk bss, %luk init)\n",
312 reservedpages << (PAGE_SHIFT-10), 312 reservedpages << (PAGE_SHIFT-10),
313 datasize >> 10, 313 datasize >> 10,
314 bsssize >> 10, 314 bsssize >> 10,
315 initsize >> 10); 315 initsize >> 10);
316 316
317 #ifdef CONFIG_MMU 317 #ifdef CONFIG_MMU
318 pr_info("Kernel virtual memory layout:\n"); 318 pr_info("Kernel virtual memory layout:\n");
319 pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP); 319 pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
320 #ifdef CONFIG_HIGHMEM 320 #ifdef CONFIG_HIGHMEM
321 pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n", 321 pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
322 PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP)); 322 PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
323 #endif /* CONFIG_HIGHMEM */ 323 #endif /* CONFIG_HIGHMEM */
324 pr_info(" * 0x%08lx..0x%08lx : early ioremap\n", 324 pr_info(" * 0x%08lx..0x%08lx : early ioremap\n",
325 ioremap_bot, ioremap_base); 325 ioremap_bot, ioremap_base);
326 pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n", 326 pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
327 (unsigned long)VMALLOC_START, VMALLOC_END); 327 (unsigned long)VMALLOC_START, VMALLOC_END);
328 #endif 328 #endif
329 mem_init_done = 1; 329 mem_init_done = 1;
330 } 330 }
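
The "<< (PAGE_SHIFT - 10)" shifts in the banner convert a page count into kilobytes: a page is 2^PAGE_SHIFT bytes and a kB is 2^10 bytes. A one-line sketch with 4kB pages:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long pages = 1000;
	/* 1000 pages * 4096 bytes / 1024 bytes-per-kB = 4000kB */
	printf("%lu pages = %luk\n", pages, pages << (PAGE_SHIFT - 10));
	return 0;
}
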
331 331
332 #ifndef CONFIG_MMU 332 #ifndef CONFIG_MMU
333 int page_is_ram(unsigned long pfn) 333 int page_is_ram(unsigned long pfn)
334 { 334 {
335 return __range_ok(pfn, 0); 335 return __range_ok(pfn, 0);
336 } 336 }
337 #else 337 #else
338 int page_is_ram(unsigned long pfn) 338 int page_is_ram(unsigned long pfn)
339 { 339 {
340 return pfn < max_low_pfn; 340 return pfn < max_low_pfn;
341 } 341 }
342 342
343 /* 343 /*
344 * Check for command-line options that affect what MMU_init will do. 344 * Check for command-line options that affect what MMU_init will do.
345 */ 345 */
346 static void mm_cmdline_setup(void) 346 static void mm_cmdline_setup(void)
347 { 347 {
348 unsigned long maxmem = 0; 348 unsigned long maxmem = 0;
349 char *p = cmd_line; 349 char *p = cmd_line;
350 350
351 /* Look for mem= option on command line */ 351 /* Look for mem= option on command line */
352 p = strstr(cmd_line, "mem="); 352 p = strstr(cmd_line, "mem=");
353 if (p) { 353 if (p) {
354 p += 4; 354 p += 4;
355 maxmem = memparse(p, &p); 355 maxmem = memparse(p, &p);
356 if (maxmem && memory_size > maxmem) { 356 if (maxmem && memory_size > maxmem) {
357 memory_size = maxmem; 357 memory_size = maxmem;
358 memblock.memory.regions[0].size = memory_size; 358 memblock.memory.regions[0].size = memory_size;
359 } 359 }
360 } 360 }
361 } 361 }
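
memparse() accepts a size with an optional K/M/G suffix, so mem=256M on the command line clamps memory_size to 256MB. A simplified userspace stand-in for it (a sketch only, not the kernel implementation, which lives in lib/cmdline.c and handles more cases):

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the kernel's memparse(): parse a number
 * with an optional K/M/G suffix. */
static unsigned long long memparse_sketch(const char *p, char **retptr)
{
	unsigned long long ret = strtoull(p, retptr, 0);

	switch (**retptr) {
	case 'G': case 'g':
		ret <<= 10; /* fall through */
	case 'M': case 'm':
		ret <<= 10; /* fall through */
	case 'K': case 'k':
		ret <<= 10;
		(*retptr)++;
	default:
		break;
	}
	return ret;
}

int main(void)
{
	char *end;
	printf("%llu\n", memparse_sketch("256M", &end)); /* 268435456 */
	return 0;
}
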
362 362
363 /* 363 /*
364 * MMU_init_hw does the chip-specific initialization of the MMU hardware. 364 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
365 */ 365 */
366 static void __init mmu_init_hw(void) 366 static void __init mmu_init_hw(void)
367 { 367 {
368 /* 368 /*
369 * The Zone Protection Register (ZPR) defines how protection will 369 * The Zone Protection Register (ZPR) defines how protection will
370 * be applied to every page which is a member of a given zone. At 370 * be applied to every page which is a member of a given zone. At
371 * present, we utilize only two of the zones. 371 * present, we utilize only two of the zones.
372 * The zone index bits (of ZSEL) in the PTE are used for software 372 * The zone index bits (of ZSEL) in the PTE are used for software
373 * indicators, except the LSB. For user access, zone 1 is used, 373 * indicators, except the LSB. For user access, zone 1 is used,
374 * for kernel access, zone 0 is used. We set all but zone 1 374 * for kernel access, zone 0 is used. We set all but zone 1
375 * to zero, allowing only kernel access as indicated in the PTE. 375 * to zero, allowing only kernel access as indicated in the PTE.
376 * For zone 1, we set a 01 binary (a value of 10 will not work) 376 * For zone 1, we set a 01 binary (a value of 10 will not work)
377 * to allow user access as indicated in the PTE. This also allows 377 * to allow user access as indicated in the PTE. This also allows
378 * kernel access as indicated in the PTE. 378 * kernel access as indicated in the PTE.
379 */ 379 */
380 __asm__ __volatile__ ("ori r11, r0, 0x10000000;" \ 380 __asm__ __volatile__ ("ori r11, r0, 0x10000000;" \
381 "mts rzpr, r11;" 381 "mts rzpr, r11;"
382 : : : "r11"); 382 : : : "r11");
383 } 383 }
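
The magic 0x10000000 written to rzpr packs sixteen 2-bit zone fields with zone 0 in the top bits; decoding it shows zone 0 = 00 (kernel-only, per the PTE) and zone 1 = 01 (user access allowed, per the PTE), exactly as the comment says. A sketch of the decode, assuming that bit layout:

#include <stdio.h>

int main(void)
{
	unsigned int zpr = 0x10000000;
	/* Zone 0 occupies bits 31..30, zone 1 bits 29..28, and so on,
	 * per the ZPR layout the comment above describes. */
	for (int zone = 0; zone < 4; zone++) {
		unsigned int field = (zpr >> (30 - 2 * zone)) & 0x3;
		printf("zone %d: %u%u\n", zone, field >> 1, field & 1);
	}
	return 0;
}
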
384 384
385 /* 385 /*
386 * MMU_init sets up the basic memory mappings for the kernel, 386 * MMU_init sets up the basic memory mappings for the kernel,
387 * including both RAM and possibly some I/O regions, 387 * including both RAM and possibly some I/O regions,
388 * and sets up the page tables and the MMU hardware ready to go. 388 * and sets up the page tables and the MMU hardware ready to go.
389 */ 389 */
390 390
391 /* called from head.S */ 391 /* called from head.S */
392 asmlinkage void __init mmu_init(void) 392 asmlinkage void __init mmu_init(void)
393 { 393 {
394 unsigned int kstart, ksize; 394 unsigned int kstart, ksize;
395 395
396 if (!memblock.reserved.cnt) { 396 if (!memblock.reserved.cnt) {
397 printk(KERN_EMERG "Error memory count\n"); 397 pr_emerg("Error memory count\n");
398 machine_restart(NULL); 398 machine_restart(NULL);
399 } 399 }
400 400
401 if ((u32) memblock.memory.regions[0].size < 0x400000) { 401 if ((u32) memblock.memory.regions[0].size < 0x400000) {
402 printk(KERN_EMERG "Memory must be greater than 4MB\n"); 402 pr_emerg("Memory must be greater than 4MB\n");
403 machine_restart(NULL); 403 machine_restart(NULL);
404 } 404 }
405 405
406 if ((u32) memblock.memory.regions[0].size < kernel_tlb) { 406 if ((u32) memblock.memory.regions[0].size < kernel_tlb) {
407 printk(KERN_EMERG "Kernel size is greater than memory node\n"); 407 pr_emerg("Kernel size is greater than memory node\n");
408 machine_restart(NULL); 408 machine_restart(NULL);
409 } 409 }
410 410
411 /* Find main memory where the kernel is */ 411 /* Find main memory where the kernel is */
412 memory_start = (u32) memblock.memory.regions[0].base; 412 memory_start = (u32) memblock.memory.regions[0].base;
413 lowmem_size = memory_size = (u32) memblock.memory.regions[0].size; 413 lowmem_size = memory_size = (u32) memblock.memory.regions[0].size;
414 414
415 if (lowmem_size > CONFIG_LOWMEM_SIZE) { 415 if (lowmem_size > CONFIG_LOWMEM_SIZE) {
416 lowmem_size = CONFIG_LOWMEM_SIZE; 416 lowmem_size = CONFIG_LOWMEM_SIZE;
417 #ifndef CONFIG_HIGHMEM 417 #ifndef CONFIG_HIGHMEM
418 memory_size = lowmem_size; 418 memory_size = lowmem_size;
419 #endif 419 #endif
420 } 420 }
421 421
422 mm_cmdline_setup(); /* FIXME parse args from command line - not used */ 422 mm_cmdline_setup(); /* FIXME parse args from command line - not used */
423 423
424 /* 424 /*
425 * Map out the kernel text/data/bss from the available physical 425 * Map out the kernel text/data/bss from the available physical
426 * memory. 426 * memory.
427 */ 427 */
428 kstart = __pa(CONFIG_KERNEL_START); /* kernel start */ 428 kstart = __pa(CONFIG_KERNEL_START); /* kernel start */
429 /* kernel size */ 429 /* kernel size */
430 ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START)); 430 ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START));
431 memblock_reserve(kstart, ksize); 431 memblock_reserve(kstart, ksize);
432 432
433 #if defined(CONFIG_BLK_DEV_INITRD) 433 #if defined(CONFIG_BLK_DEV_INITRD)
434 /* Remove the init RAM disk from the available memory. */ 434 /* Remove the init RAM disk from the available memory. */
435 /* if (initrd_start) { 435 /* if (initrd_start) {
436 mem_pieces_remove(&phys_avail, __pa(initrd_start), 436 mem_pieces_remove(&phys_avail, __pa(initrd_start),
437 initrd_end - initrd_start, 1); 437 initrd_end - initrd_start, 1);
438 }*/ 438 }*/
439 #endif /* CONFIG_BLK_DEV_INITRD */ 439 #endif /* CONFIG_BLK_DEV_INITRD */
440 440
441 /* Initialize the MMU hardware */ 441 /* Initialize the MMU hardware */
442 mmu_init_hw(); 442 mmu_init_hw();
443 443
444 /* Map in all of RAM starting at CONFIG_KERNEL_START */ 444 /* Map in all of RAM starting at CONFIG_KERNEL_START */
445 mapin_ram(); 445 mapin_ram();
446 446
447 /* Extend vmalloc and ioremap area as big as possible */ 447 /* Extend vmalloc and ioremap area as big as possible */
448 #ifdef CONFIG_HIGHMEM 448 #ifdef CONFIG_HIGHMEM
449 ioremap_base = ioremap_bot = PKMAP_BASE; 449 ioremap_base = ioremap_bot = PKMAP_BASE;
450 #else 450 #else
451 ioremap_base = ioremap_bot = FIXADDR_START; 451 ioremap_base = ioremap_bot = FIXADDR_START;
452 #endif 452 #endif
453 453
454 /* Initialize the context management stuff */ 454 /* Initialize the context management stuff */
455 mmu_context_init(); 455 mmu_context_init();
456 456
457 /* Shortly after that, the entire linear mapping will be available */ 457 /* Shortly after that, the entire linear mapping will be available */
458 /* This will also cause the unflattened device tree to be allocated 458 /* This will also cause the unflattened device tree to be allocated
459 * within the 768MB limit */ 459 * within the 768MB limit */
460 memblock_set_current_limit(memory_start + lowmem_size - 1); 460 memblock_set_current_limit(memory_start + lowmem_size - 1);
461 } 461 }
462 462
463 /* This is only called until mem_init is done. */ 463 /* This is only called until mem_init is done. */
464 void __init *early_get_page(void) 464 void __init *early_get_page(void)
465 { 465 {
466 void *p; 466 void *p;
467 if (init_bootmem_done) { 467 if (init_bootmem_done) {
468 p = alloc_bootmem_pages(PAGE_SIZE); 468 p = alloc_bootmem_pages(PAGE_SIZE);
469 } else { 469 } else {
470 /* 470 /*
471 * The limit is memory_start + kernel_tlb, 471 * The limit is memory_start + kernel_tlb,
472 * because of the memory mapping set up in head.S 472 * because of the memory mapping set up in head.S
473 */ 473 */
474 p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 474 p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
475 memory_start + kernel_tlb)); 475 memory_start + kernel_tlb));
476 } 476 }
477 return p; 477 return p;
478 } 478 }
479 479
480 #endif /* CONFIG_MMU */ 480 #endif /* CONFIG_MMU */
481 481
482 void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask) 482 void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask)
483 { 483 {
484 if (mem_init_done) 484 if (mem_init_done)
485 return kmalloc(size, mask); 485 return kmalloc(size, mask);
486 else 486 else
487 return alloc_bootmem(size); 487 return alloc_bootmem(size);
488 } 488 }
489 489
490 void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask) 490 void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
491 { 491 {
492 void *p; 492 void *p;
493 493
494 if (mem_init_done) 494 if (mem_init_done)
495 p = kzalloc(size, mask); 495 p = kzalloc(size, mask);
496 else { 496 else {
497 p = alloc_bootmem(size); 497 p = alloc_bootmem(size);
498 if (p) 498 if (p)
499 memset(p, 0, size); 499 memset(p, 0, size);
500 } 500 }
501 return p; 501 return p;
502 } 502 }
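
For a caller of the zeroing variant, see pcibios_alloc_controller() in pci-common.c later in this commit:

	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (!phb)
		return NULL;
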
arch/microblaze/mm/pgtable.c
1 /* 1 /*
2 * This file contains the routines setting up the linux page tables. 2 * This file contains the routines setting up the linux page tables.
3 * 3 *
4 * Copyright (C) 2008 Michal Simek 4 * Copyright (C) 2008 Michal Simek
5 * Copyright (C) 2008 PetaLogix 5 * Copyright (C) 2008 PetaLogix
6 * 6 *
7 * Copyright (C) 2007 Xilinx, Inc. All rights reserved. 7 * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
8 * 8 *
9 * Derived from arch/ppc/mm/pgtable.c: 9 * Derived from arch/ppc/mm/pgtable.c:
10 * -- paulus 10 * -- paulus
11 * 11 *
12 * Derived from arch/ppc/mm/init.c: 12 * Derived from arch/ppc/mm/init.c:
13 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 13 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
14 * 14 *
15 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) 15 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
16 * and Cort Dougan (PReP) (cort@cs.nmt.edu) 16 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
17 * Copyright (C) 1996 Paul Mackerras 17 * Copyright (C) 1996 Paul Mackerras
18 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). 18 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
19 * 19 *
20 * Derived from "arch/i386/mm/init.c" 20 * Derived from "arch/i386/mm/init.c"
21 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 21 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
22 * 22 *
23 * This file is subject to the terms and conditions of the GNU General 23 * This file is subject to the terms and conditions of the GNU General
24 * Public License. See the file COPYING in the main directory of this 24 * Public License. See the file COPYING in the main directory of this
25 * archive for more details. 25 * archive for more details.
26 * 26 *
27 */ 27 */
28 28
29 #include <linux/kernel.h> 29 #include <linux/kernel.h>
30 #include <linux/module.h> 30 #include <linux/module.h>
31 #include <linux/types.h> 31 #include <linux/types.h>
32 #include <linux/vmalloc.h> 32 #include <linux/vmalloc.h>
33 #include <linux/init.h> 33 #include <linux/init.h>
34 34
35 #include <asm/pgtable.h> 35 #include <asm/pgtable.h>
36 #include <asm/pgalloc.h> 36 #include <asm/pgalloc.h>
37 #include <linux/io.h> 37 #include <linux/io.h>
38 #include <asm/mmu.h> 38 #include <asm/mmu.h>
39 #include <asm/sections.h> 39 #include <asm/sections.h>
40 #include <asm/fixmap.h> 40 #include <asm/fixmap.h>
41 41
42 #define flush_HPTE(X, va, pg) _tlbie(va)
43
44 unsigned long ioremap_base; 42 unsigned long ioremap_base;
45 unsigned long ioremap_bot; 43 unsigned long ioremap_bot;
46 EXPORT_SYMBOL(ioremap_bot); 44 EXPORT_SYMBOL(ioremap_bot);
47 45
48 #ifndef CONFIG_SMP 46 #ifndef CONFIG_SMP
49 struct pgtable_cache_struct quicklists; 47 struct pgtable_cache_struct quicklists;
50 #endif 48 #endif
51 49
52 static void __iomem *__ioremap(phys_addr_t addr, unsigned long size, 50 static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
53 unsigned long flags) 51 unsigned long flags)
54 { 52 {
55 unsigned long v, i; 53 unsigned long v, i;
56 phys_addr_t p; 54 phys_addr_t p;
57 int err; 55 int err;
58 56
59 /* 57 /*
60 * Choose an address to map it to. 58 * Choose an address to map it to.
61 * Once the vmalloc system is running, we use it. 59 * Once the vmalloc system is running, we use it.
62 * Before then, we use space going down from ioremap_base 60 * Before then, we use space going down from ioremap_base
63 * (ioremap_bot records where we're up to). 61 * (ioremap_bot records where we're up to).
64 */ 62 */
65 p = addr & PAGE_MASK; 63 p = addr & PAGE_MASK;
66 size = PAGE_ALIGN(addr + size) - p; 64 size = PAGE_ALIGN(addr + size) - p;
67 65
68 /* 66 /*
69 * Don't allow anybody to remap normal RAM that we're using. 67 * Don't allow anybody to remap normal RAM that we're using.
70 * mem_init() sets high_memory so only do the check after that. 68 * mem_init() sets high_memory so only do the check after that.
71 * 69 *
72 * However, allow remap of rootfs: TBD 70 * However, allow remap of rootfs: TBD
73 */ 71 */
74 if (mem_init_done && 72 if (mem_init_done &&
75 p >= memory_start && p < virt_to_phys(high_memory) && 73 p >= memory_start && p < virt_to_phys(high_memory) &&
76 !(p >= virt_to_phys((unsigned long)&__bss_stop) && 74 !(p >= virt_to_phys((unsigned long)&__bss_stop) &&
77 p < virt_to_phys((unsigned long)__bss_stop))) { 75 p < virt_to_phys((unsigned long)__bss_stop))) {
78 printk(KERN_WARNING "__ioremap(): phys addr "PTE_FMT 76 pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %pf\n",
79 " is RAM lr %pf\n", (unsigned long)p, 77 (unsigned long)p, __builtin_return_address(0));
80 __builtin_return_address(0));
81 return NULL; 78 return NULL;
82 } 79 }
83 80
84 if (size == 0) 81 if (size == 0)
85 return NULL; 82 return NULL;
86 83
87 /* 84 /*
88 * Is it already mapped? If the whole area is mapped then we're 85 * Is it already mapped? If the whole area is mapped then we're
89 * done, otherwise remap it since we want to keep the virt addrs for 86 * done, otherwise remap it since we want to keep the virt addrs for
90 * each request contiguous. 87 * each request contiguous.
91 * 88 *
92 * We make the assumption here that if the bottom and top 89 * We make the assumption here that if the bottom and top
93 * of the range we want are mapped then it's mapped to the 90 * of the range we want are mapped then it's mapped to the
94 * same virt address (and this is contiguous). 91 * same virt address (and this is contiguous).
95 * -- Cort 92 * -- Cort
96 */ 93 */
97 94
98 if (mem_init_done) { 95 if (mem_init_done) {
99 struct vm_struct *area; 96 struct vm_struct *area;
100 area = get_vm_area(size, VM_IOREMAP); 97 area = get_vm_area(size, VM_IOREMAP);
101 if (area == NULL) 98 if (area == NULL)
102 return NULL; 99 return NULL;
103 v = (unsigned long) area->addr; 100 v = (unsigned long) area->addr;
104 } else { 101 } else {
105 v = (ioremap_bot -= size); 102 v = (ioremap_bot -= size);
106 } 103 }
107 104
108 if ((flags & _PAGE_PRESENT) == 0) 105 if ((flags & _PAGE_PRESENT) == 0)
109 flags |= _PAGE_KERNEL; 106 flags |= _PAGE_KERNEL;
110 if (flags & _PAGE_NO_CACHE) 107 if (flags & _PAGE_NO_CACHE)
111 flags |= _PAGE_GUARDED; 108 flags |= _PAGE_GUARDED;
112 109
113 err = 0; 110 err = 0;
114 for (i = 0; i < size && err == 0; i += PAGE_SIZE) 111 for (i = 0; i < size && err == 0; i += PAGE_SIZE)
115 err = map_page(v + i, p + i, flags); 112 err = map_page(v + i, p + i, flags);
116 if (err) { 113 if (err) {
117 if (mem_init_done) 114 if (mem_init_done)
118 vfree((void *)v); 115 vfree((void *)v);
119 return NULL; 116 return NULL;
120 } 117 }
121 118
122 return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK)); 119 return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
123 } 120 }
124 121
125 void __iomem *ioremap(phys_addr_t addr, unsigned long size) 122 void __iomem *ioremap(phys_addr_t addr, unsigned long size)
126 { 123 {
127 return __ioremap(addr, size, _PAGE_NO_CACHE); 124 return __ioremap(addr, size, _PAGE_NO_CACHE);
128 } 125 }
129 EXPORT_SYMBOL(ioremap); 126 EXPORT_SYMBOL(ioremap);
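
The page rounding at the top of __ioremap() and the sub-page offset restored by its return statement are plain arithmetic; a userspace sketch with 4kB pages and invented addresses:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long addr = 0xC0001234UL;	/* hypothetical device addr */
	unsigned long size = 0x100;
	unsigned long v    = 0xE0000000UL;	/* pretend mapped virt base */

	unsigned long p = addr & PAGE_MASK;		 /* 0xC0001000 */
	unsigned long len = PAGE_ALIGN(addr + size) - p; /* 0x1000 */

	/* The caller gets back the sub-page offset it passed in. */
	printf("map %#lx bytes at %#lx -> %#lx\n",
	       len, p, v + (addr & ~PAGE_MASK));
	return 0;
}
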
130 127
131 void iounmap(void *addr) 128 void iounmap(void __iomem *addr)
132 { 129 {
133 if (addr > high_memory && (unsigned long) addr < ioremap_bot) 130 if ((__force void *)addr > high_memory &&
131 (unsigned long) addr < ioremap_bot)
134 vfree((void *) (PAGE_MASK & (unsigned long) addr)); 132 vfree((void *) (PAGE_MASK & (unsigned long) addr));
135 } 133 }
136 EXPORT_SYMBOL(iounmap); 134 EXPORT_SYMBOL(iounmap);
137 135
138 136
139 int map_page(unsigned long va, phys_addr_t pa, int flags) 137 int map_page(unsigned long va, phys_addr_t pa, int flags)
140 { 138 {
141 pmd_t *pd; 139 pmd_t *pd;
142 pte_t *pg; 140 pte_t *pg;
143 int err = -ENOMEM; 141 int err = -ENOMEM;
144 /* Use upper 10 bits of VA to index the first level map */ 142 /* Use upper 10 bits of VA to index the first level map */
145 pd = pmd_offset(pgd_offset_k(va), va); 143 pd = pmd_offset(pgd_offset_k(va), va);
146 /* Use middle 10 bits of VA to index the second-level map */ 144 /* Use middle 10 bits of VA to index the second-level map */
147 pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */ 145 pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */
148 /* pg = pte_alloc_kernel(&init_mm, pd, va); */ 146 /* pg = pte_alloc_kernel(&init_mm, pd, va); */
149 147
150 if (pg != NULL) { 148 if (pg != NULL) {
151 err = 0; 149 err = 0;
152 set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, 150 set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
153 __pgprot(flags))); 151 __pgprot(flags)));
154 if (unlikely(mem_init_done)) 152 if (unlikely(mem_init_done))
155 flush_HPTE(0, va, pmd_val(*pd)); 153 _tlbie(va);
156 /* flush_HPTE(0, va, pg); */
157 } 154 }
158 return err; 155 return err;
159 } 156 }
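
The pmd/pte lookups above implement the split the comments describe: on a 32-bit VA the upper 10 bits index the first-level map, the middle 10 bits the second level, and the low 12 bits are the in-page offset. A sketch of the decomposition:

#include <stdio.h>

int main(void)
{
	unsigned long va = 0xC8012345UL;  /* hypothetical kernel VA */
	unsigned long pgd_idx = (va >> 22) & 0x3ff;  /* upper 10 bits */
	unsigned long pte_idx = (va >> 12) & 0x3ff;  /* middle 10 bits */
	unsigned long offset  = va & 0xfff;          /* low 12 bits */

	printf("pgd %lu, pte %lu, offset %#lx\n", pgd_idx, pte_idx, offset);
	return 0;
}
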
160 157
161 /* 158 /*
162 * Map in all of physical memory starting at CONFIG_KERNEL_START. 159 * Map in all of physical memory starting at CONFIG_KERNEL_START.
163 */ 160 */
164 void __init mapin_ram(void) 161 void __init mapin_ram(void)
165 { 162 {
166 unsigned long v, p, s, f; 163 unsigned long v, p, s, f;
167 164
168 v = CONFIG_KERNEL_START; 165 v = CONFIG_KERNEL_START;
169 p = memory_start; 166 p = memory_start;
170 for (s = 0; s < lowmem_size; s += PAGE_SIZE) { 167 for (s = 0; s < lowmem_size; s += PAGE_SIZE) {
171 f = _PAGE_PRESENT | _PAGE_ACCESSED | 168 f = _PAGE_PRESENT | _PAGE_ACCESSED |
172 _PAGE_SHARED | _PAGE_HWEXEC; 169 _PAGE_SHARED | _PAGE_HWEXEC;
173 if ((char *) v < _stext || (char *) v >= _etext) 170 if ((char *) v < _stext || (char *) v >= _etext)
174 f |= _PAGE_WRENABLE; 171 f |= _PAGE_WRENABLE;
175 else 172 else
176 /* On the MicroBlaze, no user access 173 /* On the MicroBlaze, no user access
177 forces R/W kernel access */ 174 forces R/W kernel access */
178 f |= _PAGE_USER; 175 f |= _PAGE_USER;
179 map_page(v, p, f); 176 map_page(v, p, f);
180 v += PAGE_SIZE; 177 v += PAGE_SIZE;
181 p += PAGE_SIZE; 178 p += PAGE_SIZE;
182 } 179 }
183 } 180 }
184 181
185 /* is x a power of 2? */ 182 /* is x a power of 2? */
186 #define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0)) 183 #define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
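
x & (x - 1) clears the lowest set bit, so it is zero exactly when x has a single bit set; the x != 0 guard keeps zero from qualifying. A quick check:

#include <stdio.h>

#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))

int main(void)
{
	/* 64 = 0b1000000 -> 64 & 63 == 0; 48 = 0b110000 -> 48 & 47 != 0 */
	printf("%d %d %d\n", is_power_of_2(64), is_power_of_2(48),
	       is_power_of_2(0));  /* prints: 1 0 0 */
	return 0;
}
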
187 184
188 /* Scan the real Linux page tables and return a PTE pointer for 185 /* Scan the real Linux page tables and return a PTE pointer for
189 * a virtual address in a context. 186 * a virtual address in a context.
190 * Returns true (1) if PTE was found, zero otherwise. The pointer to 187 * Returns true (1) if PTE was found, zero otherwise. The pointer to
191 * the PTE pointer is unmodified if PTE is not found. 188 * the PTE pointer is unmodified if PTE is not found.
192 */ 189 */
193 static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep) 190 static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
194 { 191 {
195 pgd_t *pgd; 192 pgd_t *pgd;
196 pmd_t *pmd; 193 pmd_t *pmd;
197 pte_t *pte; 194 pte_t *pte;
198 int retval = 0; 195 int retval = 0;
199 196
200 pgd = pgd_offset(mm, addr & PAGE_MASK); 197 pgd = pgd_offset(mm, addr & PAGE_MASK);
201 if (pgd) { 198 if (pgd) {
202 pmd = pmd_offset(pgd, addr & PAGE_MASK); 199 pmd = pmd_offset(pgd, addr & PAGE_MASK);
203 if (pmd_present(*pmd)) { 200 if (pmd_present(*pmd)) {
204 pte = pte_offset_kernel(pmd, addr & PAGE_MASK); 201 pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
205 if (pte) { 202 if (pte) {
206 retval = 1; 203 retval = 1;
207 *ptep = pte; 204 *ptep = pte;
208 } 205 }
209 } 206 }
210 } 207 }
211 return retval; 208 return retval;
212 } 209 }
213 210
214 /* Find physical address for this virtual address. Normally used by 211 /* Find physical address for this virtual address. Normally used by
215 * I/O functions, but anyone can call it. 212 * I/O functions, but anyone can call it.
216 */ 213 */
217 unsigned long iopa(unsigned long addr) 214 unsigned long iopa(unsigned long addr)
218 { 215 {
219 unsigned long pa; 216 unsigned long pa;
220 217
221 pte_t *pte; 218 pte_t *pte;
222 struct mm_struct *mm; 219 struct mm_struct *mm;
223 220
224 /* Allow mapping of user addresses (within the thread) 221 /* Allow mapping of user addresses (within the thread)
225 * for DMA if necessary. 222 * for DMA if necessary.
226 */ 223 */
227 if (addr < TASK_SIZE) 224 if (addr < TASK_SIZE)
228 mm = current->mm; 225 mm = current->mm;
229 else 226 else
230 mm = &init_mm; 227 mm = &init_mm;
231 228
232 pa = 0; 229 pa = 0;
233 if (get_pteptr(mm, addr, &pte)) 230 if (get_pteptr(mm, addr, &pte))
234 pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK); 231 pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
235 232
236 return pa; 233 return pa;
237 } 234 }
238 235
239 __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 236 __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
240 unsigned long address) 237 unsigned long address)
241 { 238 {
242 pte_t *pte; 239 pte_t *pte;
243 if (mem_init_done) { 240 if (mem_init_done) {
244 pte = (pte_t *)__get_free_page(GFP_KERNEL | 241 pte = (pte_t *)__get_free_page(GFP_KERNEL |
245 __GFP_REPEAT | __GFP_ZERO); 242 __GFP_REPEAT | __GFP_ZERO);
246 } else { 243 } else {
247 pte = (pte_t *)early_get_page(); 244 pte = (pte_t *)early_get_page();
248 if (pte) 245 if (pte)
249 clear_page(pte); 246 clear_page(pte);
250 } 247 }
251 return pte; 248 return pte;
252 } 249 }
253 250
254 void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags) 251 void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
255 { 252 {
256 unsigned long address = __fix_to_virt(idx); 253 unsigned long address = __fix_to_virt(idx);
257 254
258 if (idx >= __end_of_fixed_addresses) 255 if (idx >= __end_of_fixed_addresses)
259 BUG(); 256 BUG();
260 257
261 map_page(address, phys, pgprot_val(flags)); 258 map_page(address, phys, pgprot_val(flags));
262 } 259 }
arch/microblaze/pci/indirect_pci.c
1 /* 1 /*
2 * Support for indirect PCI bridges. 2 * Support for indirect PCI bridges.
3 * 3 *
4 * Copyright (C) 1998 Gabriel Paubert. 4 * Copyright (C) 1998 Gabriel Paubert.
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12 #include <linux/kernel.h> 12 #include <linux/kernel.h>
13 #include <linux/pci.h> 13 #include <linux/pci.h>
14 #include <linux/delay.h> 14 #include <linux/delay.h>
15 #include <linux/string.h> 15 #include <linux/string.h>
16 #include <linux/init.h> 16 #include <linux/init.h>
17 17
18 #include <asm/io.h> 18 #include <linux/io.h>
19 #include <asm/prom.h> 19 #include <asm/prom.h>
20 #include <asm/pci-bridge.h> 20 #include <asm/pci-bridge.h>
21 21
22 static int 22 static int
23 indirect_read_config(struct pci_bus *bus, unsigned int devfn, int offset, 23 indirect_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
24 int len, u32 *val) 24 int len, u32 *val)
25 { 25 {
26 struct pci_controller *hose = pci_bus_to_host(bus); 26 struct pci_controller *hose = pci_bus_to_host(bus);
27 volatile void __iomem *cfg_data; 27 volatile void __iomem *cfg_data;
28 u8 cfg_type = 0; 28 u8 cfg_type = 0;
29 u32 bus_no, reg; 29 u32 bus_no, reg;
30 30
31 if (hose->indirect_type & INDIRECT_TYPE_NO_PCIE_LINK) { 31 if (hose->indirect_type & INDIRECT_TYPE_NO_PCIE_LINK) {
32 if (bus->number != hose->first_busno) 32 if (bus->number != hose->first_busno)
33 return PCIBIOS_DEVICE_NOT_FOUND; 33 return PCIBIOS_DEVICE_NOT_FOUND;
34 if (devfn != 0) 34 if (devfn != 0)
35 return PCIBIOS_DEVICE_NOT_FOUND; 35 return PCIBIOS_DEVICE_NOT_FOUND;
36 } 36 }
37 37
38 if (hose->indirect_type & INDIRECT_TYPE_SET_CFG_TYPE) 38 if (hose->indirect_type & INDIRECT_TYPE_SET_CFG_TYPE)
39 if (bus->number != hose->first_busno) 39 if (bus->number != hose->first_busno)
40 cfg_type = 1; 40 cfg_type = 1;
41 41
42 bus_no = (bus->number == hose->first_busno) ? 42 bus_no = (bus->number == hose->first_busno) ?
43 hose->self_busno : bus->number; 43 hose->self_busno : bus->number;
44 44
45 if (hose->indirect_type & INDIRECT_TYPE_EXT_REG) 45 if (hose->indirect_type & INDIRECT_TYPE_EXT_REG)
46 reg = ((offset & 0xf00) << 16) | (offset & 0xfc); 46 reg = ((offset & 0xf00) << 16) | (offset & 0xfc);
47 else 47 else
48 reg = offset & 0xfc; /* Only 3 bits for function */ 48 reg = offset & 0xfc; /* Only 3 bits for function */
49 49
50 if (hose->indirect_type & INDIRECT_TYPE_BIG_ENDIAN) 50 if (hose->indirect_type & INDIRECT_TYPE_BIG_ENDIAN)
51 out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) | 51 out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
52 (devfn << 8) | reg | cfg_type)); 52 (devfn << 8) | reg | cfg_type));
53 else 53 else
54 out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) | 54 out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
55 (devfn << 8) | reg | cfg_type)); 55 (devfn << 8) | reg | cfg_type));
56 56
57 /* 57 /*
58 * Note: the caller has already checked that offset is 58 * Note: the caller has already checked that offset is
59 * suitably aligned and that len is 1, 2 or 4. 59 * suitably aligned and that len is 1, 2 or 4.
60 */ 60 */
61 cfg_data = hose->cfg_data + (offset & 3); /* Only 3 bits for function */ 61 cfg_data = hose->cfg_data + (offset & 3); /* Only 3 bits for function */
62 switch (len) { 62 switch (len) {
63 case 1: 63 case 1:
64 *val = in_8(cfg_data); 64 *val = in_8(cfg_data);
65 break; 65 break;
66 case 2: 66 case 2:
67 *val = in_le16(cfg_data); 67 *val = in_le16(cfg_data);
68 break; 68 break;
69 default: 69 default:
70 *val = in_le32(cfg_data); 70 *val = in_le32(cfg_data);
71 break; 71 break;
72 } 72 }
73 return PCIBIOS_SUCCESSFUL; 73 return PCIBIOS_SUCCESSFUL;
74 } 74 }
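
Both config accessors build the same 32-bit cycle descriptor: enable bit 31, bus number in bits 23..16, devfn in bits 15..8, and a dword-aligned register offset (with the extended-register bits shifted high when INDIRECT_TYPE_EXT_REG is set). A sketch of the non-extended encoding:

#include <stdio.h>

static unsigned int cfg_word(unsigned int bus, unsigned int devfn,
			     unsigned int offset, unsigned int cfg_type)
{
	unsigned int reg = offset & 0xfc;  /* dword-aligned register */
	return 0x80000000 | (bus << 16) | (devfn << 8) | reg | cfg_type;
}

int main(void)
{
	/* bus 1, device 2 function 0 (devfn 0x10), register 0x3c */
	printf("%#x\n", cfg_word(1, 0x10, 0x3c, 0));  /* 0x8001103c */
	return 0;
}
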
75 75
76 static int 76 static int
77 indirect_write_config(struct pci_bus *bus, unsigned int devfn, int offset, 77 indirect_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
78 int len, u32 val) 78 int len, u32 val)
79 { 79 {
80 struct pci_controller *hose = pci_bus_to_host(bus); 80 struct pci_controller *hose = pci_bus_to_host(bus);
81 volatile void __iomem *cfg_data; 81 volatile void __iomem *cfg_data;
82 u8 cfg_type = 0; 82 u8 cfg_type = 0;
83 u32 bus_no, reg; 83 u32 bus_no, reg;
84 84
85 if (hose->indirect_type & INDIRECT_TYPE_NO_PCIE_LINK) { 85 if (hose->indirect_type & INDIRECT_TYPE_NO_PCIE_LINK) {
86 if (bus->number != hose->first_busno) 86 if (bus->number != hose->first_busno)
87 return PCIBIOS_DEVICE_NOT_FOUND; 87 return PCIBIOS_DEVICE_NOT_FOUND;
88 if (devfn != 0) 88 if (devfn != 0)
89 return PCIBIOS_DEVICE_NOT_FOUND; 89 return PCIBIOS_DEVICE_NOT_FOUND;
90 } 90 }
91 91
92 if (hose->indirect_type & INDIRECT_TYPE_SET_CFG_TYPE) 92 if (hose->indirect_type & INDIRECT_TYPE_SET_CFG_TYPE)
93 if (bus->number != hose->first_busno) 93 if (bus->number != hose->first_busno)
94 cfg_type = 1; 94 cfg_type = 1;
95 95
96 bus_no = (bus->number == hose->first_busno) ? 96 bus_no = (bus->number == hose->first_busno) ?
97 hose->self_busno : bus->number; 97 hose->self_busno : bus->number;
98 98
99 if (hose->indirect_type & INDIRECT_TYPE_EXT_REG) 99 if (hose->indirect_type & INDIRECT_TYPE_EXT_REG)
100 reg = ((offset & 0xf00) << 16) | (offset & 0xfc); 100 reg = ((offset & 0xf00) << 16) | (offset & 0xfc);
101 else 101 else
102 reg = offset & 0xfc; 102 reg = offset & 0xfc;
103 103
104 if (hose->indirect_type & INDIRECT_TYPE_BIG_ENDIAN) 104 if (hose->indirect_type & INDIRECT_TYPE_BIG_ENDIAN)
105 out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) | 105 out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
106 (devfn << 8) | reg | cfg_type)); 106 (devfn << 8) | reg | cfg_type));
107 else 107 else
108 out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) | 108 out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
109 (devfn << 8) | reg | cfg_type)); 109 (devfn << 8) | reg | cfg_type));
110 110
111 /* suppress setting of PCI_PRIMARY_BUS */ 111 /* suppress setting of PCI_PRIMARY_BUS */
112 if (hose->indirect_type & INDIRECT_TYPE_SURPRESS_PRIMARY_BUS) 112 if (hose->indirect_type & INDIRECT_TYPE_SURPRESS_PRIMARY_BUS)
113 if ((offset == PCI_PRIMARY_BUS) && 113 if ((offset == PCI_PRIMARY_BUS) &&
114 (bus->number == hose->first_busno)) 114 (bus->number == hose->first_busno))
115 val &= 0xffffff00; 115 val &= 0xffffff00;
116 116
117 /* Workaround for PCI_28 Errata in 440EPx/GRx */ 117 /* Workaround for PCI_28 Errata in 440EPx/GRx */
118 if ((hose->indirect_type & INDIRECT_TYPE_BROKEN_MRM) && 118 if ((hose->indirect_type & INDIRECT_TYPE_BROKEN_MRM) &&
119 offset == PCI_CACHE_LINE_SIZE) { 119 offset == PCI_CACHE_LINE_SIZE) {
120 val = 0; 120 val = 0;
121 } 121 }
122 122
123 /* 123 /*
124 * Note: the caller has already checked that offset is 124 * Note: the caller has already checked that offset is
125 * suitably aligned and that len is 1, 2 or 4. 125 * suitably aligned and that len is 1, 2 or 4.
126 */ 126 */
127 cfg_data = hose->cfg_data + (offset & 3); 127 cfg_data = hose->cfg_data + (offset & 3);
128 switch (len) { 128 switch (len) {
129 case 1: 129 case 1:
130 out_8(cfg_data, val); 130 out_8(cfg_data, val);
131 break; 131 break;
132 case 2: 132 case 2:
133 out_le16(cfg_data, val); 133 out_le16(cfg_data, val);
134 break; 134 break;
135 default: 135 default:
136 out_le32(cfg_data, val); 136 out_le32(cfg_data, val);
137 break; 137 break;
138 } 138 }
139 139
140 return PCIBIOS_SUCCESSFUL; 140 return PCIBIOS_SUCCESSFUL;
141 } 141 }
142 142
143 static struct pci_ops indirect_pci_ops = { 143 static struct pci_ops indirect_pci_ops = {
144 .read = indirect_read_config, 144 .read = indirect_read_config,
145 .write = indirect_write_config, 145 .write = indirect_write_config,
146 }; 146 };
147 147
148 void __init 148 void __init
149 setup_indirect_pci(struct pci_controller *hose, 149 setup_indirect_pci(struct pci_controller *hose,
150 resource_size_t cfg_addr, 150 resource_size_t cfg_addr,
151 resource_size_t cfg_data, u32 flags) 151 resource_size_t cfg_data, u32 flags)
152 { 152 {
153 resource_size_t base = cfg_addr & PAGE_MASK; 153 resource_size_t base = cfg_addr & PAGE_MASK;
154 void __iomem *mbase; 154 void __iomem *mbase;
155 155
156 mbase = ioremap(base, PAGE_SIZE); 156 mbase = ioremap(base, PAGE_SIZE);
157 hose->cfg_addr = mbase + (cfg_addr & ~PAGE_MASK); 157 hose->cfg_addr = mbase + (cfg_addr & ~PAGE_MASK);
158 if ((cfg_data & PAGE_MASK) != base) 158 if ((cfg_data & PAGE_MASK) != base)
159 mbase = ioremap(cfg_data & PAGE_MASK, PAGE_SIZE); 159 mbase = ioremap(cfg_data & PAGE_MASK, PAGE_SIZE);
160 hose->cfg_data = mbase + (cfg_data & ~PAGE_MASK); 160 hose->cfg_data = mbase + (cfg_data & ~PAGE_MASK);
161 hose->ops = &indirect_pci_ops; 161 hose->ops = &indirect_pci_ops;
162 hose->indirect_type = flags; 162 hose->indirect_type = flags;
163 } 163 }
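
A hypothetical call, with made-up register addresses, for a bridge whose config address and data registers sit in the same page:

	setup_indirect_pci(hose, 0xA0000000, 0xA0000004,
			   INDIRECT_TYPE_BIG_ENDIAN);
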
164 164
arch/microblaze/pci/iomap.c
1 /* 1 /*
2 * ppc64 "iomap" interface implementation. 2 * ppc64 "iomap" interface implementation.
3 * 3 *
4 * (C) Copyright 2004 Linus Torvalds 4 * (C) Copyright 2004 Linus Torvalds
5 */ 5 */
6 #include <linux/init.h> 6 #include <linux/init.h>
7 #include <linux/pci.h> 7 #include <linux/pci.h>
8 #include <linux/mm.h> 8 #include <linux/mm.h>
9 #include <linux/export.h> 9 #include <linux/export.h>
10 #include <asm/io.h> 10 #include <linux/io.h>
11 #include <asm/pci-bridge.h> 11 #include <asm/pci-bridge.h>
12 12
13 void pci_iounmap(struct pci_dev *dev, void __iomem *addr) 13 void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
14 { 14 {
15 if (isa_vaddr_is_ioport(addr)) 15 if (isa_vaddr_is_ioport(addr))
16 return; 16 return;
17 if (pcibios_vaddr_is_ioport(addr)) 17 if (pcibios_vaddr_is_ioport(addr))
18 return; 18 return;
19 iounmap(addr); 19 iounmap(addr);
20 } 20 }
21 EXPORT_SYMBOL(pci_iounmap); 21 EXPORT_SYMBOL(pci_iounmap);
22 22
arch/microblaze/pci/pci-common.c
1 /* 1 /*
2 * Contains common pci routines for ALL ppc platform 2 * Contains common pci routines for ALL ppc platform
3 * (based on pci_32.c and pci_64.c) 3 * (based on pci_32.c and pci_64.c)
4 * 4 *
5 * Port for PPC64 David Engebretsen, IBM Corp. 5 * Port for PPC64 David Engebretsen, IBM Corp.
6 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands. 6 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
7 * 7 *
8 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM 8 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
9 * Rework, based on alpha PCI code. 9 * Rework, based on alpha PCI code.
10 * 10 *
11 * Common pmac/prep/chrp pci routines. -- Cort 11 * Common pmac/prep/chrp pci routines. -- Cort
12 * 12 *
13 * This program is free software; you can redistribute it and/or 13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License 14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version 15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version. 16 * 2 of the License, or (at your option) any later version.
17 */ 17 */
18 18
19 #include <linux/kernel.h> 19 #include <linux/kernel.h>
20 #include <linux/pci.h> 20 #include <linux/pci.h>
21 #include <linux/string.h> 21 #include <linux/string.h>
22 #include <linux/init.h> 22 #include <linux/init.h>
23 #include <linux/bootmem.h> 23 #include <linux/bootmem.h>
24 #include <linux/mm.h> 24 #include <linux/mm.h>
25 #include <linux/list.h> 25 #include <linux/list.h>
26 #include <linux/syscalls.h> 26 #include <linux/syscalls.h>
27 #include <linux/irq.h> 27 #include <linux/irq.h>
28 #include <linux/vmalloc.h> 28 #include <linux/vmalloc.h>
29 #include <linux/slab.h> 29 #include <linux/slab.h>
30 #include <linux/of.h> 30 #include <linux/of.h>
31 #include <linux/of_address.h> 31 #include <linux/of_address.h>
32 #include <linux/of_pci.h> 32 #include <linux/of_pci.h>
33 #include <linux/export.h> 33 #include <linux/export.h>
34 34
35 #include <asm/processor.h> 35 #include <asm/processor.h>
36 #include <asm/io.h> 36 #include <linux/io.h>
37 #include <asm/pci-bridge.h> 37 #include <asm/pci-bridge.h>
38 #include <asm/byteorder.h> 38 #include <asm/byteorder.h>
39 39
40 static DEFINE_SPINLOCK(hose_spinlock); 40 static DEFINE_SPINLOCK(hose_spinlock);
41 LIST_HEAD(hose_list); 41 LIST_HEAD(hose_list);
42 42
43 /* XXX kill that some day ... */ 43 /* XXX kill that some day ... */
44 static int global_phb_number; /* Global phb counter */ 44 static int global_phb_number; /* Global phb counter */
45 45
46 /* ISA Memory physical address */ 46 /* ISA Memory physical address */
47 resource_size_t isa_mem_base; 47 resource_size_t isa_mem_base;
48 48
49 static struct dma_map_ops *pci_dma_ops = &dma_direct_ops; 49 static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
50 50
51 unsigned long isa_io_base; 51 unsigned long isa_io_base;
52 unsigned long pci_dram_offset; 52 unsigned long pci_dram_offset;
53 static int pci_bus_count; 53 static int pci_bus_count;
54 54
55 55
56 void set_pci_dma_ops(struct dma_map_ops *dma_ops) 56 void set_pci_dma_ops(struct dma_map_ops *dma_ops)
57 { 57 {
58 pci_dma_ops = dma_ops; 58 pci_dma_ops = dma_ops;
59 } 59 }
60 60
61 struct dma_map_ops *get_pci_dma_ops(void) 61 struct dma_map_ops *get_pci_dma_ops(void)
62 { 62 {
63 return pci_dma_ops; 63 return pci_dma_ops;
64 } 64 }
65 EXPORT_SYMBOL(get_pci_dma_ops); 65 EXPORT_SYMBOL(get_pci_dma_ops);
66 66
67 struct pci_controller *pcibios_alloc_controller(struct device_node *dev) 67 struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
68 { 68 {
69 struct pci_controller *phb; 69 struct pci_controller *phb;
70 70
71 phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL); 71 phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
72 if (!phb) 72 if (!phb)
73 return NULL; 73 return NULL;
74 spin_lock(&hose_spinlock); 74 spin_lock(&hose_spinlock);
75 phb->global_number = global_phb_number++; 75 phb->global_number = global_phb_number++;
76 list_add_tail(&phb->list_node, &hose_list); 76 list_add_tail(&phb->list_node, &hose_list);
77 spin_unlock(&hose_spinlock); 77 spin_unlock(&hose_spinlock);
78 phb->dn = dev; 78 phb->dn = dev;
79 phb->is_dynamic = mem_init_done; 79 phb->is_dynamic = mem_init_done;
80 return phb; 80 return phb;
81 } 81 }
82 82
83 void pcibios_free_controller(struct pci_controller *phb) 83 void pcibios_free_controller(struct pci_controller *phb)
84 { 84 {
85 spin_lock(&hose_spinlock); 85 spin_lock(&hose_spinlock);
86 list_del(&phb->list_node); 86 list_del(&phb->list_node);
87 spin_unlock(&hose_spinlock); 87 spin_unlock(&hose_spinlock);
88 88
89 if (phb->is_dynamic) 89 if (phb->is_dynamic)
90 kfree(phb); 90 kfree(phb);
91 } 91 }
92 92
93 static resource_size_t pcibios_io_size(const struct pci_controller *hose) 93 static resource_size_t pcibios_io_size(const struct pci_controller *hose)
94 { 94 {
95 return resource_size(&hose->io_resource); 95 return resource_size(&hose->io_resource);
96 } 96 }
97 97
98 int pcibios_vaddr_is_ioport(void __iomem *address) 98 int pcibios_vaddr_is_ioport(void __iomem *address)
99 { 99 {
100 int ret = 0; 100 int ret = 0;
101 struct pci_controller *hose; 101 struct pci_controller *hose;
102 resource_size_t size; 102 resource_size_t size;
103 103
104 spin_lock(&hose_spinlock); 104 spin_lock(&hose_spinlock);
105 list_for_each_entry(hose, &hose_list, list_node) { 105 list_for_each_entry(hose, &hose_list, list_node) {
106 size = pcibios_io_size(hose); 106 size = pcibios_io_size(hose);
107 if (address >= hose->io_base_virt && 107 if (address >= hose->io_base_virt &&
108 address < (hose->io_base_virt + size)) { 108 address < (hose->io_base_virt + size)) {
109 ret = 1; 109 ret = 1;
110 break; 110 break;
111 } 111 }
112 } 112 }
113 spin_unlock(&hose_spinlock); 113 spin_unlock(&hose_spinlock);
114 return ret; 114 return ret;
115 } 115 }
116 116
117 unsigned long pci_address_to_pio(phys_addr_t address) 117 unsigned long pci_address_to_pio(phys_addr_t address)
118 { 118 {
119 struct pci_controller *hose; 119 struct pci_controller *hose;
120 resource_size_t size; 120 resource_size_t size;
121 unsigned long ret = ~0; 121 unsigned long ret = ~0;
122 122
123 spin_lock(&hose_spinlock); 123 spin_lock(&hose_spinlock);
124 list_for_each_entry(hose, &hose_list, list_node) { 124 list_for_each_entry(hose, &hose_list, list_node) {
125 size = pcibios_io_size(hose); 125 size = pcibios_io_size(hose);
126 if (address >= hose->io_base_phys && 126 if (address >= hose->io_base_phys &&
127 address < (hose->io_base_phys + size)) { 127 address < (hose->io_base_phys + size)) {
128 unsigned long base = 128 unsigned long base =
129 (unsigned long)hose->io_base_virt - _IO_BASE; 129 (unsigned long)hose->io_base_virt - _IO_BASE;
130 ret = base + (address - hose->io_base_phys); 130 ret = base + (address - hose->io_base_phys);
131 break; 131 break;
132 } 132 }
133 } 133 }
134 spin_unlock(&hose_spinlock); 134 spin_unlock(&hose_spinlock);
135 135
136 return ret; 136 return ret;
137 } 137 }
138 EXPORT_SYMBOL_GPL(pci_address_to_pio); 138 EXPORT_SYMBOL_GPL(pci_address_to_pio);
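
Once the owning hose is found, the translation is offset arithmetic: the port number is the address's offset into the hose's physical I/O window plus the window's offset from _IO_BASE. A sketch with invented window values:

#include <stdio.h>

int main(void)
{
	unsigned long io_base_virt = 0xE1000000UL; /* hypothetical */
	unsigned long io_base_phys = 0xB0000000UL; /* hypothetical */
	unsigned long io_base      = 0xE1000000UL; /* stand-in for _IO_BASE */
	unsigned long address      = 0xB00003F8UL;

	unsigned long base = io_base_virt - io_base;
	printf("pio %#lx\n", base + (address - io_base_phys)); /* 0x3f8 */
	return 0;
}
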
139 139
140 /* 140 /*
141 * Return the domain number for this bus. 141 * Return the domain number for this bus.
142 */ 142 */
143 int pci_domain_nr(struct pci_bus *bus) 143 int pci_domain_nr(struct pci_bus *bus)
144 { 144 {
145 struct pci_controller *hose = pci_bus_to_host(bus); 145 struct pci_controller *hose = pci_bus_to_host(bus);
146 146
147 return hose->global_number; 147 return hose->global_number;
148 } 148 }
149 EXPORT_SYMBOL(pci_domain_nr); 149 EXPORT_SYMBOL(pci_domain_nr);
150 150
151 /* This routine is meant to be used early during boot, when the 151 /* This routine is meant to be used early during boot, when the
152 * PCI bus numbers have not yet been assigned, and you need to 152 * PCI bus numbers have not yet been assigned, and you need to
153 * issue PCI config cycles to an OF device. 153 * issue PCI config cycles to an OF device.
154 * It could also be used to "fix" RTAS config cycles if you want 154 * It could also be used to "fix" RTAS config cycles if you want
155 * to set pci_assign_all_buses to 1 and still use RTAS for PCI 155 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
156 * config cycles. 156 * config cycles.
157 */ 157 */
158 struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node) 158 struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
159 { 159 {
160 while (node) { 160 while (node) {
161 struct pci_controller *hose, *tmp; 161 struct pci_controller *hose, *tmp;
162 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) 162 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
163 if (hose->dn == node) 163 if (hose->dn == node)
164 return hose; 164 return hose;
165 node = node->parent; 165 node = node->parent;
166 } 166 }
167 return NULL; 167 return NULL;
168 } 168 }
169 169
170 static ssize_t pci_show_devspec(struct device *dev, 170 static ssize_t pci_show_devspec(struct device *dev,
171 struct device_attribute *attr, char *buf) 171 struct device_attribute *attr, char *buf)
172 { 172 {
173 struct pci_dev *pdev; 173 struct pci_dev *pdev;
174 struct device_node *np; 174 struct device_node *np;
175 175
176 pdev = to_pci_dev(dev); 176 pdev = to_pci_dev(dev);
177 np = pci_device_to_OF_node(pdev); 177 np = pci_device_to_OF_node(pdev);
178 if (np == NULL || np->full_name == NULL) 178 if (np == NULL || np->full_name == NULL)
179 return 0; 179 return 0;
180 return sprintf(buf, "%s", np->full_name); 180 return sprintf(buf, "%s", np->full_name);
181 } 181 }
182 static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL); 182 static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
183 183
184 /* Add sysfs properties */ 184 /* Add sysfs properties */
185 int pcibios_add_platform_entries(struct pci_dev *pdev) 185 int pcibios_add_platform_entries(struct pci_dev *pdev)
186 { 186 {
187 return device_create_file(&pdev->dev, &dev_attr_devspec); 187 return device_create_file(&pdev->dev, &dev_attr_devspec);
188 } 188 }
189 189
190 void pcibios_set_master(struct pci_dev *dev) 190 void pcibios_set_master(struct pci_dev *dev)
191 { 191 {
192 /* No special bus mastering setup handling */ 192 /* No special bus mastering setup handling */
193 } 193 }
194 194
195 /* 195 /*
196 * Reads the interrupt pin to determine if the interrupt is used by the card. 196 * Reads the interrupt pin to determine if the interrupt is used by the card.
197 * If the interrupt is used, then gets the interrupt line from 197 * If the interrupt is used, then gets the interrupt line from
198 * openfirmware and sets it in the pci_dev and PCI config space. 198 * openfirmware and sets it in the pci_dev and PCI config space.
199 */ 199 */
200 int pci_read_irq_line(struct pci_dev *pci_dev) 200 int pci_read_irq_line(struct pci_dev *pci_dev)
201 { 201 {
202 struct of_irq oirq; 202 struct of_irq oirq;
203 unsigned int virq; 203 unsigned int virq;
204 204
205 /* The current device-tree that iSeries generates from the HV 205 /* The current device-tree that iSeries generates from the HV
206 * PCI information doesn't contain proper interrupt routing, 206 * PCI information doesn't contain proper interrupt routing,
207 * and all the fallback would do is print out crap, so we 207 * and all the fallback would do is print out crap, so we
208 * don't attempt to resolve the interrupts here at all, some 208 * don't attempt to resolve the interrupts here at all, some
209 * iSeries specific fixup does it. 209 * iSeries specific fixup does it.
210 * 210 *
211 * In the long run, we will hopefully fix the generated device-tree 211 * In the long run, we will hopefully fix the generated device-tree
212 * instead. 212 * instead.
213 */ 213 */
214 pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev)); 214 pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));
215 215
216 #ifdef DEBUG 216 #ifdef DEBUG
217 memset(&oirq, 0xff, sizeof(oirq)); 217 memset(&oirq, 0xff, sizeof(oirq));
218 #endif 218 #endif
219 /* Try to get a mapping from the device-tree */ 219 /* Try to get a mapping from the device-tree */
220 if (of_irq_map_pci(pci_dev, &oirq)) { 220 if (of_irq_map_pci(pci_dev, &oirq)) {
221 u8 line, pin; 221 u8 line, pin;
222 222
223 /* If that fails, let's fall back to what is in the config 223 /* If that fails, let's fall back to what is in the config
224 * space and map that through the default controller. We 224 * space and map that through the default controller. We
225 * also set the type to level low since that's what PCI 225 * also set the type to level low since that's what PCI
226 * interrupts are. If your platform does differently, then 226 * interrupts are. If your platform does differently, then
227 * either provide a proper interrupt tree or don't use this 227 * either provide a proper interrupt tree or don't use this
228 * function. 228 * function.
229 */ 229 */
230 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin)) 230 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
231 return -1; 231 return -1;
232 if (pin == 0) 232 if (pin == 0)
233 return -1; 233 return -1;
234 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) || 234 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
235 line == 0xff || line == 0) { 235 line == 0xff || line == 0) {
236 return -1; 236 return -1;
237 } 237 }
238 pr_debug(" No map ! Using line %d (pin %d) from PCI config\n", 238 pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
239 line, pin); 239 line, pin);
240 240
241 virq = irq_create_mapping(NULL, line); 241 virq = irq_create_mapping(NULL, line);
242 if (virq) 242 if (virq)
243 irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); 243 irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
244 } else { 244 } else {
245 pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n", 245 pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
246 oirq.size, oirq.specifier[0], oirq.specifier[1], 246 oirq.size, oirq.specifier[0], oirq.specifier[1],
247 of_node_full_name(oirq.controller)); 247 of_node_full_name(oirq.controller));
248 248
249 virq = irq_create_of_mapping(oirq.controller, oirq.specifier, 249 virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
250 oirq.size); 250 oirq.size);
251 } 251 }
252 if (!virq) { 252 if (!virq) {
253 pr_debug(" Failed to map !\n"); 253 pr_debug(" Failed to map !\n");
254 return -1; 254 return -1;
255 } 255 }
256 256
257 pr_debug(" Mapped to linux irq %d\n", virq); 257 pr_debug(" Mapped to linux irq %d\n", virq);
258 258
259 pci_dev->irq = virq; 259 pci_dev->irq = virq;
260 260
261 return 0; 261 return 0;
262 } 262 }
263 EXPORT_SYMBOL(pci_read_irq_line); 263 EXPORT_SYMBOL(pci_read_irq_line);
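A minimal sketch of how a driver might consume the virq this helper stores in pci_dev->irq. The handler, device name and probe function below are hypothetical, not part of this file (assumes <linux/interrupt.h> and <linux/pci.h>):

/* Hypothetical consumer of pci_read_irq_line(); illustrative only */
static irqreturn_t my_isr(int irq, void *data)
{
        /* acknowledge the device's interrupt source here */
        return IRQ_HANDLED;
}

static int my_probe(struct pci_dev *pdev)
{
        if (pci_read_irq_line(pdev))
                return -ENODEV; /* no usable interrupt mapping */
        /* pdev->irq now holds the linux virq mapped above */
        return request_irq(pdev->irq, my_isr, IRQF_SHARED, "mydev", pdev);
}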
264 264
265 /* 265 /*
266 * Platform support for /proc/bus/pci/X/Y mmap()s, 266 * Platform support for /proc/bus/pci/X/Y mmap()s,
267 * modelled on the sparc64 implementation by Dave Miller. 267 * modelled on the sparc64 implementation by Dave Miller.
268 * -- paulus. 268 * -- paulus.
269 */ 269 */
270 270
271 /* 271 /*
272 * Adjust vm_pgoff of VMA such that it is the physical page offset 272 * Adjust vm_pgoff of VMA such that it is the physical page offset
273 * corresponding to the 32-bit pci bus offset for DEV requested by the user. 273 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
274 * 274 *
275 * Basically, the user finds the base address for the device they wish 275 * Basically, the user finds the base address for the device they wish
276 * to mmap. They read the 32-bit value from the config space base register, 276 * to mmap. They read the 32-bit value from the config space base register,
277 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the 277 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
278 * offset parameter of mmap on /proc/bus/pci/XXX for that device. 278 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
279 * 279 *
280 * Returns negative error code on failure, zero on success. 280 * Returns negative error code on failure, zero on success.
281 */ 281 */
282 static struct resource *__pci_mmap_make_offset(struct pci_dev *dev, 282 static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
283 resource_size_t *offset, 283 resource_size_t *offset,
284 enum pci_mmap_state mmap_state) 284 enum pci_mmap_state mmap_state)
285 { 285 {
286 struct pci_controller *hose = pci_bus_to_host(dev->bus); 286 struct pci_controller *hose = pci_bus_to_host(dev->bus);
287 unsigned long io_offset = 0; 287 unsigned long io_offset = 0;
288 int i, res_bit; 288 int i, res_bit;
289 289
290 if (!hose) 290 if (!hose)
291 return NULL; /* should never happen */ 291 return NULL; /* should never happen */
292 292
293 /* If memory, add on the PCI bridge address offset */ 293 /* If memory, add on the PCI bridge address offset */
294 if (mmap_state == pci_mmap_mem) { 294 if (mmap_state == pci_mmap_mem) {
295 #if 0 /* See comment in pci_resource_to_user() for why this is disabled */ 295 #if 0 /* See comment in pci_resource_to_user() for why this is disabled */
296 *offset += hose->pci_mem_offset; 296 *offset += hose->pci_mem_offset;
297 #endif 297 #endif
298 res_bit = IORESOURCE_MEM; 298 res_bit = IORESOURCE_MEM;
299 } else { 299 } else {
300 io_offset = (unsigned long)hose->io_base_virt - _IO_BASE; 300 io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
301 *offset += io_offset; 301 *offset += io_offset;
302 res_bit = IORESOURCE_IO; 302 res_bit = IORESOURCE_IO;
303 } 303 }
304 304
305 /* 305 /*
306 * Check that the offset requested corresponds to one of the 306 * Check that the offset requested corresponds to one of the
307 * resources of the device. 307 * resources of the device.
308 */ 308 */
309 for (i = 0; i <= PCI_ROM_RESOURCE; i++) { 309 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
310 struct resource *rp = &dev->resource[i]; 310 struct resource *rp = &dev->resource[i];
311 int flags = rp->flags; 311 int flags = rp->flags;
312 312
313 /* treat ROM as memory (should be already) */ 313 /* treat ROM as memory (should be already) */
314 if (i == PCI_ROM_RESOURCE) 314 if (i == PCI_ROM_RESOURCE)
315 flags |= IORESOURCE_MEM; 315 flags |= IORESOURCE_MEM;
316 316
317 /* Active and same type? */ 317 /* Active and same type? */
318 if ((flags & res_bit) == 0) 318 if ((flags & res_bit) == 0)
319 continue; 319 continue;
320 320
321 /* In the range of this resource? */ 321 /* In the range of this resource? */
322 if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end) 322 if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
323 continue; 323 continue;
324 324
325 /* found it! construct the final physical address */ 325 /* found it! construct the final physical address */
326 if (mmap_state == pci_mmap_io) 326 if (mmap_state == pci_mmap_io)
327 *offset += hose->io_base_phys - io_offset; 327 *offset += hose->io_base_phys - io_offset;
328 return rp; 328 return rp;
329 } 329 }
330 330
331 return NULL; 331 return NULL;
332 } 332 }
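From user space, the flow the comment above describes looks roughly like the sketch below; the device path, the BAR index and the assumption that BAR0 is a page-aligned memory BAR are all illustrative, not prescriptive:

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/pci.h>  /* PCIIOC_MMAP_IS_MEM */

int main(void)
{
        uint32_t bar0;
        int fd = open("/proc/bus/pci/00/0a.0", O_RDWR); /* illustrative path */

        if (fd < 0)
                return 1;
        /* BAR0 sits at config offset 0x10; strip the low flag bits */
        if (pread(fd, &bar0, sizeof(bar0), 0x10) != sizeof(bar0))
                return 1;
        bar0 &= ~0xfu;
        /* request a memory-space mapping, then mmap one page of BAR0;
         * __pci_mmap_make_offset() above validates the offset */
        ioctl(fd, PCIIOC_MMAP_IS_MEM);
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
                       fd, bar0);
        if (p == MAP_FAILED)
                return 1;
        munmap(p, 4096);
        close(fd);
        return 0;
}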
333 333
334 /* 334 /*
335 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci 335 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
336 * device mapping. 336 * device mapping.
337 */ 337 */
338 static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, 338 static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
339 pgprot_t protection, 339 pgprot_t protection,
340 enum pci_mmap_state mmap_state, 340 enum pci_mmap_state mmap_state,
341 int write_combine) 341 int write_combine)
342 { 342 {
343 pgprot_t prot = protection; 343 pgprot_t prot = protection;
344 344
345 /* Write combine is always 0 on non-memory space mappings. On 345 /* Write combine is always 0 on non-memory space mappings. On
346 * memory space, if the user didn't pass 1, we check for a 346 * memory space, if the user didn't pass 1, we check for a
347 * "prefetchable" resource. This is a bit hackish, but we use 347 * "prefetchable" resource. This is a bit hackish, but we use
348 * this to work around the inability of /sysfs to provide a write 348 * this to work around the inability of /sysfs to provide a write
349 * combine bit 349 * combine bit
350 */ 350 */
351 if (mmap_state != pci_mmap_mem) 351 if (mmap_state != pci_mmap_mem)
352 write_combine = 0; 352 write_combine = 0;
353 else if (write_combine == 0) { 353 else if (write_combine == 0) {
354 if (rp->flags & IORESOURCE_PREFETCH) 354 if (rp->flags & IORESOURCE_PREFETCH)
355 write_combine = 1; 355 write_combine = 1;
356 } 356 }
357 357
358 return pgprot_noncached(prot); 358 return pgprot_noncached(prot);
359 } 359 }
360 360
361 /* 361 /*
362 * This one is used by /dev/mem and fbdev, which have no clue about the 362 * This one is used by /dev/mem and fbdev, which have no clue about the
363 * PCI device; it tries to find the PCI device first and calls the 363 * PCI device; it tries to find the PCI device first and calls the
364 * above routine 364 * above routine
365 */ 365 */
366 pgprot_t pci_phys_mem_access_prot(struct file *file, 366 pgprot_t pci_phys_mem_access_prot(struct file *file,
367 unsigned long pfn, 367 unsigned long pfn,
368 unsigned long size, 368 unsigned long size,
369 pgprot_t prot) 369 pgprot_t prot)
370 { 370 {
371 struct pci_dev *pdev = NULL; 371 struct pci_dev *pdev = NULL;
372 struct resource *found = NULL; 372 struct resource *found = NULL;
373 resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT; 373 resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
374 int i; 374 int i;
375 375
376 if (page_is_ram(pfn)) 376 if (page_is_ram(pfn))
377 return prot; 377 return prot;
378 378
379 prot = pgprot_noncached(prot); 379 prot = pgprot_noncached(prot);
380 for_each_pci_dev(pdev) { 380 for_each_pci_dev(pdev) {
381 for (i = 0; i <= PCI_ROM_RESOURCE; i++) { 381 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
382 struct resource *rp = &pdev->resource[i]; 382 struct resource *rp = &pdev->resource[i];
383 int flags = rp->flags; 383 int flags = rp->flags;
384 384
385 /* Active and same type? */ 385 /* Active and same type? */
386 if ((flags & IORESOURCE_MEM) == 0) 386 if ((flags & IORESOURCE_MEM) == 0)
387 continue; 387 continue;
388 /* In the range of this resource? */ 388 /* In the range of this resource? */
389 if (offset < (rp->start & PAGE_MASK) || 389 if (offset < (rp->start & PAGE_MASK) ||
390 offset > rp->end) 390 offset > rp->end)
391 continue; 391 continue;
392 found = rp; 392 found = rp;
393 break; 393 break;
394 } 394 }
395 if (found) 395 if (found)
396 break; 396 break;
397 } 397 }
398 if (found) { 398 if (found) {
399 if (found->flags & IORESOURCE_PREFETCH) 399 if (found->flags & IORESOURCE_PREFETCH)
400 prot = pgprot_noncached_wc(prot); 400 prot = pgprot_noncached_wc(prot);
401 pci_dev_put(pdev); 401 pci_dev_put(pdev);
402 } 402 }
403 403
404 pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n", 404 pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
405 (unsigned long long)offset, pgprot_val(prot)); 405 (unsigned long long)offset, pgprot_val(prot));
406 406
407 return prot; 407 return prot;
408 } 408 }
409 409
410 /* 410 /*
411 * Perform the actual remap of the pages for a PCI device mapping, as 411 * Perform the actual remap of the pages for a PCI device mapping, as
412 * appropriate for this architecture. The region of the process address 412 * appropriate for this architecture. The region of the process address
413 * space to map is described by the vm_start and vm_end members of VMA; 413 * space to map is described by the vm_start and vm_end members of VMA;
414 * the base physical address is found in vm_pgoff. 414 * the base physical address is found in vm_pgoff.
415 * The pci device structure is provided so that architectures may make mapping 415 * The pci device structure is provided so that architectures may make mapping
416 * decisions on a per-device or per-bus basis. 416 * decisions on a per-device or per-bus basis.
417 * 417 *
418 * Returns a negative error code on failure, zero on success. 418 * Returns a negative error code on failure, zero on success.
419 */ 419 */
420 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, 420 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
421 enum pci_mmap_state mmap_state, int write_combine) 421 enum pci_mmap_state mmap_state, int write_combine)
422 { 422 {
423 resource_size_t offset = 423 resource_size_t offset =
424 ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT; 424 ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
425 struct resource *rp; 425 struct resource *rp;
426 int ret; 426 int ret;
427 427
428 rp = __pci_mmap_make_offset(dev, &offset, mmap_state); 428 rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
429 if (rp == NULL) 429 if (rp == NULL)
430 return -EINVAL; 430 return -EINVAL;
431 431
432 vma->vm_pgoff = offset >> PAGE_SHIFT; 432 vma->vm_pgoff = offset >> PAGE_SHIFT;
433 vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp, 433 vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
434 vma->vm_page_prot, 434 vma->vm_page_prot,
435 mmap_state, write_combine); 435 mmap_state, write_combine);
436 436
437 ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, 437 ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
438 vma->vm_end - vma->vm_start, vma->vm_page_prot); 438 vma->vm_end - vma->vm_start, vma->vm_page_prot);
439 439
440 return ret; 440 return ret;
441 } 441 }
442 442
443 /* This provides legacy IO read access on a bus */ 443 /* This provides legacy IO read access on a bus */
444 int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size) 444 int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
445 { 445 {
446 unsigned long offset; 446 unsigned long offset;
447 struct pci_controller *hose = pci_bus_to_host(bus); 447 struct pci_controller *hose = pci_bus_to_host(bus);
448 struct resource *rp = &hose->io_resource; 448 struct resource *rp = &hose->io_resource;
449 void __iomem *addr; 449 void __iomem *addr;
450 450
451 /* Check if port can be supported by that bus. We only check 451 /* Check if port can be supported by that bus. We only check
452 * the ranges of the PHB though, not the bus itself as the rules 452 * the ranges of the PHB though, not the bus itself as the rules
453 * for forwarding legacy cycles down bridges are not our problem 453 * for forwarding legacy cycles down bridges are not our problem
454 * here. So if the host bridge supports it, we do it. 454 * here. So if the host bridge supports it, we do it.
455 */ 455 */
456 offset = (unsigned long)hose->io_base_virt - _IO_BASE; 456 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
457 offset += port; 457 offset += port;
458 458
459 if (!(rp->flags & IORESOURCE_IO)) 459 if (!(rp->flags & IORESOURCE_IO))
460 return -ENXIO; 460 return -ENXIO;
461 if (offset < rp->start || (offset + size) > rp->end) 461 if (offset < rp->start || (offset + size) > rp->end)
462 return -ENXIO; 462 return -ENXIO;
463 addr = hose->io_base_virt + port; 463 addr = hose->io_base_virt + port;
464 464
465 switch (size) { 465 switch (size) {
466 case 1: 466 case 1:
467 *((u8 *)val) = in_8(addr); 467 *((u8 *)val) = in_8(addr);
468 return 1; 468 return 1;
469 case 2: 469 case 2:
470 if (port & 1) 470 if (port & 1)
471 return -EINVAL; 471 return -EINVAL;
472 *((u16 *)val) = in_le16(addr); 472 *((u16 *)val) = in_le16(addr);
473 return 2; 473 return 2;
474 case 4: 474 case 4:
475 if (port & 3) 475 if (port & 3)
476 return -EINVAL; 476 return -EINVAL;
477 *((u32 *)val) = in_le32(addr); 477 *((u32 *)val) = in_le32(addr);
478 return 4; 478 return 4;
479 } 479 }
480 return -EINVAL; 480 return -EINVAL;
481 } 481 }
482 482
483 /* This provides legacy IO write access on a bus */ 483 /* This provides legacy IO write access on a bus */
484 int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size) 484 int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
485 { 485 {
486 unsigned long offset; 486 unsigned long offset;
487 struct pci_controller *hose = pci_bus_to_host(bus); 487 struct pci_controller *hose = pci_bus_to_host(bus);
488 struct resource *rp = &hose->io_resource; 488 struct resource *rp = &hose->io_resource;
489 void __iomem *addr; 489 void __iomem *addr;
490 490
491 /* Check if port can be supported by that bus. We only check 491 /* Check if port can be supported by that bus. We only check
492 * the ranges of the PHB though, not the bus itself as the rules 492 * the ranges of the PHB though, not the bus itself as the rules
493 * for forwarding legacy cycles down bridges are not our problem 493 * for forwarding legacy cycles down bridges are not our problem
494 * here. So if the host bridge supports it, we do it. 494 * here. So if the host bridge supports it, we do it.
495 */ 495 */
496 offset = (unsigned long)hose->io_base_virt - _IO_BASE; 496 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
497 offset += port; 497 offset += port;
498 498
499 if (!(rp->flags & IORESOURCE_IO)) 499 if (!(rp->flags & IORESOURCE_IO))
500 return -ENXIO; 500 return -ENXIO;
501 if (offset < rp->start || (offset + size) > rp->end) 501 if (offset < rp->start || (offset + size) > rp->end)
502 return -ENXIO; 502 return -ENXIO;
503 addr = hose->io_base_virt + port; 503 addr = hose->io_base_virt + port;
504 504
505 /* WARNING: The generic code is idiotic. It gets passed a pointer 505 /* WARNING: The generic code is idiotic. It gets passed a pointer
506 * to what can be a 1, 2 or 4 byte quantity and always reads that 506 * to what can be a 1, 2 or 4 byte quantity and always reads that
507 * as a u32, which means that we have to correct the location of 507 * as a u32, which means that we have to correct the location of
508 * the data read within those 32 bits for size 1 and 2 508 * the data read within those 32 bits for size 1 and 2
509 */ 509 */
510 switch (size) { 510 switch (size) {
511 case 1: 511 case 1:
512 out_8(addr, val >> 24); 512 out_8(addr, val >> 24);
513 return 1; 513 return 1;
514 case 2: 514 case 2:
515 if (port & 1) 515 if (port & 1)
516 return -EINVAL; 516 return -EINVAL;
517 out_le16(addr, val >> 16); 517 out_le16(addr, val >> 16);
518 return 2; 518 return 2;
519 case 4: 519 case 4:
520 if (port & 3) 520 if (port & 3)
521 return -EINVAL; 521 return -EINVAL;
522 out_le32(addr, val); 522 out_le32(addr, val);
523 return 4; 523 return 4;
524 } 524 }
525 return -EINVAL; 525 return -EINVAL;
526 } 526 }
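These two helpers back the per-bus legacy_io file in sysfs. A user-space sketch of that path, with the sysfs location and the VGA port chosen purely for illustration:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        unsigned char status;
        /* per-bus legacy_io attribute; path is an assumption */
        int fd = open("/sys/class/pci_bus/0000:00/legacy_io", O_RDONLY);

        if (fd < 0)
                return 1;
        /* the file offset is the port number; 0x3da is VGA input status #1 */
        if (pread(fd, &status, 1, 0x3da) != 1)
                status = 0xff;
        close(fd);
        return status;
}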
527 527
528 /* This provides legacy IO or memory mmap access on a bus */ 528 /* This provides legacy IO or memory mmap access on a bus */
529 int pci_mmap_legacy_page_range(struct pci_bus *bus, 529 int pci_mmap_legacy_page_range(struct pci_bus *bus,
530 struct vm_area_struct *vma, 530 struct vm_area_struct *vma,
531 enum pci_mmap_state mmap_state) 531 enum pci_mmap_state mmap_state)
532 { 532 {
533 struct pci_controller *hose = pci_bus_to_host(bus); 533 struct pci_controller *hose = pci_bus_to_host(bus);
534 resource_size_t offset = 534 resource_size_t offset =
535 ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT; 535 ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
536 resource_size_t size = vma->vm_end - vma->vm_start; 536 resource_size_t size = vma->vm_end - vma->vm_start;
537 struct resource *rp; 537 struct resource *rp;
538 538
539 pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n", 539 pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
540 pci_domain_nr(bus), bus->number, 540 pci_domain_nr(bus), bus->number,
541 mmap_state == pci_mmap_mem ? "MEM" : "IO", 541 mmap_state == pci_mmap_mem ? "MEM" : "IO",
542 (unsigned long long)offset, 542 (unsigned long long)offset,
543 (unsigned long long)(offset + size - 1)); 543 (unsigned long long)(offset + size - 1));
544 544
545 if (mmap_state == pci_mmap_mem) { 545 if (mmap_state == pci_mmap_mem) {
546 /* Hack alert ! 546 /* Hack alert !
547 * 547 *
548 * Because X is lame and can fail starting if it gets an error 548 * Because X is lame and can fail starting if it gets an error
549 * trying to mmap legacy_mem (instead of just moving on without 549 * trying to mmap legacy_mem (instead of just moving on without
550 * legacy memory access) we fake it here by giving it anonymous 550 * legacy memory access) we fake it here by giving it anonymous
551 * memory, effectively behaving just like /dev/zero 551 * memory, effectively behaving just like /dev/zero
552 */ 552 */
553 if ((offset + size) > hose->isa_mem_size) { 553 if ((offset + size) > hose->isa_mem_size) {
554 #ifdef CONFIG_MMU 554 #ifdef CONFIG_MMU
555 printk(KERN_DEBUG 555 pr_debug("Process %s (pid:%d) mapped non-existing PCI",
556 "Process %s (pid:%d) mapped non-existing PCI" 556 current->comm, current->pid);
557 "legacy memory for 0%04x:%02x\n", 557 pr_debug("legacy memory for 0%04x:%02x\n",
558 current->comm, current->pid, pci_domain_nr(bus), 558 pci_domain_nr(bus), bus->number);
559 bus->number);
560 #endif 559 #endif
561 if (vma->vm_flags & VM_SHARED) 560 if (vma->vm_flags & VM_SHARED)
562 return shmem_zero_setup(vma); 561 return shmem_zero_setup(vma);
563 return 0; 562 return 0;
564 } 563 }
565 offset += hose->isa_mem_phys; 564 offset += hose->isa_mem_phys;
566 } else { 565 } else {
567 unsigned long io_offset = (unsigned long)hose->io_base_virt - \ 566 unsigned long io_offset = (unsigned long)hose->io_base_virt -
568 _IO_BASE; 567 _IO_BASE;
569 unsigned long roffset = offset + io_offset; 568 unsigned long roffset = offset + io_offset;
570 rp = &hose->io_resource; 569 rp = &hose->io_resource;
571 if (!(rp->flags & IORESOURCE_IO)) 570 if (!(rp->flags & IORESOURCE_IO))
572 return -ENXIO; 571 return -ENXIO;
573 if (roffset < rp->start || (roffset + size) > rp->end) 572 if (roffset < rp->start || (roffset + size) > rp->end)
574 return -ENXIO; 573 return -ENXIO;
575 offset += hose->io_base_phys; 574 offset += hose->io_base_phys;
576 } 575 }
577 pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset); 576 pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);
578 577
579 vma->vm_pgoff = offset >> PAGE_SHIFT; 578 vma->vm_pgoff = offset >> PAGE_SHIFT;
580 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 579 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
581 return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, 580 return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
582 vma->vm_end - vma->vm_start, 581 vma->vm_end - vma->vm_start,
583 vma->vm_page_prot); 582 vma->vm_page_prot);
584 } 583 }
585 584
586 void pci_resource_to_user(const struct pci_dev *dev, int bar, 585 void pci_resource_to_user(const struct pci_dev *dev, int bar,
587 const struct resource *rsrc, 586 const struct resource *rsrc,
588 resource_size_t *start, resource_size_t *end) 587 resource_size_t *start, resource_size_t *end)
589 { 588 {
590 struct pci_controller *hose = pci_bus_to_host(dev->bus); 589 struct pci_controller *hose = pci_bus_to_host(dev->bus);
591 resource_size_t offset = 0; 590 resource_size_t offset = 0;
592 591
593 if (hose == NULL) 592 if (hose == NULL)
594 return; 593 return;
595 594
596 if (rsrc->flags & IORESOURCE_IO) 595 if (rsrc->flags & IORESOURCE_IO)
597 offset = (unsigned long)hose->io_base_virt - _IO_BASE; 596 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
598 597
599 /* We pass a fully fixed up address to userland for MMIO instead of 598 /* We pass a fully fixed up address to userland for MMIO instead of
600 * a BAR value because X is lame and expects to be able to use that 599 * a BAR value because X is lame and expects to be able to use that
601 * to pass to /dev/mem ! 600 * to pass to /dev/mem !
602 * 601 *
603 * That means that we'll have potentially 64 bits values where some 602 * That means that we'll have potentially 64 bits values where some
604 * userland apps only expect 32 (like X itself since it thinks only 603 * userland apps only expect 32 (like X itself since it thinks only
605 * Sparc has 64 bits MMIO) but if we don't do that, we break it on 604 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
606 * 32 bits CHRPs :-( 605 * 32 bits CHRPs :-(
607 * 606 *
608 * Hopefully, the sysfs interface is immune to that gunk. Once X 607 * Hopefully, the sysfs interface is immune to that gunk. Once X
609 * has been fixed (and the fix spread enough), we can re-enable the 608 * has been fixed (and the fix spread enough), we can re-enable the
610 * 2 lines below and pass down a BAR value to userland. In that case 609 * 2 lines below and pass down a BAR value to userland. In that case
611 * we'll also have to re-enable the matching code in 610 * we'll also have to re-enable the matching code in
612 * __pci_mmap_make_offset(). 611 * __pci_mmap_make_offset().
613 * 612 *
614 * BenH. 613 * BenH.
615 */ 614 */
616 #if 0 615 #if 0
617 else if (rsrc->flags & IORESOURCE_MEM) 616 else if (rsrc->flags & IORESOURCE_MEM)
618 offset = hose->pci_mem_offset; 617 offset = hose->pci_mem_offset;
619 #endif 618 #endif
620 619
621 *start = rsrc->start - offset; 620 *start = rsrc->start - offset;
622 *end = rsrc->end - offset; 621 *end = rsrc->end - offset;
623 } 622 }
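The start/end values fixed up here are what userland reads back from the sysfs resource file. A sketch of parsing it, with the device path assumed for illustration:

#include <stdio.h>

int main(void)
{
        unsigned long long start, end, flags;
        FILE *f = fopen("/sys/bus/pci/devices/0000:00:0a.0/resource", "r");

        if (!f)
                return 1;
        /* one "start end flags" line per BAR, in hex */
        while (fscanf(f, "%llx %llx %llx", &start, &end, &flags) == 3)
                printf("res %#llx-%#llx [%#llx]\n", start, end, flags);
        fclose(f);
        return 0;
}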
624 623
625 /** 624 /**
626 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree 625 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
627 * @hose: newly allocated pci_controller to be setup 626 * @hose: newly allocated pci_controller to be setup
628 * @dev: device node of the host bridge 627 * @dev: device node of the host bridge
629 * @primary: set if primary bus (32 bits only, soon to be deprecated) 628 * @primary: set if primary bus (32 bits only, soon to be deprecated)
630 * 629 *
631 * This function will parse the "ranges" property of a PCI host bridge device 630 * This function will parse the "ranges" property of a PCI host bridge device
632 * node and setup the resource mapping of a pci controller based on its 631 * node and setup the resource mapping of a pci controller based on its
633 * content. 632 * content.
634 * 633 *
635 * Life would be boring if it wasn't for a few issues that we have to deal 634 * Life would be boring if it wasn't for a few issues that we have to deal
636 * with here: 635 * with here:
637 * 636 *
638 * - We can only cope with one IO space range and up to 3 Memory space 637 * - We can only cope with one IO space range and up to 3 Memory space
639 * ranges. However, some machines (thanks Apple !) tend to split their 638 * ranges. However, some machines (thanks Apple !) tend to split their
640 * space into lots of small contiguous ranges. So we have to coalesce. 639 * space into lots of small contiguous ranges. So we have to coalesce.
641 * 640 *
642 * - We can only cope with all memory ranges having the same offset 641 * - We can only cope with all memory ranges having the same offset
643 * between CPU addresses and PCI addresses. Unfortunately, some bridges 642 * between CPU addresses and PCI addresses. Unfortunately, some bridges
644 * are setup for a large 1:1 mapping along with a small "window" which 643 * are setup for a large 1:1 mapping along with a small "window" which
645 * maps PCI address 0 to some arbitrary high address of the CPU space in 644 * maps PCI address 0 to some arbitrary high address of the CPU space in
646 * order to give access to the ISA memory hole. 645 * order to give access to the ISA memory hole.
647 * The way out of here that I've chosen for now is to always set the 646 * The way out of here that I've chosen for now is to always set the
648 * offset based on the first resource found, then override it if we 647 * offset based on the first resource found, then override it if we
649 * have a different offset and the previous was set by an ISA hole. 648 * have a different offset and the previous was set by an ISA hole.
650 * 649 *
651 * - Some busses have IO space not starting at 0, which causes trouble with 650 * - Some busses have IO space not starting at 0, which causes trouble with
652 * the way we do our IO resource renumbering. The code somewhat deals with 651 * the way we do our IO resource renumbering. The code somewhat deals with
653 * it for 64 bits but I would expect problems on 32 bits. 652 * it for 64 bits but I would expect problems on 32 bits.
654 * 653 *
655 * - Some 32 bits platforms such as 4xx can have physical space larger than 654 * - Some 32 bits platforms such as 4xx can have physical space larger than
656 * 32 bits so we need to use 64 bits values for the parsing 655 * 32 bits so we need to use 64 bits values for the parsing
657 */ 656 */
658 void pci_process_bridge_OF_ranges(struct pci_controller *hose, 657 void pci_process_bridge_OF_ranges(struct pci_controller *hose,
659 struct device_node *dev, int primary) 658 struct device_node *dev, int primary)
660 { 659 {
661 const u32 *ranges; 660 const u32 *ranges;
662 int rlen; 661 int rlen;
663 int pna = of_n_addr_cells(dev); 662 int pna = of_n_addr_cells(dev);
664 int np = pna + 5; 663 int np = pna + 5;
665 int memno = 0, isa_hole = -1; 664 int memno = 0, isa_hole = -1;
666 u32 pci_space; 665 u32 pci_space;
667 unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size; 666 unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
668 unsigned long long isa_mb = 0; 667 unsigned long long isa_mb = 0;
669 struct resource *res; 668 struct resource *res;
670 669
671 printk(KERN_INFO "PCI host bridge %s %s ranges:\n", 670 pr_info("PCI host bridge %s %s ranges:\n",
672 dev->full_name, primary ? "(primary)" : ""); 671 dev->full_name, primary ? "(primary)" : "");
673 672
674 /* Get ranges property */ 673 /* Get ranges property */
675 ranges = of_get_property(dev, "ranges", &rlen); 674 ranges = of_get_property(dev, "ranges", &rlen);
676 if (ranges == NULL) 675 if (ranges == NULL)
677 return; 676 return;
678 677
679 /* Parse it */ 678 /* Parse it */
680 pr_debug("Parsing ranges property...\n"); 679 pr_debug("Parsing ranges property...\n");
681 while ((rlen -= np * 4) >= 0) { 680 while ((rlen -= np * 4) >= 0) {
682 /* Read next ranges element */ 681 /* Read next ranges element */
683 pci_space = ranges[0]; 682 pci_space = ranges[0];
684 pci_addr = of_read_number(ranges + 1, 2); 683 pci_addr = of_read_number(ranges + 1, 2);
685 cpu_addr = of_translate_address(dev, ranges + 3); 684 cpu_addr = of_translate_address(dev, ranges + 3);
686 size = of_read_number(ranges + pna + 3, 2); 685 size = of_read_number(ranges + pna + 3, 2);
687 686
688 pr_debug("pci_space: 0x%08x pci_addr:0x%016llx " 687 pr_debug("pci_space: 0x%08x pci_addr:0x%016llx ",
689 "cpu_addr:0x%016llx size:0x%016llx\n", 688 pci_space, pci_addr);
690 pci_space, pci_addr, cpu_addr, size); 689 pr_debug("cpu_addr:0x%016llx size:0x%016llx\n",
690 cpu_addr, size);
691 691
692 ranges += np; 692 ranges += np;
693 693
694 /* If we failed translation or got a zero-sized region 694 /* If we failed translation or got a zero-sized region
695 * (some FW try to feed us nonsensical zero-sized regions 695 * (some FW try to feed us nonsensical zero-sized regions
696 * such as power3 which look like some kind of attempt 696 * such as power3 which look like some kind of attempt
697 * at exposing the VGA memory hole) 697 * at exposing the VGA memory hole)
698 */ 698 */
699 if (cpu_addr == OF_BAD_ADDR || size == 0) 699 if (cpu_addr == OF_BAD_ADDR || size == 0)
700 continue; 700 continue;
701 701
702 /* Now consume following elements while they are contiguous */ 702 /* Now consume following elements while they are contiguous */
703 for (; rlen >= np * sizeof(u32); 703 for (; rlen >= np * sizeof(u32);
704 ranges += np, rlen -= np * 4) { 704 ranges += np, rlen -= np * 4) {
705 if (ranges[0] != pci_space) 705 if (ranges[0] != pci_space)
706 break; 706 break;
707 pci_next = of_read_number(ranges + 1, 2); 707 pci_next = of_read_number(ranges + 1, 2);
708 cpu_next = of_translate_address(dev, ranges + 3); 708 cpu_next = of_translate_address(dev, ranges + 3);
709 if (pci_next != pci_addr + size || 709 if (pci_next != pci_addr + size ||
710 cpu_next != cpu_addr + size) 710 cpu_next != cpu_addr + size)
711 break; 711 break;
712 size += of_read_number(ranges + pna + 3, 2); 712 size += of_read_number(ranges + pna + 3, 2);
713 } 713 }
714 714
715 /* Act based on address space type */ 715 /* Act based on address space type */
716 res = NULL; 716 res = NULL;
717 switch ((pci_space >> 24) & 0x3) { 717 switch ((pci_space >> 24) & 0x3) {
718 case 1: /* PCI IO space */ 718 case 1: /* PCI IO space */
719 printk(KERN_INFO 719 pr_info(" IO 0x%016llx..0x%016llx -> 0x%016llx\n",
720 " IO 0x%016llx..0x%016llx -> 0x%016llx\n",
721 cpu_addr, cpu_addr + size - 1, pci_addr); 720 cpu_addr, cpu_addr + size - 1, pci_addr);
722 721
723 /* We support only one IO range */ 722 /* We support only one IO range */
724 if (hose->pci_io_size) { 723 if (hose->pci_io_size) {
725 printk(KERN_INFO 724 pr_info(" \\--> Skipped (too many) !\n");
726 " \\--> Skipped (too many) !\n");
727 continue; 725 continue;
728 } 726 }
729 /* On 32 bits, limit I/O space to 16MB */ 727 /* On 32 bits, limit I/O space to 16MB */
730 if (size > 0x01000000) 728 if (size > 0x01000000)
731 size = 0x01000000; 729 size = 0x01000000;
732 730
733 /* 32 bits needs to map IOs here */ 731 /* 32 bits needs to map IOs here */
734 hose->io_base_virt = ioremap(cpu_addr, size); 732 hose->io_base_virt = ioremap(cpu_addr, size);
735 733
736 /* Expect trouble if pci_addr is not 0 */ 734 /* Expect trouble if pci_addr is not 0 */
737 if (primary) 735 if (primary)
738 isa_io_base = 736 isa_io_base =
739 (unsigned long)hose->io_base_virt; 737 (unsigned long)hose->io_base_virt;
740 /* pci_io_size and io_base_phys always represent IO 738 /* pci_io_size and io_base_phys always represent IO
741 * space starting at 0 so we factor in pci_addr 739 * space starting at 0 so we factor in pci_addr
742 */ 740 */
743 hose->pci_io_size = pci_addr + size; 741 hose->pci_io_size = pci_addr + size;
744 hose->io_base_phys = cpu_addr - pci_addr; 742 hose->io_base_phys = cpu_addr - pci_addr;
745 743
746 /* Build resource */ 744 /* Build resource */
747 res = &hose->io_resource; 745 res = &hose->io_resource;
748 res->flags = IORESOURCE_IO; 746 res->flags = IORESOURCE_IO;
749 res->start = pci_addr; 747 res->start = pci_addr;
750 break; 748 break;
751 case 2: /* PCI Memory space */ 749 case 2: /* PCI Memory space */
752 case 3: /* PCI 64 bits Memory space */ 750 case 3: /* PCI 64 bits Memory space */
753 printk(KERN_INFO 751 pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
754 " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
755 cpu_addr, cpu_addr + size - 1, pci_addr, 752 cpu_addr, cpu_addr + size - 1, pci_addr,
756 (pci_space & 0x40000000) ? "Prefetch" : ""); 753 (pci_space & 0x40000000) ? "Prefetch" : "");
757 754
758 /* We support only 3 memory ranges */ 755 /* We support only 3 memory ranges */
759 if (memno >= 3) { 756 if (memno >= 3) {
760 printk(KERN_INFO 757 pr_info(" \\--> Skipped (too many) !\n");
761 " \\--> Skipped (too many) !\n");
762 continue; 758 continue;
763 } 759 }
764 /* Handles ISA memory hole space here */ 760 /* Handles ISA memory hole space here */
765 if (pci_addr == 0) { 761 if (pci_addr == 0) {
766 isa_mb = cpu_addr; 762 isa_mb = cpu_addr;
767 isa_hole = memno; 763 isa_hole = memno;
768 if (primary || isa_mem_base == 0) 764 if (primary || isa_mem_base == 0)
769 isa_mem_base = cpu_addr; 765 isa_mem_base = cpu_addr;
770 hose->isa_mem_phys = cpu_addr; 766 hose->isa_mem_phys = cpu_addr;
771 hose->isa_mem_size = size; 767 hose->isa_mem_size = size;
772 } 768 }
773 769
774 /* We get the PCI/Mem offset from the first range or 770 /* We get the PCI/Mem offset from the first range or
775 * the current one if the offset came from an ISA 771 * the current one if the offset came from an ISA
776 * hole. If they don't match, bugger. 772 * hole. If they don't match, bugger.
777 */ 773 */
778 if (memno == 0 || 774 if (memno == 0 ||
779 (isa_hole >= 0 && pci_addr != 0 && 775 (isa_hole >= 0 && pci_addr != 0 &&
780 hose->pci_mem_offset == isa_mb)) 776 hose->pci_mem_offset == isa_mb))
781 hose->pci_mem_offset = cpu_addr - pci_addr; 777 hose->pci_mem_offset = cpu_addr - pci_addr;
782 else if (pci_addr != 0 && 778 else if (pci_addr != 0 &&
783 hose->pci_mem_offset != cpu_addr - pci_addr) { 779 hose->pci_mem_offset != cpu_addr - pci_addr) {
784 printk(KERN_INFO 780 pr_info(" \\--> Skipped (offset mismatch) !\n");
785 " \\--> Skipped (offset mismatch) !\n");
786 continue; 781 continue;
787 } 782 }
788 783
789 /* Build resource */ 784 /* Build resource */
790 res = &hose->mem_resources[memno++]; 785 res = &hose->mem_resources[memno++];
791 res->flags = IORESOURCE_MEM; 786 res->flags = IORESOURCE_MEM;
792 if (pci_space & 0x40000000) 787 if (pci_space & 0x40000000)
793 res->flags |= IORESOURCE_PREFETCH; 788 res->flags |= IORESOURCE_PREFETCH;
794 res->start = cpu_addr; 789 res->start = cpu_addr;
795 break; 790 break;
796 } 791 }
797 if (res != NULL) { 792 if (res != NULL) {
798 res->name = dev->full_name; 793 res->name = dev->full_name;
799 res->end = res->start + size - 1; 794 res->end = res->start + size - 1;
800 res->parent = NULL; 795 res->parent = NULL;
801 res->sibling = NULL; 796 res->sibling = NULL;
802 res->child = NULL; 797 res->child = NULL;
803 } 798 }
804 } 799 }
805 800
806 /* If there's an ISA hole and the pci_mem_offset is -not- matching 801 /* If there's an ISA hole and the pci_mem_offset is -not- matching
807 * the ISA hole offset, then we need to remove the ISA hole from 802 * the ISA hole offset, then we need to remove the ISA hole from
808 * the resource list for that bridge 803 * the resource list for that bridge
809 */ 804 */
810 if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) { 805 if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) {
811 unsigned int next = isa_hole + 1; 806 unsigned int next = isa_hole + 1;
812 printk(KERN_INFO " Removing ISA hole at 0x%016llx\n", isa_mb); 807 pr_info(" Removing ISA hole at 0x%016llx\n", isa_mb);
813 if (next < memno) 808 if (next < memno)
814 memmove(&hose->mem_resources[isa_hole], 809 memmove(&hose->mem_resources[isa_hole],
815 &hose->mem_resources[next], 810 &hose->mem_resources[next],
816 sizeof(struct resource) * (memno - next)); 811 sizeof(struct resource) * (memno - next));
817 hose->mem_resources[--memno].flags = 0; 812 hose->mem_resources[--memno].flags = 0;
818 } 813 }
819 } 814 }
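As a hedged illustration of the cell layout this parser walks (np = pna + 5 cells per entry; pna == 1 assumed), a ranges property with one IO and one memory window might decode like this; the addresses are invented, not from any real device tree:

/* 6 cells per entry: space, PCI addr (hi,lo), CPU addr, size (hi,lo) */
static const u32 example_ranges[] = {
        /* 64K of IO space at CPU 0xe8000000, PCI IO address 0 */
        0x01000000, 0x00000000, 0x00000000, 0xe8000000, 0x00000000, 0x00010000,
        /* 512MB of 1:1 memory space at 0x80000000 */
        0x02000000, 0x00000000, 0x80000000, 0x80000000, 0x00000000, 0x20000000,
};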
820 815
821 /* Decide whether to display the domain number in /proc */ 816 /* Decide whether to display the domain number in /proc */
822 int pci_proc_domain(struct pci_bus *bus) 817 int pci_proc_domain(struct pci_bus *bus)
823 { 818 {
824 return 0; 819 return 0;
825 } 820 }
826 821
827 /* This header fixup will do the resource fixup for all devices as they are 822 /* This header fixup will do the resource fixup for all devices as they are
828 * probed, but not for bridge ranges 823 * probed, but not for bridge ranges
829 */ 824 */
830 static void pcibios_fixup_resources(struct pci_dev *dev) 825 static void pcibios_fixup_resources(struct pci_dev *dev)
831 { 826 {
832 struct pci_controller *hose = pci_bus_to_host(dev->bus); 827 struct pci_controller *hose = pci_bus_to_host(dev->bus);
833 int i; 828 int i;
834 829
835 if (!hose) { 830 if (!hose) {
836 printk(KERN_ERR "No host bridge for PCI dev %s !\n", 831 pr_err("No host bridge for PCI dev %s !\n",
837 pci_name(dev)); 832 pci_name(dev));
838 return; 833 return;
839 } 834 }
840 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 835 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
841 struct resource *res = dev->resource + i; 836 struct resource *res = dev->resource + i;
842 if (!res->flags) 837 if (!res->flags)
843 continue; 838 continue;
844 if (res->start == 0) { 839 if (res->start == 0) {
845 pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]" \ 840 pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]",
846 "is unassigned\n",
847 pci_name(dev), i, 841 pci_name(dev), i,
848 (unsigned long long)res->start, 842 (unsigned long long)res->start,
849 (unsigned long long)res->end, 843 (unsigned long long)res->end,
850 (unsigned int)res->flags); 844 (unsigned int)res->flags);
845 pr_debug("is unassigned\n");
851 res->end -= res->start; 846 res->end -= res->start;
852 res->start = 0; 847 res->start = 0;
853 res->flags |= IORESOURCE_UNSET; 848 res->flags |= IORESOURCE_UNSET;
854 continue; 849 continue;
855 } 850 }
856 851
857 pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n", 852 pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n",
858 pci_name(dev), i, 853 pci_name(dev), i,
859 (unsigned long long)res->start,\ 854 (unsigned long long)res->start,
860 (unsigned long long)res->end, 855 (unsigned long long)res->end,
861 (unsigned int)res->flags); 856 (unsigned int)res->flags);
862 } 857 }
863 } 858 }
864 DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources); 859 DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
865 860
866 /* This function tries to figure out if a bridge resource has been initialized 861 /* This function tries to figure out if a bridge resource has been initialized
867 * by the firmware or not. It doesn't have to be absolutely bullet proof, but 862 * by the firmware or not. It doesn't have to be absolutely bullet proof, but
868 * things go more smoothly when it gets it right. It should cover cases such 863 * things go more smoothly when it gets it right. It should cover cases such
869 * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges 864 * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges
870 */ 865 */
871 static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus, 866 static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
872 struct resource *res) 867 struct resource *res)
873 { 868 {
874 struct pci_controller *hose = pci_bus_to_host(bus); 869 struct pci_controller *hose = pci_bus_to_host(bus);
875 struct pci_dev *dev = bus->self; 870 struct pci_dev *dev = bus->self;
876 resource_size_t offset; 871 resource_size_t offset;
877 u16 command; 872 u16 command;
878 int i; 873 int i;
879 874
880 /* Job is a bit different between memory and IO */ 875 /* Job is a bit different between memory and IO */
881 if (res->flags & IORESOURCE_MEM) { 876 if (res->flags & IORESOURCE_MEM) {
882 /* If the BAR is non-0 (res != pci_mem_offset) then it's 877 /* If the BAR is non-0 (res != pci_mem_offset) then it's
883 * probably been initialized by somebody 878 * probably been initialized by somebody
884 */ 879 */
885 if (res->start != hose->pci_mem_offset) 880 if (res->start != hose->pci_mem_offset)
886 return 0; 881 return 0;
887 882
888 /* The BAR is 0, let's check if memory decoding is enabled on 883 /* The BAR is 0, let's check if memory decoding is enabled on
889 * the bridge. If not, we consider it unassigned 884 * the bridge. If not, we consider it unassigned
890 */ 885 */
891 pci_read_config_word(dev, PCI_COMMAND, &command); 886 pci_read_config_word(dev, PCI_COMMAND, &command);
892 if ((command & PCI_COMMAND_MEMORY) == 0) 887 if ((command & PCI_COMMAND_MEMORY) == 0)
893 return 1; 888 return 1;
894 889
895 /* Memory decoding is enabled and the BAR is 0. If any of 890 /* Memory decoding is enabled and the BAR is 0. If any of
896 * the bridge resources covers that starting address (0) then 891 * the bridge resources covers that starting address (0) then
897 * it's good enough for us for memory 892 * it's good enough for us for memory
898 */ 893 */
899 for (i = 0; i < 3; i++) { 894 for (i = 0; i < 3; i++) {
900 if ((hose->mem_resources[i].flags & IORESOURCE_MEM) && 895 if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
901 hose->mem_resources[i].start == hose->pci_mem_offset) 896 hose->mem_resources[i].start == hose->pci_mem_offset)
902 return 0; 897 return 0;
903 } 898 }
904 899
905 /* Well, it starts at 0 and we know it will collide so we may as 900 /* Well, it starts at 0 and we know it will collide so we may as
906 * well consider it as unassigned. That covers the Apple case. 901 * well consider it as unassigned. That covers the Apple case.
907 */ 902 */
908 return 1; 903 return 1;
909 } else { 904 } else {
910 /* If the BAR is non-0, then we consider it assigned */ 905 /* If the BAR is non-0, then we consider it assigned */
911 offset = (unsigned long)hose->io_base_virt - _IO_BASE; 906 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
912 if (((res->start - offset) & 0xfffffffful) != 0) 907 if (((res->start - offset) & 0xfffffffful) != 0)
913 return 0; 908 return 0;
914 909
915 /* Here, we are a bit different than memory as typically IO 910 /* Here, we are a bit different than memory as typically IO
916 * space starting at low addresses -is- valid. What we do 911 * space starting at low addresses -is- valid. What we do
917 * instead is that we consider as unassigned anything that 912 * instead is that we consider as unassigned anything that
918 * doesn't have IO enabled in the PCI command register, 913 * doesn't have IO enabled in the PCI command register,
919 * and that's it. 914 * and that's it.
920 */ 915 */
921 pci_read_config_word(dev, PCI_COMMAND, &command); 916 pci_read_config_word(dev, PCI_COMMAND, &command);
922 if (command & PCI_COMMAND_IO) 917 if (command & PCI_COMMAND_IO)
923 return 0; 918 return 0;
924 919
925 /* It's starting at 0 and IO is disabled in the bridge, consider 920 /* It's starting at 0 and IO is disabled in the bridge, consider
926 * it unassigned 921 * it unassigned
927 */ 922 */
928 return 1; 923 return 1;
929 } 924 }
930 } 925 }
931 926
932 /* Fixup resources of a PCI<->PCI bridge */ 927 /* Fixup resources of a PCI<->PCI bridge */
933 static void pcibios_fixup_bridge(struct pci_bus *bus) 928 static void pcibios_fixup_bridge(struct pci_bus *bus)
934 { 929 {
935 struct resource *res; 930 struct resource *res;
936 int i; 931 int i;
937 932
938 struct pci_dev *dev = bus->self; 933 struct pci_dev *dev = bus->self;
939 934
940 pci_bus_for_each_resource(bus, res, i) { 935 pci_bus_for_each_resource(bus, res, i) {
941 if (!res) 936 if (!res)
942 continue; 937 continue;
943 if (!res->flags) 938 if (!res->flags)
944 continue; 939 continue;
945 if (i >= 3 && bus->self->transparent) 940 if (i >= 3 && bus->self->transparent)
946 continue; 941 continue;
947 942
948 pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n", 943 pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n",
949 pci_name(dev), i, 944 pci_name(dev), i,
950 (unsigned long long)res->start,\ 945 (unsigned long long)res->start,
951 (unsigned long long)res->end, 946 (unsigned long long)res->end,
952 (unsigned int)res->flags); 947 (unsigned int)res->flags);
953 948
954 /* Try to detect uninitialized P2P bridge resources, 949 /* Try to detect uninitialized P2P bridge resources,
955 * and clear them out so they get re-assigned later 950 * and clear them out so they get re-assigned later
956 */ 951 */
957 if (pcibios_uninitialized_bridge_resource(bus, res)) { 952 if (pcibios_uninitialized_bridge_resource(bus, res)) {
958 res->flags = 0; 953 res->flags = 0;
959 pr_debug("PCI:%s (unassigned)\n", 954 pr_debug("PCI:%s (unassigned)\n",
960 pci_name(dev)); 955 pci_name(dev));
961 } else { 956 } else {
962 pr_debug("PCI:%s %016llx-%016llx\n", 957 pr_debug("PCI:%s %016llx-%016llx\n",
963 pci_name(dev), 958 pci_name(dev),
964 (unsigned long long)res->start, 959 (unsigned long long)res->start,
965 (unsigned long long)res->end); 960 (unsigned long long)res->end);
966 } 961 }
967 } 962 }
968 } 963 }
969 964
970 void pcibios_setup_bus_self(struct pci_bus *bus) 965 void pcibios_setup_bus_self(struct pci_bus *bus)
971 { 966 {
972 /* Fix up the bus resources for P2P bridges */ 967 /* Fix up the bus resources for P2P bridges */
973 if (bus->self != NULL) 968 if (bus->self != NULL)
974 pcibios_fixup_bridge(bus); 969 pcibios_fixup_bridge(bus);
975 } 970 }
976 971
977 void pcibios_setup_bus_devices(struct pci_bus *bus) 972 void pcibios_setup_bus_devices(struct pci_bus *bus)
978 { 973 {
979 struct pci_dev *dev; 974 struct pci_dev *dev;
980 975
981 pr_debug("PCI: Fixup bus devices %d (%s)\n", 976 pr_debug("PCI: Fixup bus devices %d (%s)\n",
982 bus->number, bus->self ? pci_name(bus->self) : "PHB"); 977 bus->number, bus->self ? pci_name(bus->self) : "PHB");
983 978
984 list_for_each_entry(dev, &bus->devices, bus_list) { 979 list_for_each_entry(dev, &bus->devices, bus_list) {
985 /* Setup OF node pointer in archdata */ 980 /* Setup OF node pointer in archdata */
986 dev->dev.of_node = pci_device_to_OF_node(dev); 981 dev->dev.of_node = pci_device_to_OF_node(dev);
987 982
988 /* Fixup NUMA node as it may not be setup yet by the generic 983 /* Fixup NUMA node as it may not be setup yet by the generic
989 * code and is needed by the DMA init 984 * code and is needed by the DMA init
990 */ 985 */
991 set_dev_node(&dev->dev, pcibus_to_node(dev->bus)); 986 set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
992 987
993 /* Hook up default DMA ops */ 988 /* Hook up default DMA ops */
994 set_dma_ops(&dev->dev, pci_dma_ops); 989 set_dma_ops(&dev->dev, pci_dma_ops);
995 dev->dev.archdata.dma_data = (void *)PCI_DRAM_OFFSET; 990 dev->dev.archdata.dma_data = (void *)PCI_DRAM_OFFSET;
996 991
997 /* Read default IRQs and fixup if necessary */ 992 /* Read default IRQs and fixup if necessary */
998 pci_read_irq_line(dev); 993 pci_read_irq_line(dev);
999 } 994 }
1000 } 995 }
1001 996
1002 void pcibios_fixup_bus(struct pci_bus *bus) 997 void pcibios_fixup_bus(struct pci_bus *bus)
1003 { 998 {
1004 /* When called from the generic PCI probe, read PCI<->PCI bridge 999 /* When called from the generic PCI probe, read PCI<->PCI bridge
1005 * bases. This is -not- called when generating the PCI tree from 1000 * bases. This is -not- called when generating the PCI tree from
1006 * the OF device-tree. 1001 * the OF device-tree.
1007 */ 1002 */
1008 if (bus->self != NULL) 1003 if (bus->self != NULL)
1009 pci_read_bridge_bases(bus); 1004 pci_read_bridge_bases(bus);
1010 1005
1011 /* Now fixup the bus itself */ 1006 /* Now fixup the bus itself */
1012 pcibios_setup_bus_self(bus); 1007 pcibios_setup_bus_self(bus);
1013 1008
1014 /* Now fixup devices on that bus */ 1009 /* Now fixup devices on that bus */
1015 pcibios_setup_bus_devices(bus); 1010 pcibios_setup_bus_devices(bus);
1016 } 1011 }
1017 EXPORT_SYMBOL(pcibios_fixup_bus); 1012 EXPORT_SYMBOL(pcibios_fixup_bus);
1018 1013
1019 static int skip_isa_ioresource_align(struct pci_dev *dev) 1014 static int skip_isa_ioresource_align(struct pci_dev *dev)
1020 { 1015 {
1021 return 0; 1016 return 0;
1022 } 1017 }
1023 1018
1024 /* 1019 /*
1025 * We need to avoid collisions with `mirrored' VGA ports 1020 * We need to avoid collisions with `mirrored' VGA ports
1026 * and other strange ISA hardware, so we always want the 1021 * and other strange ISA hardware, so we always want the
1027 * addresses to be allocated in the 0x000-0x0ff region 1022 * addresses to be allocated in the 0x000-0x0ff region
1028 * modulo 0x400. 1023 * modulo 0x400.
1029 * 1024 *
1030 * Why? Because some silly external IO cards only decode 1025 * Why? Because some silly external IO cards only decode
1031 * the low 10 bits of the IO address. The 0x00-0xff region 1026 * the low 10 bits of the IO address. The 0x00-0xff region
1032 * is reserved for motherboard devices that decode all 16 1027 * is reserved for motherboard devices that decode all 16
1033 * bits, so it's ok to allocate at, say, 0x2800-0x28ff, 1028 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
1034 * but we want to try to avoid allocating at 0x2900-0x2bff 1029 * but we want to try to avoid allocating at 0x2900-0x2bff
1035 * which might have been mirrored at 0x0100-0x03ff. 1030 * which might have been mirrored at 0x0100-0x03ff.
1036 */ 1031 */
1037 resource_size_t pcibios_align_resource(void *data, const struct resource *res, 1032 resource_size_t pcibios_align_resource(void *data, const struct resource *res,
1038 resource_size_t size, resource_size_t align) 1033 resource_size_t size, resource_size_t align)
1039 { 1034 {
1040 struct pci_dev *dev = data; 1035 struct pci_dev *dev = data;
1041 resource_size_t start = res->start; 1036 resource_size_t start = res->start;
1042 1037
1043 if (res->flags & IORESOURCE_IO) { 1038 if (res->flags & IORESOURCE_IO) {
1044 if (skip_isa_ioresource_align(dev)) 1039 if (skip_isa_ioresource_align(dev))
1045 return start; 1040 return start;
1046 if (start & 0x300) 1041 if (start & 0x300)
1047 start = (start + 0x3ff) & ~0x3ff; 1042 start = (start + 0x3ff) & ~0x3ff;
1048 } 1043 }
1049 1044
1050 return start; 1045 return start;
1051 } 1046 }
1052 EXPORT_SYMBOL(pcibios_align_resource); 1047 EXPORT_SYMBOL(pcibios_align_resource);
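A quick worked example of the rounding above:

/* A start of 0x2900 touches the mirrored 0x100-0x3ff window
 * (0x2900 & 0x300 != 0), so it is bumped to the next 0x400 boundary:
 *
 *      (0x2900 + 0x3ff) & ~0x3ff == 0x2c00
 *
 * 0x2800 passes through untouched since 0x2800 & 0x300 == 0. */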
1053 1048
1054 /* 1049 /*
1055 * Reparent resource children of pr that conflict with res 1050 * Reparent resource children of pr that conflict with res
1056 * under res, and make res replace those children. 1051 * under res, and make res replace those children.
1057 */ 1052 */
1058 static int __init reparent_resources(struct resource *parent, 1053 static int __init reparent_resources(struct resource *parent,
1059 struct resource *res) 1054 struct resource *res)
1060 { 1055 {
1061 struct resource *p, **pp; 1056 struct resource *p, **pp;
1062 struct resource **firstpp = NULL; 1057 struct resource **firstpp = NULL;
1063 1058
1064 for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) { 1059 for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
1065 if (p->end < res->start) 1060 if (p->end < res->start)
1066 continue; 1061 continue;
1067 if (res->end < p->start) 1062 if (res->end < p->start)
1068 break; 1063 break;
1069 if (p->start < res->start || p->end > res->end) 1064 if (p->start < res->start || p->end > res->end)
1070 return -1; /* not completely contained */ 1065 return -1; /* not completely contained */
1071 if (firstpp == NULL) 1066 if (firstpp == NULL)
1072 firstpp = pp; 1067 firstpp = pp;
1073 } 1068 }
1074 if (firstpp == NULL) 1069 if (firstpp == NULL)
1075 return -1; /* didn't find any conflicting entries? */ 1070 return -1; /* didn't find any conflicting entries? */
1076 res->parent = parent; 1071 res->parent = parent;
1077 res->child = *firstpp; 1072 res->child = *firstpp;
1078 res->sibling = *pp; 1073 res->sibling = *pp;
1079 *firstpp = res; 1074 *firstpp = res;
1080 *pp = NULL; 1075 *pp = NULL;
1081 for (p = res->child; p != NULL; p = p->sibling) { 1076 for (p = res->child; p != NULL; p = p->sibling) {
1082 p->parent = res; 1077 p->parent = res;
1083 pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n", 1078 pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
1084 p->name, 1079 p->name,
1085 (unsigned long long)p->start, 1080 (unsigned long long)p->start,
1086 (unsigned long long)p->end, res->name); 1081 (unsigned long long)p->end, res->name);
1087 } 1082 }
1088 return 0; 1083 return 0;
1089 } 1084 }
1090 1085
1091 /* 1086 /*
1092 * Handle resources of PCI devices. If the world were perfect, we could 1087 * Handle resources of PCI devices. If the world were perfect, we could
1093 * just allocate all the resource regions and do nothing more. It isn't. 1088 * just allocate all the resource regions and do nothing more. It isn't.
1094 * On the other hand, we cannot just re-allocate all devices, as it would 1089 * On the other hand, we cannot just re-allocate all devices, as it would
1095 * require us to know lots of host bridge internals. So we attempt to 1090 * require us to know lots of host bridge internals. So we attempt to
1096 * keep as much of the original configuration as possible, but tweak it 1091 * keep as much of the original configuration as possible, but tweak it
1097 * when it's found to be wrong. 1092 * when it's found to be wrong.
1098 * 1093 *
1099 * Known BIOS problems we have to work around: 1094 * Known BIOS problems we have to work around:
1100 * - I/O or memory regions not configured 1095 * - I/O or memory regions not configured
1101 * - regions configured, but not enabled in the command register 1096 * - regions configured, but not enabled in the command register
1102 * - bogus I/O addresses above 64K used 1097 * - bogus I/O addresses above 64K used
1103 * - expansion ROMs left enabled (this may sound harmless, but given 1098 * - expansion ROMs left enabled (this may sound harmless, but given
1104 * the fact the PCI specs explicitly allow address decoders to be 1099 * the fact the PCI specs explicitly allow address decoders to be
1105 * shared between expansion ROMs and other resource regions, it's 1100 * shared between expansion ROMs and other resource regions, it's
1106 * at least dangerous) 1101 * at least dangerous)
1107 * 1102 *
1108 * Our solution: 1103 * Our solution:
1109 * (1) Allocate resources for all buses behind PCI-to-PCI bridges. 1104 * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
1110 * This gives us fixed barriers on where we can allocate. 1105 * This gives us fixed barriers on where we can allocate.
1111 * (2) Allocate resources for all enabled devices. If there is 1106 * (2) Allocate resources for all enabled devices. If there is
1112 * a collision, just mark the resource as unallocated. Also 1107 * a collision, just mark the resource as unallocated. Also
1113 * disable expansion ROMs during this step. 1108 * disable expansion ROMs during this step.
1114 * (3) Try to allocate resources for disabled devices. If the 1109 * (3) Try to allocate resources for disabled devices. If the
1115 * resources were assigned correctly, everything goes well, 1110 * resources were assigned correctly, everything goes well,
1116 * if they weren't, they won't disturb allocation of other 1111 * if they weren't, they won't disturb allocation of other
1117 * resources. 1112 * resources.
1118 * (4) Assign new addresses to resources which were either 1113 * (4) Assign new addresses to resources which were either
1119 * not configured at all or misconfigured. If explicitly 1114 * not configured at all or misconfigured. If explicitly
1120 * requested by the user, configure expansion ROM address 1115 * requested by the user, configure expansion ROM address
1121 * as well. 1116 * as well.
1122 */ 1117 */
1123 1118
1124 static void pcibios_allocate_bus_resources(struct pci_bus *bus) 1119 static void pcibios_allocate_bus_resources(struct pci_bus *bus)
1125 { 1120 {
1126 struct pci_bus *b; 1121 struct pci_bus *b;
1127 int i; 1122 int i;
1128 struct resource *res, *pr; 1123 struct resource *res, *pr;
1129 1124
1130 pr_debug("PCI: Allocating bus resources for %04x:%02x...\n", 1125 pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
1131 pci_domain_nr(bus), bus->number); 1126 pci_domain_nr(bus), bus->number);
1132 1127
1133 pci_bus_for_each_resource(bus, res, i) { 1128 pci_bus_for_each_resource(bus, res, i) {
1134 if (!res || !res->flags 1129 if (!res || !res->flags
1135 || res->start > res->end || res->parent) 1130 || res->start > res->end || res->parent)
1136 continue; 1131 continue;
1137 if (bus->parent == NULL) 1132 if (bus->parent == NULL)
1138 pr = (res->flags & IORESOURCE_IO) ? 1133 pr = (res->flags & IORESOURCE_IO) ?
1139 &ioport_resource : &iomem_resource; 1134 &ioport_resource : &iomem_resource;
1140 else { 1135 else {
1141 /* Don't bother with non-root busses when 1136 /* Don't bother with non-root busses when
1142 * re-assigning all resources. We clear the 1137 * re-assigning all resources. We clear the
1143 * resource flags as if they were colliding 1138 * resource flags as if they were colliding
1144 * and as such ensure proper re-allocation 1139 * and as such ensure proper re-allocation
1145 * later. 1140 * later.
1146 */ 1141 */
1147 pr = pci_find_parent_resource(bus->self, res); 1142 pr = pci_find_parent_resource(bus->self, res);
1148 if (pr == res) { 1143 if (pr == res) {
1149 /* this happens when the generic PCI 1144 /* this happens when the generic PCI
1150 * code (wrongly) decides that this 1145 * code (wrongly) decides that this
1151 * bridge is transparent -- paulus 1146 * bridge is transparent -- paulus
1152 */ 1147 */
1153 continue; 1148 continue;
1154 } 1149 }
1155 } 1150 }
1156 1151
1157 pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx " 1152 pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx ",
1158 "[0x%x], parent %p (%s)\n",
1159 bus->self ? pci_name(bus->self) : "PHB", 1153 bus->self ? pci_name(bus->self) : "PHB",
1160 bus->number, i, 1154 bus->number, i,
1161 (unsigned long long)res->start, 1155 (unsigned long long)res->start,
1162 (unsigned long long)res->end, 1156 (unsigned long long)res->end);
1157 pr_debug("[0x%x], parent %p (%s)\n",
1163 (unsigned int)res->flags, 1158 (unsigned int)res->flags,
1164 pr, (pr && pr->name) ? pr->name : "nil"); 1159 pr, (pr && pr->name) ? pr->name : "nil");
1165 1160
1166 if (pr && !(pr->flags & IORESOURCE_UNSET)) { 1161 if (pr && !(pr->flags & IORESOURCE_UNSET)) {
1167 if (request_resource(pr, res) == 0) 1162 if (request_resource(pr, res) == 0)
1168 continue; 1163 continue;
1169 /* 1164 /*
1170 * Must be a conflict with an existing entry. 1165 * Must be a conflict with an existing entry.
1171 * Move that entry (or entries) under the 1166 * Move that entry (or entries) under the
1172 * bridge resource and try again. 1167 * bridge resource and try again.
1173 */ 1168 */
1174 if (reparent_resources(pr, res) == 0) 1169 if (reparent_resources(pr, res) == 0)
1175 continue; 1170 continue;
1176 } 1171 }
1177 printk(KERN_WARNING "PCI: Cannot allocate resource region " 1172 pr_warn("PCI: Cannot allocate resource region ");
1178 "%d of PCI bridge %d, will remap\n", i, bus->number); 1173 pr_cont("%d of PCI bridge %d, will remap\n", i, bus->number);
1179
1180 res->start = res->end = 0; 1174 res->start = res->end = 0;
1181 res->flags = 0; 1175 res->flags = 0;
1182 } 1176 }
1183 1177
1184 list_for_each_entry(b, &bus->children, node) 1178 list_for_each_entry(b, &bus->children, node)
1185 pcibios_allocate_bus_resources(b); 1179 pcibios_allocate_bus_resources(b);
1186 } 1180 }
1187 1181
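The body of the loop above reduces to a claim-or-reparent-or-clear fallback. A minimal sketch of that fallback, assuming the reparent_resources() helper defined earlier in this file; claim_or_clear() is a hypothetical name used only for illustration:

static int claim_or_clear(struct resource *pr, struct resource *res)
{
	/* Straightforward case: the window fits under its parent. */
	if (request_resource(pr, res) == 0)
		return 0;
	/* Conflict: move the colliding siblings under res and retry. */
	if (reparent_resources(pr, res) == 0)
		return 0;
	/* Still stuck: zero the window so it is re-assigned later. */
	res->start = res->end = 0;
	res->flags = 0;
	return -EBUSY;
}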
1188 static inline void alloc_resource(struct pci_dev *dev, int idx) 1182 static inline void alloc_resource(struct pci_dev *dev, int idx)
1189 { 1183 {
1190 struct resource *pr, *r = &dev->resource[idx]; 1184 struct resource *pr, *r = &dev->resource[idx];
1191 1185
1192 pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n", 1186 pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
1193 pci_name(dev), idx, 1187 pci_name(dev), idx,
1194 (unsigned long long)r->start, 1188 (unsigned long long)r->start,
1195 (unsigned long long)r->end, 1189 (unsigned long long)r->end,
1196 (unsigned int)r->flags); 1190 (unsigned int)r->flags);
1197 1191
1198 pr = pci_find_parent_resource(dev, r); 1192 pr = pci_find_parent_resource(dev, r);
1199 if (!pr || (pr->flags & IORESOURCE_UNSET) || 1193 if (!pr || (pr->flags & IORESOURCE_UNSET) ||
1200 request_resource(pr, r) < 0) { 1194 request_resource(pr, r) < 0) {
1201 printk(KERN_WARNING "PCI: Cannot allocate resource region %d" 1195 pr_warn("PCI: Cannot allocate resource region %d ", idx);
1202 " of device %s, will remap\n", idx, pci_name(dev)); 1196 pr_cont("of device %s, will remap\n", pci_name(dev));
1203 if (pr) 1197 if (pr)
1204 pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n", 1198 pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n",
1205 pr, 1199 pr,
1206 (unsigned long long)pr->start, 1200 (unsigned long long)pr->start,
1207 (unsigned long long)pr->end, 1201 (unsigned long long)pr->end,
1208 (unsigned int)pr->flags); 1202 (unsigned int)pr->flags);
1209 /* We'll assign a new address later */ 1203 /* We'll assign a new address later */
1210 r->flags |= IORESOURCE_UNSET; 1204 r->flags |= IORESOURCE_UNSET;
1211 r->end -= r->start; 1205 r->end -= r->start;
1212 r->start = 0; 1206 r->start = 0;
1213 } 1207 }
1214 } 1208 }
1215 1209
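The last three statements of alloc_resource() are a small idiom worth spelling out: a BAR that lost its address is converted into a pure size request, so the later assignment pass is free to place it anywhere. Restated as a hypothetical helper:

static void mark_for_reassignment(struct resource *r)
{
	r->flags |= IORESOURCE_UNSET;	/* allocator must find it a home */
	r->end -= r->start;		/* keep only the length... */
	r->start = 0;			/* ...re-based at zero */
}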
1216 static void __init pcibios_allocate_resources(int pass) 1210 static void __init pcibios_allocate_resources(int pass)
1217 { 1211 {
1218 struct pci_dev *dev = NULL; 1212 struct pci_dev *dev = NULL;
1219 int idx, disabled; 1213 int idx, disabled;
1220 u16 command; 1214 u16 command;
1221 struct resource *r; 1215 struct resource *r;
1222 1216
1223 for_each_pci_dev(dev) { 1217 for_each_pci_dev(dev) {
1224 pci_read_config_word(dev, PCI_COMMAND, &command); 1218 pci_read_config_word(dev, PCI_COMMAND, &command);
1225 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) { 1219 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
1226 r = &dev->resource[idx]; 1220 r = &dev->resource[idx];
1227 if (r->parent) /* Already allocated */ 1221 if (r->parent) /* Already allocated */
1228 continue; 1222 continue;
1229 if (!r->flags || (r->flags & IORESOURCE_UNSET)) 1223 if (!r->flags || (r->flags & IORESOURCE_UNSET))
1230 continue; /* Not assigned at all */ 1224 continue; /* Not assigned at all */
1231 /* We only allocate ROMs on pass 1 just in case they 1225 /* We only allocate ROMs on pass 1 just in case they
1232 * have been screwed up by firmware 1226 * have been screwed up by firmware
1233 */ 1227 */
1234 if (idx == PCI_ROM_RESOURCE) 1228 if (idx == PCI_ROM_RESOURCE)
1235 disabled = 1; 1229 disabled = 1;
1236 if (r->flags & IORESOURCE_IO) 1230 if (r->flags & IORESOURCE_IO)
1237 disabled = !(command & PCI_COMMAND_IO); 1231 disabled = !(command & PCI_COMMAND_IO);
1238 else 1232 else
1239 disabled = !(command & PCI_COMMAND_MEMORY); 1233 disabled = !(command & PCI_COMMAND_MEMORY);
1240 if (pass == disabled) 1234 if (pass == disabled)
1241 alloc_resource(dev, idx); 1235 alloc_resource(dev, idx);
1242 } 1236 }
1243 if (pass) 1237 if (pass)
1244 continue; 1238 continue;
1245 r = &dev->resource[PCI_ROM_RESOURCE]; 1239 r = &dev->resource[PCI_ROM_RESOURCE];
1246 if (r->flags) { 1240 if (r->flags) {
1247 /* Turn the ROM off, leave the resource region, 1241 /* Turn the ROM off, leave the resource region,
1248 * but keep it unregistered. 1242 * but keep it unregistered.
1249 */ 1243 */
1250 u32 reg; 1244 u32 reg;
1251 pci_read_config_dword(dev, dev->rom_base_reg, &reg); 1245 pci_read_config_dword(dev, dev->rom_base_reg, &reg);
1252 if (reg & PCI_ROM_ADDRESS_ENABLE) { 1246 if (reg & PCI_ROM_ADDRESS_ENABLE) {
1253 pr_debug("PCI: Switching off ROM of %s\n", 1247 pr_debug("PCI: Switching off ROM of %s\n",
1254 pci_name(dev)); 1248 pci_name(dev));
1255 r->flags &= ~IORESOURCE_ROM_ENABLE; 1249 r->flags &= ~IORESOURCE_ROM_ENABLE;
1256 pci_write_config_dword(dev, dev->rom_base_reg, 1250 pci_write_config_dword(dev, dev->rom_base_reg,
1257 reg & ~PCI_ROM_ADDRESS_ENABLE); 1251 reg & ~PCI_ROM_ADDRESS_ENABLE);
1258 } 1252 }
1259 } 1253 }
1260 } 1254 }
1261 } 1255 }
1262 1256
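The `pass == disabled` comparison implements the two-phase policy: pass 0 claims resources whose decode is already enabled in PCI_COMMAND, pass 1 handles the rest. Note that the `disabled = 1` set for ROM BARs is then overwritten by the unconditional IO/memory test that follows, so in practice the memory-decode bit decides for ROMs as well. A sketch that mirrors the selection (claim_on_this_pass() is a hypothetical helper, not part of the commit):

static bool claim_on_this_pass(int pass, u16 command,
			       unsigned long flags, int idx)
{
	int disabled = 0;

	if (idx == PCI_ROM_RESOURCE)
		disabled = 1;
	/* This test runs unconditionally, mirroring the original,
	 * so it also overrides the ROM case just set above. */
	if (flags & IORESOURCE_IO)
		disabled = !(command & PCI_COMMAND_IO);
	else
		disabled = !(command & PCI_COMMAND_MEMORY);

	return pass == disabled;
}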
1263 static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus) 1257 static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
1264 { 1258 {
1265 struct pci_controller *hose = pci_bus_to_host(bus); 1259 struct pci_controller *hose = pci_bus_to_host(bus);
1266 resource_size_t offset; 1260 resource_size_t offset;
1267 struct resource *res, *pres; 1261 struct resource *res, *pres;
1268 int i; 1262 int i;
1269 1263
1270 pr_debug("Reserving legacy ranges for domain %04x\n", 1264 pr_debug("Reserving legacy ranges for domain %04x\n",
1271 pci_domain_nr(bus)); 1265 pci_domain_nr(bus));
1272 1266
1273 /* Check for IO */ 1267 /* Check for IO */
1274 if (!(hose->io_resource.flags & IORESOURCE_IO)) 1268 if (!(hose->io_resource.flags & IORESOURCE_IO))
1275 goto no_io; 1269 goto no_io;
1276 offset = (unsigned long)hose->io_base_virt - _IO_BASE; 1270 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
1277 res = kzalloc(sizeof(struct resource), GFP_KERNEL); 1271 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
1278 BUG_ON(res == NULL); 1272 BUG_ON(res == NULL);
1279 res->name = "Legacy IO"; 1273 res->name = "Legacy IO";
1280 res->flags = IORESOURCE_IO; 1274 res->flags = IORESOURCE_IO;
1281 res->start = offset; 1275 res->start = offset;
1282 res->end = (offset + 0xfff) & 0xfffffffful; 1276 res->end = (offset + 0xfff) & 0xfffffffful;
1283 pr_debug("Candidate legacy IO: %pR\n", res); 1277 pr_debug("Candidate legacy IO: %pR\n", res);
1284 if (request_resource(&hose->io_resource, res)) { 1278 if (request_resource(&hose->io_resource, res)) {
1285 printk(KERN_DEBUG 1279 pr_debug("PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
1286 "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
1287 pci_domain_nr(bus), bus->number, res); 1280 pci_domain_nr(bus), bus->number, res);
1288 kfree(res); 1281 kfree(res);
1289 } 1282 }
1290 1283
1291 no_io: 1284 no_io:
1292 /* Check for memory */ 1285 /* Check for memory */
1293 offset = hose->pci_mem_offset; 1286 offset = hose->pci_mem_offset;
1294 pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset); 1287 pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset);
1295 for (i = 0; i < 3; i++) { 1288 for (i = 0; i < 3; i++) {
1296 pres = &hose->mem_resources[i]; 1289 pres = &hose->mem_resources[i];
1297 if (!(pres->flags & IORESOURCE_MEM)) 1290 if (!(pres->flags & IORESOURCE_MEM))
1298 continue; 1291 continue;
1299 pr_debug("hose mem res: %pR\n", pres); 1292 pr_debug("hose mem res: %pR\n", pres);
1300 if ((pres->start - offset) <= 0xa0000 && 1293 if ((pres->start - offset) <= 0xa0000 &&
1301 (pres->end - offset) >= 0xbffff) 1294 (pres->end - offset) >= 0xbffff)
1302 break; 1295 break;
1303 } 1296 }
1304 if (i >= 3) 1297 if (i >= 3)
1305 return; 1298 return;
1306 res = kzalloc(sizeof(struct resource), GFP_KERNEL); 1299 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
1307 BUG_ON(res == NULL); 1300 BUG_ON(res == NULL);
1308 res->name = "Legacy VGA memory"; 1301 res->name = "Legacy VGA memory";
1309 res->flags = IORESOURCE_MEM; 1302 res->flags = IORESOURCE_MEM;
1310 res->start = 0xa0000 + offset; 1303 res->start = 0xa0000 + offset;
1311 res->end = 0xbffff + offset; 1304 res->end = 0xbffff + offset;
1312 pr_debug("Candidate VGA memory: %pR\n", res); 1305 pr_debug("Candidate VGA memory: %pR\n", res);
1313 if (request_resource(pres, res)) { 1306 if (request_resource(pres, res)) {
1314 printk(KERN_DEBUG 1307 pr_debug("PCI %04x:%02x Cannot reserve VGA memory %pR\n",
1315 "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
1316 pci_domain_nr(bus), bus->number, res); 1308 pci_domain_nr(bus), bus->number, res);
1317 kfree(res); 1309 kfree(res);
1318 } 1310 }
1319 } 1311 }
1320 1312
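Both reservations above share one shape: allocate a struct resource, fill it in, request it, and free it on conflict. A consolidated sketch under those assumptions (reserve_range() is hypothetical; the function above keeps the two copies inline, and uses BUG_ON() instead of returning on allocation failure):

static struct resource *reserve_range(struct resource *parent,
				      const char *name,
				      resource_size_t start,
				      resource_size_t end,
				      unsigned long flags)
{
	struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);

	if (!res)
		return NULL;
	res->name = name;
	res->flags = flags;
	res->start = start;
	res->end = end;
	if (request_resource(parent, res)) {
		kfree(res);	/* range already claimed; skip quietly */
		return NULL;
	}
	return res;
}

With such a helper the VGA block would reduce to reserve_range(pres, "Legacy VGA memory", 0xa0000 + offset, 0xbffff + offset, IORESOURCE_MEM).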
1321 void __init pcibios_resource_survey(void) 1313 void __init pcibios_resource_survey(void)
1322 { 1314 {
1323 struct pci_bus *b; 1315 struct pci_bus *b;
1324 1316
1325 /* Allocate and assign resources. If we re-assign everything, then 1317 /* Allocate and assign resources. If we re-assign everything, then
1326 * we skip the allocate phase 1318 * we skip the allocate phase
1327 */ 1319 */
1328 list_for_each_entry(b, &pci_root_buses, node) 1320 list_for_each_entry(b, &pci_root_buses, node)
1329 pcibios_allocate_bus_resources(b); 1321 pcibios_allocate_bus_resources(b);
1330 1322
1331 pcibios_allocate_resources(0); 1323 pcibios_allocate_resources(0);
1332 pcibios_allocate_resources(1); 1324 pcibios_allocate_resources(1);
1333 1325
1334 /* Before we start assigning unassigned resources, we try to reserve 1326 /* Before we start assigning unassigned resources, we try to reserve
1335 * the low IO area and the VGA memory area if they intersect the 1327 * the low IO area and the VGA memory area if they intersect the
1336 * available bus resources, to avoid allocating things on top of them 1328 * available bus resources, to avoid allocating things on top of them
1337 */ 1329 */
1338 list_for_each_entry(b, &pci_root_buses, node) 1330 list_for_each_entry(b, &pci_root_buses, node)
1339 pcibios_reserve_legacy_regions(b); 1331 pcibios_reserve_legacy_regions(b);
1340 1332
1341 /* Now proceed to assigning things that were left unassigned */ 1333 /* Now proceed to assigning things that were left unassigned */
1342 pr_debug("PCI: Assigning unassigned resources...\n"); 1334 pr_debug("PCI: Assigning unassigned resources...\n");
1343 pci_assign_unassigned_resources(); 1335 pci_assign_unassigned_resources();
1344 } 1336 }
1345 1337
1346 /* This is used by the PCI hotplug driver to allocate resources 1338 /* This is used by the PCI hotplug driver to allocate resources
1347 * of newly plugged busses. We can try to consolidate with the 1339 * of newly plugged busses. We can try to consolidate with the
1348 * rest of the code later; for now, keep it as-is, as our main 1340 * rest of the code later; for now, keep it as-is, as our main
1349 * resource allocation function doesn't deal with sub-trees yet. 1341 * resource allocation function doesn't deal with sub-trees yet.
1350 */ 1342 */
1351 void pcibios_claim_one_bus(struct pci_bus *bus) 1343 void pcibios_claim_one_bus(struct pci_bus *bus)
1352 { 1344 {
1353 struct pci_dev *dev; 1345 struct pci_dev *dev;
1354 struct pci_bus *child_bus; 1346 struct pci_bus *child_bus;
1355 1347
1356 list_for_each_entry(dev, &bus->devices, bus_list) { 1348 list_for_each_entry(dev, &bus->devices, bus_list) {
1357 int i; 1349 int i;
1358 1350
1359 for (i = 0; i < PCI_NUM_RESOURCES; i++) { 1351 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1360 struct resource *r = &dev->resource[i]; 1352 struct resource *r = &dev->resource[i];
1361 1353
1362 if (r->parent || !r->start || !r->flags) 1354 if (r->parent || !r->start || !r->flags)
1363 continue; 1355 continue;
1364 1356
1365 pr_debug("PCI: Claiming %s: " 1357 pr_debug("PCI: Claiming %s: ", pci_name(dev));
1366 "Resource %d: %016llx..%016llx [%x]\n", 1358 pr_debug("Resource %d: %016llx..%016llx [%x]\n",
1367 pci_name(dev), i, 1359 i, (unsigned long long)r->start,
1368 (unsigned long long)r->start,
1369 (unsigned long long)r->end, 1360 (unsigned long long)r->end,
1370 (unsigned int)r->flags); 1361 (unsigned int)r->flags);
1371 1362
1372 pci_claim_resource(dev, i); 1363 pci_claim_resource(dev, i);
1373 } 1364 }
1374 } 1365 }
1375 1366
1376 list_for_each_entry(child_bus, &bus->children, node) 1367 list_for_each_entry(child_bus, &bus->children, node)
1377 pcibios_claim_one_bus(child_bus); 1368 pcibios_claim_one_bus(child_bus);
1378 } 1369 }
1379 EXPORT_SYMBOL_GPL(pcibios_claim_one_bus); 1370 EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
1380 1371
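Unlike the survey path, pcibios_claim_one_bus() trusts the addresses already programmed into the BARs and only inserts them into the resource tree. A sketch of a hypothetical hotplug call site, assuming `bridge` is a PCI-to-PCI bridge whose secondary bus has just been scanned:

	struct pci_bus *child = bridge->subordinate;

	if (child) {
		pcibios_claim_one_bus(child);	/* insert pre-set BARs */
		pci_bus_add_devices(child);	/* then publish devices */
	}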
1381 1372
1382 /* pcibios_finish_adding_to_bus 1373 /* pcibios_finish_adding_to_bus
1383 * 1374 *
1384 * This is to be called by the hotplug code after devices have been 1375 * This is to be called by the hotplug code after devices have been
1385 * added to a bus; this includes calling it for a PHB that is just 1376 * added to a bus; this includes calling it for a PHB that is just
1386 * being added 1377 * being added
1387 */ 1378 */
1388 void pcibios_finish_adding_to_bus(struct pci_bus *bus) 1379 void pcibios_finish_adding_to_bus(struct pci_bus *bus)
1389 { 1380 {
1390 pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n", 1381 pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
1391 pci_domain_nr(bus), bus->number); 1382 pci_domain_nr(bus), bus->number);
1392 1383
1393 /* Allocate bus and devices resources */ 1384 /* Allocate bus and devices resources */
1394 pcibios_allocate_bus_resources(bus); 1385 pcibios_allocate_bus_resources(bus);
1395 pcibios_claim_one_bus(bus); 1386 pcibios_claim_one_bus(bus);
1396 1387
1397 /* Add new devices to global lists. Register in proc, sysfs. */ 1388 /* Add new devices to global lists. Register in proc, sysfs. */
1398 pci_bus_add_devices(bus); 1389 pci_bus_add_devices(bus);
1399 1390
1400 /* Fixup EEH */ 1391 /* Fixup EEH */
1401 /* eeh_add_device_tree_late(bus); */ 1392 /* eeh_add_device_tree_late(bus); */
1402 } 1393 }
1403 EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus); 1394 EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
1404 1395
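Combining the two exported hooks, a hotplug rescan would look roughly like this (a sketch only; rescan_bridge() and its trigger are hypothetical):

static void rescan_bridge(struct pci_bus *bus)	/* hypothetical */
{
	pci_scan_child_bus(bus);		/* discover new devices */
	pcibios_finish_adding_to_bus(bus);	/* allocate, claim, register */
}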
1405 int pcibios_enable_device(struct pci_dev *dev, int mask) 1396 int pcibios_enable_device(struct pci_dev *dev, int mask)
1406 { 1397 {
1407 return pci_enable_resources(dev, mask); 1398 return pci_enable_resources(dev, mask);
1408 } 1399 }
1409 1400
1410 static void pcibios_setup_phb_resources(struct pci_controller *hose, 1401 static void pcibios_setup_phb_resources(struct pci_controller *hose,
1411 struct list_head *resources) 1402 struct list_head *resources)
1412 { 1403 {
1413 unsigned long io_offset; 1404 unsigned long io_offset;
1414 struct resource *res; 1405 struct resource *res;
1415 int i; 1406 int i;
1416 1407
1417 /* Hookup PHB IO resource */ 1408 /* Hookup PHB IO resource */
1418 res = &hose->io_resource; 1409 res = &hose->io_resource;
1419 1410
1420 /* Fixup IO space offset */ 1411 /* Fixup IO space offset */
1421 io_offset = (unsigned long)hose->io_base_virt - isa_io_base; 1412 io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
1422 res->start = (res->start + io_offset) & 0xffffffffu; 1413 res->start = (res->start + io_offset) & 0xffffffffu;
1423 res->end = (res->end + io_offset) & 0xffffffffu; 1414 res->end = (res->end + io_offset) & 0xffffffffu;
1424 1415
1425 if (!res->flags) { 1416 if (!res->flags) {
1426 printk(KERN_WARNING "PCI: I/O resource not set for host" 1417 pr_warn("PCI: I/O resource not set for host ");
1427 " bridge %s (domain %d)\n", 1418 pr_cont("bridge %s (domain %d)\n",
1428 hose->dn->full_name, hose->global_number); 1419 hose->dn->full_name, hose->global_number);
1429 /* Workaround for lack of IO resource only on 32-bit */ 1420 /* Workaround for lack of IO resource only on 32-bit */
1430 res->start = (unsigned long)hose->io_base_virt - isa_io_base; 1421 res->start = (unsigned long)hose->io_base_virt - isa_io_base;
1431 res->end = res->start + IO_SPACE_LIMIT; 1422 res->end = res->start + IO_SPACE_LIMIT;
1432 res->flags = IORESOURCE_IO; 1423 res->flags = IORESOURCE_IO;
1433 } 1424 }
1434 pci_add_resource_offset(resources, res, 1425 pci_add_resource_offset(resources, res,
1435 (__force resource_size_t)(hose->io_base_virt - _IO_BASE)); 1426 (__force resource_size_t)(hose->io_base_virt - _IO_BASE));
1436 1427
1437 pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n", 1428 pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n",
1438 (unsigned long long)res->start, 1429 (unsigned long long)res->start,
1439 (unsigned long long)res->end, 1430 (unsigned long long)res->end,
1440 (unsigned long)res->flags); 1431 (unsigned long)res->flags);
1441 1432
1442 /* Hookup PHB Memory resources */ 1433 /* Hookup PHB Memory resources */
1443 for (i = 0; i < 3; ++i) { 1434 for (i = 0; i < 3; ++i) {
1444 res = &hose->mem_resources[i]; 1435 res = &hose->mem_resources[i];
1445 if (!res->flags) { 1436 if (!res->flags) {
1446 if (i > 0) 1437 if (i > 0)
1447 continue; 1438 continue;
1448 printk(KERN_ERR "PCI: Memory resource 0 not set for " 1439 pr_err("PCI: Memory resource 0 not set for ");
1449 "host bridge %s (domain %d)\n", 1440 pr_cont("host bridge %s (domain %d)\n",
1450 hose->dn->full_name, hose->global_number); 1441 hose->dn->full_name, hose->global_number);
1451 1442
1452 /* Workaround for lack of MEM resource only on 32-bit */ 1443 /* Workaround for lack of MEM resource only on 32-bit */
1453 res->start = hose->pci_mem_offset; 1444 res->start = hose->pci_mem_offset;
1454 res->end = (resource_size_t)-1LL; 1445 res->end = (resource_size_t)-1LL;
1455 res->flags = IORESOURCE_MEM; 1446 res->flags = IORESOURCE_MEM;
1456 1447
1457 } 1448 }
1458 pci_add_resource_offset(resources, res, hose->pci_mem_offset); 1449 pci_add_resource_offset(resources, res, hose->pci_mem_offset);
1459 1450
1460 pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n", 1451 pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n",
1461 i, (unsigned long long)res->start, 1452 i, (unsigned long long)res->start,
1462 (unsigned long long)res->end, 1453 (unsigned long long)res->end,
1463 (unsigned long)res->flags); 1454 (unsigned long)res->flags);
1464 } 1455 }
1465 1456
1466 pr_debug("PCI: PHB MEM offset = %016llx\n", 1457 pr_debug("PCI: PHB MEM offset = %016llx\n",
1467 (unsigned long long)hose->pci_mem_offset); 1458 (unsigned long long)hose->pci_mem_offset);
1468 pr_debug("PCI: PHB IO offset = %08lx\n", 1459 pr_debug("PCI: PHB IO offset = %08lx\n",
1469 (unsigned long)hose->io_base_virt - _IO_BASE); 1460 (unsigned long)hose->io_base_virt - _IO_BASE);
1470 } 1461 }
1471 1462
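The offsets registered through pci_add_resource_offset() above are what the PCI core later uses to translate between CPU addresses (struct resource) and bus addresses (BAR values). Conceptually, for a window registered with offset `off` (a sketch of the core's arithmetic, not code from this file):

static resource_size_t window_cpu_to_bus(resource_size_t cpu_addr,
					 resource_size_t off)
{
	/* what gets programmed into a BAR for this window */
	return cpu_addr - off;
}

static resource_size_t window_bus_to_cpu(resource_size_t bus_addr,
					 resource_size_t off)
{
	/* what the kernel tracks in struct resource */
	return bus_addr + off;
}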
1472 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus) 1463 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
1473 { 1464 {
1474 struct pci_controller *hose = bus->sysdata; 1465 struct pci_controller *hose = bus->sysdata;
1475 1466
1476 return of_node_get(hose->dn); 1467 return of_node_get(hose->dn);
1477 } 1468 }
1478 1469
1479 static void pcibios_scan_phb(struct pci_controller *hose) 1470 static void pcibios_scan_phb(struct pci_controller *hose)
1480 { 1471 {
1481 LIST_HEAD(resources); 1472 LIST_HEAD(resources);
1482 struct pci_bus *bus; 1473 struct pci_bus *bus;
1483 struct device_node *node = hose->dn; 1474 struct device_node *node = hose->dn;
1484 1475
1485 pr_debug("PCI: Scanning PHB %s\n", of_node_full_name(node)); 1476 pr_debug("PCI: Scanning PHB %s\n", of_node_full_name(node));
1486 1477
1487 pcibios_setup_phb_resources(hose, &resources); 1478 pcibios_setup_phb_resources(hose, &resources);
1488 1479
1489 bus = pci_scan_root_bus(hose->parent, hose->first_busno, 1480 bus = pci_scan_root_bus(hose->parent, hose->first_busno,
1490 hose->ops, hose, &resources); 1481 hose->ops, hose, &resources);
1491 if (bus == NULL) { 1482 if (bus == NULL) {
1492 printk(KERN_ERR "Failed to create bus for PCI domain %04x\n", 1483 pr_err("Failed to create bus for PCI domain %04x\n",
1493 hose->global_number); 1484 hose->global_number);
1494 pci_free_resource_list(&resources); 1485 pci_free_resource_list(&resources);
1495 return; 1486 return;
1496 } 1487 }
1497 bus->busn_res.start = hose->first_busno; 1488 bus->busn_res.start = hose->first_busno;
1498 hose->bus = bus; 1489 hose->bus = bus;
1499 1490
1500 hose->last_busno = bus->busn_res.end; 1491 hose->last_busno = bus->busn_res.end;
1501 } 1492 }
1502 1493
1503 static int __init pcibios_init(void) 1494 static int __init pcibios_init(void)
1504 { 1495 {
1505 struct pci_controller *hose, *tmp; 1496 struct pci_controller *hose, *tmp;
1506 int next_busno = 0; 1497 int next_busno = 0;
1507 1498
1508 printk(KERN_INFO "PCI: Probing PCI hardware\n"); 1499 pr_info("PCI: Probing PCI hardware\n");
1509 1500
1510 /* Scan all of the recorded PCI controllers. */ 1501 /* Scan all of the recorded PCI controllers. */
1511 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { 1502 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
1512 hose->last_busno = 0xff; 1503 hose->last_busno = 0xff;
1513 pcibios_scan_phb(hose); 1504 pcibios_scan_phb(hose);
1514 if (next_busno <= hose->last_busno) 1505 if (next_busno <= hose->last_busno)
1515 next_busno = hose->last_busno + 1; 1506 next_busno = hose->last_busno + 1;
1516 } 1507 }
1517 pci_bus_count = next_busno; 1508 pci_bus_count = next_busno;
1518 1509
1519 /* Call common code to handle resource allocation */ 1510 /* Call common code to handle resource allocation */
1520 pcibios_resource_survey(); 1511 pcibios_resource_survey();
1521 1512
1522 return 0; 1513 return 0;
1523 } 1514 }
1524 1515
1525 subsys_initcall(pcibios_init); 1516 subsys_initcall(pcibios_init);
1526 1517
1527 static struct pci_controller *pci_bus_to_hose(int bus) 1518 static struct pci_controller *pci_bus_to_hose(int bus)
1528 { 1519 {
1529 struct pci_controller *hose, *tmp; 1520 struct pci_controller *hose, *tmp;
1530 1521
1531 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) 1522 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1532 if (bus >= hose->first_busno && bus <= hose->last_busno) 1523 if (bus >= hose->first_busno && bus <= hose->last_busno)
1533 return hose; 1524 return hose;
1534 return NULL; 1525 return NULL;
1535 } 1526 }
1536 1527
1537 /* Provide information on locations of various I/O regions in physical 1528 /* Provide information on locations of various I/O regions in physical
1538 * memory. Do this on a per-card basis so that we choose the right 1529 * memory. Do this on a per-card basis so that we choose the right
1539 * root bridge. 1530 * root bridge.
1540 * Note that the returned IO or memory base is a physical address. 1531 * Note that the returned IO or memory base is a physical address.
1541 */ 1532 */
1542 1533
1543 long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn) 1534 long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
1544 { 1535 {
1545 struct pci_controller *hose; 1536 struct pci_controller *hose;
1546 long result = -EOPNOTSUPP; 1537 long result = -EOPNOTSUPP;
1547 1538
1548 hose = pci_bus_to_hose(bus); 1539 hose = pci_bus_to_hose(bus);
1549 if (!hose) 1540 if (!hose)
1550 return -ENODEV; 1541 return -ENODEV;
1551 1542
1552 switch (which) { 1543 switch (which) {
1553 case IOBASE_BRIDGE_NUMBER: 1544 case IOBASE_BRIDGE_NUMBER:
1554 return (long)hose->first_busno; 1545 return (long)hose->first_busno;
1555 case IOBASE_MEMORY: 1546 case IOBASE_MEMORY:
1556 return (long)hose->pci_mem_offset; 1547 return (long)hose->pci_mem_offset;
1557 case IOBASE_IO: 1548 case IOBASE_IO:
1558 return (long)hose->io_base_phys; 1549 return (long)hose->io_base_phys;
1559 case IOBASE_ISA_IO: 1550 case IOBASE_ISA_IO:
1560 return (long)isa_io_base; 1551 return (long)isa_io_base;
1561 case IOBASE_ISA_MEM: 1552 case IOBASE_ISA_MEM:
1562 return (long)isa_mem_base; 1553 return (long)isa_mem_base;
1563 } 1554 }
1564 1555
1565 return result; 1556 return result;
1566 } 1557 }
1567 1558
1568 /* 1559 /*
1569 * Null PCI config access functions, for the case when we can't 1560 * Null PCI config access functions, for the case when we can't
1570 * find a hose. 1561 * find a hose.
1571 */ 1562 */
1572 #define NULL_PCI_OP(rw, size, type) \ 1563 #define NULL_PCI_OP(rw, size, type) \
1573 static int \ 1564 static int \
1574 null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \ 1565 null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
1575 { \ 1566 { \
1576 return PCIBIOS_DEVICE_NOT_FOUND; \ 1567 return PCIBIOS_DEVICE_NOT_FOUND; \
1577 } 1568 }
1578 1569
1579 static int 1570 static int
1580 null_read_config(struct pci_bus *bus, unsigned int devfn, int offset, 1571 null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
1581 int len, u32 *val) 1572 int len, u32 *val)
1582 { 1573 {
1583 return PCIBIOS_DEVICE_NOT_FOUND; 1574 return PCIBIOS_DEVICE_NOT_FOUND;
1584 } 1575 }
1585 1576
1586 static int 1577 static int
1587 null_write_config(struct pci_bus *bus, unsigned int devfn, int offset, 1578 null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
1588 int len, u32 val) 1579 int len, u32 val)
1589 { 1580 {
1590 return PCIBIOS_DEVICE_NOT_FOUND; 1581 return PCIBIOS_DEVICE_NOT_FOUND;
1591 } 1582 }
1592 1583
1593 static struct pci_ops null_pci_ops = { 1584 static struct pci_ops null_pci_ops = {
1594 .read = null_read_config, 1585 .read = null_read_config,
1595 .write = null_write_config, 1586 .write = null_write_config,
1596 }; 1587 };
1597 1588
1598 /* 1589 /*
1599 * These functions are used early on before PCI scanning is done 1590 * These functions are used early on before PCI scanning is done
1600 * and all of the pci_dev and pci_bus structures have been created. 1591 * and all of the pci_dev and pci_bus structures have been created.
1601 */ 1592 */
1602 static struct pci_bus * 1593 static struct pci_bus *
1603 fake_pci_bus(struct pci_controller *hose, int busnr) 1594 fake_pci_bus(struct pci_controller *hose, int busnr)
1604 { 1595 {
1605 static struct pci_bus bus; 1596 static struct pci_bus bus;
1606 1597
1607 if (!hose) 1598 if (!hose)
1608 printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr); 1599 pr_err("Can't find hose for PCI bus %d!\n", busnr);
1609 1600
1610 bus.number = busnr; 1601 bus.number = busnr;
1611 bus.sysdata = hose; 1602 bus.sysdata = hose;
1612 bus.ops = hose ? hose->ops : &null_pci_ops; 1603 bus.ops = hose ? hose->ops : &null_pci_ops;
1613 return &bus; 1604 return &bus;
1614 } 1605 }
1615 1606
1616 #define EARLY_PCI_OP(rw, size, type) \ 1607 #define EARLY_PCI_OP(rw, size, type) \
1617 int early_##rw##_config_##size(struct pci_controller *hose, int bus, \ 1608 int early_##rw##_config_##size(struct pci_controller *hose, int bus, \
1618 int devfn, int offset, type value) \ 1609 int devfn, int offset, type value) \
1619 { \ 1610 { \
1620 return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \ 1611 return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \
1621 devfn, offset, value); \ 1612 devfn, offset, value); \
1622 } 1613 }
1623 1614
1624 EARLY_PCI_OP(read, byte, u8 *) 1615 EARLY_PCI_OP(read, byte, u8 *)
1625 EARLY_PCI_OP(read, word, u16 *) 1616 EARLY_PCI_OP(read, word, u16 *)
1626 EARLY_PCI_OP(read, dword, u32 *) 1617 EARLY_PCI_OP(read, dword, u32 *)
1627 EARLY_PCI_OP(write, byte, u8) 1618 EARLY_PCI_OP(write, byte, u8)
1628 EARLY_PCI_OP(write, word, u16) 1619 EARLY_PCI_OP(write, word, u16)
1629 EARLY_PCI_OP(write, dword, u32) 1620 EARLY_PCI_OP(write, dword, u32)
1630 1621
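For reference, one instantiation of the macro written out by hand; EARLY_PCI_OP(read, dword, u32 *) expands to roughly:

int early_read_config_dword(struct pci_controller *hose, int bus,
			    int devfn, int offset, u32 *value)
{
	return pci_bus_read_config_dword(fake_pci_bus(hose, bus),
					 devfn, offset, value);
}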
1631 int early_find_capability(struct pci_controller *hose, int bus, int devfn, 1622 int early_find_capability(struct pci_controller *hose, int bus, int devfn,
1632 int cap) 1623 int cap)
1633 { 1624 {
1634 return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap); 1625 return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
arch/microblaze/pci/xilinx_pci.c
1 /* 1 /*
2 * PCI support for Xilinx plbv46_pci soft-core which can be used on 2 * PCI support for Xilinx plbv46_pci soft-core which can be used on
3 * Xilinx Virtex ML410 / ML510 boards. 3 * Xilinx Virtex ML410 / ML510 boards.
4 * 4 *
5 * Copyright 2009 Roderick Colenbrander 5 * Copyright 2009 Roderick Colenbrander
6 * Copyright 2009 Secret Lab Technologies Ltd. 6 * Copyright 2009 Secret Lab Technologies Ltd.
7 * 7 *
8 * The pci bridge fixup code was copied from ppc4xx_pci.c and was written 8 * The pci bridge fixup code was copied from ppc4xx_pci.c and was written
9 * by Benjamin Herrenschmidt. 9 * by Benjamin Herrenschmidt.
10 * Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp. 10 * Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
11 * 11 *
12 * This file is licensed under the terms of the GNU General Public License 12 * This file is licensed under the terms of the GNU General Public License
13 * version 2. This program is licensed "as is" without any warranty of any 13 * version 2. This program is licensed "as is" without any warranty of any
14 * kind, whether express or implied. 14 * kind, whether express or implied.
15 */ 15 */
16 16
17 #include <linux/ioport.h> 17 #include <linux/ioport.h>
18 #include <linux/of.h> 18 #include <linux/of.h>
19 #include <linux/of_address.h> 19 #include <linux/of_address.h>
20 #include <linux/pci.h> 20 #include <linux/pci.h>
21 #include <asm/io.h> 21 #include <linux/io.h>
22 22
23 #define XPLB_PCI_ADDR 0x10c 23 #define XPLB_PCI_ADDR 0x10c
24 #define XPLB_PCI_DATA 0x110 24 #define XPLB_PCI_DATA 0x110
25 #define XPLB_PCI_BUS 0x114 25 #define XPLB_PCI_BUS 0x114
26 26
27 #define PCI_HOST_ENABLE_CMD (PCI_COMMAND_SERR | PCI_COMMAND_PARITY | \ 27 #define PCI_HOST_ENABLE_CMD (PCI_COMMAND_SERR | PCI_COMMAND_PARITY | \
28 PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY) 28 PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY)
29 29
30 static struct of_device_id xilinx_pci_match[] = { 30 static struct of_device_id xilinx_pci_match[] = {
31 { .compatible = "xlnx,plbv46-pci-1.03.a", }, 31 { .compatible = "xlnx,plbv46-pci-1.03.a", },
32 {} 32 {}
33 }; 33 };
34 34
35 /** 35 /**
36 * xilinx_pci_fixup_bridge - Block Xilinx PHB configuration. 36 * xilinx_pci_fixup_bridge - Block Xilinx PHB configuration.
37 */ 37 */
38 static void xilinx_pci_fixup_bridge(struct pci_dev *dev) 38 static void xilinx_pci_fixup_bridge(struct pci_dev *dev)
39 { 39 {
40 struct pci_controller *hose; 40 struct pci_controller *hose;
41 int i; 41 int i;
42 42
43 if (dev->devfn || dev->bus->self) 43 if (dev->devfn || dev->bus->self)
44 return; 44 return;
45 45
46 hose = pci_bus_to_host(dev->bus); 46 hose = pci_bus_to_host(dev->bus);
47 if (!hose) 47 if (!hose)
48 return; 48 return;
49 49
50 if (!of_match_node(xilinx_pci_match, hose->dn)) 50 if (!of_match_node(xilinx_pci_match, hose->dn))
51 return; 51 return;
52 52
53 /* Hide the PCI host BARs from the kernel as their content doesn't 53 /* Hide the PCI host BARs from the kernel as their content doesn't
54 * fit well in the resource management 54 * fit well in the resource management
55 */ 55 */
56 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 56 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
57 dev->resource[i].start = 0; 57 dev->resource[i].start = 0;
58 dev->resource[i].end = 0; 58 dev->resource[i].end = 0;
59 dev->resource[i].flags = 0; 59 dev->resource[i].flags = 0;
60 } 60 }
61 61
62 dev_info(&dev->dev, "Hiding Xilinx plb-pci host bridge resources %s\n", 62 dev_info(&dev->dev, "Hiding Xilinx plb-pci host bridge resources %s\n",
63 pci_name(dev)); 63 pci_name(dev));
64 } 64 }
65 DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, xilinx_pci_fixup_bridge); 65 DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, xilinx_pci_fixup_bridge);
66 66
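The registration above matches every device (PCI_ANY_ID for both vendor and device) and relies on the devfn/bus->self test at the top of the fixup to skip everything except the host bridge itself. The same pattern in its minimal form, as a hypothetical example:

static void quirk_example(struct pci_dev *dev)	/* hypothetical */
{
	dev_info(&dev->dev, "header fixup ran for %s\n", pci_name(dev));
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_example);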
67 #ifdef DEBUG 67 #ifdef DEBUG
68 /** 68 /**
69 * xilinx_pci_exclude_device - Don't do config access for non-root bus 69 * xilinx_pci_exclude_device - Don't do config access for non-root bus
70 * 70 *
71 * This is a hack. Config access to any bus other than bus 0 does not 71 * This is a hack. Config access to any bus other than bus 0 does not
72 * currently work on the ML510 so we prevent it here. 72 * currently work on the ML510 so we prevent it here.
73 */ 73 */
74 static int 74 static int
75 xilinx_pci_exclude_device(struct pci_controller *hose, u_char bus, u8 devfn) 75 xilinx_pci_exclude_device(struct pci_controller *hose, u_char bus, u8 devfn)
76 { 76 {
77 return (bus != 0); 77 return (bus != 0);
78 } 78 }
79 79
80 /** 80 /**
81 * xilinx_early_pci_scan - List PCI config space for available devices 81 * xilinx_early_pci_scan - List PCI config space for available devices
82 * 82 *
83 * List PCI devices in a very early phase. 83 * List PCI devices in a very early phase.
84 */ 84 */
85 void __init xilinx_early_pci_scan(struct pci_controller *hose) 85 static void __init xilinx_early_pci_scan(struct pci_controller *hose)
86 { 86 {
87 u32 bus = 0; 87 u32 bus = 0;
88 u32 val, dev, func, offset; 88 u32 val, dev, func, offset;
89 89
90 /* Currently we have only 2 devices connected - up to 32 devices */ 90 /* Currently we have only 2 devices connected - up to 32 devices */
91 for (dev = 0; dev < 2; dev++) { 91 for (dev = 0; dev < 2; dev++) {
92 /* List only the first function number - up to 8 functions */ 92 /* List only the first function number - up to 8 functions */
93 for (func = 0; func < 1; func++) { 93 for (func = 0; func < 1; func++) {
94 printk(KERN_INFO "%02x:%02x:%02x", bus, dev, func); 94 pr_info("%02x:%02x:%02x", bus, dev, func);
95 /* read the first 64 standardized bytes */ 95 /* read the first 64 standardized bytes */
96 /* Up to 192 bytes can be a list of capabilities */ 96 /* Up to 192 bytes can be a list of capabilities */
97 for (offset = 0; offset < 64; offset += 4) { 97 for (offset = 0; offset < 64; offset += 4) {
98 early_read_config_dword(hose, bus, 98 early_read_config_dword(hose, bus,
99 PCI_DEVFN(dev, func), offset, &val); 99 PCI_DEVFN(dev, func), offset, &val);
100 if (offset == 0 && val == 0xFFFFFFFF) { 100 if (offset == 0 && val == 0xFFFFFFFF) {
101 printk(KERN_CONT "\nABSENT"); 101 pr_cont("\nABSENT");
102 break; 102 break;
103 } 103 }
104 if (!(offset % 0x10)) 104 if (!(offset % 0x10))
105 printk(KERN_CONT "\n%04x: ", offset); 105 pr_cont("\n%04x: ", offset);
106 106
107 printk(KERN_CONT "%08x ", val); 107 pr_cont("%08x ", val);
108 } 108 }
109 printk(KERN_INFO "\n"); 109 pr_info("\n");
110 } 110 }
111 } 111 }
112 } 112 }
113 #else 113 #else
114 void __init xilinx_early_pci_scan(struct pci_controller *hose) 114 static void __init xilinx_early_pci_scan(struct pci_controller *hose)
115 { 115 {
116 } 116 }
117 #endif 117 #endif
118 118
119 /** 119 /**
120 * xilinx_pci_init - Find and register a Xilinx PCI host bridge 120 * xilinx_pci_init - Find and register a Xilinx PCI host bridge
121 */ 121 */
122 void __init xilinx_pci_init(void) 122 void __init xilinx_pci_init(void)
123 { 123 {
124 struct pci_controller *hose; 124 struct pci_controller *hose;
125 struct resource r; 125 struct resource r;
126 void __iomem *pci_reg; 126 void __iomem *pci_reg;
127 struct device_node *pci_node; 127 struct device_node *pci_node;
128 128
129 pci_node = of_find_matching_node(NULL, xilinx_pci_match); 129 pci_node = of_find_matching_node(NULL, xilinx_pci_match);
130 if (!pci_node) 130 if (!pci_node)
131 return; 131 return;
132 132
133 if (of_address_to_resource(pci_node, 0, &r)) { 133 if (of_address_to_resource(pci_node, 0, &r)) {
134 pr_err("xilinx-pci: cannot resolve base address\n"); 134 pr_err("xilinx-pci: cannot resolve base address\n");
135 return; 135 return;
136 } 136 }
137 137
138 hose = pcibios_alloc_controller(pci_node); 138 hose = pcibios_alloc_controller(pci_node);
139 if (!hose) { 139 if (!hose) {
140 pr_err("xilinx-pci: pcibios_alloc_controller() failed\n"); 140 pr_err("xilinx-pci: pcibios_alloc_controller() failed\n");
141 return; 141 return;
142 } 142 }
143 143
144 /* Setup config space */ 144 /* Setup config space */
145 setup_indirect_pci(hose, r.start + XPLB_PCI_ADDR, 145 setup_indirect_pci(hose, r.start + XPLB_PCI_ADDR,
146 r.start + XPLB_PCI_DATA, 146 r.start + XPLB_PCI_DATA,
147 INDIRECT_TYPE_SET_CFG_TYPE); 147 INDIRECT_TYPE_SET_CFG_TYPE);
148 148
149 /* According to the Xilinx plbv46_pci documentation, the soft-core starts 149 /* According to the Xilinx plbv46_pci documentation, the soft-core starts
150 * a self-init when the bus master enable bit is set. Without this bit 150 * a self-init when the bus master enable bit is set. Without this bit
151 * set, the PCI bus can't be scanned. 151 * set, the PCI bus can't be scanned.
152 */ 152 */
153 early_write_config_word(hose, 0, 0, PCI_COMMAND, PCI_HOST_ENABLE_CMD); 153 early_write_config_word(hose, 0, 0, PCI_COMMAND, PCI_HOST_ENABLE_CMD);
154 154
155 /* Set the max latency timer to 255 */ 155 /* Set the max latency timer to 255 */
156 early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0xff); 156 early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0xff);
157 157
158 /* Set the max bus number to 255, and bus/subbus no's to 0 */ 158 /* Set the max bus number to 255, and bus/subbus no's to 0 */
159 pci_reg = of_iomap(pci_node, 0); 159 pci_reg = of_iomap(pci_node, 0);
160 out_be32(pci_reg + XPLB_PCI_BUS, 0x000000ff); 160 out_be32(pci_reg + XPLB_PCI_BUS, 0x000000ff);
161 iounmap(pci_reg); 161 iounmap(pci_reg);
162 162
163 /* Register the host bridge with the linux kernel! */ 163 /* Register the host bridge with the linux kernel! */
164 pci_process_bridge_OF_ranges(hose, pci_node, 164 pci_process_bridge_OF_ranges(hose, pci_node,
165 INDIRECT_TYPE_SET_CFG_TYPE); 165 INDIRECT_TYPE_SET_CFG_TYPE);
166 166
167 pr_info("xilinx-pci: Registered PCI host bridge\n"); 167 pr_info("xilinx-pci: Registered PCI host bridge\n");
168 xilinx_early_pci_scan(hose); 168 xilinx_early_pci_scan(hose);
169 } 169 }
170 170
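One detail in xilinx_pci_init() that the commit leaves untouched: of_iomap() can return NULL, and the XPLB_PCI_BUS write dereferences the result unchecked. A hedged variant of that step with the missing check added (a sketch, not part of the commit):

	pci_reg = of_iomap(pci_node, 0);
	if (pci_reg) {
		/* max bus number 255, bus/subordinate numbers 0 */
		out_be32(pci_reg + XPLB_PCI_BUS, 0x000000ff);
		iounmap(pci_reg);
	} else {
		pr_err("xilinx-pci: cannot map bridge registers\n");
	}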