Commit 4b16f8e2d6d64249f0ed3ca7fe2a319d0dde2719

Authored by Paul Gortmaker
1 parent e9848d62ab

powerpc: various straight conversions from module.h --> export.h

All these files were including module.h just for the basic
EXPORT_SYMBOL infrastructure.  We can shift them off to the
export.h header, which has a much smaller footprint, and thus
realize some compile time gains.

Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
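
The pattern behind these conversions can be illustrated with a minimal, hypothetical source file (not one touched by this commit): a file that only exports symbols needs the lightweight export.h, while a file that also uses the rest of the module machinery (module_init(), MODULE_LICENSE(), and friends) still has to keep module.h.

/* Hypothetical illustration of the conversion; not a file from this commit. */
#include <linux/export.h>	/* was: #include <linux/module.h> */

int example_helper(int x)
{
	return x + 1;
}
EXPORT_SYMBOL(example_helper);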

Showing 61 changed files with 61 additions and 61 deletions

arch/powerpc/kernel/btext.c
/*
 * Procedures for drawing on the screen early on in the boot process.
 *
 * Benjamin Herrenschmidt <benh@kernel.crashing.org>
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/btext.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/udbg.h>

#define NO_SCROLL

#ifndef NO_SCROLL
static void scrollscreen(void);
#endif

static void draw_byte(unsigned char c, long locX, long locY);
static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb);
static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb);
static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb);

#define __force_data __attribute__((__section__(".data")))

static int g_loc_X __force_data;
static int g_loc_Y __force_data;
static int g_max_loc_X __force_data;
static int g_max_loc_Y __force_data;

static int dispDeviceRowBytes __force_data;
static int dispDeviceDepth __force_data;
static int dispDeviceRect[4] __force_data;
static unsigned char *dispDeviceBase __force_data;
static unsigned char *logicalDisplayBase __force_data;

unsigned long disp_BAT[2] __initdata = {0, 0};

#define cmapsz (16*256)

static unsigned char vga_font[cmapsz];

int boot_text_mapped __force_data = 0;
int force_printk_to_btext = 0;

#ifdef CONFIG_PPC32
/* Calc BAT values for mapping the display and store them
 * in disp_BAT. Those values are then used from head.S to map
 * the display during identify_machine() and MMU_Init()
 *
 * The display is mapped to virtual address 0xD0000000, rather
 * than 1:1, because some some CHRP machines put the frame buffer
 * in the region starting at 0xC0000000 (PAGE_OFFSET).
 * This mapping is temporary and will disappear as soon as the
 * setup done by MMU_Init() is applied.
 *
 * For now, we align the BAT and then map 8Mb on 601 and 16Mb
 * on other PPCs. This may cause trouble if the framebuffer
 * is really badly aligned, but I didn't encounter this case
 * yet.
 */
void __init btext_prepare_BAT(void)
{
	unsigned long vaddr = PAGE_OFFSET + 0x10000000;
	unsigned long addr;
	unsigned long lowbits;

	addr = (unsigned long)dispDeviceBase;
	if (!addr) {
		boot_text_mapped = 0;
		return;
	}
	if (PVR_VER(mfspr(SPRN_PVR)) != 1) {
		/* 603, 604, G3, G4, ... */
		lowbits = addr & ~0xFF000000UL;
		addr &= 0xFF000000UL;
		disp_BAT[0] = vaddr | (BL_16M<<2) | 2;
		disp_BAT[1] = addr | (_PAGE_NO_CACHE | _PAGE_GUARDED | BPP_RW);
	} else {
		/* 601 */
		lowbits = addr & ~0xFF800000UL;
		addr &= 0xFF800000UL;
		disp_BAT[0] = vaddr | (_PAGE_NO_CACHE | PP_RWXX) | 4;
		disp_BAT[1] = addr | BL_8M | 0x40;
	}
	logicalDisplayBase = (void *) (vaddr + lowbits);
}
#endif


/* This function can be used to enable the early boot text when doing
 * OF booting or within bootx init. It must be followed by a btext_unmap()
 * call before the logical address becomes unusable
 */
void __init btext_setup_display(int width, int height, int depth, int pitch,
				unsigned long address)
{
	g_loc_X = 0;
	g_loc_Y = 0;
	g_max_loc_X = width / 8;
	g_max_loc_Y = height / 16;
	logicalDisplayBase = (unsigned char *)address;
	dispDeviceBase = (unsigned char *)address;
	dispDeviceRowBytes = pitch;
	dispDeviceDepth = depth == 15 ? 16 : depth;
	dispDeviceRect[0] = dispDeviceRect[1] = 0;
	dispDeviceRect[2] = width;
	dispDeviceRect[3] = height;
	boot_text_mapped = 1;
}

void __init btext_unmap(void)
{
	boot_text_mapped = 0;
}

/* Here's a small text engine to use during early boot
 * or for debugging purposes
 *
 * todo:
 *
 *  - build some kind of vgacon with it to enable early printk
 *  - move to a separate file
 *  - add a few video driver hooks to keep in sync with display
 *    changes.
 */

static void map_boot_text(void)
{
	unsigned long base, offset, size;
	unsigned char *vbase;

	/* By default, we are no longer mapped */
	boot_text_mapped = 0;
	if (dispDeviceBase == 0)
		return;
	base = ((unsigned long) dispDeviceBase) & 0xFFFFF000UL;
	offset = ((unsigned long) dispDeviceBase) - base;
	size = dispDeviceRowBytes * dispDeviceRect[3] + offset
		+ dispDeviceRect[0];
	vbase = __ioremap(base, size, _PAGE_NO_CACHE);
	if (vbase == 0)
		return;
	logicalDisplayBase = vbase + offset;
	boot_text_mapped = 1;
}

int btext_initialize(struct device_node *np)
{
	unsigned int width, height, depth, pitch;
	unsigned long address = 0;
	const u32 *prop;

	prop = of_get_property(np, "linux,bootx-width", NULL);
	if (prop == NULL)
		prop = of_get_property(np, "width", NULL);
	if (prop == NULL)
		return -EINVAL;
	width = *prop;
	prop = of_get_property(np, "linux,bootx-height", NULL);
	if (prop == NULL)
		prop = of_get_property(np, "height", NULL);
	if (prop == NULL)
		return -EINVAL;
	height = *prop;
	prop = of_get_property(np, "linux,bootx-depth", NULL);
	if (prop == NULL)
		prop = of_get_property(np, "depth", NULL);
	if (prop == NULL)
		return -EINVAL;
	depth = *prop;
	pitch = width * ((depth + 7) / 8);
	prop = of_get_property(np, "linux,bootx-linebytes", NULL);
	if (prop == NULL)
		prop = of_get_property(np, "linebytes", NULL);
	if (prop && *prop != 0xffffffffu)
		pitch = *prop;
	if (pitch == 1)
		pitch = 0x1000;
	prop = of_get_property(np, "linux,bootx-addr", NULL);
	if (prop == NULL)
		prop = of_get_property(np, "address", NULL);
	if (prop)
		address = *prop;

	/* FIXME: Add support for PCI reg properties. Right now, only
	 * reliable on macs
	 */
	if (address == 0)
		return -EINVAL;

	g_loc_X = 0;
	g_loc_Y = 0;
	g_max_loc_X = width / 8;
	g_max_loc_Y = height / 16;
	dispDeviceBase = (unsigned char *)address;
	dispDeviceRowBytes = pitch;
	dispDeviceDepth = depth == 15 ? 16 : depth;
	dispDeviceRect[0] = dispDeviceRect[1] = 0;
	dispDeviceRect[2] = width;
	dispDeviceRect[3] = height;

	map_boot_text();

	return 0;
}

int __init btext_find_display(int allow_nonstdout)
{
	const char *name;
	struct device_node *np = NULL;
	int rc = -ENODEV;

	name = of_get_property(of_chosen, "linux,stdout-path", NULL);
	if (name != NULL) {
		np = of_find_node_by_path(name);
		if (np != NULL) {
			if (strcmp(np->type, "display") != 0) {
				printk("boot stdout isn't a display !\n");
				of_node_put(np);
				np = NULL;
			}
		}
	}
	if (np)
		rc = btext_initialize(np);
	if (rc == 0 || !allow_nonstdout)
		return rc;

	for_each_node_by_type(np, "display") {
		if (of_get_property(np, "linux,opened", NULL)) {
			printk("trying %s ...\n", np->full_name);
			rc = btext_initialize(np);
			printk("result: %d\n", rc);
		}
		if (rc == 0)
			break;
	}
	return rc;
}

/* Calc the base address of a given point (x,y) */
static unsigned char * calc_base(int x, int y)
{
	unsigned char *base;

	base = logicalDisplayBase;
	if (base == 0)
		base = dispDeviceBase;
	base += (x + dispDeviceRect[0]) * (dispDeviceDepth >> 3);
	base += (y + dispDeviceRect[1]) * dispDeviceRowBytes;
	return base;
}

/* Adjust the display to a new resolution */
void btext_update_display(unsigned long phys, int width, int height,
			  int depth, int pitch)
{
	if (dispDeviceBase == 0)
		return;

	/* check it's the same frame buffer (within 256MB) */
	if ((phys ^ (unsigned long)dispDeviceBase) & 0xf0000000)
		return;

	dispDeviceBase = (__u8 *) phys;
	dispDeviceRect[0] = 0;
	dispDeviceRect[1] = 0;
	dispDeviceRect[2] = width;
	dispDeviceRect[3] = height;
	dispDeviceDepth = depth;
	dispDeviceRowBytes = pitch;
	if (boot_text_mapped) {
		iounmap(logicalDisplayBase);
		boot_text_mapped = 0;
	}
	map_boot_text();
	g_loc_X = 0;
	g_loc_Y = 0;
	g_max_loc_X = width / 8;
	g_max_loc_Y = height / 16;
}
EXPORT_SYMBOL(btext_update_display);

void btext_clearscreen(void)
{
	unsigned int *base = (unsigned int *)calc_base(0, 0);
	unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
				(dispDeviceDepth >> 3)) >> 2;
	int i,j;

	for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1]); i++)
	{
		unsigned int *ptr = base;
		for(j=width; j; --j)
			*(ptr++) = 0;
		base += (dispDeviceRowBytes >> 2);
	}
}

void btext_flushscreen(void)
{
	unsigned int *base = (unsigned int *)calc_base(0, 0);
	unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
				(dispDeviceDepth >> 3)) >> 2;
	int i,j;

	for (i=0; i < (dispDeviceRect[3] - dispDeviceRect[1]); i++)
	{
		unsigned int *ptr = base;
		for(j = width; j > 0; j -= 8) {
			__asm__ __volatile__ ("dcbst 0,%0" :: "r" (ptr));
			ptr += 8;
		}
		base += (dispDeviceRowBytes >> 2);
	}
	__asm__ __volatile__ ("sync" ::: "memory");
}

void btext_flushline(void)
{
	unsigned int *base = (unsigned int *)calc_base(0, g_loc_Y << 4);
	unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
				(dispDeviceDepth >> 3)) >> 2;
	int i,j;

	for (i=0; i < 16; i++)
	{
		unsigned int *ptr = base;
		for(j = width; j > 0; j -= 8) {
			__asm__ __volatile__ ("dcbst 0,%0" :: "r" (ptr));
			ptr += 8;
		}
		base += (dispDeviceRowBytes >> 2);
	}
	__asm__ __volatile__ ("sync" ::: "memory");
}


#ifndef NO_SCROLL
static void scrollscreen(void)
{
	unsigned int *src = (unsigned int *)calc_base(0,16);
	unsigned int *dst = (unsigned int *)calc_base(0,0);
	unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
				(dispDeviceDepth >> 3)) >> 2;
	int i,j;

	for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1] - 16); i++)
	{
		unsigned int *src_ptr = src;
		unsigned int *dst_ptr = dst;
		for(j=width; j; --j)
			*(dst_ptr++) = *(src_ptr++);
		src += (dispDeviceRowBytes >> 2);
		dst += (dispDeviceRowBytes >> 2);
	}
	for (i=0; i<16; i++)
	{
		unsigned int *dst_ptr = dst;
		for(j=width; j; --j)
			*(dst_ptr++) = 0;
		dst += (dispDeviceRowBytes >> 2);
	}
}
#endif /* ndef NO_SCROLL */

void btext_drawchar(char c)
{
	int cline = 0;
#ifdef NO_SCROLL
	int x;
#endif
	if (!boot_text_mapped)
		return;

	switch (c) {
	case '\b':
		if (g_loc_X > 0)
			--g_loc_X;
		break;
	case '\t':
		g_loc_X = (g_loc_X & -8) + 8;
		break;
	case '\r':
		g_loc_X = 0;
		break;
	case '\n':
		g_loc_X = 0;
		g_loc_Y++;
		cline = 1;
		break;
	default:
		draw_byte(c, g_loc_X++, g_loc_Y);
	}
	if (g_loc_X >= g_max_loc_X) {
		g_loc_X = 0;
		g_loc_Y++;
		cline = 1;
	}
#ifndef NO_SCROLL
	while (g_loc_Y >= g_max_loc_Y) {
		scrollscreen();
		g_loc_Y--;
	}
#else
	/* wrap around from bottom to top of screen so we don't
	   waste time scrolling each line. -- paulus. */
	if (g_loc_Y >= g_max_loc_Y)
		g_loc_Y = 0;
	if (cline) {
		for (x = 0; x < g_max_loc_X; ++x)
			draw_byte(' ', x, g_loc_Y);
	}
#endif
}

void btext_drawstring(const char *c)
{
	if (!boot_text_mapped)
		return;
	while (*c)
		btext_drawchar(*c++);
}

void btext_drawtext(const char *c, unsigned int len)
{
	if (!boot_text_mapped)
		return;
	while (len--)
		btext_drawchar(*c++);
}

void btext_drawhex(unsigned long v)
{
	if (!boot_text_mapped)
		return;
#ifdef CONFIG_PPC64
	btext_drawchar(hex_asc_hi(v >> 56));
	btext_drawchar(hex_asc_lo(v >> 56));
	btext_drawchar(hex_asc_hi(v >> 48));
	btext_drawchar(hex_asc_lo(v >> 48));
	btext_drawchar(hex_asc_hi(v >> 40));
	btext_drawchar(hex_asc_lo(v >> 40));
	btext_drawchar(hex_asc_hi(v >> 32));
	btext_drawchar(hex_asc_lo(v >> 32));
#endif
	btext_drawchar(hex_asc_hi(v >> 24));
	btext_drawchar(hex_asc_lo(v >> 24));
	btext_drawchar(hex_asc_hi(v >> 16));
	btext_drawchar(hex_asc_lo(v >> 16));
	btext_drawchar(hex_asc_hi(v >> 8));
	btext_drawchar(hex_asc_lo(v >> 8));
	btext_drawchar(hex_asc_hi(v));
	btext_drawchar(hex_asc_lo(v));
	btext_drawchar(' ');
}

static void draw_byte(unsigned char c, long locX, long locY)
{
	unsigned char *base = calc_base(locX << 3, locY << 4);
	unsigned char *font = &vga_font[((unsigned int)c) * 16];
	int rb = dispDeviceRowBytes;

	switch(dispDeviceDepth) {
	case 24:
	case 32:
		draw_byte_32(font, (unsigned int *)base, rb);
		break;
	case 15:
	case 16:
		draw_byte_16(font, (unsigned int *)base, rb);
		break;
	case 8:
		draw_byte_8(font, (unsigned int *)base, rb);
		break;
	}
}

static unsigned int expand_bits_8[16] = {
	0x00000000,
	0x000000ff,
	0x0000ff00,
	0x0000ffff,
	0x00ff0000,
	0x00ff00ff,
	0x00ffff00,
	0x00ffffff,
	0xff000000,
	0xff0000ff,
	0xff00ff00,
	0xff00ffff,
	0xffff0000,
	0xffff00ff,
	0xffffff00,
	0xffffffff
};

static unsigned int expand_bits_16[4] = {
	0x00000000,
	0x0000ffff,
	0xffff0000,
	0xffffffff
};


static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
{
	int l, bits;
	int fg = 0xFFFFFFFFUL;
	int bg = 0x00000000UL;

	for (l = 0; l < 16; ++l)
	{
		bits = *font++;
		base[0] = (-(bits >> 7) & fg) ^ bg;
		base[1] = (-((bits >> 6) & 1) & fg) ^ bg;
		base[2] = (-((bits >> 5) & 1) & fg) ^ bg;
		base[3] = (-((bits >> 4) & 1) & fg) ^ bg;
		base[4] = (-((bits >> 3) & 1) & fg) ^ bg;
		base[5] = (-((bits >> 2) & 1) & fg) ^ bg;
		base[6] = (-((bits >> 1) & 1) & fg) ^ bg;
		base[7] = (-(bits & 1) & fg) ^ bg;
		base = (unsigned int *) ((char *)base + rb);
	}
}

static void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
{
	int l, bits;
	int fg = 0xFFFFFFFFUL;
	int bg = 0x00000000UL;
	unsigned int *eb = (int *)expand_bits_16;

	for (l = 0; l < 16; ++l)
	{
		bits = *font++;
		base[0] = (eb[bits >> 6] & fg) ^ bg;
		base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg;
		base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg;
		base[3] = (eb[bits & 3] & fg) ^ bg;
		base = (unsigned int *) ((char *)base + rb);
	}
}

static void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
{
	int l, bits;
	int fg = 0x0F0F0F0FUL;
	int bg = 0x00000000UL;
	unsigned int *eb = (int *)expand_bits_8;

	for (l = 0; l < 16; ++l)
	{
		bits = *font++;
		base[0] = (eb[bits >> 4] & fg) ^ bg;
		base[1] = (eb[bits & 0xf] & fg) ^ bg;
		base = (unsigned int *) ((char *)base + rb);
	}
}

static unsigned char vga_font[cmapsz] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x81, 0xa5, 0x81, 0x81, 0xbd,
	0x99, 0x81, 0x81, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xff,
	0xdb, 0xff, 0xff, 0xc3, 0xe7, 0xff, 0xff, 0x7e, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x6c, 0xfe, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x7c, 0xfe,
	0x7c, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18,
	0x3c, 0x3c, 0xe7, 0xe7, 0xe7, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x18, 0x3c, 0x7e, 0xff, 0xff, 0x7e, 0x18, 0x18, 0x3c,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
	0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xe7, 0xc3, 0xc3, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x42, 0x42, 0x66, 0x3c, 0x00,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x99, 0xbd,
	0xbd, 0x99, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x1e, 0x0e,
	0x1a, 0x32, 0x78, 0xcc, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x7e, 0x18, 0x18,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x33, 0x3f, 0x30, 0x30, 0x30,
	0x30, 0x70, 0xf0, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x63,
	0x7f, 0x63, 0x63, 0x63, 0x63, 0x67, 0xe7, 0xe6, 0xc0, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x18, 0x18, 0xdb, 0x3c, 0xe7, 0x3c, 0xdb, 0x18, 0x18,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfe, 0xf8,
	0xf0, 0xe0, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x06, 0x0e,
	0x1e, 0x3e, 0xfe, 0x3e, 0x1e, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
	0x66, 0x00, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xdb,
	0xdb, 0xdb, 0x7b, 0x1b, 0x1b, 0x1b, 0x1b, 0x1b, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x7c, 0xc6, 0x60, 0x38, 0x6c, 0xc6, 0xc6, 0x6c, 0x38, 0x0c, 0xc6,
	0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0xfe, 0xfe, 0xfe, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
	0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
	0x18, 0x7e, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x18, 0x0c, 0xfe, 0x0c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x60, 0xfe, 0x60, 0x30, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xc0,
	0xc0, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x24, 0x66, 0xff, 0x66, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x38, 0x7c, 0x7c, 0xfe, 0xfe, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe, 0x7c, 0x7c,
	0x38, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x24, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c,
	0x6c, 0xfe, 0x6c, 0x6c, 0x6c, 0xfe, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
	0x18, 0x18, 0x7c, 0xc6, 0xc2, 0xc0, 0x7c, 0x06, 0x06, 0x86, 0xc6, 0x7c,
	0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0xc6, 0x0c, 0x18,
	0x30, 0x60, 0xc6, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c,
	0x6c, 0x38, 0x76, 0xdc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x30, 0x30, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x30,
	0x30, 0x30, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x18,
	0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x3c, 0xff, 0x3c, 0x66, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
	0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x02, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xce, 0xde, 0xf6, 0xe6, 0xc6, 0xc6, 0x7c,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x38, 0x78, 0x18, 0x18, 0x18,
	0x18, 0x18, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
	0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x7c, 0xc6, 0x06, 0x06, 0x3c, 0x06, 0x06, 0x06, 0xc6, 0x7c,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x3c, 0x6c, 0xcc, 0xfe,
	0x0c, 0x0c, 0x0c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
	0xc0, 0xc0, 0xfc, 0x06, 0x06, 0x06, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x38, 0x60, 0xc0, 0xc0, 0xfc, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x06, 0x06, 0x0c, 0x18,
	0x30, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
	0xc6, 0xc6, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x06, 0x06, 0x0c, 0x78,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00,
	0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x06,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00,
	0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60,
	0x30, 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x7c, 0xc6, 0xc6, 0x0c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xde, 0xde,
	0xde, 0xdc, 0xc0, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38,
	0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x66, 0x66, 0x66, 0x66, 0xfc,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0xc2, 0xc0, 0xc0, 0xc0,
	0xc0, 0xc2, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x6c,
	0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x6c, 0xf8, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, 0x60, 0x62, 0x66, 0xfe,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68,
	0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
	0xc2, 0xc0, 0xc0, 0xde, 0xc6, 0xc6, 0x66, 0x3a, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18,
	0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x0c,
	0x0c, 0x0c, 0x0c, 0x0c, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0xe6, 0x66, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0x66, 0xe6,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x60, 0x60, 0x60, 0x60, 0x60,
	0x60, 0x62, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xe7,
	0xff, 0xff, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, 0xc6,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
	0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66,
	0x66, 0x66, 0x7c, 0x60, 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xd6, 0xde, 0x7c,
	0x0c, 0x0e, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x6c,
	0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
	0xc6, 0x60, 0x38, 0x0c, 0x06, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0xff, 0xdb, 0x99, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
	0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
	0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x66,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x18,
	0x3c, 0x66, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
	0xc3, 0x66, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0xff, 0xc3, 0x86, 0x0c, 0x18, 0x30, 0x60, 0xc1, 0xc3, 0xff,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x30, 0x30, 0x30, 0x30, 0x30,
	0x30, 0x30, 0x30, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
	0xc0, 0xe0, 0x70, 0x38, 0x1c, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x3c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x3c,
	0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00,
	0x30, 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x0c, 0x7c,
	0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x60,
	0x60, 0x78, 0x6c, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc0, 0xc0, 0xc0, 0xc6, 0x7c,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x0c, 0x0c, 0x3c, 0x6c, 0xcc,
	0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xf0,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xcc, 0xcc,
	0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0xcc, 0x78, 0x00, 0x00, 0x00, 0xe0, 0x60,
	0x60, 0x6c, 0x76, 0x66, 0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x18, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x0e, 0x06, 0x06,
	0x06, 0x06, 0x06, 0x06, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0xe0, 0x60,
	0x60, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0xff, 0xdb,
	0xdb, 0xdb, 0xdb, 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x66, 0x66,
	0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x76, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0x0c, 0x1e, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x76, 0x66, 0x60, 0x60, 0x60, 0xf0,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x60,
	0x38, 0x0c, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x30,
	0x30, 0xfc, 0x30, 0x30, 0x30, 0x30, 0x36, 0x1c, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3,
	0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0xc3,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6,
	0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0xfe, 0xcc, 0x18, 0x30, 0x60, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x0e, 0x18, 0x18, 0x18, 0x70, 0x18, 0x18, 0x18, 0x18, 0x0e,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18,
	0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x18,
	0x18, 0x18, 0x0e, 0x18, 0x18, 0x18, 0x18, 0x70, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6,
	0xc6, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
	0xc2, 0xc0, 0xc0, 0xc0, 0xc2, 0x66, 0x3c, 0x0c, 0x06, 0x7c, 0x00, 0x00,
	0x00, 0x00, 0xcc, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x00, 0x7c, 0xc6, 0xfe,
	0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c,
	0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0xcc, 0x00, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0x78, 0x0c, 0x7c,
	0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38,
	0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x0c, 0x06,
	0x3c, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xfe,
	0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
	0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x60, 0x30, 0x18, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x38, 0x18, 0x18,
	0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x66,
	0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x60, 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
	0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0xc6,
	0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, 0x00,
	0x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
	0x18, 0x30, 0x60, 0x00, 0xfe, 0x66, 0x60, 0x7c, 0x60, 0x60, 0x66, 0xfe,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x3b, 0x1b,
	0x7e, 0xd8, 0xdc, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x6c,
767 0xcc, 0xcc, 0xfe, 0xcc, 0xcc, 0xcc, 0xcc, 0xce, 0x00, 0x00, 0x00, 0x00, 767 0xcc, 0xcc, 0xfe, 0xcc, 0xcc, 0xcc, 0xcc, 0xce, 0x00, 0x00, 0x00, 0x00,
768 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 768 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
769 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 769 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x00, 0x7c, 0xc6, 0xc6,
770 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 770 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18,
771 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 771 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
772 0x00, 0x30, 0x78, 0xcc, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76, 772 0x00, 0x30, 0x78, 0xcc, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
773 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0xcc, 0xcc, 0xcc, 773 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0xcc, 0xcc, 0xcc,
774 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 774 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
775 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0x78, 0x00, 775 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0x78, 0x00,
776 0x00, 0xc6, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 776 0x00, 0xc6, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
777 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 777 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
778 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 778 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
779 0xc3, 0xc0, 0xc0, 0xc0, 0xc3, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 779 0xc3, 0xc0, 0xc0, 0xc0, 0xc3, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00,
780 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xe6, 0xfc, 780 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xe6, 0xfc,
781 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0xff, 0x18, 781 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0xff, 0x18,
782 0xff, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 782 0xff, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66,
783 0x7c, 0x62, 0x66, 0x6f, 0x66, 0x66, 0x66, 0xf3, 0x00, 0x00, 0x00, 0x00, 783 0x7c, 0x62, 0x66, 0x6f, 0x66, 0x66, 0x66, 0xf3, 0x00, 0x00, 0x00, 0x00,
784 0x00, 0x0e, 0x1b, 0x18, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 784 0x00, 0x0e, 0x1b, 0x18, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18,
785 0xd8, 0x70, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0x78, 0x0c, 0x7c, 785 0xd8, 0x70, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0x78, 0x0c, 0x7c,
786 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 786 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30,
787 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 787 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
788 0x00, 0x18, 0x30, 0x60, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 788 0x00, 0x18, 0x30, 0x60, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
789 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0xcc, 0xcc, 0xcc, 789 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0xcc, 0xcc, 0xcc,
790 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 790 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc,
791 0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 791 0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
792 0x76, 0xdc, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, 792 0x76, 0xdc, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6,
793 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x6c, 0x6c, 0x3e, 0x00, 0x7e, 0x00, 793 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x6c, 0x6c, 0x3e, 0x00, 0x7e, 0x00,
794 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c, 794 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
795 0x38, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 795 0x38, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
796 0x00, 0x00, 0x30, 0x30, 0x00, 0x30, 0x30, 0x60, 0xc0, 0xc6, 0xc6, 0x7c, 796 0x00, 0x00, 0x30, 0x30, 0x00, 0x30, 0x30, 0x60, 0xc0, 0xc6, 0xc6, 0x7c,
797 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0, 797 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
798 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 798 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
799 0x00, 0x00, 0xfe, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 799 0x00, 0x00, 0xfe, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00,
800 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, 0x60, 0xce, 0x9b, 0x06, 800 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, 0x60, 0xce, 0x9b, 0x06,
801 0x0c, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, 801 0x0c, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30,
802 0x66, 0xce, 0x96, 0x3e, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 802 0x66, 0xce, 0x96, 0x3e, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18,
803 0x00, 0x18, 0x18, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 803 0x00, 0x18, 0x18, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
804 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x6c, 0xd8, 0x6c, 0x36, 0x00, 0x00, 804 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x6c, 0xd8, 0x6c, 0x36, 0x00, 0x00,
805 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x6c, 0x36, 805 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x6c, 0x36,
806 0x6c, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x44, 0x11, 0x44, 806 0x6c, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x44, 0x11, 0x44,
807 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 807 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44,
808 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 808 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa,
809 0x55, 0xaa, 0x55, 0xaa, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 809 0x55, 0xaa, 0x55, 0xaa, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77,
810 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0x18, 0x18, 0x18, 0x18, 810 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0x18, 0x18, 0x18, 0x18,
811 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 811 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
812 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18, 812 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18,
813 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8, 813 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
814 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 814 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
815 0x36, 0x36, 0x36, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 815 0x36, 0x36, 0x36, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
816 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x36, 0x36, 0x36, 0x36, 816 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x36, 0x36, 0x36, 0x36,
817 0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0xf8, 817 0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0xf8,
818 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 818 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
819 0x36, 0xf6, 0x06, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 819 0x36, 0xf6, 0x06, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
820 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 820 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
821 0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x06, 0xf6, 821 0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x06, 0xf6,
822 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 822 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
823 0x36, 0xf6, 0x06, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 823 0x36, 0xf6, 0x06, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
824 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xfe, 0x00, 0x00, 0x00, 0x00, 824 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xfe, 0x00, 0x00, 0x00, 0x00,
825 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8, 825 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
826 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 826 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
827 0x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 827 0x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
828 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, 828 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00,
829 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 829 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff,
830 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 830 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
831 0x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 831 0x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
832 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18, 832 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
833 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 833 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
834 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 834 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
835 0x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 835 0x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
836 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18, 836 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
837 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x37, 837 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x37,
838 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 838 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
839 0x36, 0x37, 0x30, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 839 0x36, 0x37, 0x30, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
840 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36, 840 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
841 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf7, 0x00, 0xff, 841 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf7, 0x00, 0xff,
842 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 842 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
843 0x00, 0xff, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 843 0x00, 0xff, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
844 0x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36, 844 0x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
845 0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff, 845 0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff,
846 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 846 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36,
847 0x36, 0xf7, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 847 0x36, 0xf7, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
848 0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 848 0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
849 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xff, 849 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xff,
850 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 850 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
851 0x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 851 0x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
852 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x36, 0x36, 0x36, 0x36, 852 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x36, 0x36, 0x36, 0x36,
853 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x3f, 853 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x3f,
854 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 854 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
855 0x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 855 0x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
856 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18, 856 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
857 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 857 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
858 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 858 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
859 0x36, 0x36, 0x36, 0xff, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 859 0x36, 0x36, 0x36, 0xff, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
860 0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, 860 0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18,
861 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 861 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8,
862 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 862 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
863 0x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 863 0x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
864 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 864 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
865 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 865 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
866 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xf0, 0xf0, 866 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xf0, 0xf0,
867 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 867 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
868 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 868 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
869 0x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 869 0x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
870 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 870 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
871 0x00, 0x76, 0xdc, 0xd8, 0xd8, 0xd8, 0xdc, 0x76, 0x00, 0x00, 0x00, 0x00, 871 0x00, 0x76, 0xdc, 0xd8, 0xd8, 0xd8, 0xdc, 0x76, 0x00, 0x00, 0x00, 0x00,
872 0x00, 0x00, 0x78, 0xcc, 0xcc, 0xcc, 0xd8, 0xcc, 0xc6, 0xc6, 0xc6, 0xcc, 872 0x00, 0x00, 0x78, 0xcc, 0xcc, 0xcc, 0xd8, 0xcc, 0xc6, 0xc6, 0xc6, 0xcc,
873 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0xc6, 0xc0, 0xc0, 0xc0, 873 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0xc6, 0xc0, 0xc0, 0xc0,
874 0xc0, 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 874 0xc0, 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
875 0xfe, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 875 0xfe, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
876 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x60, 0x30, 0x18, 0x30, 0x60, 0xc6, 0xfe, 876 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x60, 0x30, 0x18, 0x30, 0x60, 0xc6, 0xfe,
877 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xd8, 0xd8, 877 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xd8, 0xd8,
878 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 878 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
879 0x66, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xc0, 0x00, 0x00, 0x00, 879 0x66, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xc0, 0x00, 0x00, 0x00,
880 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 880 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
881 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x18, 0x3c, 0x66, 0x66, 881 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x18, 0x3c, 0x66, 0x66,
882 0x66, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 882 0x66, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
883 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0x6c, 0x38, 0x00, 0x00, 0x00, 0x00, 883 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0x6c, 0x38, 0x00, 0x00, 0x00, 0x00,
884 0x00, 0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xc6, 0x6c, 0x6c, 0x6c, 0x6c, 0xee, 884 0x00, 0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xc6, 0x6c, 0x6c, 0x6c, 0x6c, 0xee,
885 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x30, 0x18, 0x0c, 0x3e, 0x66, 885 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x30, 0x18, 0x0c, 0x3e, 0x66,
886 0x66, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 886 0x66, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
887 0x00, 0x7e, 0xdb, 0xdb, 0xdb, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 887 0x00, 0x7e, 0xdb, 0xdb, 0xdb, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
888 0x00, 0x00, 0x00, 0x03, 0x06, 0x7e, 0xdb, 0xdb, 0xf3, 0x7e, 0x60, 0xc0, 888 0x00, 0x00, 0x00, 0x03, 0x06, 0x7e, 0xdb, 0xdb, 0xf3, 0x7e, 0x60, 0xc0,
889 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x30, 0x60, 0x60, 0x7c, 0x60, 889 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x30, 0x60, 0x60, 0x7c, 0x60,
890 0x60, 0x60, 0x30, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 890 0x60, 0x60, 0x30, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c,
891 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 891 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
892 0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 892 0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00,
893 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18, 893 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18,
894 0x18, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 894 0x18, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
895 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00, 895 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
896 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x00, 0x7e, 896 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x00, 0x7e,
897 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x1b, 0x1b, 0x1b, 0x18, 0x18, 897 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x1b, 0x1b, 0x1b, 0x18, 0x18,
898 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 898 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
899 0x18, 0x18, 0x18, 0x18, 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, 899 0x18, 0x18, 0x18, 0x18, 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00,
900 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x7e, 0x00, 0x18, 0x18, 0x00, 900 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x7e, 0x00, 0x18, 0x18, 0x00,
901 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00, 901 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00,
902 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c, 902 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
903 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 903 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
904 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 904 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00,
905 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 905 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
906 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0c, 0x0c, 906 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0c, 0x0c,
907 0x0c, 0x0c, 0x0c, 0xec, 0x6c, 0x6c, 0x3c, 0x1c, 0x00, 0x00, 0x00, 0x00, 907 0x0c, 0x0c, 0x0c, 0xec, 0x6c, 0x6c, 0x3c, 0x1c, 0x00, 0x00, 0x00, 0x00,
908 0x00, 0xd8, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 908 0x00, 0xd8, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00,
909 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xd8, 0x30, 0x60, 0xc8, 0xf8, 0x00, 909 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xd8, 0x30, 0x60, 0xc8, 0xf8, 0x00,
910 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 910 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
911 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 911 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00,
912 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 912 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
913 0x00, 0x00, 0x00, 0x00, 913 0x00, 0x00, 0x00, 0x00,
914 }; 914 };
915 915
916 void __init udbg_init_btext(void) 916 void __init udbg_init_btext(void)
917 { 917 {
918 /* If btext is enabled, we might have a BAT setup for early display, 918 /* If btext is enabled, we might have a BAT setup for early display,
919 * thus we do enable some very basic udbg output 919 * thus we do enable some very basic udbg output
920 */ 920 */
921 udbg_putc = btext_drawchar; 921 udbg_putc = btext_drawchar;
922 } 922 }
923 923
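The single assignment in udbg_init_btext() is the whole hookup: from then on, any early-boot debug output routed through the udbg_putc pointer is drawn by btext_drawchar() using the font table above. A minimal sketch of a string helper layered on that hook, assuming the one-character-at-a-time signature used here (the real helpers live in arch/powerpc/kernel/udbg.c and are not part of this commit):

	/* Illustrative only: emit a string through whatever early console
	 * was registered in udbg_putc (btext_drawchar() once
	 * udbg_init_btext() has run). */
	static void example_udbg_puts(const char *s)
	{
		if (!udbg_putc)		/* no early console hooked up yet */
			return;
		while (*s)
			udbg_putc(*s++);
	}
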
arch/powerpc/kernel/clock.c
1 /* 1 /*
2 * Dummy clk implementations for powerpc. 2 * Dummy clk implementations for powerpc.
3 * These need to be overridden in platform code. 3 * These need to be overridden in platform code.
4 */ 4 */
5 5
6 #include <linux/clk.h> 6 #include <linux/clk.h>
7 #include <linux/err.h> 7 #include <linux/err.h>
8 #include <linux/errno.h> 8 #include <linux/errno.h>
9 #include <linux/module.h> 9 #include <linux/export.h>
10 #include <asm/clk_interface.h> 10 #include <asm/clk_interface.h>
11 11
12 struct clk_interface clk_functions; 12 struct clk_interface clk_functions;
13 13
14 struct clk *clk_get(struct device *dev, const char *id) 14 struct clk *clk_get(struct device *dev, const char *id)
15 { 15 {
16 if (clk_functions.clk_get) 16 if (clk_functions.clk_get)
17 return clk_functions.clk_get(dev, id); 17 return clk_functions.clk_get(dev, id);
18 return ERR_PTR(-ENOSYS); 18 return ERR_PTR(-ENOSYS);
19 } 19 }
20 EXPORT_SYMBOL(clk_get); 20 EXPORT_SYMBOL(clk_get);
21 21
22 void clk_put(struct clk *clk) 22 void clk_put(struct clk *clk)
23 { 23 {
24 if (clk_functions.clk_put) 24 if (clk_functions.clk_put)
25 clk_functions.clk_put(clk); 25 clk_functions.clk_put(clk);
26 } 26 }
27 EXPORT_SYMBOL(clk_put); 27 EXPORT_SYMBOL(clk_put);
28 28
29 int clk_enable(struct clk *clk) 29 int clk_enable(struct clk *clk)
30 { 30 {
31 if (clk_functions.clk_enable) 31 if (clk_functions.clk_enable)
32 return clk_functions.clk_enable(clk); 32 return clk_functions.clk_enable(clk);
33 return -ENOSYS; 33 return -ENOSYS;
34 } 34 }
35 EXPORT_SYMBOL(clk_enable); 35 EXPORT_SYMBOL(clk_enable);
36 36
37 void clk_disable(struct clk *clk) 37 void clk_disable(struct clk *clk)
38 { 38 {
39 if (clk_functions.clk_disable) 39 if (clk_functions.clk_disable)
40 clk_functions.clk_disable(clk); 40 clk_functions.clk_disable(clk);
41 } 41 }
42 EXPORT_SYMBOL(clk_disable); 42 EXPORT_SYMBOL(clk_disable);
43 43
44 unsigned long clk_get_rate(struct clk *clk) 44 unsigned long clk_get_rate(struct clk *clk)
45 { 45 {
46 if (clk_functions.clk_get_rate) 46 if (clk_functions.clk_get_rate)
47 return clk_functions.clk_get_rate(clk); 47 return clk_functions.clk_get_rate(clk);
48 return 0; 48 return 0;
49 } 49 }
50 EXPORT_SYMBOL(clk_get_rate); 50 EXPORT_SYMBOL(clk_get_rate);
51 51
52 long clk_round_rate(struct clk *clk, unsigned long rate) 52 long clk_round_rate(struct clk *clk, unsigned long rate)
53 { 53 {
54 if (clk_functions.clk_round_rate) 54 if (clk_functions.clk_round_rate)
55 return clk_functions.clk_round_rate(clk, rate); 55 return clk_functions.clk_round_rate(clk, rate);
56 return -ENOSYS; 56 return -ENOSYS;
57 } 57 }
58 EXPORT_SYMBOL(clk_round_rate); 58 EXPORT_SYMBOL(clk_round_rate);
59 59
60 int clk_set_rate(struct clk *clk, unsigned long rate) 60 int clk_set_rate(struct clk *clk, unsigned long rate)
61 { 61 {
62 if (clk_functions.clk_set_rate) 62 if (clk_functions.clk_set_rate)
63 return clk_functions.clk_set_rate(clk, rate); 63 return clk_functions.clk_set_rate(clk, rate);
64 return -ENOSYS; 64 return -ENOSYS;
65 } 65 }
66 EXPORT_SYMBOL(clk_set_rate); 66 EXPORT_SYMBOL(clk_set_rate);
67 67
68 struct clk *clk_get_parent(struct clk *clk) 68 struct clk *clk_get_parent(struct clk *clk)
69 { 69 {
70 if (clk_functions.clk_get_parent) 70 if (clk_functions.clk_get_parent)
71 return clk_functions.clk_get_parent(clk); 71 return clk_functions.clk_get_parent(clk);
72 return ERR_PTR(-ENOSYS); 72 return ERR_PTR(-ENOSYS);
73 } 73 }
74 EXPORT_SYMBOL(clk_get_parent); 74 EXPORT_SYMBOL(clk_get_parent);
75 75
76 int clk_set_parent(struct clk *clk, struct clk *parent) 76 int clk_set_parent(struct clk *clk, struct clk *parent)
77 { 77 {
78 if (clk_functions.clk_set_parent) 78 if (clk_functions.clk_set_parent)
79 return clk_functions.clk_set_parent(clk, parent); 79 return clk_functions.clk_set_parent(clk, parent);
80 return -ENOSYS; 80 return -ENOSYS;
81 } 81 }
82 EXPORT_SYMBOL(clk_set_parent); 82 EXPORT_SYMBOL(clk_set_parent);
83 83
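Everything in clock.c funnels through the clk_functions dispatch table, so platform code overrides the dummy behaviour simply by filling in the hooks it implements; any pointer left NULL keeps the -ENOSYS (or ERR_PTR(-ENOSYS)) fallback shown above. A hedged sketch of such a hookup, with hypothetical myplat_* names and an assumed arch_initcall registration (the actual struct clk_interface layout lives in asm/clk_interface.h, which is not part of this diff):

	/* Hypothetical platform code: the field names match the dispatch
	 * calls in clock.c above. */
	static struct clk *myplat_clk_get(struct device *dev, const char *id)
	{
		return ERR_PTR(-ENOENT);	/* look up the clock in platform data here */
	}

	static int myplat_clk_enable(struct clk *clk)
	{
		return 0;			/* gate the clock on in hardware here */
	}

	static int __init myplat_clk_init(void)
	{
		clk_functions.clk_get = myplat_clk_get;
		clk_functions.clk_enable = myplat_clk_enable;
		/* hooks left NULL (clk_set_rate, ...) keep the -ENOSYS fallback */
		return 0;
	}
	arch_initcall(myplat_clk_init);
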
arch/powerpc/kernel/cputable.c
1 /* 1 /*
2 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org) 2 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
3 * 3 *
4 * Modifications for ppc64: 4 * Modifications for ppc64:
5 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com> 5 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 */ 11 */
12 12
13 #include <linux/string.h> 13 #include <linux/string.h>
14 #include <linux/sched.h> 14 #include <linux/sched.h>
15 #include <linux/threads.h> 15 #include <linux/threads.h>
16 #include <linux/init.h> 16 #include <linux/init.h>
17 #include <linux/module.h> 17 #include <linux/export.h>
18 18
19 #include <asm/oprofile_impl.h> 19 #include <asm/oprofile_impl.h>
20 #include <asm/cputable.h> 20 #include <asm/cputable.h>
21 #include <asm/prom.h> /* for PTRRELOC on ARCH=ppc */ 21 #include <asm/prom.h> /* for PTRRELOC on ARCH=ppc */
22 #include <asm/mmu.h> 22 #include <asm/mmu.h>
23 23
24 struct cpu_spec* cur_cpu_spec = NULL; 24 struct cpu_spec* cur_cpu_spec = NULL;
25 EXPORT_SYMBOL(cur_cpu_spec); 25 EXPORT_SYMBOL(cur_cpu_spec);
26 26
27 /* The platform string corresponding to the real PVR */ 27 /* The platform string corresponding to the real PVR */
28 const char *powerpc_base_platform; 28 const char *powerpc_base_platform;
29 29
30 /* NOTE: 30 /* NOTE:
31 * Unlike ppc32, ppc64 will only call this once for the boot CPU, it's 31 * Unlike ppc32, ppc64 will only call this once for the boot CPU, it's
32 * the responsibility of the appropriate CPU save/restore functions to 32 * the responsibility of the appropriate CPU save/restore functions to
33 * eventually copy these settings over. Those save/restore aren't yet 33 * eventually copy these settings over. Those save/restore aren't yet
34 * part of the cputable though. That has to be fixed for both ppc32 34 * part of the cputable though. That has to be fixed for both ppc32
35 * and ppc64 35 * and ppc64
36 */ 36 */
37 #ifdef CONFIG_PPC32 37 #ifdef CONFIG_PPC32
38 extern void __setup_cpu_e200(unsigned long offset, struct cpu_spec* spec); 38 extern void __setup_cpu_e200(unsigned long offset, struct cpu_spec* spec);
39 extern void __setup_cpu_e500v1(unsigned long offset, struct cpu_spec* spec); 39 extern void __setup_cpu_e500v1(unsigned long offset, struct cpu_spec* spec);
40 extern void __setup_cpu_e500v2(unsigned long offset, struct cpu_spec* spec); 40 extern void __setup_cpu_e500v2(unsigned long offset, struct cpu_spec* spec);
41 extern void __setup_cpu_e500mc(unsigned long offset, struct cpu_spec* spec); 41 extern void __setup_cpu_e500mc(unsigned long offset, struct cpu_spec* spec);
42 extern void __setup_cpu_440ep(unsigned long offset, struct cpu_spec* spec); 42 extern void __setup_cpu_440ep(unsigned long offset, struct cpu_spec* spec);
43 extern void __setup_cpu_440epx(unsigned long offset, struct cpu_spec* spec); 43 extern void __setup_cpu_440epx(unsigned long offset, struct cpu_spec* spec);
44 extern void __setup_cpu_440gx(unsigned long offset, struct cpu_spec* spec); 44 extern void __setup_cpu_440gx(unsigned long offset, struct cpu_spec* spec);
45 extern void __setup_cpu_440grx(unsigned long offset, struct cpu_spec* spec); 45 extern void __setup_cpu_440grx(unsigned long offset, struct cpu_spec* spec);
46 extern void __setup_cpu_440spe(unsigned long offset, struct cpu_spec* spec); 46 extern void __setup_cpu_440spe(unsigned long offset, struct cpu_spec* spec);
47 extern void __setup_cpu_440x5(unsigned long offset, struct cpu_spec* spec); 47 extern void __setup_cpu_440x5(unsigned long offset, struct cpu_spec* spec);
48 extern void __setup_cpu_460ex(unsigned long offset, struct cpu_spec* spec); 48 extern void __setup_cpu_460ex(unsigned long offset, struct cpu_spec* spec);
49 extern void __setup_cpu_460gt(unsigned long offset, struct cpu_spec* spec); 49 extern void __setup_cpu_460gt(unsigned long offset, struct cpu_spec* spec);
50 extern void __setup_cpu_460sx(unsigned long offset, struct cpu_spec *spec); 50 extern void __setup_cpu_460sx(unsigned long offset, struct cpu_spec *spec);
51 extern void __setup_cpu_apm821xx(unsigned long offset, struct cpu_spec *spec); 51 extern void __setup_cpu_apm821xx(unsigned long offset, struct cpu_spec *spec);
52 extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec); 52 extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec);
53 extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec); 53 extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec);
54 extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec); 54 extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec);
55 extern void __setup_cpu_750cx(unsigned long offset, struct cpu_spec* spec); 55 extern void __setup_cpu_750cx(unsigned long offset, struct cpu_spec* spec);
56 extern void __setup_cpu_750fx(unsigned long offset, struct cpu_spec* spec); 56 extern void __setup_cpu_750fx(unsigned long offset, struct cpu_spec* spec);
57 extern void __setup_cpu_7400(unsigned long offset, struct cpu_spec* spec); 57 extern void __setup_cpu_7400(unsigned long offset, struct cpu_spec* spec);
58 extern void __setup_cpu_7410(unsigned long offset, struct cpu_spec* spec); 58 extern void __setup_cpu_7410(unsigned long offset, struct cpu_spec* spec);
59 extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec); 59 extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec);
60 #endif /* CONFIG_PPC32 */ 60 #endif /* CONFIG_PPC32 */
61 #ifdef CONFIG_PPC64 61 #ifdef CONFIG_PPC64
62 extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec); 62 extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
63 extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec); 63 extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec);
64 extern void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec* spec); 64 extern void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec* spec);
65 extern void __setup_cpu_a2(unsigned long offset, struct cpu_spec* spec); 65 extern void __setup_cpu_a2(unsigned long offset, struct cpu_spec* spec);
66 extern void __restore_cpu_pa6t(void); 66 extern void __restore_cpu_pa6t(void);
67 extern void __restore_cpu_ppc970(void); 67 extern void __restore_cpu_ppc970(void);
68 extern void __setup_cpu_power7(unsigned long offset, struct cpu_spec* spec); 68 extern void __setup_cpu_power7(unsigned long offset, struct cpu_spec* spec);
69 extern void __restore_cpu_power7(void); 69 extern void __restore_cpu_power7(void);
70 extern void __restore_cpu_a2(void); 70 extern void __restore_cpu_a2(void);
71 #endif /* CONFIG_PPC64 */ 71 #endif /* CONFIG_PPC64 */
72 #if defined(CONFIG_E500) 72 #if defined(CONFIG_E500)
73 extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec); 73 extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
74 extern void __restore_cpu_e5500(void); 74 extern void __restore_cpu_e5500(void);
75 #endif /* CONFIG_E500 */ 75 #endif /* CONFIG_E500 */
76 76
77 /* This table only contains "desktop" CPUs, it needs to be filled with embedded 77 /* This table only contains "desktop" CPUs, it needs to be filled with embedded
78 * ones as well... 78 * ones as well...
79 */ 79 */
80 #define COMMON_USER (PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \ 80 #define COMMON_USER (PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \
81 PPC_FEATURE_HAS_MMU) 81 PPC_FEATURE_HAS_MMU)
82 #define COMMON_USER_PPC64 (COMMON_USER | PPC_FEATURE_64) 82 #define COMMON_USER_PPC64 (COMMON_USER | PPC_FEATURE_64)
83 #define COMMON_USER_POWER4 (COMMON_USER_PPC64 | PPC_FEATURE_POWER4) 83 #define COMMON_USER_POWER4 (COMMON_USER_PPC64 | PPC_FEATURE_POWER4)
84 #define COMMON_USER_POWER5 (COMMON_USER_PPC64 | PPC_FEATURE_POWER5 |\ 84 #define COMMON_USER_POWER5 (COMMON_USER_PPC64 | PPC_FEATURE_POWER5 |\
85 PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP) 85 PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP)
86 #define COMMON_USER_POWER5_PLUS (COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS|\ 86 #define COMMON_USER_POWER5_PLUS (COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS|\
87 PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP) 87 PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP)
88 #define COMMON_USER_POWER6 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_05 |\ 88 #define COMMON_USER_POWER6 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_05 |\
89 PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \ 89 PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \
90 PPC_FEATURE_TRUE_LE | \ 90 PPC_FEATURE_TRUE_LE | \
91 PPC_FEATURE_PSERIES_PERFMON_COMPAT) 91 PPC_FEATURE_PSERIES_PERFMON_COMPAT)
92 #define COMMON_USER_POWER7 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_06 |\ 92 #define COMMON_USER_POWER7 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_06 |\
93 PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \ 93 PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \
94 PPC_FEATURE_TRUE_LE | \ 94 PPC_FEATURE_TRUE_LE | \
95 PPC_FEATURE_PSERIES_PERFMON_COMPAT) 95 PPC_FEATURE_PSERIES_PERFMON_COMPAT)
96 #define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\ 96 #define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\
97 PPC_FEATURE_TRUE_LE | \ 97 PPC_FEATURE_TRUE_LE | \
98 PPC_FEATURE_HAS_ALTIVEC_COMP) 98 PPC_FEATURE_HAS_ALTIVEC_COMP)
99 #ifdef CONFIG_PPC_BOOK3E_64 99 #ifdef CONFIG_PPC_BOOK3E_64
100 #define COMMON_USER_BOOKE (COMMON_USER_PPC64 | PPC_FEATURE_BOOKE) 100 #define COMMON_USER_BOOKE (COMMON_USER_PPC64 | PPC_FEATURE_BOOKE)
101 #else 101 #else
102 #define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \ 102 #define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
103 PPC_FEATURE_BOOKE) 103 PPC_FEATURE_BOOKE)
104 #endif 104 #endif
105 105
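Each cpu_specs entry below is matched against the processor version register: the PVR is masked with pvr_mask and compared to pvr_value, and the first matching entry wins, which is why exact-PVR entries (mask 0xffffffff, such as the PPC970MP DD1.0 case) are listed ahead of the generic 0xffff0000 ones. A sketch of that matching rule, assuming a simple linear walk (the real lookup, identify_cpu() further down this file, also takes a relocation offset and invokes the cpu_setup callback):

	/* Illustrative sketch of the pvr_mask/pvr_value match. */
	static struct cpu_spec *example_lookup_cpu_spec(unsigned int pvr)
	{
		struct cpu_spec *s;

		for (s = cpu_specs; s < cpu_specs + ARRAY_SIZE(cpu_specs); s++)
			if ((pvr & s->pvr_mask) == s->pvr_value)
				return s;	/* first match wins */
		return NULL;
	}
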
106 static struct cpu_spec __initdata cpu_specs[] = { 106 static struct cpu_spec __initdata cpu_specs[] = {
107 #ifdef CONFIG_PPC_BOOK3S_64 107 #ifdef CONFIG_PPC_BOOK3S_64
108 { /* Power3 */ 108 { /* Power3 */
109 .pvr_mask = 0xffff0000, 109 .pvr_mask = 0xffff0000,
110 .pvr_value = 0x00400000, 110 .pvr_value = 0x00400000,
111 .cpu_name = "POWER3 (630)", 111 .cpu_name = "POWER3 (630)",
112 .cpu_features = CPU_FTRS_POWER3, 112 .cpu_features = CPU_FTRS_POWER3,
113 .cpu_user_features = COMMON_USER_PPC64|PPC_FEATURE_PPC_LE, 113 .cpu_user_features = COMMON_USER_PPC64|PPC_FEATURE_PPC_LE,
114 .mmu_features = MMU_FTR_HPTE_TABLE, 114 .mmu_features = MMU_FTR_HPTE_TABLE,
115 .icache_bsize = 128, 115 .icache_bsize = 128,
116 .dcache_bsize = 128, 116 .dcache_bsize = 128,
117 .num_pmcs = 8, 117 .num_pmcs = 8,
118 .pmc_type = PPC_PMC_IBM, 118 .pmc_type = PPC_PMC_IBM,
119 .oprofile_cpu_type = "ppc64/power3", 119 .oprofile_cpu_type = "ppc64/power3",
120 .oprofile_type = PPC_OPROFILE_RS64, 120 .oprofile_type = PPC_OPROFILE_RS64,
121 .platform = "power3", 121 .platform = "power3",
122 }, 122 },
123 { /* Power3+ */ 123 { /* Power3+ */
124 .pvr_mask = 0xffff0000, 124 .pvr_mask = 0xffff0000,
125 .pvr_value = 0x00410000, 125 .pvr_value = 0x00410000,
126 .cpu_name = "POWER3 (630+)", 126 .cpu_name = "POWER3 (630+)",
127 .cpu_features = CPU_FTRS_POWER3, 127 .cpu_features = CPU_FTRS_POWER3,
128 .cpu_user_features = COMMON_USER_PPC64|PPC_FEATURE_PPC_LE, 128 .cpu_user_features = COMMON_USER_PPC64|PPC_FEATURE_PPC_LE,
129 .mmu_features = MMU_FTR_HPTE_TABLE, 129 .mmu_features = MMU_FTR_HPTE_TABLE,
130 .icache_bsize = 128, 130 .icache_bsize = 128,
131 .dcache_bsize = 128, 131 .dcache_bsize = 128,
132 .num_pmcs = 8, 132 .num_pmcs = 8,
133 .pmc_type = PPC_PMC_IBM, 133 .pmc_type = PPC_PMC_IBM,
134 .oprofile_cpu_type = "ppc64/power3", 134 .oprofile_cpu_type = "ppc64/power3",
135 .oprofile_type = PPC_OPROFILE_RS64, 135 .oprofile_type = PPC_OPROFILE_RS64,
136 .platform = "power3", 136 .platform = "power3",
137 }, 137 },
138 { /* Northstar */ 138 { /* Northstar */
139 .pvr_mask = 0xffff0000, 139 .pvr_mask = 0xffff0000,
140 .pvr_value = 0x00330000, 140 .pvr_value = 0x00330000,
141 .cpu_name = "RS64-II (northstar)", 141 .cpu_name = "RS64-II (northstar)",
142 .cpu_features = CPU_FTRS_RS64, 142 .cpu_features = CPU_FTRS_RS64,
143 .cpu_user_features = COMMON_USER_PPC64, 143 .cpu_user_features = COMMON_USER_PPC64,
144 .mmu_features = MMU_FTR_HPTE_TABLE, 144 .mmu_features = MMU_FTR_HPTE_TABLE,
145 .icache_bsize = 128, 145 .icache_bsize = 128,
146 .dcache_bsize = 128, 146 .dcache_bsize = 128,
147 .num_pmcs = 8, 147 .num_pmcs = 8,
148 .pmc_type = PPC_PMC_IBM, 148 .pmc_type = PPC_PMC_IBM,
149 .oprofile_cpu_type = "ppc64/rs64", 149 .oprofile_cpu_type = "ppc64/rs64",
150 .oprofile_type = PPC_OPROFILE_RS64, 150 .oprofile_type = PPC_OPROFILE_RS64,
151 .platform = "rs64", 151 .platform = "rs64",
152 }, 152 },
153 { /* Pulsar */ 153 { /* Pulsar */
154 .pvr_mask = 0xffff0000, 154 .pvr_mask = 0xffff0000,
155 .pvr_value = 0x00340000, 155 .pvr_value = 0x00340000,
156 .cpu_name = "RS64-III (pulsar)", 156 .cpu_name = "RS64-III (pulsar)",
157 .cpu_features = CPU_FTRS_RS64, 157 .cpu_features = CPU_FTRS_RS64,
158 .cpu_user_features = COMMON_USER_PPC64, 158 .cpu_user_features = COMMON_USER_PPC64,
159 .mmu_features = MMU_FTR_HPTE_TABLE, 159 .mmu_features = MMU_FTR_HPTE_TABLE,
160 .icache_bsize = 128, 160 .icache_bsize = 128,
161 .dcache_bsize = 128, 161 .dcache_bsize = 128,
162 .num_pmcs = 8, 162 .num_pmcs = 8,
163 .pmc_type = PPC_PMC_IBM, 163 .pmc_type = PPC_PMC_IBM,
164 .oprofile_cpu_type = "ppc64/rs64", 164 .oprofile_cpu_type = "ppc64/rs64",
165 .oprofile_type = PPC_OPROFILE_RS64, 165 .oprofile_type = PPC_OPROFILE_RS64,
166 .platform = "rs64", 166 .platform = "rs64",
167 }, 167 },
168 { /* I-star */ 168 { /* I-star */
169 .pvr_mask = 0xffff0000, 169 .pvr_mask = 0xffff0000,
170 .pvr_value = 0x00360000, 170 .pvr_value = 0x00360000,
171 .cpu_name = "RS64-III (icestar)", 171 .cpu_name = "RS64-III (icestar)",
172 .cpu_features = CPU_FTRS_RS64, 172 .cpu_features = CPU_FTRS_RS64,
173 .cpu_user_features = COMMON_USER_PPC64, 173 .cpu_user_features = COMMON_USER_PPC64,
174 .mmu_features = MMU_FTR_HPTE_TABLE, 174 .mmu_features = MMU_FTR_HPTE_TABLE,
175 .icache_bsize = 128, 175 .icache_bsize = 128,
176 .dcache_bsize = 128, 176 .dcache_bsize = 128,
177 .num_pmcs = 8, 177 .num_pmcs = 8,
178 .pmc_type = PPC_PMC_IBM, 178 .pmc_type = PPC_PMC_IBM,
179 .oprofile_cpu_type = "ppc64/rs64", 179 .oprofile_cpu_type = "ppc64/rs64",
180 .oprofile_type = PPC_OPROFILE_RS64, 180 .oprofile_type = PPC_OPROFILE_RS64,
181 .platform = "rs64", 181 .platform = "rs64",
182 }, 182 },
183 { /* S-star */ 183 { /* S-star */
184 .pvr_mask = 0xffff0000, 184 .pvr_mask = 0xffff0000,
185 .pvr_value = 0x00370000, 185 .pvr_value = 0x00370000,
186 .cpu_name = "RS64-IV (sstar)", 186 .cpu_name = "RS64-IV (sstar)",
187 .cpu_features = CPU_FTRS_RS64, 187 .cpu_features = CPU_FTRS_RS64,
188 .cpu_user_features = COMMON_USER_PPC64, 188 .cpu_user_features = COMMON_USER_PPC64,
189 .mmu_features = MMU_FTR_HPTE_TABLE, 189 .mmu_features = MMU_FTR_HPTE_TABLE,
190 .icache_bsize = 128, 190 .icache_bsize = 128,
191 .dcache_bsize = 128, 191 .dcache_bsize = 128,
192 .num_pmcs = 8, 192 .num_pmcs = 8,
193 .pmc_type = PPC_PMC_IBM, 193 .pmc_type = PPC_PMC_IBM,
194 .oprofile_cpu_type = "ppc64/rs64", 194 .oprofile_cpu_type = "ppc64/rs64",
195 .oprofile_type = PPC_OPROFILE_RS64, 195 .oprofile_type = PPC_OPROFILE_RS64,
196 .platform = "rs64", 196 .platform = "rs64",
197 }, 197 },
198 { /* Power4 */ 198 { /* Power4 */
199 .pvr_mask = 0xffff0000, 199 .pvr_mask = 0xffff0000,
200 .pvr_value = 0x00350000, 200 .pvr_value = 0x00350000,
201 .cpu_name = "POWER4 (gp)", 201 .cpu_name = "POWER4 (gp)",
202 .cpu_features = CPU_FTRS_POWER4, 202 .cpu_features = CPU_FTRS_POWER4,
203 .cpu_user_features = COMMON_USER_POWER4, 203 .cpu_user_features = COMMON_USER_POWER4,
204 .mmu_features = MMU_FTRS_POWER4, 204 .mmu_features = MMU_FTRS_POWER4,
205 .icache_bsize = 128, 205 .icache_bsize = 128,
206 .dcache_bsize = 128, 206 .dcache_bsize = 128,
207 .num_pmcs = 8, 207 .num_pmcs = 8,
208 .pmc_type = PPC_PMC_IBM, 208 .pmc_type = PPC_PMC_IBM,
209 .oprofile_cpu_type = "ppc64/power4", 209 .oprofile_cpu_type = "ppc64/power4",
210 .oprofile_type = PPC_OPROFILE_POWER4, 210 .oprofile_type = PPC_OPROFILE_POWER4,
211 .platform = "power4", 211 .platform = "power4",
212 }, 212 },
213 { /* Power4+ */ 213 { /* Power4+ */
214 .pvr_mask = 0xffff0000, 214 .pvr_mask = 0xffff0000,
215 .pvr_value = 0x00380000, 215 .pvr_value = 0x00380000,
216 .cpu_name = "POWER4+ (gq)", 216 .cpu_name = "POWER4+ (gq)",
217 .cpu_features = CPU_FTRS_POWER4, 217 .cpu_features = CPU_FTRS_POWER4,
218 .cpu_user_features = COMMON_USER_POWER4, 218 .cpu_user_features = COMMON_USER_POWER4,
219 .mmu_features = MMU_FTRS_POWER4, 219 .mmu_features = MMU_FTRS_POWER4,
220 .icache_bsize = 128, 220 .icache_bsize = 128,
221 .dcache_bsize = 128, 221 .dcache_bsize = 128,
222 .num_pmcs = 8, 222 .num_pmcs = 8,
223 .pmc_type = PPC_PMC_IBM, 223 .pmc_type = PPC_PMC_IBM,
224 .oprofile_cpu_type = "ppc64/power4", 224 .oprofile_cpu_type = "ppc64/power4",
225 .oprofile_type = PPC_OPROFILE_POWER4, 225 .oprofile_type = PPC_OPROFILE_POWER4,
226 .platform = "power4", 226 .platform = "power4",
227 }, 227 },
228 { /* PPC970 */ 228 { /* PPC970 */
229 .pvr_mask = 0xffff0000, 229 .pvr_mask = 0xffff0000,
230 .pvr_value = 0x00390000, 230 .pvr_value = 0x00390000,
231 .cpu_name = "PPC970", 231 .cpu_name = "PPC970",
232 .cpu_features = CPU_FTRS_PPC970, 232 .cpu_features = CPU_FTRS_PPC970,
233 .cpu_user_features = COMMON_USER_POWER4 | 233 .cpu_user_features = COMMON_USER_POWER4 |
234 PPC_FEATURE_HAS_ALTIVEC_COMP, 234 PPC_FEATURE_HAS_ALTIVEC_COMP,
235 .mmu_features = MMU_FTRS_PPC970, 235 .mmu_features = MMU_FTRS_PPC970,
236 .icache_bsize = 128, 236 .icache_bsize = 128,
237 .dcache_bsize = 128, 237 .dcache_bsize = 128,
238 .num_pmcs = 8, 238 .num_pmcs = 8,
239 .pmc_type = PPC_PMC_IBM, 239 .pmc_type = PPC_PMC_IBM,
240 .cpu_setup = __setup_cpu_ppc970, 240 .cpu_setup = __setup_cpu_ppc970,
241 .cpu_restore = __restore_cpu_ppc970, 241 .cpu_restore = __restore_cpu_ppc970,
242 .oprofile_cpu_type = "ppc64/970", 242 .oprofile_cpu_type = "ppc64/970",
243 .oprofile_type = PPC_OPROFILE_POWER4, 243 .oprofile_type = PPC_OPROFILE_POWER4,
244 .platform = "ppc970", 244 .platform = "ppc970",
245 }, 245 },
246 { /* PPC970FX */ 246 { /* PPC970FX */
247 .pvr_mask = 0xffff0000, 247 .pvr_mask = 0xffff0000,
248 .pvr_value = 0x003c0000, 248 .pvr_value = 0x003c0000,
249 .cpu_name = "PPC970FX", 249 .cpu_name = "PPC970FX",
250 .cpu_features = CPU_FTRS_PPC970, 250 .cpu_features = CPU_FTRS_PPC970,
251 .cpu_user_features = COMMON_USER_POWER4 | 251 .cpu_user_features = COMMON_USER_POWER4 |
252 PPC_FEATURE_HAS_ALTIVEC_COMP, 252 PPC_FEATURE_HAS_ALTIVEC_COMP,
253 .mmu_features = MMU_FTRS_PPC970, 253 .mmu_features = MMU_FTRS_PPC970,
254 .icache_bsize = 128, 254 .icache_bsize = 128,
255 .dcache_bsize = 128, 255 .dcache_bsize = 128,
256 .num_pmcs = 8, 256 .num_pmcs = 8,
257 .pmc_type = PPC_PMC_IBM, 257 .pmc_type = PPC_PMC_IBM,
258 .cpu_setup = __setup_cpu_ppc970, 258 .cpu_setup = __setup_cpu_ppc970,
259 .cpu_restore = __restore_cpu_ppc970, 259 .cpu_restore = __restore_cpu_ppc970,
260 .oprofile_cpu_type = "ppc64/970", 260 .oprofile_cpu_type = "ppc64/970",
261 .oprofile_type = PPC_OPROFILE_POWER4, 261 .oprofile_type = PPC_OPROFILE_POWER4,
262 .platform = "ppc970", 262 .platform = "ppc970",
263 }, 263 },
264 { /* PPC970MP DD1.0 - no DEEPNAP, use regular 970 init */ 264 { /* PPC970MP DD1.0 - no DEEPNAP, use regular 970 init */
265 .pvr_mask = 0xffffffff, 265 .pvr_mask = 0xffffffff,
266 .pvr_value = 0x00440100, 266 .pvr_value = 0x00440100,
267 .cpu_name = "PPC970MP", 267 .cpu_name = "PPC970MP",
268 .cpu_features = CPU_FTRS_PPC970, 268 .cpu_features = CPU_FTRS_PPC970,
269 .cpu_user_features = COMMON_USER_POWER4 | 269 .cpu_user_features = COMMON_USER_POWER4 |
270 PPC_FEATURE_HAS_ALTIVEC_COMP, 270 PPC_FEATURE_HAS_ALTIVEC_COMP,
271 .mmu_features = MMU_FTR_HPTE_TABLE, 271 .mmu_features = MMU_FTR_HPTE_TABLE,
272 .icache_bsize = 128, 272 .icache_bsize = 128,
273 .dcache_bsize = 128, 273 .dcache_bsize = 128,
274 .num_pmcs = 8, 274 .num_pmcs = 8,
275 .pmc_type = PPC_PMC_IBM, 275 .pmc_type = PPC_PMC_IBM,
276 .cpu_setup = __setup_cpu_ppc970, 276 .cpu_setup = __setup_cpu_ppc970,
277 .cpu_restore = __restore_cpu_ppc970, 277 .cpu_restore = __restore_cpu_ppc970,
278 .oprofile_cpu_type = "ppc64/970MP", 278 .oprofile_cpu_type = "ppc64/970MP",
279 .oprofile_type = PPC_OPROFILE_POWER4, 279 .oprofile_type = PPC_OPROFILE_POWER4,
280 .platform = "ppc970", 280 .platform = "ppc970",
281 }, 281 },
282 { /* PPC970MP */ 282 { /* PPC970MP */
283 .pvr_mask = 0xffff0000, 283 .pvr_mask = 0xffff0000,
284 .pvr_value = 0x00440000, 284 .pvr_value = 0x00440000,
285 .cpu_name = "PPC970MP", 285 .cpu_name = "PPC970MP",
286 .cpu_features = CPU_FTRS_PPC970, 286 .cpu_features = CPU_FTRS_PPC970,
287 .cpu_user_features = COMMON_USER_POWER4 | 287 .cpu_user_features = COMMON_USER_POWER4 |
288 PPC_FEATURE_HAS_ALTIVEC_COMP, 288 PPC_FEATURE_HAS_ALTIVEC_COMP,
289 .mmu_features = MMU_FTRS_PPC970, 289 .mmu_features = MMU_FTRS_PPC970,
290 .icache_bsize = 128, 290 .icache_bsize = 128,
291 .dcache_bsize = 128, 291 .dcache_bsize = 128,
292 .num_pmcs = 8, 292 .num_pmcs = 8,
293 .pmc_type = PPC_PMC_IBM, 293 .pmc_type = PPC_PMC_IBM,
294 .cpu_setup = __setup_cpu_ppc970MP, 294 .cpu_setup = __setup_cpu_ppc970MP,
295 .cpu_restore = __restore_cpu_ppc970, 295 .cpu_restore = __restore_cpu_ppc970,
296 .oprofile_cpu_type = "ppc64/970MP", 296 .oprofile_cpu_type = "ppc64/970MP",
297 .oprofile_type = PPC_OPROFILE_POWER4, 297 .oprofile_type = PPC_OPROFILE_POWER4,
298 .platform = "ppc970", 298 .platform = "ppc970",
299 }, 299 },
300 { /* PPC970GX */ 300 { /* PPC970GX */
301 .pvr_mask = 0xffff0000, 301 .pvr_mask = 0xffff0000,
302 .pvr_value = 0x00450000, 302 .pvr_value = 0x00450000,
303 .cpu_name = "PPC970GX", 303 .cpu_name = "PPC970GX",
304 .cpu_features = CPU_FTRS_PPC970, 304 .cpu_features = CPU_FTRS_PPC970,
305 .cpu_user_features = COMMON_USER_POWER4 | 305 .cpu_user_features = COMMON_USER_POWER4 |
306 PPC_FEATURE_HAS_ALTIVEC_COMP, 306 PPC_FEATURE_HAS_ALTIVEC_COMP,
307 .mmu_features = MMU_FTRS_PPC970, 307 .mmu_features = MMU_FTRS_PPC970,
308 .icache_bsize = 128, 308 .icache_bsize = 128,
309 .dcache_bsize = 128, 309 .dcache_bsize = 128,
310 .num_pmcs = 8, 310 .num_pmcs = 8,
311 .pmc_type = PPC_PMC_IBM, 311 .pmc_type = PPC_PMC_IBM,
312 .cpu_setup = __setup_cpu_ppc970, 312 .cpu_setup = __setup_cpu_ppc970,
313 .oprofile_cpu_type = "ppc64/970", 313 .oprofile_cpu_type = "ppc64/970",
314 .oprofile_type = PPC_OPROFILE_POWER4, 314 .oprofile_type = PPC_OPROFILE_POWER4,
315 .platform = "ppc970", 315 .platform = "ppc970",
316 }, 316 },
317 { /* Power5 GR */ 317 { /* Power5 GR */
318 .pvr_mask = 0xffff0000, 318 .pvr_mask = 0xffff0000,
319 .pvr_value = 0x003a0000, 319 .pvr_value = 0x003a0000,
320 .cpu_name = "POWER5 (gr)", 320 .cpu_name = "POWER5 (gr)",
321 .cpu_features = CPU_FTRS_POWER5, 321 .cpu_features = CPU_FTRS_POWER5,
322 .cpu_user_features = COMMON_USER_POWER5, 322 .cpu_user_features = COMMON_USER_POWER5,
323 .mmu_features = MMU_FTRS_POWER5, 323 .mmu_features = MMU_FTRS_POWER5,
324 .icache_bsize = 128, 324 .icache_bsize = 128,
325 .dcache_bsize = 128, 325 .dcache_bsize = 128,
326 .num_pmcs = 6, 326 .num_pmcs = 6,
327 .pmc_type = PPC_PMC_IBM, 327 .pmc_type = PPC_PMC_IBM,
328 .oprofile_cpu_type = "ppc64/power5", 328 .oprofile_cpu_type = "ppc64/power5",
329 .oprofile_type = PPC_OPROFILE_POWER4, 329 .oprofile_type = PPC_OPROFILE_POWER4,
330 /* SIHV / SIPR bits are implemented on POWER4+ (GQ) 330 /* SIHV / SIPR bits are implemented on POWER4+ (GQ)
331 * and above but only works on POWER5 and above 331 * and above but only works on POWER5 and above
332 */ 332 */
333 .oprofile_mmcra_sihv = MMCRA_SIHV, 333 .oprofile_mmcra_sihv = MMCRA_SIHV,
334 .oprofile_mmcra_sipr = MMCRA_SIPR, 334 .oprofile_mmcra_sipr = MMCRA_SIPR,
335 .platform = "power5", 335 .platform = "power5",
336 }, 336 },
337 { /* Power5++ */ 337 { /* Power5++ */
338 .pvr_mask = 0xffffff00, 338 .pvr_mask = 0xffffff00,
339 .pvr_value = 0x003b0300, 339 .pvr_value = 0x003b0300,
340 .cpu_name = "POWER5+ (gs)", 340 .cpu_name = "POWER5+ (gs)",
341 .cpu_features = CPU_FTRS_POWER5, 341 .cpu_features = CPU_FTRS_POWER5,
342 .cpu_user_features = COMMON_USER_POWER5_PLUS, 342 .cpu_user_features = COMMON_USER_POWER5_PLUS,
343 .mmu_features = MMU_FTRS_POWER5, 343 .mmu_features = MMU_FTRS_POWER5,
344 .icache_bsize = 128, 344 .icache_bsize = 128,
345 .dcache_bsize = 128, 345 .dcache_bsize = 128,
346 .num_pmcs = 6, 346 .num_pmcs = 6,
347 .oprofile_cpu_type = "ppc64/power5++", 347 .oprofile_cpu_type = "ppc64/power5++",
348 .oprofile_type = PPC_OPROFILE_POWER4, 348 .oprofile_type = PPC_OPROFILE_POWER4,
349 .oprofile_mmcra_sihv = MMCRA_SIHV, 349 .oprofile_mmcra_sihv = MMCRA_SIHV,
350 .oprofile_mmcra_sipr = MMCRA_SIPR, 350 .oprofile_mmcra_sipr = MMCRA_SIPR,
351 .platform = "power5+", 351 .platform = "power5+",
352 }, 352 },
353 { /* Power5 GS */ 353 { /* Power5 GS */
354 .pvr_mask = 0xffff0000, 354 .pvr_mask = 0xffff0000,
355 .pvr_value = 0x003b0000, 355 .pvr_value = 0x003b0000,
356 .cpu_name = "POWER5+ (gs)", 356 .cpu_name = "POWER5+ (gs)",
357 .cpu_features = CPU_FTRS_POWER5, 357 .cpu_features = CPU_FTRS_POWER5,
358 .cpu_user_features = COMMON_USER_POWER5_PLUS, 358 .cpu_user_features = COMMON_USER_POWER5_PLUS,
359 .mmu_features = MMU_FTRS_POWER5, 359 .mmu_features = MMU_FTRS_POWER5,
360 .icache_bsize = 128, 360 .icache_bsize = 128,
361 .dcache_bsize = 128, 361 .dcache_bsize = 128,
362 .num_pmcs = 6, 362 .num_pmcs = 6,
363 .pmc_type = PPC_PMC_IBM, 363 .pmc_type = PPC_PMC_IBM,
364 .oprofile_cpu_type = "ppc64/power5+", 364 .oprofile_cpu_type = "ppc64/power5+",
365 .oprofile_type = PPC_OPROFILE_POWER4, 365 .oprofile_type = PPC_OPROFILE_POWER4,
366 .oprofile_mmcra_sihv = MMCRA_SIHV, 366 .oprofile_mmcra_sihv = MMCRA_SIHV,
367 .oprofile_mmcra_sipr = MMCRA_SIPR, 367 .oprofile_mmcra_sipr = MMCRA_SIPR,
368 .platform = "power5+", 368 .platform = "power5+",
369 }, 369 },
370 { /* POWER6 in P5+ mode; 2.04-compliant processor */ 370 { /* POWER6 in P5+ mode; 2.04-compliant processor */
371 .pvr_mask = 0xffffffff, 371 .pvr_mask = 0xffffffff,
372 .pvr_value = 0x0f000001, 372 .pvr_value = 0x0f000001,
373 .cpu_name = "POWER5+", 373 .cpu_name = "POWER5+",
374 .cpu_features = CPU_FTRS_POWER5, 374 .cpu_features = CPU_FTRS_POWER5,
375 .cpu_user_features = COMMON_USER_POWER5_PLUS, 375 .cpu_user_features = COMMON_USER_POWER5_PLUS,
376 .mmu_features = MMU_FTRS_POWER5, 376 .mmu_features = MMU_FTRS_POWER5,
377 .icache_bsize = 128, 377 .icache_bsize = 128,
378 .dcache_bsize = 128, 378 .dcache_bsize = 128,
379 .oprofile_cpu_type = "ppc64/ibm-compat-v1", 379 .oprofile_cpu_type = "ppc64/ibm-compat-v1",
380 .oprofile_type = PPC_OPROFILE_POWER4, 380 .oprofile_type = PPC_OPROFILE_POWER4,
381 .platform = "power5+", 381 .platform = "power5+",
382 }, 382 },
383 { /* Power6 */ 383 { /* Power6 */
384 .pvr_mask = 0xffff0000, 384 .pvr_mask = 0xffff0000,
385 .pvr_value = 0x003e0000, 385 .pvr_value = 0x003e0000,
386 .cpu_name = "POWER6 (raw)", 386 .cpu_name = "POWER6 (raw)",
387 .cpu_features = CPU_FTRS_POWER6, 387 .cpu_features = CPU_FTRS_POWER6,
388 .cpu_user_features = COMMON_USER_POWER6 | 388 .cpu_user_features = COMMON_USER_POWER6 |
389 PPC_FEATURE_POWER6_EXT, 389 PPC_FEATURE_POWER6_EXT,
390 .mmu_features = MMU_FTRS_POWER6, 390 .mmu_features = MMU_FTRS_POWER6,
391 .icache_bsize = 128, 391 .icache_bsize = 128,
392 .dcache_bsize = 128, 392 .dcache_bsize = 128,
393 .num_pmcs = 6, 393 .num_pmcs = 6,
394 .pmc_type = PPC_PMC_IBM, 394 .pmc_type = PPC_PMC_IBM,
395 .oprofile_cpu_type = "ppc64/power6", 395 .oprofile_cpu_type = "ppc64/power6",
396 .oprofile_type = PPC_OPROFILE_POWER4, 396 .oprofile_type = PPC_OPROFILE_POWER4,
397 .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV, 397 .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV,
398 .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR, 398 .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR,
399 .oprofile_mmcra_clear = POWER6_MMCRA_THRM | 399 .oprofile_mmcra_clear = POWER6_MMCRA_THRM |
400 POWER6_MMCRA_OTHER, 400 POWER6_MMCRA_OTHER,
401 .platform = "power6x", 401 .platform = "power6x",
402 }, 402 },
403 { /* 2.05-compliant processor, i.e. Power6 "architected" mode */ 403 { /* 2.05-compliant processor, i.e. Power6 "architected" mode */
404 .pvr_mask = 0xffffffff, 404 .pvr_mask = 0xffffffff,
405 .pvr_value = 0x0f000002, 405 .pvr_value = 0x0f000002,
406 .cpu_name = "POWER6 (architected)", 406 .cpu_name = "POWER6 (architected)",
407 .cpu_features = CPU_FTRS_POWER6, 407 .cpu_features = CPU_FTRS_POWER6,
408 .cpu_user_features = COMMON_USER_POWER6, 408 .cpu_user_features = COMMON_USER_POWER6,
409 .mmu_features = MMU_FTRS_POWER6, 409 .mmu_features = MMU_FTRS_POWER6,
410 .icache_bsize = 128, 410 .icache_bsize = 128,
411 .dcache_bsize = 128, 411 .dcache_bsize = 128,
412 .oprofile_cpu_type = "ppc64/ibm-compat-v1", 412 .oprofile_cpu_type = "ppc64/ibm-compat-v1",
413 .oprofile_type = PPC_OPROFILE_POWER4, 413 .oprofile_type = PPC_OPROFILE_POWER4,
414 .platform = "power6", 414 .platform = "power6",
415 }, 415 },
416 { /* 2.06-compliant processor, i.e. Power7 "architected" mode */ 416 { /* 2.06-compliant processor, i.e. Power7 "architected" mode */
417 .pvr_mask = 0xffffffff, 417 .pvr_mask = 0xffffffff,
418 .pvr_value = 0x0f000003, 418 .pvr_value = 0x0f000003,
419 .cpu_name = "POWER7 (architected)", 419 .cpu_name = "POWER7 (architected)",
420 .cpu_features = CPU_FTRS_POWER7, 420 .cpu_features = CPU_FTRS_POWER7,
421 .cpu_user_features = COMMON_USER_POWER7, 421 .cpu_user_features = COMMON_USER_POWER7,
422 .mmu_features = MMU_FTRS_POWER7, 422 .mmu_features = MMU_FTRS_POWER7,
423 .icache_bsize = 128, 423 .icache_bsize = 128,
424 .dcache_bsize = 128, 424 .dcache_bsize = 128,
425 .oprofile_type = PPC_OPROFILE_POWER4, 425 .oprofile_type = PPC_OPROFILE_POWER4,
426 .oprofile_cpu_type = "ppc64/ibm-compat-v1", 426 .oprofile_cpu_type = "ppc64/ibm-compat-v1",
427 .cpu_setup = __setup_cpu_power7, 427 .cpu_setup = __setup_cpu_power7,
428 .cpu_restore = __restore_cpu_power7, 428 .cpu_restore = __restore_cpu_power7,
429 .platform = "power7", 429 .platform = "power7",
430 }, 430 },
431 { /* Power7 */ 431 { /* Power7 */
432 .pvr_mask = 0xffff0000, 432 .pvr_mask = 0xffff0000,
433 .pvr_value = 0x003f0000, 433 .pvr_value = 0x003f0000,
434 .cpu_name = "POWER7 (raw)", 434 .cpu_name = "POWER7 (raw)",
435 .cpu_features = CPU_FTRS_POWER7, 435 .cpu_features = CPU_FTRS_POWER7,
436 .cpu_user_features = COMMON_USER_POWER7, 436 .cpu_user_features = COMMON_USER_POWER7,
437 .mmu_features = MMU_FTRS_POWER7, 437 .mmu_features = MMU_FTRS_POWER7,
438 .icache_bsize = 128, 438 .icache_bsize = 128,
439 .dcache_bsize = 128, 439 .dcache_bsize = 128,
440 .num_pmcs = 6, 440 .num_pmcs = 6,
441 .pmc_type = PPC_PMC_IBM, 441 .pmc_type = PPC_PMC_IBM,
442 .oprofile_cpu_type = "ppc64/power7", 442 .oprofile_cpu_type = "ppc64/power7",
443 .oprofile_type = PPC_OPROFILE_POWER4, 443 .oprofile_type = PPC_OPROFILE_POWER4,
444 .cpu_setup = __setup_cpu_power7, 444 .cpu_setup = __setup_cpu_power7,
445 .cpu_restore = __restore_cpu_power7, 445 .cpu_restore = __restore_cpu_power7,
446 .platform = "power7", 446 .platform = "power7",
447 }, 447 },
448 { /* Power7+ */ 448 { /* Power7+ */
449 .pvr_mask = 0xffff0000, 449 .pvr_mask = 0xffff0000,
450 .pvr_value = 0x004A0000, 450 .pvr_value = 0x004A0000,
451 .cpu_name = "POWER7+ (raw)", 451 .cpu_name = "POWER7+ (raw)",
452 .cpu_features = CPU_FTRS_POWER7, 452 .cpu_features = CPU_FTRS_POWER7,
453 .cpu_user_features = COMMON_USER_POWER7, 453 .cpu_user_features = COMMON_USER_POWER7,
454 .mmu_features = MMU_FTRS_POWER7, 454 .mmu_features = MMU_FTRS_POWER7,
455 .icache_bsize = 128, 455 .icache_bsize = 128,
456 .dcache_bsize = 128, 456 .dcache_bsize = 128,
457 .num_pmcs = 6, 457 .num_pmcs = 6,
458 .pmc_type = PPC_PMC_IBM, 458 .pmc_type = PPC_PMC_IBM,
459 .oprofile_cpu_type = "ppc64/power7", 459 .oprofile_cpu_type = "ppc64/power7",
460 .oprofile_type = PPC_OPROFILE_POWER4, 460 .oprofile_type = PPC_OPROFILE_POWER4,
461 .cpu_setup = __setup_cpu_power7, 461 .cpu_setup = __setup_cpu_power7,
462 .cpu_restore = __restore_cpu_power7, 462 .cpu_restore = __restore_cpu_power7,
463 .platform = "power7+", 463 .platform = "power7+",
464 }, 464 },
465 { /* Cell Broadband Engine */ 465 { /* Cell Broadband Engine */
466 .pvr_mask = 0xffff0000, 466 .pvr_mask = 0xffff0000,
467 .pvr_value = 0x00700000, 467 .pvr_value = 0x00700000,
468 .cpu_name = "Cell Broadband Engine", 468 .cpu_name = "Cell Broadband Engine",
469 .cpu_features = CPU_FTRS_CELL, 469 .cpu_features = CPU_FTRS_CELL,
470 .cpu_user_features = COMMON_USER_PPC64 | 470 .cpu_user_features = COMMON_USER_PPC64 |
471 PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP | 471 PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP |
472 PPC_FEATURE_SMT, 472 PPC_FEATURE_SMT,
473 .mmu_features = MMU_FTRS_CELL, 473 .mmu_features = MMU_FTRS_CELL,
474 .icache_bsize = 128, 474 .icache_bsize = 128,
475 .dcache_bsize = 128, 475 .dcache_bsize = 128,
476 .num_pmcs = 4, 476 .num_pmcs = 4,
477 .pmc_type = PPC_PMC_IBM, 477 .pmc_type = PPC_PMC_IBM,
478 .oprofile_cpu_type = "ppc64/cell-be", 478 .oprofile_cpu_type = "ppc64/cell-be",
479 .oprofile_type = PPC_OPROFILE_CELL, 479 .oprofile_type = PPC_OPROFILE_CELL,
480 .platform = "ppc-cell-be", 480 .platform = "ppc-cell-be",
481 }, 481 },
482 { /* PA Semi PA6T */ 482 { /* PA Semi PA6T */
483 .pvr_mask = 0x7fff0000, 483 .pvr_mask = 0x7fff0000,
484 .pvr_value = 0x00900000, 484 .pvr_value = 0x00900000,
485 .cpu_name = "PA6T", 485 .cpu_name = "PA6T",
486 .cpu_features = CPU_FTRS_PA6T, 486 .cpu_features = CPU_FTRS_PA6T,
487 .cpu_user_features = COMMON_USER_PA6T, 487 .cpu_user_features = COMMON_USER_PA6T,
488 .mmu_features = MMU_FTRS_PA6T, 488 .mmu_features = MMU_FTRS_PA6T,
489 .icache_bsize = 64, 489 .icache_bsize = 64,
490 .dcache_bsize = 64, 490 .dcache_bsize = 64,
491 .num_pmcs = 6, 491 .num_pmcs = 6,
492 .pmc_type = PPC_PMC_PA6T, 492 .pmc_type = PPC_PMC_PA6T,
493 .cpu_setup = __setup_cpu_pa6t, 493 .cpu_setup = __setup_cpu_pa6t,
494 .cpu_restore = __restore_cpu_pa6t, 494 .cpu_restore = __restore_cpu_pa6t,
495 .oprofile_cpu_type = "ppc64/pa6t", 495 .oprofile_cpu_type = "ppc64/pa6t",
496 .oprofile_type = PPC_OPROFILE_PA6T, 496 .oprofile_type = PPC_OPROFILE_PA6T,
497 .platform = "pa6t", 497 .platform = "pa6t",
498 }, 498 },
499 { /* default match */ 499 { /* default match */
500 .pvr_mask = 0x00000000, 500 .pvr_mask = 0x00000000,
501 .pvr_value = 0x00000000, 501 .pvr_value = 0x00000000,
502 .cpu_name = "POWER4 (compatible)", 502 .cpu_name = "POWER4 (compatible)",
503 .cpu_features = CPU_FTRS_COMPATIBLE, 503 .cpu_features = CPU_FTRS_COMPATIBLE,
504 .cpu_user_features = COMMON_USER_PPC64, 504 .cpu_user_features = COMMON_USER_PPC64,
505 .mmu_features = MMU_FTRS_DEFAULT_HPTE_ARCH_V2, 505 .mmu_features = MMU_FTRS_DEFAULT_HPTE_ARCH_V2,
506 .icache_bsize = 128, 506 .icache_bsize = 128,
507 .dcache_bsize = 128, 507 .dcache_bsize = 128,
508 .num_pmcs = 6, 508 .num_pmcs = 6,
509 .pmc_type = PPC_PMC_IBM, 509 .pmc_type = PPC_PMC_IBM,
510 .platform = "power4", 510 .platform = "power4",
511 } 511 }
512 #endif /* CONFIG_PPC_BOOK3S_64 */ 512 #endif /* CONFIG_PPC_BOOK3S_64 */
513 513
514 #ifdef CONFIG_PPC32 514 #ifdef CONFIG_PPC32
515 #if CLASSIC_PPC 515 #if CLASSIC_PPC
516 { /* 601 */ 516 { /* 601 */
517 .pvr_mask = 0xffff0000, 517 .pvr_mask = 0xffff0000,
518 .pvr_value = 0x00010000, 518 .pvr_value = 0x00010000,
519 .cpu_name = "601", 519 .cpu_name = "601",
520 .cpu_features = CPU_FTRS_PPC601, 520 .cpu_features = CPU_FTRS_PPC601,
521 .cpu_user_features = COMMON_USER | PPC_FEATURE_601_INSTR | 521 .cpu_user_features = COMMON_USER | PPC_FEATURE_601_INSTR |
522 PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB, 522 PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB,
523 .mmu_features = MMU_FTR_HPTE_TABLE, 523 .mmu_features = MMU_FTR_HPTE_TABLE,
524 .icache_bsize = 32, 524 .icache_bsize = 32,
525 .dcache_bsize = 32, 525 .dcache_bsize = 32,
526 .machine_check = machine_check_generic, 526 .machine_check = machine_check_generic,
527 .platform = "ppc601", 527 .platform = "ppc601",
528 }, 528 },
529 { /* 603 */ 529 { /* 603 */
530 .pvr_mask = 0xffff0000, 530 .pvr_mask = 0xffff0000,
531 .pvr_value = 0x00030000, 531 .pvr_value = 0x00030000,
532 .cpu_name = "603", 532 .cpu_name = "603",
533 .cpu_features = CPU_FTRS_603, 533 .cpu_features = CPU_FTRS_603,
534 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 534 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
535 .mmu_features = 0, 535 .mmu_features = 0,
536 .icache_bsize = 32, 536 .icache_bsize = 32,
537 .dcache_bsize = 32, 537 .dcache_bsize = 32,
538 .cpu_setup = __setup_cpu_603, 538 .cpu_setup = __setup_cpu_603,
539 .machine_check = machine_check_generic, 539 .machine_check = machine_check_generic,
540 .platform = "ppc603", 540 .platform = "ppc603",
541 }, 541 },
542 { /* 603e */ 542 { /* 603e */
543 .pvr_mask = 0xffff0000, 543 .pvr_mask = 0xffff0000,
544 .pvr_value = 0x00060000, 544 .pvr_value = 0x00060000,
545 .cpu_name = "603e", 545 .cpu_name = "603e",
546 .cpu_features = CPU_FTRS_603, 546 .cpu_features = CPU_FTRS_603,
547 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 547 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
548 .mmu_features = 0, 548 .mmu_features = 0,
549 .icache_bsize = 32, 549 .icache_bsize = 32,
550 .dcache_bsize = 32, 550 .dcache_bsize = 32,
551 .cpu_setup = __setup_cpu_603, 551 .cpu_setup = __setup_cpu_603,
552 .machine_check = machine_check_generic, 552 .machine_check = machine_check_generic,
553 .platform = "ppc603", 553 .platform = "ppc603",
554 }, 554 },
555 { /* 603ev */ 555 { /* 603ev */
556 .pvr_mask = 0xffff0000, 556 .pvr_mask = 0xffff0000,
557 .pvr_value = 0x00070000, 557 .pvr_value = 0x00070000,
558 .cpu_name = "603ev", 558 .cpu_name = "603ev",
559 .cpu_features = CPU_FTRS_603, 559 .cpu_features = CPU_FTRS_603,
560 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 560 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
561 .mmu_features = 0, 561 .mmu_features = 0,
562 .icache_bsize = 32, 562 .icache_bsize = 32,
563 .dcache_bsize = 32, 563 .dcache_bsize = 32,
564 .cpu_setup = __setup_cpu_603, 564 .cpu_setup = __setup_cpu_603,
565 .machine_check = machine_check_generic, 565 .machine_check = machine_check_generic,
566 .platform = "ppc603", 566 .platform = "ppc603",
567 }, 567 },
568 { /* 604 */ 568 { /* 604 */
569 .pvr_mask = 0xffff0000, 569 .pvr_mask = 0xffff0000,
570 .pvr_value = 0x00040000, 570 .pvr_value = 0x00040000,
571 .cpu_name = "604", 571 .cpu_name = "604",
572 .cpu_features = CPU_FTRS_604, 572 .cpu_features = CPU_FTRS_604,
573 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 573 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
574 .mmu_features = MMU_FTR_HPTE_TABLE, 574 .mmu_features = MMU_FTR_HPTE_TABLE,
575 .icache_bsize = 32, 575 .icache_bsize = 32,
576 .dcache_bsize = 32, 576 .dcache_bsize = 32,
577 .num_pmcs = 2, 577 .num_pmcs = 2,
578 .cpu_setup = __setup_cpu_604, 578 .cpu_setup = __setup_cpu_604,
579 .machine_check = machine_check_generic, 579 .machine_check = machine_check_generic,
580 .platform = "ppc604", 580 .platform = "ppc604",
581 }, 581 },
582 { /* 604e */ 582 { /* 604e */
583 .pvr_mask = 0xfffff000, 583 .pvr_mask = 0xfffff000,
584 .pvr_value = 0x00090000, 584 .pvr_value = 0x00090000,
585 .cpu_name = "604e", 585 .cpu_name = "604e",
586 .cpu_features = CPU_FTRS_604, 586 .cpu_features = CPU_FTRS_604,
587 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 587 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
588 .mmu_features = MMU_FTR_HPTE_TABLE, 588 .mmu_features = MMU_FTR_HPTE_TABLE,
589 .icache_bsize = 32, 589 .icache_bsize = 32,
590 .dcache_bsize = 32, 590 .dcache_bsize = 32,
591 .num_pmcs = 4, 591 .num_pmcs = 4,
592 .cpu_setup = __setup_cpu_604, 592 .cpu_setup = __setup_cpu_604,
593 .machine_check = machine_check_generic, 593 .machine_check = machine_check_generic,
594 .platform = "ppc604", 594 .platform = "ppc604",
595 }, 595 },
596 { /* 604r */ 596 { /* 604r */
597 .pvr_mask = 0xffff0000, 597 .pvr_mask = 0xffff0000,
598 .pvr_value = 0x00090000, 598 .pvr_value = 0x00090000,
599 .cpu_name = "604r", 599 .cpu_name = "604r",
600 .cpu_features = CPU_FTRS_604, 600 .cpu_features = CPU_FTRS_604,
601 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 601 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
602 .mmu_features = MMU_FTR_HPTE_TABLE, 602 .mmu_features = MMU_FTR_HPTE_TABLE,
603 .icache_bsize = 32, 603 .icache_bsize = 32,
604 .dcache_bsize = 32, 604 .dcache_bsize = 32,
605 .num_pmcs = 4, 605 .num_pmcs = 4,
606 .cpu_setup = __setup_cpu_604, 606 .cpu_setup = __setup_cpu_604,
607 .machine_check = machine_check_generic, 607 .machine_check = machine_check_generic,
608 .platform = "ppc604", 608 .platform = "ppc604",
609 }, 609 },
610 { /* 604ev */ 610 { /* 604ev */
611 .pvr_mask = 0xffff0000, 611 .pvr_mask = 0xffff0000,
612 .pvr_value = 0x000a0000, 612 .pvr_value = 0x000a0000,
613 .cpu_name = "604ev", 613 .cpu_name = "604ev",
614 .cpu_features = CPU_FTRS_604, 614 .cpu_features = CPU_FTRS_604,
615 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 615 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
616 .mmu_features = MMU_FTR_HPTE_TABLE, 616 .mmu_features = MMU_FTR_HPTE_TABLE,
617 .icache_bsize = 32, 617 .icache_bsize = 32,
618 .dcache_bsize = 32, 618 .dcache_bsize = 32,
619 .num_pmcs = 4, 619 .num_pmcs = 4,
620 .cpu_setup = __setup_cpu_604, 620 .cpu_setup = __setup_cpu_604,
621 .machine_check = machine_check_generic, 621 .machine_check = machine_check_generic,
622 .platform = "ppc604", 622 .platform = "ppc604",
623 }, 623 },
624 { /* 740/750 (0x4202, doesn't support TAU?) */ 624 { /* 740/750 (0x4202, doesn't support TAU?) */
625 .pvr_mask = 0xffffffff, 625 .pvr_mask = 0xffffffff,
626 .pvr_value = 0x00084202, 626 .pvr_value = 0x00084202,
627 .cpu_name = "740/750", 627 .cpu_name = "740/750",
628 .cpu_features = CPU_FTRS_740_NOTAU, 628 .cpu_features = CPU_FTRS_740_NOTAU,
629 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 629 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
630 .mmu_features = MMU_FTR_HPTE_TABLE, 630 .mmu_features = MMU_FTR_HPTE_TABLE,
631 .icache_bsize = 32, 631 .icache_bsize = 32,
632 .dcache_bsize = 32, 632 .dcache_bsize = 32,
633 .num_pmcs = 4, 633 .num_pmcs = 4,
634 .cpu_setup = __setup_cpu_750, 634 .cpu_setup = __setup_cpu_750,
635 .machine_check = machine_check_generic, 635 .machine_check = machine_check_generic,
636 .platform = "ppc750", 636 .platform = "ppc750",
637 }, 637 },
638 { /* 750CX (80100 and 8010x?) */ 638 { /* 750CX (80100 and 8010x?) */
639 .pvr_mask = 0xfffffff0, 639 .pvr_mask = 0xfffffff0,
640 .pvr_value = 0x00080100, 640 .pvr_value = 0x00080100,
641 .cpu_name = "750CX", 641 .cpu_name = "750CX",
642 .cpu_features = CPU_FTRS_750, 642 .cpu_features = CPU_FTRS_750,
643 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 643 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
644 .mmu_features = MMU_FTR_HPTE_TABLE, 644 .mmu_features = MMU_FTR_HPTE_TABLE,
645 .icache_bsize = 32, 645 .icache_bsize = 32,
646 .dcache_bsize = 32, 646 .dcache_bsize = 32,
647 .num_pmcs = 4, 647 .num_pmcs = 4,
648 .cpu_setup = __setup_cpu_750cx, 648 .cpu_setup = __setup_cpu_750cx,
649 .machine_check = machine_check_generic, 649 .machine_check = machine_check_generic,
650 .platform = "ppc750", 650 .platform = "ppc750",
651 }, 651 },
652 { /* 750CX (82201 and 82202) */ 652 { /* 750CX (82201 and 82202) */
653 .pvr_mask = 0xfffffff0, 653 .pvr_mask = 0xfffffff0,
654 .pvr_value = 0x00082200, 654 .pvr_value = 0x00082200,
655 .cpu_name = "750CX", 655 .cpu_name = "750CX",
656 .cpu_features = CPU_FTRS_750, 656 .cpu_features = CPU_FTRS_750,
657 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 657 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
658 .mmu_features = MMU_FTR_HPTE_TABLE, 658 .mmu_features = MMU_FTR_HPTE_TABLE,
659 .icache_bsize = 32, 659 .icache_bsize = 32,
660 .dcache_bsize = 32, 660 .dcache_bsize = 32,
661 .num_pmcs = 4, 661 .num_pmcs = 4,
662 .pmc_type = PPC_PMC_IBM, 662 .pmc_type = PPC_PMC_IBM,
663 .cpu_setup = __setup_cpu_750cx, 663 .cpu_setup = __setup_cpu_750cx,
664 .machine_check = machine_check_generic, 664 .machine_check = machine_check_generic,
665 .platform = "ppc750", 665 .platform = "ppc750",
666 }, 666 },
667 { /* 750CXe (82214) */ 667 { /* 750CXe (82214) */
668 .pvr_mask = 0xfffffff0, 668 .pvr_mask = 0xfffffff0,
669 .pvr_value = 0x00082210, 669 .pvr_value = 0x00082210,
670 .cpu_name = "750CXe", 670 .cpu_name = "750CXe",
671 .cpu_features = CPU_FTRS_750, 671 .cpu_features = CPU_FTRS_750,
672 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 672 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
673 .mmu_features = MMU_FTR_HPTE_TABLE, 673 .mmu_features = MMU_FTR_HPTE_TABLE,
674 .icache_bsize = 32, 674 .icache_bsize = 32,
675 .dcache_bsize = 32, 675 .dcache_bsize = 32,
676 .num_pmcs = 4, 676 .num_pmcs = 4,
677 .pmc_type = PPC_PMC_IBM, 677 .pmc_type = PPC_PMC_IBM,
678 .cpu_setup = __setup_cpu_750cx, 678 .cpu_setup = __setup_cpu_750cx,
679 .machine_check = machine_check_generic, 679 .machine_check = machine_check_generic,
680 .platform = "ppc750", 680 .platform = "ppc750",
681 }, 681 },
682 { /* 750CXe "Gekko" (83214) */ 682 { /* 750CXe "Gekko" (83214) */
683 .pvr_mask = 0xffffffff, 683 .pvr_mask = 0xffffffff,
684 .pvr_value = 0x00083214, 684 .pvr_value = 0x00083214,
685 .cpu_name = "750CXe", 685 .cpu_name = "750CXe",
686 .cpu_features = CPU_FTRS_750, 686 .cpu_features = CPU_FTRS_750,
687 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 687 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
688 .mmu_features = MMU_FTR_HPTE_TABLE, 688 .mmu_features = MMU_FTR_HPTE_TABLE,
689 .icache_bsize = 32, 689 .icache_bsize = 32,
690 .dcache_bsize = 32, 690 .dcache_bsize = 32,
691 .num_pmcs = 4, 691 .num_pmcs = 4,
692 .pmc_type = PPC_PMC_IBM, 692 .pmc_type = PPC_PMC_IBM,
693 .cpu_setup = __setup_cpu_750cx, 693 .cpu_setup = __setup_cpu_750cx,
694 .machine_check = machine_check_generic, 694 .machine_check = machine_check_generic,
695 .platform = "ppc750", 695 .platform = "ppc750",
696 }, 696 },
697 { /* 750CL (and "Broadway") */ 697 { /* 750CL (and "Broadway") */
698 .pvr_mask = 0xfffff0e0, 698 .pvr_mask = 0xfffff0e0,
699 .pvr_value = 0x00087000, 699 .pvr_value = 0x00087000,
700 .cpu_name = "750CL", 700 .cpu_name = "750CL",
701 .cpu_features = CPU_FTRS_750CL, 701 .cpu_features = CPU_FTRS_750CL,
702 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 702 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
703 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, 703 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
704 .icache_bsize = 32, 704 .icache_bsize = 32,
705 .dcache_bsize = 32, 705 .dcache_bsize = 32,
706 .num_pmcs = 4, 706 .num_pmcs = 4,
707 .pmc_type = PPC_PMC_IBM, 707 .pmc_type = PPC_PMC_IBM,
708 .cpu_setup = __setup_cpu_750, 708 .cpu_setup = __setup_cpu_750,
709 .machine_check = machine_check_generic, 709 .machine_check = machine_check_generic,
710 .platform = "ppc750", 710 .platform = "ppc750",
711 .oprofile_cpu_type = "ppc/750", 711 .oprofile_cpu_type = "ppc/750",
712 .oprofile_type = PPC_OPROFILE_G4, 712 .oprofile_type = PPC_OPROFILE_G4,
713 }, 713 },
714 { /* 745/755 */ 714 { /* 745/755 */
715 .pvr_mask = 0xfffff000, 715 .pvr_mask = 0xfffff000,
716 .pvr_value = 0x00083000, 716 .pvr_value = 0x00083000,
717 .cpu_name = "745/755", 717 .cpu_name = "745/755",
718 .cpu_features = CPU_FTRS_750, 718 .cpu_features = CPU_FTRS_750,
719 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 719 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
720 .mmu_features = MMU_FTR_HPTE_TABLE, 720 .mmu_features = MMU_FTR_HPTE_TABLE,
721 .icache_bsize = 32, 721 .icache_bsize = 32,
722 .dcache_bsize = 32, 722 .dcache_bsize = 32,
723 .num_pmcs = 4, 723 .num_pmcs = 4,
724 .pmc_type = PPC_PMC_IBM, 724 .pmc_type = PPC_PMC_IBM,
725 .cpu_setup = __setup_cpu_750, 725 .cpu_setup = __setup_cpu_750,
726 .machine_check = machine_check_generic, 726 .machine_check = machine_check_generic,
727 .platform = "ppc750", 727 .platform = "ppc750",
728 }, 728 },
729 { /* 750FX rev 1.x */ 729 { /* 750FX rev 1.x */
730 .pvr_mask = 0xffffff00, 730 .pvr_mask = 0xffffff00,
731 .pvr_value = 0x70000100, 731 .pvr_value = 0x70000100,
732 .cpu_name = "750FX", 732 .cpu_name = "750FX",
733 .cpu_features = CPU_FTRS_750FX1, 733 .cpu_features = CPU_FTRS_750FX1,
734 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 734 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
735 .mmu_features = MMU_FTR_HPTE_TABLE, 735 .mmu_features = MMU_FTR_HPTE_TABLE,
736 .icache_bsize = 32, 736 .icache_bsize = 32,
737 .dcache_bsize = 32, 737 .dcache_bsize = 32,
738 .num_pmcs = 4, 738 .num_pmcs = 4,
739 .pmc_type = PPC_PMC_IBM, 739 .pmc_type = PPC_PMC_IBM,
740 .cpu_setup = __setup_cpu_750, 740 .cpu_setup = __setup_cpu_750,
741 .machine_check = machine_check_generic, 741 .machine_check = machine_check_generic,
742 .platform = "ppc750", 742 .platform = "ppc750",
743 .oprofile_cpu_type = "ppc/750", 743 .oprofile_cpu_type = "ppc/750",
744 .oprofile_type = PPC_OPROFILE_G4, 744 .oprofile_type = PPC_OPROFILE_G4,
745 }, 745 },
746 { /* 750FX rev 2.0 must disable HID0[DPM] */ 746 { /* 750FX rev 2.0 must disable HID0[DPM] */
747 .pvr_mask = 0xffffffff, 747 .pvr_mask = 0xffffffff,
748 .pvr_value = 0x70000200, 748 .pvr_value = 0x70000200,
749 .cpu_name = "750FX", 749 .cpu_name = "750FX",
750 .cpu_features = CPU_FTRS_750FX2, 750 .cpu_features = CPU_FTRS_750FX2,
751 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 751 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
752 .mmu_features = MMU_FTR_HPTE_TABLE, 752 .mmu_features = MMU_FTR_HPTE_TABLE,
753 .icache_bsize = 32, 753 .icache_bsize = 32,
754 .dcache_bsize = 32, 754 .dcache_bsize = 32,
755 .num_pmcs = 4, 755 .num_pmcs = 4,
756 .pmc_type = PPC_PMC_IBM, 756 .pmc_type = PPC_PMC_IBM,
757 .cpu_setup = __setup_cpu_750, 757 .cpu_setup = __setup_cpu_750,
758 .machine_check = machine_check_generic, 758 .machine_check = machine_check_generic,
759 .platform = "ppc750", 759 .platform = "ppc750",
760 .oprofile_cpu_type = "ppc/750", 760 .oprofile_cpu_type = "ppc/750",
761 .oprofile_type = PPC_OPROFILE_G4, 761 .oprofile_type = PPC_OPROFILE_G4,
762 }, 762 },
763 { /* 750FX (All revs except 2.0) */ 763 { /* 750FX (All revs except 2.0) */
764 .pvr_mask = 0xffff0000, 764 .pvr_mask = 0xffff0000,
765 .pvr_value = 0x70000000, 765 .pvr_value = 0x70000000,
766 .cpu_name = "750FX", 766 .cpu_name = "750FX",
767 .cpu_features = CPU_FTRS_750FX, 767 .cpu_features = CPU_FTRS_750FX,
768 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 768 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
769 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, 769 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
770 .icache_bsize = 32, 770 .icache_bsize = 32,
771 .dcache_bsize = 32, 771 .dcache_bsize = 32,
772 .num_pmcs = 4, 772 .num_pmcs = 4,
773 .pmc_type = PPC_PMC_IBM, 773 .pmc_type = PPC_PMC_IBM,
774 .cpu_setup = __setup_cpu_750fx, 774 .cpu_setup = __setup_cpu_750fx,
775 .machine_check = machine_check_generic, 775 .machine_check = machine_check_generic,
776 .platform = "ppc750", 776 .platform = "ppc750",
777 .oprofile_cpu_type = "ppc/750", 777 .oprofile_cpu_type = "ppc/750",
778 .oprofile_type = PPC_OPROFILE_G4, 778 .oprofile_type = PPC_OPROFILE_G4,
779 }, 779 },
780 { /* 750GX */ 780 { /* 750GX */
781 .pvr_mask = 0xffff0000, 781 .pvr_mask = 0xffff0000,
782 .pvr_value = 0x70020000, 782 .pvr_value = 0x70020000,
783 .cpu_name = "750GX", 783 .cpu_name = "750GX",
784 .cpu_features = CPU_FTRS_750GX, 784 .cpu_features = CPU_FTRS_750GX,
785 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 785 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
786 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, 786 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
787 .icache_bsize = 32, 787 .icache_bsize = 32,
788 .dcache_bsize = 32, 788 .dcache_bsize = 32,
789 .num_pmcs = 4, 789 .num_pmcs = 4,
790 .pmc_type = PPC_PMC_IBM, 790 .pmc_type = PPC_PMC_IBM,
791 .cpu_setup = __setup_cpu_750fx, 791 .cpu_setup = __setup_cpu_750fx,
792 .machine_check = machine_check_generic, 792 .machine_check = machine_check_generic,
793 .platform = "ppc750", 793 .platform = "ppc750",
794 .oprofile_cpu_type = "ppc/750", 794 .oprofile_cpu_type = "ppc/750",
795 .oprofile_type = PPC_OPROFILE_G4, 795 .oprofile_type = PPC_OPROFILE_G4,
796 }, 796 },
797 { /* 740/750 (L2CR bit needs fixup for 740) */ 797 { /* 740/750 (L2CR bit needs fixup for 740) */
798 .pvr_mask = 0xffff0000, 798 .pvr_mask = 0xffff0000,
799 .pvr_value = 0x00080000, 799 .pvr_value = 0x00080000,
800 .cpu_name = "740/750", 800 .cpu_name = "740/750",
801 .cpu_features = CPU_FTRS_740, 801 .cpu_features = CPU_FTRS_740,
802 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 802 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
803 .mmu_features = MMU_FTR_HPTE_TABLE, 803 .mmu_features = MMU_FTR_HPTE_TABLE,
804 .icache_bsize = 32, 804 .icache_bsize = 32,
805 .dcache_bsize = 32, 805 .dcache_bsize = 32,
806 .num_pmcs = 4, 806 .num_pmcs = 4,
807 .pmc_type = PPC_PMC_IBM, 807 .pmc_type = PPC_PMC_IBM,
808 .cpu_setup = __setup_cpu_750, 808 .cpu_setup = __setup_cpu_750,
809 .machine_check = machine_check_generic, 809 .machine_check = machine_check_generic,
810 .platform = "ppc750", 810 .platform = "ppc750",
811 }, 811 },
812 { /* 7400 rev 1.1 ? (no TAU) */ 812 { /* 7400 rev 1.1 ? (no TAU) */
813 .pvr_mask = 0xffffffff, 813 .pvr_mask = 0xffffffff,
814 .pvr_value = 0x000c1101, 814 .pvr_value = 0x000c1101,
815 .cpu_name = "7400 (1.1)", 815 .cpu_name = "7400 (1.1)",
816 .cpu_features = CPU_FTRS_7400_NOTAU, 816 .cpu_features = CPU_FTRS_7400_NOTAU,
817 .cpu_user_features = COMMON_USER | 817 .cpu_user_features = COMMON_USER |
818 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 818 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
819 .mmu_features = MMU_FTR_HPTE_TABLE, 819 .mmu_features = MMU_FTR_HPTE_TABLE,
820 .icache_bsize = 32, 820 .icache_bsize = 32,
821 .dcache_bsize = 32, 821 .dcache_bsize = 32,
822 .num_pmcs = 4, 822 .num_pmcs = 4,
823 .pmc_type = PPC_PMC_G4, 823 .pmc_type = PPC_PMC_G4,
824 .cpu_setup = __setup_cpu_7400, 824 .cpu_setup = __setup_cpu_7400,
825 .machine_check = machine_check_generic, 825 .machine_check = machine_check_generic,
826 .platform = "ppc7400", 826 .platform = "ppc7400",
827 }, 827 },
828 { /* 7400 */ 828 { /* 7400 */
829 .pvr_mask = 0xffff0000, 829 .pvr_mask = 0xffff0000,
830 .pvr_value = 0x000c0000, 830 .pvr_value = 0x000c0000,
831 .cpu_name = "7400", 831 .cpu_name = "7400",
832 .cpu_features = CPU_FTRS_7400, 832 .cpu_features = CPU_FTRS_7400,
833 .cpu_user_features = COMMON_USER | 833 .cpu_user_features = COMMON_USER |
834 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 834 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
835 .mmu_features = MMU_FTR_HPTE_TABLE, 835 .mmu_features = MMU_FTR_HPTE_TABLE,
836 .icache_bsize = 32, 836 .icache_bsize = 32,
837 .dcache_bsize = 32, 837 .dcache_bsize = 32,
838 .num_pmcs = 4, 838 .num_pmcs = 4,
839 .pmc_type = PPC_PMC_G4, 839 .pmc_type = PPC_PMC_G4,
840 .cpu_setup = __setup_cpu_7400, 840 .cpu_setup = __setup_cpu_7400,
841 .machine_check = machine_check_generic, 841 .machine_check = machine_check_generic,
842 .platform = "ppc7400", 842 .platform = "ppc7400",
843 }, 843 },
844 { /* 7410 */ 844 { /* 7410 */
845 .pvr_mask = 0xffff0000, 845 .pvr_mask = 0xffff0000,
846 .pvr_value = 0x800c0000, 846 .pvr_value = 0x800c0000,
847 .cpu_name = "7410", 847 .cpu_name = "7410",
848 .cpu_features = CPU_FTRS_7400, 848 .cpu_features = CPU_FTRS_7400,
849 .cpu_user_features = COMMON_USER | 849 .cpu_user_features = COMMON_USER |
850 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 850 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
851 .mmu_features = MMU_FTR_HPTE_TABLE, 851 .mmu_features = MMU_FTR_HPTE_TABLE,
852 .icache_bsize = 32, 852 .icache_bsize = 32,
853 .dcache_bsize = 32, 853 .dcache_bsize = 32,
854 .num_pmcs = 4, 854 .num_pmcs = 4,
855 .pmc_type = PPC_PMC_G4, 855 .pmc_type = PPC_PMC_G4,
856 .cpu_setup = __setup_cpu_7410, 856 .cpu_setup = __setup_cpu_7410,
857 .machine_check = machine_check_generic, 857 .machine_check = machine_check_generic,
858 .platform = "ppc7400", 858 .platform = "ppc7400",
859 }, 859 },
860 { /* 7450 2.0 - no doze/nap */ 860 { /* 7450 2.0 - no doze/nap */
861 .pvr_mask = 0xffffffff, 861 .pvr_mask = 0xffffffff,
862 .pvr_value = 0x80000200, 862 .pvr_value = 0x80000200,
863 .cpu_name = "7450", 863 .cpu_name = "7450",
864 .cpu_features = CPU_FTRS_7450_20, 864 .cpu_features = CPU_FTRS_7450_20,
865 .cpu_user_features = COMMON_USER | 865 .cpu_user_features = COMMON_USER |
866 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 866 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
867 .mmu_features = MMU_FTR_HPTE_TABLE, 867 .mmu_features = MMU_FTR_HPTE_TABLE,
868 .icache_bsize = 32, 868 .icache_bsize = 32,
869 .dcache_bsize = 32, 869 .dcache_bsize = 32,
870 .num_pmcs = 6, 870 .num_pmcs = 6,
871 .pmc_type = PPC_PMC_G4, 871 .pmc_type = PPC_PMC_G4,
872 .cpu_setup = __setup_cpu_745x, 872 .cpu_setup = __setup_cpu_745x,
873 .oprofile_cpu_type = "ppc/7450", 873 .oprofile_cpu_type = "ppc/7450",
874 .oprofile_type = PPC_OPROFILE_G4, 874 .oprofile_type = PPC_OPROFILE_G4,
875 .machine_check = machine_check_generic, 875 .machine_check = machine_check_generic,
876 .platform = "ppc7450", 876 .platform = "ppc7450",
877 }, 877 },
878 { /* 7450 2.1 */ 878 { /* 7450 2.1 */
879 .pvr_mask = 0xffffffff, 879 .pvr_mask = 0xffffffff,
880 .pvr_value = 0x80000201, 880 .pvr_value = 0x80000201,
881 .cpu_name = "7450", 881 .cpu_name = "7450",
882 .cpu_features = CPU_FTRS_7450_21, 882 .cpu_features = CPU_FTRS_7450_21,
883 .cpu_user_features = COMMON_USER | 883 .cpu_user_features = COMMON_USER |
884 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 884 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
885 .mmu_features = MMU_FTR_HPTE_TABLE, 885 .mmu_features = MMU_FTR_HPTE_TABLE,
886 .icache_bsize = 32, 886 .icache_bsize = 32,
887 .dcache_bsize = 32, 887 .dcache_bsize = 32,
888 .num_pmcs = 6, 888 .num_pmcs = 6,
889 .pmc_type = PPC_PMC_G4, 889 .pmc_type = PPC_PMC_G4,
890 .cpu_setup = __setup_cpu_745x, 890 .cpu_setup = __setup_cpu_745x,
891 .oprofile_cpu_type = "ppc/7450", 891 .oprofile_cpu_type = "ppc/7450",
892 .oprofile_type = PPC_OPROFILE_G4, 892 .oprofile_type = PPC_OPROFILE_G4,
893 .machine_check = machine_check_generic, 893 .machine_check = machine_check_generic,
894 .platform = "ppc7450", 894 .platform = "ppc7450",
895 }, 895 },
896 { /* 7450 2.3 and newer */ 896 { /* 7450 2.3 and newer */
897 .pvr_mask = 0xffff0000, 897 .pvr_mask = 0xffff0000,
898 .pvr_value = 0x80000000, 898 .pvr_value = 0x80000000,
899 .cpu_name = "7450", 899 .cpu_name = "7450",
900 .cpu_features = CPU_FTRS_7450_23, 900 .cpu_features = CPU_FTRS_7450_23,
901 .cpu_user_features = COMMON_USER | 901 .cpu_user_features = COMMON_USER |
902 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 902 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
903 .mmu_features = MMU_FTR_HPTE_TABLE, 903 .mmu_features = MMU_FTR_HPTE_TABLE,
904 .icache_bsize = 32, 904 .icache_bsize = 32,
905 .dcache_bsize = 32, 905 .dcache_bsize = 32,
906 .num_pmcs = 6, 906 .num_pmcs = 6,
907 .pmc_type = PPC_PMC_G4, 907 .pmc_type = PPC_PMC_G4,
908 .cpu_setup = __setup_cpu_745x, 908 .cpu_setup = __setup_cpu_745x,
909 .oprofile_cpu_type = "ppc/7450", 909 .oprofile_cpu_type = "ppc/7450",
910 .oprofile_type = PPC_OPROFILE_G4, 910 .oprofile_type = PPC_OPROFILE_G4,
911 .machine_check = machine_check_generic, 911 .machine_check = machine_check_generic,
912 .platform = "ppc7450", 912 .platform = "ppc7450",
913 }, 913 },
914 { /* 7455 rev 1.x */ 914 { /* 7455 rev 1.x */
915 .pvr_mask = 0xffffff00, 915 .pvr_mask = 0xffffff00,
916 .pvr_value = 0x80010100, 916 .pvr_value = 0x80010100,
917 .cpu_name = "7455", 917 .cpu_name = "7455",
918 .cpu_features = CPU_FTRS_7455_1, 918 .cpu_features = CPU_FTRS_7455_1,
919 .cpu_user_features = COMMON_USER | 919 .cpu_user_features = COMMON_USER |
920 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 920 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
921 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, 921 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
922 .icache_bsize = 32, 922 .icache_bsize = 32,
923 .dcache_bsize = 32, 923 .dcache_bsize = 32,
924 .num_pmcs = 6, 924 .num_pmcs = 6,
925 .pmc_type = PPC_PMC_G4, 925 .pmc_type = PPC_PMC_G4,
926 .cpu_setup = __setup_cpu_745x, 926 .cpu_setup = __setup_cpu_745x,
927 .oprofile_cpu_type = "ppc/7450", 927 .oprofile_cpu_type = "ppc/7450",
928 .oprofile_type = PPC_OPROFILE_G4, 928 .oprofile_type = PPC_OPROFILE_G4,
929 .machine_check = machine_check_generic, 929 .machine_check = machine_check_generic,
930 .platform = "ppc7450", 930 .platform = "ppc7450",
931 }, 931 },
932 { /* 7455 rev 2.0 */ 932 { /* 7455 rev 2.0 */
933 .pvr_mask = 0xffffffff, 933 .pvr_mask = 0xffffffff,
934 .pvr_value = 0x80010200, 934 .pvr_value = 0x80010200,
935 .cpu_name = "7455", 935 .cpu_name = "7455",
936 .cpu_features = CPU_FTRS_7455_20, 936 .cpu_features = CPU_FTRS_7455_20,
937 .cpu_user_features = COMMON_USER | 937 .cpu_user_features = COMMON_USER |
938 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 938 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
939 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, 939 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
940 .icache_bsize = 32, 940 .icache_bsize = 32,
941 .dcache_bsize = 32, 941 .dcache_bsize = 32,
942 .num_pmcs = 6, 942 .num_pmcs = 6,
943 .pmc_type = PPC_PMC_G4, 943 .pmc_type = PPC_PMC_G4,
944 .cpu_setup = __setup_cpu_745x, 944 .cpu_setup = __setup_cpu_745x,
945 .oprofile_cpu_type = "ppc/7450", 945 .oprofile_cpu_type = "ppc/7450",
946 .oprofile_type = PPC_OPROFILE_G4, 946 .oprofile_type = PPC_OPROFILE_G4,
947 .machine_check = machine_check_generic, 947 .machine_check = machine_check_generic,
948 .platform = "ppc7450", 948 .platform = "ppc7450",
949 }, 949 },
950 { /* 7455 others */ 950 { /* 7455 others */
951 .pvr_mask = 0xffff0000, 951 .pvr_mask = 0xffff0000,
952 .pvr_value = 0x80010000, 952 .pvr_value = 0x80010000,
953 .cpu_name = "7455", 953 .cpu_name = "7455",
954 .cpu_features = CPU_FTRS_7455, 954 .cpu_features = CPU_FTRS_7455,
955 .cpu_user_features = COMMON_USER | 955 .cpu_user_features = COMMON_USER |
956 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 956 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
957 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, 957 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
958 .icache_bsize = 32, 958 .icache_bsize = 32,
959 .dcache_bsize = 32, 959 .dcache_bsize = 32,
960 .num_pmcs = 6, 960 .num_pmcs = 6,
961 .pmc_type = PPC_PMC_G4, 961 .pmc_type = PPC_PMC_G4,
962 .cpu_setup = __setup_cpu_745x, 962 .cpu_setup = __setup_cpu_745x,
963 .oprofile_cpu_type = "ppc/7450", 963 .oprofile_cpu_type = "ppc/7450",
964 .oprofile_type = PPC_OPROFILE_G4, 964 .oprofile_type = PPC_OPROFILE_G4,
965 .machine_check = machine_check_generic, 965 .machine_check = machine_check_generic,
966 .platform = "ppc7450", 966 .platform = "ppc7450",
967 }, 967 },
968 { /* 7447/7457 Rev 1.0 */ 968 { /* 7447/7457 Rev 1.0 */
969 .pvr_mask = 0xffffffff, 969 .pvr_mask = 0xffffffff,
970 .pvr_value = 0x80020100, 970 .pvr_value = 0x80020100,
971 .cpu_name = "7447/7457", 971 .cpu_name = "7447/7457",
972 .cpu_features = CPU_FTRS_7447_10, 972 .cpu_features = CPU_FTRS_7447_10,
973 .cpu_user_features = COMMON_USER | 973 .cpu_user_features = COMMON_USER |
974 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 974 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
975 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, 975 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
976 .icache_bsize = 32, 976 .icache_bsize = 32,
977 .dcache_bsize = 32, 977 .dcache_bsize = 32,
978 .num_pmcs = 6, 978 .num_pmcs = 6,
979 .pmc_type = PPC_PMC_G4, 979 .pmc_type = PPC_PMC_G4,
980 .cpu_setup = __setup_cpu_745x, 980 .cpu_setup = __setup_cpu_745x,
981 .oprofile_cpu_type = "ppc/7450", 981 .oprofile_cpu_type = "ppc/7450",
982 .oprofile_type = PPC_OPROFILE_G4, 982 .oprofile_type = PPC_OPROFILE_G4,
983 .machine_check = machine_check_generic, 983 .machine_check = machine_check_generic,
984 .platform = "ppc7450", 984 .platform = "ppc7450",
985 }, 985 },
986 { /* 7447/7457 Rev 1.1 */ 986 { /* 7447/7457 Rev 1.1 */
987 .pvr_mask = 0xffffffff, 987 .pvr_mask = 0xffffffff,
988 .pvr_value = 0x80020101, 988 .pvr_value = 0x80020101,
989 .cpu_name = "7447/7457", 989 .cpu_name = "7447/7457",
990 .cpu_features = CPU_FTRS_7447_10, 990 .cpu_features = CPU_FTRS_7447_10,
991 .cpu_user_features = COMMON_USER | 991 .cpu_user_features = COMMON_USER |
992 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 992 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
993 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, 993 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
994 .icache_bsize = 32, 994 .icache_bsize = 32,
995 .dcache_bsize = 32, 995 .dcache_bsize = 32,
996 .num_pmcs = 6, 996 .num_pmcs = 6,
997 .pmc_type = PPC_PMC_G4, 997 .pmc_type = PPC_PMC_G4,
998 .cpu_setup = __setup_cpu_745x, 998 .cpu_setup = __setup_cpu_745x,
999 .oprofile_cpu_type = "ppc/7450", 999 .oprofile_cpu_type = "ppc/7450",
1000 .oprofile_type = PPC_OPROFILE_G4, 1000 .oprofile_type = PPC_OPROFILE_G4,
1001 .machine_check = machine_check_generic, 1001 .machine_check = machine_check_generic,
1002 .platform = "ppc7450", 1002 .platform = "ppc7450",
1003 }, 1003 },
1004 { /* 7447/7457 Rev 1.2 and later */ 1004 { /* 7447/7457 Rev 1.2 and later */
1005 .pvr_mask = 0xffff0000, 1005 .pvr_mask = 0xffff0000,
1006 .pvr_value = 0x80020000, 1006 .pvr_value = 0x80020000,
1007 .cpu_name = "7447/7457", 1007 .cpu_name = "7447/7457",
1008 .cpu_features = CPU_FTRS_7447, 1008 .cpu_features = CPU_FTRS_7447,
1009 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 1009 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
1010 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, 1010 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
1011 .icache_bsize = 32, 1011 .icache_bsize = 32,
1012 .dcache_bsize = 32, 1012 .dcache_bsize = 32,
1013 .num_pmcs = 6, 1013 .num_pmcs = 6,
1014 .pmc_type = PPC_PMC_G4, 1014 .pmc_type = PPC_PMC_G4,
1015 .cpu_setup = __setup_cpu_745x, 1015 .cpu_setup = __setup_cpu_745x,
1016 .oprofile_cpu_type = "ppc/7450", 1016 .oprofile_cpu_type = "ppc/7450",
1017 .oprofile_type = PPC_OPROFILE_G4, 1017 .oprofile_type = PPC_OPROFILE_G4,
1018 .machine_check = machine_check_generic, 1018 .machine_check = machine_check_generic,
1019 .platform = "ppc7450", 1019 .platform = "ppc7450",
1020 }, 1020 },
1021 { /* 7447A */ 1021 { /* 7447A */
1022 .pvr_mask = 0xffff0000, 1022 .pvr_mask = 0xffff0000,
1023 .pvr_value = 0x80030000, 1023 .pvr_value = 0x80030000,
1024 .cpu_name = "7447A", 1024 .cpu_name = "7447A",
1025 .cpu_features = CPU_FTRS_7447A, 1025 .cpu_features = CPU_FTRS_7447A,
1026 .cpu_user_features = COMMON_USER | 1026 .cpu_user_features = COMMON_USER |
1027 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 1027 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
1028 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, 1028 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
1029 .icache_bsize = 32, 1029 .icache_bsize = 32,
1030 .dcache_bsize = 32, 1030 .dcache_bsize = 32,
1031 .num_pmcs = 6, 1031 .num_pmcs = 6,
1032 .pmc_type = PPC_PMC_G4, 1032 .pmc_type = PPC_PMC_G4,
1033 .cpu_setup = __setup_cpu_745x, 1033 .cpu_setup = __setup_cpu_745x,
1034 .oprofile_cpu_type = "ppc/7450", 1034 .oprofile_cpu_type = "ppc/7450",
1035 .oprofile_type = PPC_OPROFILE_G4, 1035 .oprofile_type = PPC_OPROFILE_G4,
1036 .machine_check = machine_check_generic, 1036 .machine_check = machine_check_generic,
1037 .platform = "ppc7450", 1037 .platform = "ppc7450",
1038 }, 1038 },
1039 { /* 7448 */ 1039 { /* 7448 */
1040 .pvr_mask = 0xffff0000, 1040 .pvr_mask = 0xffff0000,
1041 .pvr_value = 0x80040000, 1041 .pvr_value = 0x80040000,
1042 .cpu_name = "7448", 1042 .cpu_name = "7448",
1043 .cpu_features = CPU_FTRS_7448, 1043 .cpu_features = CPU_FTRS_7448,
1044 .cpu_user_features = COMMON_USER | 1044 .cpu_user_features = COMMON_USER |
1045 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 1045 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
1046 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, 1046 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
1047 .icache_bsize = 32, 1047 .icache_bsize = 32,
1048 .dcache_bsize = 32, 1048 .dcache_bsize = 32,
1049 .num_pmcs = 6, 1049 .num_pmcs = 6,
1050 .pmc_type = PPC_PMC_G4, 1050 .pmc_type = PPC_PMC_G4,
1051 .cpu_setup = __setup_cpu_745x, 1051 .cpu_setup = __setup_cpu_745x,
1052 .oprofile_cpu_type = "ppc/7450", 1052 .oprofile_cpu_type = "ppc/7450",
1053 .oprofile_type = PPC_OPROFILE_G4, 1053 .oprofile_type = PPC_OPROFILE_G4,
1054 .machine_check = machine_check_generic, 1054 .machine_check = machine_check_generic,
1055 .platform = "ppc7450", 1055 .platform = "ppc7450",
1056 }, 1056 },
1057 { /* 82xx (8240, 8245, 8260 are all 603e cores) */ 1057 { /* 82xx (8240, 8245, 8260 are all 603e cores) */
1058 .pvr_mask = 0x7fff0000, 1058 .pvr_mask = 0x7fff0000,
1059 .pvr_value = 0x00810000, 1059 .pvr_value = 0x00810000,
1060 .cpu_name = "82xx", 1060 .cpu_name = "82xx",
1061 .cpu_features = CPU_FTRS_82XX, 1061 .cpu_features = CPU_FTRS_82XX,
1062 .cpu_user_features = COMMON_USER, 1062 .cpu_user_features = COMMON_USER,
1063 .mmu_features = 0, 1063 .mmu_features = 0,
1064 .icache_bsize = 32, 1064 .icache_bsize = 32,
1065 .dcache_bsize = 32, 1065 .dcache_bsize = 32,
1066 .cpu_setup = __setup_cpu_603, 1066 .cpu_setup = __setup_cpu_603,
1067 .machine_check = machine_check_generic, 1067 .machine_check = machine_check_generic,
1068 .platform = "ppc603", 1068 .platform = "ppc603",
1069 }, 1069 },
1070 { /* All G2_LE (603e core, plus some) have the same pvr */ 1070 { /* All G2_LE (603e core, plus some) have the same pvr */
1071 .pvr_mask = 0x7fff0000, 1071 .pvr_mask = 0x7fff0000,
1072 .pvr_value = 0x00820000, 1072 .pvr_value = 0x00820000,
1073 .cpu_name = "G2_LE", 1073 .cpu_name = "G2_LE",
1074 .cpu_features = CPU_FTRS_G2_LE, 1074 .cpu_features = CPU_FTRS_G2_LE,
1075 .cpu_user_features = COMMON_USER, 1075 .cpu_user_features = COMMON_USER,
1076 .mmu_features = MMU_FTR_USE_HIGH_BATS, 1076 .mmu_features = MMU_FTR_USE_HIGH_BATS,
1077 .icache_bsize = 32, 1077 .icache_bsize = 32,
1078 .dcache_bsize = 32, 1078 .dcache_bsize = 32,
1079 .cpu_setup = __setup_cpu_603, 1079 .cpu_setup = __setup_cpu_603,
1080 .machine_check = machine_check_generic, 1080 .machine_check = machine_check_generic,
1081 .platform = "ppc603", 1081 .platform = "ppc603",
1082 }, 1082 },
1083 { /* e300c1 (a 603e core, plus some) on 83xx */ 1083 { /* e300c1 (a 603e core, plus some) on 83xx */
1084 .pvr_mask = 0x7fff0000, 1084 .pvr_mask = 0x7fff0000,
1085 .pvr_value = 0x00830000, 1085 .pvr_value = 0x00830000,
1086 .cpu_name = "e300c1", 1086 .cpu_name = "e300c1",
1087 .cpu_features = CPU_FTRS_E300, 1087 .cpu_features = CPU_FTRS_E300,
1088 .cpu_user_features = COMMON_USER, 1088 .cpu_user_features = COMMON_USER,
1089 .mmu_features = MMU_FTR_USE_HIGH_BATS, 1089 .mmu_features = MMU_FTR_USE_HIGH_BATS,
1090 .icache_bsize = 32, 1090 .icache_bsize = 32,
1091 .dcache_bsize = 32, 1091 .dcache_bsize = 32,
1092 .cpu_setup = __setup_cpu_603, 1092 .cpu_setup = __setup_cpu_603,
1093 .machine_check = machine_check_generic, 1093 .machine_check = machine_check_generic,
1094 .platform = "ppc603", 1094 .platform = "ppc603",
1095 }, 1095 },
1096 { /* e300c2 (an e300c1 core, plus some, minus FPU) on 83xx */ 1096 { /* e300c2 (an e300c1 core, plus some, minus FPU) on 83xx */
1097 .pvr_mask = 0x7fff0000, 1097 .pvr_mask = 0x7fff0000,
1098 .pvr_value = 0x00840000, 1098 .pvr_value = 0x00840000,
1099 .cpu_name = "e300c2", 1099 .cpu_name = "e300c2",
1100 .cpu_features = CPU_FTRS_E300C2, 1100 .cpu_features = CPU_FTRS_E300C2,
1101 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 1101 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
1102 .mmu_features = MMU_FTR_USE_HIGH_BATS | 1102 .mmu_features = MMU_FTR_USE_HIGH_BATS |
1103 MMU_FTR_NEED_DTLB_SW_LRU, 1103 MMU_FTR_NEED_DTLB_SW_LRU,
1104 .icache_bsize = 32, 1104 .icache_bsize = 32,
1105 .dcache_bsize = 32, 1105 .dcache_bsize = 32,
1106 .cpu_setup = __setup_cpu_603, 1106 .cpu_setup = __setup_cpu_603,
1107 .machine_check = machine_check_generic, 1107 .machine_check = machine_check_generic,
1108 .platform = "ppc603", 1108 .platform = "ppc603",
1109 }, 1109 },
1110 { /* e300c3 (e300c1, plus one IU, half cache size) on 83xx */ 1110 { /* e300c3 (e300c1, plus one IU, half cache size) on 83xx */
1111 .pvr_mask = 0x7fff0000, 1111 .pvr_mask = 0x7fff0000,
1112 .pvr_value = 0x00850000, 1112 .pvr_value = 0x00850000,
1113 .cpu_name = "e300c3", 1113 .cpu_name = "e300c3",
1114 .cpu_features = CPU_FTRS_E300, 1114 .cpu_features = CPU_FTRS_E300,
1115 .cpu_user_features = COMMON_USER, 1115 .cpu_user_features = COMMON_USER,
1116 .mmu_features = MMU_FTR_USE_HIGH_BATS | 1116 .mmu_features = MMU_FTR_USE_HIGH_BATS |
1117 MMU_FTR_NEED_DTLB_SW_LRU, 1117 MMU_FTR_NEED_DTLB_SW_LRU,
1118 .icache_bsize = 32, 1118 .icache_bsize = 32,
1119 .dcache_bsize = 32, 1119 .dcache_bsize = 32,
1120 .cpu_setup = __setup_cpu_603, 1120 .cpu_setup = __setup_cpu_603,
1121 .num_pmcs = 4, 1121 .num_pmcs = 4,
1122 .oprofile_cpu_type = "ppc/e300", 1122 .oprofile_cpu_type = "ppc/e300",
1123 .oprofile_type = PPC_OPROFILE_FSL_EMB, 1123 .oprofile_type = PPC_OPROFILE_FSL_EMB,
1124 .platform = "ppc603", 1124 .platform = "ppc603",
1125 }, 1125 },
1126 { /* e300c4 (e300c1, plus one IU) */ 1126 { /* e300c4 (e300c1, plus one IU) */
1127 .pvr_mask = 0x7fff0000, 1127 .pvr_mask = 0x7fff0000,
1128 .pvr_value = 0x00860000, 1128 .pvr_value = 0x00860000,
1129 .cpu_name = "e300c4", 1129 .cpu_name = "e300c4",
1130 .cpu_features = CPU_FTRS_E300, 1130 .cpu_features = CPU_FTRS_E300,
1131 .cpu_user_features = COMMON_USER, 1131 .cpu_user_features = COMMON_USER,
1132 .mmu_features = MMU_FTR_USE_HIGH_BATS | 1132 .mmu_features = MMU_FTR_USE_HIGH_BATS |
1133 MMU_FTR_NEED_DTLB_SW_LRU, 1133 MMU_FTR_NEED_DTLB_SW_LRU,
1134 .icache_bsize = 32, 1134 .icache_bsize = 32,
1135 .dcache_bsize = 32, 1135 .dcache_bsize = 32,
1136 .cpu_setup = __setup_cpu_603, 1136 .cpu_setup = __setup_cpu_603,
1137 .machine_check = machine_check_generic, 1137 .machine_check = machine_check_generic,
1138 .num_pmcs = 4, 1138 .num_pmcs = 4,
1139 .oprofile_cpu_type = "ppc/e300", 1139 .oprofile_cpu_type = "ppc/e300",
1140 .oprofile_type = PPC_OPROFILE_FSL_EMB, 1140 .oprofile_type = PPC_OPROFILE_FSL_EMB,
1141 .platform = "ppc603", 1141 .platform = "ppc603",
1142 }, 1142 },
1143 { /* default match, we assume split I/D cache & TB (non-601)... */ 1143 { /* default match, we assume split I/D cache & TB (non-601)... */
1144 .pvr_mask = 0x00000000, 1144 .pvr_mask = 0x00000000,
1145 .pvr_value = 0x00000000, 1145 .pvr_value = 0x00000000,
1146 .cpu_name = "(generic PPC)", 1146 .cpu_name = "(generic PPC)",
1147 .cpu_features = CPU_FTRS_CLASSIC32, 1147 .cpu_features = CPU_FTRS_CLASSIC32,
1148 .cpu_user_features = COMMON_USER, 1148 .cpu_user_features = COMMON_USER,
1149 .mmu_features = MMU_FTR_HPTE_TABLE, 1149 .mmu_features = MMU_FTR_HPTE_TABLE,
1150 .icache_bsize = 32, 1150 .icache_bsize = 32,
1151 .dcache_bsize = 32, 1151 .dcache_bsize = 32,
1152 .machine_check = machine_check_generic, 1152 .machine_check = machine_check_generic,
1153 .platform = "ppc603", 1153 .platform = "ppc603",
1154 }, 1154 },
1155 #endif /* CLASSIC_PPC */ 1155 #endif /* CLASSIC_PPC */
1156 #ifdef CONFIG_8xx 1156 #ifdef CONFIG_8xx
1157 { /* 8xx */ 1157 { /* 8xx */
1158 .pvr_mask = 0xffff0000, 1158 .pvr_mask = 0xffff0000,
1159 .pvr_value = 0x00500000, 1159 .pvr_value = 0x00500000,
1160 .cpu_name = "8xx", 1160 .cpu_name = "8xx",
1161 /* CPU_FTR_MAYBE_CAN_DOZE is possible, 1161 /* CPU_FTR_MAYBE_CAN_DOZE is possible,
1162 * if the 8xx code is there.... */ 1162 * if the 8xx code is there.... */
1163 .cpu_features = CPU_FTRS_8XX, 1163 .cpu_features = CPU_FTRS_8XX,
1164 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 1164 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
1165 .mmu_features = MMU_FTR_TYPE_8xx, 1165 .mmu_features = MMU_FTR_TYPE_8xx,
1166 .icache_bsize = 16, 1166 .icache_bsize = 16,
1167 .dcache_bsize = 16, 1167 .dcache_bsize = 16,
1168 .platform = "ppc823", 1168 .platform = "ppc823",
1169 }, 1169 },
1170 #endif /* CONFIG_8xx */ 1170 #endif /* CONFIG_8xx */
1171 #ifdef CONFIG_40x 1171 #ifdef CONFIG_40x
1172 { /* 403GC */ 1172 { /* 403GC */
1173 .pvr_mask = 0xffffff00, 1173 .pvr_mask = 0xffffff00,
1174 .pvr_value = 0x00200200, 1174 .pvr_value = 0x00200200,
1175 .cpu_name = "403GC", 1175 .cpu_name = "403GC",
1176 .cpu_features = CPU_FTRS_40X, 1176 .cpu_features = CPU_FTRS_40X,
1177 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 1177 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
1178 .mmu_features = MMU_FTR_TYPE_40x, 1178 .mmu_features = MMU_FTR_TYPE_40x,
1179 .icache_bsize = 16, 1179 .icache_bsize = 16,
1180 .dcache_bsize = 16, 1180 .dcache_bsize = 16,
1181 .machine_check = machine_check_4xx, 1181 .machine_check = machine_check_4xx,
1182 .platform = "ppc403", 1182 .platform = "ppc403",
1183 }, 1183 },
1184 { /* 403GCX */ 1184 { /* 403GCX */
1185 .pvr_mask = 0xffffff00, 1185 .pvr_mask = 0xffffff00,
1186 .pvr_value = 0x00201400, 1186 .pvr_value = 0x00201400,
1187 .cpu_name = "403GCX", 1187 .cpu_name = "403GCX",
1188 .cpu_features = CPU_FTRS_40X, 1188 .cpu_features = CPU_FTRS_40X,
1189 .cpu_user_features = PPC_FEATURE_32 | 1189 .cpu_user_features = PPC_FEATURE_32 |
1190 PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB, 1190 PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB,
1191 .mmu_features = MMU_FTR_TYPE_40x, 1191 .mmu_features = MMU_FTR_TYPE_40x,
1192 .icache_bsize = 16, 1192 .icache_bsize = 16,
1193 .dcache_bsize = 16, 1193 .dcache_bsize = 16,
1194 .machine_check = machine_check_4xx, 1194 .machine_check = machine_check_4xx,
1195 .platform = "ppc403", 1195 .platform = "ppc403",
1196 }, 1196 },
1197 { /* 403G ?? */ 1197 { /* 403G ?? */
1198 .pvr_mask = 0xffff0000, 1198 .pvr_mask = 0xffff0000,
1199 .pvr_value = 0x00200000, 1199 .pvr_value = 0x00200000,
1200 .cpu_name = "403G ??", 1200 .cpu_name = "403G ??",
1201 .cpu_features = CPU_FTRS_40X, 1201 .cpu_features = CPU_FTRS_40X,
1202 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 1202 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
1203 .mmu_features = MMU_FTR_TYPE_40x, 1203 .mmu_features = MMU_FTR_TYPE_40x,
1204 .icache_bsize = 16, 1204 .icache_bsize = 16,
1205 .dcache_bsize = 16, 1205 .dcache_bsize = 16,
1206 .machine_check = machine_check_4xx, 1206 .machine_check = machine_check_4xx,
1207 .platform = "ppc403", 1207 .platform = "ppc403",
1208 }, 1208 },
1209 { /* 405GP */ 1209 { /* 405GP */
1210 .pvr_mask = 0xffff0000, 1210 .pvr_mask = 0xffff0000,
1211 .pvr_value = 0x40110000, 1211 .pvr_value = 0x40110000,
1212 .cpu_name = "405GP", 1212 .cpu_name = "405GP",
1213 .cpu_features = CPU_FTRS_40X, 1213 .cpu_features = CPU_FTRS_40X,
1214 .cpu_user_features = PPC_FEATURE_32 | 1214 .cpu_user_features = PPC_FEATURE_32 |
1215 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1215 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1216 .mmu_features = MMU_FTR_TYPE_40x, 1216 .mmu_features = MMU_FTR_TYPE_40x,
1217 .icache_bsize = 32, 1217 .icache_bsize = 32,
1218 .dcache_bsize = 32, 1218 .dcache_bsize = 32,
1219 .machine_check = machine_check_4xx, 1219 .machine_check = machine_check_4xx,
1220 .platform = "ppc405", 1220 .platform = "ppc405",
1221 }, 1221 },
1222 { /* STB 03xxx */ 1222 { /* STB 03xxx */
1223 .pvr_mask = 0xffff0000, 1223 .pvr_mask = 0xffff0000,
1224 .pvr_value = 0x40130000, 1224 .pvr_value = 0x40130000,
1225 .cpu_name = "STB03xxx", 1225 .cpu_name = "STB03xxx",
1226 .cpu_features = CPU_FTRS_40X, 1226 .cpu_features = CPU_FTRS_40X,
1227 .cpu_user_features = PPC_FEATURE_32 | 1227 .cpu_user_features = PPC_FEATURE_32 |
1228 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1228 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1229 .mmu_features = MMU_FTR_TYPE_40x, 1229 .mmu_features = MMU_FTR_TYPE_40x,
1230 .icache_bsize = 32, 1230 .icache_bsize = 32,
1231 .dcache_bsize = 32, 1231 .dcache_bsize = 32,
1232 .machine_check = machine_check_4xx, 1232 .machine_check = machine_check_4xx,
1233 .platform = "ppc405", 1233 .platform = "ppc405",
1234 }, 1234 },
1235 { /* STB 04xxx */ 1235 { /* STB 04xxx */
1236 .pvr_mask = 0xffff0000, 1236 .pvr_mask = 0xffff0000,
1237 .pvr_value = 0x41810000, 1237 .pvr_value = 0x41810000,
1238 .cpu_name = "STB04xxx", 1238 .cpu_name = "STB04xxx",
1239 .cpu_features = CPU_FTRS_40X, 1239 .cpu_features = CPU_FTRS_40X,
1240 .cpu_user_features = PPC_FEATURE_32 | 1240 .cpu_user_features = PPC_FEATURE_32 |
1241 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1241 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1242 .mmu_features = MMU_FTR_TYPE_40x, 1242 .mmu_features = MMU_FTR_TYPE_40x,
1243 .icache_bsize = 32, 1243 .icache_bsize = 32,
1244 .dcache_bsize = 32, 1244 .dcache_bsize = 32,
1245 .machine_check = machine_check_4xx, 1245 .machine_check = machine_check_4xx,
1246 .platform = "ppc405", 1246 .platform = "ppc405",
1247 }, 1247 },
1248 { /* NP405L */ 1248 { /* NP405L */
1249 .pvr_mask = 0xffff0000, 1249 .pvr_mask = 0xffff0000,
1250 .pvr_value = 0x41610000, 1250 .pvr_value = 0x41610000,
1251 .cpu_name = "NP405L", 1251 .cpu_name = "NP405L",
1252 .cpu_features = CPU_FTRS_40X, 1252 .cpu_features = CPU_FTRS_40X,
1253 .cpu_user_features = PPC_FEATURE_32 | 1253 .cpu_user_features = PPC_FEATURE_32 |
1254 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1254 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1255 .mmu_features = MMU_FTR_TYPE_40x, 1255 .mmu_features = MMU_FTR_TYPE_40x,
1256 .icache_bsize = 32, 1256 .icache_bsize = 32,
1257 .dcache_bsize = 32, 1257 .dcache_bsize = 32,
1258 .machine_check = machine_check_4xx, 1258 .machine_check = machine_check_4xx,
1259 .platform = "ppc405", 1259 .platform = "ppc405",
1260 }, 1260 },
1261 { /* NP4GS3 */ 1261 { /* NP4GS3 */
1262 .pvr_mask = 0xffff0000, 1262 .pvr_mask = 0xffff0000,
1263 .pvr_value = 0x40B10000, 1263 .pvr_value = 0x40B10000,
1264 .cpu_name = "NP4GS3", 1264 .cpu_name = "NP4GS3",
1265 .cpu_features = CPU_FTRS_40X, 1265 .cpu_features = CPU_FTRS_40X,
1266 .cpu_user_features = PPC_FEATURE_32 | 1266 .cpu_user_features = PPC_FEATURE_32 |
1267 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1267 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1268 .mmu_features = MMU_FTR_TYPE_40x, 1268 .mmu_features = MMU_FTR_TYPE_40x,
1269 .icache_bsize = 32, 1269 .icache_bsize = 32,
1270 .dcache_bsize = 32, 1270 .dcache_bsize = 32,
1271 .machine_check = machine_check_4xx, 1271 .machine_check = machine_check_4xx,
1272 .platform = "ppc405", 1272 .platform = "ppc405",
1273 }, 1273 },
1274 { /* NP405H */ 1274 { /* NP405H */
1275 .pvr_mask = 0xffff0000, 1275 .pvr_mask = 0xffff0000,
1276 .pvr_value = 0x41410000, 1276 .pvr_value = 0x41410000,
1277 .cpu_name = "NP405H", 1277 .cpu_name = "NP405H",
1278 .cpu_features = CPU_FTRS_40X, 1278 .cpu_features = CPU_FTRS_40X,
1279 .cpu_user_features = PPC_FEATURE_32 | 1279 .cpu_user_features = PPC_FEATURE_32 |
1280 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1280 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1281 .mmu_features = MMU_FTR_TYPE_40x, 1281 .mmu_features = MMU_FTR_TYPE_40x,
1282 .icache_bsize = 32, 1282 .icache_bsize = 32,
1283 .dcache_bsize = 32, 1283 .dcache_bsize = 32,
1284 .machine_check = machine_check_4xx, 1284 .machine_check = machine_check_4xx,
1285 .platform = "ppc405", 1285 .platform = "ppc405",
1286 }, 1286 },
1287 { /* 405GPr */ 1287 { /* 405GPr */
1288 .pvr_mask = 0xffff0000, 1288 .pvr_mask = 0xffff0000,
1289 .pvr_value = 0x50910000, 1289 .pvr_value = 0x50910000,
1290 .cpu_name = "405GPr", 1290 .cpu_name = "405GPr",
1291 .cpu_features = CPU_FTRS_40X, 1291 .cpu_features = CPU_FTRS_40X,
1292 .cpu_user_features = PPC_FEATURE_32 | 1292 .cpu_user_features = PPC_FEATURE_32 |
1293 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1293 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1294 .mmu_features = MMU_FTR_TYPE_40x, 1294 .mmu_features = MMU_FTR_TYPE_40x,
1295 .icache_bsize = 32, 1295 .icache_bsize = 32,
1296 .dcache_bsize = 32, 1296 .dcache_bsize = 32,
1297 .machine_check = machine_check_4xx, 1297 .machine_check = machine_check_4xx,
1298 .platform = "ppc405", 1298 .platform = "ppc405",
1299 }, 1299 },
1300 { /* STBx25xx */ 1300 { /* STBx25xx */
1301 .pvr_mask = 0xffff0000, 1301 .pvr_mask = 0xffff0000,
1302 .pvr_value = 0x51510000, 1302 .pvr_value = 0x51510000,
1303 .cpu_name = "STBx25xx", 1303 .cpu_name = "STBx25xx",
1304 .cpu_features = CPU_FTRS_40X, 1304 .cpu_features = CPU_FTRS_40X,
1305 .cpu_user_features = PPC_FEATURE_32 | 1305 .cpu_user_features = PPC_FEATURE_32 |
1306 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1306 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1307 .mmu_features = MMU_FTR_TYPE_40x, 1307 .mmu_features = MMU_FTR_TYPE_40x,
1308 .icache_bsize = 32, 1308 .icache_bsize = 32,
1309 .dcache_bsize = 32, 1309 .dcache_bsize = 32,
1310 .machine_check = machine_check_4xx, 1310 .machine_check = machine_check_4xx,
1311 .platform = "ppc405", 1311 .platform = "ppc405",
1312 }, 1312 },
1313 { /* 405LP */ 1313 { /* 405LP */
1314 .pvr_mask = 0xffff0000, 1314 .pvr_mask = 0xffff0000,
1315 .pvr_value = 0x41F10000, 1315 .pvr_value = 0x41F10000,
1316 .cpu_name = "405LP", 1316 .cpu_name = "405LP",
1317 .cpu_features = CPU_FTRS_40X, 1317 .cpu_features = CPU_FTRS_40X,
1318 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 1318 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
1319 .mmu_features = MMU_FTR_TYPE_40x, 1319 .mmu_features = MMU_FTR_TYPE_40x,
1320 .icache_bsize = 32, 1320 .icache_bsize = 32,
1321 .dcache_bsize = 32, 1321 .dcache_bsize = 32,
1322 .machine_check = machine_check_4xx, 1322 .machine_check = machine_check_4xx,
1323 .platform = "ppc405", 1323 .platform = "ppc405",
1324 }, 1324 },
1325 { /* Xilinx Virtex-II Pro */ 1325 { /* Xilinx Virtex-II Pro */
1326 .pvr_mask = 0xfffff000, 1326 .pvr_mask = 0xfffff000,
1327 .pvr_value = 0x20010000, 1327 .pvr_value = 0x20010000,
1328 .cpu_name = "Virtex-II Pro", 1328 .cpu_name = "Virtex-II Pro",
1329 .cpu_features = CPU_FTRS_40X, 1329 .cpu_features = CPU_FTRS_40X,
1330 .cpu_user_features = PPC_FEATURE_32 | 1330 .cpu_user_features = PPC_FEATURE_32 |
1331 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1331 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1332 .mmu_features = MMU_FTR_TYPE_40x, 1332 .mmu_features = MMU_FTR_TYPE_40x,
1333 .icache_bsize = 32, 1333 .icache_bsize = 32,
1334 .dcache_bsize = 32, 1334 .dcache_bsize = 32,
1335 .machine_check = machine_check_4xx, 1335 .machine_check = machine_check_4xx,
1336 .platform = "ppc405", 1336 .platform = "ppc405",
1337 }, 1337 },
1338 { /* Xilinx Virtex-4 FX */ 1338 { /* Xilinx Virtex-4 FX */
1339 .pvr_mask = 0xfffff000, 1339 .pvr_mask = 0xfffff000,
1340 .pvr_value = 0x20011000, 1340 .pvr_value = 0x20011000,
1341 .cpu_name = "Virtex-4 FX", 1341 .cpu_name = "Virtex-4 FX",
1342 .cpu_features = CPU_FTRS_40X, 1342 .cpu_features = CPU_FTRS_40X,
1343 .cpu_user_features = PPC_FEATURE_32 | 1343 .cpu_user_features = PPC_FEATURE_32 |
1344 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1344 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1345 .mmu_features = MMU_FTR_TYPE_40x, 1345 .mmu_features = MMU_FTR_TYPE_40x,
1346 .icache_bsize = 32, 1346 .icache_bsize = 32,
1347 .dcache_bsize = 32, 1347 .dcache_bsize = 32,
1348 .machine_check = machine_check_4xx, 1348 .machine_check = machine_check_4xx,
1349 .platform = "ppc405", 1349 .platform = "ppc405",
1350 }, 1350 },
1351 { /* 405EP */ 1351 { /* 405EP */
1352 .pvr_mask = 0xffff0000, 1352 .pvr_mask = 0xffff0000,
1353 .pvr_value = 0x51210000, 1353 .pvr_value = 0x51210000,
1354 .cpu_name = "405EP", 1354 .cpu_name = "405EP",
1355 .cpu_features = CPU_FTRS_40X, 1355 .cpu_features = CPU_FTRS_40X,
1356 .cpu_user_features = PPC_FEATURE_32 | 1356 .cpu_user_features = PPC_FEATURE_32 |
1357 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1357 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1358 .mmu_features = MMU_FTR_TYPE_40x, 1358 .mmu_features = MMU_FTR_TYPE_40x,
1359 .icache_bsize = 32, 1359 .icache_bsize = 32,
1360 .dcache_bsize = 32, 1360 .dcache_bsize = 32,
1361 .machine_check = machine_check_4xx, 1361 .machine_check = machine_check_4xx,
1362 .platform = "ppc405", 1362 .platform = "ppc405",
1363 }, 1363 },
1364 { /* 405EX Rev. A/B with Security */ 1364 { /* 405EX Rev. A/B with Security */
1365 .pvr_mask = 0xffff000f, 1365 .pvr_mask = 0xffff000f,
1366 .pvr_value = 0x12910007, 1366 .pvr_value = 0x12910007,
1367 .cpu_name = "405EX Rev. A/B", 1367 .cpu_name = "405EX Rev. A/B",
1368 .cpu_features = CPU_FTRS_40X, 1368 .cpu_features = CPU_FTRS_40X,
1369 .cpu_user_features = PPC_FEATURE_32 | 1369 .cpu_user_features = PPC_FEATURE_32 |
1370 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1370 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1371 .mmu_features = MMU_FTR_TYPE_40x, 1371 .mmu_features = MMU_FTR_TYPE_40x,
1372 .icache_bsize = 32, 1372 .icache_bsize = 32,
1373 .dcache_bsize = 32, 1373 .dcache_bsize = 32,
1374 .machine_check = machine_check_4xx, 1374 .machine_check = machine_check_4xx,
1375 .platform = "ppc405", 1375 .platform = "ppc405",
1376 }, 1376 },
1377 { /* 405EX Rev. C without Security */ 1377 { /* 405EX Rev. C without Security */
1378 .pvr_mask = 0xffff000f, 1378 .pvr_mask = 0xffff000f,
1379 .pvr_value = 0x1291000d, 1379 .pvr_value = 0x1291000d,
1380 .cpu_name = "405EX Rev. C", 1380 .cpu_name = "405EX Rev. C",
1381 .cpu_features = CPU_FTRS_40X, 1381 .cpu_features = CPU_FTRS_40X,
1382 .cpu_user_features = PPC_FEATURE_32 | 1382 .cpu_user_features = PPC_FEATURE_32 |
1383 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1383 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1384 .mmu_features = MMU_FTR_TYPE_40x, 1384 .mmu_features = MMU_FTR_TYPE_40x,
1385 .icache_bsize = 32, 1385 .icache_bsize = 32,
1386 .dcache_bsize = 32, 1386 .dcache_bsize = 32,
1387 .machine_check = machine_check_4xx, 1387 .machine_check = machine_check_4xx,
1388 .platform = "ppc405", 1388 .platform = "ppc405",
1389 }, 1389 },
1390 { /* 405EX Rev. C with Security */ 1390 { /* 405EX Rev. C with Security */
1391 .pvr_mask = 0xffff000f, 1391 .pvr_mask = 0xffff000f,
1392 .pvr_value = 0x1291000f, 1392 .pvr_value = 0x1291000f,
1393 .cpu_name = "405EX Rev. C", 1393 .cpu_name = "405EX Rev. C",
1394 .cpu_features = CPU_FTRS_40X, 1394 .cpu_features = CPU_FTRS_40X,
1395 .cpu_user_features = PPC_FEATURE_32 | 1395 .cpu_user_features = PPC_FEATURE_32 |
1396 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1396 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1397 .mmu_features = MMU_FTR_TYPE_40x, 1397 .mmu_features = MMU_FTR_TYPE_40x,
1398 .icache_bsize = 32, 1398 .icache_bsize = 32,
1399 .dcache_bsize = 32, 1399 .dcache_bsize = 32,
1400 .machine_check = machine_check_4xx, 1400 .machine_check = machine_check_4xx,
1401 .platform = "ppc405", 1401 .platform = "ppc405",
1402 }, 1402 },
1403 { /* 405EX Rev. D without Security */ 1403 { /* 405EX Rev. D without Security */
1404 .pvr_mask = 0xffff000f, 1404 .pvr_mask = 0xffff000f,
1405 .pvr_value = 0x12910003, 1405 .pvr_value = 0x12910003,
1406 .cpu_name = "405EX Rev. D", 1406 .cpu_name = "405EX Rev. D",
1407 .cpu_features = CPU_FTRS_40X, 1407 .cpu_features = CPU_FTRS_40X,
1408 .cpu_user_features = PPC_FEATURE_32 | 1408 .cpu_user_features = PPC_FEATURE_32 |
1409 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1409 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1410 .mmu_features = MMU_FTR_TYPE_40x, 1410 .mmu_features = MMU_FTR_TYPE_40x,
1411 .icache_bsize = 32, 1411 .icache_bsize = 32,
1412 .dcache_bsize = 32, 1412 .dcache_bsize = 32,
1413 .machine_check = machine_check_4xx, 1413 .machine_check = machine_check_4xx,
1414 .platform = "ppc405", 1414 .platform = "ppc405",
1415 }, 1415 },
1416 { /* 405EX Rev. D with Security */ 1416 { /* 405EX Rev. D with Security */
1417 .pvr_mask = 0xffff000f, 1417 .pvr_mask = 0xffff000f,
1418 .pvr_value = 0x12910005, 1418 .pvr_value = 0x12910005,
1419 .cpu_name = "405EX Rev. D", 1419 .cpu_name = "405EX Rev. D",
1420 .cpu_features = CPU_FTRS_40X, 1420 .cpu_features = CPU_FTRS_40X,
1421 .cpu_user_features = PPC_FEATURE_32 | 1421 .cpu_user_features = PPC_FEATURE_32 |
1422 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1422 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1423 .mmu_features = MMU_FTR_TYPE_40x, 1423 .mmu_features = MMU_FTR_TYPE_40x,
1424 .icache_bsize = 32, 1424 .icache_bsize = 32,
1425 .dcache_bsize = 32, 1425 .dcache_bsize = 32,
1426 .machine_check = machine_check_4xx, 1426 .machine_check = machine_check_4xx,
1427 .platform = "ppc405", 1427 .platform = "ppc405",
1428 }, 1428 },
1429 { /* 405EXr Rev. A/B without Security */ 1429 { /* 405EXr Rev. A/B without Security */
1430 .pvr_mask = 0xffff000f, 1430 .pvr_mask = 0xffff000f,
1431 .pvr_value = 0x12910001, 1431 .pvr_value = 0x12910001,
1432 .cpu_name = "405EXr Rev. A/B", 1432 .cpu_name = "405EXr Rev. A/B",
1433 .cpu_features = CPU_FTRS_40X, 1433 .cpu_features = CPU_FTRS_40X,
1434 .cpu_user_features = PPC_FEATURE_32 | 1434 .cpu_user_features = PPC_FEATURE_32 |
1435 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1435 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1436 .mmu_features = MMU_FTR_TYPE_40x, 1436 .mmu_features = MMU_FTR_TYPE_40x,
1437 .icache_bsize = 32, 1437 .icache_bsize = 32,
1438 .dcache_bsize = 32, 1438 .dcache_bsize = 32,
1439 .machine_check = machine_check_4xx, 1439 .machine_check = machine_check_4xx,
1440 .platform = "ppc405", 1440 .platform = "ppc405",
1441 }, 1441 },
1442 { /* 405EXr Rev. C without Security */ 1442 { /* 405EXr Rev. C without Security */
1443 .pvr_mask = 0xffff000f, 1443 .pvr_mask = 0xffff000f,
1444 .pvr_value = 0x12910009, 1444 .pvr_value = 0x12910009,
1445 .cpu_name = "405EXr Rev. C", 1445 .cpu_name = "405EXr Rev. C",
1446 .cpu_features = CPU_FTRS_40X, 1446 .cpu_features = CPU_FTRS_40X,
1447 .cpu_user_features = PPC_FEATURE_32 | 1447 .cpu_user_features = PPC_FEATURE_32 |
1448 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1448 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1449 .mmu_features = MMU_FTR_TYPE_40x, 1449 .mmu_features = MMU_FTR_TYPE_40x,
1450 .icache_bsize = 32, 1450 .icache_bsize = 32,
1451 .dcache_bsize = 32, 1451 .dcache_bsize = 32,
1452 .machine_check = machine_check_4xx, 1452 .machine_check = machine_check_4xx,
1453 .platform = "ppc405", 1453 .platform = "ppc405",
1454 }, 1454 },
1455 { /* 405EXr Rev. C with Security */ 1455 { /* 405EXr Rev. C with Security */
1456 .pvr_mask = 0xffff000f, 1456 .pvr_mask = 0xffff000f,
1457 .pvr_value = 0x1291000b, 1457 .pvr_value = 0x1291000b,
1458 .cpu_name = "405EXr Rev. C", 1458 .cpu_name = "405EXr Rev. C",
1459 .cpu_features = CPU_FTRS_40X, 1459 .cpu_features = CPU_FTRS_40X,
1460 .cpu_user_features = PPC_FEATURE_32 | 1460 .cpu_user_features = PPC_FEATURE_32 |
1461 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1461 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1462 .mmu_features = MMU_FTR_TYPE_40x, 1462 .mmu_features = MMU_FTR_TYPE_40x,
1463 .icache_bsize = 32, 1463 .icache_bsize = 32,
1464 .dcache_bsize = 32, 1464 .dcache_bsize = 32,
1465 .machine_check = machine_check_4xx, 1465 .machine_check = machine_check_4xx,
1466 .platform = "ppc405", 1466 .platform = "ppc405",
1467 }, 1467 },
1468 { /* 405EXr Rev. D without Security */ 1468 { /* 405EXr Rev. D without Security */
1469 .pvr_mask = 0xffff000f, 1469 .pvr_mask = 0xffff000f,
1470 .pvr_value = 0x12910000, 1470 .pvr_value = 0x12910000,
1471 .cpu_name = "405EXr Rev. D", 1471 .cpu_name = "405EXr Rev. D",
1472 .cpu_features = CPU_FTRS_40X, 1472 .cpu_features = CPU_FTRS_40X,
1473 .cpu_user_features = PPC_FEATURE_32 | 1473 .cpu_user_features = PPC_FEATURE_32 |
1474 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1474 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1475 .mmu_features = MMU_FTR_TYPE_40x, 1475 .mmu_features = MMU_FTR_TYPE_40x,
1476 .icache_bsize = 32, 1476 .icache_bsize = 32,
1477 .dcache_bsize = 32, 1477 .dcache_bsize = 32,
1478 .machine_check = machine_check_4xx, 1478 .machine_check = machine_check_4xx,
1479 .platform = "ppc405", 1479 .platform = "ppc405",
1480 }, 1480 },
1481 { /* 405EXr Rev. D with Security */ 1481 { /* 405EXr Rev. D with Security */
1482 .pvr_mask = 0xffff000f, 1482 .pvr_mask = 0xffff000f,
1483 .pvr_value = 0x12910002, 1483 .pvr_value = 0x12910002,
1484 .cpu_name = "405EXr Rev. D", 1484 .cpu_name = "405EXr Rev. D",
1485 .cpu_features = CPU_FTRS_40X, 1485 .cpu_features = CPU_FTRS_40X,
1486 .cpu_user_features = PPC_FEATURE_32 | 1486 .cpu_user_features = PPC_FEATURE_32 |
1487 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1487 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1488 .mmu_features = MMU_FTR_TYPE_40x, 1488 .mmu_features = MMU_FTR_TYPE_40x,
1489 .icache_bsize = 32, 1489 .icache_bsize = 32,
1490 .dcache_bsize = 32, 1490 .dcache_bsize = 32,
1491 .machine_check = machine_check_4xx, 1491 .machine_check = machine_check_4xx,
1492 .platform = "ppc405", 1492 .platform = "ppc405",
1493 }, 1493 },
1494 { 1494 {
1495 /* 405EZ */ 1495 /* 405EZ */
1496 .pvr_mask = 0xffff0000, 1496 .pvr_mask = 0xffff0000,
1497 .pvr_value = 0x41510000, 1497 .pvr_value = 0x41510000,
1498 .cpu_name = "405EZ", 1498 .cpu_name = "405EZ",
1499 .cpu_features = CPU_FTRS_40X, 1499 .cpu_features = CPU_FTRS_40X,
1500 .cpu_user_features = PPC_FEATURE_32 | 1500 .cpu_user_features = PPC_FEATURE_32 |
1501 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1501 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1502 .mmu_features = MMU_FTR_TYPE_40x, 1502 .mmu_features = MMU_FTR_TYPE_40x,
1503 .icache_bsize = 32, 1503 .icache_bsize = 32,
1504 .dcache_bsize = 32, 1504 .dcache_bsize = 32,
1505 .machine_check = machine_check_4xx, 1505 .machine_check = machine_check_4xx,
1506 .platform = "ppc405", 1506 .platform = "ppc405",
1507 }, 1507 },
1508 { /* default match */ 1508 { /* default match */
1509 .pvr_mask = 0x00000000, 1509 .pvr_mask = 0x00000000,
1510 .pvr_value = 0x00000000, 1510 .pvr_value = 0x00000000,
1511 .cpu_name = "(generic 40x PPC)", 1511 .cpu_name = "(generic 40x PPC)",
1512 .cpu_features = CPU_FTRS_40X, 1512 .cpu_features = CPU_FTRS_40X,
1513 .cpu_user_features = PPC_FEATURE_32 | 1513 .cpu_user_features = PPC_FEATURE_32 |
1514 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1514 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1515 .mmu_features = MMU_FTR_TYPE_40x, 1515 .mmu_features = MMU_FTR_TYPE_40x,
1516 .icache_bsize = 32, 1516 .icache_bsize = 32,
1517 .dcache_bsize = 32, 1517 .dcache_bsize = 32,
1518 .machine_check = machine_check_4xx, 1518 .machine_check = machine_check_4xx,
1519 .platform = "ppc405", 1519 .platform = "ppc405",
1520 } 1520 }
1521 1521
1522 #endif /* CONFIG_40x */ 1522 #endif /* CONFIG_40x */
1523 #ifdef CONFIG_44x 1523 #ifdef CONFIG_44x
1524 { 1524 {
1525 .pvr_mask = 0xf0000fff, 1525 .pvr_mask = 0xf0000fff,
1526 .pvr_value = 0x40000850, 1526 .pvr_value = 0x40000850,
1527 .cpu_name = "440GR Rev. A", 1527 .cpu_name = "440GR Rev. A",
1528 .cpu_features = CPU_FTRS_44X, 1528 .cpu_features = CPU_FTRS_44X,
1529 .cpu_user_features = COMMON_USER_BOOKE, 1529 .cpu_user_features = COMMON_USER_BOOKE,
1530 .mmu_features = MMU_FTR_TYPE_44x, 1530 .mmu_features = MMU_FTR_TYPE_44x,
1531 .icache_bsize = 32, 1531 .icache_bsize = 32,
1532 .dcache_bsize = 32, 1532 .dcache_bsize = 32,
1533 .machine_check = machine_check_4xx, 1533 .machine_check = machine_check_4xx,
1534 .platform = "ppc440", 1534 .platform = "ppc440",
1535 }, 1535 },
1536 { /* Use logical PVR for 440EP (logical pvr = pvr | 0x8) */ 1536 { /* Use logical PVR for 440EP (logical pvr = pvr | 0x8) */
1537 .pvr_mask = 0xf0000fff, 1537 .pvr_mask = 0xf0000fff,
1538 .pvr_value = 0x40000858, 1538 .pvr_value = 0x40000858,
1539 .cpu_name = "440EP Rev. A", 1539 .cpu_name = "440EP Rev. A",
1540 .cpu_features = CPU_FTRS_44X, 1540 .cpu_features = CPU_FTRS_44X,
1541 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, 1541 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1542 .mmu_features = MMU_FTR_TYPE_44x, 1542 .mmu_features = MMU_FTR_TYPE_44x,
1543 .icache_bsize = 32, 1543 .icache_bsize = 32,
1544 .dcache_bsize = 32, 1544 .dcache_bsize = 32,
1545 .cpu_setup = __setup_cpu_440ep, 1545 .cpu_setup = __setup_cpu_440ep,
1546 .machine_check = machine_check_4xx, 1546 .machine_check = machine_check_4xx,
1547 .platform = "ppc440", 1547 .platform = "ppc440",
1548 }, 1548 },
1549 { 1549 {
1550 .pvr_mask = 0xf0000fff, 1550 .pvr_mask = 0xf0000fff,
1551 .pvr_value = 0x400008d3, 1551 .pvr_value = 0x400008d3,
1552 .cpu_name = "440GR Rev. B", 1552 .cpu_name = "440GR Rev. B",
1553 .cpu_features = CPU_FTRS_44X, 1553 .cpu_features = CPU_FTRS_44X,
1554 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, 1554 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1555 .mmu_features = MMU_FTR_TYPE_44x, 1555 .mmu_features = MMU_FTR_TYPE_44x,
1556 .icache_bsize = 32, 1556 .icache_bsize = 32,
1557 .dcache_bsize = 32, 1557 .dcache_bsize = 32,
1558 .machine_check = machine_check_4xx, 1558 .machine_check = machine_check_4xx,
1559 .platform = "ppc440", 1559 .platform = "ppc440",
1560 }, 1560 },
1561 { /* Matches both physical and logical PVR for 440EP (logical pvr = pvr | 0x8) */ 1561 { /* Matches both physical and logical PVR for 440EP (logical pvr = pvr | 0x8) */
1562 .pvr_mask = 0xf0000ff7, 1562 .pvr_mask = 0xf0000ff7,
1563 .pvr_value = 0x400008d4, 1563 .pvr_value = 0x400008d4,
1564 .cpu_name = "440EP Rev. C", 1564 .cpu_name = "440EP Rev. C",
1565 .cpu_features = CPU_FTRS_44X, 1565 .cpu_features = CPU_FTRS_44X,
1566 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, 1566 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1567 .mmu_features = MMU_FTR_TYPE_44x, 1567 .mmu_features = MMU_FTR_TYPE_44x,
1568 .icache_bsize = 32, 1568 .icache_bsize = 32,
1569 .dcache_bsize = 32, 1569 .dcache_bsize = 32,
1570 .cpu_setup = __setup_cpu_440ep, 1570 .cpu_setup = __setup_cpu_440ep,
1571 .machine_check = machine_check_4xx, 1571 .machine_check = machine_check_4xx,
1572 .platform = "ppc440", 1572 .platform = "ppc440",
1573 }, 1573 },
1574 { /* Use logical PVR for 440EP (logical pvr = pvr | 0x8) */ 1574 { /* Use logical PVR for 440EP (logical pvr = pvr | 0x8) */
1575 .pvr_mask = 0xf0000fff, 1575 .pvr_mask = 0xf0000fff,
1576 .pvr_value = 0x400008db, 1576 .pvr_value = 0x400008db,
1577 .cpu_name = "440EP Rev. B", 1577 .cpu_name = "440EP Rev. B",
1578 .cpu_features = CPU_FTRS_44X, 1578 .cpu_features = CPU_FTRS_44X,
1579 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, 1579 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1580 .mmu_features = MMU_FTR_TYPE_44x, 1580 .mmu_features = MMU_FTR_TYPE_44x,
1581 .icache_bsize = 32, 1581 .icache_bsize = 32,
1582 .dcache_bsize = 32, 1582 .dcache_bsize = 32,
1583 .cpu_setup = __setup_cpu_440ep, 1583 .cpu_setup = __setup_cpu_440ep,
1584 .machine_check = machine_check_4xx, 1584 .machine_check = machine_check_4xx,
1585 .platform = "ppc440", 1585 .platform = "ppc440",
1586 }, 1586 },
1587 { /* 440GRX */ 1587 { /* 440GRX */
1588 .pvr_mask = 0xf0000ffb, 1588 .pvr_mask = 0xf0000ffb,
1589 .pvr_value = 0x200008D0, 1589 .pvr_value = 0x200008D0,
1590 .cpu_name = "440GRX", 1590 .cpu_name = "440GRX",
1591 .cpu_features = CPU_FTRS_44X, 1591 .cpu_features = CPU_FTRS_44X,
1592 .cpu_user_features = COMMON_USER_BOOKE, 1592 .cpu_user_features = COMMON_USER_BOOKE,
1593 .mmu_features = MMU_FTR_TYPE_44x, 1593 .mmu_features = MMU_FTR_TYPE_44x,
1594 .icache_bsize = 32, 1594 .icache_bsize = 32,
1595 .dcache_bsize = 32, 1595 .dcache_bsize = 32,
1596 .cpu_setup = __setup_cpu_440grx, 1596 .cpu_setup = __setup_cpu_440grx,
1597 .machine_check = machine_check_440A, 1597 .machine_check = machine_check_440A,
1598 .platform = "ppc440", 1598 .platform = "ppc440",
1599 }, 1599 },
1600 { /* Use logical PVR for 440EPx (logical pvr = pvr | 0x8) */ 1600 { /* Use logical PVR for 440EPx (logical pvr = pvr | 0x8) */
1601 .pvr_mask = 0xf0000ffb, 1601 .pvr_mask = 0xf0000ffb,
1602 .pvr_value = 0x200008D8, 1602 .pvr_value = 0x200008D8,
1603 .cpu_name = "440EPX", 1603 .cpu_name = "440EPX",
1604 .cpu_features = CPU_FTRS_44X, 1604 .cpu_features = CPU_FTRS_44X,
1605 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, 1605 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1606 .mmu_features = MMU_FTR_TYPE_44x, 1606 .mmu_features = MMU_FTR_TYPE_44x,
1607 .icache_bsize = 32, 1607 .icache_bsize = 32,
1608 .dcache_bsize = 32, 1608 .dcache_bsize = 32,
1609 .cpu_setup = __setup_cpu_440epx, 1609 .cpu_setup = __setup_cpu_440epx,
1610 .machine_check = machine_check_440A, 1610 .machine_check = machine_check_440A,
1611 .platform = "ppc440", 1611 .platform = "ppc440",
1612 }, 1612 },
1613 { /* 440GP Rev. B */ 1613 { /* 440GP Rev. B */
1614 .pvr_mask = 0xf0000fff, 1614 .pvr_mask = 0xf0000fff,
1615 .pvr_value = 0x40000440, 1615 .pvr_value = 0x40000440,
1616 .cpu_name = "440GP Rev. B", 1616 .cpu_name = "440GP Rev. B",
1617 .cpu_features = CPU_FTRS_44X, 1617 .cpu_features = CPU_FTRS_44X,
1618 .cpu_user_features = COMMON_USER_BOOKE, 1618 .cpu_user_features = COMMON_USER_BOOKE,
1619 .mmu_features = MMU_FTR_TYPE_44x, 1619 .mmu_features = MMU_FTR_TYPE_44x,
1620 .icache_bsize = 32, 1620 .icache_bsize = 32,
1621 .dcache_bsize = 32, 1621 .dcache_bsize = 32,
1622 .machine_check = machine_check_4xx, 1622 .machine_check = machine_check_4xx,
1623 .platform = "ppc440gp", 1623 .platform = "ppc440gp",
1624 }, 1624 },
1625 { /* 440GP Rev. C */ 1625 { /* 440GP Rev. C */
1626 .pvr_mask = 0xf0000fff, 1626 .pvr_mask = 0xf0000fff,
1627 .pvr_value = 0x40000481, 1627 .pvr_value = 0x40000481,
1628 .cpu_name = "440GP Rev. C", 1628 .cpu_name = "440GP Rev. C",
1629 .cpu_features = CPU_FTRS_44X, 1629 .cpu_features = CPU_FTRS_44X,
1630 .cpu_user_features = COMMON_USER_BOOKE, 1630 .cpu_user_features = COMMON_USER_BOOKE,
1631 .mmu_features = MMU_FTR_TYPE_44x, 1631 .mmu_features = MMU_FTR_TYPE_44x,
1632 .icache_bsize = 32, 1632 .icache_bsize = 32,
1633 .dcache_bsize = 32, 1633 .dcache_bsize = 32,
1634 .machine_check = machine_check_4xx, 1634 .machine_check = machine_check_4xx,
1635 .platform = "ppc440gp", 1635 .platform = "ppc440gp",
1636 }, 1636 },
1637 { /* 440GX Rev. A */ 1637 { /* 440GX Rev. A */
1638 .pvr_mask = 0xf0000fff, 1638 .pvr_mask = 0xf0000fff,
1639 .pvr_value = 0x50000850, 1639 .pvr_value = 0x50000850,
1640 .cpu_name = "440GX Rev. A", 1640 .cpu_name = "440GX Rev. A",
1641 .cpu_features = CPU_FTRS_44X, 1641 .cpu_features = CPU_FTRS_44X,
1642 .cpu_user_features = COMMON_USER_BOOKE, 1642 .cpu_user_features = COMMON_USER_BOOKE,
1643 .mmu_features = MMU_FTR_TYPE_44x, 1643 .mmu_features = MMU_FTR_TYPE_44x,
1644 .icache_bsize = 32, 1644 .icache_bsize = 32,
1645 .dcache_bsize = 32, 1645 .dcache_bsize = 32,
1646 .cpu_setup = __setup_cpu_440gx, 1646 .cpu_setup = __setup_cpu_440gx,
1647 .machine_check = machine_check_440A, 1647 .machine_check = machine_check_440A,
1648 .platform = "ppc440", 1648 .platform = "ppc440",
1649 }, 1649 },
1650 { /* 440GX Rev. B */ 1650 { /* 440GX Rev. B */
1651 .pvr_mask = 0xf0000fff, 1651 .pvr_mask = 0xf0000fff,
1652 .pvr_value = 0x50000851, 1652 .pvr_value = 0x50000851,
1653 .cpu_name = "440GX Rev. B", 1653 .cpu_name = "440GX Rev. B",
1654 .cpu_features = CPU_FTRS_44X, 1654 .cpu_features = CPU_FTRS_44X,
1655 .cpu_user_features = COMMON_USER_BOOKE, 1655 .cpu_user_features = COMMON_USER_BOOKE,
1656 .mmu_features = MMU_FTR_TYPE_44x, 1656 .mmu_features = MMU_FTR_TYPE_44x,
1657 .icache_bsize = 32, 1657 .icache_bsize = 32,
1658 .dcache_bsize = 32, 1658 .dcache_bsize = 32,
1659 .cpu_setup = __setup_cpu_440gx, 1659 .cpu_setup = __setup_cpu_440gx,
1660 .machine_check = machine_check_440A, 1660 .machine_check = machine_check_440A,
1661 .platform = "ppc440", 1661 .platform = "ppc440",
1662 }, 1662 },
1663 { /* 440GX Rev. C */ 1663 { /* 440GX Rev. C */
1664 .pvr_mask = 0xf0000fff, 1664 .pvr_mask = 0xf0000fff,
1665 .pvr_value = 0x50000892, 1665 .pvr_value = 0x50000892,
1666 .cpu_name = "440GX Rev. C", 1666 .cpu_name = "440GX Rev. C",
1667 .cpu_features = CPU_FTRS_44X, 1667 .cpu_features = CPU_FTRS_44X,
1668 .cpu_user_features = COMMON_USER_BOOKE, 1668 .cpu_user_features = COMMON_USER_BOOKE,
1669 .mmu_features = MMU_FTR_TYPE_44x, 1669 .mmu_features = MMU_FTR_TYPE_44x,
1670 .icache_bsize = 32, 1670 .icache_bsize = 32,
1671 .dcache_bsize = 32, 1671 .dcache_bsize = 32,
1672 .cpu_setup = __setup_cpu_440gx, 1672 .cpu_setup = __setup_cpu_440gx,
1673 .machine_check = machine_check_440A, 1673 .machine_check = machine_check_440A,
1674 .platform = "ppc440", 1674 .platform = "ppc440",
1675 }, 1675 },
1676 { /* 440GX Rev. F */ 1676 { /* 440GX Rev. F */
1677 .pvr_mask = 0xf0000fff, 1677 .pvr_mask = 0xf0000fff,
1678 .pvr_value = 0x50000894, 1678 .pvr_value = 0x50000894,
1679 .cpu_name = "440GX Rev. F", 1679 .cpu_name = "440GX Rev. F",
1680 .cpu_features = CPU_FTRS_44X, 1680 .cpu_features = CPU_FTRS_44X,
1681 .cpu_user_features = COMMON_USER_BOOKE, 1681 .cpu_user_features = COMMON_USER_BOOKE,
1682 .mmu_features = MMU_FTR_TYPE_44x, 1682 .mmu_features = MMU_FTR_TYPE_44x,
1683 .icache_bsize = 32, 1683 .icache_bsize = 32,
1684 .dcache_bsize = 32, 1684 .dcache_bsize = 32,
1685 .cpu_setup = __setup_cpu_440gx, 1685 .cpu_setup = __setup_cpu_440gx,
1686 .machine_check = machine_check_440A, 1686 .machine_check = machine_check_440A,
1687 .platform = "ppc440", 1687 .platform = "ppc440",
1688 }, 1688 },
1689 { /* 440SP Rev. A */ 1689 { /* 440SP Rev. A */
1690 .pvr_mask = 0xfff00fff, 1690 .pvr_mask = 0xfff00fff,
1691 .pvr_value = 0x53200891, 1691 .pvr_value = 0x53200891,
1692 .cpu_name = "440SP Rev. A", 1692 .cpu_name = "440SP Rev. A",
1693 .cpu_features = CPU_FTRS_44X, 1693 .cpu_features = CPU_FTRS_44X,
1694 .cpu_user_features = COMMON_USER_BOOKE, 1694 .cpu_user_features = COMMON_USER_BOOKE,
1695 .mmu_features = MMU_FTR_TYPE_44x, 1695 .mmu_features = MMU_FTR_TYPE_44x,
1696 .icache_bsize = 32, 1696 .icache_bsize = 32,
1697 .dcache_bsize = 32, 1697 .dcache_bsize = 32,
1698 .machine_check = machine_check_4xx, 1698 .machine_check = machine_check_4xx,
1699 .platform = "ppc440", 1699 .platform = "ppc440",
1700 }, 1700 },
1701 { /* 440SPe Rev. A */ 1701 { /* 440SPe Rev. A */
1702 .pvr_mask = 0xfff00fff, 1702 .pvr_mask = 0xfff00fff,
1703 .pvr_value = 0x53400890, 1703 .pvr_value = 0x53400890,
1704 .cpu_name = "440SPe Rev. A", 1704 .cpu_name = "440SPe Rev. A",
1705 .cpu_features = CPU_FTRS_44X, 1705 .cpu_features = CPU_FTRS_44X,
1706 .cpu_user_features = COMMON_USER_BOOKE, 1706 .cpu_user_features = COMMON_USER_BOOKE,
1707 .mmu_features = MMU_FTR_TYPE_44x, 1707 .mmu_features = MMU_FTR_TYPE_44x,
1708 .icache_bsize = 32, 1708 .icache_bsize = 32,
1709 .dcache_bsize = 32, 1709 .dcache_bsize = 32,
1710 .cpu_setup = __setup_cpu_440spe, 1710 .cpu_setup = __setup_cpu_440spe,
1711 .machine_check = machine_check_440A, 1711 .machine_check = machine_check_440A,
1712 .platform = "ppc440", 1712 .platform = "ppc440",
1713 }, 1713 },
1714 { /* 440SPe Rev. B */ 1714 { /* 440SPe Rev. B */
1715 .pvr_mask = 0xfff00fff, 1715 .pvr_mask = 0xfff00fff,
1716 .pvr_value = 0x53400891, 1716 .pvr_value = 0x53400891,
1717 .cpu_name = "440SPe Rev. B", 1717 .cpu_name = "440SPe Rev. B",
1718 .cpu_features = CPU_FTRS_44X, 1718 .cpu_features = CPU_FTRS_44X,
1719 .cpu_user_features = COMMON_USER_BOOKE, 1719 .cpu_user_features = COMMON_USER_BOOKE,
1720 .mmu_features = MMU_FTR_TYPE_44x, 1720 .mmu_features = MMU_FTR_TYPE_44x,
1721 .icache_bsize = 32, 1721 .icache_bsize = 32,
1722 .dcache_bsize = 32, 1722 .dcache_bsize = 32,
1723 .cpu_setup = __setup_cpu_440spe, 1723 .cpu_setup = __setup_cpu_440spe,
1724 .machine_check = machine_check_440A, 1724 .machine_check = machine_check_440A,
1725 .platform = "ppc440", 1725 .platform = "ppc440",
1726 }, 1726 },
1727 { /* 440 in Xilinx Virtex-5 FXT */ 1727 { /* 440 in Xilinx Virtex-5 FXT */
1728 .pvr_mask = 0xfffffff0, 1728 .pvr_mask = 0xfffffff0,
1729 .pvr_value = 0x7ff21910, 1729 .pvr_value = 0x7ff21910,
1730 .cpu_name = "440 in Virtex-5 FXT", 1730 .cpu_name = "440 in Virtex-5 FXT",
1731 .cpu_features = CPU_FTRS_44X, 1731 .cpu_features = CPU_FTRS_44X,
1732 .cpu_user_features = COMMON_USER_BOOKE, 1732 .cpu_user_features = COMMON_USER_BOOKE,
1733 .mmu_features = MMU_FTR_TYPE_44x, 1733 .mmu_features = MMU_FTR_TYPE_44x,
1734 .icache_bsize = 32, 1734 .icache_bsize = 32,
1735 .dcache_bsize = 32, 1735 .dcache_bsize = 32,
1736 .cpu_setup = __setup_cpu_440x5, 1736 .cpu_setup = __setup_cpu_440x5,
1737 .machine_check = machine_check_440A, 1737 .machine_check = machine_check_440A,
1738 .platform = "ppc440", 1738 .platform = "ppc440",
1739 }, 1739 },
1740 { /* 460EX */ 1740 { /* 460EX */
1741 .pvr_mask = 0xffff0006, 1741 .pvr_mask = 0xffff0006,
1742 .pvr_value = 0x13020002, 1742 .pvr_value = 0x13020002,
1743 .cpu_name = "460EX", 1743 .cpu_name = "460EX",
1744 .cpu_features = CPU_FTRS_440x6, 1744 .cpu_features = CPU_FTRS_440x6,
1745 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, 1745 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1746 .mmu_features = MMU_FTR_TYPE_44x, 1746 .mmu_features = MMU_FTR_TYPE_44x,
1747 .icache_bsize = 32, 1747 .icache_bsize = 32,
1748 .dcache_bsize = 32, 1748 .dcache_bsize = 32,
1749 .cpu_setup = __setup_cpu_460ex, 1749 .cpu_setup = __setup_cpu_460ex,
1750 .machine_check = machine_check_440A, 1750 .machine_check = machine_check_440A,
1751 .platform = "ppc440", 1751 .platform = "ppc440",
1752 }, 1752 },
1753 { /* 460EX Rev B */ 1753 { /* 460EX Rev B */
1754 .pvr_mask = 0xffff0007, 1754 .pvr_mask = 0xffff0007,
1755 .pvr_value = 0x13020004, 1755 .pvr_value = 0x13020004,
1756 .cpu_name = "460EX Rev. B", 1756 .cpu_name = "460EX Rev. B",
1757 .cpu_features = CPU_FTRS_440x6, 1757 .cpu_features = CPU_FTRS_440x6,
1758 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, 1758 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1759 .mmu_features = MMU_FTR_TYPE_44x, 1759 .mmu_features = MMU_FTR_TYPE_44x,
1760 .icache_bsize = 32, 1760 .icache_bsize = 32,
1761 .dcache_bsize = 32, 1761 .dcache_bsize = 32,
1762 .cpu_setup = __setup_cpu_460ex, 1762 .cpu_setup = __setup_cpu_460ex,
1763 .machine_check = machine_check_440A, 1763 .machine_check = machine_check_440A,
1764 .platform = "ppc440", 1764 .platform = "ppc440",
1765 }, 1765 },
1766 { /* 460GT */ 1766 { /* 460GT */
1767 .pvr_mask = 0xffff0006, 1767 .pvr_mask = 0xffff0006,
1768 .pvr_value = 0x13020000, 1768 .pvr_value = 0x13020000,
1769 .cpu_name = "460GT", 1769 .cpu_name = "460GT",
1770 .cpu_features = CPU_FTRS_440x6, 1770 .cpu_features = CPU_FTRS_440x6,
1771 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, 1771 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1772 .mmu_features = MMU_FTR_TYPE_44x, 1772 .mmu_features = MMU_FTR_TYPE_44x,
1773 .icache_bsize = 32, 1773 .icache_bsize = 32,
1774 .dcache_bsize = 32, 1774 .dcache_bsize = 32,
1775 .cpu_setup = __setup_cpu_460gt, 1775 .cpu_setup = __setup_cpu_460gt,
1776 .machine_check = machine_check_440A, 1776 .machine_check = machine_check_440A,
1777 .platform = "ppc440", 1777 .platform = "ppc440",
1778 }, 1778 },
1779 { /* 460GT Rev B */ 1779 { /* 460GT Rev B */
1780 .pvr_mask = 0xffff0007, 1780 .pvr_mask = 0xffff0007,
1781 .pvr_value = 0x13020005, 1781 .pvr_value = 0x13020005,
1782 .cpu_name = "460GT Rev. B", 1782 .cpu_name = "460GT Rev. B",
1783 .cpu_features = CPU_FTRS_440x6, 1783 .cpu_features = CPU_FTRS_440x6,
1784 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, 1784 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1785 .mmu_features = MMU_FTR_TYPE_44x, 1785 .mmu_features = MMU_FTR_TYPE_44x,
1786 .icache_bsize = 32, 1786 .icache_bsize = 32,
1787 .dcache_bsize = 32, 1787 .dcache_bsize = 32,
1788 .cpu_setup = __setup_cpu_460gt, 1788 .cpu_setup = __setup_cpu_460gt,
1789 .machine_check = machine_check_440A, 1789 .machine_check = machine_check_440A,
1790 .platform = "ppc440", 1790 .platform = "ppc440",
1791 }, 1791 },
1792 { /* 460SX */ 1792 { /* 460SX */
1793 .pvr_mask = 0xffffff00, 1793 .pvr_mask = 0xffffff00,
1794 .pvr_value = 0x13541800, 1794 .pvr_value = 0x13541800,
1795 .cpu_name = "460SX", 1795 .cpu_name = "460SX",
1796 .cpu_features = CPU_FTRS_44X, 1796 .cpu_features = CPU_FTRS_44X,
1797 .cpu_user_features = COMMON_USER_BOOKE, 1797 .cpu_user_features = COMMON_USER_BOOKE,
1798 .mmu_features = MMU_FTR_TYPE_44x, 1798 .mmu_features = MMU_FTR_TYPE_44x,
1799 .icache_bsize = 32, 1799 .icache_bsize = 32,
1800 .dcache_bsize = 32, 1800 .dcache_bsize = 32,
1801 .cpu_setup = __setup_cpu_460sx, 1801 .cpu_setup = __setup_cpu_460sx,
1802 .machine_check = machine_check_440A, 1802 .machine_check = machine_check_440A,
1803 .platform = "ppc440", 1803 .platform = "ppc440",
1804 }, 1804 },
1805 { /* 464 in APM821xx */ 1805 { /* 464 in APM821xx */
1806 .pvr_mask = 0xffffff00, 1806 .pvr_mask = 0xffffff00,
1807 .pvr_value = 0x12C41C80, 1807 .pvr_value = 0x12C41C80,
1808 .cpu_name = "APM821XX", 1808 .cpu_name = "APM821XX",
1809 .cpu_features = CPU_FTRS_44X, 1809 .cpu_features = CPU_FTRS_44X,
1810 .cpu_user_features = COMMON_USER_BOOKE | 1810 .cpu_user_features = COMMON_USER_BOOKE |
1811 PPC_FEATURE_HAS_FPU, 1811 PPC_FEATURE_HAS_FPU,
1812 .mmu_features = MMU_FTR_TYPE_44x, 1812 .mmu_features = MMU_FTR_TYPE_44x,
1813 .icache_bsize = 32, 1813 .icache_bsize = 32,
1814 .dcache_bsize = 32, 1814 .dcache_bsize = 32,
1815 .cpu_setup = __setup_cpu_apm821xx, 1815 .cpu_setup = __setup_cpu_apm821xx,
1816 .machine_check = machine_check_440A, 1816 .machine_check = machine_check_440A,
1817 .platform = "ppc440", 1817 .platform = "ppc440",
1818 }, 1818 },
1819 { /* 476 DD2 core */ 1819 { /* 476 DD2 core */
1820 .pvr_mask = 0xffffffff, 1820 .pvr_mask = 0xffffffff,
1821 .pvr_value = 0x11a52080, 1821 .pvr_value = 0x11a52080,
1822 .cpu_name = "476", 1822 .cpu_name = "476",
1823 .cpu_features = CPU_FTRS_47X | CPU_FTR_476_DD2, 1823 .cpu_features = CPU_FTRS_47X | CPU_FTR_476_DD2,
1824 .cpu_user_features = COMMON_USER_BOOKE | 1824 .cpu_user_features = COMMON_USER_BOOKE |
1825 PPC_FEATURE_HAS_FPU, 1825 PPC_FEATURE_HAS_FPU,
1826 .mmu_features = MMU_FTR_TYPE_47x | 1826 .mmu_features = MMU_FTR_TYPE_47x |
1827 MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL, 1827 MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
1828 .icache_bsize = 32, 1828 .icache_bsize = 32,
1829 .dcache_bsize = 128, 1829 .dcache_bsize = 128,
1830 .machine_check = machine_check_47x, 1830 .machine_check = machine_check_47x,
1831 .platform = "ppc470", 1831 .platform = "ppc470",
1832 }, 1832 },
1833 { /* 476 iss */ 1833 { /* 476 iss */
1834 .pvr_mask = 0xffff0000, 1834 .pvr_mask = 0xffff0000,
1835 .pvr_value = 0x00050000, 1835 .pvr_value = 0x00050000,
1836 .cpu_name = "476", 1836 .cpu_name = "476",
1837 .cpu_features = CPU_FTRS_47X, 1837 .cpu_features = CPU_FTRS_47X,
1838 .cpu_user_features = COMMON_USER_BOOKE | 1838 .cpu_user_features = COMMON_USER_BOOKE |
1839 PPC_FEATURE_HAS_FPU, 1839 PPC_FEATURE_HAS_FPU,
1840 .mmu_features = MMU_FTR_TYPE_47x | 1840 .mmu_features = MMU_FTR_TYPE_47x |
1841 MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL, 1841 MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
1842 .icache_bsize = 32, 1842 .icache_bsize = 32,
1843 .dcache_bsize = 128, 1843 .dcache_bsize = 128,
1844 .machine_check = machine_check_47x, 1844 .machine_check = machine_check_47x,
1845 .platform = "ppc470", 1845 .platform = "ppc470",
1846 }, 1846 },
1847 { /* 476 others */ 1847 { /* 476 others */
1848 .pvr_mask = 0xffff0000, 1848 .pvr_mask = 0xffff0000,
1849 .pvr_value = 0x11a50000, 1849 .pvr_value = 0x11a50000,
1850 .cpu_name = "476", 1850 .cpu_name = "476",
1851 .cpu_features = CPU_FTRS_47X, 1851 .cpu_features = CPU_FTRS_47X,
1852 .cpu_user_features = COMMON_USER_BOOKE | 1852 .cpu_user_features = COMMON_USER_BOOKE |
1853 PPC_FEATURE_HAS_FPU, 1853 PPC_FEATURE_HAS_FPU,
1854 .mmu_features = MMU_FTR_TYPE_47x | 1854 .mmu_features = MMU_FTR_TYPE_47x |
1855 MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL, 1855 MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
1856 .icache_bsize = 32, 1856 .icache_bsize = 32,
1857 .dcache_bsize = 128, 1857 .dcache_bsize = 128,
1858 .machine_check = machine_check_47x, 1858 .machine_check = machine_check_47x,
1859 .platform = "ppc470", 1859 .platform = "ppc470",
1860 }, 1860 },
1861 { /* default match */ 1861 { /* default match */
1862 .pvr_mask = 0x00000000, 1862 .pvr_mask = 0x00000000,
1863 .pvr_value = 0x00000000, 1863 .pvr_value = 0x00000000,
1864 .cpu_name = "(generic 44x PPC)", 1864 .cpu_name = "(generic 44x PPC)",
1865 .cpu_features = CPU_FTRS_44X, 1865 .cpu_features = CPU_FTRS_44X,
1866 .cpu_user_features = COMMON_USER_BOOKE, 1866 .cpu_user_features = COMMON_USER_BOOKE,
1867 .mmu_features = MMU_FTR_TYPE_44x, 1867 .mmu_features = MMU_FTR_TYPE_44x,
1868 .icache_bsize = 32, 1868 .icache_bsize = 32,
1869 .dcache_bsize = 32, 1869 .dcache_bsize = 32,
1870 .machine_check = machine_check_4xx, 1870 .machine_check = machine_check_4xx,
1871 .platform = "ppc440", 1871 .platform = "ppc440",
1872 } 1872 }
1873 #endif /* CONFIG_44x */ 1873 #endif /* CONFIG_44x */
1874 #ifdef CONFIG_E200 1874 #ifdef CONFIG_E200
1875 { /* e200z5 */ 1875 { /* e200z5 */
1876 .pvr_mask = 0xfff00000, 1876 .pvr_mask = 0xfff00000,
1877 .pvr_value = 0x81000000, 1877 .pvr_value = 0x81000000,
1878 .cpu_name = "e200z5", 1878 .cpu_name = "e200z5",
1879 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ 1879 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
1880 .cpu_features = CPU_FTRS_E200, 1880 .cpu_features = CPU_FTRS_E200,
1881 .cpu_user_features = COMMON_USER_BOOKE | 1881 .cpu_user_features = COMMON_USER_BOOKE |
1882 PPC_FEATURE_HAS_EFP_SINGLE | 1882 PPC_FEATURE_HAS_EFP_SINGLE |
1883 PPC_FEATURE_UNIFIED_CACHE, 1883 PPC_FEATURE_UNIFIED_CACHE,
1884 .mmu_features = MMU_FTR_TYPE_FSL_E, 1884 .mmu_features = MMU_FTR_TYPE_FSL_E,
1885 .dcache_bsize = 32, 1885 .dcache_bsize = 32,
1886 .machine_check = machine_check_e200, 1886 .machine_check = machine_check_e200,
1887 .platform = "ppc5554", 1887 .platform = "ppc5554",
1888 }, 1888 },
1889 { /* e200z6 */ 1889 { /* e200z6 */
1890 .pvr_mask = 0xfff00000, 1890 .pvr_mask = 0xfff00000,
1891 .pvr_value = 0x81100000, 1891 .pvr_value = 0x81100000,
1892 .cpu_name = "e200z6", 1892 .cpu_name = "e200z6",
1893 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ 1893 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
1894 .cpu_features = CPU_FTRS_E200, 1894 .cpu_features = CPU_FTRS_E200,
1895 .cpu_user_features = COMMON_USER_BOOKE | 1895 .cpu_user_features = COMMON_USER_BOOKE |
1896 PPC_FEATURE_HAS_SPE_COMP | 1896 PPC_FEATURE_HAS_SPE_COMP |
1897 PPC_FEATURE_HAS_EFP_SINGLE_COMP | 1897 PPC_FEATURE_HAS_EFP_SINGLE_COMP |
1898 PPC_FEATURE_UNIFIED_CACHE, 1898 PPC_FEATURE_UNIFIED_CACHE,
1899 .mmu_features = MMU_FTR_TYPE_FSL_E, 1899 .mmu_features = MMU_FTR_TYPE_FSL_E,
1900 .dcache_bsize = 32, 1900 .dcache_bsize = 32,
1901 .machine_check = machine_check_e200, 1901 .machine_check = machine_check_e200,
1902 .platform = "ppc5554", 1902 .platform = "ppc5554",
1903 }, 1903 },
1904 { /* default match */ 1904 { /* default match */
1905 .pvr_mask = 0x00000000, 1905 .pvr_mask = 0x00000000,
1906 .pvr_value = 0x00000000, 1906 .pvr_value = 0x00000000,
1907 .cpu_name = "(generic E200 PPC)", 1907 .cpu_name = "(generic E200 PPC)",
1908 .cpu_features = CPU_FTRS_E200, 1908 .cpu_features = CPU_FTRS_E200,
1909 .cpu_user_features = COMMON_USER_BOOKE | 1909 .cpu_user_features = COMMON_USER_BOOKE |
1910 PPC_FEATURE_HAS_EFP_SINGLE | 1910 PPC_FEATURE_HAS_EFP_SINGLE |
1911 PPC_FEATURE_UNIFIED_CACHE, 1911 PPC_FEATURE_UNIFIED_CACHE,
1912 .mmu_features = MMU_FTR_TYPE_FSL_E, 1912 .mmu_features = MMU_FTR_TYPE_FSL_E,
1913 .dcache_bsize = 32, 1913 .dcache_bsize = 32,
1914 .cpu_setup = __setup_cpu_e200, 1914 .cpu_setup = __setup_cpu_e200,
1915 .machine_check = machine_check_e200, 1915 .machine_check = machine_check_e200,
1916 .platform = "ppc5554", 1916 .platform = "ppc5554",
1917 } 1917 }
1918 #endif /* CONFIG_E200 */ 1918 #endif /* CONFIG_E200 */
1919 #endif /* CONFIG_PPC32 */ 1919 #endif /* CONFIG_PPC32 */
1920 #ifdef CONFIG_E500 1920 #ifdef CONFIG_E500
1921 #ifdef CONFIG_PPC32 1921 #ifdef CONFIG_PPC32
1922 { /* e500 */ 1922 { /* e500 */
1923 .pvr_mask = 0xffff0000, 1923 .pvr_mask = 0xffff0000,
1924 .pvr_value = 0x80200000, 1924 .pvr_value = 0x80200000,
1925 .cpu_name = "e500", 1925 .cpu_name = "e500",
1926 .cpu_features = CPU_FTRS_E500, 1926 .cpu_features = CPU_FTRS_E500,
1927 .cpu_user_features = COMMON_USER_BOOKE | 1927 .cpu_user_features = COMMON_USER_BOOKE |
1928 PPC_FEATURE_HAS_SPE_COMP | 1928 PPC_FEATURE_HAS_SPE_COMP |
1929 PPC_FEATURE_HAS_EFP_SINGLE_COMP, 1929 PPC_FEATURE_HAS_EFP_SINGLE_COMP,
1930 .mmu_features = MMU_FTR_TYPE_FSL_E, 1930 .mmu_features = MMU_FTR_TYPE_FSL_E,
1931 .icache_bsize = 32, 1931 .icache_bsize = 32,
1932 .dcache_bsize = 32, 1932 .dcache_bsize = 32,
1933 .num_pmcs = 4, 1933 .num_pmcs = 4,
1934 .oprofile_cpu_type = "ppc/e500", 1934 .oprofile_cpu_type = "ppc/e500",
1935 .oprofile_type = PPC_OPROFILE_FSL_EMB, 1935 .oprofile_type = PPC_OPROFILE_FSL_EMB,
1936 .cpu_setup = __setup_cpu_e500v1, 1936 .cpu_setup = __setup_cpu_e500v1,
1937 .machine_check = machine_check_e500, 1937 .machine_check = machine_check_e500,
1938 .platform = "ppc8540", 1938 .platform = "ppc8540",
1939 }, 1939 },
1940 { /* e500v2 */ 1940 { /* e500v2 */
1941 .pvr_mask = 0xffff0000, 1941 .pvr_mask = 0xffff0000,
1942 .pvr_value = 0x80210000, 1942 .pvr_value = 0x80210000,
1943 .cpu_name = "e500v2", 1943 .cpu_name = "e500v2",
1944 .cpu_features = CPU_FTRS_E500_2, 1944 .cpu_features = CPU_FTRS_E500_2,
1945 .cpu_user_features = COMMON_USER_BOOKE | 1945 .cpu_user_features = COMMON_USER_BOOKE |
1946 PPC_FEATURE_HAS_SPE_COMP | 1946 PPC_FEATURE_HAS_SPE_COMP |
1947 PPC_FEATURE_HAS_EFP_SINGLE_COMP | 1947 PPC_FEATURE_HAS_EFP_SINGLE_COMP |
1948 PPC_FEATURE_HAS_EFP_DOUBLE_COMP, 1948 PPC_FEATURE_HAS_EFP_DOUBLE_COMP,
1949 .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS, 1949 .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS,
1950 .icache_bsize = 32, 1950 .icache_bsize = 32,
1951 .dcache_bsize = 32, 1951 .dcache_bsize = 32,
1952 .num_pmcs = 4, 1952 .num_pmcs = 4,
1953 .oprofile_cpu_type = "ppc/e500", 1953 .oprofile_cpu_type = "ppc/e500",
1954 .oprofile_type = PPC_OPROFILE_FSL_EMB, 1954 .oprofile_type = PPC_OPROFILE_FSL_EMB,
1955 .cpu_setup = __setup_cpu_e500v2, 1955 .cpu_setup = __setup_cpu_e500v2,
1956 .machine_check = machine_check_e500, 1956 .machine_check = machine_check_e500,
1957 .platform = "ppc8548", 1957 .platform = "ppc8548",
1958 }, 1958 },
1959 { /* e500mc */ 1959 { /* e500mc */
1960 .pvr_mask = 0xffff0000, 1960 .pvr_mask = 0xffff0000,
1961 .pvr_value = 0x80230000, 1961 .pvr_value = 0x80230000,
1962 .cpu_name = "e500mc", 1962 .cpu_name = "e500mc",
1963 .cpu_features = CPU_FTRS_E500MC, 1963 .cpu_features = CPU_FTRS_E500MC,
1964 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, 1964 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1965 .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | 1965 .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
1966 MMU_FTR_USE_TLBILX, 1966 MMU_FTR_USE_TLBILX,
1967 .icache_bsize = 64, 1967 .icache_bsize = 64,
1968 .dcache_bsize = 64, 1968 .dcache_bsize = 64,
1969 .num_pmcs = 4, 1969 .num_pmcs = 4,
1970 .oprofile_cpu_type = "ppc/e500mc", 1970 .oprofile_cpu_type = "ppc/e500mc",
1971 .oprofile_type = PPC_OPROFILE_FSL_EMB, 1971 .oprofile_type = PPC_OPROFILE_FSL_EMB,
1972 .cpu_setup = __setup_cpu_e500mc, 1972 .cpu_setup = __setup_cpu_e500mc,
1973 .machine_check = machine_check_e500mc, 1973 .machine_check = machine_check_e500mc,
1974 .platform = "ppce500mc", 1974 .platform = "ppce500mc",
1975 }, 1975 },
1976 #endif /* CONFIG_PPC32 */ 1976 #endif /* CONFIG_PPC32 */
1977 { /* e5500 */ 1977 { /* e5500 */
1978 .pvr_mask = 0xffff0000, 1978 .pvr_mask = 0xffff0000,
1979 .pvr_value = 0x80240000, 1979 .pvr_value = 0x80240000,
1980 .cpu_name = "e5500", 1980 .cpu_name = "e5500",
1981 .cpu_features = CPU_FTRS_E5500, 1981 .cpu_features = CPU_FTRS_E5500,
1982 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, 1982 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1983 .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | 1983 .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
1984 MMU_FTR_USE_TLBILX, 1984 MMU_FTR_USE_TLBILX,
1985 .icache_bsize = 64, 1985 .icache_bsize = 64,
1986 .dcache_bsize = 64, 1986 .dcache_bsize = 64,
1987 .num_pmcs = 4, 1987 .num_pmcs = 4,
1988 .oprofile_cpu_type = "ppc/e500mc", 1988 .oprofile_cpu_type = "ppc/e500mc",
1989 .oprofile_type = PPC_OPROFILE_FSL_EMB, 1989 .oprofile_type = PPC_OPROFILE_FSL_EMB,
1990 .cpu_setup = __setup_cpu_e5500, 1990 .cpu_setup = __setup_cpu_e5500,
1991 .cpu_restore = __restore_cpu_e5500, 1991 .cpu_restore = __restore_cpu_e5500,
1992 .machine_check = machine_check_e500mc, 1992 .machine_check = machine_check_e500mc,
1993 .platform = "ppce5500", 1993 .platform = "ppce5500",
1994 }, 1994 },
1995 #ifdef CONFIG_PPC32 1995 #ifdef CONFIG_PPC32
1996 { /* default match */ 1996 { /* default match */
1997 .pvr_mask = 0x00000000, 1997 .pvr_mask = 0x00000000,
1998 .pvr_value = 0x00000000, 1998 .pvr_value = 0x00000000,
1999 .cpu_name = "(generic E500 PPC)", 1999 .cpu_name = "(generic E500 PPC)",
2000 .cpu_features = CPU_FTRS_E500, 2000 .cpu_features = CPU_FTRS_E500,
2001 .cpu_user_features = COMMON_USER_BOOKE | 2001 .cpu_user_features = COMMON_USER_BOOKE |
2002 PPC_FEATURE_HAS_SPE_COMP | 2002 PPC_FEATURE_HAS_SPE_COMP |
2003 PPC_FEATURE_HAS_EFP_SINGLE_COMP, 2003 PPC_FEATURE_HAS_EFP_SINGLE_COMP,
2004 .mmu_features = MMU_FTR_TYPE_FSL_E, 2004 .mmu_features = MMU_FTR_TYPE_FSL_E,
2005 .icache_bsize = 32, 2005 .icache_bsize = 32,
2006 .dcache_bsize = 32, 2006 .dcache_bsize = 32,
2007 .machine_check = machine_check_e500, 2007 .machine_check = machine_check_e500,
2008 .platform = "powerpc", 2008 .platform = "powerpc",
2009 } 2009 }
2010 #endif /* CONFIG_PPC32 */ 2010 #endif /* CONFIG_PPC32 */
2011 #endif /* CONFIG_E500 */ 2011 #endif /* CONFIG_E500 */
2012 2012
2013 #ifdef CONFIG_PPC_A2 2013 #ifdef CONFIG_PPC_A2
2014 { /* Standard A2 (>= DD2) + FPU core */ 2014 { /* Standard A2 (>= DD2) + FPU core */
2015 .pvr_mask = 0xffff0000, 2015 .pvr_mask = 0xffff0000,
2016 .pvr_value = 0x00480000, 2016 .pvr_value = 0x00480000,
2017 .cpu_name = "A2 (>= DD2)", 2017 .cpu_name = "A2 (>= DD2)",
2018 .cpu_features = CPU_FTRS_A2, 2018 .cpu_features = CPU_FTRS_A2,
2019 .cpu_user_features = COMMON_USER_PPC64, 2019 .cpu_user_features = COMMON_USER_PPC64,
2020 .mmu_features = MMU_FTRS_A2, 2020 .mmu_features = MMU_FTRS_A2,
2021 .icache_bsize = 64, 2021 .icache_bsize = 64,
2022 .dcache_bsize = 64, 2022 .dcache_bsize = 64,
2023 .num_pmcs = 0, 2023 .num_pmcs = 0,
2024 .cpu_setup = __setup_cpu_a2, 2024 .cpu_setup = __setup_cpu_a2,
2025 .cpu_restore = __restore_cpu_a2, 2025 .cpu_restore = __restore_cpu_a2,
2026 .machine_check = machine_check_generic, 2026 .machine_check = machine_check_generic,
2027 .platform = "ppca2", 2027 .platform = "ppca2",
2028 }, 2028 },
2029 { /* This is a default entry to get going, to be replaced by 2029 { /* This is a default entry to get going, to be replaced by
2030 * a real one at some stage 2030 * a real one at some stage
2031 */ 2031 */
2032 #define CPU_FTRS_BASE_BOOK3E (CPU_FTR_USE_TB | \ 2032 #define CPU_FTRS_BASE_BOOK3E (CPU_FTR_USE_TB | \
2033 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_SMT | \ 2033 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_SMT | \
2034 CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) 2034 CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
2035 .pvr_mask = 0x00000000, 2035 .pvr_mask = 0x00000000,
2036 .pvr_value = 0x00000000, 2036 .pvr_value = 0x00000000,
2037 .cpu_name = "Book3E", 2037 .cpu_name = "Book3E",
2038 .cpu_features = CPU_FTRS_BASE_BOOK3E, 2038 .cpu_features = CPU_FTRS_BASE_BOOK3E,
2039 .cpu_user_features = COMMON_USER_PPC64, 2039 .cpu_user_features = COMMON_USER_PPC64,
2040 .mmu_features = MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX | 2040 .mmu_features = MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX |
2041 MMU_FTR_USE_TLBIVAX_BCAST | 2041 MMU_FTR_USE_TLBIVAX_BCAST |
2042 MMU_FTR_LOCK_BCAST_INVAL, 2042 MMU_FTR_LOCK_BCAST_INVAL,
2043 .icache_bsize = 64, 2043 .icache_bsize = 64,
2044 .dcache_bsize = 64, 2044 .dcache_bsize = 64,
2045 .num_pmcs = 0, 2045 .num_pmcs = 0,
2046 .machine_check = machine_check_generic, 2046 .machine_check = machine_check_generic,
2047 .platform = "power6", 2047 .platform = "power6",
2048 }, 2048 },
2049 #endif /* CONFIG_PPC_A2 */ 2049 #endif /* CONFIG_PPC_A2 */
2050 }; 2050 };
2051 2051
2052 static struct cpu_spec the_cpu_spec; 2052 static struct cpu_spec the_cpu_spec;
2053 2053
2054 static struct cpu_spec * __init setup_cpu_spec(unsigned long offset, 2054 static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
2055 struct cpu_spec *s) 2055 struct cpu_spec *s)
2056 { 2056 {
2057 struct cpu_spec *t = &the_cpu_spec; 2057 struct cpu_spec *t = &the_cpu_spec;
2058 struct cpu_spec old; 2058 struct cpu_spec old;
2059 2059
2060 t = PTRRELOC(t); 2060 t = PTRRELOC(t);
2061 old = *t; 2061 old = *t;
2062 2062
2063 /* Copy everything, then do fixups */ 2063 /* Copy everything, then do fixups */
2064 *t = *s; 2064 *t = *s;
2065 2065
2066 /* 2066 /*
2067 * If we are overriding a previous value derived from the real 2067 * If we are overriding a previous value derived from the real
2068 * PVR with a new value obtained using a logical PVR value, 2068 * PVR with a new value obtained using a logical PVR value,
2069 * don't modify the performance monitor fields. 2069 * don't modify the performance monitor fields.
2070 */ 2070 */
2071 if (old.num_pmcs && !s->num_pmcs) { 2071 if (old.num_pmcs && !s->num_pmcs) {
2072 t->num_pmcs = old.num_pmcs; 2072 t->num_pmcs = old.num_pmcs;
2073 t->pmc_type = old.pmc_type; 2073 t->pmc_type = old.pmc_type;
2074 t->oprofile_type = old.oprofile_type; 2074 t->oprofile_type = old.oprofile_type;
2075 t->oprofile_mmcra_sihv = old.oprofile_mmcra_sihv; 2075 t->oprofile_mmcra_sihv = old.oprofile_mmcra_sihv;
2076 t->oprofile_mmcra_sipr = old.oprofile_mmcra_sipr; 2076 t->oprofile_mmcra_sipr = old.oprofile_mmcra_sipr;
2077 t->oprofile_mmcra_clear = old.oprofile_mmcra_clear; 2077 t->oprofile_mmcra_clear = old.oprofile_mmcra_clear;
2078 2078
2079 /* 2079 /*
2080 * If we have passed through this logic once before and 2080 * If we have passed through this logic once before and
2081 * have pulled the default case because the real PVR was 2081 * have pulled the default case because the real PVR was
2082 * not found inside cpu_specs[], then we are possibly 2082 * not found inside cpu_specs[], then we are possibly
2083 * running in compatibility mode. In that case, let the 2083 * running in compatibility mode. In that case, let the
2084 * oprofiler know which set of compatibility counters to 2084 * oprofiler know which set of compatibility counters to
2085 * pull from by making sure the oprofile_cpu_type string 2085 * pull from by making sure the oprofile_cpu_type string
2086 * is set to that of compatibility mode. If the 2086 * is set to that of compatibility mode. If the
2087 * oprofile_cpu_type already has a value, then we are 2087 * oprofile_cpu_type already has a value, then we are
2088 * possibly overriding a real PVR with a logical one, 2088 * possibly overriding a real PVR with a logical one,
2089 * and, in that case, keep the current value for 2089 * and, in that case, keep the current value for
2090 * oprofile_cpu_type. 2090 * oprofile_cpu_type.
2091 */ 2091 */
2092 if (old.oprofile_cpu_type != NULL) { 2092 if (old.oprofile_cpu_type != NULL) {
2093 t->oprofile_cpu_type = old.oprofile_cpu_type; 2093 t->oprofile_cpu_type = old.oprofile_cpu_type;
2094 t->oprofile_type = old.oprofile_type; 2094 t->oprofile_type = old.oprofile_type;
2095 } 2095 }
2096 } 2096 }
2097 2097
2098 *PTRRELOC(&cur_cpu_spec) = &the_cpu_spec; 2098 *PTRRELOC(&cur_cpu_spec) = &the_cpu_spec;
2099 2099
2100 /* 2100 /*
2101 * Set the base platform string once; assumes 2101 * Set the base platform string once; assumes
2102 * we're called with real pvr first. 2102 * we're called with real pvr first.
2103 */ 2103 */
2104 if (*PTRRELOC(&powerpc_base_platform) == NULL) 2104 if (*PTRRELOC(&powerpc_base_platform) == NULL)
2105 *PTRRELOC(&powerpc_base_platform) = t->platform; 2105 *PTRRELOC(&powerpc_base_platform) = t->platform;
2106 2106
2107 #if defined(CONFIG_PPC64) || defined(CONFIG_BOOKE) 2107 #if defined(CONFIG_PPC64) || defined(CONFIG_BOOKE)
2108 /* ppc64 and booke expect identify_cpu to also call setup_cpu for 2108 /* ppc64 and booke expect identify_cpu to also call setup_cpu for
2109 * that processor. I will consolidate that at a later time, for now, 2109 * that processor. I will consolidate that at a later time, for now,
2110 * just use #ifdef. We also don't need to PTRRELOC the function 2110 * just use #ifdef. We also don't need to PTRRELOC the function
2111 * pointer on ppc64 and booke as we are running at 0 in real mode 2111 * pointer on ppc64 and booke as we are running at 0 in real mode
2112 * on ppc64 and reloc_offset is always 0 on booke. 2112 * on ppc64 and reloc_offset is always 0 on booke.
2113 */ 2113 */
2114 if (t->cpu_setup) { 2114 if (t->cpu_setup) {
2115 t->cpu_setup(offset, t); 2115 t->cpu_setup(offset, t);
2116 } 2116 }
2117 #endif /* CONFIG_PPC64 || CONFIG_BOOKE */ 2117 #endif /* CONFIG_PPC64 || CONFIG_BOOKE */
2118 2118
2119 return t; 2119 return t;
2120 } 2120 }
2121 2121
2122 struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr) 2122 struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
2123 { 2123 {
2124 struct cpu_spec *s = cpu_specs; 2124 struct cpu_spec *s = cpu_specs;
2125 int i; 2125 int i;
2126 2126
2127 s = PTRRELOC(s); 2127 s = PTRRELOC(s);
2128 2128
2129 for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) { 2129 for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) {
2130 if ((pvr & s->pvr_mask) == s->pvr_value) 2130 if ((pvr & s->pvr_mask) == s->pvr_value)
2131 return setup_cpu_spec(offset, s); 2131 return setup_cpu_spec(offset, s);
2132 } 2132 }
2133 2133
2134 BUG(); 2134 BUG();
2135 2135
2136 return NULL; 2136 return NULL;
2137 } 2137 }
2138 2138
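A minimal, standalone sketch (not kernel code) of the first-match lookup that identify_cpu() above performs over cpu_specs[]: an entry matches when (pvr & pvr_mask) == pvr_value, and the all-zero default entry catches anything that falls through. The mask/value pairs are copied from the table above; the probed PVR in main() is a made-up example.

#include <stdio.h>

struct spec {
	unsigned int pvr_mask;
	unsigned int pvr_value;
	const char *cpu_name;
};

static const struct spec specs[] = {
	{ 0xffff0000, 0x51210000, "405EP" },
	{ 0xf0000fff, 0x40000850, "440GR Rev. A" },
	{ 0x00000000, 0x00000000, "(generic 40x PPC)" },	/* default match */
};

/* Return the first entry whose masked PVR equals its pvr_value. */
static const struct spec *identify(unsigned int pvr)
{
	size_t i;

	for (i = 0; i < sizeof(specs) / sizeof(specs[0]); i++)
		if ((pvr & specs[i].pvr_mask) == specs[i].pvr_value)
			return &specs[i];
	return NULL;	/* unreachable while a default entry is present */
}

int main(void)
{
	unsigned int pvr = 0x51210950;	/* hypothetical 405EP revision */

	printf("PVR 0x%08x -> %s\n", pvr, identify(pvr)->cpu_name);
	return 0;
}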
arch/powerpc/kernel/init_task.c
1 #include <linux/mm.h> 1 #include <linux/mm.h>
2 #include <linux/module.h> 2 #include <linux/export.h>
3 #include <linux/sched.h> 3 #include <linux/sched.h>
4 #include <linux/init.h> 4 #include <linux/init.h>
5 #include <linux/init_task.h> 5 #include <linux/init_task.h>
6 #include <linux/fs.h> 6 #include <linux/fs.h>
7 #include <linux/mqueue.h> 7 #include <linux/mqueue.h>
8 #include <asm/uaccess.h> 8 #include <asm/uaccess.h>
9 9
10 static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 10 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
11 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 11 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
12 /* 12 /*
13 * Initial thread structure. 13 * Initial thread structure.
14 * 14 *
15 * We need to make sure that this is 16384-byte aligned due to the 15 * We need to make sure that this is 16384-byte aligned due to the
16 * way process stacks are handled. This is done by having a special 16 * way process stacks are handled. This is done by having a special
17 * "init_task" linker map entry.. 17 * "init_task" linker map entry..
18 */ 18 */
19 union thread_union init_thread_union __init_task_data = 19 union thread_union init_thread_union __init_task_data =
20 { INIT_THREAD_INFO(init_task) }; 20 { INIT_THREAD_INFO(init_task) };
21 21
22 /* 22 /*
23 * Initial task structure. 23 * Initial task structure.
24 * 24 *
25 * All other task structs will be allocated on slabs in fork.c 25 * All other task structs will be allocated on slabs in fork.c
26 */ 26 */
27 struct task_struct init_task = INIT_TASK(init_task); 27 struct task_struct init_task = INIT_TASK(init_task);
28 28
29 EXPORT_SYMBOL(init_task); 29 EXPORT_SYMBOL(init_task);
30 30
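The EXPORT_SYMBOL(init_task) above is what makes the statically allocated initial task visible to loadable modules. A hedged module-side sketch of picking the symbol up (the module itself is illustrative and not part of this commit):

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>

/* init_task is resolved at module load time through the exported symbol. */
static int __init init_task_peek_init(void)
{
        pr_info("init_task: pid %d, comm %s\n", init_task.pid, init_task.comm);
        return 0;
}

static void __exit init_task_peek_exit(void)
{
}

module_init(init_task_peek_init);
module_exit(init_task_peek_exit);
MODULE_LICENSE("GPL");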
arch/powerpc/kernel/io.c
1 /* 1 /*
2 * I/O string operations 2 * I/O string operations
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Copyright (C) 2006 IBM Corporation 4 * Copyright (C) 2006 IBM Corporation
5 * 5 *
6 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu) 6 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
7 * and Paul Mackerras. 7 * and Paul Mackerras.
8 * 8 *
9 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com) 9 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
10 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com) 10 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
11 * 11 *
12 * Rewritten in C by Stephen Rothwell. 12 * Rewritten in C by Stephen Rothwell.
13 * 13 *
14 * This program is free software; you can redistribute it and/or 14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License 15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version 16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version. 17 * 2 of the License, or (at your option) any later version.
18 */ 18 */
19 #include <linux/kernel.h> 19 #include <linux/kernel.h>
20 #include <linux/types.h> 20 #include <linux/types.h>
21 #include <linux/compiler.h> 21 #include <linux/compiler.h>
22 #include <linux/module.h> 22 #include <linux/export.h>
23 23
24 #include <asm/io.h> 24 #include <asm/io.h>
25 #include <asm/firmware.h> 25 #include <asm/firmware.h>
26 #include <asm/bug.h> 26 #include <asm/bug.h>
27 27
28 void _insb(const volatile u8 __iomem *port, void *buf, long count) 28 void _insb(const volatile u8 __iomem *port, void *buf, long count)
29 { 29 {
30 u8 *tbuf = buf; 30 u8 *tbuf = buf;
31 u8 tmp; 31 u8 tmp;
32 32
33 if (unlikely(count <= 0)) 33 if (unlikely(count <= 0))
34 return; 34 return;
35 asm volatile("sync"); 35 asm volatile("sync");
36 do { 36 do {
37 tmp = *port; 37 tmp = *port;
38 eieio(); 38 eieio();
39 *tbuf++ = tmp; 39 *tbuf++ = tmp;
40 } while (--count != 0); 40 } while (--count != 0);
41 asm volatile("twi 0,%0,0; isync" : : "r" (tmp)); 41 asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
42 } 42 }
43 EXPORT_SYMBOL(_insb); 43 EXPORT_SYMBOL(_insb);
44 44
45 void _outsb(volatile u8 __iomem *port, const void *buf, long count) 45 void _outsb(volatile u8 __iomem *port, const void *buf, long count)
46 { 46 {
47 const u8 *tbuf = buf; 47 const u8 *tbuf = buf;
48 48
49 if (unlikely(count <= 0)) 49 if (unlikely(count <= 0))
50 return; 50 return;
51 asm volatile("sync"); 51 asm volatile("sync");
52 do { 52 do {
53 *port = *tbuf++; 53 *port = *tbuf++;
54 } while (--count != 0); 54 } while (--count != 0);
55 asm volatile("sync"); 55 asm volatile("sync");
56 } 56 }
57 EXPORT_SYMBOL(_outsb); 57 EXPORT_SYMBOL(_outsb);
58 58
59 void _insw_ns(const volatile u16 __iomem *port, void *buf, long count) 59 void _insw_ns(const volatile u16 __iomem *port, void *buf, long count)
60 { 60 {
61 u16 *tbuf = buf; 61 u16 *tbuf = buf;
62 u16 tmp; 62 u16 tmp;
63 63
64 if (unlikely(count <= 0)) 64 if (unlikely(count <= 0))
65 return; 65 return;
66 asm volatile("sync"); 66 asm volatile("sync");
67 do { 67 do {
68 tmp = *port; 68 tmp = *port;
69 eieio(); 69 eieio();
70 *tbuf++ = tmp; 70 *tbuf++ = tmp;
71 } while (--count != 0); 71 } while (--count != 0);
72 asm volatile("twi 0,%0,0; isync" : : "r" (tmp)); 72 asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
73 } 73 }
74 EXPORT_SYMBOL(_insw_ns); 74 EXPORT_SYMBOL(_insw_ns);
75 75
76 void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count) 76 void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count)
77 { 77 {
78 const u16 *tbuf = buf; 78 const u16 *tbuf = buf;
79 79
80 if (unlikely(count <= 0)) 80 if (unlikely(count <= 0))
81 return; 81 return;
82 asm volatile("sync"); 82 asm volatile("sync");
83 do { 83 do {
84 *port = *tbuf++; 84 *port = *tbuf++;
85 } while (--count != 0); 85 } while (--count != 0);
86 asm volatile("sync"); 86 asm volatile("sync");
87 } 87 }
88 EXPORT_SYMBOL(_outsw_ns); 88 EXPORT_SYMBOL(_outsw_ns);
89 89
90 void _insl_ns(const volatile u32 __iomem *port, void *buf, long count) 90 void _insl_ns(const volatile u32 __iomem *port, void *buf, long count)
91 { 91 {
92 u32 *tbuf = buf; 92 u32 *tbuf = buf;
93 u32 tmp; 93 u32 tmp;
94 94
95 if (unlikely(count <= 0)) 95 if (unlikely(count <= 0))
96 return; 96 return;
97 asm volatile("sync"); 97 asm volatile("sync");
98 do { 98 do {
99 tmp = *port; 99 tmp = *port;
100 eieio(); 100 eieio();
101 *tbuf++ = tmp; 101 *tbuf++ = tmp;
102 } while (--count != 0); 102 } while (--count != 0);
103 asm volatile("twi 0,%0,0; isync" : : "r" (tmp)); 103 asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
104 } 104 }
105 EXPORT_SYMBOL(_insl_ns); 105 EXPORT_SYMBOL(_insl_ns);
106 106
107 void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count) 107 void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count)
108 { 108 {
109 const u32 *tbuf = buf; 109 const u32 *tbuf = buf;
110 110
111 if (unlikely(count <= 0)) 111 if (unlikely(count <= 0))
112 return; 112 return;
113 asm volatile("sync"); 113 asm volatile("sync");
114 do { 114 do {
115 *port = *tbuf++; 115 *port = *tbuf++;
116 } while (--count != 0); 116 } while (--count != 0);
117 asm volatile("sync"); 117 asm volatile("sync");
118 } 118 }
119 EXPORT_SYMBOL(_outsl_ns); 119 EXPORT_SYMBOL(_outsl_ns);
120 120
121 #define IO_CHECK_ALIGN(v,a) ((((unsigned long)(v)) & ((a) - 1)) == 0) 121 #define IO_CHECK_ALIGN(v,a) ((((unsigned long)(v)) & ((a) - 1)) == 0)
122 122
123 notrace void 123 notrace void
124 _memset_io(volatile void __iomem *addr, int c, unsigned long n) 124 _memset_io(volatile void __iomem *addr, int c, unsigned long n)
125 { 125 {
126 void *p = (void __force *)addr; 126 void *p = (void __force *)addr;
127 u32 lc = c; 127 u32 lc = c;
128 lc |= lc << 8; 128 lc |= lc << 8;
129 lc |= lc << 16; 129 lc |= lc << 16;
130 130
131 __asm__ __volatile__ ("sync" : : : "memory"); 131 __asm__ __volatile__ ("sync" : : : "memory");
132 while(n && !IO_CHECK_ALIGN(p, 4)) { 132 while(n && !IO_CHECK_ALIGN(p, 4)) {
133 *((volatile u8 *)p) = c; 133 *((volatile u8 *)p) = c;
134 p++; 134 p++;
135 n--; 135 n--;
136 } 136 }
137 while(n >= 4) { 137 while(n >= 4) {
138 *((volatile u32 *)p) = lc; 138 *((volatile u32 *)p) = lc;
139 p += 4; 139 p += 4;
140 n -= 4; 140 n -= 4;
141 } 141 }
142 while(n) { 142 while(n) {
143 *((volatile u8 *)p) = c; 143 *((volatile u8 *)p) = c;
144 p++; 144 p++;
145 n--; 145 n--;
146 } 146 }
147 __asm__ __volatile__ ("sync" : : : "memory"); 147 __asm__ __volatile__ ("sync" : : : "memory");
148 } 148 }
149 EXPORT_SYMBOL(_memset_io); 149 EXPORT_SYMBOL(_memset_io);
150 150
151 void _memcpy_fromio(void *dest, const volatile void __iomem *src, 151 void _memcpy_fromio(void *dest, const volatile void __iomem *src,
152 unsigned long n) 152 unsigned long n)
153 { 153 {
154 void *vsrc = (void __force *) src; 154 void *vsrc = (void __force *) src;
155 155
156 __asm__ __volatile__ ("sync" : : : "memory"); 156 __asm__ __volatile__ ("sync" : : : "memory");
157 while(n && (!IO_CHECK_ALIGN(vsrc, 4) || !IO_CHECK_ALIGN(dest, 4))) { 157 while(n && (!IO_CHECK_ALIGN(vsrc, 4) || !IO_CHECK_ALIGN(dest, 4))) {
158 *((u8 *)dest) = *((volatile u8 *)vsrc); 158 *((u8 *)dest) = *((volatile u8 *)vsrc);
159 eieio(); 159 eieio();
160 vsrc++; 160 vsrc++;
161 dest++; 161 dest++;
162 n--; 162 n--;
163 } 163 }
164 while(n >= 4) { 164 while(n >= 4) {
165 *((u32 *)dest) = *((volatile u32 *)vsrc); 165 *((u32 *)dest) = *((volatile u32 *)vsrc);
166 eieio(); 166 eieio();
167 vsrc += 4; 167 vsrc += 4;
168 dest += 4; 168 dest += 4;
169 n -= 4; 169 n -= 4;
170 } 170 }
171 while(n) { 171 while(n) {
172 *((u8 *)dest) = *((volatile u8 *)vsrc); 172 *((u8 *)dest) = *((volatile u8 *)vsrc);
173 eieio(); 173 eieio();
174 vsrc++; 174 vsrc++;
175 dest++; 175 dest++;
176 n--; 176 n--;
177 } 177 }
178 __asm__ __volatile__ ("sync" : : : "memory"); 178 __asm__ __volatile__ ("sync" : : : "memory");
179 } 179 }
180 EXPORT_SYMBOL(_memcpy_fromio); 180 EXPORT_SYMBOL(_memcpy_fromio);
181 181
182 void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n) 182 void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
183 { 183 {
184 void *vdest = (void __force *) dest; 184 void *vdest = (void __force *) dest;
185 185
186 __asm__ __volatile__ ("sync" : : : "memory"); 186 __asm__ __volatile__ ("sync" : : : "memory");
187 while(n && (!IO_CHECK_ALIGN(vdest, 4) || !IO_CHECK_ALIGN(src, 4))) { 187 while(n && (!IO_CHECK_ALIGN(vdest, 4) || !IO_CHECK_ALIGN(src, 4))) {
188 *((volatile u8 *)vdest) = *((u8 *)src); 188 *((volatile u8 *)vdest) = *((u8 *)src);
189 src++; 189 src++;
190 vdest++; 190 vdest++;
191 n--; 191 n--;
192 } 192 }
193 while(n >= 4) { 193 while(n >= 4) {
194 *((volatile u32 *)vdest) = *((volatile u32 *)src); 194 *((volatile u32 *)vdest) = *((volatile u32 *)src);
195 src += 4; 195 src += 4;
196 vdest += 4; 196 vdest += 4;
197 n-=4; 197 n-=4;
198 } 198 }
199 while(n) { 199 while(n) {
200 *((volatile u8 *)vdest) = *((u8 *)src); 200 *((volatile u8 *)vdest) = *((u8 *)src);
201 src++; 201 src++;
202 vdest++; 202 vdest++;
203 n--; 203 n--;
204 } 204 }
205 __asm__ __volatile__ ("sync" : : : "memory"); 205 __asm__ __volatile__ ("sync" : : : "memory");
206 } 206 }
207 EXPORT_SYMBOL(_memcpy_toio); 207 EXPORT_SYMBOL(_memcpy_toio);
208 208
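The _insb()/_outsb() family and the _memcpy_*io()/_memset_io() routines above are the exported powerpc backends that the insb()/memcpy_toio()-style wrappers in asm/io.h dispatch to; the sync/eieio sequences keep the device accesses ordered with respect to each other and the surrounding code. A hedged driver-side sketch using the memcpy helpers on an ioremap()ed region (the register offsets and lengths are invented for the example):

#include <linux/io.h>

#define EXAMPLE_CMD_OFF         0x100   /* hypothetical command window */
#define EXAMPLE_STATUS_OFF      0x200   /* hypothetical status window */

/* Post a 64-byte command block to the device and read a 32-byte reply back. */
static void example_exchange(void __iomem *regs, const void *cmd, void *status)
{
        _memcpy_toio(regs + EXAMPLE_CMD_OFF, cmd, 64);
        _memcpy_fromio(status, regs + EXAMPLE_STATUS_OFF, 32);
}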
arch/powerpc/kernel/irq.c
1 /* 1 /*
2 * Derived from arch/i386/kernel/irq.c 2 * Derived from arch/i386/kernel/irq.c
3 * Copyright (C) 1992 Linus Torvalds 3 * Copyright (C) 1992 Linus Torvalds
4 * Adapted from arch/i386 by Gary Thomas 4 * Adapted from arch/i386 by Gary Thomas
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Updated and modified by Cort Dougan <cort@fsmlabs.com> 6 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
7 * Copyright (C) 1996-2001 Cort Dougan 7 * Copyright (C) 1996-2001 Cort Dougan
8 * Adapted for Power Macintosh by Paul Mackerras 8 * Adapted for Power Macintosh by Paul Mackerras
9 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au) 9 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
10 * 10 *
11 * This program is free software; you can redistribute it and/or 11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License 12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version. 14 * 2 of the License, or (at your option) any later version.
15 * 15 *
16 * This file contains the code used by various IRQ handling routines: 16 * This file contains the code used by various IRQ handling routines:
17 * asking for different IRQ's should be done through these routines 17 * asking for different IRQ's should be done through these routines
18 * instead of just grabbing them. Thus setups with different IRQ numbers 18 * instead of just grabbing them. Thus setups with different IRQ numbers
19 * shouldn't result in any weird surprises, and installing new handlers 19 * shouldn't result in any weird surprises, and installing new handlers
20 * should be easier. 20 * should be easier.
21 * 21 *
22 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the 22 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
23 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit 23 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
24 * mask register (of which only 16 are defined), hence the weird shifting 24 * mask register (of which only 16 are defined), hence the weird shifting
25 * and complement of the cached_irq_mask. I want to be able to stuff 25 * and complement of the cached_irq_mask. I want to be able to stuff
26 * this right into the SIU SMASK register. 26 * this right into the SIU SMASK register.
27 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx 27 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
28 * to reduce code space and undefined function references. 28 * to reduce code space and undefined function references.
29 */ 29 */
30 30
31 #undef DEBUG 31 #undef DEBUG
32 32
33 #include <linux/module.h> 33 #include <linux/export.h>
34 #include <linux/threads.h> 34 #include <linux/threads.h>
35 #include <linux/kernel_stat.h> 35 #include <linux/kernel_stat.h>
36 #include <linux/signal.h> 36 #include <linux/signal.h>
37 #include <linux/sched.h> 37 #include <linux/sched.h>
38 #include <linux/ptrace.h> 38 #include <linux/ptrace.h>
39 #include <linux/ioport.h> 39 #include <linux/ioport.h>
40 #include <linux/interrupt.h> 40 #include <linux/interrupt.h>
41 #include <linux/timex.h> 41 #include <linux/timex.h>
42 #include <linux/init.h> 42 #include <linux/init.h>
43 #include <linux/slab.h> 43 #include <linux/slab.h>
44 #include <linux/delay.h> 44 #include <linux/delay.h>
45 #include <linux/irq.h> 45 #include <linux/irq.h>
46 #include <linux/seq_file.h> 46 #include <linux/seq_file.h>
47 #include <linux/cpumask.h> 47 #include <linux/cpumask.h>
48 #include <linux/profile.h> 48 #include <linux/profile.h>
49 #include <linux/bitops.h> 49 #include <linux/bitops.h>
50 #include <linux/list.h> 50 #include <linux/list.h>
51 #include <linux/radix-tree.h> 51 #include <linux/radix-tree.h>
52 #include <linux/mutex.h> 52 #include <linux/mutex.h>
53 #include <linux/bootmem.h> 53 #include <linux/bootmem.h>
54 #include <linux/pci.h> 54 #include <linux/pci.h>
55 #include <linux/debugfs.h> 55 #include <linux/debugfs.h>
56 #include <linux/of.h> 56 #include <linux/of.h>
57 #include <linux/of_irq.h> 57 #include <linux/of_irq.h>
58 58
59 #include <asm/uaccess.h> 59 #include <asm/uaccess.h>
60 #include <asm/system.h> 60 #include <asm/system.h>
61 #include <asm/io.h> 61 #include <asm/io.h>
62 #include <asm/pgtable.h> 62 #include <asm/pgtable.h>
63 #include <asm/irq.h> 63 #include <asm/irq.h>
64 #include <asm/cache.h> 64 #include <asm/cache.h>
65 #include <asm/prom.h> 65 #include <asm/prom.h>
66 #include <asm/ptrace.h> 66 #include <asm/ptrace.h>
67 #include <asm/machdep.h> 67 #include <asm/machdep.h>
68 #include <asm/udbg.h> 68 #include <asm/udbg.h>
69 #include <asm/smp.h> 69 #include <asm/smp.h>
70 70
71 #ifdef CONFIG_PPC64 71 #ifdef CONFIG_PPC64
72 #include <asm/paca.h> 72 #include <asm/paca.h>
73 #include <asm/firmware.h> 73 #include <asm/firmware.h>
74 #include <asm/lv1call.h> 74 #include <asm/lv1call.h>
75 #endif 75 #endif
76 #define CREATE_TRACE_POINTS 76 #define CREATE_TRACE_POINTS
77 #include <asm/trace.h> 77 #include <asm/trace.h>
78 78
79 DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); 79 DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
80 EXPORT_PER_CPU_SYMBOL(irq_stat); 80 EXPORT_PER_CPU_SYMBOL(irq_stat);
81 81
82 int __irq_offset_value; 82 int __irq_offset_value;
83 83
84 #ifdef CONFIG_PPC32 84 #ifdef CONFIG_PPC32
85 EXPORT_SYMBOL(__irq_offset_value); 85 EXPORT_SYMBOL(__irq_offset_value);
86 atomic_t ppc_n_lost_interrupts; 86 atomic_t ppc_n_lost_interrupts;
87 87
88 #ifdef CONFIG_TAU_INT 88 #ifdef CONFIG_TAU_INT
89 extern int tau_initialized; 89 extern int tau_initialized;
90 extern int tau_interrupts(int); 90 extern int tau_interrupts(int);
91 #endif 91 #endif
92 #endif /* CONFIG_PPC32 */ 92 #endif /* CONFIG_PPC32 */
93 93
94 #ifdef CONFIG_PPC64 94 #ifdef CONFIG_PPC64
95 95
96 #ifndef CONFIG_SPARSE_IRQ 96 #ifndef CONFIG_SPARSE_IRQ
97 EXPORT_SYMBOL(irq_desc); 97 EXPORT_SYMBOL(irq_desc);
98 #endif 98 #endif
99 99
100 int distribute_irqs = 1; 100 int distribute_irqs = 1;
101 101
102 static inline notrace unsigned long get_hard_enabled(void) 102 static inline notrace unsigned long get_hard_enabled(void)
103 { 103 {
104 unsigned long enabled; 104 unsigned long enabled;
105 105
106 __asm__ __volatile__("lbz %0,%1(13)" 106 __asm__ __volatile__("lbz %0,%1(13)"
107 : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled))); 107 : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));
108 108
109 return enabled; 109 return enabled;
110 } 110 }
111 111
112 static inline notrace void set_soft_enabled(unsigned long enable) 112 static inline notrace void set_soft_enabled(unsigned long enable)
113 { 113 {
114 __asm__ __volatile__("stb %0,%1(13)" 114 __asm__ __volatile__("stb %0,%1(13)"
115 : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); 115 : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
116 } 116 }
117 117
118 notrace void arch_local_irq_restore(unsigned long en) 118 notrace void arch_local_irq_restore(unsigned long en)
119 { 119 {
120 /* 120 /*
121 * get_paca()->soft_enabled = en; 121 * get_paca()->soft_enabled = en;
122 * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1? 122 * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
123 * That was allowed before, and in such a case we do need to take care 123 * That was allowed before, and in such a case we do need to take care
124 * that gcc will set soft_enabled directly via r13, not choose to use 124 * that gcc will set soft_enabled directly via r13, not choose to use
125 * an intermediate register, lest we're preempted to a different cpu. 125 * an intermediate register, lest we're preempted to a different cpu.
126 */ 126 */
127 set_soft_enabled(en); 127 set_soft_enabled(en);
128 if (!en) 128 if (!en)
129 return; 129 return;
130 130
131 #ifdef CONFIG_PPC_STD_MMU_64 131 #ifdef CONFIG_PPC_STD_MMU_64
132 if (firmware_has_feature(FW_FEATURE_ISERIES)) { 132 if (firmware_has_feature(FW_FEATURE_ISERIES)) {
133 /* 133 /*
134 * Do we need to disable preemption here? Not really: in the 134 * Do we need to disable preemption here? Not really: in the
135 * unlikely event that we're preempted to a different cpu in 135 * unlikely event that we're preempted to a different cpu in
136 * between getting r13, loading its lppaca_ptr, and loading 136 * between getting r13, loading its lppaca_ptr, and loading
137 * its any_int, we might call iseries_handle_interrupts without 137 * its any_int, we might call iseries_handle_interrupts without
138 * an interrupt pending on the new cpu, but that's no disaster, 138 * an interrupt pending on the new cpu, but that's no disaster,
139 * is it? And the business of preempting us off the old cpu 139 * is it? And the business of preempting us off the old cpu
140 * would itself involve a local_irq_restore which handles the 140 * would itself involve a local_irq_restore which handles the
141 * interrupt to that cpu. 141 * interrupt to that cpu.
142 * 142 *
143 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()" 143 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
144 * to avoid any preemption checking added into get_paca(). 144 * to avoid any preemption checking added into get_paca().
145 */ 145 */
146 if (local_paca->lppaca_ptr->int_dword.any_int) 146 if (local_paca->lppaca_ptr->int_dword.any_int)
147 iseries_handle_interrupts(); 147 iseries_handle_interrupts();
148 } 148 }
149 #endif /* CONFIG_PPC_STD_MMU_64 */ 149 #endif /* CONFIG_PPC_STD_MMU_64 */
150 150
151 /* 151 /*
152 * if (get_paca()->hard_enabled) return; 152 * if (get_paca()->hard_enabled) return;
153 * But again we need to take care that gcc gets hard_enabled directly 153 * But again we need to take care that gcc gets hard_enabled directly
154 * via r13, not choose to use an intermediate register, lest we're 154 * via r13, not choose to use an intermediate register, lest we're
155 * preempted to a different cpu in between the two instructions. 155 * preempted to a different cpu in between the two instructions.
156 */ 156 */
157 if (get_hard_enabled()) 157 if (get_hard_enabled())
158 return; 158 return;
159 159
160 /* 160 /*
161 * Need to hard-enable interrupts here. Since currently disabled, 161 * Need to hard-enable interrupts here. Since currently disabled,
162 * no need to take further asm precautions against preemption; but 162 * no need to take further asm precautions against preemption; but
163 * use local_paca instead of get_paca() to avoid preemption checking. 163 * use local_paca instead of get_paca() to avoid preemption checking.
164 */ 164 */
165 local_paca->hard_enabled = en; 165 local_paca->hard_enabled = en;
166 166
167 #ifndef CONFIG_BOOKE 167 #ifndef CONFIG_BOOKE
168 /* On server, re-trigger the decrementer if it went negative since 168 /* On server, re-trigger the decrementer if it went negative since
169 * some processors only trigger on edge transitions of the sign bit. 169 * some processors only trigger on edge transitions of the sign bit.
170 * 170 *
171 * BookE has a level sensitive decrementer (latches in TSR) so we 171 * BookE has a level sensitive decrementer (latches in TSR) so we
172 * don't need that 172 * don't need that
173 */ 173 */
174 if ((int)mfspr(SPRN_DEC) < 0) 174 if ((int)mfspr(SPRN_DEC) < 0)
175 mtspr(SPRN_DEC, 1); 175 mtspr(SPRN_DEC, 1);
176 #endif /* CONFIG_BOOKE */ 176 #endif /* CONFIG_BOOKE */
177 177
178 /* 178 /*
179 * Force the delivery of pending soft-disabled interrupts on PS3. 179 * Force the delivery of pending soft-disabled interrupts on PS3.
180 * Any HV call will have this side effect. 180 * Any HV call will have this side effect.
181 */ 181 */
182 if (firmware_has_feature(FW_FEATURE_PS3_LV1)) { 182 if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
183 u64 tmp; 183 u64 tmp;
184 lv1_get_version_info(&tmp); 184 lv1_get_version_info(&tmp);
185 } 185 }
186 186
187 __hard_irq_enable(); 187 __hard_irq_enable();
188 } 188 }
189 EXPORT_SYMBOL(arch_local_irq_restore); 189 EXPORT_SYMBOL(arch_local_irq_restore);
190 #endif /* CONFIG_PPC64 */ 190 #endif /* CONFIG_PPC64 */
191 191
192 int arch_show_interrupts(struct seq_file *p, int prec) 192 int arch_show_interrupts(struct seq_file *p, int prec)
193 { 193 {
194 int j; 194 int j;
195 195
196 #if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT) 196 #if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
197 if (tau_initialized) { 197 if (tau_initialized) {
198 seq_printf(p, "%*s: ", prec, "TAU"); 198 seq_printf(p, "%*s: ", prec, "TAU");
199 for_each_online_cpu(j) 199 for_each_online_cpu(j)
200 seq_printf(p, "%10u ", tau_interrupts(j)); 200 seq_printf(p, "%10u ", tau_interrupts(j));
201 seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n"); 201 seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
202 } 202 }
203 #endif /* CONFIG_PPC32 && CONFIG_TAU_INT */ 203 #endif /* CONFIG_PPC32 && CONFIG_TAU_INT */
204 204
205 seq_printf(p, "%*s: ", prec, "LOC"); 205 seq_printf(p, "%*s: ", prec, "LOC");
206 for_each_online_cpu(j) 206 for_each_online_cpu(j)
207 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs); 207 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
208 seq_printf(p, " Local timer interrupts\n"); 208 seq_printf(p, " Local timer interrupts\n");
209 209
210 seq_printf(p, "%*s: ", prec, "SPU"); 210 seq_printf(p, "%*s: ", prec, "SPU");
211 for_each_online_cpu(j) 211 for_each_online_cpu(j)
212 seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs); 212 seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
213 seq_printf(p, " Spurious interrupts\n"); 213 seq_printf(p, " Spurious interrupts\n");
214 214
215 seq_printf(p, "%*s: ", prec, "CNT"); 215 seq_printf(p, "%*s: ", prec, "CNT");
216 for_each_online_cpu(j) 216 for_each_online_cpu(j)
217 seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs); 217 seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
218 seq_printf(p, " Performance monitoring interrupts\n"); 218 seq_printf(p, " Performance monitoring interrupts\n");
219 219
220 seq_printf(p, "%*s: ", prec, "MCE"); 220 seq_printf(p, "%*s: ", prec, "MCE");
221 for_each_online_cpu(j) 221 for_each_online_cpu(j)
222 seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions); 222 seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
223 seq_printf(p, " Machine check exceptions\n"); 223 seq_printf(p, " Machine check exceptions\n");
224 224
225 return 0; 225 return 0;
226 } 226 }
227 227
228 /* 228 /*
229 * /proc/stat helpers 229 * /proc/stat helpers
230 */ 230 */
231 u64 arch_irq_stat_cpu(unsigned int cpu) 231 u64 arch_irq_stat_cpu(unsigned int cpu)
232 { 232 {
233 u64 sum = per_cpu(irq_stat, cpu).timer_irqs; 233 u64 sum = per_cpu(irq_stat, cpu).timer_irqs;
234 234
235 sum += per_cpu(irq_stat, cpu).pmu_irqs; 235 sum += per_cpu(irq_stat, cpu).pmu_irqs;
236 sum += per_cpu(irq_stat, cpu).mce_exceptions; 236 sum += per_cpu(irq_stat, cpu).mce_exceptions;
237 sum += per_cpu(irq_stat, cpu).spurious_irqs; 237 sum += per_cpu(irq_stat, cpu).spurious_irqs;
238 238
239 return sum; 239 return sum;
240 } 240 }
241 241
242 #ifdef CONFIG_HOTPLUG_CPU 242 #ifdef CONFIG_HOTPLUG_CPU
243 void migrate_irqs(void) 243 void migrate_irqs(void)
244 { 244 {
245 struct irq_desc *desc; 245 struct irq_desc *desc;
246 unsigned int irq; 246 unsigned int irq;
247 static int warned; 247 static int warned;
248 cpumask_var_t mask; 248 cpumask_var_t mask;
249 const struct cpumask *map = cpu_online_mask; 249 const struct cpumask *map = cpu_online_mask;
250 250
251 alloc_cpumask_var(&mask, GFP_KERNEL); 251 alloc_cpumask_var(&mask, GFP_KERNEL);
252 252
253 for_each_irq(irq) { 253 for_each_irq(irq) {
254 struct irq_data *data; 254 struct irq_data *data;
255 struct irq_chip *chip; 255 struct irq_chip *chip;
256 256
257 desc = irq_to_desc(irq); 257 desc = irq_to_desc(irq);
258 if (!desc) 258 if (!desc)
259 continue; 259 continue;
260 260
261 data = irq_desc_get_irq_data(desc); 261 data = irq_desc_get_irq_data(desc);
262 if (irqd_is_per_cpu(data)) 262 if (irqd_is_per_cpu(data))
263 continue; 263 continue;
264 264
265 chip = irq_data_get_irq_chip(data); 265 chip = irq_data_get_irq_chip(data);
266 266
267 cpumask_and(mask, data->affinity, map); 267 cpumask_and(mask, data->affinity, map);
268 if (cpumask_any(mask) >= nr_cpu_ids) { 268 if (cpumask_any(mask) >= nr_cpu_ids) {
269 printk("Breaking affinity for irq %i\n", irq); 269 printk("Breaking affinity for irq %i\n", irq);
270 cpumask_copy(mask, map); 270 cpumask_copy(mask, map);
271 } 271 }
272 if (chip->irq_set_affinity) 272 if (chip->irq_set_affinity)
273 chip->irq_set_affinity(data, mask, true); 273 chip->irq_set_affinity(data, mask, true);
274 else if (desc->action && !(warned++)) 274 else if (desc->action && !(warned++))
275 printk("Cannot set affinity for irq %i\n", irq); 275 printk("Cannot set affinity for irq %i\n", irq);
276 } 276 }
277 277
278 free_cpumask_var(mask); 278 free_cpumask_var(mask);
279 279
280 local_irq_enable(); 280 local_irq_enable();
281 mdelay(1); 281 mdelay(1);
282 local_irq_disable(); 282 local_irq_disable();
283 } 283 }
284 #endif 284 #endif
285 285
286 static inline void handle_one_irq(unsigned int irq) 286 static inline void handle_one_irq(unsigned int irq)
287 { 287 {
288 struct thread_info *curtp, *irqtp; 288 struct thread_info *curtp, *irqtp;
289 unsigned long saved_sp_limit; 289 unsigned long saved_sp_limit;
290 struct irq_desc *desc; 290 struct irq_desc *desc;
291 291
292 desc = irq_to_desc(irq); 292 desc = irq_to_desc(irq);
293 if (!desc) 293 if (!desc)
294 return; 294 return;
295 295
296 /* Switch to the irq stack to handle this */ 296 /* Switch to the irq stack to handle this */
297 curtp = current_thread_info(); 297 curtp = current_thread_info();
298 irqtp = hardirq_ctx[smp_processor_id()]; 298 irqtp = hardirq_ctx[smp_processor_id()];
299 299
300 if (curtp == irqtp) { 300 if (curtp == irqtp) {
301 /* We're already on the irq stack, just handle it */ 301 /* We're already on the irq stack, just handle it */
302 desc->handle_irq(irq, desc); 302 desc->handle_irq(irq, desc);
303 return; 303 return;
304 } 304 }
305 305
306 saved_sp_limit = current->thread.ksp_limit; 306 saved_sp_limit = current->thread.ksp_limit;
307 307
308 irqtp->task = curtp->task; 308 irqtp->task = curtp->task;
309 irqtp->flags = 0; 309 irqtp->flags = 0;
310 310
311 /* Copy the softirq bits in preempt_count so that the 311 /* Copy the softirq bits in preempt_count so that the
312 * softirq checks work in the hardirq context. */ 312 * softirq checks work in the hardirq context. */
313 irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) | 313 irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
314 (curtp->preempt_count & SOFTIRQ_MASK); 314 (curtp->preempt_count & SOFTIRQ_MASK);
315 315
316 current->thread.ksp_limit = (unsigned long)irqtp + 316 current->thread.ksp_limit = (unsigned long)irqtp +
317 _ALIGN_UP(sizeof(struct thread_info), 16); 317 _ALIGN_UP(sizeof(struct thread_info), 16);
318 318
319 call_handle_irq(irq, desc, irqtp, desc->handle_irq); 319 call_handle_irq(irq, desc, irqtp, desc->handle_irq);
320 current->thread.ksp_limit = saved_sp_limit; 320 current->thread.ksp_limit = saved_sp_limit;
321 irqtp->task = NULL; 321 irqtp->task = NULL;
322 322
323 /* Set any flag that may have been set on the 323 /* Set any flag that may have been set on the
324 * alternate stack 324 * alternate stack
325 */ 325 */
326 if (irqtp->flags) 326 if (irqtp->flags)
327 set_bits(irqtp->flags, &curtp->flags); 327 set_bits(irqtp->flags, &curtp->flags);
328 } 328 }
329 329
330 static inline void check_stack_overflow(void) 330 static inline void check_stack_overflow(void)
331 { 331 {
332 #ifdef CONFIG_DEBUG_STACKOVERFLOW 332 #ifdef CONFIG_DEBUG_STACKOVERFLOW
333 long sp; 333 long sp;
334 334
335 sp = __get_SP() & (THREAD_SIZE-1); 335 sp = __get_SP() & (THREAD_SIZE-1);
336 336
337 /* check for stack overflow: is there less than 2KB free? */ 337 /* check for stack overflow: is there less than 2KB free? */
338 if (unlikely(sp < (sizeof(struct thread_info) + 2048))) { 338 if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
339 printk("do_IRQ: stack overflow: %ld\n", 339 printk("do_IRQ: stack overflow: %ld\n",
340 sp - sizeof(struct thread_info)); 340 sp - sizeof(struct thread_info));
341 dump_stack(); 341 dump_stack();
342 } 342 }
343 #endif 343 #endif
344 } 344 }
345 345
346 void do_IRQ(struct pt_regs *regs) 346 void do_IRQ(struct pt_regs *regs)
347 { 347 {
348 struct pt_regs *old_regs = set_irq_regs(regs); 348 struct pt_regs *old_regs = set_irq_regs(regs);
349 unsigned int irq; 349 unsigned int irq;
350 350
351 trace_irq_entry(regs); 351 trace_irq_entry(regs);
352 352
353 irq_enter(); 353 irq_enter();
354 354
355 check_stack_overflow(); 355 check_stack_overflow();
356 356
357 irq = ppc_md.get_irq(); 357 irq = ppc_md.get_irq();
358 358
359 if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) 359 if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
360 handle_one_irq(irq); 360 handle_one_irq(irq);
361 else if (irq != NO_IRQ_IGNORE) 361 else if (irq != NO_IRQ_IGNORE)
362 __get_cpu_var(irq_stat).spurious_irqs++; 362 __get_cpu_var(irq_stat).spurious_irqs++;
363 363
364 irq_exit(); 364 irq_exit();
365 set_irq_regs(old_regs); 365 set_irq_regs(old_regs);
366 366
367 #ifdef CONFIG_PPC_ISERIES 367 #ifdef CONFIG_PPC_ISERIES
368 if (firmware_has_feature(FW_FEATURE_ISERIES) && 368 if (firmware_has_feature(FW_FEATURE_ISERIES) &&
369 get_lppaca()->int_dword.fields.decr_int) { 369 get_lppaca()->int_dword.fields.decr_int) {
370 get_lppaca()->int_dword.fields.decr_int = 0; 370 get_lppaca()->int_dword.fields.decr_int = 0;
371 /* Signal a fake decrementer interrupt */ 371 /* Signal a fake decrementer interrupt */
372 timer_interrupt(regs); 372 timer_interrupt(regs);
373 } 373 }
374 #endif 374 #endif
375 375
376 trace_irq_exit(regs); 376 trace_irq_exit(regs);
377 } 377 }
378 378
379 void __init init_IRQ(void) 379 void __init init_IRQ(void)
380 { 380 {
381 if (ppc_md.init_IRQ) 381 if (ppc_md.init_IRQ)
382 ppc_md.init_IRQ(); 382 ppc_md.init_IRQ();
383 383
384 exc_lvl_ctx_init(); 384 exc_lvl_ctx_init();
385 385
386 irq_ctx_init(); 386 irq_ctx_init();
387 } 387 }
388 388
389 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) 389 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
390 struct thread_info *critirq_ctx[NR_CPUS] __read_mostly; 390 struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
391 struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly; 391 struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
392 struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly; 392 struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;
393 393
394 void exc_lvl_ctx_init(void) 394 void exc_lvl_ctx_init(void)
395 { 395 {
396 struct thread_info *tp; 396 struct thread_info *tp;
397 int i, cpu_nr; 397 int i, cpu_nr;
398 398
399 for_each_possible_cpu(i) { 399 for_each_possible_cpu(i) {
400 #ifdef CONFIG_PPC64 400 #ifdef CONFIG_PPC64
401 cpu_nr = i; 401 cpu_nr = i;
402 #else 402 #else
403 cpu_nr = get_hard_smp_processor_id(i); 403 cpu_nr = get_hard_smp_processor_id(i);
404 #endif 404 #endif
405 memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE); 405 memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
406 tp = critirq_ctx[cpu_nr]; 406 tp = critirq_ctx[cpu_nr];
407 tp->cpu = cpu_nr; 407 tp->cpu = cpu_nr;
408 tp->preempt_count = 0; 408 tp->preempt_count = 0;
409 409
410 #ifdef CONFIG_BOOKE 410 #ifdef CONFIG_BOOKE
411 memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE); 411 memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
412 tp = dbgirq_ctx[cpu_nr]; 412 tp = dbgirq_ctx[cpu_nr];
413 tp->cpu = cpu_nr; 413 tp->cpu = cpu_nr;
414 tp->preempt_count = 0; 414 tp->preempt_count = 0;
415 415
416 memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE); 416 memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
417 tp = mcheckirq_ctx[cpu_nr]; 417 tp = mcheckirq_ctx[cpu_nr];
418 tp->cpu = cpu_nr; 418 tp->cpu = cpu_nr;
419 tp->preempt_count = HARDIRQ_OFFSET; 419 tp->preempt_count = HARDIRQ_OFFSET;
420 #endif 420 #endif
421 } 421 }
422 } 422 }
423 #endif 423 #endif
424 424
425 struct thread_info *softirq_ctx[NR_CPUS] __read_mostly; 425 struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
426 struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly; 426 struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
427 427
428 void irq_ctx_init(void) 428 void irq_ctx_init(void)
429 { 429 {
430 struct thread_info *tp; 430 struct thread_info *tp;
431 int i; 431 int i;
432 432
433 for_each_possible_cpu(i) { 433 for_each_possible_cpu(i) {
434 memset((void *)softirq_ctx[i], 0, THREAD_SIZE); 434 memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
435 tp = softirq_ctx[i]; 435 tp = softirq_ctx[i];
436 tp->cpu = i; 436 tp->cpu = i;
437 tp->preempt_count = 0; 437 tp->preempt_count = 0;
438 438
439 memset((void *)hardirq_ctx[i], 0, THREAD_SIZE); 439 memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
440 tp = hardirq_ctx[i]; 440 tp = hardirq_ctx[i];
441 tp->cpu = i; 441 tp->cpu = i;
442 tp->preempt_count = HARDIRQ_OFFSET; 442 tp->preempt_count = HARDIRQ_OFFSET;
443 } 443 }
444 } 444 }
445 445
446 static inline void do_softirq_onstack(void) 446 static inline void do_softirq_onstack(void)
447 { 447 {
448 struct thread_info *curtp, *irqtp; 448 struct thread_info *curtp, *irqtp;
449 unsigned long saved_sp_limit = current->thread.ksp_limit; 449 unsigned long saved_sp_limit = current->thread.ksp_limit;
450 450
451 curtp = current_thread_info(); 451 curtp = current_thread_info();
452 irqtp = softirq_ctx[smp_processor_id()]; 452 irqtp = softirq_ctx[smp_processor_id()];
453 irqtp->task = curtp->task; 453 irqtp->task = curtp->task;
454 irqtp->flags = 0; 454 irqtp->flags = 0;
455 current->thread.ksp_limit = (unsigned long)irqtp + 455 current->thread.ksp_limit = (unsigned long)irqtp +
456 _ALIGN_UP(sizeof(struct thread_info), 16); 456 _ALIGN_UP(sizeof(struct thread_info), 16);
457 call_do_softirq(irqtp); 457 call_do_softirq(irqtp);
458 current->thread.ksp_limit = saved_sp_limit; 458 current->thread.ksp_limit = saved_sp_limit;
459 irqtp->task = NULL; 459 irqtp->task = NULL;
460 460
461 /* Set any flag that may have been set on the 461 /* Set any flag that may have been set on the
462 * alternate stack 462 * alternate stack
463 */ 463 */
464 if (irqtp->flags) 464 if (irqtp->flags)
465 set_bits(irqtp->flags, &curtp->flags); 465 set_bits(irqtp->flags, &curtp->flags);
466 } 466 }
467 467
468 void do_softirq(void) 468 void do_softirq(void)
469 { 469 {
470 unsigned long flags; 470 unsigned long flags;
471 471
472 if (in_interrupt()) 472 if (in_interrupt())
473 return; 473 return;
474 474
475 local_irq_save(flags); 475 local_irq_save(flags);
476 476
477 if (local_softirq_pending()) 477 if (local_softirq_pending())
478 do_softirq_onstack(); 478 do_softirq_onstack();
479 479
480 local_irq_restore(flags); 480 local_irq_restore(flags);
481 } 481 }
482 482
483 483
484 /* 484 /*
485 * IRQ controller and virtual interrupts 485 * IRQ controller and virtual interrupts
486 */ 486 */
487 487
488 /* The main irq map itself is an array of NR_IRQ entries containing the 488 /* The main irq map itself is an array of NR_IRQ entries containing the
489 * associated host and irq number. An entry with a host of NULL is free. 489 * associated host and irq number. An entry with a host of NULL is free.
490 * An entry can be allocated if it's free, the allocator always then sets 490 * An entry can be allocated if it's free, the allocator always then sets
491 * hwirq first to the host's invalid irq number and then fills ops. 491 * hwirq first to the host's invalid irq number and then fills ops.
492 */ 492 */
493 struct irq_map_entry { 493 struct irq_map_entry {
494 irq_hw_number_t hwirq; 494 irq_hw_number_t hwirq;
495 struct irq_host *host; 495 struct irq_host *host;
496 }; 496 };
497 497
498 static LIST_HEAD(irq_hosts); 498 static LIST_HEAD(irq_hosts);
499 static DEFINE_RAW_SPINLOCK(irq_big_lock); 499 static DEFINE_RAW_SPINLOCK(irq_big_lock);
500 static DEFINE_MUTEX(revmap_trees_mutex); 500 static DEFINE_MUTEX(revmap_trees_mutex);
501 static struct irq_map_entry irq_map[NR_IRQS]; 501 static struct irq_map_entry irq_map[NR_IRQS];
502 static unsigned int irq_virq_count = NR_IRQS; 502 static unsigned int irq_virq_count = NR_IRQS;
503 static struct irq_host *irq_default_host; 503 static struct irq_host *irq_default_host;
504 504
505 irq_hw_number_t irqd_to_hwirq(struct irq_data *d) 505 irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
506 { 506 {
507 return irq_map[d->irq].hwirq; 507 return irq_map[d->irq].hwirq;
508 } 508 }
509 EXPORT_SYMBOL_GPL(irqd_to_hwirq); 509 EXPORT_SYMBOL_GPL(irqd_to_hwirq);
510 510
511 irq_hw_number_t virq_to_hw(unsigned int virq) 511 irq_hw_number_t virq_to_hw(unsigned int virq)
512 { 512 {
513 return irq_map[virq].hwirq; 513 return irq_map[virq].hwirq;
514 } 514 }
515 EXPORT_SYMBOL_GPL(virq_to_hw); 515 EXPORT_SYMBOL_GPL(virq_to_hw);
516 516
517 bool virq_is_host(unsigned int virq, struct irq_host *host) 517 bool virq_is_host(unsigned int virq, struct irq_host *host)
518 { 518 {
519 return irq_map[virq].host == host; 519 return irq_map[virq].host == host;
520 } 520 }
521 EXPORT_SYMBOL_GPL(virq_is_host); 521 EXPORT_SYMBOL_GPL(virq_is_host);
522 522
523 static int default_irq_host_match(struct irq_host *h, struct device_node *np) 523 static int default_irq_host_match(struct irq_host *h, struct device_node *np)
524 { 524 {
525 return h->of_node != NULL && h->of_node == np; 525 return h->of_node != NULL && h->of_node == np;
526 } 526 }
527 527
528 struct irq_host *irq_alloc_host(struct device_node *of_node, 528 struct irq_host *irq_alloc_host(struct device_node *of_node,
529 unsigned int revmap_type, 529 unsigned int revmap_type,
530 unsigned int revmap_arg, 530 unsigned int revmap_arg,
531 struct irq_host_ops *ops, 531 struct irq_host_ops *ops,
532 irq_hw_number_t inval_irq) 532 irq_hw_number_t inval_irq)
533 { 533 {
534 struct irq_host *host; 534 struct irq_host *host;
535 unsigned int size = sizeof(struct irq_host); 535 unsigned int size = sizeof(struct irq_host);
536 unsigned int i; 536 unsigned int i;
537 unsigned int *rmap; 537 unsigned int *rmap;
538 unsigned long flags; 538 unsigned long flags;
539 539
540 /* Allocate structure and revmap table if using linear mapping */ 540 /* Allocate structure and revmap table if using linear mapping */
541 if (revmap_type == IRQ_HOST_MAP_LINEAR) 541 if (revmap_type == IRQ_HOST_MAP_LINEAR)
542 size += revmap_arg * sizeof(unsigned int); 542 size += revmap_arg * sizeof(unsigned int);
543 host = kzalloc(size, GFP_KERNEL); 543 host = kzalloc(size, GFP_KERNEL);
544 if (host == NULL) 544 if (host == NULL)
545 return NULL; 545 return NULL;
546 546
547 /* Fill structure */ 547 /* Fill structure */
548 host->revmap_type = revmap_type; 548 host->revmap_type = revmap_type;
549 host->inval_irq = inval_irq; 549 host->inval_irq = inval_irq;
550 host->ops = ops; 550 host->ops = ops;
551 host->of_node = of_node_get(of_node); 551 host->of_node = of_node_get(of_node);
552 552
553 if (host->ops->match == NULL) 553 if (host->ops->match == NULL)
554 host->ops->match = default_irq_host_match; 554 host->ops->match = default_irq_host_match;
555 555
556 raw_spin_lock_irqsave(&irq_big_lock, flags); 556 raw_spin_lock_irqsave(&irq_big_lock, flags);
557 557
558 /* If it's a legacy controller, check for duplicates and 558 /* If it's a legacy controller, check for duplicates and
559 * mark it as allocated (we use irq 0 host pointer for that) 559 * mark it as allocated (we use irq 0 host pointer for that)
560 */ 560 */
561 if (revmap_type == IRQ_HOST_MAP_LEGACY) { 561 if (revmap_type == IRQ_HOST_MAP_LEGACY) {
562 if (irq_map[0].host != NULL) { 562 if (irq_map[0].host != NULL) {
563 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 563 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
564 of_node_put(host->of_node); 564 of_node_put(host->of_node);
565 kfree(host); 565 kfree(host);
566 return NULL; 566 return NULL;
567 } 567 }
568 irq_map[0].host = host; 568 irq_map[0].host = host;
569 } 569 }
570 570
571 list_add(&host->link, &irq_hosts); 571 list_add(&host->link, &irq_hosts);
572 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 572 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
573 573
574 /* Additional setups per revmap type */ 574 /* Additional setups per revmap type */
575 switch(revmap_type) { 575 switch(revmap_type) {
576 case IRQ_HOST_MAP_LEGACY: 576 case IRQ_HOST_MAP_LEGACY:
577 /* 0 is always the invalid number for legacy */ 577 /* 0 is always the invalid number for legacy */
578 host->inval_irq = 0; 578 host->inval_irq = 0;
579 /* setup us as the host for all legacy interrupts */ 579 /* setup us as the host for all legacy interrupts */
580 for (i = 1; i < NUM_ISA_INTERRUPTS; i++) { 580 for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
581 irq_map[i].hwirq = i; 581 irq_map[i].hwirq = i;
582 smp_wmb(); 582 smp_wmb();
583 irq_map[i].host = host; 583 irq_map[i].host = host;
584 smp_wmb(); 584 smp_wmb();
585 585
586 /* Legacy flags are left to default at this point, 586 /* Legacy flags are left to default at this point,
587 * one can then use irq_create_mapping() to 587 * one can then use irq_create_mapping() to
588 * explicitly change them 588 * explicitly change them
589 */ 589 */
590 ops->map(host, i, i); 590 ops->map(host, i, i);
591 591
592 /* Clear norequest flags */ 592 /* Clear norequest flags */
593 irq_clear_status_flags(i, IRQ_NOREQUEST); 593 irq_clear_status_flags(i, IRQ_NOREQUEST);
594 } 594 }
595 break; 595 break;
596 case IRQ_HOST_MAP_LINEAR: 596 case IRQ_HOST_MAP_LINEAR:
597 rmap = (unsigned int *)(host + 1); 597 rmap = (unsigned int *)(host + 1);
598 for (i = 0; i < revmap_arg; i++) 598 for (i = 0; i < revmap_arg; i++)
599 rmap[i] = NO_IRQ; 599 rmap[i] = NO_IRQ;
600 host->revmap_data.linear.size = revmap_arg; 600 host->revmap_data.linear.size = revmap_arg;
601 smp_wmb(); 601 smp_wmb();
602 host->revmap_data.linear.revmap = rmap; 602 host->revmap_data.linear.revmap = rmap;
603 break; 603 break;
604 case IRQ_HOST_MAP_TREE: 604 case IRQ_HOST_MAP_TREE:
605 INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL); 605 INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL);
606 break; 606 break;
607 default: 607 default:
608 break; 608 break;
609 } 609 }
610 610
611 pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host); 611 pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);
612 612
613 return host; 613 return host;
614 } 614 }
615 615
616 struct irq_host *irq_find_host(struct device_node *node) 616 struct irq_host *irq_find_host(struct device_node *node)
617 { 617 {
618 struct irq_host *h, *found = NULL; 618 struct irq_host *h, *found = NULL;
619 unsigned long flags; 619 unsigned long flags;
620 620
621 /* We might want to match the legacy controller last since 621 /* We might want to match the legacy controller last since
622 * it might potentially be set to match all interrupts in 622 * it might potentially be set to match all interrupts in
623 * the absence of a device node. This isn't a problem yet, 623 * the absence of a device node. This isn't a problem yet,
624 * though... 624 * though...
625 */ 625 */
626 raw_spin_lock_irqsave(&irq_big_lock, flags); 626 raw_spin_lock_irqsave(&irq_big_lock, flags);
627 list_for_each_entry(h, &irq_hosts, link) 627 list_for_each_entry(h, &irq_hosts, link)
628 if (h->ops->match(h, node)) { 628 if (h->ops->match(h, node)) {
629 found = h; 629 found = h;
630 break; 630 break;
631 } 631 }
632 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 632 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
633 return found; 633 return found;
634 } 634 }
635 EXPORT_SYMBOL_GPL(irq_find_host); 635 EXPORT_SYMBOL_GPL(irq_find_host);
636 636
637 void irq_set_default_host(struct irq_host *host) 637 void irq_set_default_host(struct irq_host *host)
638 { 638 {
639 pr_debug("irq: Default host set to @0x%p\n", host); 639 pr_debug("irq: Default host set to @0x%p\n", host);
640 640
641 irq_default_host = host; 641 irq_default_host = host;
642 } 642 }
643 643
644 void irq_set_virq_count(unsigned int count) 644 void irq_set_virq_count(unsigned int count)
645 { 645 {
646 pr_debug("irq: Trying to set virq count to %d\n", count); 646 pr_debug("irq: Trying to set virq count to %d\n", count);
647 647
648 BUG_ON(count < NUM_ISA_INTERRUPTS); 648 BUG_ON(count < NUM_ISA_INTERRUPTS);
649 if (count < NR_IRQS) 649 if (count < NR_IRQS)
650 irq_virq_count = count; 650 irq_virq_count = count;
651 } 651 }
652 652
653 static int irq_setup_virq(struct irq_host *host, unsigned int virq, 653 static int irq_setup_virq(struct irq_host *host, unsigned int virq,
654 irq_hw_number_t hwirq) 654 irq_hw_number_t hwirq)
655 { 655 {
656 int res; 656 int res;
657 657
658 res = irq_alloc_desc_at(virq, 0); 658 res = irq_alloc_desc_at(virq, 0);
659 if (res != virq) { 659 if (res != virq) {
660 pr_debug("irq: -> allocating desc failed\n"); 660 pr_debug("irq: -> allocating desc failed\n");
661 goto error; 661 goto error;
662 } 662 }
663 663
664 /* map it */ 664 /* map it */
665 smp_wmb(); 665 smp_wmb();
666 irq_map[virq].hwirq = hwirq; 666 irq_map[virq].hwirq = hwirq;
667 smp_mb(); 667 smp_mb();
668 668
669 if (host->ops->map(host, virq, hwirq)) { 669 if (host->ops->map(host, virq, hwirq)) {
670 pr_debug("irq: -> mapping failed, freeing\n"); 670 pr_debug("irq: -> mapping failed, freeing\n");
671 goto errdesc; 671 goto errdesc;
672 } 672 }
673 673
674 irq_clear_status_flags(virq, IRQ_NOREQUEST); 674 irq_clear_status_flags(virq, IRQ_NOREQUEST);
675 675
676 return 0; 676 return 0;
677 677
678 errdesc: 678 errdesc:
679 irq_free_descs(virq, 1); 679 irq_free_descs(virq, 1);
680 error: 680 error:
681 irq_free_virt(virq, 1); 681 irq_free_virt(virq, 1);
682 return -1; 682 return -1;
683 } 683 }
684 684
685 unsigned int irq_create_direct_mapping(struct irq_host *host) 685 unsigned int irq_create_direct_mapping(struct irq_host *host)
686 { 686 {
687 unsigned int virq; 687 unsigned int virq;
688 688
689 if (host == NULL) 689 if (host == NULL)
690 host = irq_default_host; 690 host = irq_default_host;
691 691
692 BUG_ON(host == NULL); 692 BUG_ON(host == NULL);
693 WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP); 693 WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);
694 694
695 virq = irq_alloc_virt(host, 1, 0); 695 virq = irq_alloc_virt(host, 1, 0);
696 if (virq == NO_IRQ) { 696 if (virq == NO_IRQ) {
697 pr_debug("irq: create_direct virq allocation failed\n"); 697 pr_debug("irq: create_direct virq allocation failed\n");
698 return NO_IRQ; 698 return NO_IRQ;
699 } 699 }
700 700
701 pr_debug("irq: create_direct obtained virq %d\n", virq); 701 pr_debug("irq: create_direct obtained virq %d\n", virq);
702 702
703 if (irq_setup_virq(host, virq, virq)) 703 if (irq_setup_virq(host, virq, virq))
704 return NO_IRQ; 704 return NO_IRQ;
705 705
706 return virq; 706 return virq;
707 } 707 }
708 708
709 unsigned int irq_create_mapping(struct irq_host *host, 709 unsigned int irq_create_mapping(struct irq_host *host,
710 irq_hw_number_t hwirq) 710 irq_hw_number_t hwirq)
711 { 711 {
712 unsigned int virq, hint; 712 unsigned int virq, hint;
713 713
714 pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq); 714 pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);
715 715
716 /* Look for default host if necessary */ 716 /* Look for default host if necessary */
717 if (host == NULL) 717 if (host == NULL)
718 host = irq_default_host; 718 host = irq_default_host;
719 if (host == NULL) { 719 if (host == NULL) {
720 printk(KERN_WARNING "irq_create_mapping called for" 720 printk(KERN_WARNING "irq_create_mapping called for"
721 " NULL host, hwirq=%lx\n", hwirq); 721 " NULL host, hwirq=%lx\n", hwirq);
722 WARN_ON(1); 722 WARN_ON(1);
723 return NO_IRQ; 723 return NO_IRQ;
724 } 724 }
725 pr_debug("irq: -> using host @%p\n", host); 725 pr_debug("irq: -> using host @%p\n", host);
726 726
727 /* Check if mapping already exists */ 727 /* Check if mapping already exists */
728 virq = irq_find_mapping(host, hwirq); 728 virq = irq_find_mapping(host, hwirq);
729 if (virq != NO_IRQ) { 729 if (virq != NO_IRQ) {
730 pr_debug("irq: -> existing mapping on virq %d\n", virq); 730 pr_debug("irq: -> existing mapping on virq %d\n", virq);
731 return virq; 731 return virq;
732 } 732 }
733 733
734 /* Get a virtual interrupt number */ 734 /* Get a virtual interrupt number */
735 if (host->revmap_type == IRQ_HOST_MAP_LEGACY) { 735 if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
736 /* Handle legacy */ 736 /* Handle legacy */
737 virq = (unsigned int)hwirq; 737 virq = (unsigned int)hwirq;
738 if (virq == 0 || virq >= NUM_ISA_INTERRUPTS) 738 if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
739 return NO_IRQ; 739 return NO_IRQ;
740 return virq; 740 return virq;
741 } else { 741 } else {
742 /* Allocate a virtual interrupt number */ 742 /* Allocate a virtual interrupt number */
743 hint = hwirq % irq_virq_count; 743 hint = hwirq % irq_virq_count;
744 virq = irq_alloc_virt(host, 1, hint); 744 virq = irq_alloc_virt(host, 1, hint);
745 if (virq == NO_IRQ) { 745 if (virq == NO_IRQ) {
746 pr_debug("irq: -> virq allocation failed\n"); 746 pr_debug("irq: -> virq allocation failed\n");
747 return NO_IRQ; 747 return NO_IRQ;
748 } 748 }
749 } 749 }
750 750
751 if (irq_setup_virq(host, virq, hwirq)) 751 if (irq_setup_virq(host, virq, hwirq))
752 return NO_IRQ; 752 return NO_IRQ;
753 753
754 pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n", 754 pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n",
755 hwirq, host->of_node ? host->of_node->full_name : "null", virq); 755 hwirq, host->of_node ? host->of_node->full_name : "null", virq);
756 756
757 return virq; 757 return virq;
758 } 758 }
759 EXPORT_SYMBOL_GPL(irq_create_mapping); 759 EXPORT_SYMBOL_GPL(irq_create_mapping);
760 760
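irq_create_mapping() above is the entry point an interrupt-controller driver uses once it has registered an irq_host: the host's map() callback is where a chip and flow handler get attached to each new virq. A hedged sketch of a linear-revmap controller built on this API (the chip, its name, and the 64-line size are illustrative):

#include <linux/irq.h>
#include <linux/of.h>
#include <asm/irq.h>

static struct irq_chip example_pic_chip = {
        .name = "EXAMPLE-PIC",
        /* .irq_mask/.irq_unmask etc. would poke the real hardware here */
};

static int example_pic_map(struct irq_host *h, unsigned int virq,
                           irq_hw_number_t hw)
{
        irq_set_chip_and_handler(virq, &example_pic_chip, handle_level_irq);
        return 0;
}

static struct irq_host_ops example_pic_ops = {
        .map = example_pic_map,
};

static void example_pic_probe(struct device_node *np)
{
        struct irq_host *host;
        unsigned int virq;

        /* 64 hardware lines, linear reverse map, hwirq 0 treated as invalid. */
        host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 64, &example_pic_ops, 0);
        if (!host)
                return;

        /* Hardware line 3 becomes a Linux virtual irq number. */
        virq = irq_create_mapping(host, 3);
        (void)virq;
}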
761 unsigned int irq_create_of_mapping(struct device_node *controller, 761 unsigned int irq_create_of_mapping(struct device_node *controller,
762 const u32 *intspec, unsigned int intsize) 762 const u32 *intspec, unsigned int intsize)
763 { 763 {
764 struct irq_host *host; 764 struct irq_host *host;
765 irq_hw_number_t hwirq; 765 irq_hw_number_t hwirq;
766 unsigned int type = IRQ_TYPE_NONE; 766 unsigned int type = IRQ_TYPE_NONE;
767 unsigned int virq; 767 unsigned int virq;
768 768
769 if (controller == NULL) 769 if (controller == NULL)
770 host = irq_default_host; 770 host = irq_default_host;
771 else 771 else
772 host = irq_find_host(controller); 772 host = irq_find_host(controller);
773 if (host == NULL) { 773 if (host == NULL) {
774 printk(KERN_WARNING "irq: no irq host found for %s !\n", 774 printk(KERN_WARNING "irq: no irq host found for %s !\n",
775 controller->full_name); 775 controller->full_name);
776 return NO_IRQ; 776 return NO_IRQ;
777 } 777 }
778 778
779 /* If host has no translation, then we assume interrupt line */ 779 /* If host has no translation, then we assume interrupt line */
780 if (host->ops->xlate == NULL) 780 if (host->ops->xlate == NULL)
781 hwirq = intspec[0]; 781 hwirq = intspec[0];
782 else { 782 else {
783 if (host->ops->xlate(host, controller, intspec, intsize, 783 if (host->ops->xlate(host, controller, intspec, intsize,
784 &hwirq, &type)) 784 &hwirq, &type))
785 return NO_IRQ; 785 return NO_IRQ;
786 } 786 }
787 787
788 /* Create mapping */ 788 /* Create mapping */
789 virq = irq_create_mapping(host, hwirq); 789 virq = irq_create_mapping(host, hwirq);
790 if (virq == NO_IRQ) 790 if (virq == NO_IRQ)
791 return virq; 791 return virq;
792 792
793 /* Set type if specified and different than the current one */ 793 /* Set type if specified and different than the current one */
794 if (type != IRQ_TYPE_NONE && 794 if (type != IRQ_TYPE_NONE &&
795 type != (irqd_get_trigger_type(irq_get_irq_data(virq)))) 795 type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
796 irq_set_irq_type(virq, type); 796 irq_set_irq_type(virq, type);
797 return virq; 797 return virq;
798 } 798 }
799 EXPORT_SYMBOL_GPL(irq_create_of_mapping); 799 EXPORT_SYMBOL_GPL(irq_create_of_mapping);
800 800
801 void irq_dispose_mapping(unsigned int virq) 801 void irq_dispose_mapping(unsigned int virq)
802 { 802 {
803 struct irq_host *host; 803 struct irq_host *host;
804 irq_hw_number_t hwirq; 804 irq_hw_number_t hwirq;
805 805
806 if (virq == NO_IRQ) 806 if (virq == NO_IRQ)
807 return; 807 return;
808 808
809 host = irq_map[virq].host; 809 host = irq_map[virq].host;
810 if (WARN_ON(host == NULL)) 810 if (WARN_ON(host == NULL))
811 return; 811 return;
812 812
813 /* Never unmap legacy interrupts */ 813 /* Never unmap legacy interrupts */
814 if (host->revmap_type == IRQ_HOST_MAP_LEGACY) 814 if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
815 return; 815 return;
816 816
817 irq_set_status_flags(virq, IRQ_NOREQUEST); 817 irq_set_status_flags(virq, IRQ_NOREQUEST);
818 818
819 /* remove chip and handler */ 819 /* remove chip and handler */
820 irq_set_chip_and_handler(virq, NULL, NULL); 820 irq_set_chip_and_handler(virq, NULL, NULL);
821 821
822 /* Make sure it's completed */ 822 /* Make sure it's completed */
823 synchronize_irq(virq); 823 synchronize_irq(virq);
824 824
825 /* Tell the PIC about it */ 825 /* Tell the PIC about it */
826 if (host->ops->unmap) 826 if (host->ops->unmap)
827 host->ops->unmap(host, virq); 827 host->ops->unmap(host, virq);
828 smp_mb(); 828 smp_mb();
829 829
830 /* Clear reverse map */ 830 /* Clear reverse map */
831 hwirq = irq_map[virq].hwirq; 831 hwirq = irq_map[virq].hwirq;
832 switch(host->revmap_type) { 832 switch(host->revmap_type) {
833 case IRQ_HOST_MAP_LINEAR: 833 case IRQ_HOST_MAP_LINEAR:
834 if (hwirq < host->revmap_data.linear.size) 834 if (hwirq < host->revmap_data.linear.size)
835 host->revmap_data.linear.revmap[hwirq] = NO_IRQ; 835 host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
836 break; 836 break;
837 case IRQ_HOST_MAP_TREE: 837 case IRQ_HOST_MAP_TREE:
838 mutex_lock(&revmap_trees_mutex); 838 mutex_lock(&revmap_trees_mutex);
839 radix_tree_delete(&host->revmap_data.tree, hwirq); 839 radix_tree_delete(&host->revmap_data.tree, hwirq);
840 mutex_unlock(&revmap_trees_mutex); 840 mutex_unlock(&revmap_trees_mutex);
841 break; 841 break;
842 } 842 }
843 843
844 /* Destroy map */ 844 /* Destroy map */
845 smp_mb(); 845 smp_mb();
846 irq_map[virq].hwirq = host->inval_irq; 846 irq_map[virq].hwirq = host->inval_irq;
847 847
848 irq_free_descs(virq, 1); 848 irq_free_descs(virq, 1);
849 /* Free it */ 849 /* Free it */
850 irq_free_virt(virq, 1); 850 irq_free_virt(virq, 1);
851 } 851 }
852 EXPORT_SYMBOL_GPL(irq_dispose_mapping); 852 EXPORT_SYMBOL_GPL(irq_dispose_mapping);
853 853
854 unsigned int irq_find_mapping(struct irq_host *host, 854 unsigned int irq_find_mapping(struct irq_host *host,
855 irq_hw_number_t hwirq) 855 irq_hw_number_t hwirq)
856 { 856 {
857 unsigned int i; 857 unsigned int i;
858 unsigned int hint = hwirq % irq_virq_count; 858 unsigned int hint = hwirq % irq_virq_count;
859 859
860 /* Look for default host if necessary */ 860 /* Look for default host if necessary */
861 if (host == NULL) 861 if (host == NULL)
862 host = irq_default_host; 862 host = irq_default_host;
863 if (host == NULL) 863 if (host == NULL)
864 return NO_IRQ; 864 return NO_IRQ;
865 865
866 /* legacy -> bail early */ 866 /* legacy -> bail early */
867 if (host->revmap_type == IRQ_HOST_MAP_LEGACY) 867 if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
868 return hwirq; 868 return hwirq;
869 869
870 /* Slow path does a linear search of the map */ 870 /* Slow path does a linear search of the map */
871 if (hint < NUM_ISA_INTERRUPTS) 871 if (hint < NUM_ISA_INTERRUPTS)
872 hint = NUM_ISA_INTERRUPTS; 872 hint = NUM_ISA_INTERRUPTS;
873 i = hint; 873 i = hint;
874 do { 874 do {
875 if (irq_map[i].host == host && 875 if (irq_map[i].host == host &&
876 irq_map[i].hwirq == hwirq) 876 irq_map[i].hwirq == hwirq)
877 return i; 877 return i;
878 i++; 878 i++;
879 if (i >= irq_virq_count) 879 if (i >= irq_virq_count)
880 i = NUM_ISA_INTERRUPTS; 880 i = NUM_ISA_INTERRUPTS;
881 } while(i != hint); 881 } while(i != hint);
882 return NO_IRQ; 882 return NO_IRQ;
883 } 883 }
884 EXPORT_SYMBOL_GPL(irq_find_mapping); 884 EXPORT_SYMBOL_GPL(irq_find_mapping);
885 885
886 #ifdef CONFIG_SMP 886 #ifdef CONFIG_SMP
887 int irq_choose_cpu(const struct cpumask *mask) 887 int irq_choose_cpu(const struct cpumask *mask)
888 { 888 {
889 int cpuid; 889 int cpuid;
890 890
891 if (cpumask_equal(mask, cpu_all_mask)) { 891 if (cpumask_equal(mask, cpu_all_mask)) {
892 static int irq_rover; 892 static int irq_rover;
893 static DEFINE_RAW_SPINLOCK(irq_rover_lock); 893 static DEFINE_RAW_SPINLOCK(irq_rover_lock);
894 unsigned long flags; 894 unsigned long flags;
895 895
896 /* Round-robin distribution... */ 896 /* Round-robin distribution... */
897 do_round_robin: 897 do_round_robin:
898 raw_spin_lock_irqsave(&irq_rover_lock, flags); 898 raw_spin_lock_irqsave(&irq_rover_lock, flags);
899 899
900 irq_rover = cpumask_next(irq_rover, cpu_online_mask); 900 irq_rover = cpumask_next(irq_rover, cpu_online_mask);
901 if (irq_rover >= nr_cpu_ids) 901 if (irq_rover >= nr_cpu_ids)
902 irq_rover = cpumask_first(cpu_online_mask); 902 irq_rover = cpumask_first(cpu_online_mask);
903 903
904 cpuid = irq_rover; 904 cpuid = irq_rover;
905 905
906 raw_spin_unlock_irqrestore(&irq_rover_lock, flags); 906 raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
907 } else { 907 } else {
908 cpuid = cpumask_first_and(mask, cpu_online_mask); 908 cpuid = cpumask_first_and(mask, cpu_online_mask);
909 if (cpuid >= nr_cpu_ids) 909 if (cpuid >= nr_cpu_ids)
910 goto do_round_robin; 910 goto do_round_robin;
911 } 911 }
912 912
913 return get_hard_smp_processor_id(cpuid); 913 return get_hard_smp_processor_id(cpuid);
914 } 914 }
915 #else 915 #else
916 int irq_choose_cpu(const struct cpumask *mask) 916 int irq_choose_cpu(const struct cpumask *mask)
917 { 917 {
918 return hard_smp_processor_id(); 918 return hard_smp_processor_id();
919 } 919 }
920 #endif 920 #endif
921 921
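irq_choose_cpu() returns a hard SMP processor id: round-robin over the online CPUs when the caller passes cpu_all_mask, otherwise the first online CPU in the requested mask. Its usual caller is a PIC driver's .irq_set_affinity hook, roughly as sketched below; my_pic_route_to() and the driver name are made up for illustration.

        /* Hypothetical irq_chip .irq_set_affinity callback built on
         * irq_choose_cpu(); only that helper call reflects real code. */
        static int my_pic_set_affinity(struct irq_data *d,
                                       const struct cpumask *mask, bool force)
        {
                int hw_cpu = irq_choose_cpu(mask);

                /* Tell the (imaginary) controller to deliver this source,
                 * identified by its hwirq, to the chosen hardware CPU. */
                my_pic_route_to(virq_to_hw(d->irq), hw_cpu);

                return 0;
        }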
922 unsigned int irq_radix_revmap_lookup(struct irq_host *host, 922 unsigned int irq_radix_revmap_lookup(struct irq_host *host,
923 irq_hw_number_t hwirq) 923 irq_hw_number_t hwirq)
924 { 924 {
925 struct irq_map_entry *ptr; 925 struct irq_map_entry *ptr;
926 unsigned int virq; 926 unsigned int virq;
927 927
928 if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE)) 928 if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE))
929 return irq_find_mapping(host, hwirq); 929 return irq_find_mapping(host, hwirq);
930 930
931 /* 931 /*
932 * The ptr returned references the static global irq_map, 932 * The ptr returned references the static global irq_map,
933 * but freeing an irq can delete nodes along the path to 933 * but freeing an irq can delete nodes along the path to
934 * do the lookup via call_rcu. 934 * do the lookup via call_rcu.
935 */ 935 */
936 rcu_read_lock(); 936 rcu_read_lock();
937 ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq); 937 ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
938 rcu_read_unlock(); 938 rcu_read_unlock();
939 939
940 /* 940 /*
941 * If found in radix tree, then fine. 941 * If found in radix tree, then fine.
942 * Else fall back to linear lookup - this should not happen in practice 942 * Else fall back to linear lookup - this should not happen in practice
943 * as it means that we failed to insert the node in the radix tree. 943 * as it means that we failed to insert the node in the radix tree.
944 */ 944 */
945 if (ptr) 945 if (ptr)
946 virq = ptr - irq_map; 946 virq = ptr - irq_map;
947 else 947 else
948 virq = irq_find_mapping(host, hwirq); 948 virq = irq_find_mapping(host, hwirq);
949 949
950 return virq; 950 return virq;
951 } 951 }
952 952
953 void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, 953 void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
954 irq_hw_number_t hwirq) 954 irq_hw_number_t hwirq)
955 { 955 {
956 if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE)) 956 if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE))
957 return; 957 return;
958 958
959 if (virq != NO_IRQ) { 959 if (virq != NO_IRQ) {
960 mutex_lock(&revmap_trees_mutex); 960 mutex_lock(&revmap_trees_mutex);
961 radix_tree_insert(&host->revmap_data.tree, hwirq, 961 radix_tree_insert(&host->revmap_data.tree, hwirq,
962 &irq_map[virq]); 962 &irq_map[virq]);
963 mutex_unlock(&revmap_trees_mutex); 963 mutex_unlock(&revmap_trees_mutex);
964 } 964 }
965 } 965 }
966 966
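For an IRQ_HOST_MAP_TREE host the radix reverse map is not populated by the core mapping code; the PIC backend (built into the kernel, since this helper is not exported) inserts entries itself once a virq exists, so that irq_radix_revmap_lookup() above can take the RCU-protected fast path at interrupt time. A minimal sketch, assuming a my_host pointer and a hwirq already in hand:

        /* Hypothetical: map a hwirq on a tree-revmap host, then publish
         * the result in the radix tree for fast reverse lookups. */
        unsigned int virq = irq_create_mapping(my_host, hwirq);

        if (virq != NO_IRQ)
                irq_radix_revmap_insert(my_host, virq, hwirq);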
967 unsigned int irq_linear_revmap(struct irq_host *host, 967 unsigned int irq_linear_revmap(struct irq_host *host,
968 irq_hw_number_t hwirq) 968 irq_hw_number_t hwirq)
969 { 969 {
970 unsigned int *revmap; 970 unsigned int *revmap;
971 971
972 if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR)) 972 if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR))
973 return irq_find_mapping(host, hwirq); 973 return irq_find_mapping(host, hwirq);
974 974
975 /* Check revmap bounds */ 975 /* Check revmap bounds */
976 if (unlikely(hwirq >= host->revmap_data.linear.size)) 976 if (unlikely(hwirq >= host->revmap_data.linear.size))
977 return irq_find_mapping(host, hwirq); 977 return irq_find_mapping(host, hwirq);
978 978
979 /* Check if revmap was allocated */ 979 /* Check if revmap was allocated */
980 revmap = host->revmap_data.linear.revmap; 980 revmap = host->revmap_data.linear.revmap;
981 if (unlikely(revmap == NULL)) 981 if (unlikely(revmap == NULL))
982 return irq_find_mapping(host, hwirq); 982 return irq_find_mapping(host, hwirq);
983 983
984 /* Fill up revmap with slow path if no mapping found */ 984 /* Fill up revmap with slow path if no mapping found */
985 if (unlikely(revmap[hwirq] == NO_IRQ)) 985 if (unlikely(revmap[hwirq] == NO_IRQ))
986 revmap[hwirq] = irq_find_mapping(host, hwirq); 986 revmap[hwirq] = irq_find_mapping(host, hwirq);
987 987
988 return revmap[hwirq]; 988 return revmap[hwirq];
989 } 989 }
990 990
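irq_linear_revmap() is the interrupt-time fast path for linear-revmap hosts: index the per-host array by hwirq and fall back to the irq_find_mapping() scan only when the slot is empty or out of range. A cascaded-PIC flow handler typically looks something like the sketch below; my_pic_read_pending(), my_host and the handler name are assumptions, not code from this file.

        /* Hypothetical cascade handler: translate the child controller's
         * pending hwirq into a Linux virq and hand it to the core. */
        static void my_cascade_handler(unsigned int irq, struct irq_desc *desc)
        {
                irq_hw_number_t hwirq = my_pic_read_pending();
                unsigned int virq = irq_linear_revmap(my_host, hwirq);

                if (virq != NO_IRQ)
                        generic_handle_irq(virq);
        }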
991 unsigned int irq_alloc_virt(struct irq_host *host, 991 unsigned int irq_alloc_virt(struct irq_host *host,
992 unsigned int count, 992 unsigned int count,
993 unsigned int hint) 993 unsigned int hint)
994 { 994 {
995 unsigned long flags; 995 unsigned long flags;
996 unsigned int i, j, found = NO_IRQ; 996 unsigned int i, j, found = NO_IRQ;
997 997
998 if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS)) 998 if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
999 return NO_IRQ; 999 return NO_IRQ;
1000 1000
1001 raw_spin_lock_irqsave(&irq_big_lock, flags); 1001 raw_spin_lock_irqsave(&irq_big_lock, flags);
1002 1002
1003 /* Use hint for 1 interrupt if any */ 1003 /* Use hint for 1 interrupt if any */
1004 if (count == 1 && hint >= NUM_ISA_INTERRUPTS && 1004 if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
1005 hint < irq_virq_count && irq_map[hint].host == NULL) { 1005 hint < irq_virq_count && irq_map[hint].host == NULL) {
1006 found = hint; 1006 found = hint;
1007 goto hint_found; 1007 goto hint_found;
1008 } 1008 }
1009 1009
1010 /* Look for count consecutive numbers in the allocatable 1010 /* Look for count consecutive numbers in the allocatable
1011 * (non-legacy) space 1011 * (non-legacy) space
1012 */ 1012 */
1013 for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) { 1013 for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
1014 if (irq_map[i].host != NULL) 1014 if (irq_map[i].host != NULL)
1015 j = 0; 1015 j = 0;
1016 else 1016 else
1017 j++; 1017 j++;
1018 1018
1019 if (j == count) { 1019 if (j == count) {
1020 found = i - count + 1; 1020 found = i - count + 1;
1021 break; 1021 break;
1022 } 1022 }
1023 } 1023 }
1024 if (found == NO_IRQ) { 1024 if (found == NO_IRQ) {
1025 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 1025 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
1026 return NO_IRQ; 1026 return NO_IRQ;
1027 } 1027 }
1028 hint_found: 1028 hint_found:
1029 for (i = found; i < (found + count); i++) { 1029 for (i = found; i < (found + count); i++) {
1030 irq_map[i].hwirq = host->inval_irq; 1030 irq_map[i].hwirq = host->inval_irq;
1031 smp_wmb(); 1031 smp_wmb();
1032 irq_map[i].host = host; 1032 irq_map[i].host = host;
1033 } 1033 }
1034 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 1034 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
1035 return found; 1035 return found;
1036 } 1036 }
1037 1037
1038 void irq_free_virt(unsigned int virq, unsigned int count) 1038 void irq_free_virt(unsigned int virq, unsigned int count)
1039 { 1039 {
1040 unsigned long flags; 1040 unsigned long flags;
1041 unsigned int i; 1041 unsigned int i;
1042 1042
1043 WARN_ON (virq < NUM_ISA_INTERRUPTS); 1043 WARN_ON (virq < NUM_ISA_INTERRUPTS);
1044 WARN_ON (count == 0 || (virq + count) > irq_virq_count); 1044 WARN_ON (count == 0 || (virq + count) > irq_virq_count);
1045 1045
1046 if (virq < NUM_ISA_INTERRUPTS) { 1046 if (virq < NUM_ISA_INTERRUPTS) {
1047 if (virq + count < NUM_ISA_INTERRUPTS) 1047 if (virq + count < NUM_ISA_INTERRUPTS)
1048 return; 1048 return;
1049 count -= NUM_ISA_INTERRUPTS - virq; 1049 count -= NUM_ISA_INTERRUPTS - virq;
1050 virq = NUM_ISA_INTERRUPTS; 1050 virq = NUM_ISA_INTERRUPTS;
1051 } 1051 }
1052 1052
1053 if (count > irq_virq_count || virq > irq_virq_count - count) { 1053 if (count > irq_virq_count || virq > irq_virq_count - count) {
1054 if (virq > irq_virq_count) 1054 if (virq > irq_virq_count)
1055 return; 1055 return;
1056 count = irq_virq_count - virq; 1056 count = irq_virq_count - virq;
1057 } 1057 }
1058 1058
1059 raw_spin_lock_irqsave(&irq_big_lock, flags); 1059 raw_spin_lock_irqsave(&irq_big_lock, flags);
1060 for (i = virq; i < (virq + count); i++) { 1060 for (i = virq; i < (virq + count); i++) {
1061 struct irq_host *host; 1061 struct irq_host *host;
1062 1062
1063 host = irq_map[i].host; 1063 host = irq_map[i].host;
1064 irq_map[i].hwirq = host->inval_irq; 1064 irq_map[i].hwirq = host->inval_irq;
1065 smp_wmb(); 1065 smp_wmb();
1066 irq_map[i].host = NULL; 1066 irq_map[i].host = NULL;
1067 } 1067 }
1068 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 1068 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
1069 } 1069 }
1070 1070
1071 int arch_early_irq_init(void) 1071 int arch_early_irq_init(void)
1072 { 1072 {
1073 return 0; 1073 return 0;
1074 } 1074 }
1075 1075
1076 #ifdef CONFIG_VIRQ_DEBUG 1076 #ifdef CONFIG_VIRQ_DEBUG
1077 static int virq_debug_show(struct seq_file *m, void *private) 1077 static int virq_debug_show(struct seq_file *m, void *private)
1078 { 1078 {
1079 unsigned long flags; 1079 unsigned long flags;
1080 struct irq_desc *desc; 1080 struct irq_desc *desc;
1081 const char *p; 1081 const char *p;
1082 static const char none[] = "none"; 1082 static const char none[] = "none";
1083 void *data; 1083 void *data;
1084 int i; 1084 int i;
1085 1085
1086 seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq", 1086 seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq",
1087 "chip name", "chip data", "host name"); 1087 "chip name", "chip data", "host name");
1088 1088
1089 for (i = 1; i < nr_irqs; i++) { 1089 for (i = 1; i < nr_irqs; i++) {
1090 desc = irq_to_desc(i); 1090 desc = irq_to_desc(i);
1091 if (!desc) 1091 if (!desc)
1092 continue; 1092 continue;
1093 1093
1094 raw_spin_lock_irqsave(&desc->lock, flags); 1094 raw_spin_lock_irqsave(&desc->lock, flags);
1095 1095
1096 if (desc->action && desc->action->handler) { 1096 if (desc->action && desc->action->handler) {
1097 struct irq_chip *chip; 1097 struct irq_chip *chip;
1098 1098
1099 seq_printf(m, "%5d ", i); 1099 seq_printf(m, "%5d ", i);
1100 seq_printf(m, "0x%05lx ", irq_map[i].hwirq); 1100 seq_printf(m, "0x%05lx ", irq_map[i].hwirq);
1101 1101
1102 chip = irq_desc_get_chip(desc); 1102 chip = irq_desc_get_chip(desc);
1103 if (chip && chip->name) 1103 if (chip && chip->name)
1104 p = chip->name; 1104 p = chip->name;
1105 else 1105 else
1106 p = none; 1106 p = none;
1107 seq_printf(m, "%-15s ", p); 1107 seq_printf(m, "%-15s ", p);
1108 1108
1109 data = irq_desc_get_chip_data(desc); 1109 data = irq_desc_get_chip_data(desc);
1110 seq_printf(m, "0x%16p ", data); 1110 seq_printf(m, "0x%16p ", data);
1111 1111
1112 if (irq_map[i].host && irq_map[i].host->of_node) 1112 if (irq_map[i].host && irq_map[i].host->of_node)
1113 p = irq_map[i].host->of_node->full_name; 1113 p = irq_map[i].host->of_node->full_name;
1114 else 1114 else
1115 p = none; 1115 p = none;
1116 seq_printf(m, "%s\n", p); 1116 seq_printf(m, "%s\n", p);
1117 } 1117 }
1118 1118
1119 raw_spin_unlock_irqrestore(&desc->lock, flags); 1119 raw_spin_unlock_irqrestore(&desc->lock, flags);
1120 } 1120 }
1121 1121
1122 return 0; 1122 return 0;
1123 } 1123 }
1124 1124
1125 static int virq_debug_open(struct inode *inode, struct file *file) 1125 static int virq_debug_open(struct inode *inode, struct file *file)
1126 { 1126 {
1127 return single_open(file, virq_debug_show, inode->i_private); 1127 return single_open(file, virq_debug_show, inode->i_private);
1128 } 1128 }
1129 1129
1130 static const struct file_operations virq_debug_fops = { 1130 static const struct file_operations virq_debug_fops = {
1131 .open = virq_debug_open, 1131 .open = virq_debug_open,
1132 .read = seq_read, 1132 .read = seq_read,
1133 .llseek = seq_lseek, 1133 .llseek = seq_lseek,
1134 .release = single_release, 1134 .release = single_release,
1135 }; 1135 };
1136 1136
1137 static int __init irq_debugfs_init(void) 1137 static int __init irq_debugfs_init(void)
1138 { 1138 {
1139 if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root, 1139 if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
1140 NULL, &virq_debug_fops) == NULL) 1140 NULL, &virq_debug_fops) == NULL)
1141 return -ENOMEM; 1141 return -ENOMEM;
1142 1142
1143 return 0; 1143 return 0;
1144 } 1144 }
1145 __initcall(irq_debugfs_init); 1145 __initcall(irq_debugfs_init);
1146 #endif /* CONFIG_VIRQ_DEBUG */ 1146 #endif /* CONFIG_VIRQ_DEBUG */
1147 1147
1148 #ifdef CONFIG_PPC64 1148 #ifdef CONFIG_PPC64
1149 static int __init setup_noirqdistrib(char *str) 1149 static int __init setup_noirqdistrib(char *str)
1150 { 1150 {
1151 distribute_irqs = 0; 1151 distribute_irqs = 0;
1152 return 1; 1152 return 1;
1153 } 1153 }
1154 1154
1155 __setup("noirqdistrib", setup_noirqdistrib); 1155 __setup("noirqdistrib", setup_noirqdistrib);
1156 #endif /* CONFIG_PPC64 */ 1156 #endif /* CONFIG_PPC64 */
1157 1157
arch/powerpc/kernel/of_platform.c
1 /* 1 /*
2 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp. 2 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
3 * <benh@kernel.crashing.org> 3 * <benh@kernel.crashing.org>
4 * and Arnd Bergmann, IBM Corp. 4 * and Arnd Bergmann, IBM Corp.
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 * 10 *
11 */ 11 */
12 12
13 #undef DEBUG 13 #undef DEBUG
14 14
15 #include <linux/string.h> 15 #include <linux/string.h>
16 #include <linux/kernel.h> 16 #include <linux/kernel.h>
17 #include <linux/init.h> 17 #include <linux/init.h>
18 #include <linux/module.h> 18 #include <linux/export.h>
19 #include <linux/mod_devicetable.h> 19 #include <linux/mod_devicetable.h>
20 #include <linux/pci.h> 20 #include <linux/pci.h>
21 #include <linux/of.h> 21 #include <linux/of.h>
22 #include <linux/of_device.h> 22 #include <linux/of_device.h>
23 #include <linux/of_platform.h> 23 #include <linux/of_platform.h>
24 24
25 #include <asm/errno.h> 25 #include <asm/errno.h>
26 #include <asm/topology.h> 26 #include <asm/topology.h>
27 #include <asm/pci-bridge.h> 27 #include <asm/pci-bridge.h>
28 #include <asm/ppc-pci.h> 28 #include <asm/ppc-pci.h>
29 #include <linux/atomic.h> 29 #include <linux/atomic.h>
30 30
31 #ifdef CONFIG_PPC_OF_PLATFORM_PCI 31 #ifdef CONFIG_PPC_OF_PLATFORM_PCI
32 32
33 /* The probing of PCI controllers from of_platform is currently 33 /* The probing of PCI controllers from of_platform is currently
34 * 64-bit only, mostly due to gratuitous differences between 34 * 64-bit only, mostly due to gratuitous differences between
35 * the 32-bit and 64-bit PCI code on PowerPC and the 32-bit one 35 * the 32-bit and 64-bit PCI code on PowerPC and the 32-bit one
36 * lacking some bits needed here. 36 * lacking some bits needed here.
37 */ 37 */
38 38
39 static int __devinit of_pci_phb_probe(struct platform_device *dev) 39 static int __devinit of_pci_phb_probe(struct platform_device *dev)
40 { 40 {
41 struct pci_controller *phb; 41 struct pci_controller *phb;
42 42
43 /* Check if we can do that ... */ 43 /* Check if we can do that ... */
44 if (ppc_md.pci_setup_phb == NULL) 44 if (ppc_md.pci_setup_phb == NULL)
45 return -ENODEV; 45 return -ENODEV;
46 46
47 pr_info("Setting up PCI bus %s\n", dev->dev.of_node->full_name); 47 pr_info("Setting up PCI bus %s\n", dev->dev.of_node->full_name);
48 48
49 /* Alloc and setup PHB data structure */ 49 /* Alloc and setup PHB data structure */
50 phb = pcibios_alloc_controller(dev->dev.of_node); 50 phb = pcibios_alloc_controller(dev->dev.of_node);
51 if (!phb) 51 if (!phb)
52 return -ENODEV; 52 return -ENODEV;
53 53
54 /* Setup parent in sysfs */ 54 /* Setup parent in sysfs */
55 phb->parent = &dev->dev; 55 phb->parent = &dev->dev;
56 56
57 /* Setup the PHB using arch provided callback */ 57 /* Setup the PHB using arch provided callback */
58 if (ppc_md.pci_setup_phb(phb)) { 58 if (ppc_md.pci_setup_phb(phb)) {
59 pcibios_free_controller(phb); 59 pcibios_free_controller(phb);
60 return -ENODEV; 60 return -ENODEV;
61 } 61 }
62 62
63 /* Process "ranges" property */ 63 /* Process "ranges" property */
64 pci_process_bridge_OF_ranges(phb, dev->dev.of_node, 0); 64 pci_process_bridge_OF_ranges(phb, dev->dev.of_node, 0);
65 65
66 /* Init pci_dn data structures */ 66 /* Init pci_dn data structures */
67 pci_devs_phb_init_dynamic(phb); 67 pci_devs_phb_init_dynamic(phb);
68 68
69 /* Register devices with EEH */ 69 /* Register devices with EEH */
70 #ifdef CONFIG_EEH 70 #ifdef CONFIG_EEH
71 if (dev->dev.of_node->child) 71 if (dev->dev.of_node->child)
72 eeh_add_device_tree_early(dev->dev.of_node); 72 eeh_add_device_tree_early(dev->dev.of_node);
73 #endif /* CONFIG_EEH */ 73 #endif /* CONFIG_EEH */
74 74
75 /* Scan the bus */ 75 /* Scan the bus */
76 pcibios_scan_phb(phb); 76 pcibios_scan_phb(phb);
77 if (phb->bus == NULL) 77 if (phb->bus == NULL)
78 return -ENXIO; 78 return -ENXIO;
79 79
80 /* Claim resources. This might need some rework as well depending 80 /* Claim resources. This might need some rework as well depending
81 * on whether we are doing probe-only or not, like assigning unassigned 81 * on whether we are doing probe-only or not, like assigning unassigned
82 * resources etc... 82 * resources etc...
83 */ 83 */
84 pcibios_claim_one_bus(phb->bus); 84 pcibios_claim_one_bus(phb->bus);
85 85
86 /* Finish EEH setup */ 86 /* Finish EEH setup */
87 #ifdef CONFIG_EEH 87 #ifdef CONFIG_EEH
88 eeh_add_device_tree_late(phb->bus); 88 eeh_add_device_tree_late(phb->bus);
89 #endif 89 #endif
90 90
91 /* Add probed PCI devices to the device model */ 91 /* Add probed PCI devices to the device model */
92 pci_bus_add_devices(phb->bus); 92 pci_bus_add_devices(phb->bus);
93 93
94 return 0; 94 return 0;
95 } 95 }
96 96
97 static struct of_device_id of_pci_phb_ids[] = { 97 static struct of_device_id of_pci_phb_ids[] = {
98 { .type = "pci", }, 98 { .type = "pci", },
99 { .type = "pcix", }, 99 { .type = "pcix", },
100 { .type = "pcie", }, 100 { .type = "pcie", },
101 { .type = "pciex", }, 101 { .type = "pciex", },
102 { .type = "ht", }, 102 { .type = "ht", },
103 {} 103 {}
104 }; 104 };
105 105
106 static struct platform_driver of_pci_phb_driver = { 106 static struct platform_driver of_pci_phb_driver = {
107 .probe = of_pci_phb_probe, 107 .probe = of_pci_phb_probe,
108 .driver = { 108 .driver = {
109 .name = "of-pci", 109 .name = "of-pci",
110 .owner = THIS_MODULE, 110 .owner = THIS_MODULE,
111 .of_match_table = of_pci_phb_ids, 111 .of_match_table = of_pci_phb_ids,
112 }, 112 },
113 }; 113 };
114 114
115 static __init int of_pci_phb_init(void) 115 static __init int of_pci_phb_init(void)
116 { 116 {
117 return platform_driver_register(&of_pci_phb_driver); 117 return platform_driver_register(&of_pci_phb_driver);
118 } 118 }
119 119
120 device_initcall(of_pci_phb_init); 120 device_initcall(of_pci_phb_init);
121 121
122 #endif /* CONFIG_PPC_OF_PLATFORM_PCI */ 122 #endif /* CONFIG_PPC_OF_PLATFORM_PCI */
123 123
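of_pci_phb_probe() above is a no-op unless the platform provides ppc_md.pci_setup_phb, so a 64-bit board port that wants its device-tree PHB nodes (type "pci", "pcie", "ht", ...) probed this way supplies that hook from its machine description. A rough sketch, with the board name and the body of the hook entirely hypothetical:

        /* Hypothetical machine hook consumed by of_pci_phb_probe(). */
        static int myboard_pci_setup_phb(struct pci_controller *phb)
        {
                /* map config space, set phb->ops, etc. for this board */
                return 0;       /* non-zero makes the probe free the PHB */
        }

        define_machine(myboard) {
                .name           = "MyBoard",
                .pci_setup_phb  = myboard_pci_setup_phb,
                /* ... other machdep callbacks ... */
        };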
arch/powerpc/kernel/paca.c
1 /* 1 /*
2 * c 2001 PPC 64 Team, IBM Corp 2 * c 2001 PPC 64 Team, IBM Corp
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 */ 8 */
9 9
10 #include <linux/smp.h> 10 #include <linux/smp.h>
11 #include <linux/module.h> 11 #include <linux/export.h>
12 #include <linux/memblock.h> 12 #include <linux/memblock.h>
13 13
14 #include <asm/firmware.h> 14 #include <asm/firmware.h>
15 #include <asm/lppaca.h> 15 #include <asm/lppaca.h>
16 #include <asm/paca.h> 16 #include <asm/paca.h>
17 #include <asm/sections.h> 17 #include <asm/sections.h>
18 #include <asm/pgtable.h> 18 #include <asm/pgtable.h>
19 #include <asm/iseries/lpar_map.h> 19 #include <asm/iseries/lpar_map.h>
20 #include <asm/iseries/hv_types.h> 20 #include <asm/iseries/hv_types.h>
21 #include <asm/kexec.h> 21 #include <asm/kexec.h>
22 22
23 /* This symbol is provided by the linker - let it fill in the paca 23 /* This symbol is provided by the linker - let it fill in the paca
24 * field correctly */ 24 * field correctly */
25 extern unsigned long __toc_start; 25 extern unsigned long __toc_start;
26 26
27 #ifdef CONFIG_PPC_BOOK3S 27 #ifdef CONFIG_PPC_BOOK3S
28 28
29 /* 29 /*
30 * The structure which the hypervisor knows about - this structure 30 * The structure which the hypervisor knows about - this structure
31 * should not cross a page boundary. The vpa_init/register_vpa call 31 * should not cross a page boundary. The vpa_init/register_vpa call
32 * is now known to fail if the lppaca structure crosses a page 32 * is now known to fail if the lppaca structure crosses a page
33 * boundary. The lppaca is also used on legacy iSeries and POWER5 33 * boundary. The lppaca is also used on legacy iSeries and POWER5
34 * pSeries boxes. The lppaca is 640 bytes long, and cannot readily 34 * pSeries boxes. The lppaca is 640 bytes long, and cannot readily
35 * change since the hypervisor knows its layout, so a 1kB alignment 35 * change since the hypervisor knows its layout, so a 1kB alignment
36 * will suffice to ensure that it doesn't cross a page boundary. 36 * will suffice to ensure that it doesn't cross a page boundary.
37 */ 37 */
38 struct lppaca lppaca[] = { 38 struct lppaca lppaca[] = {
39 [0 ... (NR_LPPACAS-1)] = { 39 [0 ... (NR_LPPACAS-1)] = {
40 .desc = 0xd397d781, /* "LpPa" */ 40 .desc = 0xd397d781, /* "LpPa" */
41 .size = sizeof(struct lppaca), 41 .size = sizeof(struct lppaca),
42 .dyn_proc_status = 2, 42 .dyn_proc_status = 2,
43 .decr_val = 0x00ff0000, 43 .decr_val = 0x00ff0000,
44 .fpregs_in_use = 1, 44 .fpregs_in_use = 1,
45 .end_of_quantum = 0xfffffffffffffffful, 45 .end_of_quantum = 0xfffffffffffffffful,
46 .slb_count = 64, 46 .slb_count = 64,
47 .vmxregs_in_use = 0, 47 .vmxregs_in_use = 0,
48 .page_ins = 0, 48 .page_ins = 0,
49 }, 49 },
50 }; 50 };
51 51
52 static struct lppaca *extra_lppacas; 52 static struct lppaca *extra_lppacas;
53 static long __initdata lppaca_size; 53 static long __initdata lppaca_size;
54 54
55 static void allocate_lppacas(int nr_cpus, unsigned long limit) 55 static void allocate_lppacas(int nr_cpus, unsigned long limit)
56 { 56 {
57 if (nr_cpus <= NR_LPPACAS) 57 if (nr_cpus <= NR_LPPACAS)
58 return; 58 return;
59 59
60 lppaca_size = PAGE_ALIGN(sizeof(struct lppaca) * 60 lppaca_size = PAGE_ALIGN(sizeof(struct lppaca) *
61 (nr_cpus - NR_LPPACAS)); 61 (nr_cpus - NR_LPPACAS));
62 extra_lppacas = __va(memblock_alloc_base(lppaca_size, 62 extra_lppacas = __va(memblock_alloc_base(lppaca_size,
63 PAGE_SIZE, limit)); 63 PAGE_SIZE, limit));
64 } 64 }
65 65
66 static struct lppaca *new_lppaca(int cpu) 66 static struct lppaca *new_lppaca(int cpu)
67 { 67 {
68 struct lppaca *lp; 68 struct lppaca *lp;
69 69
70 if (cpu < NR_LPPACAS) 70 if (cpu < NR_LPPACAS)
71 return &lppaca[cpu]; 71 return &lppaca[cpu];
72 72
73 lp = extra_lppacas + (cpu - NR_LPPACAS); 73 lp = extra_lppacas + (cpu - NR_LPPACAS);
74 *lp = lppaca[0]; 74 *lp = lppaca[0];
75 75
76 return lp; 76 return lp;
77 } 77 }
78 78
79 static void free_lppacas(void) 79 static void free_lppacas(void)
80 { 80 {
81 long new_size = 0, nr; 81 long new_size = 0, nr;
82 82
83 if (!lppaca_size) 83 if (!lppaca_size)
84 return; 84 return;
85 nr = num_possible_cpus() - NR_LPPACAS; 85 nr = num_possible_cpus() - NR_LPPACAS;
86 if (nr > 0) 86 if (nr > 0)
87 new_size = PAGE_ALIGN(nr * sizeof(struct lppaca)); 87 new_size = PAGE_ALIGN(nr * sizeof(struct lppaca));
88 if (new_size >= lppaca_size) 88 if (new_size >= lppaca_size)
89 return; 89 return;
90 90
91 memblock_free(__pa(extra_lppacas) + new_size, lppaca_size - new_size); 91 memblock_free(__pa(extra_lppacas) + new_size, lppaca_size - new_size);
92 lppaca_size = new_size; 92 lppaca_size = new_size;
93 } 93 }
94 94
95 #else 95 #else
96 96
97 static inline void allocate_lppacas(int nr_cpus, unsigned long limit) { } 97 static inline void allocate_lppacas(int nr_cpus, unsigned long limit) { }
98 static inline void free_lppacas(void) { } 98 static inline void free_lppacas(void) { }
99 99
100 #endif /* CONFIG_PPC_BOOK3S */ 100 #endif /* CONFIG_PPC_BOOK3S */
101 101
102 #ifdef CONFIG_PPC_STD_MMU_64 102 #ifdef CONFIG_PPC_STD_MMU_64
103 103
104 /* 104 /*
105 * 3 persistent SLBs are registered here. The buffer will be zero 105 * 3 persistent SLBs are registered here. The buffer will be zero
106 * initially, hence will all be invalid until we actually write them. 106 * initially, hence will all be invalid until we actually write them.
107 */ 107 */
108 struct slb_shadow slb_shadow[] __cacheline_aligned = { 108 struct slb_shadow slb_shadow[] __cacheline_aligned = {
109 [0 ... (NR_CPUS-1)] = { 109 [0 ... (NR_CPUS-1)] = {
110 .persistent = SLB_NUM_BOLTED, 110 .persistent = SLB_NUM_BOLTED,
111 .buffer_length = sizeof(struct slb_shadow), 111 .buffer_length = sizeof(struct slb_shadow),
112 }, 112 },
113 }; 113 };
114 114
115 #endif /* CONFIG_PPC_STD_MMU_64 */ 115 #endif /* CONFIG_PPC_STD_MMU_64 */
116 116
117 /* The Paca is an array with one entry per processor. Each contains an 117 /* The Paca is an array with one entry per processor. Each contains an
118 * lppaca, which contains the information shared between the 118 * lppaca, which contains the information shared between the
119 * hypervisor and Linux. 119 * hypervisor and Linux.
120 * On systems with hardware multi-threading, there are two threads 120 * On systems with hardware multi-threading, there are two threads
121 * per processor. The Paca array must contain an entry for each thread. 121 * per processor. The Paca array must contain an entry for each thread.
122 * The VPD Areas will give a max logical processors = 2 * max physical 122 * The VPD Areas will give a max logical processors = 2 * max physical
123 * processors. The processor VPD array needs one entry per physical 123 * processors. The processor VPD array needs one entry per physical
124 * processor (not thread). 124 * processor (not thread).
125 */ 125 */
126 struct paca_struct *paca; 126 struct paca_struct *paca;
127 EXPORT_SYMBOL(paca); 127 EXPORT_SYMBOL(paca);
128 128
129 struct paca_struct boot_paca; 129 struct paca_struct boot_paca;
130 130
131 void __init initialise_paca(struct paca_struct *new_paca, int cpu) 131 void __init initialise_paca(struct paca_struct *new_paca, int cpu)
132 { 132 {
133 /* The TOC register (GPR2) points 32kB into the TOC, so that 64kB 133 /* The TOC register (GPR2) points 32kB into the TOC, so that 64kB
134 * of the TOC can be addressed using a single machine instruction. 134 * of the TOC can be addressed using a single machine instruction.
135 */ 135 */
136 unsigned long kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL; 136 unsigned long kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL;
137 137
138 #ifdef CONFIG_PPC_BOOK3S 138 #ifdef CONFIG_PPC_BOOK3S
139 new_paca->lppaca_ptr = new_lppaca(cpu); 139 new_paca->lppaca_ptr = new_lppaca(cpu);
140 #else 140 #else
141 new_paca->kernel_pgd = swapper_pg_dir; 141 new_paca->kernel_pgd = swapper_pg_dir;
142 #endif 142 #endif
143 new_paca->lock_token = 0x8000; 143 new_paca->lock_token = 0x8000;
144 new_paca->paca_index = cpu; 144 new_paca->paca_index = cpu;
145 new_paca->kernel_toc = kernel_toc; 145 new_paca->kernel_toc = kernel_toc;
146 new_paca->kernelbase = (unsigned long) _stext; 146 new_paca->kernelbase = (unsigned long) _stext;
147 new_paca->kernel_msr = MSR_KERNEL; 147 new_paca->kernel_msr = MSR_KERNEL;
148 new_paca->hw_cpu_id = 0xffff; 148 new_paca->hw_cpu_id = 0xffff;
149 new_paca->kexec_state = KEXEC_STATE_NONE; 149 new_paca->kexec_state = KEXEC_STATE_NONE;
150 new_paca->__current = &init_task; 150 new_paca->__current = &init_task;
151 #ifdef CONFIG_PPC_STD_MMU_64 151 #ifdef CONFIG_PPC_STD_MMU_64
152 new_paca->slb_shadow_ptr = &slb_shadow[cpu]; 152 new_paca->slb_shadow_ptr = &slb_shadow[cpu];
153 #endif /* CONFIG_PPC_STD_MMU_64 */ 153 #endif /* CONFIG_PPC_STD_MMU_64 */
154 } 154 }
155 155
156 /* Put the paca pointer into r13 and SPRG_PACA */ 156 /* Put the paca pointer into r13 and SPRG_PACA */
157 void setup_paca(struct paca_struct *new_paca) 157 void setup_paca(struct paca_struct *new_paca)
158 { 158 {
159 /* Setup r13 */ 159 /* Setup r13 */
160 local_paca = new_paca; 160 local_paca = new_paca;
161 161
162 #ifdef CONFIG_PPC_BOOK3E 162 #ifdef CONFIG_PPC_BOOK3E
163 /* On Book3E, initialize the TLB miss exception frames */ 163 /* On Book3E, initialize the TLB miss exception frames */
164 mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb); 164 mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
165 #else 165 #else
166 /* In HV mode, we setup both HPACA and PACA to avoid problems 166 /* In HV mode, we setup both HPACA and PACA to avoid problems
167 * if we do a GET_PACA() before the feature fixups have been 167 * if we do a GET_PACA() before the feature fixups have been
168 * applied 168 * applied
169 */ 169 */
170 if (cpu_has_feature(CPU_FTR_HVMODE)) 170 if (cpu_has_feature(CPU_FTR_HVMODE))
171 mtspr(SPRN_SPRG_HPACA, local_paca); 171 mtspr(SPRN_SPRG_HPACA, local_paca);
172 #endif 172 #endif
173 mtspr(SPRN_SPRG_PACA, local_paca); 173 mtspr(SPRN_SPRG_PACA, local_paca);
174 174
175 } 175 }
176 176
177 static int __initdata paca_size; 177 static int __initdata paca_size;
178 178
179 void __init allocate_pacas(void) 179 void __init allocate_pacas(void)
180 { 180 {
181 int cpu, limit; 181 int cpu, limit;
182 182
183 /* 183 /*
184 * We can't take SLB misses on the paca, and we want to access them 184 * We can't take SLB misses on the paca, and we want to access them
185 * in real mode, so allocate them within the RMA and also within 185 * in real mode, so allocate them within the RMA and also within
186 * the first segment. On iSeries they must be within the area mapped 186 * the first segment. On iSeries they must be within the area mapped
187 * by the HV, which is HvPagesToMap * HVPAGESIZE bytes. 187 * by the HV, which is HvPagesToMap * HVPAGESIZE bytes.
188 */ 188 */
189 limit = min(0x10000000ULL, ppc64_rma_size); 189 limit = min(0x10000000ULL, ppc64_rma_size);
190 if (firmware_has_feature(FW_FEATURE_ISERIES)) 190 if (firmware_has_feature(FW_FEATURE_ISERIES))
191 limit = min(limit, HvPagesToMap * HVPAGESIZE); 191 limit = min(limit, HvPagesToMap * HVPAGESIZE);
192 192
193 paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids); 193 paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);
194 194
195 paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit)); 195 paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit));
196 memset(paca, 0, paca_size); 196 memset(paca, 0, paca_size);
197 197
198 printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n", 198 printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n",
199 paca_size, nr_cpu_ids, paca); 199 paca_size, nr_cpu_ids, paca);
200 200
201 allocate_lppacas(nr_cpu_ids, limit); 201 allocate_lppacas(nr_cpu_ids, limit);
202 202
203 /* Can't use for_each_*_cpu, as they aren't functional yet */ 203 /* Can't use for_each_*_cpu, as they aren't functional yet */
204 for (cpu = 0; cpu < nr_cpu_ids; cpu++) 204 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
205 initialise_paca(&paca[cpu], cpu); 205 initialise_paca(&paca[cpu], cpu);
206 } 206 }
207 207
208 void __init free_unused_pacas(void) 208 void __init free_unused_pacas(void)
209 { 209 {
210 int new_size; 210 int new_size;
211 211
212 new_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids); 212 new_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);
213 213
214 if (new_size >= paca_size) 214 if (new_size >= paca_size)
215 return; 215 return;
216 216
217 memblock_free(__pa(paca) + new_size, paca_size - new_size); 217 memblock_free(__pa(paca) + new_size, paca_size - new_size);
218 218
219 printk(KERN_DEBUG "Freed %u bytes for unused pacas\n", 219 printk(KERN_DEBUG "Freed %u bytes for unused pacas\n",
220 paca_size - new_size); 220 paca_size - new_size);
221 221
222 paca_size = new_size; 222 paca_size = new_size;
223 223
224 free_lppacas(); 224 free_lppacas();
225 } 225 }
226 226
arch/powerpc/kernel/pmc.c
1 /* 1 /*
2 * arch/powerpc/kernel/pmc.c 2 * arch/powerpc/kernel/pmc.c
3 * 3 *
4 * Copyright (C) 2004 David Gibson, IBM Corporation. 4 * Copyright (C) 2004 David Gibson, IBM Corporation.
5 * Includes code formerly from arch/ppc/kernel/perfmon.c: 5 * Includes code formerly from arch/ppc/kernel/perfmon.c:
6 * Author: Andy Fleming 6 * Author: Andy Fleming
7 * Copyright (c) 2004 Freescale Semiconductor, Inc 7 * Copyright (c) 2004 Freescale Semiconductor, Inc
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version. 12 * 2 of the License, or (at your option) any later version.
13 */ 13 */
14 14
15 #include <linux/errno.h> 15 #include <linux/errno.h>
16 #include <linux/spinlock.h> 16 #include <linux/spinlock.h>
17 #include <linux/module.h> 17 #include <linux/export.h>
18 18
19 #include <asm/processor.h> 19 #include <asm/processor.h>
20 #include <asm/cputable.h> 20 #include <asm/cputable.h>
21 #include <asm/pmc.h> 21 #include <asm/pmc.h>
22 22
23 #ifndef MMCR0_PMAO 23 #ifndef MMCR0_PMAO
24 #define MMCR0_PMAO 0 24 #define MMCR0_PMAO 0
25 #endif 25 #endif
26 26
27 static void dummy_perf(struct pt_regs *regs) 27 static void dummy_perf(struct pt_regs *regs)
28 { 28 {
29 #if defined(CONFIG_FSL_EMB_PERFMON) 29 #if defined(CONFIG_FSL_EMB_PERFMON)
30 mtpmr(PMRN_PMGC0, mfpmr(PMRN_PMGC0) & ~PMGC0_PMIE); 30 mtpmr(PMRN_PMGC0, mfpmr(PMRN_PMGC0) & ~PMGC0_PMIE);
31 #elif defined(CONFIG_PPC64) || defined(CONFIG_6xx) 31 #elif defined(CONFIG_PPC64) || defined(CONFIG_6xx)
32 if (cur_cpu_spec->pmc_type == PPC_PMC_IBM) 32 if (cur_cpu_spec->pmc_type == PPC_PMC_IBM)
33 mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~(MMCR0_PMXE|MMCR0_PMAO)); 33 mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~(MMCR0_PMXE|MMCR0_PMAO));
34 #else 34 #else
35 mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_PMXE); 35 mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_PMXE);
36 #endif 36 #endif
37 } 37 }
38 38
39 39
40 static DEFINE_RAW_SPINLOCK(pmc_owner_lock); 40 static DEFINE_RAW_SPINLOCK(pmc_owner_lock);
41 static void *pmc_owner_caller; /* mostly for debugging */ 41 static void *pmc_owner_caller; /* mostly for debugging */
42 perf_irq_t perf_irq = dummy_perf; 42 perf_irq_t perf_irq = dummy_perf;
43 43
44 int reserve_pmc_hardware(perf_irq_t new_perf_irq) 44 int reserve_pmc_hardware(perf_irq_t new_perf_irq)
45 { 45 {
46 int err = 0; 46 int err = 0;
47 47
48 raw_spin_lock(&pmc_owner_lock); 48 raw_spin_lock(&pmc_owner_lock);
49 49
50 if (pmc_owner_caller) { 50 if (pmc_owner_caller) {
51 printk(KERN_WARNING "reserve_pmc_hardware: " 51 printk(KERN_WARNING "reserve_pmc_hardware: "
52 "PMC hardware busy (reserved by caller %p)\n", 52 "PMC hardware busy (reserved by caller %p)\n",
53 pmc_owner_caller); 53 pmc_owner_caller);
54 err = -EBUSY; 54 err = -EBUSY;
55 goto out; 55 goto out;
56 } 56 }
57 57
58 pmc_owner_caller = __builtin_return_address(0); 58 pmc_owner_caller = __builtin_return_address(0);
59 perf_irq = new_perf_irq ? new_perf_irq : dummy_perf; 59 perf_irq = new_perf_irq ? new_perf_irq : dummy_perf;
60 60
61 out: 61 out:
62 raw_spin_unlock(&pmc_owner_lock); 62 raw_spin_unlock(&pmc_owner_lock);
63 return err; 63 return err;
64 } 64 }
65 EXPORT_SYMBOL_GPL(reserve_pmc_hardware); 65 EXPORT_SYMBOL_GPL(reserve_pmc_hardware);
66 66
67 void release_pmc_hardware(void) 67 void release_pmc_hardware(void)
68 { 68 {
69 raw_spin_lock(&pmc_owner_lock); 69 raw_spin_lock(&pmc_owner_lock);
70 70
71 WARN_ON(! pmc_owner_caller); 71 WARN_ON(! pmc_owner_caller);
72 72
73 pmc_owner_caller = NULL; 73 pmc_owner_caller = NULL;
74 perf_irq = dummy_perf; 74 perf_irq = dummy_perf;
75 75
76 raw_spin_unlock(&pmc_owner_lock); 76 raw_spin_unlock(&pmc_owner_lock);
77 } 77 }
78 EXPORT_SYMBOL_GPL(release_pmc_hardware); 78 EXPORT_SYMBOL_GPL(release_pmc_hardware);
79 79
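reserve_pmc_hardware() and release_pmc_hardware() arbitrate ownership of the performance monitor counters: one owner at a time installs its overflow handler through the perf_irq pointer and restores the dummy handler when it is done. A hedged usage sketch, with the handler body and the surrounding driver structure invented for illustration:

        /* Hypothetical PMC client: claim the counters, run, release. */
        static void my_pmc_overflow(struct pt_regs *regs)
        {
                /* read/reset the counters, record a sample, ... */
        }

        static int my_pmc_start(void)
        {
                int err = reserve_pmc_hardware(my_pmc_overflow);

                if (err)
                        return err;     /* -EBUSY: someone else owns them */

                /* program MMCR0 and the PMCs here ... */
                return 0;
        }

        static void my_pmc_stop(void)
        {
                release_pmc_hardware(); /* perf_irq falls back to dummy_perf */
        }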
80 #ifdef CONFIG_PPC64 80 #ifdef CONFIG_PPC64
81 void power4_enable_pmcs(void) 81 void power4_enable_pmcs(void)
82 { 82 {
83 unsigned long hid0; 83 unsigned long hid0;
84 84
85 hid0 = mfspr(SPRN_HID0); 85 hid0 = mfspr(SPRN_HID0);
86 hid0 |= 1UL << (63 - 20); 86 hid0 |= 1UL << (63 - 20);
87 87
88 /* POWER4 requires the following sequence */ 88 /* POWER4 requires the following sequence */
89 asm volatile( 89 asm volatile(
90 "sync\n" 90 "sync\n"
91 "mtspr %1, %0\n" 91 "mtspr %1, %0\n"
92 "mfspr %0, %1\n" 92 "mfspr %0, %1\n"
93 "mfspr %0, %1\n" 93 "mfspr %0, %1\n"
94 "mfspr %0, %1\n" 94 "mfspr %0, %1\n"
95 "mfspr %0, %1\n" 95 "mfspr %0, %1\n"
96 "mfspr %0, %1\n" 96 "mfspr %0, %1\n"
97 "mfspr %0, %1\n" 97 "mfspr %0, %1\n"
98 "isync" : "=&r" (hid0) : "i" (SPRN_HID0), "0" (hid0): 98 "isync" : "=&r" (hid0) : "i" (SPRN_HID0), "0" (hid0):
99 "memory"); 99 "memory");
100 } 100 }
101 #endif /* CONFIG_PPC64 */ 101 #endif /* CONFIG_PPC64 */
102 102
arch/powerpc/kernel/ppc_ksyms.c
1 #include <linux/module.h> 1 #include <linux/export.h>
2 #include <linux/threads.h> 2 #include <linux/threads.h>
3 #include <linux/smp.h> 3 #include <linux/smp.h>
4 #include <linux/sched.h> 4 #include <linux/sched.h>
5 #include <linux/elfcore.h> 5 #include <linux/elfcore.h>
6 #include <linux/string.h> 6 #include <linux/string.h>
7 #include <linux/interrupt.h> 7 #include <linux/interrupt.h>
8 #include <linux/screen_info.h> 8 #include <linux/screen_info.h>
9 #include <linux/vt_kern.h> 9 #include <linux/vt_kern.h>
10 #include <linux/nvram.h> 10 #include <linux/nvram.h>
11 #include <linux/irq.h> 11 #include <linux/irq.h>
12 #include <linux/pci.h> 12 #include <linux/pci.h>
13 #include <linux/delay.h> 13 #include <linux/delay.h>
14 #include <linux/bitops.h> 14 #include <linux/bitops.h>
15 15
16 #include <asm/page.h> 16 #include <asm/page.h>
17 #include <asm/processor.h> 17 #include <asm/processor.h>
18 #include <asm/cacheflush.h> 18 #include <asm/cacheflush.h>
19 #include <asm/uaccess.h> 19 #include <asm/uaccess.h>
20 #include <asm/io.h> 20 #include <asm/io.h>
21 #include <linux/atomic.h> 21 #include <linux/atomic.h>
22 #include <asm/checksum.h> 22 #include <asm/checksum.h>
23 #include <asm/pgtable.h> 23 #include <asm/pgtable.h>
24 #include <asm/tlbflush.h> 24 #include <asm/tlbflush.h>
25 #include <linux/adb.h> 25 #include <linux/adb.h>
26 #include <linux/cuda.h> 26 #include <linux/cuda.h>
27 #include <linux/pmu.h> 27 #include <linux/pmu.h>
28 #include <asm/prom.h> 28 #include <asm/prom.h>
29 #include <asm/system.h> 29 #include <asm/system.h>
30 #include <asm/pci-bridge.h> 30 #include <asm/pci-bridge.h>
31 #include <asm/irq.h> 31 #include <asm/irq.h>
32 #include <asm/pmac_feature.h> 32 #include <asm/pmac_feature.h>
33 #include <asm/dma.h> 33 #include <asm/dma.h>
34 #include <asm/machdep.h> 34 #include <asm/machdep.h>
35 #include <asm/hw_irq.h> 35 #include <asm/hw_irq.h>
36 #include <asm/nvram.h> 36 #include <asm/nvram.h>
37 #include <asm/mmu_context.h> 37 #include <asm/mmu_context.h>
38 #include <asm/backlight.h> 38 #include <asm/backlight.h>
39 #include <asm/time.h> 39 #include <asm/time.h>
40 #include <asm/cputable.h> 40 #include <asm/cputable.h>
41 #include <asm/btext.h> 41 #include <asm/btext.h>
42 #include <asm/div64.h> 42 #include <asm/div64.h>
43 #include <asm/signal.h> 43 #include <asm/signal.h>
44 #include <asm/dcr.h> 44 #include <asm/dcr.h>
45 #include <asm/ftrace.h> 45 #include <asm/ftrace.h>
46 46
47 #ifdef CONFIG_PPC32 47 #ifdef CONFIG_PPC32
48 extern void transfer_to_handler(void); 48 extern void transfer_to_handler(void);
49 extern void do_IRQ(struct pt_regs *regs); 49 extern void do_IRQ(struct pt_regs *regs);
50 extern void machine_check_exception(struct pt_regs *regs); 50 extern void machine_check_exception(struct pt_regs *regs);
51 extern void alignment_exception(struct pt_regs *regs); 51 extern void alignment_exception(struct pt_regs *regs);
52 extern void program_check_exception(struct pt_regs *regs); 52 extern void program_check_exception(struct pt_regs *regs);
53 extern void single_step_exception(struct pt_regs *regs); 53 extern void single_step_exception(struct pt_regs *regs);
54 extern int sys_sigreturn(struct pt_regs *regs); 54 extern int sys_sigreturn(struct pt_regs *regs);
55 55
56 EXPORT_SYMBOL(clear_pages); 56 EXPORT_SYMBOL(clear_pages);
57 EXPORT_SYMBOL(ISA_DMA_THRESHOLD); 57 EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
58 EXPORT_SYMBOL(DMA_MODE_READ); 58 EXPORT_SYMBOL(DMA_MODE_READ);
59 EXPORT_SYMBOL(DMA_MODE_WRITE); 59 EXPORT_SYMBOL(DMA_MODE_WRITE);
60 60
61 EXPORT_SYMBOL(transfer_to_handler); 61 EXPORT_SYMBOL(transfer_to_handler);
62 EXPORT_SYMBOL(do_IRQ); 62 EXPORT_SYMBOL(do_IRQ);
63 EXPORT_SYMBOL(machine_check_exception); 63 EXPORT_SYMBOL(machine_check_exception);
64 EXPORT_SYMBOL(alignment_exception); 64 EXPORT_SYMBOL(alignment_exception);
65 EXPORT_SYMBOL(program_check_exception); 65 EXPORT_SYMBOL(program_check_exception);
66 EXPORT_SYMBOL(single_step_exception); 66 EXPORT_SYMBOL(single_step_exception);
67 EXPORT_SYMBOL(sys_sigreturn); 67 EXPORT_SYMBOL(sys_sigreturn);
68 #endif 68 #endif
69 69
70 #ifdef CONFIG_FUNCTION_TRACER 70 #ifdef CONFIG_FUNCTION_TRACER
71 EXPORT_SYMBOL(_mcount); 71 EXPORT_SYMBOL(_mcount);
72 #endif 72 #endif
73 73
74 EXPORT_SYMBOL(strcpy); 74 EXPORT_SYMBOL(strcpy);
75 EXPORT_SYMBOL(strncpy); 75 EXPORT_SYMBOL(strncpy);
76 EXPORT_SYMBOL(strcat); 76 EXPORT_SYMBOL(strcat);
77 EXPORT_SYMBOL(strlen); 77 EXPORT_SYMBOL(strlen);
78 EXPORT_SYMBOL(strcmp); 78 EXPORT_SYMBOL(strcmp);
79 EXPORT_SYMBOL(strncmp); 79 EXPORT_SYMBOL(strncmp);
80 80
81 EXPORT_SYMBOL(csum_partial); 81 EXPORT_SYMBOL(csum_partial);
82 EXPORT_SYMBOL(csum_partial_copy_generic); 82 EXPORT_SYMBOL(csum_partial_copy_generic);
83 EXPORT_SYMBOL(ip_fast_csum); 83 EXPORT_SYMBOL(ip_fast_csum);
84 EXPORT_SYMBOL(csum_tcpudp_magic); 84 EXPORT_SYMBOL(csum_tcpudp_magic);
85 85
86 EXPORT_SYMBOL(__copy_tofrom_user); 86 EXPORT_SYMBOL(__copy_tofrom_user);
87 EXPORT_SYMBOL(__clear_user); 87 EXPORT_SYMBOL(__clear_user);
88 EXPORT_SYMBOL(__strncpy_from_user); 88 EXPORT_SYMBOL(__strncpy_from_user);
89 EXPORT_SYMBOL(__strnlen_user); 89 EXPORT_SYMBOL(__strnlen_user);
90 EXPORT_SYMBOL(copy_page); 90 EXPORT_SYMBOL(copy_page);
91 91
92 #if defined(CONFIG_PCI) && defined(CONFIG_PPC32) 92 #if defined(CONFIG_PCI) && defined(CONFIG_PPC32)
93 EXPORT_SYMBOL(isa_io_base); 93 EXPORT_SYMBOL(isa_io_base);
94 EXPORT_SYMBOL(isa_mem_base); 94 EXPORT_SYMBOL(isa_mem_base);
95 EXPORT_SYMBOL(pci_dram_offset); 95 EXPORT_SYMBOL(pci_dram_offset);
96 #endif /* CONFIG_PCI */ 96 #endif /* CONFIG_PCI */
97 97
98 EXPORT_SYMBOL(start_thread); 98 EXPORT_SYMBOL(start_thread);
99 EXPORT_SYMBOL(kernel_thread); 99 EXPORT_SYMBOL(kernel_thread);
100 100
101 EXPORT_SYMBOL(giveup_fpu); 101 EXPORT_SYMBOL(giveup_fpu);
102 #ifdef CONFIG_ALTIVEC 102 #ifdef CONFIG_ALTIVEC
103 EXPORT_SYMBOL(giveup_altivec); 103 EXPORT_SYMBOL(giveup_altivec);
104 #endif /* CONFIG_ALTIVEC */ 104 #endif /* CONFIG_ALTIVEC */
105 #ifdef CONFIG_VSX 105 #ifdef CONFIG_VSX
106 EXPORT_SYMBOL(giveup_vsx); 106 EXPORT_SYMBOL(giveup_vsx);
107 EXPORT_SYMBOL_GPL(__giveup_vsx); 107 EXPORT_SYMBOL_GPL(__giveup_vsx);
108 #endif /* CONFIG_VSX */ 108 #endif /* CONFIG_VSX */
109 #ifdef CONFIG_SPE 109 #ifdef CONFIG_SPE
110 EXPORT_SYMBOL(giveup_spe); 110 EXPORT_SYMBOL(giveup_spe);
111 #endif /* CONFIG_SPE */ 111 #endif /* CONFIG_SPE */
112 112
113 #ifndef CONFIG_PPC64 113 #ifndef CONFIG_PPC64
114 EXPORT_SYMBOL(flush_instruction_cache); 114 EXPORT_SYMBOL(flush_instruction_cache);
115 #endif 115 #endif
116 EXPORT_SYMBOL(__flush_icache_range); 116 EXPORT_SYMBOL(__flush_icache_range);
117 EXPORT_SYMBOL(flush_dcache_range); 117 EXPORT_SYMBOL(flush_dcache_range);
118 118
119 #ifdef CONFIG_SMP 119 #ifdef CONFIG_SMP
120 #ifdef CONFIG_PPC32 120 #ifdef CONFIG_PPC32
121 EXPORT_SYMBOL(smp_hw_index); 121 EXPORT_SYMBOL(smp_hw_index);
122 #endif 122 #endif
123 #endif 123 #endif
124 124
125 #ifdef CONFIG_ADB 125 #ifdef CONFIG_ADB
126 EXPORT_SYMBOL(adb_request); 126 EXPORT_SYMBOL(adb_request);
127 EXPORT_SYMBOL(adb_register); 127 EXPORT_SYMBOL(adb_register);
128 EXPORT_SYMBOL(adb_unregister); 128 EXPORT_SYMBOL(adb_unregister);
129 EXPORT_SYMBOL(adb_poll); 129 EXPORT_SYMBOL(adb_poll);
130 EXPORT_SYMBOL(adb_try_handler_change); 130 EXPORT_SYMBOL(adb_try_handler_change);
131 #endif /* CONFIG_ADB */ 131 #endif /* CONFIG_ADB */
132 #ifdef CONFIG_ADB_CUDA 132 #ifdef CONFIG_ADB_CUDA
133 EXPORT_SYMBOL(cuda_request); 133 EXPORT_SYMBOL(cuda_request);
134 EXPORT_SYMBOL(cuda_poll); 134 EXPORT_SYMBOL(cuda_poll);
135 #endif /* CONFIG_ADB_CUDA */ 135 #endif /* CONFIG_ADB_CUDA */
136 EXPORT_SYMBOL(to_tm); 136 EXPORT_SYMBOL(to_tm);
137 137
138 #ifdef CONFIG_PPC32 138 #ifdef CONFIG_PPC32
139 long long __ashrdi3(long long, int); 139 long long __ashrdi3(long long, int);
140 long long __ashldi3(long long, int); 140 long long __ashldi3(long long, int);
141 long long __lshrdi3(long long, int); 141 long long __lshrdi3(long long, int);
142 EXPORT_SYMBOL(__ashrdi3); 142 EXPORT_SYMBOL(__ashrdi3);
143 EXPORT_SYMBOL(__ashldi3); 143 EXPORT_SYMBOL(__ashldi3);
144 EXPORT_SYMBOL(__lshrdi3); 144 EXPORT_SYMBOL(__lshrdi3);
145 int __ucmpdi2(unsigned long long, unsigned long long); 145 int __ucmpdi2(unsigned long long, unsigned long long);
146 EXPORT_SYMBOL(__ucmpdi2); 146 EXPORT_SYMBOL(__ucmpdi2);
147 #endif 147 #endif
148 148
149 EXPORT_SYMBOL(memcpy); 149 EXPORT_SYMBOL(memcpy);
150 EXPORT_SYMBOL(memset); 150 EXPORT_SYMBOL(memset);
151 EXPORT_SYMBOL(memmove); 151 EXPORT_SYMBOL(memmove);
152 EXPORT_SYMBOL(memcmp); 152 EXPORT_SYMBOL(memcmp);
153 EXPORT_SYMBOL(memchr); 153 EXPORT_SYMBOL(memchr);
154 154
155 #if defined(CONFIG_FB_VGA16_MODULE) 155 #if defined(CONFIG_FB_VGA16_MODULE)
156 EXPORT_SYMBOL(screen_info); 156 EXPORT_SYMBOL(screen_info);
157 #endif 157 #endif
158 158
159 #ifdef CONFIG_PPC32 159 #ifdef CONFIG_PPC32
160 EXPORT_SYMBOL(timer_interrupt); 160 EXPORT_SYMBOL(timer_interrupt);
161 EXPORT_SYMBOL(tb_ticks_per_jiffy); 161 EXPORT_SYMBOL(tb_ticks_per_jiffy);
162 EXPORT_SYMBOL(cacheable_memcpy); 162 EXPORT_SYMBOL(cacheable_memcpy);
163 EXPORT_SYMBOL(cacheable_memzero); 163 EXPORT_SYMBOL(cacheable_memzero);
164 #endif 164 #endif
165 165
166 #ifdef CONFIG_PPC32 166 #ifdef CONFIG_PPC32
167 EXPORT_SYMBOL(switch_mmu_context); 167 EXPORT_SYMBOL(switch_mmu_context);
168 #endif 168 #endif
169 169
170 #ifdef CONFIG_PPC_STD_MMU_32 170 #ifdef CONFIG_PPC_STD_MMU_32
171 extern long mol_trampoline; 171 extern long mol_trampoline;
172 EXPORT_SYMBOL(mol_trampoline); /* For MOL */ 172 EXPORT_SYMBOL(mol_trampoline); /* For MOL */
173 EXPORT_SYMBOL(flush_hash_pages); /* For MOL */ 173 EXPORT_SYMBOL(flush_hash_pages); /* For MOL */
174 #ifdef CONFIG_SMP 174 #ifdef CONFIG_SMP
175 extern int mmu_hash_lock; 175 extern int mmu_hash_lock;
176 EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */ 176 EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
177 #endif /* CONFIG_SMP */ 177 #endif /* CONFIG_SMP */
178 extern long *intercept_table; 178 extern long *intercept_table;
179 EXPORT_SYMBOL(intercept_table); 179 EXPORT_SYMBOL(intercept_table);
180 #endif /* CONFIG_PPC_STD_MMU_32 */ 180 #endif /* CONFIG_PPC_STD_MMU_32 */
181 #ifdef CONFIG_PPC_DCR_NATIVE 181 #ifdef CONFIG_PPC_DCR_NATIVE
182 EXPORT_SYMBOL(__mtdcr); 182 EXPORT_SYMBOL(__mtdcr);
183 EXPORT_SYMBOL(__mfdcr); 183 EXPORT_SYMBOL(__mfdcr);
184 #endif 184 #endif
185 EXPORT_SYMBOL(empty_zero_page); 185 EXPORT_SYMBOL(empty_zero_page);
186 186
187 #ifdef CONFIG_PPC64 187 #ifdef CONFIG_PPC64
188 EXPORT_SYMBOL(__arch_hweight8); 188 EXPORT_SYMBOL(__arch_hweight8);
189 EXPORT_SYMBOL(__arch_hweight16); 189 EXPORT_SYMBOL(__arch_hweight16);
190 EXPORT_SYMBOL(__arch_hweight32); 190 EXPORT_SYMBOL(__arch_hweight32);
191 EXPORT_SYMBOL(__arch_hweight64); 191 EXPORT_SYMBOL(__arch_hweight64);
192 #endif 192 #endif
193 193
arch/powerpc/kernel/process.c
1 /* 1 /*
2 * Derived from "arch/i386/kernel/process.c" 2 * Derived from "arch/i386/kernel/process.c"
3 * Copyright (C) 1995 Linus Torvalds 3 * Copyright (C) 1995 Linus Torvalds
4 * 4 *
5 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and 5 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
6 * Paul Mackerras (paulus@cs.anu.edu.au) 6 * Paul Mackerras (paulus@cs.anu.edu.au)
7 * 7 *
8 * PowerPC version 8 * PowerPC version
9 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 9 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
10 * 10 *
11 * This program is free software; you can redistribute it and/or 11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License 12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version. 14 * 2 of the License, or (at your option) any later version.
15 */ 15 */
16 16
17 #include <linux/errno.h> 17 #include <linux/errno.h>
18 #include <linux/sched.h> 18 #include <linux/sched.h>
19 #include <linux/kernel.h> 19 #include <linux/kernel.h>
20 #include <linux/mm.h> 20 #include <linux/mm.h>
21 #include <linux/smp.h> 21 #include <linux/smp.h>
22 #include <linux/stddef.h> 22 #include <linux/stddef.h>
23 #include <linux/unistd.h> 23 #include <linux/unistd.h>
24 #include <linux/ptrace.h> 24 #include <linux/ptrace.h>
25 #include <linux/slab.h> 25 #include <linux/slab.h>
26 #include <linux/user.h> 26 #include <linux/user.h>
27 #include <linux/elf.h> 27 #include <linux/elf.h>
28 #include <linux/init.h> 28 #include <linux/init.h>
29 #include <linux/prctl.h> 29 #include <linux/prctl.h>
30 #include <linux/init_task.h> 30 #include <linux/init_task.h>
31 #include <linux/module.h> 31 #include <linux/export.h>
32 #include <linux/kallsyms.h> 32 #include <linux/kallsyms.h>
33 #include <linux/mqueue.h> 33 #include <linux/mqueue.h>
34 #include <linux/hardirq.h> 34 #include <linux/hardirq.h>
35 #include <linux/utsname.h> 35 #include <linux/utsname.h>
36 #include <linux/ftrace.h> 36 #include <linux/ftrace.h>
37 #include <linux/kernel_stat.h> 37 #include <linux/kernel_stat.h>
38 #include <linux/personality.h> 38 #include <linux/personality.h>
39 #include <linux/random.h> 39 #include <linux/random.h>
40 #include <linux/hw_breakpoint.h> 40 #include <linux/hw_breakpoint.h>
41 41
42 #include <asm/pgtable.h> 42 #include <asm/pgtable.h>
43 #include <asm/uaccess.h> 43 #include <asm/uaccess.h>
44 #include <asm/system.h> 44 #include <asm/system.h>
45 #include <asm/io.h> 45 #include <asm/io.h>
46 #include <asm/processor.h> 46 #include <asm/processor.h>
47 #include <asm/mmu.h> 47 #include <asm/mmu.h>
48 #include <asm/prom.h> 48 #include <asm/prom.h>
49 #include <asm/machdep.h> 49 #include <asm/machdep.h>
50 #include <asm/time.h> 50 #include <asm/time.h>
51 #include <asm/syscalls.h> 51 #include <asm/syscalls.h>
52 #ifdef CONFIG_PPC64 52 #ifdef CONFIG_PPC64
53 #include <asm/firmware.h> 53 #include <asm/firmware.h>
54 #endif 54 #endif
55 #include <linux/kprobes.h> 55 #include <linux/kprobes.h>
56 #include <linux/kdebug.h> 56 #include <linux/kdebug.h>
57 57
58 extern unsigned long _get_SP(void); 58 extern unsigned long _get_SP(void);
59 59
60 #ifndef CONFIG_SMP 60 #ifndef CONFIG_SMP
61 struct task_struct *last_task_used_math = NULL; 61 struct task_struct *last_task_used_math = NULL;
62 struct task_struct *last_task_used_altivec = NULL; 62 struct task_struct *last_task_used_altivec = NULL;
63 struct task_struct *last_task_used_vsx = NULL; 63 struct task_struct *last_task_used_vsx = NULL;
64 struct task_struct *last_task_used_spe = NULL; 64 struct task_struct *last_task_used_spe = NULL;
65 #endif 65 #endif
66 66
67 /* 67 /*
68 * Make sure the floating-point register state in the 68 * Make sure the floating-point register state in the
69 * thread_struct is up to date for task tsk. 69 * thread_struct is up to date for task tsk.
70 */ 70 */
71 void flush_fp_to_thread(struct task_struct *tsk) 71 void flush_fp_to_thread(struct task_struct *tsk)
72 { 72 {
73 if (tsk->thread.regs) { 73 if (tsk->thread.regs) {
74 /* 74 /*
75 * We need to disable preemption here because if we didn't, 75 * We need to disable preemption here because if we didn't,
76 * another process could get scheduled after the regs->msr 76 * another process could get scheduled after the regs->msr
77 * test but before we have finished saving the FP registers 77 * test but before we have finished saving the FP registers
78 * to the thread_struct. That process could take over the 78 * to the thread_struct. That process could take over the
79 * FPU, and then when we get scheduled again we would store 79 * FPU, and then when we get scheduled again we would store
80 * bogus values for the remaining FP registers. 80 * bogus values for the remaining FP registers.
81 */ 81 */
82 preempt_disable(); 82 preempt_disable();
83 if (tsk->thread.regs->msr & MSR_FP) { 83 if (tsk->thread.regs->msr & MSR_FP) {
84 #ifdef CONFIG_SMP 84 #ifdef CONFIG_SMP
85 /* 85 /*
86 * This should only ever be called for current or 86 * This should only ever be called for current or
87 * for a stopped child process. Since we save away 87 * for a stopped child process. Since we save away
88 * the FP register state on context switch on SMP, 88 * the FP register state on context switch on SMP,
89 * there is something wrong if a stopped child appears 89 * there is something wrong if a stopped child appears
90 * to still have its FP state in the CPU registers. 90 * to still have its FP state in the CPU registers.
91 */ 91 */
92 BUG_ON(tsk != current); 92 BUG_ON(tsk != current);
93 #endif 93 #endif
94 giveup_fpu(tsk); 94 giveup_fpu(tsk);
95 } 95 }
96 preempt_enable(); 96 preempt_enable();
97 } 97 }
98 } 98 }
99 EXPORT_SYMBOL_GPL(flush_fp_to_thread); 99 EXPORT_SYMBOL_GPL(flush_fp_to_thread);
100 100
101 void enable_kernel_fp(void) 101 void enable_kernel_fp(void)
102 { 102 {
103 WARN_ON(preemptible()); 103 WARN_ON(preemptible());
104 104
105 #ifdef CONFIG_SMP 105 #ifdef CONFIG_SMP
106 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) 106 if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
107 giveup_fpu(current); 107 giveup_fpu(current);
108 else 108 else
109 giveup_fpu(NULL); /* just enables FP for kernel */ 109 giveup_fpu(NULL); /* just enables FP for kernel */
110 #else 110 #else
111 giveup_fpu(last_task_used_math); 111 giveup_fpu(last_task_used_math);
112 #endif /* CONFIG_SMP */ 112 #endif /* CONFIG_SMP */
113 } 113 }
114 EXPORT_SYMBOL(enable_kernel_fp); 114 EXPORT_SYMBOL(enable_kernel_fp);
115 115
116 #ifdef CONFIG_ALTIVEC 116 #ifdef CONFIG_ALTIVEC
117 void enable_kernel_altivec(void) 117 void enable_kernel_altivec(void)
118 { 118 {
119 WARN_ON(preemptible()); 119 WARN_ON(preemptible());
120 120
121 #ifdef CONFIG_SMP 121 #ifdef CONFIG_SMP
122 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) 122 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
123 giveup_altivec(current); 123 giveup_altivec(current);
124 else 124 else
125 giveup_altivec(NULL); /* just enable AltiVec for kernel - force */ 125 giveup_altivec(NULL); /* just enable AltiVec for kernel - force */
126 #else 126 #else
127 giveup_altivec(last_task_used_altivec); 127 giveup_altivec(last_task_used_altivec);
128 #endif /* CONFIG_SMP */ 128 #endif /* CONFIG_SMP */
129 } 129 }
130 EXPORT_SYMBOL(enable_kernel_altivec); 130 EXPORT_SYMBOL(enable_kernel_altivec);
131 131
132 /* 132 /*
133 * Make sure the VMX/Altivec register state in the 133 * Make sure the VMX/Altivec register state in the
134 * thread_struct is up to date for task tsk. 134 * thread_struct is up to date for task tsk.
135 */ 135 */
136 void flush_altivec_to_thread(struct task_struct *tsk) 136 void flush_altivec_to_thread(struct task_struct *tsk)
137 { 137 {
138 if (tsk->thread.regs) { 138 if (tsk->thread.regs) {
139 preempt_disable(); 139 preempt_disable();
140 if (tsk->thread.regs->msr & MSR_VEC) { 140 if (tsk->thread.regs->msr & MSR_VEC) {
141 #ifdef CONFIG_SMP 141 #ifdef CONFIG_SMP
142 BUG_ON(tsk != current); 142 BUG_ON(tsk != current);
143 #endif 143 #endif
144 giveup_altivec(tsk); 144 giveup_altivec(tsk);
145 } 145 }
146 preempt_enable(); 146 preempt_enable();
147 } 147 }
148 } 148 }
149 EXPORT_SYMBOL_GPL(flush_altivec_to_thread); 149 EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
150 #endif /* CONFIG_ALTIVEC */ 150 #endif /* CONFIG_ALTIVEC */
151 151
152 #ifdef CONFIG_VSX 152 #ifdef CONFIG_VSX
153 #if 0 153 #if 0
154 /* not currently used, but some crazy RAID module might want to later */ 154 /* not currently used, but some crazy RAID module might want to later */
155 void enable_kernel_vsx(void) 155 void enable_kernel_vsx(void)
156 { 156 {
157 WARN_ON(preemptible()); 157 WARN_ON(preemptible());
158 158
159 #ifdef CONFIG_SMP 159 #ifdef CONFIG_SMP
160 if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) 160 if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
161 giveup_vsx(current); 161 giveup_vsx(current);
162 else 162 else
163 giveup_vsx(NULL); /* just enable vsx for kernel - force */ 163 giveup_vsx(NULL); /* just enable vsx for kernel - force */
164 #else 164 #else
165 giveup_vsx(last_task_used_vsx); 165 giveup_vsx(last_task_used_vsx);
166 #endif /* CONFIG_SMP */ 166 #endif /* CONFIG_SMP */
167 } 167 }
168 EXPORT_SYMBOL(enable_kernel_vsx); 168 EXPORT_SYMBOL(enable_kernel_vsx);
169 #endif 169 #endif
170 170
171 void giveup_vsx(struct task_struct *tsk) 171 void giveup_vsx(struct task_struct *tsk)
172 { 172 {
173 giveup_fpu(tsk); 173 giveup_fpu(tsk);
174 giveup_altivec(tsk); 174 giveup_altivec(tsk);
175 __giveup_vsx(tsk); 175 __giveup_vsx(tsk);
176 } 176 }
177 177
178 void flush_vsx_to_thread(struct task_struct *tsk) 178 void flush_vsx_to_thread(struct task_struct *tsk)
179 { 179 {
180 if (tsk->thread.regs) { 180 if (tsk->thread.regs) {
181 preempt_disable(); 181 preempt_disable();
182 if (tsk->thread.regs->msr & MSR_VSX) { 182 if (tsk->thread.regs->msr & MSR_VSX) {
183 #ifdef CONFIG_SMP 183 #ifdef CONFIG_SMP
184 BUG_ON(tsk != current); 184 BUG_ON(tsk != current);
185 #endif 185 #endif
186 giveup_vsx(tsk); 186 giveup_vsx(tsk);
187 } 187 }
188 preempt_enable(); 188 preempt_enable();
189 } 189 }
190 } 190 }
191 EXPORT_SYMBOL_GPL(flush_vsx_to_thread); 191 EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
192 #endif /* CONFIG_VSX */ 192 #endif /* CONFIG_VSX */
193 193
194 #ifdef CONFIG_SPE 194 #ifdef CONFIG_SPE
195 195
196 void enable_kernel_spe(void) 196 void enable_kernel_spe(void)
197 { 197 {
198 WARN_ON(preemptible()); 198 WARN_ON(preemptible());
199 199
200 #ifdef CONFIG_SMP 200 #ifdef CONFIG_SMP
201 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) 201 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
202 giveup_spe(current); 202 giveup_spe(current);
203 else 203 else
204 giveup_spe(NULL); /* just enable SPE for kernel - force */ 204 giveup_spe(NULL); /* just enable SPE for kernel - force */
205 #else 205 #else
206 giveup_spe(last_task_used_spe); 206 giveup_spe(last_task_used_spe);
207 #endif /* CONFIG_SMP */ 207 #endif /* CONFIG_SMP */
208 } 208 }
209 EXPORT_SYMBOL(enable_kernel_spe); 209 EXPORT_SYMBOL(enable_kernel_spe);
210 210
211 void flush_spe_to_thread(struct task_struct *tsk) 211 void flush_spe_to_thread(struct task_struct *tsk)
212 { 212 {
213 if (tsk->thread.regs) { 213 if (tsk->thread.regs) {
214 preempt_disable(); 214 preempt_disable();
215 if (tsk->thread.regs->msr & MSR_SPE) { 215 if (tsk->thread.regs->msr & MSR_SPE) {
216 #ifdef CONFIG_SMP 216 #ifdef CONFIG_SMP
217 BUG_ON(tsk != current); 217 BUG_ON(tsk != current);
218 #endif 218 #endif
219 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR); 219 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
220 giveup_spe(tsk); 220 giveup_spe(tsk);
221 } 221 }
222 preempt_enable(); 222 preempt_enable();
223 } 223 }
224 } 224 }
225 #endif /* CONFIG_SPE */ 225 #endif /* CONFIG_SPE */
226 226
227 #ifndef CONFIG_SMP 227 #ifndef CONFIG_SMP
228 /* 228 /*
229 * If we are doing lazy switching of CPU state (FP, altivec or SPE), 229 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
230 * and the current task has some state, discard it. 230 * and the current task has some state, discard it.
231 */ 231 */
232 void discard_lazy_cpu_state(void) 232 void discard_lazy_cpu_state(void)
233 { 233 {
234 preempt_disable(); 234 preempt_disable();
235 if (last_task_used_math == current) 235 if (last_task_used_math == current)
236 last_task_used_math = NULL; 236 last_task_used_math = NULL;
237 #ifdef CONFIG_ALTIVEC 237 #ifdef CONFIG_ALTIVEC
238 if (last_task_used_altivec == current) 238 if (last_task_used_altivec == current)
239 last_task_used_altivec = NULL; 239 last_task_used_altivec = NULL;
240 #endif /* CONFIG_ALTIVEC */ 240 #endif /* CONFIG_ALTIVEC */
241 #ifdef CONFIG_VSX 241 #ifdef CONFIG_VSX
242 if (last_task_used_vsx == current) 242 if (last_task_used_vsx == current)
243 last_task_used_vsx = NULL; 243 last_task_used_vsx = NULL;
244 #endif /* CONFIG_VSX */ 244 #endif /* CONFIG_VSX */
245 #ifdef CONFIG_SPE 245 #ifdef CONFIG_SPE
246 if (last_task_used_spe == current) 246 if (last_task_used_spe == current)
247 last_task_used_spe = NULL; 247 last_task_used_spe = NULL;
248 #endif 248 #endif
249 preempt_enable(); 249 preempt_enable();
250 } 250 }
251 #endif /* CONFIG_SMP */ 251 #endif /* CONFIG_SMP */
252 252
253 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 253 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
254 void do_send_trap(struct pt_regs *regs, unsigned long address, 254 void do_send_trap(struct pt_regs *regs, unsigned long address,
255 unsigned long error_code, int signal_code, int breakpt) 255 unsigned long error_code, int signal_code, int breakpt)
256 { 256 {
257 siginfo_t info; 257 siginfo_t info;
258 258
259 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, 259 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
260 11, SIGSEGV) == NOTIFY_STOP) 260 11, SIGSEGV) == NOTIFY_STOP)
261 return; 261 return;
262 262
263 /* Deliver the signal to userspace */ 263 /* Deliver the signal to userspace */
264 info.si_signo = SIGTRAP; 264 info.si_signo = SIGTRAP;
265 info.si_errno = breakpt; /* breakpoint or watchpoint id */ 265 info.si_errno = breakpt; /* breakpoint or watchpoint id */
266 info.si_code = signal_code; 266 info.si_code = signal_code;
267 info.si_addr = (void __user *)address; 267 info.si_addr = (void __user *)address;
268 force_sig_info(SIGTRAP, &info, current); 268 force_sig_info(SIGTRAP, &info, current);
269 } 269 }
270 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ 270 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
271 void do_dabr(struct pt_regs *regs, unsigned long address, 271 void do_dabr(struct pt_regs *regs, unsigned long address,
272 unsigned long error_code) 272 unsigned long error_code)
273 { 273 {
274 siginfo_t info; 274 siginfo_t info;
275 275
276 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, 276 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
277 11, SIGSEGV) == NOTIFY_STOP) 277 11, SIGSEGV) == NOTIFY_STOP)
278 return; 278 return;
279 279
280 if (debugger_dabr_match(regs)) 280 if (debugger_dabr_match(regs))
281 return; 281 return;
282 282
283 /* Clear the DABR */ 283 /* Clear the DABR */
284 set_dabr(0); 284 set_dabr(0);
285 285
286 /* Deliver the signal to userspace */ 286 /* Deliver the signal to userspace */
287 info.si_signo = SIGTRAP; 287 info.si_signo = SIGTRAP;
288 info.si_errno = 0; 288 info.si_errno = 0;
289 info.si_code = TRAP_HWBKPT; 289 info.si_code = TRAP_HWBKPT;
290 info.si_addr = (void __user *)address; 290 info.si_addr = (void __user *)address;
291 force_sig_info(SIGTRAP, &info, current); 291 force_sig_info(SIGTRAP, &info, current);
292 } 292 }
293 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ 293 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
294 294
295 static DEFINE_PER_CPU(unsigned long, current_dabr); 295 static DEFINE_PER_CPU(unsigned long, current_dabr);
296 296
297 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 297 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
298 /* 298 /*
299 * Set the debug registers back to their default "safe" values. 299 * Set the debug registers back to their default "safe" values.
300 */ 300 */
301 static void set_debug_reg_defaults(struct thread_struct *thread) 301 static void set_debug_reg_defaults(struct thread_struct *thread)
302 { 302 {
303 thread->iac1 = thread->iac2 = 0; 303 thread->iac1 = thread->iac2 = 0;
304 #if CONFIG_PPC_ADV_DEBUG_IACS > 2 304 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
305 thread->iac3 = thread->iac4 = 0; 305 thread->iac3 = thread->iac4 = 0;
306 #endif 306 #endif
307 thread->dac1 = thread->dac2 = 0; 307 thread->dac1 = thread->dac2 = 0;
308 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 308 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
309 thread->dvc1 = thread->dvc2 = 0; 309 thread->dvc1 = thread->dvc2 = 0;
310 #endif 310 #endif
311 thread->dbcr0 = 0; 311 thread->dbcr0 = 0;
312 #ifdef CONFIG_BOOKE 312 #ifdef CONFIG_BOOKE
313 /* 313 /*
314 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1) 314 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
315 */ 315 */
316 thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | \ 316 thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | \
317 DBCR1_IAC3US | DBCR1_IAC4US; 317 DBCR1_IAC3US | DBCR1_IAC4US;
318 /* 318 /*
319 * Force Data Address Compare User/Supervisor bits to be User-only 319 * Force Data Address Compare User/Supervisor bits to be User-only
320 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0. 320 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
321 */ 321 */
322 thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US; 322 thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
323 #else 323 #else
324 thread->dbcr1 = 0; 324 thread->dbcr1 = 0;
325 #endif 325 #endif
326 } 326 }
327 327
328 static void prime_debug_regs(struct thread_struct *thread) 328 static void prime_debug_regs(struct thread_struct *thread)
329 { 329 {
330 mtspr(SPRN_IAC1, thread->iac1); 330 mtspr(SPRN_IAC1, thread->iac1);
331 mtspr(SPRN_IAC2, thread->iac2); 331 mtspr(SPRN_IAC2, thread->iac2);
332 #if CONFIG_PPC_ADV_DEBUG_IACS > 2 332 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
333 mtspr(SPRN_IAC3, thread->iac3); 333 mtspr(SPRN_IAC3, thread->iac3);
334 mtspr(SPRN_IAC4, thread->iac4); 334 mtspr(SPRN_IAC4, thread->iac4);
335 #endif 335 #endif
336 mtspr(SPRN_DAC1, thread->dac1); 336 mtspr(SPRN_DAC1, thread->dac1);
337 mtspr(SPRN_DAC2, thread->dac2); 337 mtspr(SPRN_DAC2, thread->dac2);
338 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 338 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
339 mtspr(SPRN_DVC1, thread->dvc1); 339 mtspr(SPRN_DVC1, thread->dvc1);
340 mtspr(SPRN_DVC2, thread->dvc2); 340 mtspr(SPRN_DVC2, thread->dvc2);
341 #endif 341 #endif
342 mtspr(SPRN_DBCR0, thread->dbcr0); 342 mtspr(SPRN_DBCR0, thread->dbcr0);
343 mtspr(SPRN_DBCR1, thread->dbcr1); 343 mtspr(SPRN_DBCR1, thread->dbcr1);
344 #ifdef CONFIG_BOOKE 344 #ifdef CONFIG_BOOKE
345 mtspr(SPRN_DBCR2, thread->dbcr2); 345 mtspr(SPRN_DBCR2, thread->dbcr2);
346 #endif 346 #endif
347 } 347 }
348 /* 348 /*
349 * Unless neither the old nor the new thread is making use of the 349 * Unless neither the old nor the new thread is making use of the
350 * debug registers, set the debug registers from the values 350 * debug registers, set the debug registers from the values
351 * stored in the new thread. 351 * stored in the new thread.
352 */ 352 */
353 static void switch_booke_debug_regs(struct thread_struct *new_thread) 353 static void switch_booke_debug_regs(struct thread_struct *new_thread)
354 { 354 {
355 if ((current->thread.dbcr0 & DBCR0_IDM) 355 if ((current->thread.dbcr0 & DBCR0_IDM)
356 || (new_thread->dbcr0 & DBCR0_IDM)) 356 || (new_thread->dbcr0 & DBCR0_IDM))
357 prime_debug_regs(new_thread); 357 prime_debug_regs(new_thread);
358 } 358 }
359 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ 359 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
360 #ifndef CONFIG_HAVE_HW_BREAKPOINT 360 #ifndef CONFIG_HAVE_HW_BREAKPOINT
361 static void set_debug_reg_defaults(struct thread_struct *thread) 361 static void set_debug_reg_defaults(struct thread_struct *thread)
362 { 362 {
363 if (thread->dabr) { 363 if (thread->dabr) {
364 thread->dabr = 0; 364 thread->dabr = 0;
365 set_dabr(0); 365 set_dabr(0);
366 } 366 }
367 } 367 }
368 #endif /* !CONFIG_HAVE_HW_BREAKPOINT */ 368 #endif /* !CONFIG_HAVE_HW_BREAKPOINT */
369 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ 369 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
370 370
371 int set_dabr(unsigned long dabr) 371 int set_dabr(unsigned long dabr)
372 { 372 {
373 __get_cpu_var(current_dabr) = dabr; 373 __get_cpu_var(current_dabr) = dabr;
374 374
375 if (ppc_md.set_dabr) 375 if (ppc_md.set_dabr)
376 return ppc_md.set_dabr(dabr); 376 return ppc_md.set_dabr(dabr);
377 377
378 /* XXX should we have a CPU_FTR_HAS_DABR ? */ 378 /* XXX should we have a CPU_FTR_HAS_DABR ? */
379 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 379 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
380 mtspr(SPRN_DAC1, dabr); 380 mtspr(SPRN_DAC1, dabr);
381 #ifdef CONFIG_PPC_47x 381 #ifdef CONFIG_PPC_47x
382 isync(); 382 isync();
383 #endif 383 #endif
384 #elif defined(CONFIG_PPC_BOOK3S) 384 #elif defined(CONFIG_PPC_BOOK3S)
385 mtspr(SPRN_DABR, dabr); 385 mtspr(SPRN_DABR, dabr);
386 #endif 386 #endif
387 387
388 388
389 return 0; 389 return 0;
390 } 390 }
391 391
392 #ifdef CONFIG_PPC64 392 #ifdef CONFIG_PPC64
393 DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array); 393 DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
394 #endif 394 #endif
395 395
396 struct task_struct *__switch_to(struct task_struct *prev, 396 struct task_struct *__switch_to(struct task_struct *prev,
397 struct task_struct *new) 397 struct task_struct *new)
398 { 398 {
399 struct thread_struct *new_thread, *old_thread; 399 struct thread_struct *new_thread, *old_thread;
400 unsigned long flags; 400 unsigned long flags;
401 struct task_struct *last; 401 struct task_struct *last;
402 #ifdef CONFIG_PPC_BOOK3S_64 402 #ifdef CONFIG_PPC_BOOK3S_64
403 struct ppc64_tlb_batch *batch; 403 struct ppc64_tlb_batch *batch;
404 #endif 404 #endif
405 405
406 #ifdef CONFIG_SMP 406 #ifdef CONFIG_SMP
407 /* avoid complexity of lazy save/restore of fpu 407 /* avoid complexity of lazy save/restore of fpu
408 * by just saving it every time we switch out if 408 * by just saving it every time we switch out if
409 * this task used the fpu during the last quantum. 409 * this task used the fpu during the last quantum.
410 * 410 *
411 * If it tries to use the fpu again, it'll trap and 411 * If it tries to use the fpu again, it'll trap and
412 * reload its fp regs. So we don't have to do a restore 412 * reload its fp regs. So we don't have to do a restore
413 * every switch, just a save. 413 * every switch, just a save.
414 * -- Cort 414 * -- Cort
415 */ 415 */
416 if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP)) 416 if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
417 giveup_fpu(prev); 417 giveup_fpu(prev);
418 #ifdef CONFIG_ALTIVEC 418 #ifdef CONFIG_ALTIVEC
419 /* 419 /*
420 * If the previous thread used altivec in the last quantum 420 * If the previous thread used altivec in the last quantum
421 * (thus changing altivec regs) then save them. 421 * (thus changing altivec regs) then save them.
422 * We used to check the VRSAVE register but not all apps 422 * We used to check the VRSAVE register but not all apps
423 * set it, so we don't rely on it now (and in fact we need 423 * set it, so we don't rely on it now (and in fact we need
424 * to save & restore VSCR even if VRSAVE == 0). -- paulus 424 * to save & restore VSCR even if VRSAVE == 0). -- paulus
425 * 425 *
426 * On SMP we always save/restore altivec regs just to avoid the 426 * On SMP we always save/restore altivec regs just to avoid the
427 * complexity of changing processors. 427 * complexity of changing processors.
428 * -- Cort 428 * -- Cort
429 */ 429 */
430 if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)) 430 if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
431 giveup_altivec(prev); 431 giveup_altivec(prev);
432 #endif /* CONFIG_ALTIVEC */ 432 #endif /* CONFIG_ALTIVEC */
433 #ifdef CONFIG_VSX 433 #ifdef CONFIG_VSX
434 if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX)) 434 if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
435 /* VMX and FPU registers are already saved here */ 435 /* VMX and FPU registers are already saved here */
436 __giveup_vsx(prev); 436 __giveup_vsx(prev);
437 #endif /* CONFIG_VSX */ 437 #endif /* CONFIG_VSX */
438 #ifdef CONFIG_SPE 438 #ifdef CONFIG_SPE
439 /* 439 /*
440 * If the previous thread used spe in the last quantum 440 * If the previous thread used spe in the last quantum
441 * (thus changing spe regs) then save them. 441 * (thus changing spe regs) then save them.
442 * 442 *
443 * On SMP we always save/restore spe regs just to avoid the 443 * On SMP we always save/restore spe regs just to avoid the
444 * complexity of changing processors. 444 * complexity of changing processors.
445 */ 445 */
446 if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE))) 446 if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
447 giveup_spe(prev); 447 giveup_spe(prev);
448 #endif /* CONFIG_SPE */ 448 #endif /* CONFIG_SPE */
449 449
450 #else /* CONFIG_SMP */ 450 #else /* CONFIG_SMP */
451 #ifdef CONFIG_ALTIVEC 451 #ifdef CONFIG_ALTIVEC
452 /* Avoid the trap. On smp this never happens since 452 /* Avoid the trap. On smp this never happens since
453 * we don't set last_task_used_altivec -- Cort 453 * we don't set last_task_used_altivec -- Cort
454 */ 454 */
455 if (new->thread.regs && last_task_used_altivec == new) 455 if (new->thread.regs && last_task_used_altivec == new)
456 new->thread.regs->msr |= MSR_VEC; 456 new->thread.regs->msr |= MSR_VEC;
457 #endif /* CONFIG_ALTIVEC */ 457 #endif /* CONFIG_ALTIVEC */
458 #ifdef CONFIG_VSX 458 #ifdef CONFIG_VSX
459 if (new->thread.regs && last_task_used_vsx == new) 459 if (new->thread.regs && last_task_used_vsx == new)
460 new->thread.regs->msr |= MSR_VSX; 460 new->thread.regs->msr |= MSR_VSX;
461 #endif /* CONFIG_VSX */ 461 #endif /* CONFIG_VSX */
462 #ifdef CONFIG_SPE 462 #ifdef CONFIG_SPE
463 /* Avoid the trap. On smp this never happens since 463 /* Avoid the trap. On smp this never happens since
464 * we don't set last_task_used_spe 464 * we don't set last_task_used_spe
465 */ 465 */
466 if (new->thread.regs && last_task_used_spe == new) 466 if (new->thread.regs && last_task_used_spe == new)
467 new->thread.regs->msr |= MSR_SPE; 467 new->thread.regs->msr |= MSR_SPE;
468 #endif /* CONFIG_SPE */ 468 #endif /* CONFIG_SPE */
469 469
470 #endif /* CONFIG_SMP */ 470 #endif /* CONFIG_SMP */
471 471
472 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 472 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
473 switch_booke_debug_regs(&new->thread); 473 switch_booke_debug_regs(&new->thread);
474 #else 474 #else
475 /* 475 /*
476 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would 476 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
477 * schedule DABR 477 * schedule DABR
478 */ 478 */
479 #ifndef CONFIG_HAVE_HW_BREAKPOINT 479 #ifndef CONFIG_HAVE_HW_BREAKPOINT
480 if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) 480 if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
481 set_dabr(new->thread.dabr); 481 set_dabr(new->thread.dabr);
482 #endif /* CONFIG_HAVE_HW_BREAKPOINT */ 482 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
483 #endif 483 #endif
484 484
485 485
486 new_thread = &new->thread; 486 new_thread = &new->thread;
487 old_thread = &current->thread; 487 old_thread = &current->thread;
488 488
489 #if defined(CONFIG_PPC_BOOK3E_64) 489 #if defined(CONFIG_PPC_BOOK3E_64)
490 /* XXX Current Book3E code doesn't deal with kernel side DBCR0, 490 /* XXX Current Book3E code doesn't deal with kernel side DBCR0,
491 * we always hold the user values, so we set it now. 491 * we always hold the user values, so we set it now.
492 * 492 *
493 * However, we ensure the kernel MSR:DE is appropriately cleared too 493 * However, we ensure the kernel MSR:DE is appropriately cleared too
494 * to avoid spurious single step exceptions in the kernel. 494 * to avoid spurious single step exceptions in the kernel.
495 * 495 *
496 * This will have to change to merge with the ppc32 code at some point, 496 * This will have to change to merge with the ppc32 code at some point,
497 * but I don't like much what ppc32 is doing today so there's some 497 * but I don't like much what ppc32 is doing today so there's some
498 * thinking needed there 498 * thinking needed there
499 */ 499 */
500 if ((new_thread->dbcr0 | old_thread->dbcr0) & DBCR0_IDM) { 500 if ((new_thread->dbcr0 | old_thread->dbcr0) & DBCR0_IDM) {
501 u32 dbcr0; 501 u32 dbcr0;
502 502
503 mtmsr(mfmsr() & ~MSR_DE); 503 mtmsr(mfmsr() & ~MSR_DE);
504 isync(); 504 isync();
505 dbcr0 = mfspr(SPRN_DBCR0); 505 dbcr0 = mfspr(SPRN_DBCR0);
506 dbcr0 = (dbcr0 & DBCR0_EDM) | new_thread->dbcr0; 506 dbcr0 = (dbcr0 & DBCR0_EDM) | new_thread->dbcr0;
507 mtspr(SPRN_DBCR0, dbcr0); 507 mtspr(SPRN_DBCR0, dbcr0);
508 } 508 }
509 #endif /* CONFIG_PPC_BOOK3E_64 */ 509 #endif /* CONFIG_PPC_BOOK3E_64 */
510 510
511 #ifdef CONFIG_PPC64 511 #ifdef CONFIG_PPC64
512 /* 512 /*
513 * Collect processor utilization data per process 513 * Collect processor utilization data per process
514 */ 514 */
515 if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 515 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
516 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); 516 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
517 long unsigned start_tb, current_tb; 517 long unsigned start_tb, current_tb;
518 start_tb = old_thread->start_tb; 518 start_tb = old_thread->start_tb;
519 cu->current_tb = current_tb = mfspr(SPRN_PURR); 519 cu->current_tb = current_tb = mfspr(SPRN_PURR);
520 old_thread->accum_tb += (current_tb - start_tb); 520 old_thread->accum_tb += (current_tb - start_tb);
521 new_thread->start_tb = current_tb; 521 new_thread->start_tb = current_tb;
522 } 522 }
523 #endif /* CONFIG_PPC64 */ 523 #endif /* CONFIG_PPC64 */
524 524
525 #ifdef CONFIG_PPC_BOOK3S_64 525 #ifdef CONFIG_PPC_BOOK3S_64
526 batch = &__get_cpu_var(ppc64_tlb_batch); 526 batch = &__get_cpu_var(ppc64_tlb_batch);
527 if (batch->active) { 527 if (batch->active) {
528 current_thread_info()->local_flags |= _TLF_LAZY_MMU; 528 current_thread_info()->local_flags |= _TLF_LAZY_MMU;
529 if (batch->index) 529 if (batch->index)
530 __flush_tlb_pending(batch); 530 __flush_tlb_pending(batch);
531 batch->active = 0; 531 batch->active = 0;
532 } 532 }
533 #endif /* CONFIG_PPC_BOOK3S_64 */ 533 #endif /* CONFIG_PPC_BOOK3S_64 */
534 534
535 local_irq_save(flags); 535 local_irq_save(flags);
536 536
537 account_system_vtime(current); 537 account_system_vtime(current);
538 account_process_vtime(current); 538 account_process_vtime(current);
539 539
540 /* 540 /*
541 * We can't take a PMU exception inside _switch() since there is a 541 * We can't take a PMU exception inside _switch() since there is a
542 * window where the kernel stack SLB and the kernel stack are out 542 * window where the kernel stack SLB and the kernel stack are out
543 * of sync. Hard disable here. 543 * of sync. Hard disable here.
544 */ 544 */
545 hard_irq_disable(); 545 hard_irq_disable();
546 last = _switch(old_thread, new_thread); 546 last = _switch(old_thread, new_thread);
547 547
548 #ifdef CONFIG_PPC_BOOK3S_64 548 #ifdef CONFIG_PPC_BOOK3S_64
549 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) { 549 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
550 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU; 550 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
551 batch = &__get_cpu_var(ppc64_tlb_batch); 551 batch = &__get_cpu_var(ppc64_tlb_batch);
552 batch->active = 1; 552 batch->active = 1;
553 } 553 }
554 #endif /* CONFIG_PPC_BOOK3S_64 */ 554 #endif /* CONFIG_PPC_BOOK3S_64 */
555 555
556 local_irq_restore(flags); 556 local_irq_restore(flags);
557 557
558 return last; 558 return last;
559 } 559 }
560 560
561 static int instructions_to_print = 16; 561 static int instructions_to_print = 16;
562 562
563 static void show_instructions(struct pt_regs *regs) 563 static void show_instructions(struct pt_regs *regs)
564 { 564 {
565 int i; 565 int i;
566 unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 * 566 unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
567 sizeof(int)); 567 sizeof(int));
568 568
569 printk("Instruction dump:"); 569 printk("Instruction dump:");
570 570
571 for (i = 0; i < instructions_to_print; i++) { 571 for (i = 0; i < instructions_to_print; i++) {
572 int instr; 572 int instr;
573 573
574 if (!(i % 8)) 574 if (!(i % 8))
575 printk("\n"); 575 printk("\n");
576 576
577 #if !defined(CONFIG_BOOKE) 577 #if !defined(CONFIG_BOOKE)
578 /* If executing with the IMMU off, adjust pc rather 578 /* If executing with the IMMU off, adjust pc rather
579 * than print XXXXXXXX. 579 * than print XXXXXXXX.
580 */ 580 */
581 if (!(regs->msr & MSR_IR)) 581 if (!(regs->msr & MSR_IR))
582 pc = (unsigned long)phys_to_virt(pc); 582 pc = (unsigned long)phys_to_virt(pc);
583 #endif 583 #endif
584 584
585 /* We use __get_user here *only* to avoid an OOPS on a 585 /* We use __get_user here *only* to avoid an OOPS on a
586 * bad address because the pc *should* only be a 586 * bad address because the pc *should* only be a
587 * kernel address. 587 * kernel address.
588 */ 588 */
589 if (!__kernel_text_address(pc) || 589 if (!__kernel_text_address(pc) ||
590 __get_user(instr, (unsigned int __user *)pc)) { 590 __get_user(instr, (unsigned int __user *)pc)) {
591 printk("XXXXXXXX "); 591 printk("XXXXXXXX ");
592 } else { 592 } else {
593 if (regs->nip == pc) 593 if (regs->nip == pc)
594 printk("<%08x> ", instr); 594 printk("<%08x> ", instr);
595 else 595 else
596 printk("%08x ", instr); 596 printk("%08x ", instr);
597 } 597 }
598 598
599 pc += sizeof(int); 599 pc += sizeof(int);
600 } 600 }
601 601
602 printk("\n"); 602 printk("\n");
603 } 603 }
604 604
605 static struct regbit { 605 static struct regbit {
606 unsigned long bit; 606 unsigned long bit;
607 const char *name; 607 const char *name;
608 } msr_bits[] = { 608 } msr_bits[] = {
609 {MSR_EE, "EE"}, 609 {MSR_EE, "EE"},
610 {MSR_PR, "PR"}, 610 {MSR_PR, "PR"},
611 {MSR_FP, "FP"}, 611 {MSR_FP, "FP"},
612 {MSR_VEC, "VEC"}, 612 {MSR_VEC, "VEC"},
613 {MSR_VSX, "VSX"}, 613 {MSR_VSX, "VSX"},
614 {MSR_ME, "ME"}, 614 {MSR_ME, "ME"},
615 {MSR_CE, "CE"}, 615 {MSR_CE, "CE"},
616 {MSR_DE, "DE"}, 616 {MSR_DE, "DE"},
617 {MSR_IR, "IR"}, 617 {MSR_IR, "IR"},
618 {MSR_DR, "DR"}, 618 {MSR_DR, "DR"},
619 {0, NULL} 619 {0, NULL}
620 }; 620 };
621 621
622 static void printbits(unsigned long val, struct regbit *bits) 622 static void printbits(unsigned long val, struct regbit *bits)
623 { 623 {
624 const char *sep = ""; 624 const char *sep = "";
625 625
626 printk("<"); 626 printk("<");
627 for (; bits->bit; ++bits) 627 for (; bits->bit; ++bits)
628 if (val & bits->bit) { 628 if (val & bits->bit) {
629 printk("%s%s", sep, bits->name); 629 printk("%s%s", sep, bits->name);
630 sep = ","; 630 sep = ",";
631 } 631 }
632 printk(">"); 632 printk(">");
633 } 633 }
634 634
635 #ifdef CONFIG_PPC64 635 #ifdef CONFIG_PPC64
636 #define REG "%016lx" 636 #define REG "%016lx"
637 #define REGS_PER_LINE 4 637 #define REGS_PER_LINE 4
638 #define LAST_VOLATILE 13 638 #define LAST_VOLATILE 13
639 #else 639 #else
640 #define REG "%08lx" 640 #define REG "%08lx"
641 #define REGS_PER_LINE 8 641 #define REGS_PER_LINE 8
642 #define LAST_VOLATILE 12 642 #define LAST_VOLATILE 12
643 #endif 643 #endif
644 644
645 void show_regs(struct pt_regs * regs) 645 void show_regs(struct pt_regs * regs)
646 { 646 {
647 int i, trap; 647 int i, trap;
648 648
649 printk("NIP: "REG" LR: "REG" CTR: "REG"\n", 649 printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
650 regs->nip, regs->link, regs->ctr); 650 regs->nip, regs->link, regs->ctr);
651 printk("REGS: %p TRAP: %04lx %s (%s)\n", 651 printk("REGS: %p TRAP: %04lx %s (%s)\n",
652 regs, regs->trap, print_tainted(), init_utsname()->release); 652 regs, regs->trap, print_tainted(), init_utsname()->release);
653 printk("MSR: "REG" ", regs->msr); 653 printk("MSR: "REG" ", regs->msr);
654 printbits(regs->msr, msr_bits); 654 printbits(regs->msr, msr_bits);
655 printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); 655 printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
656 trap = TRAP(regs); 656 trap = TRAP(regs);
657 if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) 657 if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
658 printk("CFAR: "REG"\n", regs->orig_gpr3); 658 printk("CFAR: "REG"\n", regs->orig_gpr3);
659 if (trap == 0x300 || trap == 0x600) 659 if (trap == 0x300 || trap == 0x600)
660 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 660 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
661 printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr); 661 printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
662 #else 662 #else
663 printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr); 663 printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
664 #endif 664 #endif
665 printk("TASK = %p[%d] '%s' THREAD: %p", 665 printk("TASK = %p[%d] '%s' THREAD: %p",
666 current, task_pid_nr(current), current->comm, task_thread_info(current)); 666 current, task_pid_nr(current), current->comm, task_thread_info(current));
667 667
668 #ifdef CONFIG_SMP 668 #ifdef CONFIG_SMP
669 printk(" CPU: %d", raw_smp_processor_id()); 669 printk(" CPU: %d", raw_smp_processor_id());
670 #endif /* CONFIG_SMP */ 670 #endif /* CONFIG_SMP */
671 671
672 for (i = 0; i < 32; i++) { 672 for (i = 0; i < 32; i++) {
673 if ((i % REGS_PER_LINE) == 0) 673 if ((i % REGS_PER_LINE) == 0)
674 printk("\nGPR%02d: ", i); 674 printk("\nGPR%02d: ", i);
675 printk(REG " ", regs->gpr[i]); 675 printk(REG " ", regs->gpr[i]);
676 if (i == LAST_VOLATILE && !FULL_REGS(regs)) 676 if (i == LAST_VOLATILE && !FULL_REGS(regs))
677 break; 677 break;
678 } 678 }
679 printk("\n"); 679 printk("\n");
680 #ifdef CONFIG_KALLSYMS 680 #ifdef CONFIG_KALLSYMS
681 /* 681 /*
682 * Lookup NIP late so we have the best chance of getting the 682 * Lookup NIP late so we have the best chance of getting the
683 * above info out without failing 683 * above info out without failing
684 */ 684 */
685 printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip); 685 printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
686 printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link); 686 printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
687 #endif 687 #endif
688 show_stack(current, (unsigned long *) regs->gpr[1]); 688 show_stack(current, (unsigned long *) regs->gpr[1]);
689 if (!user_mode(regs)) 689 if (!user_mode(regs))
690 show_instructions(regs); 690 show_instructions(regs);
691 } 691 }
692 692
693 void exit_thread(void) 693 void exit_thread(void)
694 { 694 {
695 discard_lazy_cpu_state(); 695 discard_lazy_cpu_state();
696 } 696 }
697 697
698 void flush_thread(void) 698 void flush_thread(void)
699 { 699 {
700 discard_lazy_cpu_state(); 700 discard_lazy_cpu_state();
701 701
702 #ifdef CONFIG_HAVE_HW_BREAKPOINT 702 #ifdef CONFIG_HAVE_HW_BREAKPOINT
703 flush_ptrace_hw_breakpoint(current); 703 flush_ptrace_hw_breakpoint(current);
704 #else /* CONFIG_HAVE_HW_BREAKPOINT */ 704 #else /* CONFIG_HAVE_HW_BREAKPOINT */
705 set_debug_reg_defaults(&current->thread); 705 set_debug_reg_defaults(&current->thread);
706 #endif /* CONFIG_HAVE_HW_BREAKPOINT */ 706 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
707 } 707 }
708 708
709 void 709 void
710 release_thread(struct task_struct *t) 710 release_thread(struct task_struct *t)
711 { 711 {
712 } 712 }
713 713
714 /* 714 /*
715 * This gets called before we allocate a new thread and copy 715 * This gets called before we allocate a new thread and copy
716 * the current task into it. 716 * the current task into it.
717 */ 717 */
718 void prepare_to_copy(struct task_struct *tsk) 718 void prepare_to_copy(struct task_struct *tsk)
719 { 719 {
720 flush_fp_to_thread(current); 720 flush_fp_to_thread(current);
721 flush_altivec_to_thread(current); 721 flush_altivec_to_thread(current);
722 flush_vsx_to_thread(current); 722 flush_vsx_to_thread(current);
723 flush_spe_to_thread(current); 723 flush_spe_to_thread(current);
724 #ifdef CONFIG_HAVE_HW_BREAKPOINT 724 #ifdef CONFIG_HAVE_HW_BREAKPOINT
725 flush_ptrace_hw_breakpoint(tsk); 725 flush_ptrace_hw_breakpoint(tsk);
726 #endif /* CONFIG_HAVE_HW_BREAKPOINT */ 726 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
727 } 727 }
728 728
729 /* 729 /*
730 * Copy a thread. 730 * Copy a thread.
731 */ 731 */
732 extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */ 732 extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */
733 733
734 int copy_thread(unsigned long clone_flags, unsigned long usp, 734 int copy_thread(unsigned long clone_flags, unsigned long usp,
735 unsigned long unused, struct task_struct *p, 735 unsigned long unused, struct task_struct *p,
736 struct pt_regs *regs) 736 struct pt_regs *regs)
737 { 737 {
738 struct pt_regs *childregs, *kregs; 738 struct pt_regs *childregs, *kregs;
739 extern void ret_from_fork(void); 739 extern void ret_from_fork(void);
740 unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE; 740 unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
741 741
742 CHECK_FULL_REGS(regs); 742 CHECK_FULL_REGS(regs);
743 /* Copy registers */ 743 /* Copy registers */
744 sp -= sizeof(struct pt_regs); 744 sp -= sizeof(struct pt_regs);
745 childregs = (struct pt_regs *) sp; 745 childregs = (struct pt_regs *) sp;
746 *childregs = *regs; 746 *childregs = *regs;
747 if ((childregs->msr & MSR_PR) == 0) { 747 if ((childregs->msr & MSR_PR) == 0) {
748 /* for kernel thread, set `current' and stackptr in new task */ 748 /* for kernel thread, set `current' and stackptr in new task */
749 childregs->gpr[1] = sp + sizeof(struct pt_regs); 749 childregs->gpr[1] = sp + sizeof(struct pt_regs);
750 #ifdef CONFIG_PPC32 750 #ifdef CONFIG_PPC32
751 childregs->gpr[2] = (unsigned long) p; 751 childregs->gpr[2] = (unsigned long) p;
752 #else 752 #else
753 clear_tsk_thread_flag(p, TIF_32BIT); 753 clear_tsk_thread_flag(p, TIF_32BIT);
754 #endif 754 #endif
755 p->thread.regs = NULL; /* no user register state */ 755 p->thread.regs = NULL; /* no user register state */
756 } else { 756 } else {
757 childregs->gpr[1] = usp; 757 childregs->gpr[1] = usp;
758 p->thread.regs = childregs; 758 p->thread.regs = childregs;
759 if (clone_flags & CLONE_SETTLS) { 759 if (clone_flags & CLONE_SETTLS) {
760 #ifdef CONFIG_PPC64 760 #ifdef CONFIG_PPC64
761 if (!is_32bit_task()) 761 if (!is_32bit_task())
762 childregs->gpr[13] = childregs->gpr[6]; 762 childregs->gpr[13] = childregs->gpr[6];
763 else 763 else
764 #endif 764 #endif
765 childregs->gpr[2] = childregs->gpr[6]; 765 childregs->gpr[2] = childregs->gpr[6];
766 } 766 }
767 } 767 }
768 childregs->gpr[3] = 0; /* Result from fork() */ 768 childregs->gpr[3] = 0; /* Result from fork() */
769 sp -= STACK_FRAME_OVERHEAD; 769 sp -= STACK_FRAME_OVERHEAD;
770 770
771 /* 771 /*
772 * The way this works is that at some point in the future 772 * The way this works is that at some point in the future
773 * some task will call _switch to switch to the new task. 773 * some task will call _switch to switch to the new task.
774 * That will pop off the stack frame created below and start 774 * That will pop off the stack frame created below and start
775 * the new task running at ret_from_fork. The new task will 775 * the new task running at ret_from_fork. The new task will
776 * do some housekeeping and then return from the fork or clone 776 * do some housekeeping and then return from the fork or clone
777 * system call, using the stack frame created above. 777 * system call, using the stack frame created above.
778 */ 778 */
779 sp -= sizeof(struct pt_regs); 779 sp -= sizeof(struct pt_regs);
780 kregs = (struct pt_regs *) sp; 780 kregs = (struct pt_regs *) sp;
781 sp -= STACK_FRAME_OVERHEAD; 781 sp -= STACK_FRAME_OVERHEAD;
782 p->thread.ksp = sp; 782 p->thread.ksp = sp;
783 p->thread.ksp_limit = (unsigned long)task_stack_page(p) + 783 p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
784 _ALIGN_UP(sizeof(struct thread_info), 16); 784 _ALIGN_UP(sizeof(struct thread_info), 16);
785 785
786 #ifdef CONFIG_PPC_STD_MMU_64 786 #ifdef CONFIG_PPC_STD_MMU_64
787 if (mmu_has_feature(MMU_FTR_SLB)) { 787 if (mmu_has_feature(MMU_FTR_SLB)) {
788 unsigned long sp_vsid; 788 unsigned long sp_vsid;
789 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp; 789 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
790 790
791 if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) 791 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
792 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T) 792 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
793 << SLB_VSID_SHIFT_1T; 793 << SLB_VSID_SHIFT_1T;
794 else 794 else
795 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M) 795 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
796 << SLB_VSID_SHIFT; 796 << SLB_VSID_SHIFT;
797 sp_vsid |= SLB_VSID_KERNEL | llp; 797 sp_vsid |= SLB_VSID_KERNEL | llp;
798 p->thread.ksp_vsid = sp_vsid; 798 p->thread.ksp_vsid = sp_vsid;
799 } 799 }
800 #endif /* CONFIG_PPC_STD_MMU_64 */ 800 #endif /* CONFIG_PPC_STD_MMU_64 */
801 #ifdef CONFIG_PPC64 801 #ifdef CONFIG_PPC64
802 if (cpu_has_feature(CPU_FTR_DSCR)) { 802 if (cpu_has_feature(CPU_FTR_DSCR)) {
803 if (current->thread.dscr_inherit) { 803 if (current->thread.dscr_inherit) {
804 p->thread.dscr_inherit = 1; 804 p->thread.dscr_inherit = 1;
805 p->thread.dscr = current->thread.dscr; 805 p->thread.dscr = current->thread.dscr;
806 } else if (0 != dscr_default) { 806 } else if (0 != dscr_default) {
807 p->thread.dscr_inherit = 1; 807 p->thread.dscr_inherit = 1;
808 p->thread.dscr = dscr_default; 808 p->thread.dscr = dscr_default;
809 } else { 809 } else {
810 p->thread.dscr_inherit = 0; 810 p->thread.dscr_inherit = 0;
811 p->thread.dscr = 0; 811 p->thread.dscr = 0;
812 } 812 }
813 } 813 }
814 #endif 814 #endif
815 815
816 /* 816 /*
817 * The PPC64 ABI makes use of a TOC to contain function 817 * The PPC64 ABI makes use of a TOC to contain function
818 * pointers. The function (ret_from_fork) is actually a pointer 818 * pointers. The function (ret_from_fork) is actually a pointer
819 * to the TOC entry. The first entry is a pointer to the actual 819 * to the TOC entry. The first entry is a pointer to the actual
820 * function. 820 * function.
821 */ 821 */
822 #ifdef CONFIG_PPC64 822 #ifdef CONFIG_PPC64
823 kregs->nip = *((unsigned long *)ret_from_fork); 823 kregs->nip = *((unsigned long *)ret_from_fork);
824 #else 824 #else
825 kregs->nip = (unsigned long)ret_from_fork; 825 kregs->nip = (unsigned long)ret_from_fork;
826 #endif 826 #endif
827 827
828 return 0; 828 return 0;
829 } 829 }
830 830
831 /* 831 /*
832 * Set up a thread for executing a new program 832 * Set up a thread for executing a new program
833 */ 833 */
834 void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) 834 void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
835 { 835 {
836 #ifdef CONFIG_PPC64 836 #ifdef CONFIG_PPC64
837 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */ 837 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
838 #endif 838 #endif
839 839
840 /* 840 /*
841 * If we exec out of a kernel thread then thread.regs will not be 841 * If we exec out of a kernel thread then thread.regs will not be
842 * set. Do it now. 842 * set. Do it now.
843 */ 843 */
844 if (!current->thread.regs) { 844 if (!current->thread.regs) {
845 struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE; 845 struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
846 current->thread.regs = regs - 1; 846 current->thread.regs = regs - 1;
847 } 847 }
848 848
849 memset(regs->gpr, 0, sizeof(regs->gpr)); 849 memset(regs->gpr, 0, sizeof(regs->gpr));
850 regs->ctr = 0; 850 regs->ctr = 0;
851 regs->link = 0; 851 regs->link = 0;
852 regs->xer = 0; 852 regs->xer = 0;
853 regs->ccr = 0; 853 regs->ccr = 0;
854 regs->gpr[1] = sp; 854 regs->gpr[1] = sp;
855 855
856 /* 856 /*
857 * We have just cleared all the nonvolatile GPRs, so make 857 * We have just cleared all the nonvolatile GPRs, so make
858 * FULL_REGS(regs) return true. This is necessary to allow 858 * FULL_REGS(regs) return true. This is necessary to allow
859 * ptrace to examine the thread immediately after exec. 859 * ptrace to examine the thread immediately after exec.
860 */ 860 */
861 regs->trap &= ~1UL; 861 regs->trap &= ~1UL;
862 862
863 #ifdef CONFIG_PPC32 863 #ifdef CONFIG_PPC32
864 regs->mq = 0; 864 regs->mq = 0;
865 regs->nip = start; 865 regs->nip = start;
866 regs->msr = MSR_USER; 866 regs->msr = MSR_USER;
867 #else 867 #else
868 if (!is_32bit_task()) { 868 if (!is_32bit_task()) {
869 unsigned long entry, toc; 869 unsigned long entry, toc;
870 870
871 /* start is a relocated pointer to the function descriptor for 871 /* start is a relocated pointer to the function descriptor for
872 * the elf _start routine. The first entry in the function 872 * the elf _start routine. The first entry in the function
873 * descriptor is the entry address of _start and the second 873 * descriptor is the entry address of _start and the second
874 * entry is the TOC value we need to use. 874 * entry is the TOC value we need to use.
875 */ 875 */
876 __get_user(entry, (unsigned long __user *)start); 876 __get_user(entry, (unsigned long __user *)start);
877 __get_user(toc, (unsigned long __user *)start+1); 877 __get_user(toc, (unsigned long __user *)start+1);
878 878
879 /* Check whether the e_entry function descriptor entries 879 /* Check whether the e_entry function descriptor entries
880 * need to be relocated before we can use them. 880 * need to be relocated before we can use them.
881 */ 881 */
882 if (load_addr != 0) { 882 if (load_addr != 0) {
883 entry += load_addr; 883 entry += load_addr;
884 toc += load_addr; 884 toc += load_addr;
885 } 885 }
886 regs->nip = entry; 886 regs->nip = entry;
887 regs->gpr[2] = toc; 887 regs->gpr[2] = toc;
888 regs->msr = MSR_USER64; 888 regs->msr = MSR_USER64;
889 } else { 889 } else {
890 regs->nip = start; 890 regs->nip = start;
891 regs->gpr[2] = 0; 891 regs->gpr[2] = 0;
892 regs->msr = MSR_USER32; 892 regs->msr = MSR_USER32;
893 } 893 }
894 #endif 894 #endif
895 895
896 discard_lazy_cpu_state(); 896 discard_lazy_cpu_state();
897 #ifdef CONFIG_VSX 897 #ifdef CONFIG_VSX
898 current->thread.used_vsr = 0; 898 current->thread.used_vsr = 0;
899 #endif 899 #endif
900 memset(current->thread.fpr, 0, sizeof(current->thread.fpr)); 900 memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
901 current->thread.fpscr.val = 0; 901 current->thread.fpscr.val = 0;
902 #ifdef CONFIG_ALTIVEC 902 #ifdef CONFIG_ALTIVEC
903 memset(current->thread.vr, 0, sizeof(current->thread.vr)); 903 memset(current->thread.vr, 0, sizeof(current->thread.vr));
904 memset(&current->thread.vscr, 0, sizeof(current->thread.vscr)); 904 memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
905 current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */ 905 current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
906 current->thread.vrsave = 0; 906 current->thread.vrsave = 0;
907 current->thread.used_vr = 0; 907 current->thread.used_vr = 0;
908 #endif /* CONFIG_ALTIVEC */ 908 #endif /* CONFIG_ALTIVEC */
909 #ifdef CONFIG_SPE 909 #ifdef CONFIG_SPE
910 memset(current->thread.evr, 0, sizeof(current->thread.evr)); 910 memset(current->thread.evr, 0, sizeof(current->thread.evr));
911 current->thread.acc = 0; 911 current->thread.acc = 0;
912 current->thread.spefscr = 0; 912 current->thread.spefscr = 0;
913 current->thread.used_spe = 0; 913 current->thread.used_spe = 0;
914 #endif /* CONFIG_SPE */ 914 #endif /* CONFIG_SPE */
915 } 915 }
916 916
917 #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \ 917 #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
918 | PR_FP_EXC_RES | PR_FP_EXC_INV) 918 | PR_FP_EXC_RES | PR_FP_EXC_INV)
919 919
920 int set_fpexc_mode(struct task_struct *tsk, unsigned int val) 920 int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
921 { 921 {
922 struct pt_regs *regs = tsk->thread.regs; 922 struct pt_regs *regs = tsk->thread.regs;
923 923
924 /* This is a bit hairy. If we are an SPE enabled processor 924 /* This is a bit hairy. If we are an SPE enabled processor
925 * (have embedded fp) we store the IEEE exception enable flags in 925 * (have embedded fp) we store the IEEE exception enable flags in
926 * fpexc_mode. fpexc_mode is also used for setting FP exception 926 * fpexc_mode. fpexc_mode is also used for setting FP exception
927 * mode (async, precise, disabled) for 'Classic' FP. */ 927 * mode (async, precise, disabled) for 'Classic' FP. */
928 if (val & PR_FP_EXC_SW_ENABLE) { 928 if (val & PR_FP_EXC_SW_ENABLE) {
929 #ifdef CONFIG_SPE 929 #ifdef CONFIG_SPE
930 if (cpu_has_feature(CPU_FTR_SPE)) { 930 if (cpu_has_feature(CPU_FTR_SPE)) {
931 tsk->thread.fpexc_mode = val & 931 tsk->thread.fpexc_mode = val &
932 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT); 932 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
933 return 0; 933 return 0;
934 } else { 934 } else {
935 return -EINVAL; 935 return -EINVAL;
936 } 936 }
937 #else 937 #else
938 return -EINVAL; 938 return -EINVAL;
939 #endif 939 #endif
940 } 940 }
941 941
942 /* On a CONFIG_SPE build this does not hurt us. The bits that 942 /* On a CONFIG_SPE build this does not hurt us. The bits that
943 * __pack_fe01 use do not overlap with bits used for 943 * __pack_fe01 use do not overlap with bits used for
944 * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits 944 * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits
945 * on CONFIG_SPE implementations are reserved so writing to 945 * on CONFIG_SPE implementations are reserved so writing to
946 * them does not change anything */ 946 * them does not change anything */
947 if (val > PR_FP_EXC_PRECISE) 947 if (val > PR_FP_EXC_PRECISE)
948 return -EINVAL; 948 return -EINVAL;
949 tsk->thread.fpexc_mode = __pack_fe01(val); 949 tsk->thread.fpexc_mode = __pack_fe01(val);
950 if (regs != NULL && (regs->msr & MSR_FP) != 0) 950 if (regs != NULL && (regs->msr & MSR_FP) != 0)
951 regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1)) 951 regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
952 | tsk->thread.fpexc_mode; 952 | tsk->thread.fpexc_mode;
953 return 0; 953 return 0;
954 } 954 }
955 955
956 int get_fpexc_mode(struct task_struct *tsk, unsigned long adr) 956 int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
957 { 957 {
958 unsigned int val; 958 unsigned int val;
959 959
960 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) 960 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
961 #ifdef CONFIG_SPE 961 #ifdef CONFIG_SPE
962 if (cpu_has_feature(CPU_FTR_SPE)) 962 if (cpu_has_feature(CPU_FTR_SPE))
963 val = tsk->thread.fpexc_mode; 963 val = tsk->thread.fpexc_mode;
964 else 964 else
965 return -EINVAL; 965 return -EINVAL;
966 #else 966 #else
967 return -EINVAL; 967 return -EINVAL;
968 #endif 968 #endif
969 else 969 else
970 val = __unpack_fe01(tsk->thread.fpexc_mode); 970 val = __unpack_fe01(tsk->thread.fpexc_mode);
971 return put_user(val, (unsigned int __user *) adr); 971 return put_user(val, (unsigned int __user *) adr);
972 } 972 }
973 973
974 int set_endian(struct task_struct *tsk, unsigned int val) 974 int set_endian(struct task_struct *tsk, unsigned int val)
975 { 975 {
976 struct pt_regs *regs = tsk->thread.regs; 976 struct pt_regs *regs = tsk->thread.regs;
977 977
978 if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) || 978 if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
979 (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE))) 979 (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
980 return -EINVAL; 980 return -EINVAL;
981 981
982 if (regs == NULL) 982 if (regs == NULL)
983 return -EINVAL; 983 return -EINVAL;
984 984
985 if (val == PR_ENDIAN_BIG) 985 if (val == PR_ENDIAN_BIG)
986 regs->msr &= ~MSR_LE; 986 regs->msr &= ~MSR_LE;
987 else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE) 987 else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
988 regs->msr |= MSR_LE; 988 regs->msr |= MSR_LE;
989 else 989 else
990 return -EINVAL; 990 return -EINVAL;
991 991
992 return 0; 992 return 0;
993 } 993 }
994 994
995 int get_endian(struct task_struct *tsk, unsigned long adr) 995 int get_endian(struct task_struct *tsk, unsigned long adr)
996 { 996 {
997 struct pt_regs *regs = tsk->thread.regs; 997 struct pt_regs *regs = tsk->thread.regs;
998 unsigned int val; 998 unsigned int val;
999 999
1000 if (!cpu_has_feature(CPU_FTR_PPC_LE) && 1000 if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
1001 !cpu_has_feature(CPU_FTR_REAL_LE)) 1001 !cpu_has_feature(CPU_FTR_REAL_LE))
1002 return -EINVAL; 1002 return -EINVAL;
1003 1003
1004 if (regs == NULL) 1004 if (regs == NULL)
1005 return -EINVAL; 1005 return -EINVAL;
1006 1006
1007 if (regs->msr & MSR_LE) { 1007 if (regs->msr & MSR_LE) {
1008 if (cpu_has_feature(CPU_FTR_REAL_LE)) 1008 if (cpu_has_feature(CPU_FTR_REAL_LE))
1009 val = PR_ENDIAN_LITTLE; 1009 val = PR_ENDIAN_LITTLE;
1010 else 1010 else
1011 val = PR_ENDIAN_PPC_LITTLE; 1011 val = PR_ENDIAN_PPC_LITTLE;
1012 } else 1012 } else
1013 val = PR_ENDIAN_BIG; 1013 val = PR_ENDIAN_BIG;
1014 1014
1015 return put_user(val, (unsigned int __user *)adr); 1015 return put_user(val, (unsigned int __user *)adr);
1016 } 1016 }
1017 1017
1018 int set_unalign_ctl(struct task_struct *tsk, unsigned int val) 1018 int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
1019 { 1019 {
1020 tsk->thread.align_ctl = val; 1020 tsk->thread.align_ctl = val;
1021 return 0; 1021 return 0;
1022 } 1022 }
1023 1023
1024 int get_unalign_ctl(struct task_struct *tsk, unsigned long adr) 1024 int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
1025 { 1025 {
1026 return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr); 1026 return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
1027 } 1027 }
1028 1028
1029 #define TRUNC_PTR(x) ((typeof(x))(((unsigned long)(x)) & 0xffffffff)) 1029 #define TRUNC_PTR(x) ((typeof(x))(((unsigned long)(x)) & 0xffffffff))
1030 1030
1031 int sys_clone(unsigned long clone_flags, unsigned long usp, 1031 int sys_clone(unsigned long clone_flags, unsigned long usp,
1032 int __user *parent_tidp, void __user *child_threadptr, 1032 int __user *parent_tidp, void __user *child_threadptr,
1033 int __user *child_tidp, int p6, 1033 int __user *child_tidp, int p6,
1034 struct pt_regs *regs) 1034 struct pt_regs *regs)
1035 { 1035 {
1036 CHECK_FULL_REGS(regs); 1036 CHECK_FULL_REGS(regs);
1037 if (usp == 0) 1037 if (usp == 0)
1038 usp = regs->gpr[1]; /* stack pointer for child */ 1038 usp = regs->gpr[1]; /* stack pointer for child */
1039 #ifdef CONFIG_PPC64 1039 #ifdef CONFIG_PPC64
1040 if (is_32bit_task()) { 1040 if (is_32bit_task()) {
1041 parent_tidp = TRUNC_PTR(parent_tidp); 1041 parent_tidp = TRUNC_PTR(parent_tidp);
1042 child_tidp = TRUNC_PTR(child_tidp); 1042 child_tidp = TRUNC_PTR(child_tidp);
1043 } 1043 }
1044 #endif 1044 #endif
1045 return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp); 1045 return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
1046 } 1046 }
1047 1047
1048 int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3, 1048 int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
1049 unsigned long p4, unsigned long p5, unsigned long p6, 1049 unsigned long p4, unsigned long p5, unsigned long p6,
1050 struct pt_regs *regs) 1050 struct pt_regs *regs)
1051 { 1051 {
1052 CHECK_FULL_REGS(regs); 1052 CHECK_FULL_REGS(regs);
1053 return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL); 1053 return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
1054 } 1054 }
1055 1055
1056 int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3, 1056 int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
1057 unsigned long p4, unsigned long p5, unsigned long p6, 1057 unsigned long p4, unsigned long p5, unsigned long p6,
1058 struct pt_regs *regs) 1058 struct pt_regs *regs)
1059 { 1059 {
1060 CHECK_FULL_REGS(regs); 1060 CHECK_FULL_REGS(regs);
1061 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1], 1061 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
1062 regs, 0, NULL, NULL); 1062 regs, 0, NULL, NULL);
1063 } 1063 }
1064 1064
1065 int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2, 1065 int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
1066 unsigned long a3, unsigned long a4, unsigned long a5, 1066 unsigned long a3, unsigned long a4, unsigned long a5,
1067 struct pt_regs *regs) 1067 struct pt_regs *regs)
1068 { 1068 {
1069 int error; 1069 int error;
1070 char *filename; 1070 char *filename;
1071 1071
1072 filename = getname((const char __user *) a0); 1072 filename = getname((const char __user *) a0);
1073 error = PTR_ERR(filename); 1073 error = PTR_ERR(filename);
1074 if (IS_ERR(filename)) 1074 if (IS_ERR(filename))
1075 goto out; 1075 goto out;
1076 flush_fp_to_thread(current); 1076 flush_fp_to_thread(current);
1077 flush_altivec_to_thread(current); 1077 flush_altivec_to_thread(current);
1078 flush_spe_to_thread(current); 1078 flush_spe_to_thread(current);
1079 error = do_execve(filename, 1079 error = do_execve(filename,
1080 (const char __user *const __user *) a1, 1080 (const char __user *const __user *) a1,
1081 (const char __user *const __user *) a2, regs); 1081 (const char __user *const __user *) a2, regs);
1082 putname(filename); 1082 putname(filename);
1083 out: 1083 out:
1084 return error; 1084 return error;
1085 } 1085 }
1086 1086
1087 static inline int valid_irq_stack(unsigned long sp, struct task_struct *p, 1087 static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
1088 unsigned long nbytes) 1088 unsigned long nbytes)
1089 { 1089 {
1090 unsigned long stack_page; 1090 unsigned long stack_page;
1091 unsigned long cpu = task_cpu(p); 1091 unsigned long cpu = task_cpu(p);
1092 1092
1093 /* 1093 /*
1094 * Avoid crashing if the stack has overflowed and corrupted 1094 * Avoid crashing if the stack has overflowed and corrupted
1095 * task_cpu(p), which is in the thread_info struct. 1095 * task_cpu(p), which is in the thread_info struct.
1096 */ 1096 */
1097 if (cpu < NR_CPUS && cpu_possible(cpu)) { 1097 if (cpu < NR_CPUS && cpu_possible(cpu)) {
1098 stack_page = (unsigned long) hardirq_ctx[cpu]; 1098 stack_page = (unsigned long) hardirq_ctx[cpu];
1099 if (sp >= stack_page + sizeof(struct thread_struct) 1099 if (sp >= stack_page + sizeof(struct thread_struct)
1100 && sp <= stack_page + THREAD_SIZE - nbytes) 1100 && sp <= stack_page + THREAD_SIZE - nbytes)
1101 return 1; 1101 return 1;
1102 1102
1103 stack_page = (unsigned long) softirq_ctx[cpu]; 1103 stack_page = (unsigned long) softirq_ctx[cpu];
1104 if (sp >= stack_page + sizeof(struct thread_struct) 1104 if (sp >= stack_page + sizeof(struct thread_struct)
1105 && sp <= stack_page + THREAD_SIZE - nbytes) 1105 && sp <= stack_page + THREAD_SIZE - nbytes)
1106 return 1; 1106 return 1;
1107 } 1107 }
1108 return 0; 1108 return 0;
1109 } 1109 }
1110 1110
1111 int validate_sp(unsigned long sp, struct task_struct *p, 1111 int validate_sp(unsigned long sp, struct task_struct *p,
1112 unsigned long nbytes) 1112 unsigned long nbytes)
1113 { 1113 {
1114 unsigned long stack_page = (unsigned long)task_stack_page(p); 1114 unsigned long stack_page = (unsigned long)task_stack_page(p);
1115 1115
1116 if (sp >= stack_page + sizeof(struct thread_struct) 1116 if (sp >= stack_page + sizeof(struct thread_struct)
1117 && sp <= stack_page + THREAD_SIZE - nbytes) 1117 && sp <= stack_page + THREAD_SIZE - nbytes)
1118 return 1; 1118 return 1;
1119 1119
1120 return valid_irq_stack(sp, p, nbytes); 1120 return valid_irq_stack(sp, p, nbytes);
1121 } 1121 }
1122 1122
1123 EXPORT_SYMBOL(validate_sp); 1123 EXPORT_SYMBOL(validate_sp);
1124 1124
1125 unsigned long get_wchan(struct task_struct *p) 1125 unsigned long get_wchan(struct task_struct *p)
1126 { 1126 {
1127 unsigned long ip, sp; 1127 unsigned long ip, sp;
1128 int count = 0; 1128 int count = 0;
1129 1129
1130 if (!p || p == current || p->state == TASK_RUNNING) 1130 if (!p || p == current || p->state == TASK_RUNNING)
1131 return 0; 1131 return 0;
1132 1132
1133 sp = p->thread.ksp; 1133 sp = p->thread.ksp;
1134 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD)) 1134 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
1135 return 0; 1135 return 0;
1136 1136
1137 do { 1137 do {
1138 sp = *(unsigned long *)sp; 1138 sp = *(unsigned long *)sp;
1139 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD)) 1139 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
1140 return 0; 1140 return 0;
1141 if (count > 0) { 1141 if (count > 0) {
1142 ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE]; 1142 ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
1143 if (!in_sched_functions(ip)) 1143 if (!in_sched_functions(ip))
1144 return ip; 1144 return ip;
1145 } 1145 }
1146 } while (count++ < 16); 1146 } while (count++ < 16);
1147 return 0; 1147 return 0;
1148 } 1148 }
1149 1149
1150 static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH; 1150 static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
1151 1151
1152 void show_stack(struct task_struct *tsk, unsigned long *stack) 1152 void show_stack(struct task_struct *tsk, unsigned long *stack)
1153 { 1153 {
1154 unsigned long sp, ip, lr, newsp; 1154 unsigned long sp, ip, lr, newsp;
1155 int count = 0; 1155 int count = 0;
1156 int firstframe = 1; 1156 int firstframe = 1;
1157 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 1157 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1158 int curr_frame = current->curr_ret_stack; 1158 int curr_frame = current->curr_ret_stack;
1159 extern void return_to_handler(void); 1159 extern void return_to_handler(void);
1160 unsigned long rth = (unsigned long)return_to_handler; 1160 unsigned long rth = (unsigned long)return_to_handler;
1161 unsigned long mrth = -1; 1161 unsigned long mrth = -1;
1162 #ifdef CONFIG_PPC64 1162 #ifdef CONFIG_PPC64
1163 extern void mod_return_to_handler(void); 1163 extern void mod_return_to_handler(void);
1164 rth = *(unsigned long *)rth; 1164 rth = *(unsigned long *)rth;
1165 mrth = (unsigned long)mod_return_to_handler; 1165 mrth = (unsigned long)mod_return_to_handler;
1166 mrth = *(unsigned long *)mrth; 1166 mrth = *(unsigned long *)mrth;
1167 #endif 1167 #endif
1168 #endif 1168 #endif
1169 1169
1170 sp = (unsigned long) stack; 1170 sp = (unsigned long) stack;
1171 if (tsk == NULL) 1171 if (tsk == NULL)
1172 tsk = current; 1172 tsk = current;
1173 if (sp == 0) { 1173 if (sp == 0) {
1174 if (tsk == current) 1174 if (tsk == current)
1175 asm("mr %0,1" : "=r" (sp)); 1175 asm("mr %0,1" : "=r" (sp));
1176 else 1176 else
1177 sp = tsk->thread.ksp; 1177 sp = tsk->thread.ksp;
1178 } 1178 }
1179 1179
1180 lr = 0; 1180 lr = 0;
1181 printk("Call Trace:\n"); 1181 printk("Call Trace:\n");
1182 do { 1182 do {
1183 if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD)) 1183 if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
1184 return; 1184 return;
1185 1185
1186 stack = (unsigned long *) sp; 1186 stack = (unsigned long *) sp;
1187 newsp = stack[0]; 1187 newsp = stack[0];
1188 ip = stack[STACK_FRAME_LR_SAVE]; 1188 ip = stack[STACK_FRAME_LR_SAVE];
1189 if (!firstframe || ip != lr) { 1189 if (!firstframe || ip != lr) {
1190 printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); 1190 printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
1191 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 1191 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1192 if ((ip == rth || ip == mrth) && curr_frame >= 0) { 1192 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
1193 printk(" (%pS)", 1193 printk(" (%pS)",
1194 (void *)current->ret_stack[curr_frame].ret); 1194 (void *)current->ret_stack[curr_frame].ret);
1195 curr_frame--; 1195 curr_frame--;
1196 } 1196 }
1197 #endif 1197 #endif
1198 if (firstframe) 1198 if (firstframe)
1199 printk(" (unreliable)"); 1199 printk(" (unreliable)");
1200 printk("\n"); 1200 printk("\n");
1201 } 1201 }
1202 firstframe = 0; 1202 firstframe = 0;
1203 1203
1204 /* 1204 /*
1205 * See if this is an exception frame. 1205 * See if this is an exception frame.
1206 * We look for the "regshere" marker in the current frame. 1206 * We look for the "regshere" marker in the current frame.
1207 */ 1207 */
1208 if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE) 1208 if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
1209 && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { 1209 && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
1210 struct pt_regs *regs = (struct pt_regs *) 1210 struct pt_regs *regs = (struct pt_regs *)
1211 (sp + STACK_FRAME_OVERHEAD); 1211 (sp + STACK_FRAME_OVERHEAD);
1212 lr = regs->link; 1212 lr = regs->link;
1213 printk("--- Exception: %lx at %pS\n LR = %pS\n", 1213 printk("--- Exception: %lx at %pS\n LR = %pS\n",
1214 regs->trap, (void *)regs->nip, (void *)lr); 1214 regs->trap, (void *)regs->nip, (void *)lr);
1215 firstframe = 1; 1215 firstframe = 1;
1216 } 1216 }
1217 1217
1218 sp = newsp; 1218 sp = newsp;
1219 } while (count++ < kstack_depth_to_print); 1219 } while (count++ < kstack_depth_to_print);
1220 } 1220 }
1221 1221
1222 void dump_stack(void) 1222 void dump_stack(void)
1223 { 1223 {
1224 show_stack(current, NULL); 1224 show_stack(current, NULL);
1225 } 1225 }
1226 EXPORT_SYMBOL(dump_stack); 1226 EXPORT_SYMBOL(dump_stack);
1227 1227
1228 #ifdef CONFIG_PPC64 1228 #ifdef CONFIG_PPC64
1229 void ppc64_runlatch_on(void) 1229 void ppc64_runlatch_on(void)
1230 { 1230 {
1231 unsigned long ctrl; 1231 unsigned long ctrl;
1232 1232
1233 if (cpu_has_feature(CPU_FTR_CTRL) && !test_thread_flag(TIF_RUNLATCH)) { 1233 if (cpu_has_feature(CPU_FTR_CTRL) && !test_thread_flag(TIF_RUNLATCH)) {
1234 HMT_medium(); 1234 HMT_medium();
1235 1235
1236 ctrl = mfspr(SPRN_CTRLF); 1236 ctrl = mfspr(SPRN_CTRLF);
1237 ctrl |= CTRL_RUNLATCH; 1237 ctrl |= CTRL_RUNLATCH;
1238 mtspr(SPRN_CTRLT, ctrl); 1238 mtspr(SPRN_CTRLT, ctrl);
1239 1239
1240 set_thread_flag(TIF_RUNLATCH); 1240 set_thread_flag(TIF_RUNLATCH);
1241 } 1241 }
1242 } 1242 }
1243 1243
1244 void __ppc64_runlatch_off(void) 1244 void __ppc64_runlatch_off(void)
1245 { 1245 {
1246 unsigned long ctrl; 1246 unsigned long ctrl;
1247 1247
1248 HMT_medium(); 1248 HMT_medium();
1249 1249
1250 clear_thread_flag(TIF_RUNLATCH); 1250 clear_thread_flag(TIF_RUNLATCH);
1251 1251
1252 ctrl = mfspr(SPRN_CTRLF); 1252 ctrl = mfspr(SPRN_CTRLF);
1253 ctrl &= ~CTRL_RUNLATCH; 1253 ctrl &= ~CTRL_RUNLATCH;
1254 mtspr(SPRN_CTRLT, ctrl); 1254 mtspr(SPRN_CTRLT, ctrl);
1255 } 1255 }
1256 #endif 1256 #endif
1257 1257
1258 #if THREAD_SHIFT < PAGE_SHIFT 1258 #if THREAD_SHIFT < PAGE_SHIFT
1259 1259
1260 static struct kmem_cache *thread_info_cache; 1260 static struct kmem_cache *thread_info_cache;
1261 1261
1262 struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node) 1262 struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
1263 { 1263 {
1264 struct thread_info *ti; 1264 struct thread_info *ti;
1265 1265
1266 ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node); 1266 ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node);
1267 if (unlikely(ti == NULL)) 1267 if (unlikely(ti == NULL))
1268 return NULL; 1268 return NULL;
1269 #ifdef CONFIG_DEBUG_STACK_USAGE 1269 #ifdef CONFIG_DEBUG_STACK_USAGE
1270 memset(ti, 0, THREAD_SIZE); 1270 memset(ti, 0, THREAD_SIZE);
1271 #endif 1271 #endif
1272 return ti; 1272 return ti;
1273 } 1273 }
1274 1274
1275 void free_thread_info(struct thread_info *ti) 1275 void free_thread_info(struct thread_info *ti)
1276 { 1276 {
1277 kmem_cache_free(thread_info_cache, ti); 1277 kmem_cache_free(thread_info_cache, ti);
1278 } 1278 }
1279 1279
1280 void thread_info_cache_init(void) 1280 void thread_info_cache_init(void)
1281 { 1281 {
1282 thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE, 1282 thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
1283 THREAD_SIZE, 0, NULL); 1283 THREAD_SIZE, 0, NULL);
1284 BUG_ON(thread_info_cache == NULL); 1284 BUG_ON(thread_info_cache == NULL);
1285 } 1285 }
1286 1286
1287 #endif /* THREAD_SHIFT < PAGE_SHIFT */ 1287 #endif /* THREAD_SHIFT < PAGE_SHIFT */
1288 1288
1289 unsigned long arch_align_stack(unsigned long sp) 1289 unsigned long arch_align_stack(unsigned long sp)
1290 { 1290 {
1291 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) 1291 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1292 sp -= get_random_int() & ~PAGE_MASK; 1292 sp -= get_random_int() & ~PAGE_MASK;
1293 return sp & ~0xf; 1293 return sp & ~0xf;
1294 } 1294 }
1295 1295
1296 static inline unsigned long brk_rnd(void) 1296 static inline unsigned long brk_rnd(void)
1297 { 1297 {
1298 unsigned long rnd = 0; 1298 unsigned long rnd = 0;
1299 1299
1300 /* 8MB for 32bit, 1GB for 64bit */ 1300 /* 8MB for 32bit, 1GB for 64bit */
1301 if (is_32bit_task()) 1301 if (is_32bit_task())
1302 rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT))); 1302 rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
1303 else 1303 else
1304 rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT))); 1304 rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
1305 1305
1306 return rnd << PAGE_SHIFT; 1306 return rnd << PAGE_SHIFT;
1307 } 1307 }
1308 1308
1309 unsigned long arch_randomize_brk(struct mm_struct *mm) 1309 unsigned long arch_randomize_brk(struct mm_struct *mm)
1310 { 1310 {
1311 unsigned long base = mm->brk; 1311 unsigned long base = mm->brk;
1312 unsigned long ret; 1312 unsigned long ret;
1313 1313
1314 #ifdef CONFIG_PPC_STD_MMU_64 1314 #ifdef CONFIG_PPC_STD_MMU_64
1315 /* 1315 /*
1316 * If we are using 1TB segments and we are allowed to randomise 1316 * If we are using 1TB segments and we are allowed to randomise
1317 * the heap, we can put it above 1TB so it is backed by a 1TB 1317 * the heap, we can put it above 1TB so it is backed by a 1TB
1318 * segment. Otherwise the heap will be in the bottom 1TB 1318 * segment. Otherwise the heap will be in the bottom 1TB
1319 * which always uses 256MB segments and this may result in a 1319 * which always uses 256MB segments and this may result in a
1320 * performance penalty. 1320 * performance penalty.
1321 */ 1321 */
1322 if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T)) 1322 if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
1323 base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T); 1323 base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
1324 #endif 1324 #endif
1325 1325
1326 ret = PAGE_ALIGN(base + brk_rnd()); 1326 ret = PAGE_ALIGN(base + brk_rnd());
1327 1327
1328 if (ret < mm->brk) 1328 if (ret < mm->brk)
1329 return mm->brk; 1329 return mm->brk;
1330 1330
1331 return ret; 1331 return ret;
1332 } 1332 }
1333 1333
1334 unsigned long randomize_et_dyn(unsigned long base) 1334 unsigned long randomize_et_dyn(unsigned long base)
1335 { 1335 {
1336 unsigned long ret = PAGE_ALIGN(base + brk_rnd()); 1336 unsigned long ret = PAGE_ALIGN(base + brk_rnd());
1337 1337
1338 if (ret < base) 1338 if (ret < base)
1339 return base; 1339 return base;
1340 1340
1341 return ret; 1341 return ret;
1342 } 1342 }
1343 1343
arch/powerpc/kernel/prom.c
1 /* 1 /*
2 * Procedures for creating, accessing and interpreting the device tree. 2 * Procedures for creating, accessing and interpreting the device tree.
3 * 3 *
4 * Paul Mackerras August 1996. 4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras. 5 * Copyright (C) 1996-2005 Paul Mackerras.
6 * 6 *
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. 7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com 8 * {engebret|bergner}@us.ibm.com
9 * 9 *
10 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version. 13 * 2 of the License, or (at your option) any later version.
14 */ 14 */
15 15
16 #undef DEBUG 16 #undef DEBUG
17 17
18 #include <stdarg.h> 18 #include <stdarg.h>
19 #include <linux/kernel.h> 19 #include <linux/kernel.h>
20 #include <linux/string.h> 20 #include <linux/string.h>
21 #include <linux/init.h> 21 #include <linux/init.h>
22 #include <linux/threads.h> 22 #include <linux/threads.h>
23 #include <linux/spinlock.h> 23 #include <linux/spinlock.h>
24 #include <linux/types.h> 24 #include <linux/types.h>
25 #include <linux/pci.h> 25 #include <linux/pci.h>
26 #include <linux/stringify.h> 26 #include <linux/stringify.h>
27 #include <linux/delay.h> 27 #include <linux/delay.h>
28 #include <linux/initrd.h> 28 #include <linux/initrd.h>
29 #include <linux/bitops.h> 29 #include <linux/bitops.h>
30 #include <linux/module.h> 30 #include <linux/export.h>
31 #include <linux/kexec.h> 31 #include <linux/kexec.h>
32 #include <linux/debugfs.h> 32 #include <linux/debugfs.h>
33 #include <linux/irq.h> 33 #include <linux/irq.h>
34 #include <linux/memblock.h> 34 #include <linux/memblock.h>
35 35
36 #include <asm/prom.h> 36 #include <asm/prom.h>
37 #include <asm/rtas.h> 37 #include <asm/rtas.h>
38 #include <asm/page.h> 38 #include <asm/page.h>
39 #include <asm/processor.h> 39 #include <asm/processor.h>
40 #include <asm/irq.h> 40 #include <asm/irq.h>
41 #include <asm/io.h> 41 #include <asm/io.h>
42 #include <asm/kdump.h> 42 #include <asm/kdump.h>
43 #include <asm/smp.h> 43 #include <asm/smp.h>
44 #include <asm/system.h> 44 #include <asm/system.h>
45 #include <asm/mmu.h> 45 #include <asm/mmu.h>
46 #include <asm/paca.h> 46 #include <asm/paca.h>
47 #include <asm/pgtable.h> 47 #include <asm/pgtable.h>
48 #include <asm/pci.h> 48 #include <asm/pci.h>
49 #include <asm/iommu.h> 49 #include <asm/iommu.h>
50 #include <asm/btext.h> 50 #include <asm/btext.h>
51 #include <asm/sections.h> 51 #include <asm/sections.h>
52 #include <asm/machdep.h> 52 #include <asm/machdep.h>
53 #include <asm/pSeries_reconfig.h> 53 #include <asm/pSeries_reconfig.h>
54 #include <asm/pci-bridge.h> 54 #include <asm/pci-bridge.h>
55 #include <asm/phyp_dump.h> 55 #include <asm/phyp_dump.h>
56 #include <asm/kexec.h> 56 #include <asm/kexec.h>
57 #include <mm/mmu_decl.h> 57 #include <mm/mmu_decl.h>
58 58
59 #ifdef DEBUG 59 #ifdef DEBUG
60 #define DBG(fmt...) printk(KERN_ERR fmt) 60 #define DBG(fmt...) printk(KERN_ERR fmt)
61 #else 61 #else
62 #define DBG(fmt...) 62 #define DBG(fmt...)
63 #endif 63 #endif
64 64
65 #ifdef CONFIG_PPC64 65 #ifdef CONFIG_PPC64
66 int __initdata iommu_is_off; 66 int __initdata iommu_is_off;
67 int __initdata iommu_force_on; 67 int __initdata iommu_force_on;
68 unsigned long tce_alloc_start, tce_alloc_end; 68 unsigned long tce_alloc_start, tce_alloc_end;
69 u64 ppc64_rma_size; 69 u64 ppc64_rma_size;
70 #endif 70 #endif
71 static phys_addr_t first_memblock_size; 71 static phys_addr_t first_memblock_size;
72 static int __initdata boot_cpu_count; 72 static int __initdata boot_cpu_count;
73 73
74 static int __init early_parse_mem(char *p) 74 static int __init early_parse_mem(char *p)
75 { 75 {
76 if (!p) 76 if (!p)
77 return 1; 77 return 1;
78 78
79 memory_limit = PAGE_ALIGN(memparse(p, &p)); 79 memory_limit = PAGE_ALIGN(memparse(p, &p));
80 DBG("memory limit = 0x%llx\n", (unsigned long long)memory_limit); 80 DBG("memory limit = 0x%llx\n", (unsigned long long)memory_limit);
81 81
82 return 0; 82 return 0;
83 } 83 }
84 early_param("mem", early_parse_mem); 84 early_param("mem", early_parse_mem);
85 85
86 /* 86 /*
87 * overlaps_initrd - check for overlap with page aligned extension of 87 * overlaps_initrd - check for overlap with page aligned extension of
88 * initrd. 88 * initrd.
89 */ 89 */
90 static inline int overlaps_initrd(unsigned long start, unsigned long size) 90 static inline int overlaps_initrd(unsigned long start, unsigned long size)
91 { 91 {
92 #ifdef CONFIG_BLK_DEV_INITRD 92 #ifdef CONFIG_BLK_DEV_INITRD
93 if (!initrd_start) 93 if (!initrd_start)
94 return 0; 94 return 0;
95 95
96 return (start + size) > _ALIGN_DOWN(initrd_start, PAGE_SIZE) && 96 return (start + size) > _ALIGN_DOWN(initrd_start, PAGE_SIZE) &&
97 start <= _ALIGN_UP(initrd_end, PAGE_SIZE); 97 start <= _ALIGN_UP(initrd_end, PAGE_SIZE);
98 #else 98 #else
99 return 0; 99 return 0;
100 #endif 100 #endif
101 } 101 }
102 102
103 /** 103 /**
104 * move_device_tree - move tree to an unused area, if needed. 104 * move_device_tree - move tree to an unused area, if needed.
105 * 105 *
106 * The device tree may be allocated beyond our memory limit, or inside the 106 * The device tree may be allocated beyond our memory limit, or inside the
107 * crash kernel region for kdump, or within the page aligned range of initrd. 107 * crash kernel region for kdump, or within the page aligned range of initrd.
108 * If so, move it out of the way. 108 * If so, move it out of the way.
109 */ 109 */
110 static void __init move_device_tree(void) 110 static void __init move_device_tree(void)
111 { 111 {
112 unsigned long start, size; 112 unsigned long start, size;
113 void *p; 113 void *p;
114 114
115 DBG("-> move_device_tree\n"); 115 DBG("-> move_device_tree\n");
116 116
117 start = __pa(initial_boot_params); 117 start = __pa(initial_boot_params);
118 size = be32_to_cpu(initial_boot_params->totalsize); 118 size = be32_to_cpu(initial_boot_params->totalsize);
119 119
120 if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) || 120 if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
121 overlaps_crashkernel(start, size) || 121 overlaps_crashkernel(start, size) ||
122 overlaps_initrd(start, size)) { 122 overlaps_initrd(start, size)) {
123 p = __va(memblock_alloc(size, PAGE_SIZE)); 123 p = __va(memblock_alloc(size, PAGE_SIZE));
124 memcpy(p, initial_boot_params, size); 124 memcpy(p, initial_boot_params, size);
125 initial_boot_params = (struct boot_param_header *)p; 125 initial_boot_params = (struct boot_param_header *)p;
126 DBG("Moved device tree to 0x%p\n", p); 126 DBG("Moved device tree to 0x%p\n", p);
127 } 127 }
128 128
129 DBG("<- move_device_tree\n"); 129 DBG("<- move_device_tree\n");
130 } 130 }
131 131
132 /* 132 /*
133 * ibm,pa-features is a per-cpu property that contains a string of 133 * ibm,pa-features is a per-cpu property that contains a string of
134 * attribute descriptors, each of which has a 2 byte header plus up 134 * attribute descriptors, each of which has a 2 byte header plus up
135 * to 254 bytes worth of processor attribute bits. First header 135 * to 254 bytes worth of processor attribute bits. First header
136 * byte specifies the number of bytes following the header. 136 * byte specifies the number of bytes following the header.
137 * Second header byte is an "attribute-specifier" type, of which 137 * Second header byte is an "attribute-specifier" type, of which
138 * zero is the only currently-defined value. 138 * zero is the only currently-defined value.
139 * Implementation: Pass in the byte and bit offset for the feature 139 * Implementation: Pass in the byte and bit offset for the feature
140 * that we are interested in. The function will return -1 if the 140 * that we are interested in. The function will return -1 if the
141 * pa-features property is missing, or a 1/0 to indicate if the feature 141 * pa-features property is missing, or a 1/0 to indicate if the feature
142 * is supported/not supported. Note that the bit numbers are 142 * is supported/not supported. Note that the bit numbers are
143 * big-endian to match the definition in PAPR. 143 * big-endian to match the definition in PAPR.
144 */ 144 */
145 static struct ibm_pa_feature { 145 static struct ibm_pa_feature {
146 unsigned long cpu_features; /* CPU_FTR_xxx bit */ 146 unsigned long cpu_features; /* CPU_FTR_xxx bit */
147 unsigned long mmu_features; /* MMU_FTR_xxx bit */ 147 unsigned long mmu_features; /* MMU_FTR_xxx bit */
148 unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */ 148 unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */
149 unsigned char pabyte; /* byte number in ibm,pa-features */ 149 unsigned char pabyte; /* byte number in ibm,pa-features */
150 unsigned char pabit; /* bit number (big-endian) */ 150 unsigned char pabit; /* bit number (big-endian) */
151 unsigned char invert; /* if 1, pa bit set => clear feature */ 151 unsigned char invert; /* if 1, pa bit set => clear feature */
152 } ibm_pa_features[] __initdata = { 152 } ibm_pa_features[] __initdata = {
153 {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0}, 153 {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0},
154 {0, 0, PPC_FEATURE_HAS_FPU, 0, 1, 0}, 154 {0, 0, PPC_FEATURE_HAS_FPU, 0, 1, 0},
155 {0, MMU_FTR_SLB, 0, 0, 2, 0}, 155 {0, MMU_FTR_SLB, 0, 0, 2, 0},
156 {CPU_FTR_CTRL, 0, 0, 0, 3, 0}, 156 {CPU_FTR_CTRL, 0, 0, 0, 3, 0},
157 {CPU_FTR_NOEXECUTE, 0, 0, 0, 6, 0}, 157 {CPU_FTR_NOEXECUTE, 0, 0, 0, 6, 0},
158 {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1}, 158 {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1},
159 {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0}, 159 {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
160 {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0}, 160 {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
161 }; 161 };
162 162
163 static void __init scan_features(unsigned long node, unsigned char *ftrs, 163 static void __init scan_features(unsigned long node, unsigned char *ftrs,
164 unsigned long tablelen, 164 unsigned long tablelen,
165 struct ibm_pa_feature *fp, 165 struct ibm_pa_feature *fp,
166 unsigned long ft_size) 166 unsigned long ft_size)
167 { 167 {
168 unsigned long i, len, bit; 168 unsigned long i, len, bit;
169 169
170 /* find descriptor with type == 0 */ 170 /* find descriptor with type == 0 */
171 for (;;) { 171 for (;;) {
172 if (tablelen < 3) 172 if (tablelen < 3)
173 return; 173 return;
174 len = 2 + ftrs[0]; 174 len = 2 + ftrs[0];
175 if (tablelen < len) 175 if (tablelen < len)
176 return; /* descriptor 0 not found */ 176 return; /* descriptor 0 not found */
177 if (ftrs[1] == 0) 177 if (ftrs[1] == 0)
178 break; 178 break;
179 tablelen -= len; 179 tablelen -= len;
180 ftrs += len; 180 ftrs += len;
181 } 181 }
182 182
183 /* loop over bits we know about */ 183 /* loop over bits we know about */
184 for (i = 0; i < ft_size; ++i, ++fp) { 184 for (i = 0; i < ft_size; ++i, ++fp) {
185 if (fp->pabyte >= ftrs[0]) 185 if (fp->pabyte >= ftrs[0])
186 continue; 186 continue;
187 bit = (ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1; 187 bit = (ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
188 if (bit ^ fp->invert) { 188 if (bit ^ fp->invert) {
189 cur_cpu_spec->cpu_features |= fp->cpu_features; 189 cur_cpu_spec->cpu_features |= fp->cpu_features;
190 cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs; 190 cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
191 cur_cpu_spec->mmu_features |= fp->mmu_features; 191 cur_cpu_spec->mmu_features |= fp->mmu_features;
192 } else { 192 } else {
193 cur_cpu_spec->cpu_features &= ~fp->cpu_features; 193 cur_cpu_spec->cpu_features &= ~fp->cpu_features;
194 cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs; 194 cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
195 cur_cpu_spec->mmu_features &= ~fp->mmu_features; 195 cur_cpu_spec->mmu_features &= ~fp->mmu_features;
196 } 196 }
197 } 197 }
198 } 198 }
199 199
200 static void __init check_cpu_pa_features(unsigned long node) 200 static void __init check_cpu_pa_features(unsigned long node)
201 { 201 {
202 unsigned char *pa_ftrs; 202 unsigned char *pa_ftrs;
203 unsigned long tablelen; 203 unsigned long tablelen;
204 204
205 pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen); 205 pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
206 if (pa_ftrs == NULL) 206 if (pa_ftrs == NULL)
207 return; 207 return;
208 208
209 scan_features(node, pa_ftrs, tablelen, 209 scan_features(node, pa_ftrs, tablelen,
210 ibm_pa_features, ARRAY_SIZE(ibm_pa_features)); 210 ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
211 } 211 }
212 212
213 #ifdef CONFIG_PPC_STD_MMU_64 213 #ifdef CONFIG_PPC_STD_MMU_64
214 static void __init check_cpu_slb_size(unsigned long node) 214 static void __init check_cpu_slb_size(unsigned long node)
215 { 215 {
216 u32 *slb_size_ptr; 216 u32 *slb_size_ptr;
217 217
218 slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL); 218 slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL);
219 if (slb_size_ptr != NULL) { 219 if (slb_size_ptr != NULL) {
220 mmu_slb_size = *slb_size_ptr; 220 mmu_slb_size = *slb_size_ptr;
221 return; 221 return;
222 } 222 }
223 slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL); 223 slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
224 if (slb_size_ptr != NULL) { 224 if (slb_size_ptr != NULL) {
225 mmu_slb_size = *slb_size_ptr; 225 mmu_slb_size = *slb_size_ptr;
226 } 226 }
227 } 227 }
228 #else 228 #else
229 #define check_cpu_slb_size(node) do { } while(0) 229 #define check_cpu_slb_size(node) do { } while(0)
230 #endif 230 #endif
231 231
232 static struct feature_property { 232 static struct feature_property {
233 const char *name; 233 const char *name;
234 u32 min_value; 234 u32 min_value;
235 unsigned long cpu_feature; 235 unsigned long cpu_feature;
236 unsigned long cpu_user_ftr; 236 unsigned long cpu_user_ftr;
237 } feature_properties[] __initdata = { 237 } feature_properties[] __initdata = {
238 #ifdef CONFIG_ALTIVEC 238 #ifdef CONFIG_ALTIVEC
239 {"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC}, 239 {"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
240 {"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC}, 240 {"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
241 #endif /* CONFIG_ALTIVEC */ 241 #endif /* CONFIG_ALTIVEC */
242 #ifdef CONFIG_VSX 242 #ifdef CONFIG_VSX
243 /* Yes, this _really_ is ibm,vmx == 2 to enable VSX */ 243 /* Yes, this _really_ is ibm,vmx == 2 to enable VSX */
244 {"ibm,vmx", 2, CPU_FTR_VSX, PPC_FEATURE_HAS_VSX}, 244 {"ibm,vmx", 2, CPU_FTR_VSX, PPC_FEATURE_HAS_VSX},
245 #endif /* CONFIG_VSX */ 245 #endif /* CONFIG_VSX */
246 #ifdef CONFIG_PPC64 246 #ifdef CONFIG_PPC64
247 {"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP}, 247 {"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP},
248 {"ibm,purr", 1, CPU_FTR_PURR, 0}, 248 {"ibm,purr", 1, CPU_FTR_PURR, 0},
249 {"ibm,spurr", 1, CPU_FTR_SPURR, 0}, 249 {"ibm,spurr", 1, CPU_FTR_SPURR, 0},
250 #endif /* CONFIG_PPC64 */ 250 #endif /* CONFIG_PPC64 */
251 }; 251 };
252 252
253 #if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU) 253 #if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
254 static inline void identical_pvr_fixup(unsigned long node) 254 static inline void identical_pvr_fixup(unsigned long node)
255 { 255 {
256 unsigned int pvr; 256 unsigned int pvr;
257 char *model = of_get_flat_dt_prop(node, "model", NULL); 257 char *model = of_get_flat_dt_prop(node, "model", NULL);
258 258
259 /* 259 /*
260 * Since 440GR(x)/440EP(x) processors have the same pvr, 260 * Since 440GR(x)/440EP(x) processors have the same pvr,
261 * we check the node path and set bit 28 in the cur_cpu_spec 261 * we check the node path and set bit 28 in the cur_cpu_spec
262 * pvr for EP(x) processor version. This bit is always 0 in 262 * pvr for EP(x) processor version. This bit is always 0 in
263 * the "real" pvr. Then we call identify_cpu again with 263 * the "real" pvr. Then we call identify_cpu again with
264 * the new logical pvr to enable FPU support. 264 * the new logical pvr to enable FPU support.
265 */ 265 */
266 if (model && strstr(model, "440EP")) { 266 if (model && strstr(model, "440EP")) {
267 pvr = cur_cpu_spec->pvr_value | 0x8; 267 pvr = cur_cpu_spec->pvr_value | 0x8;
268 identify_cpu(0, pvr); 268 identify_cpu(0, pvr);
269 DBG("Using logical pvr %x for %s\n", pvr, model); 269 DBG("Using logical pvr %x for %s\n", pvr, model);
270 } 270 }
271 } 271 }
272 #else 272 #else
273 #define identical_pvr_fixup(node) do { } while(0) 273 #define identical_pvr_fixup(node) do { } while(0)
274 #endif 274 #endif
275 275
276 static void __init check_cpu_feature_properties(unsigned long node) 276 static void __init check_cpu_feature_properties(unsigned long node)
277 { 277 {
278 unsigned long i; 278 unsigned long i;
279 struct feature_property *fp = feature_properties; 279 struct feature_property *fp = feature_properties;
280 const u32 *prop; 280 const u32 *prop;
281 281
282 for (i = 0; i < ARRAY_SIZE(feature_properties); ++i, ++fp) { 282 for (i = 0; i < ARRAY_SIZE(feature_properties); ++i, ++fp) {
283 prop = of_get_flat_dt_prop(node, fp->name, NULL); 283 prop = of_get_flat_dt_prop(node, fp->name, NULL);
284 if (prop && *prop >= fp->min_value) { 284 if (prop && *prop >= fp->min_value) {
285 cur_cpu_spec->cpu_features |= fp->cpu_feature; 285 cur_cpu_spec->cpu_features |= fp->cpu_feature;
286 cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr; 286 cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr;
287 } 287 }
288 } 288 }
289 } 289 }
290 290
291 static int __init early_init_dt_scan_cpus(unsigned long node, 291 static int __init early_init_dt_scan_cpus(unsigned long node,
292 const char *uname, int depth, 292 const char *uname, int depth,
293 void *data) 293 void *data)
294 { 294 {
295 char *type = of_get_flat_dt_prop(node, "device_type", NULL); 295 char *type = of_get_flat_dt_prop(node, "device_type", NULL);
296 const u32 *prop; 296 const u32 *prop;
297 const u32 *intserv; 297 const u32 *intserv;
298 int i, nthreads; 298 int i, nthreads;
299 unsigned long len; 299 unsigned long len;
300 int found = -1; 300 int found = -1;
301 int found_thread = 0; 301 int found_thread = 0;
302 302
303 /* We are scanning "cpu" nodes only */ 303 /* We are scanning "cpu" nodes only */
304 if (type == NULL || strcmp(type, "cpu") != 0) 304 if (type == NULL || strcmp(type, "cpu") != 0)
305 return 0; 305 return 0;
306 306
307 /* Get physical cpuid */ 307 /* Get physical cpuid */
308 intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len); 308 intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len);
309 if (intserv) { 309 if (intserv) {
310 nthreads = len / sizeof(int); 310 nthreads = len / sizeof(int);
311 } else { 311 } else {
312 intserv = of_get_flat_dt_prop(node, "reg", NULL); 312 intserv = of_get_flat_dt_prop(node, "reg", NULL);
313 nthreads = 1; 313 nthreads = 1;
314 } 314 }
315 315
316 /* 316 /*
317 * Now see if any of these threads match our boot cpu. 317 * Now see if any of these threads match our boot cpu.
318 * NOTE: This must match the parsing done in smp_setup_cpu_maps. 318 * NOTE: This must match the parsing done in smp_setup_cpu_maps.
319 */ 319 */
320 for (i = 0; i < nthreads; i++) { 320 for (i = 0; i < nthreads; i++) {
321 /* 321 /*
322 * version 2 of the kexec param format adds the phys cpuid of 322 * version 2 of the kexec param format adds the phys cpuid of
323 * booted proc. 323 * booted proc.
324 */ 324 */
325 if (initial_boot_params->version >= 2) { 325 if (initial_boot_params->version >= 2) {
326 if (intserv[i] == initial_boot_params->boot_cpuid_phys) { 326 if (intserv[i] == initial_boot_params->boot_cpuid_phys) {
327 found = boot_cpu_count; 327 found = boot_cpu_count;
328 found_thread = i; 328 found_thread = i;
329 } 329 }
330 } else { 330 } else {
331 /* 331 /*
 332 * Check if it's the boot-cpu, set its hw index now, 332 * Check if it's the boot-cpu, set its hw index now,
333 * unfortunately this format did not support booting 333 * unfortunately this format did not support booting
334 * off secondary threads. 334 * off secondary threads.
335 */ 335 */
336 if (of_get_flat_dt_prop(node, 336 if (of_get_flat_dt_prop(node,
337 "linux,boot-cpu", NULL) != NULL) 337 "linux,boot-cpu", NULL) != NULL)
338 found = boot_cpu_count; 338 found = boot_cpu_count;
339 } 339 }
340 #ifdef CONFIG_SMP 340 #ifdef CONFIG_SMP
341 /* logical cpu id is always 0 on UP kernels */ 341 /* logical cpu id is always 0 on UP kernels */
342 boot_cpu_count++; 342 boot_cpu_count++;
343 #endif 343 #endif
344 } 344 }
345 345
346 if (found >= 0) { 346 if (found >= 0) {
347 DBG("boot cpu: logical %d physical %d\n", found, 347 DBG("boot cpu: logical %d physical %d\n", found,
348 intserv[found_thread]); 348 intserv[found_thread]);
349 boot_cpuid = found; 349 boot_cpuid = found;
350 set_hard_smp_processor_id(found, intserv[found_thread]); 350 set_hard_smp_processor_id(found, intserv[found_thread]);
351 351
352 /* 352 /*
353 * PAPR defines "logical" PVR values for cpus that 353 * PAPR defines "logical" PVR values for cpus that
354 * meet various levels of the architecture: 354 * meet various levels of the architecture:
355 * 0x0f000001 Architecture version 2.04 355 * 0x0f000001 Architecture version 2.04
356 * 0x0f000002 Architecture version 2.05 356 * 0x0f000002 Architecture version 2.05
357 * If the cpu-version property in the cpu node contains 357 * If the cpu-version property in the cpu node contains
358 * such a value, we call identify_cpu again with the 358 * such a value, we call identify_cpu again with the
359 * logical PVR value in order to use the cpu feature 359 * logical PVR value in order to use the cpu feature
360 * bits appropriate for the architecture level. 360 * bits appropriate for the architecture level.
361 * 361 *
362 * A POWER6 partition in "POWER6 architected" mode 362 * A POWER6 partition in "POWER6 architected" mode
363 * uses the 0x0f000002 PVR value; in POWER5+ mode 363 * uses the 0x0f000002 PVR value; in POWER5+ mode
364 * it uses 0x0f000001. 364 * it uses 0x0f000001.
365 */ 365 */
366 prop = of_get_flat_dt_prop(node, "cpu-version", NULL); 366 prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
367 if (prop && (*prop & 0xff000000) == 0x0f000000) 367 if (prop && (*prop & 0xff000000) == 0x0f000000)
368 identify_cpu(0, *prop); 368 identify_cpu(0, *prop);
369 369
370 identical_pvr_fixup(node); 370 identical_pvr_fixup(node);
371 } 371 }
372 372
373 check_cpu_feature_properties(node); 373 check_cpu_feature_properties(node);
374 check_cpu_pa_features(node); 374 check_cpu_pa_features(node);
375 check_cpu_slb_size(node); 375 check_cpu_slb_size(node);
376 376
377 #ifdef CONFIG_PPC_PSERIES 377 #ifdef CONFIG_PPC_PSERIES
378 if (nthreads > 1) 378 if (nthreads > 1)
379 cur_cpu_spec->cpu_features |= CPU_FTR_SMT; 379 cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
380 else 380 else
381 cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT; 381 cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
382 #endif 382 #endif
383 383
384 return 0; 384 return 0;
385 } 385 }
386 386
387 int __init early_init_dt_scan_chosen_ppc(unsigned long node, const char *uname, 387 int __init early_init_dt_scan_chosen_ppc(unsigned long node, const char *uname,
388 int depth, void *data) 388 int depth, void *data)
389 { 389 {
390 unsigned long *lprop; 390 unsigned long *lprop;
391 391
392 /* Use common scan routine to determine if this is the chosen node */ 392 /* Use common scan routine to determine if this is the chosen node */
393 if (early_init_dt_scan_chosen(node, uname, depth, data) == 0) 393 if (early_init_dt_scan_chosen(node, uname, depth, data) == 0)
394 return 0; 394 return 0;
395 395
396 #ifdef CONFIG_PPC64 396 #ifdef CONFIG_PPC64
397 /* check if iommu is forced on or off */ 397 /* check if iommu is forced on or off */
398 if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL) 398 if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
399 iommu_is_off = 1; 399 iommu_is_off = 1;
400 if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL) 400 if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
401 iommu_force_on = 1; 401 iommu_force_on = 1;
402 #endif 402 #endif
403 403
404 /* mem=x on the command line is the preferred mechanism */ 404 /* mem=x on the command line is the preferred mechanism */
405 lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL); 405 lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
406 if (lprop) 406 if (lprop)
407 memory_limit = *lprop; 407 memory_limit = *lprop;
408 408
409 #ifdef CONFIG_PPC64 409 #ifdef CONFIG_PPC64
410 lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL); 410 lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
411 if (lprop) 411 if (lprop)
412 tce_alloc_start = *lprop; 412 tce_alloc_start = *lprop;
413 lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL); 413 lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
414 if (lprop) 414 if (lprop)
415 tce_alloc_end = *lprop; 415 tce_alloc_end = *lprop;
416 #endif 416 #endif
417 417
418 #ifdef CONFIG_KEXEC 418 #ifdef CONFIG_KEXEC
419 lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL); 419 lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
420 if (lprop) 420 if (lprop)
421 crashk_res.start = *lprop; 421 crashk_res.start = *lprop;
422 422
423 lprop = of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL); 423 lprop = of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
424 if (lprop) 424 if (lprop)
425 crashk_res.end = crashk_res.start + *lprop - 1; 425 crashk_res.end = crashk_res.start + *lprop - 1;
426 #endif 426 #endif
427 427
428 /* break now */ 428 /* break now */
429 return 1; 429 return 1;
430 } 430 }
431 431
432 #ifdef CONFIG_PPC_PSERIES 432 #ifdef CONFIG_PPC_PSERIES
433 /* 433 /*
434 * Interpret the ibm,dynamic-memory property in the 434 * Interpret the ibm,dynamic-memory property in the
435 * /ibm,dynamic-reconfiguration-memory node. 435 * /ibm,dynamic-reconfiguration-memory node.
436 * This contains a list of memory blocks along with NUMA affinity 436 * This contains a list of memory blocks along with NUMA affinity
437 * information. 437 * information.
438 */ 438 */
439 static int __init early_init_dt_scan_drconf_memory(unsigned long node) 439 static int __init early_init_dt_scan_drconf_memory(unsigned long node)
440 { 440 {
441 __be32 *dm, *ls, *usm; 441 __be32 *dm, *ls, *usm;
442 unsigned long l, n, flags; 442 unsigned long l, n, flags;
443 u64 base, size, memblock_size; 443 u64 base, size, memblock_size;
444 unsigned int is_kexec_kdump = 0, rngs; 444 unsigned int is_kexec_kdump = 0, rngs;
445 445
446 ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l); 446 ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
447 if (ls == NULL || l < dt_root_size_cells * sizeof(__be32)) 447 if (ls == NULL || l < dt_root_size_cells * sizeof(__be32))
448 return 0; 448 return 0;
449 memblock_size = dt_mem_next_cell(dt_root_size_cells, &ls); 449 memblock_size = dt_mem_next_cell(dt_root_size_cells, &ls);
450 450
451 dm = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l); 451 dm = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
452 if (dm == NULL || l < sizeof(__be32)) 452 if (dm == NULL || l < sizeof(__be32))
453 return 0; 453 return 0;
454 454
455 n = *dm++; /* number of entries */ 455 n = *dm++; /* number of entries */
456 if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(__be32)) 456 if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(__be32))
457 return 0; 457 return 0;
458 458
459 /* check if this is a kexec/kdump kernel. */ 459 /* check if this is a kexec/kdump kernel. */
460 usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory", 460 usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory",
461 &l); 461 &l);
462 if (usm != NULL) 462 if (usm != NULL)
463 is_kexec_kdump = 1; 463 is_kexec_kdump = 1;
464 464
465 for (; n != 0; --n) { 465 for (; n != 0; --n) {
466 base = dt_mem_next_cell(dt_root_addr_cells, &dm); 466 base = dt_mem_next_cell(dt_root_addr_cells, &dm);
467 flags = dm[3]; 467 flags = dm[3];
468 /* skip DRC index, pad, assoc. list index, flags */ 468 /* skip DRC index, pad, assoc. list index, flags */
469 dm += 4; 469 dm += 4;
470 /* skip this block if the reserved bit is set in flags (0x80) 470 /* skip this block if the reserved bit is set in flags (0x80)
471 or if the block is not assigned to this partition (0x8) */ 471 or if the block is not assigned to this partition (0x8) */
472 if ((flags & 0x80) || !(flags & 0x8)) 472 if ((flags & 0x80) || !(flags & 0x8))
473 continue; 473 continue;
474 size = memblock_size; 474 size = memblock_size;
475 rngs = 1; 475 rngs = 1;
476 if (is_kexec_kdump) { 476 if (is_kexec_kdump) {
477 /* 477 /*
478 * For each memblock in ibm,dynamic-memory, a corresponding 478 * For each memblock in ibm,dynamic-memory, a corresponding
479 * entry in linux,drconf-usable-memory property contains 479 * entry in linux,drconf-usable-memory property contains
480 * a counter 'p' followed by 'p' (base, size) duple. 480 * a counter 'p' followed by 'p' (base, size) duple.
481 * Now read the counter from 481 * Now read the counter from
482 * linux,drconf-usable-memory property 482 * linux,drconf-usable-memory property
483 */ 483 */
484 rngs = dt_mem_next_cell(dt_root_size_cells, &usm); 484 rngs = dt_mem_next_cell(dt_root_size_cells, &usm);
485 if (!rngs) /* there are no (base, size) duple */ 485 if (!rngs) /* there are no (base, size) duple */
486 continue; 486 continue;
487 } 487 }
488 do { 488 do {
489 if (is_kexec_kdump) { 489 if (is_kexec_kdump) {
490 base = dt_mem_next_cell(dt_root_addr_cells, 490 base = dt_mem_next_cell(dt_root_addr_cells,
491 &usm); 491 &usm);
492 size = dt_mem_next_cell(dt_root_size_cells, 492 size = dt_mem_next_cell(dt_root_size_cells,
493 &usm); 493 &usm);
494 } 494 }
495 if (iommu_is_off) { 495 if (iommu_is_off) {
496 if (base >= 0x80000000ul) 496 if (base >= 0x80000000ul)
497 continue; 497 continue;
498 if ((base + size) > 0x80000000ul) 498 if ((base + size) > 0x80000000ul)
499 size = 0x80000000ul - base; 499 size = 0x80000000ul - base;
500 } 500 }
501 memblock_add(base, size); 501 memblock_add(base, size);
502 } while (--rngs); 502 } while (--rngs);
503 } 503 }
504 memblock_dump_all(); 504 memblock_dump_all();
505 return 0; 505 return 0;
506 } 506 }
507 #else 507 #else
508 #define early_init_dt_scan_drconf_memory(node) 0 508 #define early_init_dt_scan_drconf_memory(node) 0
509 #endif /* CONFIG_PPC_PSERIES */ 509 #endif /* CONFIG_PPC_PSERIES */
510 510
511 static int __init early_init_dt_scan_memory_ppc(unsigned long node, 511 static int __init early_init_dt_scan_memory_ppc(unsigned long node,
512 const char *uname, 512 const char *uname,
513 int depth, void *data) 513 int depth, void *data)
514 { 514 {
515 if (depth == 1 && 515 if (depth == 1 &&
516 strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0) 516 strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0)
517 return early_init_dt_scan_drconf_memory(node); 517 return early_init_dt_scan_drconf_memory(node);
518 518
519 return early_init_dt_scan_memory(node, uname, depth, data); 519 return early_init_dt_scan_memory(node, uname, depth, data);
520 } 520 }
521 521
522 void __init early_init_dt_add_memory_arch(u64 base, u64 size) 522 void __init early_init_dt_add_memory_arch(u64 base, u64 size)
523 { 523 {
524 #ifdef CONFIG_PPC64 524 #ifdef CONFIG_PPC64
525 if (iommu_is_off) { 525 if (iommu_is_off) {
526 if (base >= 0x80000000ul) 526 if (base >= 0x80000000ul)
527 return; 527 return;
528 if ((base + size) > 0x80000000ul) 528 if ((base + size) > 0x80000000ul)
529 size = 0x80000000ul - base; 529 size = 0x80000000ul - base;
530 } 530 }
531 #endif 531 #endif
532 /* Keep track of the beginning of memory -and- the size of 532 /* Keep track of the beginning of memory -and- the size of
533 * the very first block in the device-tree as it represents 533 * the very first block in the device-tree as it represents
534 * the RMA on ppc64 server 534 * the RMA on ppc64 server
535 */ 535 */
536 if (base < memstart_addr) { 536 if (base < memstart_addr) {
537 memstart_addr = base; 537 memstart_addr = base;
538 first_memblock_size = size; 538 first_memblock_size = size;
539 } 539 }
540 540
541 /* Add the chunk to the MEMBLOCK list */ 541 /* Add the chunk to the MEMBLOCK list */
542 memblock_add(base, size); 542 memblock_add(base, size);
543 } 543 }
544 544
545 void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) 545 void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
546 { 546 {
547 return __va(memblock_alloc(size, align)); 547 return __va(memblock_alloc(size, align));
548 } 548 }
549 549
550 #ifdef CONFIG_BLK_DEV_INITRD 550 #ifdef CONFIG_BLK_DEV_INITRD
551 void __init early_init_dt_setup_initrd_arch(unsigned long start, 551 void __init early_init_dt_setup_initrd_arch(unsigned long start,
552 unsigned long end) 552 unsigned long end)
553 { 553 {
554 initrd_start = (unsigned long)__va(start); 554 initrd_start = (unsigned long)__va(start);
555 initrd_end = (unsigned long)__va(end); 555 initrd_end = (unsigned long)__va(end);
556 initrd_below_start_ok = 1; 556 initrd_below_start_ok = 1;
557 } 557 }
558 #endif 558 #endif
559 559
560 static void __init early_reserve_mem(void) 560 static void __init early_reserve_mem(void)
561 { 561 {
562 u64 base, size; 562 u64 base, size;
563 u64 *reserve_map; 563 u64 *reserve_map;
564 unsigned long self_base; 564 unsigned long self_base;
565 unsigned long self_size; 565 unsigned long self_size;
566 566
567 reserve_map = (u64 *)(((unsigned long)initial_boot_params) + 567 reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
568 initial_boot_params->off_mem_rsvmap); 568 initial_boot_params->off_mem_rsvmap);
569 569
570 /* before we do anything, lets reserve the dt blob */ 570 /* before we do anything, lets reserve the dt blob */
571 self_base = __pa((unsigned long)initial_boot_params); 571 self_base = __pa((unsigned long)initial_boot_params);
572 self_size = initial_boot_params->totalsize; 572 self_size = initial_boot_params->totalsize;
573 memblock_reserve(self_base, self_size); 573 memblock_reserve(self_base, self_size);
574 574
575 #ifdef CONFIG_BLK_DEV_INITRD 575 #ifdef CONFIG_BLK_DEV_INITRD
576 /* then reserve the initrd, if any */ 576 /* then reserve the initrd, if any */
577 if (initrd_start && (initrd_end > initrd_start)) 577 if (initrd_start && (initrd_end > initrd_start))
578 memblock_reserve(_ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE), 578 memblock_reserve(_ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE),
579 _ALIGN_UP(initrd_end, PAGE_SIZE) - 579 _ALIGN_UP(initrd_end, PAGE_SIZE) -
580 _ALIGN_DOWN(initrd_start, PAGE_SIZE)); 580 _ALIGN_DOWN(initrd_start, PAGE_SIZE));
581 #endif /* CONFIG_BLK_DEV_INITRD */ 581 #endif /* CONFIG_BLK_DEV_INITRD */
582 582
583 #ifdef CONFIG_PPC32 583 #ifdef CONFIG_PPC32
584 /* 584 /*
585 * Handle the case where we might be booting from an old kexec 585 * Handle the case where we might be booting from an old kexec
 586 * image that set up the mem_rsvmap as pairs of 32-bit values 586 * image that set up the mem_rsvmap as pairs of 32-bit values
587 */ 587 */
588 if (*reserve_map > 0xffffffffull) { 588 if (*reserve_map > 0xffffffffull) {
589 u32 base_32, size_32; 589 u32 base_32, size_32;
590 u32 *reserve_map_32 = (u32 *)reserve_map; 590 u32 *reserve_map_32 = (u32 *)reserve_map;
591 591
592 while (1) { 592 while (1) {
593 base_32 = *(reserve_map_32++); 593 base_32 = *(reserve_map_32++);
594 size_32 = *(reserve_map_32++); 594 size_32 = *(reserve_map_32++);
595 if (size_32 == 0) 595 if (size_32 == 0)
596 break; 596 break;
597 /* skip if the reservation is for the blob */ 597 /* skip if the reservation is for the blob */
598 if (base_32 == self_base && size_32 == self_size) 598 if (base_32 == self_base && size_32 == self_size)
599 continue; 599 continue;
600 DBG("reserving: %x -> %x\n", base_32, size_32); 600 DBG("reserving: %x -> %x\n", base_32, size_32);
601 memblock_reserve(base_32, size_32); 601 memblock_reserve(base_32, size_32);
602 } 602 }
603 return; 603 return;
604 } 604 }
605 #endif 605 #endif
606 while (1) { 606 while (1) {
607 base = *(reserve_map++); 607 base = *(reserve_map++);
608 size = *(reserve_map++); 608 size = *(reserve_map++);
609 if (size == 0) 609 if (size == 0)
610 break; 610 break;
611 DBG("reserving: %llx -> %llx\n", base, size); 611 DBG("reserving: %llx -> %llx\n", base, size);
612 memblock_reserve(base, size); 612 memblock_reserve(base, size);
613 } 613 }
614 } 614 }
615 615
616 #ifdef CONFIG_PHYP_DUMP 616 #ifdef CONFIG_PHYP_DUMP
617 /** 617 /**
618 * phyp_dump_calculate_reserve_size() - reserve variable boot area 5% or arg 618 * phyp_dump_calculate_reserve_size() - reserve variable boot area 5% or arg
619 * 619 *
620 * Function to find the largest size we need to reserve 620 * Function to find the largest size we need to reserve
621 * during early boot process. 621 * during early boot process.
622 * 622 *
623 * It either looks for boot param and returns that OR 623 * It either looks for boot param and returns that OR
624 * returns larger of 256 or 5% rounded down to multiples of 256MB. 624 * returns larger of 256 or 5% rounded down to multiples of 256MB.
625 * 625 *
626 */ 626 */
627 static inline unsigned long phyp_dump_calculate_reserve_size(void) 627 static inline unsigned long phyp_dump_calculate_reserve_size(void)
628 { 628 {
629 unsigned long tmp; 629 unsigned long tmp;
630 630
631 if (phyp_dump_info->reserve_bootvar) 631 if (phyp_dump_info->reserve_bootvar)
632 return phyp_dump_info->reserve_bootvar; 632 return phyp_dump_info->reserve_bootvar;
633 633
634 /* divide by 20 to get 5% of value */ 634 /* divide by 20 to get 5% of value */
635 tmp = memblock_end_of_DRAM(); 635 tmp = memblock_end_of_DRAM();
636 do_div(tmp, 20); 636 do_div(tmp, 20);
637 637
638 /* round it down in multiples of 256 */ 638 /* round it down in multiples of 256 */
639 tmp = tmp & ~0x0FFFFFFFUL; 639 tmp = tmp & ~0x0FFFFFFFUL;
640 640
641 return (tmp > PHYP_DUMP_RMR_END ? tmp : PHYP_DUMP_RMR_END); 641 return (tmp > PHYP_DUMP_RMR_END ? tmp : PHYP_DUMP_RMR_END);
642 } 642 }
643 643
644 /** 644 /**
 645 * phyp_dump_reserve_mem() - reserve all not-yet-dumped memory 645 * phyp_dump_reserve_mem() - reserve all not-yet-dumped memory
646 * 646 *
647 * This routine may reserve memory regions in the kernel only 647 * This routine may reserve memory regions in the kernel only
648 * if the system is supported and a dump was taken in last 648 * if the system is supported and a dump was taken in last
649 * boot instance or if the hardware is supported and the 649 * boot instance or if the hardware is supported and the
 650 * scratch area needs to be set up. In other instances it returns 650 * scratch area needs to be set up. In other instances it returns
651 * without reserving anything. The memory in case of dump being 651 * without reserving anything. The memory in case of dump being
652 * active is freed when the dump is collected (by userland tools). 652 * active is freed when the dump is collected (by userland tools).
653 */ 653 */
654 static void __init phyp_dump_reserve_mem(void) 654 static void __init phyp_dump_reserve_mem(void)
655 { 655 {
656 unsigned long base, size; 656 unsigned long base, size;
657 unsigned long variable_reserve_size; 657 unsigned long variable_reserve_size;
658 658
659 if (!phyp_dump_info->phyp_dump_configured) { 659 if (!phyp_dump_info->phyp_dump_configured) {
660 printk(KERN_ERR "Phyp-dump not supported on this hardware\n"); 660 printk(KERN_ERR "Phyp-dump not supported on this hardware\n");
661 return; 661 return;
662 } 662 }
663 663
664 if (!phyp_dump_info->phyp_dump_at_boot) { 664 if (!phyp_dump_info->phyp_dump_at_boot) {
665 printk(KERN_INFO "Phyp-dump disabled at boot time\n"); 665 printk(KERN_INFO "Phyp-dump disabled at boot time\n");
666 return; 666 return;
667 } 667 }
668 668
669 variable_reserve_size = phyp_dump_calculate_reserve_size(); 669 variable_reserve_size = phyp_dump_calculate_reserve_size();
670 670
671 if (phyp_dump_info->phyp_dump_is_active) { 671 if (phyp_dump_info->phyp_dump_is_active) {
672 /* Reserve *everything* above RMR.Area freed by userland tools*/ 672 /* Reserve *everything* above RMR.Area freed by userland tools*/
673 base = variable_reserve_size; 673 base = variable_reserve_size;
674 size = memblock_end_of_DRAM() - base; 674 size = memblock_end_of_DRAM() - base;
675 675
676 /* XXX crashed_ram_end is wrong, since it may be beyond 676 /* XXX crashed_ram_end is wrong, since it may be beyond
677 * the memory_limit, it will need to be adjusted. */ 677 * the memory_limit, it will need to be adjusted. */
678 memblock_reserve(base, size); 678 memblock_reserve(base, size);
679 679
680 phyp_dump_info->init_reserve_start = base; 680 phyp_dump_info->init_reserve_start = base;
681 phyp_dump_info->init_reserve_size = size; 681 phyp_dump_info->init_reserve_size = size;
682 } else { 682 } else {
683 size = phyp_dump_info->cpu_state_size + 683 size = phyp_dump_info->cpu_state_size +
684 phyp_dump_info->hpte_region_size + 684 phyp_dump_info->hpte_region_size +
685 variable_reserve_size; 685 variable_reserve_size;
686 base = memblock_end_of_DRAM() - size; 686 base = memblock_end_of_DRAM() - size;
687 memblock_reserve(base, size); 687 memblock_reserve(base, size);
688 phyp_dump_info->init_reserve_start = base; 688 phyp_dump_info->init_reserve_start = base;
689 phyp_dump_info->init_reserve_size = size; 689 phyp_dump_info->init_reserve_size = size;
690 } 690 }
691 } 691 }
692 #else 692 #else
693 static inline void __init phyp_dump_reserve_mem(void) {} 693 static inline void __init phyp_dump_reserve_mem(void) {}
694 #endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */ 694 #endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */
695 695
696 void __init early_init_devtree(void *params) 696 void __init early_init_devtree(void *params)
697 { 697 {
698 phys_addr_t limit; 698 phys_addr_t limit;
699 699
700 DBG(" -> early_init_devtree(%p)\n", params); 700 DBG(" -> early_init_devtree(%p)\n", params);
701 701
702 /* Setup flat device-tree pointer */ 702 /* Setup flat device-tree pointer */
703 initial_boot_params = params; 703 initial_boot_params = params;
704 704
705 #ifdef CONFIG_PPC_RTAS 705 #ifdef CONFIG_PPC_RTAS
706 /* Some machines might need RTAS info for debugging, grab it now. */ 706 /* Some machines might need RTAS info for debugging, grab it now. */
707 of_scan_flat_dt(early_init_dt_scan_rtas, NULL); 707 of_scan_flat_dt(early_init_dt_scan_rtas, NULL);
708 #endif 708 #endif
709 709
710 #ifdef CONFIG_PHYP_DUMP 710 #ifdef CONFIG_PHYP_DUMP
711 /* scan tree to see if dump occurred during last boot */ 711 /* scan tree to see if dump occurred during last boot */
712 of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL); 712 of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL);
713 #endif 713 #endif
714 714
 715 /* Retrieve various information from the /chosen node of the 715 /* Retrieve various information from the /chosen node of the
716 * device-tree, including the platform type, initrd location and 716 * device-tree, including the platform type, initrd location and
717 * size, TCE reserve, and more ... 717 * size, TCE reserve, and more ...
718 */ 718 */
719 of_scan_flat_dt(early_init_dt_scan_chosen_ppc, cmd_line); 719 of_scan_flat_dt(early_init_dt_scan_chosen_ppc, cmd_line);
720 720
721 /* Scan memory nodes and rebuild MEMBLOCKs */ 721 /* Scan memory nodes and rebuild MEMBLOCKs */
722 memblock_init(); 722 memblock_init();
723 723
724 of_scan_flat_dt(early_init_dt_scan_root, NULL); 724 of_scan_flat_dt(early_init_dt_scan_root, NULL);
725 of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL); 725 of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
726 setup_initial_memory_limit(memstart_addr, first_memblock_size); 726 setup_initial_memory_limit(memstart_addr, first_memblock_size);
727 727
728 /* Save command line for /proc/cmdline and then parse parameters */ 728 /* Save command line for /proc/cmdline and then parse parameters */
729 strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); 729 strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
730 parse_early_param(); 730 parse_early_param();
731 731
732 /* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */ 732 /* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
733 memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START); 733 memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
734 /* If relocatable, reserve first 32k for interrupt vectors etc. */ 734 /* If relocatable, reserve first 32k for interrupt vectors etc. */
735 if (PHYSICAL_START > MEMORY_START) 735 if (PHYSICAL_START > MEMORY_START)
736 memblock_reserve(MEMORY_START, 0x8000); 736 memblock_reserve(MEMORY_START, 0x8000);
737 reserve_kdump_trampoline(); 737 reserve_kdump_trampoline();
738 reserve_crashkernel(); 738 reserve_crashkernel();
739 early_reserve_mem(); 739 early_reserve_mem();
740 phyp_dump_reserve_mem(); 740 phyp_dump_reserve_mem();
741 741
742 limit = memory_limit; 742 limit = memory_limit;
743 if (! limit) { 743 if (! limit) {
744 phys_addr_t memsize; 744 phys_addr_t memsize;
745 745
746 /* Ensure that total memory size is page-aligned, because 746 /* Ensure that total memory size is page-aligned, because
747 * otherwise mark_bootmem() gets upset. */ 747 * otherwise mark_bootmem() gets upset. */
748 memblock_analyze(); 748 memblock_analyze();
749 memsize = memblock_phys_mem_size(); 749 memsize = memblock_phys_mem_size();
750 if ((memsize & PAGE_MASK) != memsize) 750 if ((memsize & PAGE_MASK) != memsize)
751 limit = memsize & PAGE_MASK; 751 limit = memsize & PAGE_MASK;
752 } 752 }
753 memblock_enforce_memory_limit(limit); 753 memblock_enforce_memory_limit(limit);
754 754
755 memblock_analyze(); 755 memblock_analyze();
756 memblock_dump_all(); 756 memblock_dump_all();
757 757
758 DBG("Phys. mem: %llx\n", memblock_phys_mem_size()); 758 DBG("Phys. mem: %llx\n", memblock_phys_mem_size());
759 759
760 /* We may need to relocate the flat tree, so do it now. 760 /* We may need to relocate the flat tree, so do it now.
761 * FIXME .. and the initrd too? */ 761 * FIXME .. and the initrd too? */
762 move_device_tree(); 762 move_device_tree();
763 763
764 allocate_pacas(); 764 allocate_pacas();
765 765
766 DBG("Scanning CPUs ...\n"); 766 DBG("Scanning CPUs ...\n");
767 767
768 /* Retrieve CPU-related information from the flat tree 768 /* Retrieve CPU-related information from the flat tree
769 * (altivec support, boot CPU ID, ...) 769 * (altivec support, boot CPU ID, ...)
770 */ 770 */
771 of_scan_flat_dt(early_init_dt_scan_cpus, NULL); 771 of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
772 772
773 #if defined(CONFIG_SMP) && defined(CONFIG_PPC64) 773 #if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
774 /* We'll later wait for secondaries to check in; there are 774 /* We'll later wait for secondaries to check in; there are
775 * NCPUS-1 non-boot CPUs :-) 775 * NCPUS-1 non-boot CPUs :-)
776 */ 776 */
777 spinning_secondaries = boot_cpu_count - 1; 777 spinning_secondaries = boot_cpu_count - 1;
778 #endif 778 #endif
779 779
780 DBG(" <- early_init_devtree()\n"); 780 DBG(" <- early_init_devtree()\n");
781 } 781 }
782 782
783 /******* 783 /*******
784 * 784 *
785 * New implementation of the OF "find" APIs; they return a refcounted 785 * New implementation of the OF "find" APIs; they return a refcounted
786 * object, so call of_node_put() when done. The device tree and list 786 * object, so call of_node_put() when done. The device tree and list
787 * are protected by a rw_lock. 787 * are protected by a rw_lock.
788 * 788 *
789 * Note that property management will need some locking as well, 789 * Note that property management will need some locking as well,
790 * this isn't dealt with yet. 790 * this isn't dealt with yet.
791 * 791 *
792 *******/ 792 *******/
793 793
794 /** 794 /**
795 * of_find_next_cache_node - Find a node's subsidiary cache 795 * of_find_next_cache_node - Find a node's subsidiary cache
796 * @np: node of type "cpu" or "cache" 796 * @np: node of type "cpu" or "cache"
797 * 797 *
798 * Returns a node pointer with refcount incremented, use 798 * Returns a node pointer with refcount incremented, use
799 * of_node_put() on it when done. Caller should hold a reference 799 * of_node_put() on it when done. Caller should hold a reference
800 * to np. 800 * to np.
801 */ 801 */
802 struct device_node *of_find_next_cache_node(struct device_node *np) 802 struct device_node *of_find_next_cache_node(struct device_node *np)
803 { 803 {
804 struct device_node *child; 804 struct device_node *child;
805 const phandle *handle; 805 const phandle *handle;
806 806
807 handle = of_get_property(np, "l2-cache", NULL); 807 handle = of_get_property(np, "l2-cache", NULL);
808 if (!handle) 808 if (!handle)
809 handle = of_get_property(np, "next-level-cache", NULL); 809 handle = of_get_property(np, "next-level-cache", NULL);
810 810
811 if (handle) 811 if (handle)
812 return of_find_node_by_phandle(*handle); 812 return of_find_node_by_phandle(*handle);
813 813
814 /* OF on pmac has nodes instead of properties named "l2-cache" 814 /* OF on pmac has nodes instead of properties named "l2-cache"
815 * beneath CPU nodes. 815 * beneath CPU nodes.
816 */ 816 */
817 if (!strcmp(np->type, "cpu")) 817 if (!strcmp(np->type, "cpu"))
818 for_each_child_of_node(np, child) 818 for_each_child_of_node(np, child)
819 if (!strcmp(child->type, "cache")) 819 if (!strcmp(child->type, "cache"))
820 return child; 820 return child;
821 821
822 return NULL; 822 return NULL;
823 } 823 }
824 824
825 #ifdef CONFIG_PPC_PSERIES 825 #ifdef CONFIG_PPC_PSERIES
826 /* 826 /*
827 * Fix up the uninitialized fields in a new device node: 827 * Fix up the uninitialized fields in a new device node:
828 * name, type and pci-specific fields 828 * name, type and pci-specific fields
829 */ 829 */
830 830
831 static int of_finish_dynamic_node(struct device_node *node) 831 static int of_finish_dynamic_node(struct device_node *node)
832 { 832 {
833 struct device_node *parent = of_get_parent(node); 833 struct device_node *parent = of_get_parent(node);
834 int err = 0; 834 int err = 0;
835 const phandle *ibm_phandle; 835 const phandle *ibm_phandle;
836 836
837 node->name = of_get_property(node, "name", NULL); 837 node->name = of_get_property(node, "name", NULL);
838 node->type = of_get_property(node, "device_type", NULL); 838 node->type = of_get_property(node, "device_type", NULL);
839 839
840 if (!node->name) 840 if (!node->name)
841 node->name = "<NULL>"; 841 node->name = "<NULL>";
842 if (!node->type) 842 if (!node->type)
843 node->type = "<NULL>"; 843 node->type = "<NULL>";
844 844
845 if (!parent) { 845 if (!parent) {
846 err = -ENODEV; 846 err = -ENODEV;
847 goto out; 847 goto out;
848 } 848 }
849 849
850 /* We don't support that function on PowerMac, at least 850 /* We don't support that function on PowerMac, at least
851 * not yet 851 * not yet
852 */ 852 */
853 if (machine_is(powermac)) 853 if (machine_is(powermac))
854 return -ENODEV; 854 return -ENODEV;
855 855
856 /* fix up new node's phandle field */ 856 /* fix up new node's phandle field */
857 if ((ibm_phandle = of_get_property(node, "ibm,phandle", NULL))) 857 if ((ibm_phandle = of_get_property(node, "ibm,phandle", NULL)))
858 node->phandle = *ibm_phandle; 858 node->phandle = *ibm_phandle;
859 859
860 out: 860 out:
861 of_node_put(parent); 861 of_node_put(parent);
862 return err; 862 return err;
863 } 863 }
864 864
865 static int prom_reconfig_notifier(struct notifier_block *nb, 865 static int prom_reconfig_notifier(struct notifier_block *nb,
866 unsigned long action, void *node) 866 unsigned long action, void *node)
867 { 867 {
868 int err; 868 int err;
869 869
870 switch (action) { 870 switch (action) {
871 case PSERIES_RECONFIG_ADD: 871 case PSERIES_RECONFIG_ADD:
872 err = of_finish_dynamic_node(node); 872 err = of_finish_dynamic_node(node);
873 if (err < 0) 873 if (err < 0)
874 printk(KERN_ERR "finish_node returned %d\n", err); 874 printk(KERN_ERR "finish_node returned %d\n", err);
875 break; 875 break;
876 default: 876 default:
877 err = 0; 877 err = 0;
878 break; 878 break;
879 } 879 }
880 return notifier_from_errno(err); 880 return notifier_from_errno(err);
881 } 881 }
882 882
883 static struct notifier_block prom_reconfig_nb = { 883 static struct notifier_block prom_reconfig_nb = {
884 .notifier_call = prom_reconfig_notifier, 884 .notifier_call = prom_reconfig_notifier,
885 .priority = 10, /* This one needs to run first */ 885 .priority = 10, /* This one needs to run first */
886 }; 886 };
887 887
888 static int __init prom_reconfig_setup(void) 888 static int __init prom_reconfig_setup(void)
889 { 889 {
890 return pSeries_reconfig_notifier_register(&prom_reconfig_nb); 890 return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
891 } 891 }
892 __initcall(prom_reconfig_setup); 892 __initcall(prom_reconfig_setup);
893 #endif 893 #endif
894 894
895 /* Find the device node for a given logical cpu number; also returns the cpu 895 /* Find the device node for a given logical cpu number; also returns the cpu
896 * local thread number (index in ibm,interrupt-server#s) if relevant and 896 * local thread number (index in ibm,interrupt-server#s) if relevant and
897 * asked for (non-NULL) 897 * asked for (non-NULL)
898 */ 898 */
899 struct device_node *of_get_cpu_node(int cpu, unsigned int *thread) 899 struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
900 { 900 {
901 int hardid; 901 int hardid;
902 struct device_node *np; 902 struct device_node *np;
903 903
904 hardid = get_hard_smp_processor_id(cpu); 904 hardid = get_hard_smp_processor_id(cpu);
905 905
906 for_each_node_by_type(np, "cpu") { 906 for_each_node_by_type(np, "cpu") {
907 const u32 *intserv; 907 const u32 *intserv;
908 unsigned int plen, t; 908 unsigned int plen, t;
909 909
910 /* Check for ibm,ppc-interrupt-server#s. If it doesn't exist, 910 /* Check for ibm,ppc-interrupt-server#s. If it doesn't exist,
911 * fall back to the "reg" property and assume no threads. 911 * fall back to the "reg" property and assume no threads.
912 */ 912 */
913 intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", 913 intserv = of_get_property(np, "ibm,ppc-interrupt-server#s",
914 &plen); 914 &plen);
915 if (intserv == NULL) { 915 if (intserv == NULL) {
916 const u32 *reg = of_get_property(np, "reg", NULL); 916 const u32 *reg = of_get_property(np, "reg", NULL);
917 if (reg == NULL) 917 if (reg == NULL)
918 continue; 918 continue;
919 if (*reg == hardid) { 919 if (*reg == hardid) {
920 if (thread) 920 if (thread)
921 *thread = 0; 921 *thread = 0;
922 return np; 922 return np;
923 } 923 }
924 } else { 924 } else {
925 plen /= sizeof(u32); 925 plen /= sizeof(u32);
926 for (t = 0; t < plen; t++) { 926 for (t = 0; t < plen; t++) {
927 if (hardid == intserv[t]) { 927 if (hardid == intserv[t]) {
928 if (thread) 928 if (thread)
929 *thread = t; 929 *thread = t;
930 return np; 930 return np;
931 } 931 }
932 } 932 }
933 } 933 }
934 } 934 }
935 return NULL; 935 return NULL;
936 } 936 }
937 EXPORT_SYMBOL(of_get_cpu_node); 937 EXPORT_SYMBOL(of_get_cpu_node);
938 938
939 #if defined(CONFIG_DEBUG_FS) && defined(DEBUG) 939 #if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
940 static struct debugfs_blob_wrapper flat_dt_blob; 940 static struct debugfs_blob_wrapper flat_dt_blob;
941 941
942 static int __init export_flat_device_tree(void) 942 static int __init export_flat_device_tree(void)
943 { 943 {
944 struct dentry *d; 944 struct dentry *d;
945 945
946 flat_dt_blob.data = initial_boot_params; 946 flat_dt_blob.data = initial_boot_params;
947 flat_dt_blob.size = initial_boot_params->totalsize; 947 flat_dt_blob.size = initial_boot_params->totalsize;
948 948
949 d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR, 949 d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR,
950 powerpc_debugfs_root, &flat_dt_blob); 950 powerpc_debugfs_root, &flat_dt_blob);
951 if (!d) 951 if (!d)
952 return 1; 952 return 1;
953 953
954 return 0; 954 return 0;
955 } 955 }
956 __initcall(export_flat_device_tree); 956 __initcall(export_flat_device_tree);
957 #endif 957 #endif
958 958
arch/powerpc/kernel/rtas.c
1 /* 1 /*
2 * 2 *
3 * Procedures for interfacing to the RTAS on CHRP machines. 3 * Procedures for interfacing to the RTAS on CHRP machines.
4 * 4 *
5 * Peter Bergner, IBM March 2001. 5 * Peter Bergner, IBM March 2001.
6 * Copyright (C) 2001 IBM. 6 * Copyright (C) 2001 IBM.
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version. 11 * 2 of the License, or (at your option) any later version.
12 */ 12 */
13 13
14 #include <stdarg.h> 14 #include <stdarg.h>
15 #include <linux/kernel.h> 15 #include <linux/kernel.h>
16 #include <linux/types.h> 16 #include <linux/types.h>
17 #include <linux/spinlock.h> 17 #include <linux/spinlock.h>
18 #include <linux/module.h> 18 #include <linux/export.h>
19 #include <linux/init.h> 19 #include <linux/init.h>
20 #include <linux/capability.h> 20 #include <linux/capability.h>
21 #include <linux/delay.h> 21 #include <linux/delay.h>
22 #include <linux/smp.h> 22 #include <linux/smp.h>
23 #include <linux/completion.h> 23 #include <linux/completion.h>
24 #include <linux/cpumask.h> 24 #include <linux/cpumask.h>
25 #include <linux/memblock.h> 25 #include <linux/memblock.h>
26 #include <linux/slab.h> 26 #include <linux/slab.h>
27 #include <linux/reboot.h> 27 #include <linux/reboot.h>
28 28
29 #include <asm/prom.h> 29 #include <asm/prom.h>
30 #include <asm/rtas.h> 30 #include <asm/rtas.h>
31 #include <asm/hvcall.h> 31 #include <asm/hvcall.h>
32 #include <asm/machdep.h> 32 #include <asm/machdep.h>
33 #include <asm/firmware.h> 33 #include <asm/firmware.h>
34 #include <asm/page.h> 34 #include <asm/page.h>
35 #include <asm/param.h> 35 #include <asm/param.h>
36 #include <asm/system.h> 36 #include <asm/system.h>
37 #include <asm/delay.h> 37 #include <asm/delay.h>
38 #include <asm/uaccess.h> 38 #include <asm/uaccess.h>
39 #include <asm/udbg.h> 39 #include <asm/udbg.h>
40 #include <asm/syscalls.h> 40 #include <asm/syscalls.h>
41 #include <asm/smp.h> 41 #include <asm/smp.h>
42 #include <linux/atomic.h> 42 #include <linux/atomic.h>
43 #include <asm/time.h> 43 #include <asm/time.h>
44 #include <asm/mmu.h> 44 #include <asm/mmu.h>
45 #include <asm/topology.h> 45 #include <asm/topology.h>
46 #include <asm/pSeries_reconfig.h> 46 #include <asm/pSeries_reconfig.h>
47 47
48 struct rtas_t rtas = { 48 struct rtas_t rtas = {
49 .lock = __ARCH_SPIN_LOCK_UNLOCKED 49 .lock = __ARCH_SPIN_LOCK_UNLOCKED
50 }; 50 };
51 EXPORT_SYMBOL(rtas); 51 EXPORT_SYMBOL(rtas);
52 52
53 DEFINE_SPINLOCK(rtas_data_buf_lock); 53 DEFINE_SPINLOCK(rtas_data_buf_lock);
54 EXPORT_SYMBOL(rtas_data_buf_lock); 54 EXPORT_SYMBOL(rtas_data_buf_lock);
55 55
56 char rtas_data_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned; 56 char rtas_data_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned;
57 EXPORT_SYMBOL(rtas_data_buf); 57 EXPORT_SYMBOL(rtas_data_buf);
58 58
59 unsigned long rtas_rmo_buf; 59 unsigned long rtas_rmo_buf;
60 60
61 /* 61 /*
62 * If non-NULL, this gets called when the kernel terminates. 62 * If non-NULL, this gets called when the kernel terminates.
63 * This is done like this so rtas_flash can be a module. 63 * This is done like this so rtas_flash can be a module.
64 */ 64 */
65 void (*rtas_flash_term_hook)(int); 65 void (*rtas_flash_term_hook)(int);
66 EXPORT_SYMBOL(rtas_flash_term_hook); 66 EXPORT_SYMBOL(rtas_flash_term_hook);
67 67
68 /* RTAS uses home-made raw locking instead of spin_lock_irqsave 68 /* RTAS uses home-made raw locking instead of spin_lock_irqsave
69 * because these calls can be made from within really nasty contexts, 69 * because these calls can be made from within really nasty contexts,
70 * such as with the timebase stopped, which would lock up with 70 * such as with the timebase stopped, which would lock up with
71 * normal locks and spinlock debugging enabled. 71 * normal locks and spinlock debugging enabled.
72 */ 72 */
73 static unsigned long lock_rtas(void) 73 static unsigned long lock_rtas(void)
74 { 74 {
75 unsigned long flags; 75 unsigned long flags;
76 76
77 local_irq_save(flags); 77 local_irq_save(flags);
78 preempt_disable(); 78 preempt_disable();
79 arch_spin_lock_flags(&rtas.lock, flags); 79 arch_spin_lock_flags(&rtas.lock, flags);
80 return flags; 80 return flags;
81 } 81 }
82 82
83 static void unlock_rtas(unsigned long flags) 83 static void unlock_rtas(unsigned long flags)
84 { 84 {
85 arch_spin_unlock(&rtas.lock); 85 arch_spin_unlock(&rtas.lock);
86 local_irq_restore(flags); 86 local_irq_restore(flags);
87 preempt_enable(); 87 preempt_enable();
88 } 88 }
89 89
90 /* 90 /*
91 * call_rtas_display_status and call_rtas_display_status_delay 91 * call_rtas_display_status and call_rtas_display_status_delay
92 * are designed only for very early low-level debugging, which 92 * are designed only for very early low-level debugging, which
93 * is why the token is hard-coded to 10. 93 * is why the token is hard-coded to 10.
94 */ 94 */
95 static void call_rtas_display_status(char c) 95 static void call_rtas_display_status(char c)
96 { 96 {
97 struct rtas_args *args = &rtas.args; 97 struct rtas_args *args = &rtas.args;
98 unsigned long s; 98 unsigned long s;
99 99
100 if (!rtas.base) 100 if (!rtas.base)
101 return; 101 return;
102 s = lock_rtas(); 102 s = lock_rtas();
103 103
104 args->token = 10; 104 args->token = 10;
105 args->nargs = 1; 105 args->nargs = 1;
106 args->nret = 1; 106 args->nret = 1;
107 args->rets = (rtas_arg_t *)&(args->args[1]); 107 args->rets = (rtas_arg_t *)&(args->args[1]);
108 args->args[0] = (unsigned char)c; 108 args->args[0] = (unsigned char)c;
109 109
110 enter_rtas(__pa(args)); 110 enter_rtas(__pa(args));
111 111
112 unlock_rtas(s); 112 unlock_rtas(s);
113 } 113 }
114 114
115 static void call_rtas_display_status_delay(char c) 115 static void call_rtas_display_status_delay(char c)
116 { 116 {
117 static int pending_newline = 0; /* did last write end with unprinted newline? */ 117 static int pending_newline = 0; /* did last write end with unprinted newline? */
118 static int width = 16; 118 static int width = 16;
119 119
120 if (c == '\n') { 120 if (c == '\n') {
121 while (width-- > 0) 121 while (width-- > 0)
122 call_rtas_display_status(' '); 122 call_rtas_display_status(' ');
123 width = 16; 123 width = 16;
124 mdelay(500); 124 mdelay(500);
125 pending_newline = 1; 125 pending_newline = 1;
126 } else { 126 } else {
127 if (pending_newline) { 127 if (pending_newline) {
128 call_rtas_display_status('\r'); 128 call_rtas_display_status('\r');
129 call_rtas_display_status('\n'); 129 call_rtas_display_status('\n');
130 } 130 }
131 pending_newline = 0; 131 pending_newline = 0;
132 if (width--) { 132 if (width--) {
133 call_rtas_display_status(c); 133 call_rtas_display_status(c);
134 udelay(10000); 134 udelay(10000);
135 } 135 }
136 } 136 }
137 } 137 }
138 138
139 void __init udbg_init_rtas_panel(void) 139 void __init udbg_init_rtas_panel(void)
140 { 140 {
141 udbg_putc = call_rtas_display_status_delay; 141 udbg_putc = call_rtas_display_status_delay;
142 } 142 }
143 143
144 #ifdef CONFIG_UDBG_RTAS_CONSOLE 144 #ifdef CONFIG_UDBG_RTAS_CONSOLE
145 145
146 /* If you think you're dying before early_init_dt_scan_rtas() does its 146 /* If you think you're dying before early_init_dt_scan_rtas() does its
147 * work, you can hard-code the token values for your firmware here and 147 * work, you can hard-code the token values for your firmware here and
148 * hard-code rtas.base/entry etc. 148 * hard-code rtas.base/entry etc.
149 */ 149 */
150 static unsigned int rtas_putchar_token = RTAS_UNKNOWN_SERVICE; 150 static unsigned int rtas_putchar_token = RTAS_UNKNOWN_SERVICE;
151 static unsigned int rtas_getchar_token = RTAS_UNKNOWN_SERVICE; 151 static unsigned int rtas_getchar_token = RTAS_UNKNOWN_SERVICE;
152 152
153 static void udbg_rtascon_putc(char c) 153 static void udbg_rtascon_putc(char c)
154 { 154 {
155 int tries; 155 int tries;
156 156
157 if (!rtas.base) 157 if (!rtas.base)
158 return; 158 return;
159 159
160 /* Add CRs before LFs */ 160 /* Add CRs before LFs */
161 if (c == '\n') 161 if (c == '\n')
162 udbg_rtascon_putc('\r'); 162 udbg_rtascon_putc('\r');
163 163
164 /* if there is more than one character to be displayed, wait a bit */ 164 /* if there is more than one character to be displayed, wait a bit */
165 for (tries = 0; tries < 16; tries++) { 165 for (tries = 0; tries < 16; tries++) {
166 if (rtas_call(rtas_putchar_token, 1, 1, NULL, c) == 0) 166 if (rtas_call(rtas_putchar_token, 1, 1, NULL, c) == 0)
167 break; 167 break;
168 udelay(1000); 168 udelay(1000);
169 } 169 }
170 } 170 }
171 171
172 static int udbg_rtascon_getc_poll(void) 172 static int udbg_rtascon_getc_poll(void)
173 { 173 {
174 int c; 174 int c;
175 175
176 if (!rtas.base) 176 if (!rtas.base)
177 return -1; 177 return -1;
178 178
179 if (rtas_call(rtas_getchar_token, 0, 2, &c)) 179 if (rtas_call(rtas_getchar_token, 0, 2, &c))
180 return -1; 180 return -1;
181 181
182 return c; 182 return c;
183 } 183 }
184 184
185 static int udbg_rtascon_getc(void) 185 static int udbg_rtascon_getc(void)
186 { 186 {
187 int c; 187 int c;
188 188
189 while ((c = udbg_rtascon_getc_poll()) == -1) 189 while ((c = udbg_rtascon_getc_poll()) == -1)
190 ; 190 ;
191 191
192 return c; 192 return c;
193 } 193 }
194 194
195 195
196 void __init udbg_init_rtas_console(void) 196 void __init udbg_init_rtas_console(void)
197 { 197 {
198 udbg_putc = udbg_rtascon_putc; 198 udbg_putc = udbg_rtascon_putc;
199 udbg_getc = udbg_rtascon_getc; 199 udbg_getc = udbg_rtascon_getc;
200 udbg_getc_poll = udbg_rtascon_getc_poll; 200 udbg_getc_poll = udbg_rtascon_getc_poll;
201 } 201 }
202 #endif /* CONFIG_UDBG_RTAS_CONSOLE */ 202 #endif /* CONFIG_UDBG_RTAS_CONSOLE */
203 203
204 void rtas_progress(char *s, unsigned short hex) 204 void rtas_progress(char *s, unsigned short hex)
205 { 205 {
206 struct device_node *root; 206 struct device_node *root;
207 int width; 207 int width;
208 const int *p; 208 const int *p;
209 char *os; 209 char *os;
210 static int display_character, set_indicator; 210 static int display_character, set_indicator;
211 static int display_width, display_lines, form_feed; 211 static int display_width, display_lines, form_feed;
212 static const int *row_width; 212 static const int *row_width;
213 static DEFINE_SPINLOCK(progress_lock); 213 static DEFINE_SPINLOCK(progress_lock);
214 static int current_line; 214 static int current_line;
215 static int pending_newline = 0; /* did last write end with unprinted newline? */ 215 static int pending_newline = 0; /* did last write end with unprinted newline? */
216 216
217 if (!rtas.base) 217 if (!rtas.base)
218 return; 218 return;
219 219
220 if (display_width == 0) { 220 if (display_width == 0) {
221 display_width = 0x10; 221 display_width = 0x10;
222 if ((root = of_find_node_by_path("/rtas"))) { 222 if ((root = of_find_node_by_path("/rtas"))) {
223 if ((p = of_get_property(root, 223 if ((p = of_get_property(root,
224 "ibm,display-line-length", NULL))) 224 "ibm,display-line-length", NULL)))
225 display_width = *p; 225 display_width = *p;
226 if ((p = of_get_property(root, 226 if ((p = of_get_property(root,
227 "ibm,form-feed", NULL))) 227 "ibm,form-feed", NULL)))
228 form_feed = *p; 228 form_feed = *p;
229 if ((p = of_get_property(root, 229 if ((p = of_get_property(root,
230 "ibm,display-number-of-lines", NULL))) 230 "ibm,display-number-of-lines", NULL)))
231 display_lines = *p; 231 display_lines = *p;
232 row_width = of_get_property(root, 232 row_width = of_get_property(root,
233 "ibm,display-truncation-length", NULL); 233 "ibm,display-truncation-length", NULL);
234 of_node_put(root); 234 of_node_put(root);
235 } 235 }
236 display_character = rtas_token("display-character"); 236 display_character = rtas_token("display-character");
237 set_indicator = rtas_token("set-indicator"); 237 set_indicator = rtas_token("set-indicator");
238 } 238 }
239 239
240 if (display_character == RTAS_UNKNOWN_SERVICE) { 240 if (display_character == RTAS_UNKNOWN_SERVICE) {
241 /* use hex display if available */ 241 /* use hex display if available */
242 if (set_indicator != RTAS_UNKNOWN_SERVICE) 242 if (set_indicator != RTAS_UNKNOWN_SERVICE)
243 rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex); 243 rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex);
244 return; 244 return;
245 } 245 }
246 246
247 spin_lock(&progress_lock); 247 spin_lock(&progress_lock);
248 248
249 /* 249 /*
250 * Last write ended with newline, but we didn't print it since 250 * Last write ended with newline, but we didn't print it since
251 * it would just clear the bottom line of output. Print it now 251 * it would just clear the bottom line of output. Print it now
252 * instead. 252 * instead.
253 * 253 *
254 * If no newline is pending and form feed is supported, clear the 254 * If no newline is pending and form feed is supported, clear the
255 * display with a form feed; otherwise, print a CR to start output 255 * display with a form feed; otherwise, print a CR to start output
256 * at the beginning of the line. 256 * at the beginning of the line.
257 */ 257 */
258 if (pending_newline) { 258 if (pending_newline) {
259 rtas_call(display_character, 1, 1, NULL, '\r'); 259 rtas_call(display_character, 1, 1, NULL, '\r');
260 rtas_call(display_character, 1, 1, NULL, '\n'); 260 rtas_call(display_character, 1, 1, NULL, '\n');
261 pending_newline = 0; 261 pending_newline = 0;
262 } else { 262 } else {
263 current_line = 0; 263 current_line = 0;
264 if (form_feed) 264 if (form_feed)
265 rtas_call(display_character, 1, 1, NULL, 265 rtas_call(display_character, 1, 1, NULL,
266 (char)form_feed); 266 (char)form_feed);
267 else 267 else
268 rtas_call(display_character, 1, 1, NULL, '\r'); 268 rtas_call(display_character, 1, 1, NULL, '\r');
269 } 269 }
270 270
271 if (row_width) 271 if (row_width)
272 width = row_width[current_line]; 272 width = row_width[current_line];
273 else 273 else
274 width = display_width; 274 width = display_width;
275 os = s; 275 os = s;
276 while (*os) { 276 while (*os) {
277 if (*os == '\n' || *os == '\r') { 277 if (*os == '\n' || *os == '\r') {
278 /* If newline is the last character, save it 278 /* If newline is the last character, save it
279 * until next call to avoid bumping up the 279 * until next call to avoid bumping up the
280 * display output. 280 * display output.
281 */ 281 */
282 if (*os == '\n' && !os[1]) { 282 if (*os == '\n' && !os[1]) {
283 pending_newline = 1; 283 pending_newline = 1;
284 current_line++; 284 current_line++;
285 if (current_line > display_lines-1) 285 if (current_line > display_lines-1)
286 current_line = display_lines-1; 286 current_line = display_lines-1;
287 spin_unlock(&progress_lock); 287 spin_unlock(&progress_lock);
288 return; 288 return;
289 } 289 }
290 290
291 /* RTAS wants CR-LF, not just LF */ 291 /* RTAS wants CR-LF, not just LF */
292 292
293 if (*os == '\n') { 293 if (*os == '\n') {
294 rtas_call(display_character, 1, 1, NULL, '\r'); 294 rtas_call(display_character, 1, 1, NULL, '\r');
295 rtas_call(display_character, 1, 1, NULL, '\n'); 295 rtas_call(display_character, 1, 1, NULL, '\n');
296 } else { 296 } else {
297 /* CR might be used to re-draw a line, so we'll 297 /* CR might be used to re-draw a line, so we'll
298 * leave it alone and not add LF. 298 * leave it alone and not add LF.
299 */ 299 */
300 rtas_call(display_character, 1, 1, NULL, *os); 300 rtas_call(display_character, 1, 1, NULL, *os);
301 } 301 }
302 302
303 if (row_width) 303 if (row_width)
304 width = row_width[current_line]; 304 width = row_width[current_line];
305 else 305 else
306 width = display_width; 306 width = display_width;
307 } else { 307 } else {
308 width--; 308 width--;
309 rtas_call(display_character, 1, 1, NULL, *os); 309 rtas_call(display_character, 1, 1, NULL, *os);
310 } 310 }
311 311
312 os++; 312 os++;
313 313
314 /* if we have exceeded the screen width, skip the rest of the line */ 314 /* if we have exceeded the screen width, skip the rest of the line */
315 if (width <= 0) 315 if (width <= 0)
316 while ((*os != 0) && (*os != '\n') && (*os != '\r')) 316 while ((*os != 0) && (*os != '\n') && (*os != '\r'))
317 os++; 317 os++;
318 } 318 }
319 319
320 spin_unlock(&progress_lock); 320 spin_unlock(&progress_lock);
321 } 321 }
322 EXPORT_SYMBOL(rtas_progress); /* needed by rtas_flash module */ 322 EXPORT_SYMBOL(rtas_progress); /* needed by rtas_flash module */
323 323
324 int rtas_token(const char *service) 324 int rtas_token(const char *service)
325 { 325 {
326 const int *tokp; 326 const int *tokp;
327 if (rtas.dev == NULL) 327 if (rtas.dev == NULL)
328 return RTAS_UNKNOWN_SERVICE; 328 return RTAS_UNKNOWN_SERVICE;
329 tokp = of_get_property(rtas.dev, service, NULL); 329 tokp = of_get_property(rtas.dev, service, NULL);
330 return tokp ? *tokp : RTAS_UNKNOWN_SERVICE; 330 return tokp ? *tokp : RTAS_UNKNOWN_SERVICE;
331 } 331 }
332 EXPORT_SYMBOL(rtas_token); 332 EXPORT_SYMBOL(rtas_token);
333 333
334 int rtas_service_present(const char *service) 334 int rtas_service_present(const char *service)
335 { 335 {
336 return rtas_token(service) != RTAS_UNKNOWN_SERVICE; 336 return rtas_token(service) != RTAS_UNKNOWN_SERVICE;
337 } 337 }
338 EXPORT_SYMBOL(rtas_service_present); 338 EXPORT_SYMBOL(rtas_service_present);
339 339
340 #ifdef CONFIG_RTAS_ERROR_LOGGING 340 #ifdef CONFIG_RTAS_ERROR_LOGGING
341 /* 341 /*
342 * Return the firmware-specified size of the error log buffer 342 * Return the firmware-specified size of the error log buffer
343 * for all rtas calls that require an error buffer argument. 343 * for all rtas calls that require an error buffer argument.
344 * This includes 'check-exception' and 'rtas-last-error'. 344 * This includes 'check-exception' and 'rtas-last-error'.
345 */ 345 */
346 int rtas_get_error_log_max(void) 346 int rtas_get_error_log_max(void)
347 { 347 {
348 static int rtas_error_log_max; 348 static int rtas_error_log_max;
349 if (rtas_error_log_max) 349 if (rtas_error_log_max)
350 return rtas_error_log_max; 350 return rtas_error_log_max;
351 351
352 rtas_error_log_max = rtas_token ("rtas-error-log-max"); 352 rtas_error_log_max = rtas_token ("rtas-error-log-max");
353 if ((rtas_error_log_max == RTAS_UNKNOWN_SERVICE) || 353 if ((rtas_error_log_max == RTAS_UNKNOWN_SERVICE) ||
354 (rtas_error_log_max > RTAS_ERROR_LOG_MAX)) { 354 (rtas_error_log_max > RTAS_ERROR_LOG_MAX)) {
355 printk (KERN_WARNING "RTAS: bad log buffer size %d\n", 355 printk (KERN_WARNING "RTAS: bad log buffer size %d\n",
356 rtas_error_log_max); 356 rtas_error_log_max);
357 rtas_error_log_max = RTAS_ERROR_LOG_MAX; 357 rtas_error_log_max = RTAS_ERROR_LOG_MAX;
358 } 358 }
359 return rtas_error_log_max; 359 return rtas_error_log_max;
360 } 360 }
361 EXPORT_SYMBOL(rtas_get_error_log_max); 361 EXPORT_SYMBOL(rtas_get_error_log_max);
362 362
363 363
364 static char rtas_err_buf[RTAS_ERROR_LOG_MAX]; 364 static char rtas_err_buf[RTAS_ERROR_LOG_MAX];
365 static int rtas_last_error_token; 365 static int rtas_last_error_token;
366 366
367 /** Return a copy of the detailed error text associated with the 367 /** Return a copy of the detailed error text associated with the
368 * most recent failed call to rtas. Because the error text 368 * most recent failed call to rtas. Because the error text
369 * might go stale if there are any other intervening rtas calls, 369 * might go stale if there are any other intervening rtas calls,
370 * this routine must be called atomically with whatever produced 370 * this routine must be called atomically with whatever produced
371 * the error (i.e. with rtas.lock still held from the previous call). 371 * the error (i.e. with rtas.lock still held from the previous call).
372 */ 372 */
373 static char *__fetch_rtas_last_error(char *altbuf) 373 static char *__fetch_rtas_last_error(char *altbuf)
374 { 374 {
375 struct rtas_args err_args, save_args; 375 struct rtas_args err_args, save_args;
376 u32 bufsz; 376 u32 bufsz;
377 char *buf = NULL; 377 char *buf = NULL;
378 378
379 if (rtas_last_error_token == -1) 379 if (rtas_last_error_token == -1)
380 return NULL; 380 return NULL;
381 381
382 bufsz = rtas_get_error_log_max(); 382 bufsz = rtas_get_error_log_max();
383 383
384 err_args.token = rtas_last_error_token; 384 err_args.token = rtas_last_error_token;
385 err_args.nargs = 2; 385 err_args.nargs = 2;
386 err_args.nret = 1; 386 err_args.nret = 1;
387 err_args.args[0] = (rtas_arg_t)__pa(rtas_err_buf); 387 err_args.args[0] = (rtas_arg_t)__pa(rtas_err_buf);
388 err_args.args[1] = bufsz; 388 err_args.args[1] = bufsz;
389 err_args.args[2] = 0; 389 err_args.args[2] = 0;
390 390
391 save_args = rtas.args; 391 save_args = rtas.args;
392 rtas.args = err_args; 392 rtas.args = err_args;
393 393
394 enter_rtas(__pa(&rtas.args)); 394 enter_rtas(__pa(&rtas.args));
395 395
396 err_args = rtas.args; 396 err_args = rtas.args;
397 rtas.args = save_args; 397 rtas.args = save_args;
398 398
399 /* Log the error in the unlikely case that there was one. */ 399 /* Log the error in the unlikely case that there was one. */
400 if (unlikely(err_args.args[2] == 0)) { 400 if (unlikely(err_args.args[2] == 0)) {
401 if (altbuf) { 401 if (altbuf) {
402 buf = altbuf; 402 buf = altbuf;
403 } else { 403 } else {
404 buf = rtas_err_buf; 404 buf = rtas_err_buf;
405 if (mem_init_done) 405 if (mem_init_done)
406 buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC); 406 buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
407 } 407 }
408 if (buf) 408 if (buf)
409 memcpy(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX); 409 memcpy(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX);
410 } 410 }
411 411
412 return buf; 412 return buf;
413 } 413 }
414 414
415 #define get_errorlog_buffer() kmalloc(RTAS_ERROR_LOG_MAX, GFP_KERNEL) 415 #define get_errorlog_buffer() kmalloc(RTAS_ERROR_LOG_MAX, GFP_KERNEL)
416 416
417 #else /* CONFIG_RTAS_ERROR_LOGGING */ 417 #else /* CONFIG_RTAS_ERROR_LOGGING */
418 #define __fetch_rtas_last_error(x) NULL 418 #define __fetch_rtas_last_error(x) NULL
419 #define get_errorlog_buffer() NULL 419 #define get_errorlog_buffer() NULL
420 #endif 420 #endif
421 421
422 int rtas_call(int token, int nargs, int nret, int *outputs, ...) 422 int rtas_call(int token, int nargs, int nret, int *outputs, ...)
423 { 423 {
424 va_list list; 424 va_list list;
425 int i; 425 int i;
426 unsigned long s; 426 unsigned long s;
427 struct rtas_args *rtas_args; 427 struct rtas_args *rtas_args;
428 char *buff_copy = NULL; 428 char *buff_copy = NULL;
429 int ret; 429 int ret;
430 430
431 if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE) 431 if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
432 return -1; 432 return -1;
433 433
434 s = lock_rtas(); 434 s = lock_rtas();
435 rtas_args = &rtas.args; 435 rtas_args = &rtas.args;
436 436
437 rtas_args->token = token; 437 rtas_args->token = token;
438 rtas_args->nargs = nargs; 438 rtas_args->nargs = nargs;
439 rtas_args->nret = nret; 439 rtas_args->nret = nret;
440 rtas_args->rets = (rtas_arg_t *)&(rtas_args->args[nargs]); 440 rtas_args->rets = (rtas_arg_t *)&(rtas_args->args[nargs]);
441 va_start(list, outputs); 441 va_start(list, outputs);
442 for (i = 0; i < nargs; ++i) 442 for (i = 0; i < nargs; ++i)
443 rtas_args->args[i] = va_arg(list, rtas_arg_t); 443 rtas_args->args[i] = va_arg(list, rtas_arg_t);
444 va_end(list); 444 va_end(list);
445 445
446 for (i = 0; i < nret; ++i) 446 for (i = 0; i < nret; ++i)
447 rtas_args->rets[i] = 0; 447 rtas_args->rets[i] = 0;
448 448
449 enter_rtas(__pa(rtas_args)); 449 enter_rtas(__pa(rtas_args));
450 450
451 /* A -1 return code indicates that the last command couldn't 451 /* A -1 return code indicates that the last command couldn't
452 be completed due to a hardware error. */ 452 be completed due to a hardware error. */
453 if (rtas_args->rets[0] == -1) 453 if (rtas_args->rets[0] == -1)
454 buff_copy = __fetch_rtas_last_error(NULL); 454 buff_copy = __fetch_rtas_last_error(NULL);
455 455
456 if (nret > 1 && outputs != NULL) 456 if (nret > 1 && outputs != NULL)
457 for (i = 0; i < nret-1; ++i) 457 for (i = 0; i < nret-1; ++i)
458 outputs[i] = rtas_args->rets[i+1]; 458 outputs[i] = rtas_args->rets[i+1];
459 ret = (nret > 0)? rtas_args->rets[0]: 0; 459 ret = (nret > 0)? rtas_args->rets[0]: 0;
460 460
461 unlock_rtas(s); 461 unlock_rtas(s);
462 462
463 if (buff_copy) { 463 if (buff_copy) {
464 log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0); 464 log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
465 if (mem_init_done) 465 if (mem_init_done)
466 kfree(buff_copy); 466 kfree(buff_copy);
467 } 467 }
468 return ret; 468 return ret;
469 } 469 }
470 EXPORT_SYMBOL(rtas_call); 470 EXPORT_SYMBOL(rtas_call);
471 471
472 /* For RTAS_BUSY (-2), delay for 1 millisecond. For an extended busy status 472 /* For RTAS_BUSY (-2), delay for 1 millisecond. For an extended busy status
473 * code of 990n, perform the hinted delay of 10^n (last digit) milliseconds. 473 * code of 990n, perform the hinted delay of 10^n (last digit) milliseconds.
474 */ 474 */
475 unsigned int rtas_busy_delay_time(int status) 475 unsigned int rtas_busy_delay_time(int status)
476 { 476 {
477 int order; 477 int order;
478 unsigned int ms = 0; 478 unsigned int ms = 0;
479 479
480 if (status == RTAS_BUSY) { 480 if (status == RTAS_BUSY) {
481 ms = 1; 481 ms = 1;
482 } else if (status >= 9900 && status <= 9905) { 482 } else if (status >= 9900 && status <= 9905) {
483 order = status - 9900; 483 order = status - 9900;
484 for (ms = 1; order > 0; order--) 484 for (ms = 1; order > 0; order--)
485 ms *= 10; 485 ms *= 10;
486 } 486 }
487 487
488 return ms; 488 return ms;
489 } 489 }
490 EXPORT_SYMBOL(rtas_busy_delay_time); 490 EXPORT_SYMBOL(rtas_busy_delay_time);
491 491
492 /* For an RTAS busy status code, perform the hinted delay. */ 492 /* For an RTAS busy status code, perform the hinted delay. */
493 unsigned int rtas_busy_delay(int status) 493 unsigned int rtas_busy_delay(int status)
494 { 494 {
495 unsigned int ms; 495 unsigned int ms;
496 496
497 might_sleep(); 497 might_sleep();
498 ms = rtas_busy_delay_time(status); 498 ms = rtas_busy_delay_time(status);
499 if (ms && need_resched()) 499 if (ms && need_resched())
500 msleep(ms); 500 msleep(ms);
501 501
502 return ms; 502 return ms;
503 } 503 }
504 EXPORT_SYMBOL(rtas_busy_delay); 504 EXPORT_SYMBOL(rtas_busy_delay);
505 505
506 static int rtas_error_rc(int rtas_rc) 506 static int rtas_error_rc(int rtas_rc)
507 { 507 {
508 int rc; 508 int rc;
509 509
510 switch (rtas_rc) { 510 switch (rtas_rc) {
511 case -1: /* Hardware Error */ 511 case -1: /* Hardware Error */
512 rc = -EIO; 512 rc = -EIO;
513 break; 513 break;
514 case -3: /* Bad indicator/domain/etc */ 514 case -3: /* Bad indicator/domain/etc */
515 rc = -EINVAL; 515 rc = -EINVAL;
516 break; 516 break;
517 case -9000: /* Isolation error */ 517 case -9000: /* Isolation error */
518 rc = -EFAULT; 518 rc = -EFAULT;
519 break; 519 break;
520 case -9001: /* Outstanding TCE/PTE */ 520 case -9001: /* Outstanding TCE/PTE */
521 rc = -EEXIST; 521 rc = -EEXIST;
522 break; 522 break;
523 case -9002: /* No usable slot */ 523 case -9002: /* No usable slot */
524 rc = -ENODEV; 524 rc = -ENODEV;
525 break; 525 break;
526 default: 526 default:
527 printk(KERN_ERR "%s: unexpected RTAS error %d\n", 527 printk(KERN_ERR "%s: unexpected RTAS error %d\n",
528 __func__, rtas_rc); 528 __func__, rtas_rc);
529 rc = -ERANGE; 529 rc = -ERANGE;
530 break; 530 break;
531 } 531 }
532 return rc; 532 return rc;
533 } 533 }
534 534
535 int rtas_get_power_level(int powerdomain, int *level) 535 int rtas_get_power_level(int powerdomain, int *level)
536 { 536 {
537 int token = rtas_token("get-power-level"); 537 int token = rtas_token("get-power-level");
538 int rc; 538 int rc;
539 539
540 if (token == RTAS_UNKNOWN_SERVICE) 540 if (token == RTAS_UNKNOWN_SERVICE)
541 return -ENOENT; 541 return -ENOENT;
542 542
543 while ((rc = rtas_call(token, 1, 2, level, powerdomain)) == RTAS_BUSY) 543 while ((rc = rtas_call(token, 1, 2, level, powerdomain)) == RTAS_BUSY)
544 udelay(1); 544 udelay(1);
545 545
546 if (rc < 0) 546 if (rc < 0)
547 return rtas_error_rc(rc); 547 return rtas_error_rc(rc);
548 return rc; 548 return rc;
549 } 549 }
550 EXPORT_SYMBOL(rtas_get_power_level); 550 EXPORT_SYMBOL(rtas_get_power_level);
551 551
552 int rtas_set_power_level(int powerdomain, int level, int *setlevel) 552 int rtas_set_power_level(int powerdomain, int level, int *setlevel)
553 { 553 {
554 int token = rtas_token("set-power-level"); 554 int token = rtas_token("set-power-level");
555 int rc; 555 int rc;
556 556
557 if (token == RTAS_UNKNOWN_SERVICE) 557 if (token == RTAS_UNKNOWN_SERVICE)
558 return -ENOENT; 558 return -ENOENT;
559 559
560 do { 560 do {
561 rc = rtas_call(token, 2, 2, setlevel, powerdomain, level); 561 rc = rtas_call(token, 2, 2, setlevel, powerdomain, level);
562 } while (rtas_busy_delay(rc)); 562 } while (rtas_busy_delay(rc));
563 563
564 if (rc < 0) 564 if (rc < 0)
565 return rtas_error_rc(rc); 565 return rtas_error_rc(rc);
566 return rc; 566 return rc;
567 } 567 }
568 EXPORT_SYMBOL(rtas_set_power_level); 568 EXPORT_SYMBOL(rtas_set_power_level);
569 569
570 int rtas_get_sensor(int sensor, int index, int *state) 570 int rtas_get_sensor(int sensor, int index, int *state)
571 { 571 {
572 int token = rtas_token("get-sensor-state"); 572 int token = rtas_token("get-sensor-state");
573 int rc; 573 int rc;
574 574
575 if (token == RTAS_UNKNOWN_SERVICE) 575 if (token == RTAS_UNKNOWN_SERVICE)
576 return -ENOENT; 576 return -ENOENT;
577 577
578 do { 578 do {
579 rc = rtas_call(token, 2, 2, state, sensor, index); 579 rc = rtas_call(token, 2, 2, state, sensor, index);
580 } while (rtas_busy_delay(rc)); 580 } while (rtas_busy_delay(rc));
581 581
582 if (rc < 0) 582 if (rc < 0)
583 return rtas_error_rc(rc); 583 return rtas_error_rc(rc);
584 return rc; 584 return rc;
585 } 585 }
586 EXPORT_SYMBOL(rtas_get_sensor); 586 EXPORT_SYMBOL(rtas_get_sensor);
587 587
588 bool rtas_indicator_present(int token, int *maxindex) 588 bool rtas_indicator_present(int token, int *maxindex)
589 { 589 {
590 int proplen, count, i; 590 int proplen, count, i;
591 const struct indicator_elem { 591 const struct indicator_elem {
592 u32 token; 592 u32 token;
593 u32 maxindex; 593 u32 maxindex;
594 } *indicators; 594 } *indicators;
595 595
596 indicators = of_get_property(rtas.dev, "rtas-indicators", &proplen); 596 indicators = of_get_property(rtas.dev, "rtas-indicators", &proplen);
597 if (!indicators) 597 if (!indicators)
598 return false; 598 return false;
599 599
600 count = proplen / sizeof(struct indicator_elem); 600 count = proplen / sizeof(struct indicator_elem);
601 601
602 for (i = 0; i < count; i++) { 602 for (i = 0; i < count; i++) {
603 if (indicators[i].token != token) 603 if (indicators[i].token != token)
604 continue; 604 continue;
605 if (maxindex) 605 if (maxindex)
606 *maxindex = indicators[i].maxindex; 606 *maxindex = indicators[i].maxindex;
607 return true; 607 return true;
608 } 608 }
609 609
610 return false; 610 return false;
611 } 611 }
612 EXPORT_SYMBOL(rtas_indicator_present); 612 EXPORT_SYMBOL(rtas_indicator_present);
613 613
614 int rtas_set_indicator(int indicator, int index, int new_value) 614 int rtas_set_indicator(int indicator, int index, int new_value)
615 { 615 {
616 int token = rtas_token("set-indicator"); 616 int token = rtas_token("set-indicator");
617 int rc; 617 int rc;
618 618
619 if (token == RTAS_UNKNOWN_SERVICE) 619 if (token == RTAS_UNKNOWN_SERVICE)
620 return -ENOENT; 620 return -ENOENT;
621 621
622 do { 622 do {
623 rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value); 623 rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
624 } while (rtas_busy_delay(rc)); 624 } while (rtas_busy_delay(rc));
625 625
626 if (rc < 0) 626 if (rc < 0)
627 return rtas_error_rc(rc); 627 return rtas_error_rc(rc);
628 return rc; 628 return rc;
629 } 629 }
630 EXPORT_SYMBOL(rtas_set_indicator); 630 EXPORT_SYMBOL(rtas_set_indicator);
631 631
632 /* 632 /*
633 * Like rtas_set_indicator(), but does not retry on RTAS busy/extended delay 633 * Like rtas_set_indicator(), but does not retry on RTAS busy/extended delay
634 */ 634 */
635 int rtas_set_indicator_fast(int indicator, int index, int new_value) 635 int rtas_set_indicator_fast(int indicator, int index, int new_value)
636 { 636 {
637 int rc; 637 int rc;
638 int token = rtas_token("set-indicator"); 638 int token = rtas_token("set-indicator");
639 639
640 if (token == RTAS_UNKNOWN_SERVICE) 640 if (token == RTAS_UNKNOWN_SERVICE)
641 return -ENOENT; 641 return -ENOENT;
642 642
643 rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value); 643 rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
644 644
645 WARN_ON(rc == -2 || (rc >= 9900 && rc <= 9905)); 645 WARN_ON(rc == -2 || (rc >= 9900 && rc <= 9905));
646 646
647 if (rc < 0) 647 if (rc < 0)
648 return rtas_error_rc(rc); 648 return rtas_error_rc(rc);
649 649
650 return rc; 650 return rc;
651 } 651 }
652 652
653 void rtas_restart(char *cmd) 653 void rtas_restart(char *cmd)
654 { 654 {
655 if (rtas_flash_term_hook) 655 if (rtas_flash_term_hook)
656 rtas_flash_term_hook(SYS_RESTART); 656 rtas_flash_term_hook(SYS_RESTART);
657 printk("RTAS system-reboot returned %d\n", 657 printk("RTAS system-reboot returned %d\n",
658 rtas_call(rtas_token("system-reboot"), 0, 1, NULL)); 658 rtas_call(rtas_token("system-reboot"), 0, 1, NULL));
659 for (;;); 659 for (;;);
660 } 660 }
661 661
662 void rtas_power_off(void) 662 void rtas_power_off(void)
663 { 663 {
664 if (rtas_flash_term_hook) 664 if (rtas_flash_term_hook)
665 rtas_flash_term_hook(SYS_POWER_OFF); 665 rtas_flash_term_hook(SYS_POWER_OFF);
666 /* allow power on only with power button press */ 666 /* allow power on only with power button press */
667 printk("RTAS power-off returned %d\n", 667 printk("RTAS power-off returned %d\n",
668 rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1)); 668 rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
669 for (;;); 669 for (;;);
670 } 670 }
671 671
672 void rtas_halt(void) 672 void rtas_halt(void)
673 { 673 {
674 if (rtas_flash_term_hook) 674 if (rtas_flash_term_hook)
675 rtas_flash_term_hook(SYS_HALT); 675 rtas_flash_term_hook(SYS_HALT);
676 /* allow power on only with power button press */ 676 /* allow power on only with power button press */
677 printk("RTAS power-off returned %d\n", 677 printk("RTAS power-off returned %d\n",
678 rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1)); 678 rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
679 for (;;); 679 for (;;);
680 } 680 }
681 681
682 /* Must be in the RMO region, so we place it here */ 682 /* Must be in the RMO region, so we place it here */
683 static char rtas_os_term_buf[2048]; 683 static char rtas_os_term_buf[2048];
684 684
685 void rtas_os_term(char *str) 685 void rtas_os_term(char *str)
686 { 686 {
687 int status; 687 int status;
688 688
689 /* 689 /*
690 * Firmware with the ibm,extended-os-term property is guaranteed 690 * Firmware with the ibm,extended-os-term property is guaranteed
691 * to always return from an ibm,os-term call. Earlier versions without 691 * to always return from an ibm,os-term call. Earlier versions without
692 * this property may terminate the partition which we want to avoid 692 * this property may terminate the partition which we want to avoid
693 * since it interferes with panic_timeout. 693 * since it interferes with panic_timeout.
694 */ 694 */
695 if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") || 695 if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") ||
696 RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term")) 696 RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term"))
697 return; 697 return;
698 698
699 snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str); 699 snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);
700 700
701 do { 701 do {
702 status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL, 702 status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL,
703 __pa(rtas_os_term_buf)); 703 __pa(rtas_os_term_buf));
704 } while (rtas_busy_delay(status)); 704 } while (rtas_busy_delay(status));
705 705
706 if (status != 0) 706 if (status != 0)
707 printk(KERN_EMERG "ibm,os-term call failed %d\n", status); 707 printk(KERN_EMERG "ibm,os-term call failed %d\n", status);
708 } 708 }
709 709
710 static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE; 710 static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;
711 #ifdef CONFIG_PPC_PSERIES 711 #ifdef CONFIG_PPC_PSERIES
712 static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_when_done) 712 static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_when_done)
713 { 713 {
714 u16 slb_size = mmu_slb_size; 714 u16 slb_size = mmu_slb_size;
715 int rc = H_MULTI_THREADS_ACTIVE; 715 int rc = H_MULTI_THREADS_ACTIVE;
716 int cpu; 716 int cpu;
717 717
718 slb_set_size(SLB_MIN_SIZE); 718 slb_set_size(SLB_MIN_SIZE);
719 stop_topology_update(); 719 stop_topology_update();
720 printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id()); 720 printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());
721 721
722 while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) && 722 while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
723 !atomic_read(&data->error)) 723 !atomic_read(&data->error))
724 rc = rtas_call(data->token, 0, 1, NULL); 724 rc = rtas_call(data->token, 0, 1, NULL);
725 725
726 if (rc || atomic_read(&data->error)) { 726 if (rc || atomic_read(&data->error)) {
727 printk(KERN_DEBUG "ibm,suspend-me returned %d\n", rc); 727 printk(KERN_DEBUG "ibm,suspend-me returned %d\n", rc);
728 slb_set_size(slb_size); 728 slb_set_size(slb_size);
729 } 729 }
730 730
731 if (atomic_read(&data->error)) 731 if (atomic_read(&data->error))
732 rc = atomic_read(&data->error); 732 rc = atomic_read(&data->error);
733 733
734 atomic_set(&data->error, rc); 734 atomic_set(&data->error, rc);
735 start_topology_update(); 735 start_topology_update();
736 pSeries_coalesce_init(); 736 pSeries_coalesce_init();
737 737
738 if (wake_when_done) { 738 if (wake_when_done) {
739 atomic_set(&data->done, 1); 739 atomic_set(&data->done, 1);
740 740
741 for_each_online_cpu(cpu) 741 for_each_online_cpu(cpu)
742 plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu)); 742 plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
743 } 743 }
744 744
745 if (atomic_dec_return(&data->working) == 0) 745 if (atomic_dec_return(&data->working) == 0)
746 complete(data->complete); 746 complete(data->complete);
747 747
748 return rc; 748 return rc;
749 } 749 }
750 750
751 int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data) 751 int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data)
752 { 752 {
753 atomic_inc(&data->working); 753 atomic_inc(&data->working);
754 return __rtas_suspend_last_cpu(data, 0); 754 return __rtas_suspend_last_cpu(data, 0);
755 } 755 }
756 756
757 static int __rtas_suspend_cpu(struct rtas_suspend_me_data *data, int wake_when_done) 757 static int __rtas_suspend_cpu(struct rtas_suspend_me_data *data, int wake_when_done)
758 { 758 {
759 long rc = H_SUCCESS; 759 long rc = H_SUCCESS;
760 unsigned long msr_save; 760 unsigned long msr_save;
761 int cpu; 761 int cpu;
762 762
763 atomic_inc(&data->working); 763 atomic_inc(&data->working);
764 764
765 /* really need to ensure MSR.EE is off for H_JOIN */ 765 /* really need to ensure MSR.EE is off for H_JOIN */
766 msr_save = mfmsr(); 766 msr_save = mfmsr();
767 mtmsr(msr_save & ~(MSR_EE)); 767 mtmsr(msr_save & ~(MSR_EE));
768 768
769 while (rc == H_SUCCESS && !atomic_read(&data->done) && !atomic_read(&data->error)) 769 while (rc == H_SUCCESS && !atomic_read(&data->done) && !atomic_read(&data->error))
770 rc = plpar_hcall_norets(H_JOIN); 770 rc = plpar_hcall_norets(H_JOIN);
771 771
772 mtmsr(msr_save); 772 mtmsr(msr_save);
773 773
774 if (rc == H_SUCCESS) { 774 if (rc == H_SUCCESS) {
775 /* This cpu was prodded and the suspend is complete. */ 775 /* This cpu was prodded and the suspend is complete. */
776 goto out; 776 goto out;
777 } else if (rc == H_CONTINUE) { 777 } else if (rc == H_CONTINUE) {
778 /* All other cpus are in H_JOIN, this cpu does 778 /* All other cpus are in H_JOIN, this cpu does
779 * the suspend. 779 * the suspend.
780 */ 780 */
781 return __rtas_suspend_last_cpu(data, wake_when_done); 781 return __rtas_suspend_last_cpu(data, wake_when_done);
782 } else { 782 } else {
783 printk(KERN_ERR "H_JOIN on cpu %i failed with rc = %ld\n", 783 printk(KERN_ERR "H_JOIN on cpu %i failed with rc = %ld\n",
784 smp_processor_id(), rc); 784 smp_processor_id(), rc);
785 atomic_set(&data->error, rc); 785 atomic_set(&data->error, rc);
786 } 786 }
787 787
788 if (wake_when_done) { 788 if (wake_when_done) {
789 atomic_set(&data->done, 1); 789 atomic_set(&data->done, 1);
790 790
791 /* This cpu did the suspend or got an error; in either case, 791 /* This cpu did the suspend or got an error; in either case,
792 * we need to prod all other cpus out of the join state. 792 * we need to prod all other cpus out of the join state.
793 * Extra prods are harmless. 793 * Extra prods are harmless.
794 */ 794 */
795 for_each_online_cpu(cpu) 795 for_each_online_cpu(cpu)
796 plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu)); 796 plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
797 } 797 }
798 out: 798 out:
799 if (atomic_dec_return(&data->working) == 0) 799 if (atomic_dec_return(&data->working) == 0)
800 complete(data->complete); 800 complete(data->complete);
801 return rc; 801 return rc;
802 } 802 }
803 803
804 int rtas_suspend_cpu(struct rtas_suspend_me_data *data) 804 int rtas_suspend_cpu(struct rtas_suspend_me_data *data)
805 { 805 {
806 return __rtas_suspend_cpu(data, 0); 806 return __rtas_suspend_cpu(data, 0);
807 } 807 }
808 808
809 static void rtas_percpu_suspend_me(void *info) 809 static void rtas_percpu_suspend_me(void *info)
810 { 810 {
811 __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1); 811 __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
812 } 812 }
813 813
814 int rtas_ibm_suspend_me(struct rtas_args *args) 814 int rtas_ibm_suspend_me(struct rtas_args *args)
815 { 815 {
816 long state; 816 long state;
817 long rc; 817 long rc;
818 unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; 818 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
819 struct rtas_suspend_me_data data; 819 struct rtas_suspend_me_data data;
820 DECLARE_COMPLETION_ONSTACK(done); 820 DECLARE_COMPLETION_ONSTACK(done);
821 821
822 if (!rtas_service_present("ibm,suspend-me")) 822 if (!rtas_service_present("ibm,suspend-me"))
823 return -ENOSYS; 823 return -ENOSYS;
824 824
825 /* Make sure the state is valid */ 825 /* Make sure the state is valid */
826 rc = plpar_hcall(H_VASI_STATE, retbuf, 826 rc = plpar_hcall(H_VASI_STATE, retbuf,
827 ((u64)args->args[0] << 32) | args->args[1]); 827 ((u64)args->args[0] << 32) | args->args[1]);
828 828
829 state = retbuf[0]; 829 state = retbuf[0];
830 830
831 if (rc) { 831 if (rc) {
832 printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n",rc); 832 printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n",rc);
833 return rc; 833 return rc;
834 } else if (state == H_VASI_ENABLED) { 834 } else if (state == H_VASI_ENABLED) {
835 args->args[args->nargs] = RTAS_NOT_SUSPENDABLE; 835 args->args[args->nargs] = RTAS_NOT_SUSPENDABLE;
836 return 0; 836 return 0;
837 } else if (state != H_VASI_SUSPENDING) { 837 } else if (state != H_VASI_SUSPENDING) {
838 printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n", 838 printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n",
839 state); 839 state);
840 args->args[args->nargs] = -1; 840 args->args[args->nargs] = -1;
841 return 0; 841 return 0;
842 } 842 }
843 843
844 atomic_set(&data.working, 0); 844 atomic_set(&data.working, 0);
845 atomic_set(&data.done, 0); 845 atomic_set(&data.done, 0);
846 atomic_set(&data.error, 0); 846 atomic_set(&data.error, 0);
847 data.token = rtas_token("ibm,suspend-me"); 847 data.token = rtas_token("ibm,suspend-me");
848 data.complete = &done; 848 data.complete = &done;
849 849
850 /* Call the function on all CPUs. One of us will make the 850 /* Call the function on all CPUs. One of us will make the
851 * rtas call 851 * rtas call
852 */ 852 */
853 if (on_each_cpu(rtas_percpu_suspend_me, &data, 0)) 853 if (on_each_cpu(rtas_percpu_suspend_me, &data, 0))
854 atomic_set(&data.error, -EINVAL); 854 atomic_set(&data.error, -EINVAL);
855 855
856 wait_for_completion(&done); 856 wait_for_completion(&done);
857 857
858 if (atomic_read(&data.error) != 0) 858 if (atomic_read(&data.error) != 0)
859 printk(KERN_ERR "Error doing global join\n"); 859 printk(KERN_ERR "Error doing global join\n");
860 860
861 return atomic_read(&data.error); 861 return atomic_read(&data.error);
862 } 862 }
863 #else /* CONFIG_PPC_PSERIES */ 863 #else /* CONFIG_PPC_PSERIES */
864 int rtas_ibm_suspend_me(struct rtas_args *args) 864 int rtas_ibm_suspend_me(struct rtas_args *args)
865 { 865 {
866 return -ENOSYS; 866 return -ENOSYS;
867 } 867 }
868 #endif 868 #endif
869 869
870 asmlinkage int ppc_rtas(struct rtas_args __user *uargs) 870 asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
871 { 871 {
872 struct rtas_args args; 872 struct rtas_args args;
873 unsigned long flags; 873 unsigned long flags;
874 char *buff_copy, *errbuf = NULL; 874 char *buff_copy, *errbuf = NULL;
875 int nargs; 875 int nargs;
876 int rc; 876 int rc;
877 877
878 if (!capable(CAP_SYS_ADMIN)) 878 if (!capable(CAP_SYS_ADMIN))
879 return -EPERM; 879 return -EPERM;
880 880
881 if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0) 881 if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
882 return -EFAULT; 882 return -EFAULT;
883 883
884 nargs = args.nargs; 884 nargs = args.nargs;
885 if (nargs > ARRAY_SIZE(args.args) 885 if (nargs > ARRAY_SIZE(args.args)
886 || args.nret > ARRAY_SIZE(args.args) 886 || args.nret > ARRAY_SIZE(args.args)
887 || nargs + args.nret > ARRAY_SIZE(args.args)) 887 || nargs + args.nret > ARRAY_SIZE(args.args))
888 return -EINVAL; 888 return -EINVAL;
889 889
890 /* Copy in args. */ 890 /* Copy in args. */
891 if (copy_from_user(args.args, uargs->args, 891 if (copy_from_user(args.args, uargs->args,
892 nargs * sizeof(rtas_arg_t)) != 0) 892 nargs * sizeof(rtas_arg_t)) != 0)
893 return -EFAULT; 893 return -EFAULT;
894 894
895 if (args.token == RTAS_UNKNOWN_SERVICE) 895 if (args.token == RTAS_UNKNOWN_SERVICE)
896 return -EINVAL; 896 return -EINVAL;
897 897
898 args.rets = &args.args[nargs]; 898 args.rets = &args.args[nargs];
899 memset(args.rets, 0, args.nret * sizeof(rtas_arg_t)); 899 memset(args.rets, 0, args.nret * sizeof(rtas_arg_t));
900 900
901 /* Need to handle ibm,suspend_me call specially */ 901 /* Need to handle ibm,suspend_me call specially */
902 if (args.token == ibm_suspend_me_token) { 902 if (args.token == ibm_suspend_me_token) {
903 rc = rtas_ibm_suspend_me(&args); 903 rc = rtas_ibm_suspend_me(&args);
904 if (rc) 904 if (rc)
905 return rc; 905 return rc;
906 goto copy_return; 906 goto copy_return;
907 } 907 }
908 908
909 buff_copy = get_errorlog_buffer(); 909 buff_copy = get_errorlog_buffer();
910 910
911 flags = lock_rtas(); 911 flags = lock_rtas();
912 912
913 rtas.args = args; 913 rtas.args = args;
914 enter_rtas(__pa(&rtas.args)); 914 enter_rtas(__pa(&rtas.args));
915 args = rtas.args; 915 args = rtas.args;
916 916
917 /* A -1 return code indicates that the last command couldn't 917 /* A -1 return code indicates that the last command couldn't
918 be completed due to a hardware error. */ 918 be completed due to a hardware error. */
919 if (args.rets[0] == -1) 919 if (args.rets[0] == -1)
920 errbuf = __fetch_rtas_last_error(buff_copy); 920 errbuf = __fetch_rtas_last_error(buff_copy);
921 921
922 unlock_rtas(flags); 922 unlock_rtas(flags);
923 923
924 if (buff_copy) { 924 if (buff_copy) {
925 if (errbuf) 925 if (errbuf)
926 log_error(errbuf, ERR_TYPE_RTAS_LOG, 0); 926 log_error(errbuf, ERR_TYPE_RTAS_LOG, 0);
927 kfree(buff_copy); 927 kfree(buff_copy);
928 } 928 }
929 929
930 copy_return: 930 copy_return:
931 /* Copy out args. */ 931 /* Copy out args. */
932 if (copy_to_user(uargs->args + nargs, 932 if (copy_to_user(uargs->args + nargs,
933 args.args + nargs, 933 args.args + nargs,
934 args.nret * sizeof(rtas_arg_t)) != 0) 934 args.nret * sizeof(rtas_arg_t)) != 0)
935 return -EFAULT; 935 return -EFAULT;
936 936
937 return 0; 937 return 0;
938 } 938 }
939 939
940 /* 940 /*
941 * Call early during boot, before mem init or bootmem, to retrieve the RTAS 941 * Call early during boot, before mem init or bootmem, to retrieve the RTAS
942 * information from the device-tree and allocate the RMO buffer for userland 942 * information from the device-tree and allocate the RMO buffer for userland
943 * accesses. 943 * accesses.
944 */ 944 */
945 void __init rtas_initialize(void) 945 void __init rtas_initialize(void)
946 { 946 {
947 unsigned long rtas_region = RTAS_INSTANTIATE_MAX; 947 unsigned long rtas_region = RTAS_INSTANTIATE_MAX;
948 948
949 /* Get RTAS dev node and fill up our "rtas" structure with infos 949 /* Get RTAS dev node and fill up our "rtas" structure with infos
950 * about it. 950 * about it.
951 */ 951 */
952 rtas.dev = of_find_node_by_name(NULL, "rtas"); 952 rtas.dev = of_find_node_by_name(NULL, "rtas");
953 if (rtas.dev) { 953 if (rtas.dev) {
954 const u32 *basep, *entryp, *sizep; 954 const u32 *basep, *entryp, *sizep;
955 955
956 basep = of_get_property(rtas.dev, "linux,rtas-base", NULL); 956 basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
957 sizep = of_get_property(rtas.dev, "rtas-size", NULL); 957 sizep = of_get_property(rtas.dev, "rtas-size", NULL);
958 if (basep != NULL && sizep != NULL) { 958 if (basep != NULL && sizep != NULL) {
959 rtas.base = *basep; 959 rtas.base = *basep;
960 rtas.size = *sizep; 960 rtas.size = *sizep;
961 entryp = of_get_property(rtas.dev, 961 entryp = of_get_property(rtas.dev,
962 "linux,rtas-entry", NULL); 962 "linux,rtas-entry", NULL);
963 if (entryp == NULL) /* Ugh */ 963 if (entryp == NULL) /* Ugh */
964 rtas.entry = rtas.base; 964 rtas.entry = rtas.base;
965 else 965 else
966 rtas.entry = *entryp; 966 rtas.entry = *entryp;
967 } else 967 } else
968 rtas.dev = NULL; 968 rtas.dev = NULL;
969 } 969 }
970 if (!rtas.dev) 970 if (!rtas.dev)
971 return; 971 return;
972 972
973 /* If RTAS was found, allocate the RMO buffer for it and look for 973 /* If RTAS was found, allocate the RMO buffer for it and look for
974 * the stop-self token if any 974 * the stop-self token if any
975 */ 975 */
976 #ifdef CONFIG_PPC64 976 #ifdef CONFIG_PPC64
977 if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) { 977 if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) {
978 rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX); 978 rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX);
979 ibm_suspend_me_token = rtas_token("ibm,suspend-me"); 979 ibm_suspend_me_token = rtas_token("ibm,suspend-me");
980 } 980 }
981 #endif 981 #endif
982 rtas_rmo_buf = memblock_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region); 982 rtas_rmo_buf = memblock_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
983 983
984 #ifdef CONFIG_RTAS_ERROR_LOGGING 984 #ifdef CONFIG_RTAS_ERROR_LOGGING
985 rtas_last_error_token = rtas_token("rtas-last-error"); 985 rtas_last_error_token = rtas_token("rtas-last-error");
986 #endif 986 #endif
987 } 987 }
988 988
989 int __init early_init_dt_scan_rtas(unsigned long node, 989 int __init early_init_dt_scan_rtas(unsigned long node,
990 const char *uname, int depth, void *data) 990 const char *uname, int depth, void *data)
991 { 991 {
992 u32 *basep, *entryp, *sizep; 992 u32 *basep, *entryp, *sizep;
993 993
994 if (depth != 1 || strcmp(uname, "rtas") != 0) 994 if (depth != 1 || strcmp(uname, "rtas") != 0)
995 return 0; 995 return 0;
996 996
997 basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL); 997 basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
998 entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL); 998 entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
999 sizep = of_get_flat_dt_prop(node, "rtas-size", NULL); 999 sizep = of_get_flat_dt_prop(node, "rtas-size", NULL);
1000 1000
1001 if (basep && entryp && sizep) { 1001 if (basep && entryp && sizep) {
1002 rtas.base = *basep; 1002 rtas.base = *basep;
1003 rtas.entry = *entryp; 1003 rtas.entry = *entryp;
1004 rtas.size = *sizep; 1004 rtas.size = *sizep;
1005 } 1005 }
1006 1006
1007 #ifdef CONFIG_UDBG_RTAS_CONSOLE 1007 #ifdef CONFIG_UDBG_RTAS_CONSOLE
1008 basep = of_get_flat_dt_prop(node, "put-term-char", NULL); 1008 basep = of_get_flat_dt_prop(node, "put-term-char", NULL);
1009 if (basep) 1009 if (basep)
1010 rtas_putchar_token = *basep; 1010 rtas_putchar_token = *basep;
1011 1011
1012 basep = of_get_flat_dt_prop(node, "get-term-char", NULL); 1012 basep = of_get_flat_dt_prop(node, "get-term-char", NULL);
1013 if (basep) 1013 if (basep)
1014 rtas_getchar_token = *basep; 1014 rtas_getchar_token = *basep;
1015 1015
1016 if (rtas_putchar_token != RTAS_UNKNOWN_SERVICE && 1016 if (rtas_putchar_token != RTAS_UNKNOWN_SERVICE &&
1017 rtas_getchar_token != RTAS_UNKNOWN_SERVICE) 1017 rtas_getchar_token != RTAS_UNKNOWN_SERVICE)
1018 udbg_init_rtas_console(); 1018 udbg_init_rtas_console();
1019 1019
1020 #endif 1020 #endif
1021 1021
1022 /* break now */ 1022 /* break now */
1023 return 1; 1023 return 1;
1024 } 1024 }
1025 1025
1026 static arch_spinlock_t timebase_lock; 1026 static arch_spinlock_t timebase_lock;
1027 static u64 timebase = 0; 1027 static u64 timebase = 0;
1028 1028
1029 void __cpuinit rtas_give_timebase(void) 1029 void __cpuinit rtas_give_timebase(void)
1030 { 1030 {
1031 unsigned long flags; 1031 unsigned long flags;
1032 1032
1033 local_irq_save(flags); 1033 local_irq_save(flags);
1034 hard_irq_disable(); 1034 hard_irq_disable();
1035 arch_spin_lock(&timebase_lock); 1035 arch_spin_lock(&timebase_lock);
1036 rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL); 1036 rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
1037 timebase = get_tb(); 1037 timebase = get_tb();
1038 arch_spin_unlock(&timebase_lock); 1038 arch_spin_unlock(&timebase_lock);
1039 1039
1040 while (timebase) 1040 while (timebase)
1041 barrier(); 1041 barrier();
1042 rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL); 1042 rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
1043 local_irq_restore(flags); 1043 local_irq_restore(flags);
1044 } 1044 }
1045 1045
1046 void __cpuinit rtas_take_timebase(void) 1046 void __cpuinit rtas_take_timebase(void)
1047 { 1047 {
1048 while (!timebase) 1048 while (!timebase)
1049 barrier(); 1049 barrier();
1050 arch_spin_lock(&timebase_lock); 1050 arch_spin_lock(&timebase_lock);
1051 set_tb(timebase >> 32, timebase & 0xffffffff); 1051 set_tb(timebase >> 32, timebase & 0xffffffff);
1052 timebase = 0; 1052 timebase = 0;
1053 arch_spin_unlock(&timebase_lock); 1053 arch_spin_unlock(&timebase_lock);
1054 } 1054 }
1055 1055
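The rtas_give_timebase()/rtas_take_timebase() pair above hands the timebase from one CPU to another through the shared `timebase` variable: the giver freezes the timebase, publishes the value, and spins until the taker has copied it into its own timebase register and cleared the variable back to zero, after which the giver thaws the timebase again. As a rough user-space illustration of that handshake (not kernel code; every name below is made up for the sketch), the same pattern looks like this:

/*
 * Minimal sketch of the give/take handoff: one side publishes a value and
 * spins until the other side has consumed it and zeroed it again.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong shared_tb;                    /* plays the role of `timebase` */

static void *taker(void *arg)
{
        unsigned long tb;

        while ((tb = atomic_load(&shared_tb)) == 0)   /* wait for the giver */
                ;
        printf("taker received timebase %lu\n", tb);
        atomic_store(&shared_tb, 0);                  /* tell the giver we are done */
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, taker, NULL);
        atomic_store(&shared_tb, 123456UL);           /* "freeze" and publish the value */
        while (atomic_load(&shared_tb) != 0)          /* spin until it has been taken */
                ;
        pthread_join(t, NULL);                        /* "thaw" would happen here */
        return 0;
}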
arch/powerpc/kernel/setup-common.c
1 /* 1 /*
2 * Common boot and setup code for both 32-bit and 64-bit. 2 * Common boot and setup code for both 32-bit and 64-bit.
3 * Extracted from arch/powerpc/kernel/setup_64.c. 3 * Extracted from arch/powerpc/kernel/setup_64.c.
4 * 4 *
5 * Copyright (C) 2001 PPC64 Team, IBM Corp 5 * Copyright (C) 2001 PPC64 Team, IBM Corp
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 */ 11 */
12 12
13 #undef DEBUG 13 #undef DEBUG
14 14
15 #include <linux/module.h> 15 #include <linux/export.h>
16 #include <linux/string.h> 16 #include <linux/string.h>
17 #include <linux/sched.h> 17 #include <linux/sched.h>
18 #include <linux/init.h> 18 #include <linux/init.h>
19 #include <linux/kernel.h> 19 #include <linux/kernel.h>
20 #include <linux/reboot.h> 20 #include <linux/reboot.h>
21 #include <linux/delay.h> 21 #include <linux/delay.h>
22 #include <linux/initrd.h> 22 #include <linux/initrd.h>
23 #include <linux/platform_device.h> 23 #include <linux/platform_device.h>
24 #include <linux/seq_file.h> 24 #include <linux/seq_file.h>
25 #include <linux/ioport.h> 25 #include <linux/ioport.h>
26 #include <linux/console.h> 26 #include <linux/console.h>
27 #include <linux/screen_info.h> 27 #include <linux/screen_info.h>
28 #include <linux/root_dev.h> 28 #include <linux/root_dev.h>
29 #include <linux/notifier.h> 29 #include <linux/notifier.h>
30 #include <linux/cpu.h> 30 #include <linux/cpu.h>
31 #include <linux/unistd.h> 31 #include <linux/unistd.h>
32 #include <linux/serial.h> 32 #include <linux/serial.h>
33 #include <linux/serial_8250.h> 33 #include <linux/serial_8250.h>
34 #include <linux/debugfs.h> 34 #include <linux/debugfs.h>
35 #include <linux/percpu.h> 35 #include <linux/percpu.h>
36 #include <linux/memblock.h> 36 #include <linux/memblock.h>
37 #include <linux/of_platform.h> 37 #include <linux/of_platform.h>
38 #include <asm/io.h> 38 #include <asm/io.h>
39 #include <asm/paca.h> 39 #include <asm/paca.h>
40 #include <asm/prom.h> 40 #include <asm/prom.h>
41 #include <asm/processor.h> 41 #include <asm/processor.h>
42 #include <asm/vdso_datapage.h> 42 #include <asm/vdso_datapage.h>
43 #include <asm/pgtable.h> 43 #include <asm/pgtable.h>
44 #include <asm/smp.h> 44 #include <asm/smp.h>
45 #include <asm/elf.h> 45 #include <asm/elf.h>
46 #include <asm/machdep.h> 46 #include <asm/machdep.h>
47 #include <asm/time.h> 47 #include <asm/time.h>
48 #include <asm/cputable.h> 48 #include <asm/cputable.h>
49 #include <asm/sections.h> 49 #include <asm/sections.h>
50 #include <asm/firmware.h> 50 #include <asm/firmware.h>
51 #include <asm/btext.h> 51 #include <asm/btext.h>
52 #include <asm/nvram.h> 52 #include <asm/nvram.h>
53 #include <asm/setup.h> 53 #include <asm/setup.h>
54 #include <asm/system.h> 54 #include <asm/system.h>
55 #include <asm/rtas.h> 55 #include <asm/rtas.h>
56 #include <asm/iommu.h> 56 #include <asm/iommu.h>
57 #include <asm/serial.h> 57 #include <asm/serial.h>
58 #include <asm/cache.h> 58 #include <asm/cache.h>
59 #include <asm/page.h> 59 #include <asm/page.h>
60 #include <asm/mmu.h> 60 #include <asm/mmu.h>
61 #include <asm/xmon.h> 61 #include <asm/xmon.h>
62 #include <asm/cputhreads.h> 62 #include <asm/cputhreads.h>
63 #include <mm/mmu_decl.h> 63 #include <mm/mmu_decl.h>
64 64
65 #include "setup.h" 65 #include "setup.h"
66 66
67 #ifdef DEBUG 67 #ifdef DEBUG
68 #include <asm/udbg.h> 68 #include <asm/udbg.h>
69 #define DBG(fmt...) udbg_printf(fmt) 69 #define DBG(fmt...) udbg_printf(fmt)
70 #else 70 #else
71 #define DBG(fmt...) 71 #define DBG(fmt...)
72 #endif 72 #endif
73 73
74 /* The main machine-dep calls structure 74 /* The main machine-dep calls structure
75 */ 75 */
76 struct machdep_calls ppc_md; 76 struct machdep_calls ppc_md;
77 EXPORT_SYMBOL(ppc_md); 77 EXPORT_SYMBOL(ppc_md);
78 struct machdep_calls *machine_id; 78 struct machdep_calls *machine_id;
79 EXPORT_SYMBOL(machine_id); 79 EXPORT_SYMBOL(machine_id);
80 80
81 unsigned long klimit = (unsigned long) _end; 81 unsigned long klimit = (unsigned long) _end;
82 82
83 char cmd_line[COMMAND_LINE_SIZE]; 83 char cmd_line[COMMAND_LINE_SIZE];
84 84
85 /* 85 /*
86 * This still seems to be needed... -- paulus 86 * This still seems to be needed... -- paulus
87 */ 87 */
88 struct screen_info screen_info = { 88 struct screen_info screen_info = {
89 .orig_x = 0, 89 .orig_x = 0,
90 .orig_y = 25, 90 .orig_y = 25,
91 .orig_video_cols = 80, 91 .orig_video_cols = 80,
92 .orig_video_lines = 25, 92 .orig_video_lines = 25,
93 .orig_video_isVGA = 1, 93 .orig_video_isVGA = 1,
94 .orig_video_points = 16 94 .orig_video_points = 16
95 }; 95 };
96 96
97 /* Variables required to store legacy IO irq routing */ 97 /* Variables required to store legacy IO irq routing */
98 int of_i8042_kbd_irq; 98 int of_i8042_kbd_irq;
99 EXPORT_SYMBOL_GPL(of_i8042_kbd_irq); 99 EXPORT_SYMBOL_GPL(of_i8042_kbd_irq);
100 int of_i8042_aux_irq; 100 int of_i8042_aux_irq;
101 EXPORT_SYMBOL_GPL(of_i8042_aux_irq); 101 EXPORT_SYMBOL_GPL(of_i8042_aux_irq);
102 102
103 #ifdef __DO_IRQ_CANON 103 #ifdef __DO_IRQ_CANON
104 /* XXX should go elsewhere eventually */ 104 /* XXX should go elsewhere eventually */
105 int ppc_do_canonicalize_irqs; 105 int ppc_do_canonicalize_irqs;
106 EXPORT_SYMBOL(ppc_do_canonicalize_irqs); 106 EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
107 #endif 107 #endif
108 108
109 /* also used by kexec */ 109 /* also used by kexec */
110 void machine_shutdown(void) 110 void machine_shutdown(void)
111 { 111 {
112 if (ppc_md.machine_shutdown) 112 if (ppc_md.machine_shutdown)
113 ppc_md.machine_shutdown(); 113 ppc_md.machine_shutdown();
114 } 114 }
115 115
116 void machine_restart(char *cmd) 116 void machine_restart(char *cmd)
117 { 117 {
118 machine_shutdown(); 118 machine_shutdown();
119 if (ppc_md.restart) 119 if (ppc_md.restart)
120 ppc_md.restart(cmd); 120 ppc_md.restart(cmd);
121 #ifdef CONFIG_SMP 121 #ifdef CONFIG_SMP
122 smp_send_stop(); 122 smp_send_stop();
123 #endif 123 #endif
124 printk(KERN_EMERG "System Halted, OK to turn off power\n"); 124 printk(KERN_EMERG "System Halted, OK to turn off power\n");
125 local_irq_disable(); 125 local_irq_disable();
126 while (1) ; 126 while (1) ;
127 } 127 }
128 128
129 void machine_power_off(void) 129 void machine_power_off(void)
130 { 130 {
131 machine_shutdown(); 131 machine_shutdown();
132 if (ppc_md.power_off) 132 if (ppc_md.power_off)
133 ppc_md.power_off(); 133 ppc_md.power_off();
134 #ifdef CONFIG_SMP 134 #ifdef CONFIG_SMP
135 smp_send_stop(); 135 smp_send_stop();
136 #endif 136 #endif
137 printk(KERN_EMERG "System Halted, OK to turn off power\n"); 137 printk(KERN_EMERG "System Halted, OK to turn off power\n");
138 local_irq_disable(); 138 local_irq_disable();
139 while (1) ; 139 while (1) ;
140 } 140 }
141 /* Used by the G5 thermal driver */ 141 /* Used by the G5 thermal driver */
142 EXPORT_SYMBOL_GPL(machine_power_off); 142 EXPORT_SYMBOL_GPL(machine_power_off);
143 143
144 void (*pm_power_off)(void) = machine_power_off; 144 void (*pm_power_off)(void) = machine_power_off;
145 EXPORT_SYMBOL_GPL(pm_power_off); 145 EXPORT_SYMBOL_GPL(pm_power_off);
146 146
147 void machine_halt(void) 147 void machine_halt(void)
148 { 148 {
149 machine_shutdown(); 149 machine_shutdown();
150 if (ppc_md.halt) 150 if (ppc_md.halt)
151 ppc_md.halt(); 151 ppc_md.halt();
152 #ifdef CONFIG_SMP 152 #ifdef CONFIG_SMP
153 smp_send_stop(); 153 smp_send_stop();
154 #endif 154 #endif
155 printk(KERN_EMERG "System Halted, OK to turn off power\n"); 155 printk(KERN_EMERG "System Halted, OK to turn off power\n");
156 local_irq_disable(); 156 local_irq_disable();
157 while (1) ; 157 while (1) ;
158 } 158 }
159 159
160 160
161 #ifdef CONFIG_TAU 161 #ifdef CONFIG_TAU
162 extern u32 cpu_temp(unsigned long cpu); 162 extern u32 cpu_temp(unsigned long cpu);
163 extern u32 cpu_temp_both(unsigned long cpu); 163 extern u32 cpu_temp_both(unsigned long cpu);
164 #endif /* CONFIG_TAU */ 164 #endif /* CONFIG_TAU */
165 165
166 #ifdef CONFIG_SMP 166 #ifdef CONFIG_SMP
167 DEFINE_PER_CPU(unsigned int, cpu_pvr); 167 DEFINE_PER_CPU(unsigned int, cpu_pvr);
168 #endif 168 #endif
169 169
170 static void show_cpuinfo_summary(struct seq_file *m) 170 static void show_cpuinfo_summary(struct seq_file *m)
171 { 171 {
172 struct device_node *root; 172 struct device_node *root;
173 const char *model = NULL; 173 const char *model = NULL;
174 #if defined(CONFIG_SMP) && defined(CONFIG_PPC32) 174 #if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
175 unsigned long bogosum = 0; 175 unsigned long bogosum = 0;
176 int i; 176 int i;
177 for_each_online_cpu(i) 177 for_each_online_cpu(i)
178 bogosum += loops_per_jiffy; 178 bogosum += loops_per_jiffy;
179 seq_printf(m, "total bogomips\t: %lu.%02lu\n", 179 seq_printf(m, "total bogomips\t: %lu.%02lu\n",
180 bogosum/(500000/HZ), bogosum/(5000/HZ) % 100); 180 bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
181 #endif /* CONFIG_SMP && CONFIG_PPC32 */ 181 #endif /* CONFIG_SMP && CONFIG_PPC32 */
182 seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq); 182 seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
183 if (ppc_md.name) 183 if (ppc_md.name)
184 seq_printf(m, "platform\t: %s\n", ppc_md.name); 184 seq_printf(m, "platform\t: %s\n", ppc_md.name);
185 root = of_find_node_by_path("/"); 185 root = of_find_node_by_path("/");
186 if (root) 186 if (root)
187 model = of_get_property(root, "model", NULL); 187 model = of_get_property(root, "model", NULL);
188 if (model) 188 if (model)
189 seq_printf(m, "model\t\t: %s\n", model); 189 seq_printf(m, "model\t\t: %s\n", model);
190 of_node_put(root); 190 of_node_put(root);
191 191
192 if (ppc_md.show_cpuinfo != NULL) 192 if (ppc_md.show_cpuinfo != NULL)
193 ppc_md.show_cpuinfo(m); 193 ppc_md.show_cpuinfo(m);
194 194
195 #ifdef CONFIG_PPC32 195 #ifdef CONFIG_PPC32
196 /* Display the amount of memory */ 196 /* Display the amount of memory */
197 seq_printf(m, "Memory\t\t: %d MB\n", 197 seq_printf(m, "Memory\t\t: %d MB\n",
198 (unsigned int)(total_memory / (1024 * 1024))); 198 (unsigned int)(total_memory / (1024 * 1024)));
199 #endif 199 #endif
200 } 200 }
201 201
202 static int show_cpuinfo(struct seq_file *m, void *v) 202 static int show_cpuinfo(struct seq_file *m, void *v)
203 { 203 {
204 unsigned long cpu_id = (unsigned long)v - 1; 204 unsigned long cpu_id = (unsigned long)v - 1;
205 unsigned int pvr; 205 unsigned int pvr;
206 unsigned short maj; 206 unsigned short maj;
207 unsigned short min; 207 unsigned short min;
208 208
209 /* We only show online cpus: disable preempt (overzealous, I 209 /* We only show online cpus: disable preempt (overzealous, I
210 * knew) to prevent cpu going down. */ 210 * knew) to prevent cpu going down. */
211 preempt_disable(); 211 preempt_disable();
212 if (!cpu_online(cpu_id)) { 212 if (!cpu_online(cpu_id)) {
213 preempt_enable(); 213 preempt_enable();
214 return 0; 214 return 0;
215 } 215 }
216 216
217 #ifdef CONFIG_SMP 217 #ifdef CONFIG_SMP
218 pvr = per_cpu(cpu_pvr, cpu_id); 218 pvr = per_cpu(cpu_pvr, cpu_id);
219 #else 219 #else
220 pvr = mfspr(SPRN_PVR); 220 pvr = mfspr(SPRN_PVR);
221 #endif 221 #endif
222 maj = (pvr >> 8) & 0xFF; 222 maj = (pvr >> 8) & 0xFF;
223 min = pvr & 0xFF; 223 min = pvr & 0xFF;
224 224
225 seq_printf(m, "processor\t: %lu\n", cpu_id); 225 seq_printf(m, "processor\t: %lu\n", cpu_id);
226 seq_printf(m, "cpu\t\t: "); 226 seq_printf(m, "cpu\t\t: ");
227 227
228 if (cur_cpu_spec->pvr_mask) 228 if (cur_cpu_spec->pvr_mask)
229 seq_printf(m, "%s", cur_cpu_spec->cpu_name); 229 seq_printf(m, "%s", cur_cpu_spec->cpu_name);
230 else 230 else
231 seq_printf(m, "unknown (%08x)", pvr); 231 seq_printf(m, "unknown (%08x)", pvr);
232 232
233 #ifdef CONFIG_ALTIVEC 233 #ifdef CONFIG_ALTIVEC
234 if (cpu_has_feature(CPU_FTR_ALTIVEC)) 234 if (cpu_has_feature(CPU_FTR_ALTIVEC))
235 seq_printf(m, ", altivec supported"); 235 seq_printf(m, ", altivec supported");
236 #endif /* CONFIG_ALTIVEC */ 236 #endif /* CONFIG_ALTIVEC */
237 237
238 seq_printf(m, "\n"); 238 seq_printf(m, "\n");
239 239
240 #ifdef CONFIG_TAU 240 #ifdef CONFIG_TAU
241 if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) { 241 if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) {
242 #ifdef CONFIG_TAU_AVERAGE 242 #ifdef CONFIG_TAU_AVERAGE
243 /* more straightforward, but potentially misleading */ 243 /* more straightforward, but potentially misleading */
244 seq_printf(m, "temperature \t: %u C (uncalibrated)\n", 244 seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
245 cpu_temp(cpu_id)); 245 cpu_temp(cpu_id));
246 #else 246 #else
247 /* show the actual temp sensor range */ 247 /* show the actual temp sensor range */
248 u32 temp; 248 u32 temp;
249 temp = cpu_temp_both(cpu_id); 249 temp = cpu_temp_both(cpu_id);
250 seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n", 250 seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
251 temp & 0xff, temp >> 16); 251 temp & 0xff, temp >> 16);
252 #endif 252 #endif
253 } 253 }
254 #endif /* CONFIG_TAU */ 254 #endif /* CONFIG_TAU */
255 255
256 /* 256 /*
257 * Assume here that all clock rates are the same in a 257 * Assume here that all clock rates are the same in a
258 * smp system. -- Cort 258 * smp system. -- Cort
259 */ 259 */
260 if (ppc_proc_freq) 260 if (ppc_proc_freq)
261 seq_printf(m, "clock\t\t: %lu.%06luMHz\n", 261 seq_printf(m, "clock\t\t: %lu.%06luMHz\n",
262 ppc_proc_freq / 1000000, ppc_proc_freq % 1000000); 262 ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
263 263
264 if (ppc_md.show_percpuinfo != NULL) 264 if (ppc_md.show_percpuinfo != NULL)
265 ppc_md.show_percpuinfo(m, cpu_id); 265 ppc_md.show_percpuinfo(m, cpu_id);
266 266
267 /* If we are a Freescale core do a simple check so 267 /* If we are a Freescale core do a simple check so
268 * we don't have to keep adding cases in the future */ 268 * we don't have to keep adding cases in the future */
269 if (PVR_VER(pvr) & 0x8000) { 269 if (PVR_VER(pvr) & 0x8000) {
270 switch (PVR_VER(pvr)) { 270 switch (PVR_VER(pvr)) {
271 case 0x8000: /* 7441/7450/7451, Voyager */ 271 case 0x8000: /* 7441/7450/7451, Voyager */
272 case 0x8001: /* 7445/7455, Apollo 6 */ 272 case 0x8001: /* 7445/7455, Apollo 6 */
273 case 0x8002: /* 7447/7457, Apollo 7 */ 273 case 0x8002: /* 7447/7457, Apollo 7 */
274 case 0x8003: /* 7447A, Apollo 7 PM */ 274 case 0x8003: /* 7447A, Apollo 7 PM */
275 case 0x8004: /* 7448, Apollo 8 */ 275 case 0x8004: /* 7448, Apollo 8 */
276 case 0x800c: /* 7410, Nitro */ 276 case 0x800c: /* 7410, Nitro */
277 maj = ((pvr >> 8) & 0xF); 277 maj = ((pvr >> 8) & 0xF);
278 min = PVR_MIN(pvr); 278 min = PVR_MIN(pvr);
279 break; 279 break;
280 default: /* e500/book-e */ 280 default: /* e500/book-e */
281 maj = PVR_MAJ(pvr); 281 maj = PVR_MAJ(pvr);
282 min = PVR_MIN(pvr); 282 min = PVR_MIN(pvr);
283 break; 283 break;
284 } 284 }
285 } else { 285 } else {
286 switch (PVR_VER(pvr)) { 286 switch (PVR_VER(pvr)) {
287 case 0x0020: /* 403 family */ 287 case 0x0020: /* 403 family */
288 maj = PVR_MAJ(pvr) + 1; 288 maj = PVR_MAJ(pvr) + 1;
289 min = PVR_MIN(pvr); 289 min = PVR_MIN(pvr);
290 break; 290 break;
291 case 0x1008: /* 740P/750P ?? */ 291 case 0x1008: /* 740P/750P ?? */
292 maj = ((pvr >> 8) & 0xFF) - 1; 292 maj = ((pvr >> 8) & 0xFF) - 1;
293 min = pvr & 0xFF; 293 min = pvr & 0xFF;
294 break; 294 break;
295 default: 295 default:
296 maj = (pvr >> 8) & 0xFF; 296 maj = (pvr >> 8) & 0xFF;
297 min = pvr & 0xFF; 297 min = pvr & 0xFF;
298 break; 298 break;
299 } 299 }
300 } 300 }
301 301
302 seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n", 302 seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n",
303 maj, min, PVR_VER(pvr), PVR_REV(pvr)); 303 maj, min, PVR_VER(pvr), PVR_REV(pvr));
304 304
305 #ifdef CONFIG_PPC32 305 #ifdef CONFIG_PPC32
306 seq_printf(m, "bogomips\t: %lu.%02lu\n", 306 seq_printf(m, "bogomips\t: %lu.%02lu\n",
307 loops_per_jiffy / (500000/HZ), 307 loops_per_jiffy / (500000/HZ),
308 (loops_per_jiffy / (5000/HZ)) % 100); 308 (loops_per_jiffy / (5000/HZ)) % 100);
309 #endif 309 #endif
310 310
311 #ifdef CONFIG_SMP 311 #ifdef CONFIG_SMP
312 seq_printf(m, "\n"); 312 seq_printf(m, "\n");
313 #endif 313 #endif
314 314
315 preempt_enable(); 315 preempt_enable();
316 316
317 /* If this is the last cpu, print the summary */ 317 /* If this is the last cpu, print the summary */
318 if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids) 318 if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
319 show_cpuinfo_summary(m); 319 show_cpuinfo_summary(m);
320 320
321 return 0; 321 return 0;
322 } 322 }
323 323
324 static void *c_start(struct seq_file *m, loff_t *pos) 324 static void *c_start(struct seq_file *m, loff_t *pos)
325 { 325 {
326 if (*pos == 0) /* just in case, cpu 0 is not the first */ 326 if (*pos == 0) /* just in case, cpu 0 is not the first */
327 *pos = cpumask_first(cpu_online_mask); 327 *pos = cpumask_first(cpu_online_mask);
328 else 328 else
329 *pos = cpumask_next(*pos - 1, cpu_online_mask); 329 *pos = cpumask_next(*pos - 1, cpu_online_mask);
330 if ((*pos) < nr_cpu_ids) 330 if ((*pos) < nr_cpu_ids)
331 return (void *)(unsigned long)(*pos + 1); 331 return (void *)(unsigned long)(*pos + 1);
332 return NULL; 332 return NULL;
333 } 333 }
334 334
335 static void *c_next(struct seq_file *m, void *v, loff_t *pos) 335 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
336 { 336 {
337 (*pos)++; 337 (*pos)++;
338 return c_start(m, pos); 338 return c_start(m, pos);
339 } 339 }
340 340
341 static void c_stop(struct seq_file *m, void *v) 341 static void c_stop(struct seq_file *m, void *v)
342 { 342 {
343 } 343 }
344 344
345 const struct seq_operations cpuinfo_op = { 345 const struct seq_operations cpuinfo_op = {
346 .start =c_start, 346 .start =c_start,
347 .next = c_next, 347 .next = c_next,
348 .stop = c_stop, 348 .stop = c_stop,
349 .show = show_cpuinfo, 349 .show = show_cpuinfo,
350 }; 350 };
351 351
352 void __init check_for_initrd(void) 352 void __init check_for_initrd(void)
353 { 353 {
354 #ifdef CONFIG_BLK_DEV_INITRD 354 #ifdef CONFIG_BLK_DEV_INITRD
355 DBG(" -> check_for_initrd() initrd_start=0x%lx initrd_end=0x%lx\n", 355 DBG(" -> check_for_initrd() initrd_start=0x%lx initrd_end=0x%lx\n",
356 initrd_start, initrd_end); 356 initrd_start, initrd_end);
357 357
358 /* If we were passed an initrd, set the ROOT_DEV properly if the values 358 /* If we were passed an initrd, set the ROOT_DEV properly if the values
359 * look sensible. If not, clear initrd reference. 359 * look sensible. If not, clear initrd reference.
360 */ 360 */
361 if (is_kernel_addr(initrd_start) && is_kernel_addr(initrd_end) && 361 if (is_kernel_addr(initrd_start) && is_kernel_addr(initrd_end) &&
362 initrd_end > initrd_start) 362 initrd_end > initrd_start)
363 ROOT_DEV = Root_RAM0; 363 ROOT_DEV = Root_RAM0;
364 else 364 else
365 initrd_start = initrd_end = 0; 365 initrd_start = initrd_end = 0;
366 366
367 if (initrd_start) 367 if (initrd_start)
368 printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end); 368 printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);
369 369
370 DBG(" <- check_for_initrd()\n"); 370 DBG(" <- check_for_initrd()\n");
371 #endif /* CONFIG_BLK_DEV_INITRD */ 371 #endif /* CONFIG_BLK_DEV_INITRD */
372 } 372 }
373 373
374 #ifdef CONFIG_SMP 374 #ifdef CONFIG_SMP
375 375
376 int threads_per_core, threads_shift; 376 int threads_per_core, threads_shift;
377 cpumask_t threads_core_mask; 377 cpumask_t threads_core_mask;
378 EXPORT_SYMBOL_GPL(threads_per_core); 378 EXPORT_SYMBOL_GPL(threads_per_core);
379 EXPORT_SYMBOL_GPL(threads_shift); 379 EXPORT_SYMBOL_GPL(threads_shift);
380 EXPORT_SYMBOL_GPL(threads_core_mask); 380 EXPORT_SYMBOL_GPL(threads_core_mask);
381 381
382 static void __init cpu_init_thread_core_maps(int tpc) 382 static void __init cpu_init_thread_core_maps(int tpc)
383 { 383 {
384 int i; 384 int i;
385 385
386 threads_per_core = tpc; 386 threads_per_core = tpc;
387 cpumask_clear(&threads_core_mask); 387 cpumask_clear(&threads_core_mask);
388 388
389 /* This implementation only supports power of 2 number of threads 389 /* This implementation only supports power of 2 number of threads
390 * for simplicity and performance 390 * for simplicity and performance
391 */ 391 */
392 threads_shift = ilog2(tpc); 392 threads_shift = ilog2(tpc);
393 BUG_ON(tpc != (1 << threads_shift)); 393 BUG_ON(tpc != (1 << threads_shift));
394 394
395 for (i = 0; i < tpc; i++) 395 for (i = 0; i < tpc; i++)
396 cpumask_set_cpu(i, &threads_core_mask); 396 cpumask_set_cpu(i, &threads_core_mask);
397 397
398 printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n", 398 printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
399 tpc, tpc > 1 ? "s" : ""); 399 tpc, tpc > 1 ? "s" : "");
400 printk(KERN_DEBUG " (thread shift is %d)\n", threads_shift); 400 printk(KERN_DEBUG " (thread shift is %d)\n", threads_shift);
401 } 401 }
402 402
403 403
404 /** 404 /**
405 * setup_cpu_maps - initialize the following cpu maps: 405 * setup_cpu_maps - initialize the following cpu maps:
406 * cpu_possible_mask 406 * cpu_possible_mask
407 * cpu_present_mask 407 * cpu_present_mask
408 * 408 *
409 * Having the possible map set up early allows us to restrict allocations 409 * Having the possible map set up early allows us to restrict allocations
410 * of things like irqstacks to nr_cpu_ids rather than NR_CPUS. 410 * of things like irqstacks to nr_cpu_ids rather than NR_CPUS.
411 * 411 *
412 * We do not initialize the online map here; cpus set their own bits in 412 * We do not initialize the online map here; cpus set their own bits in
413 * cpu_online_mask as they come up. 413 * cpu_online_mask as they come up.
414 * 414 *
415 * This function is valid only for Open Firmware systems. finish_device_tree 415 * This function is valid only for Open Firmware systems. finish_device_tree
416 * must be called before using this. 416 * must be called before using this.
417 * 417 *
418 * While we're here, we may as well set the "physical" cpu ids in the paca. 418 * While we're here, we may as well set the "physical" cpu ids in the paca.
419 * 419 *
420 * NOTE: This must match the parsing done in early_init_dt_scan_cpus. 420 * NOTE: This must match the parsing done in early_init_dt_scan_cpus.
421 */ 421 */
422 void __init smp_setup_cpu_maps(void) 422 void __init smp_setup_cpu_maps(void)
423 { 423 {
424 struct device_node *dn = NULL; 424 struct device_node *dn = NULL;
425 int cpu = 0; 425 int cpu = 0;
426 int nthreads = 1; 426 int nthreads = 1;
427 427
428 DBG("smp_setup_cpu_maps()\n"); 428 DBG("smp_setup_cpu_maps()\n");
429 429
430 while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < nr_cpu_ids) { 430 while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < nr_cpu_ids) {
431 const int *intserv; 431 const int *intserv;
432 int j, len; 432 int j, len;
433 433
434 DBG(" * %s...\n", dn->full_name); 434 DBG(" * %s...\n", dn->full_name);
435 435
436 intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", 436 intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s",
437 &len); 437 &len);
438 if (intserv) { 438 if (intserv) {
439 nthreads = len / sizeof(int); 439 nthreads = len / sizeof(int);
440 DBG(" ibm,ppc-interrupt-server#s -> %d threads\n", 440 DBG(" ibm,ppc-interrupt-server#s -> %d threads\n",
441 nthreads); 441 nthreads);
442 } else { 442 } else {
443 DBG(" no ibm,ppc-interrupt-server#s -> 1 thread\n"); 443 DBG(" no ibm,ppc-interrupt-server#s -> 1 thread\n");
444 intserv = of_get_property(dn, "reg", NULL); 444 intserv = of_get_property(dn, "reg", NULL);
445 if (!intserv) 445 if (!intserv)
446 intserv = &cpu; /* assume logical == phys */ 446 intserv = &cpu; /* assume logical == phys */
447 } 447 }
448 448
449 for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) { 449 for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
450 DBG(" thread %d -> cpu %d (hard id %d)\n", 450 DBG(" thread %d -> cpu %d (hard id %d)\n",
451 j, cpu, intserv[j]); 451 j, cpu, intserv[j]);
452 set_cpu_present(cpu, true); 452 set_cpu_present(cpu, true);
453 set_hard_smp_processor_id(cpu, intserv[j]); 453 set_hard_smp_processor_id(cpu, intserv[j]);
454 set_cpu_possible(cpu, true); 454 set_cpu_possible(cpu, true);
455 cpu++; 455 cpu++;
456 } 456 }
457 } 457 }
458 458
459 /* If no SMT supported, nthreads is forced to 1 */ 459 /* If no SMT supported, nthreads is forced to 1 */
460 if (!cpu_has_feature(CPU_FTR_SMT)) { 460 if (!cpu_has_feature(CPU_FTR_SMT)) {
461 DBG(" SMT disabled ! nthreads forced to 1\n"); 461 DBG(" SMT disabled ! nthreads forced to 1\n");
462 nthreads = 1; 462 nthreads = 1;
463 } 463 }
464 464
465 #ifdef CONFIG_PPC64 465 #ifdef CONFIG_PPC64
466 /* 466 /*
467 * On pSeries LPAR, we need to know how many cpus 467 * On pSeries LPAR, we need to know how many cpus
468 * could possibly be added to this partition. 468 * could possibly be added to this partition.
469 */ 469 */
470 if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) && 470 if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) &&
471 (dn = of_find_node_by_path("/rtas"))) { 471 (dn = of_find_node_by_path("/rtas"))) {
472 int num_addr_cell, num_size_cell, maxcpus; 472 int num_addr_cell, num_size_cell, maxcpus;
473 const unsigned int *ireg; 473 const unsigned int *ireg;
474 474
475 num_addr_cell = of_n_addr_cells(dn); 475 num_addr_cell = of_n_addr_cells(dn);
476 num_size_cell = of_n_size_cells(dn); 476 num_size_cell = of_n_size_cells(dn);
477 477
478 ireg = of_get_property(dn, "ibm,lrdr-capacity", NULL); 478 ireg = of_get_property(dn, "ibm,lrdr-capacity", NULL);
479 479
480 if (!ireg) 480 if (!ireg)
481 goto out; 481 goto out;
482 482
483 maxcpus = ireg[num_addr_cell + num_size_cell]; 483 maxcpus = ireg[num_addr_cell + num_size_cell];
484 484
485 /* Double maxcpus for processors which have SMT capability */ 485 /* Double maxcpus for processors which have SMT capability */
486 if (cpu_has_feature(CPU_FTR_SMT)) 486 if (cpu_has_feature(CPU_FTR_SMT))
487 maxcpus *= nthreads; 487 maxcpus *= nthreads;
488 488
489 if (maxcpus > nr_cpu_ids) { 489 if (maxcpus > nr_cpu_ids) {
490 printk(KERN_WARNING 490 printk(KERN_WARNING
491 "Partition configured for %d cpus, " 491 "Partition configured for %d cpus, "
492 "operating system maximum is %d.\n", 492 "operating system maximum is %d.\n",
493 maxcpus, nr_cpu_ids); 493 maxcpus, nr_cpu_ids);
494 maxcpus = nr_cpu_ids; 494 maxcpus = nr_cpu_ids;
495 } else 495 } else
496 printk(KERN_INFO "Partition configured for %d cpus.\n", 496 printk(KERN_INFO "Partition configured for %d cpus.\n",
497 maxcpus); 497 maxcpus);
498 498
499 for (cpu = 0; cpu < maxcpus; cpu++) 499 for (cpu = 0; cpu < maxcpus; cpu++)
500 set_cpu_possible(cpu, true); 500 set_cpu_possible(cpu, true);
501 out: 501 out:
502 of_node_put(dn); 502 of_node_put(dn);
503 } 503 }
504 vdso_data->processorCount = num_present_cpus(); 504 vdso_data->processorCount = num_present_cpus();
505 #endif /* CONFIG_PPC64 */ 505 #endif /* CONFIG_PPC64 */
506 506
507 /* Initialize CPU <=> thread mapping. 507 /* Initialize CPU <=> thread mapping.
508 * 508 *
509 * WARNING: We assume that the number of threads is the same for 509 * WARNING: We assume that the number of threads is the same for
510 * every CPU in the system. If that is not the case, then some code 510 * every CPU in the system. If that is not the case, then some code
511 * here will have to be reworked 511 * here will have to be reworked
512 */ 512 */
513 cpu_init_thread_core_maps(nthreads); 513 cpu_init_thread_core_maps(nthreads);
514 514
515 /* Now that possible cpus are set, set nr_cpu_ids for later use */ 515 /* Now that possible cpus are set, set nr_cpu_ids for later use */
516 setup_nr_cpu_ids(); 516 setup_nr_cpu_ids();
517 517
518 free_unused_pacas(); 518 free_unused_pacas();
519 } 519 }
520 #endif /* CONFIG_SMP */ 520 #endif /* CONFIG_SMP */
521 521
522 #ifdef CONFIG_PCSPKR_PLATFORM 522 #ifdef CONFIG_PCSPKR_PLATFORM
523 static __init int add_pcspkr(void) 523 static __init int add_pcspkr(void)
524 { 524 {
525 struct device_node *np; 525 struct device_node *np;
526 struct platform_device *pd; 526 struct platform_device *pd;
527 int ret; 527 int ret;
528 528
529 np = of_find_compatible_node(NULL, NULL, "pnpPNP,100"); 529 np = of_find_compatible_node(NULL, NULL, "pnpPNP,100");
530 of_node_put(np); 530 of_node_put(np);
531 if (!np) 531 if (!np)
532 return -ENODEV; 532 return -ENODEV;
533 533
534 pd = platform_device_alloc("pcspkr", -1); 534 pd = platform_device_alloc("pcspkr", -1);
535 if (!pd) 535 if (!pd)
536 return -ENOMEM; 536 return -ENOMEM;
537 537
538 ret = platform_device_add(pd); 538 ret = platform_device_add(pd);
539 if (ret) 539 if (ret)
540 platform_device_put(pd); 540 platform_device_put(pd);
541 541
542 return ret; 542 return ret;
543 } 543 }
544 device_initcall(add_pcspkr); 544 device_initcall(add_pcspkr);
545 #endif /* CONFIG_PCSPKR_PLATFORM */ 545 #endif /* CONFIG_PCSPKR_PLATFORM */
546 546
547 void probe_machine(void) 547 void probe_machine(void)
548 { 548 {
549 extern struct machdep_calls __machine_desc_start; 549 extern struct machdep_calls __machine_desc_start;
550 extern struct machdep_calls __machine_desc_end; 550 extern struct machdep_calls __machine_desc_end;
551 551
552 /* 552 /*
553 * Iterate all ppc_md structures until we find the proper 553 * Iterate all ppc_md structures until we find the proper
554 * one for the current machine type 554 * one for the current machine type
555 */ 555 */
556 DBG("Probing machine type ...\n"); 556 DBG("Probing machine type ...\n");
557 557
558 for (machine_id = &__machine_desc_start; 558 for (machine_id = &__machine_desc_start;
559 machine_id < &__machine_desc_end; 559 machine_id < &__machine_desc_end;
560 machine_id++) { 560 machine_id++) {
561 DBG(" %s ...", machine_id->name); 561 DBG(" %s ...", machine_id->name);
562 memcpy(&ppc_md, machine_id, sizeof(struct machdep_calls)); 562 memcpy(&ppc_md, machine_id, sizeof(struct machdep_calls));
563 if (ppc_md.probe()) { 563 if (ppc_md.probe()) {
564 DBG(" match !\n"); 564 DBG(" match !\n");
565 break; 565 break;
566 } 566 }
567 DBG("\n"); 567 DBG("\n");
568 } 568 }
569 /* What can we do if we didn't find ? */ 569 /* What can we do if we didn't find ? */
570 if (machine_id >= &__machine_desc_end) { 570 if (machine_id >= &__machine_desc_end) {
571 DBG("No suitable machine found !\n"); 571 DBG("No suitable machine found !\n");
572 for (;;); 572 for (;;);
573 } 573 }
574 574
575 printk(KERN_INFO "Using %s machine description\n", ppc_md.name); 575 printk(KERN_INFO "Using %s machine description\n", ppc_md.name);
576 } 576 }
577 577
578 /* Match a class of boards, not a specific device configuration. */ 578 /* Match a class of boards, not a specific device configuration. */
579 int check_legacy_ioport(unsigned long base_port) 579 int check_legacy_ioport(unsigned long base_port)
580 { 580 {
581 struct device_node *parent, *np = NULL; 581 struct device_node *parent, *np = NULL;
582 int ret = -ENODEV; 582 int ret = -ENODEV;
583 583
584 switch(base_port) { 584 switch(base_port) {
585 case I8042_DATA_REG: 585 case I8042_DATA_REG:
586 if (!(np = of_find_compatible_node(NULL, NULL, "pnpPNP,303"))) 586 if (!(np = of_find_compatible_node(NULL, NULL, "pnpPNP,303")))
587 np = of_find_compatible_node(NULL, NULL, "pnpPNP,f03"); 587 np = of_find_compatible_node(NULL, NULL, "pnpPNP,f03");
588 if (np) { 588 if (np) {
589 parent = of_get_parent(np); 589 parent = of_get_parent(np);
590 590
591 of_i8042_kbd_irq = irq_of_parse_and_map(parent, 0); 591 of_i8042_kbd_irq = irq_of_parse_and_map(parent, 0);
592 if (!of_i8042_kbd_irq) 592 if (!of_i8042_kbd_irq)
593 of_i8042_kbd_irq = 1; 593 of_i8042_kbd_irq = 1;
594 594
595 of_i8042_aux_irq = irq_of_parse_and_map(parent, 1); 595 of_i8042_aux_irq = irq_of_parse_and_map(parent, 1);
596 if (!of_i8042_aux_irq) 596 if (!of_i8042_aux_irq)
597 of_i8042_aux_irq = 12; 597 of_i8042_aux_irq = 12;
598 598
599 of_node_put(np); 599 of_node_put(np);
600 np = parent; 600 np = parent;
601 break; 601 break;
602 } 602 }
603 np = of_find_node_by_type(NULL, "8042"); 603 np = of_find_node_by_type(NULL, "8042");
604 /* Pegasos has no device_type on its 8042 node, look for the 604 /* Pegasos has no device_type on its 8042 node, look for the
605 * name instead */ 605 * name instead */
606 if (!np) 606 if (!np)
607 np = of_find_node_by_name(NULL, "8042"); 607 np = of_find_node_by_name(NULL, "8042");
608 if (np) { 608 if (np) {
609 of_i8042_kbd_irq = 1; 609 of_i8042_kbd_irq = 1;
610 of_i8042_aux_irq = 12; 610 of_i8042_aux_irq = 12;
611 } 611 }
612 break; 612 break;
613 case FDC_BASE: /* FDC1 */ 613 case FDC_BASE: /* FDC1 */
614 np = of_find_node_by_type(NULL, "fdc"); 614 np = of_find_node_by_type(NULL, "fdc");
615 break; 615 break;
616 #ifdef CONFIG_PPC_PREP 616 #ifdef CONFIG_PPC_PREP
617 case _PIDXR: 617 case _PIDXR:
618 case _PNPWRP: 618 case _PNPWRP:
619 case PNPBIOS_BASE: 619 case PNPBIOS_BASE:
620 /* implement me */ 620 /* implement me */
621 #endif 621 #endif
622 default: 622 default:
623 /* ipmi is supposed to fail here */ 623 /* ipmi is supposed to fail here */
624 break; 624 break;
625 } 625 }
626 if (!np) 626 if (!np)
627 return ret; 627 return ret;
628 parent = of_get_parent(np); 628 parent = of_get_parent(np);
629 if (parent) { 629 if (parent) {
630 if (strcmp(parent->type, "isa") == 0) 630 if (strcmp(parent->type, "isa") == 0)
631 ret = 0; 631 ret = 0;
632 of_node_put(parent); 632 of_node_put(parent);
633 } 633 }
634 of_node_put(np); 634 of_node_put(np);
635 return ret; 635 return ret;
636 } 636 }
637 EXPORT_SYMBOL(check_legacy_ioport); 637 EXPORT_SYMBOL(check_legacy_ioport);
638 638
639 static int ppc_panic_event(struct notifier_block *this, 639 static int ppc_panic_event(struct notifier_block *this,
640 unsigned long event, void *ptr) 640 unsigned long event, void *ptr)
641 { 641 {
642 ppc_md.panic(ptr); /* May not return */ 642 ppc_md.panic(ptr); /* May not return */
643 return NOTIFY_DONE; 643 return NOTIFY_DONE;
644 } 644 }
645 645
646 static struct notifier_block ppc_panic_block = { 646 static struct notifier_block ppc_panic_block = {
647 .notifier_call = ppc_panic_event, 647 .notifier_call = ppc_panic_event,
648 .priority = INT_MIN /* may not return; must be done last */ 648 .priority = INT_MIN /* may not return; must be done last */
649 }; 649 };
650 650
651 void __init setup_panic(void) 651 void __init setup_panic(void)
652 { 652 {
653 atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block); 653 atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
654 } 654 }
655 655
656 #ifdef CONFIG_CHECK_CACHE_COHERENCY 656 #ifdef CONFIG_CHECK_CACHE_COHERENCY
657 /* 657 /*
658 * For platforms that have configurable cache-coherency. This function 658 * For platforms that have configurable cache-coherency. This function
659 * checks that the cache coherency setting of the kernel matches the setting 659 * checks that the cache coherency setting of the kernel matches the setting
660 * left by the firmware, as indicated in the device tree. Since a mismatch 660 * left by the firmware, as indicated in the device tree. Since a mismatch
661 * will eventually result in DMA failures, we print an error and call 661 * will eventually result in DMA failures, we print an error and call
662 * BUG() in that case. 662 * BUG() in that case.
663 */ 663 */
664 664
665 #ifdef CONFIG_NOT_COHERENT_CACHE 665 #ifdef CONFIG_NOT_COHERENT_CACHE
666 #define KERNEL_COHERENCY 0 666 #define KERNEL_COHERENCY 0
667 #else 667 #else
668 #define KERNEL_COHERENCY 1 668 #define KERNEL_COHERENCY 1
669 #endif 669 #endif
670 670
671 static int __init check_cache_coherency(void) 671 static int __init check_cache_coherency(void)
672 { 672 {
673 struct device_node *np; 673 struct device_node *np;
674 const void *prop; 674 const void *prop;
675 int devtree_coherency; 675 int devtree_coherency;
676 676
677 np = of_find_node_by_path("/"); 677 np = of_find_node_by_path("/");
678 prop = of_get_property(np, "coherency-off", NULL); 678 prop = of_get_property(np, "coherency-off", NULL);
679 of_node_put(np); 679 of_node_put(np);
680 680
681 devtree_coherency = prop ? 0 : 1; 681 devtree_coherency = prop ? 0 : 1;
682 682
683 if (devtree_coherency != KERNEL_COHERENCY) { 683 if (devtree_coherency != KERNEL_COHERENCY) {
684 printk(KERN_ERR 684 printk(KERN_ERR
685 "kernel coherency:%s != device tree_coherency:%s\n", 685 "kernel coherency:%s != device tree_coherency:%s\n",
686 KERNEL_COHERENCY ? "on" : "off", 686 KERNEL_COHERENCY ? "on" : "off",
687 devtree_coherency ? "on" : "off"); 687 devtree_coherency ? "on" : "off");
688 BUG(); 688 BUG();
689 } 689 }
690 690
691 return 0; 691 return 0;
692 } 692 }
693 693
694 late_initcall(check_cache_coherency); 694 late_initcall(check_cache_coherency);
695 #endif /* CONFIG_CHECK_CACHE_COHERENCY */ 695 #endif /* CONFIG_CHECK_CACHE_COHERENCY */
696 696
697 #ifdef CONFIG_DEBUG_FS 697 #ifdef CONFIG_DEBUG_FS
698 struct dentry *powerpc_debugfs_root; 698 struct dentry *powerpc_debugfs_root;
699 EXPORT_SYMBOL(powerpc_debugfs_root); 699 EXPORT_SYMBOL(powerpc_debugfs_root);
700 700
701 static int powerpc_debugfs_init(void) 701 static int powerpc_debugfs_init(void)
702 { 702 {
703 powerpc_debugfs_root = debugfs_create_dir("powerpc", NULL); 703 powerpc_debugfs_root = debugfs_create_dir("powerpc", NULL);
704 704
705 return powerpc_debugfs_root == NULL; 705 return powerpc_debugfs_root == NULL;
706 } 706 }
707 arch_initcall(powerpc_debugfs_init); 707 arch_initcall(powerpc_debugfs_init);
708 #endif 708 #endif
709 709
710 void ppc_printk_progress(char *s, unsigned short hex) 710 void ppc_printk_progress(char *s, unsigned short hex)
711 { 711 {
712 pr_info("%s\n", s); 712 pr_info("%s\n", s);
713 } 713 }
714 714
715 void arch_setup_pdev_archdata(struct platform_device *pdev) 715 void arch_setup_pdev_archdata(struct platform_device *pdev)
716 { 716 {
717 pdev->archdata.dma_mask = DMA_BIT_MASK(32); 717 pdev->archdata.dma_mask = DMA_BIT_MASK(32);
718 pdev->dev.dma_mask = &pdev->archdata.dma_mask; 718 pdev->dev.dma_mask = &pdev->archdata.dma_mask;
719 set_dma_ops(&pdev->dev, &dma_direct_ops); 719 set_dma_ops(&pdev->dev, &dma_direct_ops);
720 } 720 }
721 721
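In setup-common.c the only change is the include swap at the top of the file: its module-related usage is limited to the EXPORT_SYMBOL()/EXPORT_SYMBOL_GPL() macros (ppc_md, machine_id, of_i8042_kbd_irq, threads_per_core, check_legacy_ioport, and so on), and those macros are provided by <linux/export.h> on their own. A minimal, hypothetical out-of-tree sketch of that pattern (illustrative names only):

/* foo.c -- exports a symbol without pulling in the full module.h */
#include <linux/export.h>

int foo_counter;                  /* hypothetical symbol shared with other code */
EXPORT_SYMBOL_GPL(foo_counter);   /* visible to GPL-compatible modules */

Files that also use things like module_init() or MODULE_LICENSE() still need the full <linux/module.h>; the swap only applies where exporting symbols is the sole reason the header was included.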
arch/powerpc/kernel/setup_64.c
1 /* 1 /*
2 * 2 *
3 * Common boot and setup code. 3 * Common boot and setup code.
4 * 4 *
5 * Copyright (C) 2001 PPC64 Team, IBM Corp 5 * Copyright (C) 2001 PPC64 Team, IBM Corp
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 */ 11 */
12 12
13 #undef DEBUG 13 #undef DEBUG
14 14
15 #include <linux/module.h> 15 #include <linux/export.h>
16 #include <linux/string.h> 16 #include <linux/string.h>
17 #include <linux/sched.h> 17 #include <linux/sched.h>
18 #include <linux/init.h> 18 #include <linux/init.h>
19 #include <linux/kernel.h> 19 #include <linux/kernel.h>
20 #include <linux/reboot.h> 20 #include <linux/reboot.h>
21 #include <linux/delay.h> 21 #include <linux/delay.h>
22 #include <linux/initrd.h> 22 #include <linux/initrd.h>
23 #include <linux/seq_file.h> 23 #include <linux/seq_file.h>
24 #include <linux/ioport.h> 24 #include <linux/ioport.h>
25 #include <linux/console.h> 25 #include <linux/console.h>
26 #include <linux/utsname.h> 26 #include <linux/utsname.h>
27 #include <linux/tty.h> 27 #include <linux/tty.h>
28 #include <linux/root_dev.h> 28 #include <linux/root_dev.h>
29 #include <linux/notifier.h> 29 #include <linux/notifier.h>
30 #include <linux/cpu.h> 30 #include <linux/cpu.h>
31 #include <linux/unistd.h> 31 #include <linux/unistd.h>
32 #include <linux/serial.h> 32 #include <linux/serial.h>
33 #include <linux/serial_8250.h> 33 #include <linux/serial_8250.h>
34 #include <linux/bootmem.h> 34 #include <linux/bootmem.h>
35 #include <linux/pci.h> 35 #include <linux/pci.h>
36 #include <linux/lockdep.h> 36 #include <linux/lockdep.h>
37 #include <linux/memblock.h> 37 #include <linux/memblock.h>
38 #include <asm/io.h> 38 #include <asm/io.h>
39 #include <asm/kdump.h> 39 #include <asm/kdump.h>
40 #include <asm/prom.h> 40 #include <asm/prom.h>
41 #include <asm/processor.h> 41 #include <asm/processor.h>
42 #include <asm/pgtable.h> 42 #include <asm/pgtable.h>
43 #include <asm/smp.h> 43 #include <asm/smp.h>
44 #include <asm/elf.h> 44 #include <asm/elf.h>
45 #include <asm/machdep.h> 45 #include <asm/machdep.h>
46 #include <asm/paca.h> 46 #include <asm/paca.h>
47 #include <asm/time.h> 47 #include <asm/time.h>
48 #include <asm/cputable.h> 48 #include <asm/cputable.h>
49 #include <asm/sections.h> 49 #include <asm/sections.h>
50 #include <asm/btext.h> 50 #include <asm/btext.h>
51 #include <asm/nvram.h> 51 #include <asm/nvram.h>
52 #include <asm/setup.h> 52 #include <asm/setup.h>
53 #include <asm/system.h> 53 #include <asm/system.h>
54 #include <asm/rtas.h> 54 #include <asm/rtas.h>
55 #include <asm/iommu.h> 55 #include <asm/iommu.h>
56 #include <asm/serial.h> 56 #include <asm/serial.h>
57 #include <asm/cache.h> 57 #include <asm/cache.h>
58 #include <asm/page.h> 58 #include <asm/page.h>
59 #include <asm/mmu.h> 59 #include <asm/mmu.h>
60 #include <asm/firmware.h> 60 #include <asm/firmware.h>
61 #include <asm/xmon.h> 61 #include <asm/xmon.h>
62 #include <asm/udbg.h> 62 #include <asm/udbg.h>
63 #include <asm/kexec.h> 63 #include <asm/kexec.h>
64 #include <asm/mmu_context.h> 64 #include <asm/mmu_context.h>
65 #include <asm/code-patching.h> 65 #include <asm/code-patching.h>
66 #include <asm/kvm_ppc.h> 66 #include <asm/kvm_ppc.h>
67 67
68 #include "setup.h" 68 #include "setup.h"
69 69
70 #ifdef DEBUG 70 #ifdef DEBUG
71 #define DBG(fmt...) udbg_printf(fmt) 71 #define DBG(fmt...) udbg_printf(fmt)
72 #else 72 #else
73 #define DBG(fmt...) 73 #define DBG(fmt...)
74 #endif 74 #endif
75 75
76 int boot_cpuid = 0; 76 int boot_cpuid = 0;
77 int __initdata spinning_secondaries; 77 int __initdata spinning_secondaries;
78 u64 ppc64_pft_size; 78 u64 ppc64_pft_size;
79 79
80 /* Pick defaults since we might want to patch instructions 80 /* Pick defaults since we might want to patch instructions
81 * before we've read this from the device tree. 81 * before we've read this from the device tree.
82 */ 82 */
83 struct ppc64_caches ppc64_caches = { 83 struct ppc64_caches ppc64_caches = {
84 .dline_size = 0x40, 84 .dline_size = 0x40,
85 .log_dline_size = 6, 85 .log_dline_size = 6,
86 .iline_size = 0x40, 86 .iline_size = 0x40,
87 .log_iline_size = 6 87 .log_iline_size = 6
88 }; 88 };
89 EXPORT_SYMBOL_GPL(ppc64_caches); 89 EXPORT_SYMBOL_GPL(ppc64_caches);
90 90
91 /* 91 /*
92 * These are used in binfmt_elf.c to put aux entries on the stack 92 * These are used in binfmt_elf.c to put aux entries on the stack
93 * for each elf executable being started. 93 * for each elf executable being started.
94 */ 94 */
95 int dcache_bsize; 95 int dcache_bsize;
96 int icache_bsize; 96 int icache_bsize;
97 int ucache_bsize; 97 int ucache_bsize;
98 98
99 #ifdef CONFIG_SMP 99 #ifdef CONFIG_SMP
100 100
101 static char *smt_enabled_cmdline; 101 static char *smt_enabled_cmdline;
102 102
103 /* Look for ibm,smt-enabled OF option */ 103 /* Look for ibm,smt-enabled OF option */
104 static void check_smt_enabled(void) 104 static void check_smt_enabled(void)
105 { 105 {
106 struct device_node *dn; 106 struct device_node *dn;
107 const char *smt_option; 107 const char *smt_option;
108 108
109 /* Default to enabling all threads */ 109 /* Default to enabling all threads */
110 smt_enabled_at_boot = threads_per_core; 110 smt_enabled_at_boot = threads_per_core;
111 111
112 /* Allow the command line to overrule the OF option */ 112 /* Allow the command line to overrule the OF option */
113 if (smt_enabled_cmdline) { 113 if (smt_enabled_cmdline) {
114 if (!strcmp(smt_enabled_cmdline, "on")) 114 if (!strcmp(smt_enabled_cmdline, "on"))
115 smt_enabled_at_boot = threads_per_core; 115 smt_enabled_at_boot = threads_per_core;
116 else if (!strcmp(smt_enabled_cmdline, "off")) 116 else if (!strcmp(smt_enabled_cmdline, "off"))
117 smt_enabled_at_boot = 0; 117 smt_enabled_at_boot = 0;
118 else { 118 else {
119 long smt; 119 long smt;
120 int rc; 120 int rc;
121 121
122 rc = strict_strtol(smt_enabled_cmdline, 10, &smt); 122 rc = strict_strtol(smt_enabled_cmdline, 10, &smt);
123 if (!rc) 123 if (!rc)
124 smt_enabled_at_boot = 124 smt_enabled_at_boot =
125 min(threads_per_core, (int)smt); 125 min(threads_per_core, (int)smt);
126 } 126 }
127 } else { 127 } else {
128 dn = of_find_node_by_path("/options"); 128 dn = of_find_node_by_path("/options");
129 if (dn) { 129 if (dn) {
130 smt_option = of_get_property(dn, "ibm,smt-enabled", 130 smt_option = of_get_property(dn, "ibm,smt-enabled",
131 NULL); 131 NULL);
132 132
133 if (smt_option) { 133 if (smt_option) {
134 if (!strcmp(smt_option, "on")) 134 if (!strcmp(smt_option, "on"))
135 smt_enabled_at_boot = threads_per_core; 135 smt_enabled_at_boot = threads_per_core;
136 else if (!strcmp(smt_option, "off")) 136 else if (!strcmp(smt_option, "off"))
137 smt_enabled_at_boot = 0; 137 smt_enabled_at_boot = 0;
138 } 138 }
139 139
140 of_node_put(dn); 140 of_node_put(dn);
141 } 141 }
142 } 142 }
143 } 143 }
144 144
145 /* Look for smt-enabled= cmdline option */ 145 /* Look for smt-enabled= cmdline option */
146 static int __init early_smt_enabled(char *p) 146 static int __init early_smt_enabled(char *p)
147 { 147 {
148 smt_enabled_cmdline = p; 148 smt_enabled_cmdline = p;
149 return 0; 149 return 0;
150 } 150 }
151 early_param("smt-enabled", early_smt_enabled); 151 early_param("smt-enabled", early_smt_enabled);
152 152
153 #else 153 #else
154 #define check_smt_enabled() 154 #define check_smt_enabled()
155 #endif /* CONFIG_SMP */ 155 #endif /* CONFIG_SMP */
156 156
157 /* 157 /*
158 * Early initialization entry point. This is called by head.S 158 * Early initialization entry point. This is called by head.S
159 * with MMU translation disabled. We rely on the "feature" of 159 * with MMU translation disabled. We rely on the "feature" of
160 * the CPU that ignores the top 2 bits of the address in real 160 * the CPU that ignores the top 2 bits of the address in real
161 * mode so we can access kernel globals normally provided we 161 * mode so we can access kernel globals normally provided we
162 * only toy with things in the RMO region. From here, we do 162 * only toy with things in the RMO region. From here, we do
163 * some early parsing of the device-tree to set up our MEMBLOCK 163 * some early parsing of the device-tree to set up our MEMBLOCK
164 * data structures, and allocate & initialize the hash table 164 * data structures, and allocate & initialize the hash table
165 * and segment tables so we can start running with translation 165 * and segment tables so we can start running with translation
166 * enabled. 166 * enabled.
167 * 167 *
168 * It is this function which will call the probe() callback of 168 * It is this function which will call the probe() callback of
169 * the various platform types and copy the matching one to the 169 * the various platform types and copy the matching one to the
170 * global ppc_md structure. Your platform can eventually do 170 * global ppc_md structure. Your platform can eventually do
171 * some very early initializations from the probe() routine, but 171 * some very early initializations from the probe() routine, but
172 * this is not recommended; be very careful as, for example, the 172 * this is not recommended; be very careful as, for example, the
173 * device-tree is not accessible via normal means at this point. 173 * device-tree is not accessible via normal means at this point.
174 */ 174 */
175 175
176 void __init early_setup(unsigned long dt_ptr) 176 void __init early_setup(unsigned long dt_ptr)
177 { 177 {
178 /* -------- printk is _NOT_ safe to use here ! ------- */ 178 /* -------- printk is _NOT_ safe to use here ! ------- */
179 179
180 /* Identify CPU type */ 180 /* Identify CPU type */
181 identify_cpu(0, mfspr(SPRN_PVR)); 181 identify_cpu(0, mfspr(SPRN_PVR));
182 182
183 /* Assume we're on cpu 0 for now. Don't write to the paca yet! */ 183 /* Assume we're on cpu 0 for now. Don't write to the paca yet! */
184 initialise_paca(&boot_paca, 0); 184 initialise_paca(&boot_paca, 0);
185 setup_paca(&boot_paca); 185 setup_paca(&boot_paca);
186 186
187 /* Initialize lockdep early or else spinlocks will blow */ 187 /* Initialize lockdep early or else spinlocks will blow */
188 lockdep_init(); 188 lockdep_init();
189 189
190 /* -------- printk is now safe to use ------- */ 190 /* -------- printk is now safe to use ------- */
191 191
192 /* Enable early debugging if any specified (see udbg.h) */ 192 /* Enable early debugging if any specified (see udbg.h) */
193 udbg_early_init(); 193 udbg_early_init();
194 194
195 DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr); 195 DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);
196 196
197 /* 197 /*
198 * Do early initialization using the flattened device 198 * Do early initialization using the flattened device
199 * tree, such as retrieving the physical memory map or 199 * tree, such as retrieving the physical memory map or
200 * calculating/retrieving the hash table size. 200 * calculating/retrieving the hash table size.
201 */ 201 */
202 early_init_devtree(__va(dt_ptr)); 202 early_init_devtree(__va(dt_ptr));
203 203
204 /* Now we know the logical id of our boot cpu, setup the paca. */ 204 /* Now we know the logical id of our boot cpu, setup the paca. */
205 setup_paca(&paca[boot_cpuid]); 205 setup_paca(&paca[boot_cpuid]);
206 206
207 /* Fix up paca fields required for the boot cpu */ 207 /* Fix up paca fields required for the boot cpu */
208 get_paca()->cpu_start = 1; 208 get_paca()->cpu_start = 1;
209 209
210 /* Probe the machine type */ 210 /* Probe the machine type */
211 probe_machine(); 211 probe_machine();
212 212
213 setup_kdump_trampoline(); 213 setup_kdump_trampoline();
214 214
215 DBG("Found, Initializing memory management...\n"); 215 DBG("Found, Initializing memory management...\n");
216 216
217 /* Initialize the hash table or TLB handling */ 217 /* Initialize the hash table or TLB handling */
218 early_init_mmu(); 218 early_init_mmu();
219 219
220 DBG(" <- early_setup()\n"); 220 DBG(" <- early_setup()\n");
221 } 221 }
222 222
223 #ifdef CONFIG_SMP 223 #ifdef CONFIG_SMP
224 void early_setup_secondary(void) 224 void early_setup_secondary(void)
225 { 225 {
226 /* Mark interrupts enabled in PACA */ 226 /* Mark interrupts enabled in PACA */
227 get_paca()->soft_enabled = 0; 227 get_paca()->soft_enabled = 0;
228 228
229 /* Initialize the hash table or TLB handling */ 229 /* Initialize the hash table or TLB handling */
230 early_init_mmu_secondary(); 230 early_init_mmu_secondary();
231 } 231 }
232 232
233 #endif /* CONFIG_SMP */ 233 #endif /* CONFIG_SMP */
234 234
235 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC) 235 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
236 void smp_release_cpus(void) 236 void smp_release_cpus(void)
237 { 237 {
238 unsigned long *ptr; 238 unsigned long *ptr;
239 int i; 239 int i;
240 240
241 DBG(" -> smp_release_cpus()\n"); 241 DBG(" -> smp_release_cpus()\n");
242 242
243 /* All secondary cpus are spinning on a common spinloop; release them 243 /* All secondary cpus are spinning on a common spinloop; release them
244 * all now so they can start to spin on their individual paca 244 * all now so they can start to spin on their individual paca
245 * spinloops. For non-SMP kernels, the secondary cpus never get out 245 * spinloops. For non-SMP kernels, the secondary cpus never get out
246 * of the common spinloop. 246 * of the common spinloop.
247 */ 247 */
248 248
249 ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop 249 ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
250 - PHYSICAL_START); 250 - PHYSICAL_START);
251 *ptr = __pa(generic_secondary_smp_init); 251 *ptr = __pa(generic_secondary_smp_init);
252 252
253 /* And wait a bit for them to catch up */ 253 /* And wait a bit for them to catch up */
254 for (i = 0; i < 100000; i++) { 254 for (i = 0; i < 100000; i++) {
255 mb(); 255 mb();
256 HMT_low(); 256 HMT_low();
257 if (spinning_secondaries == 0) 257 if (spinning_secondaries == 0)
258 break; 258 break;
259 udelay(1); 259 udelay(1);
260 } 260 }
261 DBG("spinning_secondaries = %d\n", spinning_secondaries); 261 DBG("spinning_secondaries = %d\n", spinning_secondaries);
262 262
263 DBG(" <- smp_release_cpus()\n"); 263 DBG(" <- smp_release_cpus()\n");
264 } 264 }
265 #endif /* CONFIG_SMP || CONFIG_KEXEC */ 265 #endif /* CONFIG_SMP || CONFIG_KEXEC */
266 266
267 /* 267 /*
268 * Initialize some remaining members of the ppc64_caches and systemcfg 268 * Initialize some remaining members of the ppc64_caches and systemcfg
269 * structures 269 * structures
270 * (at least until we get rid of them completely). This is mostly some 270 * (at least until we get rid of them completely). This is mostly some
271 * cache information about the CPU that will be used by cache flush 271 * cache information about the CPU that will be used by cache flush
272 * routines and/or provided to userland 272 * routines and/or provided to userland
273 */ 273 */
274 static void __init initialize_cache_info(void) 274 static void __init initialize_cache_info(void)
275 { 275 {
276 struct device_node *np; 276 struct device_node *np;
277 unsigned long num_cpus = 0; 277 unsigned long num_cpus = 0;
278 278
279 DBG(" -> initialize_cache_info()\n"); 279 DBG(" -> initialize_cache_info()\n");
280 280
281 for (np = NULL; (np = of_find_node_by_type(np, "cpu"));) { 281 for (np = NULL; (np = of_find_node_by_type(np, "cpu"));) {
282 num_cpus += 1; 282 num_cpus += 1;
283 283
284 /* We're assuming *all* of the CPUs have the same 284 /* We're assuming *all* of the CPUs have the same
285 * d-cache and i-cache sizes... -Peter 285 * d-cache and i-cache sizes... -Peter
286 */ 286 */
287 287
288 if ( num_cpus == 1 ) { 288 if ( num_cpus == 1 ) {
289 const u32 *sizep, *lsizep; 289 const u32 *sizep, *lsizep;
290 u32 size, lsize; 290 u32 size, lsize;
291 291
292 size = 0; 292 size = 0;
293 lsize = cur_cpu_spec->dcache_bsize; 293 lsize = cur_cpu_spec->dcache_bsize;
294 sizep = of_get_property(np, "d-cache-size", NULL); 294 sizep = of_get_property(np, "d-cache-size", NULL);
295 if (sizep != NULL) 295 if (sizep != NULL)
296 size = *sizep; 296 size = *sizep;
297 lsizep = of_get_property(np, "d-cache-block-size", NULL); 297 lsizep = of_get_property(np, "d-cache-block-size", NULL);
298 /* fallback if block size missing */ 298 /* fallback if block size missing */
299 if (lsizep == NULL) 299 if (lsizep == NULL)
300 lsizep = of_get_property(np, "d-cache-line-size", NULL); 300 lsizep = of_get_property(np, "d-cache-line-size", NULL);
301 if (lsizep != NULL) 301 if (lsizep != NULL)
302 lsize = *lsizep; 302 lsize = *lsizep;
303 if (sizep == 0 || lsizep == 0) 303 if (sizep == 0 || lsizep == 0)
304 DBG("Argh, can't find dcache properties ! " 304 DBG("Argh, can't find dcache properties ! "
305 "sizep: %p, lsizep: %p\n", sizep, lsizep); 305 "sizep: %p, lsizep: %p\n", sizep, lsizep);
306 306
307 ppc64_caches.dsize = size; 307 ppc64_caches.dsize = size;
308 ppc64_caches.dline_size = lsize; 308 ppc64_caches.dline_size = lsize;
309 ppc64_caches.log_dline_size = __ilog2(lsize); 309 ppc64_caches.log_dline_size = __ilog2(lsize);
310 ppc64_caches.dlines_per_page = PAGE_SIZE / lsize; 310 ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;
311 311
312 size = 0; 312 size = 0;
313 lsize = cur_cpu_spec->icache_bsize; 313 lsize = cur_cpu_spec->icache_bsize;
314 sizep = of_get_property(np, "i-cache-size", NULL); 314 sizep = of_get_property(np, "i-cache-size", NULL);
315 if (sizep != NULL) 315 if (sizep != NULL)
316 size = *sizep; 316 size = *sizep;
317 lsizep = of_get_property(np, "i-cache-block-size", NULL); 317 lsizep = of_get_property(np, "i-cache-block-size", NULL);
318 if (lsizep == NULL) 318 if (lsizep == NULL)
319 lsizep = of_get_property(np, "i-cache-line-size", NULL); 319 lsizep = of_get_property(np, "i-cache-line-size", NULL);
320 if (lsizep != NULL) 320 if (lsizep != NULL)
321 lsize = *lsizep; 321 lsize = *lsizep;
322 if (sizep == 0 || lsizep == 0) 322 if (sizep == 0 || lsizep == 0)
323 DBG("Argh, can't find icache properties ! " 323 DBG("Argh, can't find icache properties ! "
324 "sizep: %p, lsizep: %p\n", sizep, lsizep); 324 "sizep: %p, lsizep: %p\n", sizep, lsizep);
325 325
326 ppc64_caches.isize = size; 326 ppc64_caches.isize = size;
327 ppc64_caches.iline_size = lsize; 327 ppc64_caches.iline_size = lsize;
328 ppc64_caches.log_iline_size = __ilog2(lsize); 328 ppc64_caches.log_iline_size = __ilog2(lsize);
329 ppc64_caches.ilines_per_page = PAGE_SIZE / lsize; 329 ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
330 } 330 }
331 } 331 }
332 332
333 DBG(" <- initialize_cache_info()\n"); 333 DBG(" <- initialize_cache_info()\n");
334 } 334 }
335 335
336 336
337 /* 337 /*
338 * Do some initial setup of the system. The parameters are those which 338 * Do some initial setup of the system. The parameters are those which
339 * were passed in from the bootloader. 339 * were passed in from the bootloader.
340 */ 340 */
341 void __init setup_system(void) 341 void __init setup_system(void)
342 { 342 {
343 DBG(" -> setup_system()\n"); 343 DBG(" -> setup_system()\n");
344 344
345 /* Apply the CPUs-specific and firmware specific fixups to kernel 345 /* Apply the CPUs-specific and firmware specific fixups to kernel
346 * text (nop out sections not relevant to this CPU or this firmware) 346 * text (nop out sections not relevant to this CPU or this firmware)
347 */ 347 */
348 do_feature_fixups(cur_cpu_spec->cpu_features, 348 do_feature_fixups(cur_cpu_spec->cpu_features,
349 &__start___ftr_fixup, &__stop___ftr_fixup); 349 &__start___ftr_fixup, &__stop___ftr_fixup);
350 do_feature_fixups(cur_cpu_spec->mmu_features, 350 do_feature_fixups(cur_cpu_spec->mmu_features,
351 &__start___mmu_ftr_fixup, &__stop___mmu_ftr_fixup); 351 &__start___mmu_ftr_fixup, &__stop___mmu_ftr_fixup);
352 do_feature_fixups(powerpc_firmware_features, 352 do_feature_fixups(powerpc_firmware_features,
353 &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup); 353 &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
354 do_lwsync_fixups(cur_cpu_spec->cpu_features, 354 do_lwsync_fixups(cur_cpu_spec->cpu_features,
355 &__start___lwsync_fixup, &__stop___lwsync_fixup); 355 &__start___lwsync_fixup, &__stop___lwsync_fixup);
356 356
357 /* 357 /*
358 * Unflatten the device-tree passed by prom_init or kexec 358 * Unflatten the device-tree passed by prom_init or kexec
359 */ 359 */
360 unflatten_device_tree(); 360 unflatten_device_tree();
361 361
362 /* 362 /*
363 * Fill the ppc64_caches & systemcfg structures with information 363 * Fill the ppc64_caches & systemcfg structures with information
364 * retrieved from the device-tree. 364 * retrieved from the device-tree.
365 */ 365 */
366 initialize_cache_info(); 366 initialize_cache_info();
367 367
368 #ifdef CONFIG_PPC_RTAS 368 #ifdef CONFIG_PPC_RTAS
369 /* 369 /*
370 * Initialize RTAS if available 370 * Initialize RTAS if available
371 */ 371 */
372 rtas_initialize(); 372 rtas_initialize();
373 #endif /* CONFIG_PPC_RTAS */ 373 #endif /* CONFIG_PPC_RTAS */
374 374
375 /* 375 /*
376 * Check if we have an initrd provided via the device-tree 376 * Check if we have an initrd provided via the device-tree
377 */ 377 */
378 check_for_initrd(); 378 check_for_initrd();
379 379
380 /* 380 /*
381 * Do some platform-specific early initializations, which include 381 * Do some platform-specific early initializations, which include
382 * setting up the hash table pointers. It also sets up some interrupt-mapping 382 * setting up the hash table pointers. It also sets up some interrupt-mapping
383 * related options that will be used by finish_device_tree() 383 * related options that will be used by finish_device_tree()
384 */ 384 */
385 if (ppc_md.init_early) 385 if (ppc_md.init_early)
386 ppc_md.init_early(); 386 ppc_md.init_early();
387 387
388 /* 388 /*
389 * We can discover serial ports now since the above did setup the 389 * We can discover serial ports now since the above did setup the
390 * hash table management for us, thus ioremap works. We do that early 390 * hash table management for us, thus ioremap works. We do that early
391 * so that further code can be debugged 391 * so that further code can be debugged
392 */ 392 */
393 find_legacy_serial_ports(); 393 find_legacy_serial_ports();
394 394
395 /* 395 /*
396 * Register early console 396 * Register early console
397 */ 397 */
398 register_early_udbg_console(); 398 register_early_udbg_console();
399 399
400 /* 400 /*
401 * Initialize xmon 401 * Initialize xmon
402 */ 402 */
403 xmon_setup(); 403 xmon_setup();
404 404
405 smp_setup_cpu_maps(); 405 smp_setup_cpu_maps();
406 check_smt_enabled(); 406 check_smt_enabled();
407 407
408 #ifdef CONFIG_SMP 408 #ifdef CONFIG_SMP
409 /* Release secondary cpus out of their spinloops at 0x60 now that 409 /* Release secondary cpus out of their spinloops at 0x60 now that
410 * we can map physical -> logical CPU ids 410 * we can map physical -> logical CPU ids
411 */ 411 */
412 smp_release_cpus(); 412 smp_release_cpus();
413 #endif 413 #endif
414 414
415 printk("Starting Linux PPC64 %s\n", init_utsname()->version); 415 printk("Starting Linux PPC64 %s\n", init_utsname()->version);
416 416
417 printk("-----------------------------------------------------\n"); 417 printk("-----------------------------------------------------\n");
418 printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size); 418 printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
419 printk("physicalMemorySize = 0x%llx\n", memblock_phys_mem_size()); 419 printk("physicalMemorySize = 0x%llx\n", memblock_phys_mem_size());
420 if (ppc64_caches.dline_size != 0x80) 420 if (ppc64_caches.dline_size != 0x80)
421 printk("ppc64_caches.dcache_line_size = 0x%x\n", 421 printk("ppc64_caches.dcache_line_size = 0x%x\n",
422 ppc64_caches.dline_size); 422 ppc64_caches.dline_size);
423 if (ppc64_caches.iline_size != 0x80) 423 if (ppc64_caches.iline_size != 0x80)
424 printk("ppc64_caches.icache_line_size = 0x%x\n", 424 printk("ppc64_caches.icache_line_size = 0x%x\n",
425 ppc64_caches.iline_size); 425 ppc64_caches.iline_size);
426 #ifdef CONFIG_PPC_STD_MMU_64 426 #ifdef CONFIG_PPC_STD_MMU_64
427 if (htab_address) 427 if (htab_address)
428 printk("htab_address = 0x%p\n", htab_address); 428 printk("htab_address = 0x%p\n", htab_address);
429 printk("htab_hash_mask = 0x%lx\n", htab_hash_mask); 429 printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
430 #endif /* CONFIG_PPC_STD_MMU_64 */ 430 #endif /* CONFIG_PPC_STD_MMU_64 */
431 if (PHYSICAL_START > 0) 431 if (PHYSICAL_START > 0)
432 printk("physical_start = 0x%llx\n", 432 printk("physical_start = 0x%llx\n",
433 (unsigned long long)PHYSICAL_START); 433 (unsigned long long)PHYSICAL_START);
434 printk("-----------------------------------------------------\n"); 434 printk("-----------------------------------------------------\n");
435 435
436 DBG(" <- setup_system()\n"); 436 DBG(" <- setup_system()\n");
437 } 437 }
438 438
439 /* This returns the limit below which memory accesses to the linear 439 /* This returns the limit below which memory accesses to the linear
440 * mapping are guaranteed not to cause a TLB or SLB miss. This is 440 * mapping are guaranteed not to cause a TLB or SLB miss. This is
441 * used to allocate interrupt or emergency stacks for which our 441 * used to allocate interrupt or emergency stacks for which our
442 * exception entry path doesn't deal with being interrupted. 442 * exception entry path doesn't deal with being interrupted.
443 */ 443 */
444 static u64 safe_stack_limit(void) 444 static u64 safe_stack_limit(void)
445 { 445 {
446 #ifdef CONFIG_PPC_BOOK3E 446 #ifdef CONFIG_PPC_BOOK3E
447 /* Freescale BookE bolts the entire linear mapping */ 447 /* Freescale BookE bolts the entire linear mapping */
448 if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) 448 if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
449 return linear_map_top; 449 return linear_map_top;
450 /* Other BookE, we assume the first GB is bolted */ 450 /* Other BookE, we assume the first GB is bolted */
451 return 1ul << 30; 451 return 1ul << 30;
452 #else 452 #else
453 /* BookS, the first segment is bolted */ 453 /* BookS, the first segment is bolted */
454 if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) 454 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
455 return 1UL << SID_SHIFT_1T; 455 return 1UL << SID_SHIFT_1T;
456 return 1UL << SID_SHIFT; 456 return 1UL << SID_SHIFT;
457 #endif 457 #endif
458 } 458 }
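To put numbers on these limits, a small stand-alone sketch; the segment-size values used here (SID_SHIFT of 28 and SID_SHIFT_1T of 40, the usual ppc64 sizes) are assumptions rather than definitions taken from this file, and linear_map_top is left out since it is platform-dependent:

#include <stdio.h>

int main(void)
{
	/* Book3S, standard 256MB segments: 1UL << 28 */
	printf("standard segment limit: %lluMB\n", (1ULL << 28) >> 20);	/* 256 */
	/* Book3S, 1TB segments: 1UL << 40 */
	printf("1T segment limit: %lluGB\n", (1ULL << 40) >> 30);	/* 1024 */
	/* generic BookE, first bolted GB: 1ul << 30 */
	printf("BookE bolted limit: %lluGB\n", (1ULL << 30) >> 30);	/* 1 */
	return 0;
}

The 256MB figure is also the limit emergency_stack_init() below refers to.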
459 459
460 static void __init irqstack_early_init(void) 460 static void __init irqstack_early_init(void)
461 { 461 {
462 u64 limit = safe_stack_limit(); 462 u64 limit = safe_stack_limit();
463 unsigned int i; 463 unsigned int i;
464 464
465 /* 465 /*
466 * Interrupt stacks must be in the first segment since we 466 * Interrupt stacks must be in the first segment since we
467 * cannot afford to take SLB misses on them. 467 * cannot afford to take SLB misses on them.
468 */ 468 */
469 for_each_possible_cpu(i) { 469 for_each_possible_cpu(i) {
470 softirq_ctx[i] = (struct thread_info *) 470 softirq_ctx[i] = (struct thread_info *)
471 __va(memblock_alloc_base(THREAD_SIZE, 471 __va(memblock_alloc_base(THREAD_SIZE,
472 THREAD_SIZE, limit)); 472 THREAD_SIZE, limit));
473 hardirq_ctx[i] = (struct thread_info *) 473 hardirq_ctx[i] = (struct thread_info *)
474 __va(memblock_alloc_base(THREAD_SIZE, 474 __va(memblock_alloc_base(THREAD_SIZE,
475 THREAD_SIZE, limit)); 475 THREAD_SIZE, limit));
476 } 476 }
477 } 477 }
478 478
479 #ifdef CONFIG_PPC_BOOK3E 479 #ifdef CONFIG_PPC_BOOK3E
480 static void __init exc_lvl_early_init(void) 480 static void __init exc_lvl_early_init(void)
481 { 481 {
482 extern unsigned int interrupt_base_book3e; 482 extern unsigned int interrupt_base_book3e;
483 extern unsigned int exc_debug_debug_book3e; 483 extern unsigned int exc_debug_debug_book3e;
484 484
485 unsigned int i; 485 unsigned int i;
486 486
487 for_each_possible_cpu(i) { 487 for_each_possible_cpu(i) {
488 critirq_ctx[i] = (struct thread_info *) 488 critirq_ctx[i] = (struct thread_info *)
489 __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); 489 __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
490 dbgirq_ctx[i] = (struct thread_info *) 490 dbgirq_ctx[i] = (struct thread_info *)
491 __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); 491 __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
492 mcheckirq_ctx[i] = (struct thread_info *) 492 mcheckirq_ctx[i] = (struct thread_info *)
493 __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); 493 __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
494 } 494 }
495 495
496 if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) 496 if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
497 patch_branch(&interrupt_base_book3e + (0x040 / 4) + 1, 497 patch_branch(&interrupt_base_book3e + (0x040 / 4) + 1,
498 (unsigned long)&exc_debug_debug_book3e, 0); 498 (unsigned long)&exc_debug_debug_book3e, 0);
499 } 499 }
500 #else 500 #else
501 #define exc_lvl_early_init() 501 #define exc_lvl_early_init()
502 #endif 502 #endif
503 503
504 /* 504 /*
505 * Stack space used when we detect a bad kernel stack pointer, and 505 * Stack space used when we detect a bad kernel stack pointer, and
506 * early in SMP boots before relocation is enabled. 506 * early in SMP boots before relocation is enabled.
507 */ 507 */
508 static void __init emergency_stack_init(void) 508 static void __init emergency_stack_init(void)
509 { 509 {
510 u64 limit; 510 u64 limit;
511 unsigned int i; 511 unsigned int i;
512 512
513 /* 513 /*
514 * Emergency stacks must be under 256MB; we cannot afford to take 514 * Emergency stacks must be under 256MB; we cannot afford to take
515 * SLB misses on them. The ABI also requires them to be 128-byte 515 * SLB misses on them. The ABI also requires them to be 128-byte
516 * aligned. 516 * aligned.
517 * 517 *
518 * Since we use these as temporary stacks during secondary CPU 518 * Since we use these as temporary stacks during secondary CPU
519 * bringup, we need to get at them in real mode. This means they 519 * bringup, we need to get at them in real mode. This means they
520 * must also be within the RMO region. 520 * must also be within the RMO region.
521 */ 521 */
522 limit = min(safe_stack_limit(), ppc64_rma_size); 522 limit = min(safe_stack_limit(), ppc64_rma_size);
523 523
524 for_each_possible_cpu(i) { 524 for_each_possible_cpu(i) {
525 unsigned long sp; 525 unsigned long sp;
526 sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit); 526 sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
527 sp += THREAD_SIZE; 527 sp += THREAD_SIZE;
528 paca[i].emergency_sp = __va(sp); 528 paca[i].emergency_sp = __va(sp);
529 } 529 }
530 } 530 }
531 531
532 /* 532 /*
533 * Called from start_kernel, this initializes bootmem, which is used 533 * Called from start_kernel, this initializes bootmem, which is used
534 * to manage page allocation until mem_init is called. 534 * to manage page allocation until mem_init is called.
535 */ 535 */
536 void __init setup_arch(char **cmdline_p) 536 void __init setup_arch(char **cmdline_p)
537 { 537 {
538 ppc64_boot_msg(0x12, "Setup Arch"); 538 ppc64_boot_msg(0x12, "Setup Arch");
539 539
540 *cmdline_p = cmd_line; 540 *cmdline_p = cmd_line;
541 541
542 /* 542 /*
543 * Set cache line size based on type of cpu as a default. 543 * Set cache line size based on type of cpu as a default.
544 * Systems with OF can look in the properties on the cpu node(s) 544 * Systems with OF can look in the properties on the cpu node(s)
545 * for a possibly more accurate value. 545 * for a possibly more accurate value.
546 */ 546 */
547 dcache_bsize = ppc64_caches.dline_size; 547 dcache_bsize = ppc64_caches.dline_size;
548 icache_bsize = ppc64_caches.iline_size; 548 icache_bsize = ppc64_caches.iline_size;
549 549
550 /* reboot on panic */ 550 /* reboot on panic */
551 panic_timeout = 180; 551 panic_timeout = 180;
552 552
553 if (ppc_md.panic) 553 if (ppc_md.panic)
554 setup_panic(); 554 setup_panic();
555 555
556 init_mm.start_code = (unsigned long)_stext; 556 init_mm.start_code = (unsigned long)_stext;
557 init_mm.end_code = (unsigned long) _etext; 557 init_mm.end_code = (unsigned long) _etext;
558 init_mm.end_data = (unsigned long) _edata; 558 init_mm.end_data = (unsigned long) _edata;
559 init_mm.brk = klimit; 559 init_mm.brk = klimit;
560 560
561 irqstack_early_init(); 561 irqstack_early_init();
562 exc_lvl_early_init(); 562 exc_lvl_early_init();
563 emergency_stack_init(); 563 emergency_stack_init();
564 564
565 #ifdef CONFIG_PPC_STD_MMU_64 565 #ifdef CONFIG_PPC_STD_MMU_64
566 stabs_alloc(); 566 stabs_alloc();
567 #endif 567 #endif
568 /* set up the bootmem stuff with available memory */ 568 /* set up the bootmem stuff with available memory */
569 do_init_bootmem(); 569 do_init_bootmem();
570 sparse_init(); 570 sparse_init();
571 571
572 #ifdef CONFIG_DUMMY_CONSOLE 572 #ifdef CONFIG_DUMMY_CONSOLE
573 conswitchp = &dummy_con; 573 conswitchp = &dummy_con;
574 #endif 574 #endif
575 575
576 if (ppc_md.setup_arch) 576 if (ppc_md.setup_arch)
577 ppc_md.setup_arch(); 577 ppc_md.setup_arch();
578 578
579 paging_init(); 579 paging_init();
580 580
581 /* Initialize the MMU context management stuff */ 581 /* Initialize the MMU context management stuff */
582 mmu_context_init(); 582 mmu_context_init();
583 583
584 kvm_rma_init(); 584 kvm_rma_init();
585 585
586 ppc64_boot_msg(0x15, "Setup Done"); 586 ppc64_boot_msg(0x15, "Setup Done");
587 } 587 }
588 588
589 589
590 /* ToDo: do something useful if ppc_md is not yet setup. */ 590 /* ToDo: do something useful if ppc_md is not yet setup. */
591 #define PPC64_LINUX_FUNCTION 0x0f000000 591 #define PPC64_LINUX_FUNCTION 0x0f000000
592 #define PPC64_IPL_MESSAGE 0xc0000000 592 #define PPC64_IPL_MESSAGE 0xc0000000
593 #define PPC64_TERM_MESSAGE 0xb0000000 593 #define PPC64_TERM_MESSAGE 0xb0000000
594 594
595 static void ppc64_do_msg(unsigned int src, const char *msg) 595 static void ppc64_do_msg(unsigned int src, const char *msg)
596 { 596 {
597 if (ppc_md.progress) { 597 if (ppc_md.progress) {
598 char buf[128]; 598 char buf[128];
599 599
600 sprintf(buf, "%08X\n", src); 600 sprintf(buf, "%08X\n", src);
601 ppc_md.progress(buf, 0); 601 ppc_md.progress(buf, 0);
602 snprintf(buf, 128, "%s", msg); 602 snprintf(buf, 128, "%s", msg);
603 ppc_md.progress(buf, 0); 603 ppc_md.progress(buf, 0);
604 } 604 }
605 } 605 }
606 606
607 /* Print a boot progress message. */ 607 /* Print a boot progress message. */
608 void ppc64_boot_msg(unsigned int src, const char *msg) 608 void ppc64_boot_msg(unsigned int src, const char *msg)
609 { 609 {
610 ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg); 610 ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg);
611 printk("[boot]%04x %s\n", src, msg); 611 printk("[boot]%04x %s\n", src, msg);
612 } 612 }
613 613
614 #ifdef CONFIG_SMP 614 #ifdef CONFIG_SMP
615 #define PCPU_DYN_SIZE () 615 #define PCPU_DYN_SIZE ()
616 616
617 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) 617 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
618 { 618 {
619 return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align, 619 return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
620 __pa(MAX_DMA_ADDRESS)); 620 __pa(MAX_DMA_ADDRESS));
621 } 621 }
622 622
623 static void __init pcpu_fc_free(void *ptr, size_t size) 623 static void __init pcpu_fc_free(void *ptr, size_t size)
624 { 624 {
625 free_bootmem(__pa(ptr), size); 625 free_bootmem(__pa(ptr), size);
626 } 626 }
627 627
628 static int pcpu_cpu_distance(unsigned int from, unsigned int to) 628 static int pcpu_cpu_distance(unsigned int from, unsigned int to)
629 { 629 {
630 if (cpu_to_node(from) == cpu_to_node(to)) 630 if (cpu_to_node(from) == cpu_to_node(to))
631 return LOCAL_DISTANCE; 631 return LOCAL_DISTANCE;
632 else 632 else
633 return REMOTE_DISTANCE; 633 return REMOTE_DISTANCE;
634 } 634 }
635 635
636 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; 636 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
637 EXPORT_SYMBOL(__per_cpu_offset); 637 EXPORT_SYMBOL(__per_cpu_offset);
638 638
639 void __init setup_per_cpu_areas(void) 639 void __init setup_per_cpu_areas(void)
640 { 640 {
641 const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE; 641 const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
642 size_t atom_size; 642 size_t atom_size;
643 unsigned long delta; 643 unsigned long delta;
644 unsigned int cpu; 644 unsigned int cpu;
645 int rc; 645 int rc;
646 646
647 /* 647 /*
648 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need 648 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
649 * to group units. For larger mappings, use 1M atom which 649 * to group units. For larger mappings, use 1M atom which
650 * should be large enough to contain a number of units. 650 * should be large enough to contain a number of units.
651 */ 651 */
652 if (mmu_linear_psize == MMU_PAGE_4K) 652 if (mmu_linear_psize == MMU_PAGE_4K)
653 atom_size = PAGE_SIZE; 653 atom_size = PAGE_SIZE;
654 else 654 else
655 atom_size = 1 << 20; 655 atom_size = 1 << 20;
656 656
657 rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance, 657 rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
658 pcpu_fc_alloc, pcpu_fc_free); 658 pcpu_fc_alloc, pcpu_fc_free);
659 if (rc < 0) 659 if (rc < 0)
660 panic("cannot initialize percpu area (err=%d)", rc); 660 panic("cannot initialize percpu area (err=%d)", rc);
661 661
662 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; 662 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
663 for_each_possible_cpu(cpu) { 663 for_each_possible_cpu(cpu) {
664 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; 664 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
665 paca[cpu].data_offset = __per_cpu_offset[cpu]; 665 paca[cpu].data_offset = __per_cpu_offset[cpu];
666 } 666 }
667 } 667 }
668 #endif 668 #endif
669 669
670 670
671 #ifdef CONFIG_PPC_INDIRECT_IO 671 #ifdef CONFIG_PPC_INDIRECT_IO
672 struct ppc_pci_io ppc_pci_io; 672 struct ppc_pci_io ppc_pci_io;
673 EXPORT_SYMBOL(ppc_pci_io); 673 EXPORT_SYMBOL(ppc_pci_io);
674 #endif /* CONFIG_PPC_INDIRECT_IO */ 674 #endif /* CONFIG_PPC_INDIRECT_IO */
675 675
676 676
arch/powerpc/kernel/smp.c
1 /* 1 /*
2 * SMP support for ppc. 2 * SMP support for ppc.
3 * 3 *
4 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great 4 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
5 * deal of code from the sparc and intel versions. 5 * deal of code from the sparc and intel versions.
6 * 6 *
7 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> 7 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
8 * 8 *
9 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and 9 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
10 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com 10 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
11 * 11 *
12 * This program is free software; you can redistribute it and/or 12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License 13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version. 15 * 2 of the License, or (at your option) any later version.
16 */ 16 */
17 17
18 #undef DEBUG 18 #undef DEBUG
19 19
20 #include <linux/kernel.h> 20 #include <linux/kernel.h>
21 #include <linux/module.h> 21 #include <linux/export.h>
22 #include <linux/sched.h> 22 #include <linux/sched.h>
23 #include <linux/smp.h> 23 #include <linux/smp.h>
24 #include <linux/interrupt.h> 24 #include <linux/interrupt.h>
25 #include <linux/delay.h> 25 #include <linux/delay.h>
26 #include <linux/init.h> 26 #include <linux/init.h>
27 #include <linux/spinlock.h> 27 #include <linux/spinlock.h>
28 #include <linux/cache.h> 28 #include <linux/cache.h>
29 #include <linux/err.h> 29 #include <linux/err.h>
30 #include <linux/sysdev.h> 30 #include <linux/sysdev.h>
31 #include <linux/cpu.h> 31 #include <linux/cpu.h>
32 #include <linux/notifier.h> 32 #include <linux/notifier.h>
33 #include <linux/topology.h> 33 #include <linux/topology.h>
34 34
35 #include <asm/ptrace.h> 35 #include <asm/ptrace.h>
36 #include <linux/atomic.h> 36 #include <linux/atomic.h>
37 #include <asm/irq.h> 37 #include <asm/irq.h>
38 #include <asm/page.h> 38 #include <asm/page.h>
39 #include <asm/pgtable.h> 39 #include <asm/pgtable.h>
40 #include <asm/prom.h> 40 #include <asm/prom.h>
41 #include <asm/smp.h> 41 #include <asm/smp.h>
42 #include <asm/time.h> 42 #include <asm/time.h>
43 #include <asm/machdep.h> 43 #include <asm/machdep.h>
44 #include <asm/cputhreads.h> 44 #include <asm/cputhreads.h>
45 #include <asm/cputable.h> 45 #include <asm/cputable.h>
46 #include <asm/system.h> 46 #include <asm/system.h>
47 #include <asm/mpic.h> 47 #include <asm/mpic.h>
48 #include <asm/vdso_datapage.h> 48 #include <asm/vdso_datapage.h>
49 #ifdef CONFIG_PPC64 49 #ifdef CONFIG_PPC64
50 #include <asm/paca.h> 50 #include <asm/paca.h>
51 #endif 51 #endif
52 52
53 #ifdef DEBUG 53 #ifdef DEBUG
54 #include <asm/udbg.h> 54 #include <asm/udbg.h>
55 #define DBG(fmt...) udbg_printf(fmt) 55 #define DBG(fmt...) udbg_printf(fmt)
56 #else 56 #else
57 #define DBG(fmt...) 57 #define DBG(fmt...)
58 #endif 58 #endif
59 59
60 60
61 /* Store all idle threads; these can be reused instead of creating 61 /* Store all idle threads; these can be reused instead of creating
62 * a new thread. This also avoids complicated thread destroy functionality 62 * a new thread. This also avoids complicated thread destroy functionality
63 * for idle threads. 63 * for idle threads.
64 */ 64 */
65 #ifdef CONFIG_HOTPLUG_CPU 65 #ifdef CONFIG_HOTPLUG_CPU
66 /* 66 /*
67 * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is 67 * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
68 * removed after init for !CONFIG_HOTPLUG_CPU. 68 * removed after init for !CONFIG_HOTPLUG_CPU.
69 */ 69 */
70 static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); 70 static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
71 #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) 71 #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
72 #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) 72 #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
73 #else 73 #else
74 static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; 74 static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
75 #define get_idle_for_cpu(x) (idle_thread_array[(x)]) 75 #define get_idle_for_cpu(x) (idle_thread_array[(x)])
76 #define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p)) 76 #define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p))
77 #endif 77 #endif
78 78
79 struct thread_info *secondary_ti; 79 struct thread_info *secondary_ti;
80 80
81 DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map); 81 DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
82 DEFINE_PER_CPU(cpumask_var_t, cpu_core_map); 82 DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
83 83
84 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); 84 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
85 EXPORT_PER_CPU_SYMBOL(cpu_core_map); 85 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
86 86
87 /* SMP operations for this machine */ 87 /* SMP operations for this machine */
88 struct smp_ops_t *smp_ops; 88 struct smp_ops_t *smp_ops;
89 89
90 /* Can't be static due to PowerMac hackery */ 90 /* Can't be static due to PowerMac hackery */
91 volatile unsigned int cpu_callin_map[NR_CPUS]; 91 volatile unsigned int cpu_callin_map[NR_CPUS];
92 92
93 int smt_enabled_at_boot = 1; 93 int smt_enabled_at_boot = 1;
94 94
95 static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL; 95 static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
96 96
97 #ifdef CONFIG_PPC64 97 #ifdef CONFIG_PPC64
98 int __devinit smp_generic_kick_cpu(int nr) 98 int __devinit smp_generic_kick_cpu(int nr)
99 { 99 {
100 BUG_ON(nr < 0 || nr >= NR_CPUS); 100 BUG_ON(nr < 0 || nr >= NR_CPUS);
101 101
102 /* 102 /*
103 * The processor is currently spinning, waiting for the 103 * The processor is currently spinning, waiting for the
104 * cpu_start field to become non-zero. After we set cpu_start, 104 * cpu_start field to become non-zero. After we set cpu_start,
105 * the processor will continue on to secondary_start 105 * the processor will continue on to secondary_start
106 */ 106 */
107 paca[nr].cpu_start = 1; 107 paca[nr].cpu_start = 1;
108 smp_mb(); 108 smp_mb();
109 109
110 return 0; 110 return 0;
111 } 111 }
112 #endif 112 #endif
113 113
114 static irqreturn_t call_function_action(int irq, void *data) 114 static irqreturn_t call_function_action(int irq, void *data)
115 { 115 {
116 generic_smp_call_function_interrupt(); 116 generic_smp_call_function_interrupt();
117 return IRQ_HANDLED; 117 return IRQ_HANDLED;
118 } 118 }
119 119
120 static irqreturn_t reschedule_action(int irq, void *data) 120 static irqreturn_t reschedule_action(int irq, void *data)
121 { 121 {
122 scheduler_ipi(); 122 scheduler_ipi();
123 return IRQ_HANDLED; 123 return IRQ_HANDLED;
124 } 124 }
125 125
126 static irqreturn_t call_function_single_action(int irq, void *data) 126 static irqreturn_t call_function_single_action(int irq, void *data)
127 { 127 {
128 generic_smp_call_function_single_interrupt(); 128 generic_smp_call_function_single_interrupt();
129 return IRQ_HANDLED; 129 return IRQ_HANDLED;
130 } 130 }
131 131
132 static irqreturn_t debug_ipi_action(int irq, void *data) 132 static irqreturn_t debug_ipi_action(int irq, void *data)
133 { 133 {
134 if (crash_ipi_function_ptr) { 134 if (crash_ipi_function_ptr) {
135 crash_ipi_function_ptr(get_irq_regs()); 135 crash_ipi_function_ptr(get_irq_regs());
136 return IRQ_HANDLED; 136 return IRQ_HANDLED;
137 } 137 }
138 138
139 #ifdef CONFIG_DEBUGGER 139 #ifdef CONFIG_DEBUGGER
140 debugger_ipi(get_irq_regs()); 140 debugger_ipi(get_irq_regs());
141 #endif /* CONFIG_DEBUGGER */ 141 #endif /* CONFIG_DEBUGGER */
142 142
143 return IRQ_HANDLED; 143 return IRQ_HANDLED;
144 } 144 }
145 145
146 static irq_handler_t smp_ipi_action[] = { 146 static irq_handler_t smp_ipi_action[] = {
147 [PPC_MSG_CALL_FUNCTION] = call_function_action, 147 [PPC_MSG_CALL_FUNCTION] = call_function_action,
148 [PPC_MSG_RESCHEDULE] = reschedule_action, 148 [PPC_MSG_RESCHEDULE] = reschedule_action,
149 [PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action, 149 [PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action,
150 [PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action, 150 [PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
151 }; 151 };
152 152
153 const char *smp_ipi_name[] = { 153 const char *smp_ipi_name[] = {
154 [PPC_MSG_CALL_FUNCTION] = "ipi call function", 154 [PPC_MSG_CALL_FUNCTION] = "ipi call function",
155 [PPC_MSG_RESCHEDULE] = "ipi reschedule", 155 [PPC_MSG_RESCHEDULE] = "ipi reschedule",
156 [PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single", 156 [PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single",
157 [PPC_MSG_DEBUGGER_BREAK] = "ipi debugger", 157 [PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
158 }; 158 };
159 159
160 /* optional function to request ipi, for controllers with >= 4 ipis */ 160 /* optional function to request ipi, for controllers with >= 4 ipis */
161 int smp_request_message_ipi(int virq, int msg) 161 int smp_request_message_ipi(int virq, int msg)
162 { 162 {
163 int err; 163 int err;
164 164
165 if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) { 165 if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
166 return -EINVAL; 166 return -EINVAL;
167 } 167 }
168 #if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC) 168 #if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
169 if (msg == PPC_MSG_DEBUGGER_BREAK) { 169 if (msg == PPC_MSG_DEBUGGER_BREAK) {
170 return 1; 170 return 1;
171 } 171 }
172 #endif 172 #endif
173 err = request_irq(virq, smp_ipi_action[msg], IRQF_DISABLED|IRQF_PERCPU, 173 err = request_irq(virq, smp_ipi_action[msg], IRQF_DISABLED|IRQF_PERCPU,
174 smp_ipi_name[msg], 0); 174 smp_ipi_name[msg], 0);
175 WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n", 175 WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
176 virq, smp_ipi_name[msg], err); 176 virq, smp_ipi_name[msg], err);
177 177
178 return err; 178 return err;
179 } 179 }
180 180
181 #ifdef CONFIG_PPC_SMP_MUXED_IPI 181 #ifdef CONFIG_PPC_SMP_MUXED_IPI
182 struct cpu_messages { 182 struct cpu_messages {
183 int messages; /* current messages */ 183 int messages; /* current messages */
184 unsigned long data; /* data for cause ipi */ 184 unsigned long data; /* data for cause ipi */
185 }; 185 };
186 static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message); 186 static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
187 187
188 void smp_muxed_ipi_set_data(int cpu, unsigned long data) 188 void smp_muxed_ipi_set_data(int cpu, unsigned long data)
189 { 189 {
190 struct cpu_messages *info = &per_cpu(ipi_message, cpu); 190 struct cpu_messages *info = &per_cpu(ipi_message, cpu);
191 191
192 info->data = data; 192 info->data = data;
193 } 193 }
194 194
195 void smp_muxed_ipi_message_pass(int cpu, int msg) 195 void smp_muxed_ipi_message_pass(int cpu, int msg)
196 { 196 {
197 struct cpu_messages *info = &per_cpu(ipi_message, cpu); 197 struct cpu_messages *info = &per_cpu(ipi_message, cpu);
198 char *message = (char *)&info->messages; 198 char *message = (char *)&info->messages;
199 199
200 message[msg] = 1; 200 message[msg] = 1;
201 mb(); 201 mb();
202 smp_ops->cause_ipi(cpu, info->data); 202 smp_ops->cause_ipi(cpu, info->data);
203 } 203 }
204 204
205 irqreturn_t smp_ipi_demux(void) 205 irqreturn_t smp_ipi_demux(void)
206 { 206 {
207 struct cpu_messages *info = &__get_cpu_var(ipi_message); 207 struct cpu_messages *info = &__get_cpu_var(ipi_message);
208 unsigned int all; 208 unsigned int all;
209 209
210 mb(); /* order any irq clear */ 210 mb(); /* order any irq clear */
211 211
212 do { 212 do {
213 all = xchg_local(&info->messages, 0); 213 all = xchg_local(&info->messages, 0);
214 214
215 #ifdef __BIG_ENDIAN 215 #ifdef __BIG_ENDIAN
216 if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION))) 216 if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
217 generic_smp_call_function_interrupt(); 217 generic_smp_call_function_interrupt();
218 if (all & (1 << (24 - 8 * PPC_MSG_RESCHEDULE))) 218 if (all & (1 << (24 - 8 * PPC_MSG_RESCHEDULE)))
219 scheduler_ipi(); 219 scheduler_ipi();
220 if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNC_SINGLE))) 220 if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNC_SINGLE)))
221 generic_smp_call_function_single_interrupt(); 221 generic_smp_call_function_single_interrupt();
222 if (all & (1 << (24 - 8 * PPC_MSG_DEBUGGER_BREAK))) 222 if (all & (1 << (24 - 8 * PPC_MSG_DEBUGGER_BREAK)))
223 debug_ipi_action(0, NULL); 223 debug_ipi_action(0, NULL);
224 #else 224 #else
225 #error Unsupported ENDIAN 225 #error Unsupported ENDIAN
226 #endif 226 #endif
227 } while (info->messages); 227 } while (info->messages);
228 228
229 return IRQ_HANDLED; 229 return IRQ_HANDLED;
230 } 230 }
231 #endif /* CONFIG_PPC_SMP_MUXED_IPI */ 231 #endif /* CONFIG_PPC_SMP_MUXED_IPI */
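The byte-to-bit mapping used in smp_ipi_demux() above can be checked in isolation: smp_muxed_ipi_message_pass() writes message[msg] = 1 into byte msg of the 32-bit messages word, and on a big-endian layout that byte corresponds to bit (24 - 8 * msg) of the integer value. A minimal sketch, assuming four message types as in the smp_ipi_name table above:

#include <assert.h>
#include <stdint.h>

static uint32_t be_word_with_byte(int msg)
{
	unsigned char b[4] = { 0, 0, 0, 0 };

	b[msg] = 1;	/* what message[msg] = 1 does in the kernel code */
	/* interpret the four bytes as one big-endian 32-bit word */
	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8) | (uint32_t)b[3];
}

int main(void)
{
	int msg;

	for (msg = 0; msg < 4; msg++)
		assert(be_word_with_byte(msg) == (1u << (24 - 8 * msg)));
	return 0;
}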
232 232
233 static inline void do_message_pass(int cpu, int msg) 233 static inline void do_message_pass(int cpu, int msg)
234 { 234 {
235 if (smp_ops->message_pass) 235 if (smp_ops->message_pass)
236 smp_ops->message_pass(cpu, msg); 236 smp_ops->message_pass(cpu, msg);
237 #ifdef CONFIG_PPC_SMP_MUXED_IPI 237 #ifdef CONFIG_PPC_SMP_MUXED_IPI
238 else 238 else
239 smp_muxed_ipi_message_pass(cpu, msg); 239 smp_muxed_ipi_message_pass(cpu, msg);
240 #endif 240 #endif
241 } 241 }
242 242
243 void smp_send_reschedule(int cpu) 243 void smp_send_reschedule(int cpu)
244 { 244 {
245 if (likely(smp_ops)) 245 if (likely(smp_ops))
246 do_message_pass(cpu, PPC_MSG_RESCHEDULE); 246 do_message_pass(cpu, PPC_MSG_RESCHEDULE);
247 } 247 }
248 EXPORT_SYMBOL_GPL(smp_send_reschedule); 248 EXPORT_SYMBOL_GPL(smp_send_reschedule);
249 249
250 void arch_send_call_function_single_ipi(int cpu) 250 void arch_send_call_function_single_ipi(int cpu)
251 { 251 {
252 do_message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE); 252 do_message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
253 } 253 }
254 254
255 void arch_send_call_function_ipi_mask(const struct cpumask *mask) 255 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
256 { 256 {
257 unsigned int cpu; 257 unsigned int cpu;
258 258
259 for_each_cpu(cpu, mask) 259 for_each_cpu(cpu, mask)
260 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION); 260 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
261 } 261 }
262 262
263 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) 263 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
264 void smp_send_debugger_break(void) 264 void smp_send_debugger_break(void)
265 { 265 {
266 int cpu; 266 int cpu;
267 int me = raw_smp_processor_id(); 267 int me = raw_smp_processor_id();
268 268
269 if (unlikely(!smp_ops)) 269 if (unlikely(!smp_ops))
270 return; 270 return;
271 271
272 for_each_online_cpu(cpu) 272 for_each_online_cpu(cpu)
273 if (cpu != me) 273 if (cpu != me)
274 do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK); 274 do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
275 } 275 }
276 #endif 276 #endif
277 277
278 #ifdef CONFIG_KEXEC 278 #ifdef CONFIG_KEXEC
279 void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) 279 void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
280 { 280 {
281 crash_ipi_function_ptr = crash_ipi_callback; 281 crash_ipi_function_ptr = crash_ipi_callback;
282 if (crash_ipi_callback) { 282 if (crash_ipi_callback) {
283 mb(); 283 mb();
284 smp_send_debugger_break(); 284 smp_send_debugger_break();
285 } 285 }
286 } 286 }
287 #endif 287 #endif
288 288
289 static void stop_this_cpu(void *dummy) 289 static void stop_this_cpu(void *dummy)
290 { 290 {
291 /* Remove this CPU */ 291 /* Remove this CPU */
292 set_cpu_online(smp_processor_id(), false); 292 set_cpu_online(smp_processor_id(), false);
293 293
294 local_irq_disable(); 294 local_irq_disable();
295 while (1) 295 while (1)
296 ; 296 ;
297 } 297 }
298 298
299 void smp_send_stop(void) 299 void smp_send_stop(void)
300 { 300 {
301 smp_call_function(stop_this_cpu, NULL, 0); 301 smp_call_function(stop_this_cpu, NULL, 0);
302 } 302 }
303 303
304 struct thread_info *current_set[NR_CPUS]; 304 struct thread_info *current_set[NR_CPUS];
305 305
306 static void __devinit smp_store_cpu_info(int id) 306 static void __devinit smp_store_cpu_info(int id)
307 { 307 {
308 per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR); 308 per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
309 #ifdef CONFIG_PPC_FSL_BOOK3E 309 #ifdef CONFIG_PPC_FSL_BOOK3E
310 per_cpu(next_tlbcam_idx, id) 310 per_cpu(next_tlbcam_idx, id)
311 = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1; 311 = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
312 #endif 312 #endif
313 } 313 }
314 314
315 void __init smp_prepare_cpus(unsigned int max_cpus) 315 void __init smp_prepare_cpus(unsigned int max_cpus)
316 { 316 {
317 unsigned int cpu; 317 unsigned int cpu;
318 318
319 DBG("smp_prepare_cpus\n"); 319 DBG("smp_prepare_cpus\n");
320 320
321 /* 321 /*
322 * setup_cpu may need to be called on the boot cpu. We haven't 322 * setup_cpu may need to be called on the boot cpu. We haven't
323 * spun any cpus up, but let's be paranoid. 323 * spun any cpus up, but let's be paranoid.
324 */ 324 */
325 BUG_ON(boot_cpuid != smp_processor_id()); 325 BUG_ON(boot_cpuid != smp_processor_id());
326 326
327 /* Fixup boot cpu */ 327 /* Fixup boot cpu */
328 smp_store_cpu_info(boot_cpuid); 328 smp_store_cpu_info(boot_cpuid);
329 cpu_callin_map[boot_cpuid] = 1; 329 cpu_callin_map[boot_cpuid] = 1;
330 330
331 for_each_possible_cpu(cpu) { 331 for_each_possible_cpu(cpu) {
332 zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu), 332 zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
333 GFP_KERNEL, cpu_to_node(cpu)); 333 GFP_KERNEL, cpu_to_node(cpu));
334 zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), 334 zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
335 GFP_KERNEL, cpu_to_node(cpu)); 335 GFP_KERNEL, cpu_to_node(cpu));
336 } 336 }
337 337
338 cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid)); 338 cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
339 cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid)); 339 cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
340 340
341 if (smp_ops) 341 if (smp_ops)
342 if (smp_ops->probe) 342 if (smp_ops->probe)
343 max_cpus = smp_ops->probe(); 343 max_cpus = smp_ops->probe();
344 else 344 else
345 max_cpus = NR_CPUS; 345 max_cpus = NR_CPUS;
346 else 346 else
347 max_cpus = 1; 347 max_cpus = 1;
348 } 348 }
349 349
350 void __devinit smp_prepare_boot_cpu(void) 350 void __devinit smp_prepare_boot_cpu(void)
351 { 351 {
352 BUG_ON(smp_processor_id() != boot_cpuid); 352 BUG_ON(smp_processor_id() != boot_cpuid);
353 #ifdef CONFIG_PPC64 353 #ifdef CONFIG_PPC64
354 paca[boot_cpuid].__current = current; 354 paca[boot_cpuid].__current = current;
355 #endif 355 #endif
356 current_set[boot_cpuid] = task_thread_info(current); 356 current_set[boot_cpuid] = task_thread_info(current);
357 } 357 }
358 358
359 #ifdef CONFIG_HOTPLUG_CPU 359 #ifdef CONFIG_HOTPLUG_CPU
360 /* State of each CPU during hotplug phases */ 360 /* State of each CPU during hotplug phases */
361 static DEFINE_PER_CPU(int, cpu_state) = { 0 }; 361 static DEFINE_PER_CPU(int, cpu_state) = { 0 };
362 362
363 int generic_cpu_disable(void) 363 int generic_cpu_disable(void)
364 { 364 {
365 unsigned int cpu = smp_processor_id(); 365 unsigned int cpu = smp_processor_id();
366 366
367 if (cpu == boot_cpuid) 367 if (cpu == boot_cpuid)
368 return -EBUSY; 368 return -EBUSY;
369 369
370 set_cpu_online(cpu, false); 370 set_cpu_online(cpu, false);
371 #ifdef CONFIG_PPC64 371 #ifdef CONFIG_PPC64
372 vdso_data->processorCount--; 372 vdso_data->processorCount--;
373 #endif 373 #endif
374 migrate_irqs(); 374 migrate_irqs();
375 return 0; 375 return 0;
376 } 376 }
377 377
378 void generic_cpu_die(unsigned int cpu) 378 void generic_cpu_die(unsigned int cpu)
379 { 379 {
380 int i; 380 int i;
381 381
382 for (i = 0; i < 100; i++) { 382 for (i = 0; i < 100; i++) {
383 smp_rmb(); 383 smp_rmb();
384 if (per_cpu(cpu_state, cpu) == CPU_DEAD) 384 if (per_cpu(cpu_state, cpu) == CPU_DEAD)
385 return; 385 return;
386 msleep(100); 386 msleep(100);
387 } 387 }
388 printk(KERN_ERR "CPU%d didn't die...\n", cpu); 388 printk(KERN_ERR "CPU%d didn't die...\n", cpu);
389 } 389 }
390 390
391 void generic_mach_cpu_die(void) 391 void generic_mach_cpu_die(void)
392 { 392 {
393 unsigned int cpu; 393 unsigned int cpu;
394 394
395 local_irq_disable(); 395 local_irq_disable();
396 idle_task_exit(); 396 idle_task_exit();
397 cpu = smp_processor_id(); 397 cpu = smp_processor_id();
398 printk(KERN_DEBUG "CPU%d offline\n", cpu); 398 printk(KERN_DEBUG "CPU%d offline\n", cpu);
399 __get_cpu_var(cpu_state) = CPU_DEAD; 399 __get_cpu_var(cpu_state) = CPU_DEAD;
400 smp_wmb(); 400 smp_wmb();
401 while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) 401 while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
402 cpu_relax(); 402 cpu_relax();
403 } 403 }
404 404
405 void generic_set_cpu_dead(unsigned int cpu) 405 void generic_set_cpu_dead(unsigned int cpu)
406 { 406 {
407 per_cpu(cpu_state, cpu) = CPU_DEAD; 407 per_cpu(cpu_state, cpu) = CPU_DEAD;
408 } 408 }
409 #endif 409 #endif
410 410
411 struct create_idle { 411 struct create_idle {
412 struct work_struct work; 412 struct work_struct work;
413 struct task_struct *idle; 413 struct task_struct *idle;
414 struct completion done; 414 struct completion done;
415 int cpu; 415 int cpu;
416 }; 416 };
417 417
418 static void __cpuinit do_fork_idle(struct work_struct *work) 418 static void __cpuinit do_fork_idle(struct work_struct *work)
419 { 419 {
420 struct create_idle *c_idle = 420 struct create_idle *c_idle =
421 container_of(work, struct create_idle, work); 421 container_of(work, struct create_idle, work);
422 422
423 c_idle->idle = fork_idle(c_idle->cpu); 423 c_idle->idle = fork_idle(c_idle->cpu);
424 complete(&c_idle->done); 424 complete(&c_idle->done);
425 } 425 }
426 426
427 static int __cpuinit create_idle(unsigned int cpu) 427 static int __cpuinit create_idle(unsigned int cpu)
428 { 428 {
429 struct thread_info *ti; 429 struct thread_info *ti;
430 struct create_idle c_idle = { 430 struct create_idle c_idle = {
431 .cpu = cpu, 431 .cpu = cpu,
432 .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), 432 .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
433 }; 433 };
434 INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); 434 INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
435 435
436 c_idle.idle = get_idle_for_cpu(cpu); 436 c_idle.idle = get_idle_for_cpu(cpu);
437 437
438 /* We can't use kernel_thread since we must avoid 438 /* We can't use kernel_thread since we must avoid
439 * rescheduling the child. We use a workqueue because 439 * rescheduling the child. We use a workqueue because
440 * we want to fork from a kernel thread, not whatever 440 * we want to fork from a kernel thread, not whatever
441 * userspace process happens to be trying to online us. 441 * userspace process happens to be trying to online us.
442 */ 442 */
443 if (!c_idle.idle) { 443 if (!c_idle.idle) {
444 schedule_work(&c_idle.work); 444 schedule_work(&c_idle.work);
445 wait_for_completion(&c_idle.done); 445 wait_for_completion(&c_idle.done);
446 } else 446 } else
447 init_idle(c_idle.idle, cpu); 447 init_idle(c_idle.idle, cpu);
448 if (IS_ERR(c_idle.idle)) { 448 if (IS_ERR(c_idle.idle)) {
449 pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle)); 449 pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle));
450 return PTR_ERR(c_idle.idle); 450 return PTR_ERR(c_idle.idle);
451 } 451 }
452 ti = task_thread_info(c_idle.idle); 452 ti = task_thread_info(c_idle.idle);
453 453
454 #ifdef CONFIG_PPC64 454 #ifdef CONFIG_PPC64
455 paca[cpu].__current = c_idle.idle; 455 paca[cpu].__current = c_idle.idle;
456 paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD; 456 paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
457 #endif 457 #endif
458 ti->cpu = cpu; 458 ti->cpu = cpu;
459 current_set[cpu] = ti; 459 current_set[cpu] = ti;
460 460
461 return 0; 461 return 0;
462 } 462 }
463 463
464 int __cpuinit __cpu_up(unsigned int cpu) 464 int __cpuinit __cpu_up(unsigned int cpu)
465 { 465 {
466 int rc, c; 466 int rc, c;
467 467
468 if (smp_ops == NULL || 468 if (smp_ops == NULL ||
469 (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) 469 (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
470 return -EINVAL; 470 return -EINVAL;
471 471
472 /* Make sure we have an idle thread */ 472 /* Make sure we have an idle thread */
473 rc = create_idle(cpu); 473 rc = create_idle(cpu);
474 if (rc) 474 if (rc)
475 return rc; 475 return rc;
476 476
477 secondary_ti = current_set[cpu]; 477 secondary_ti = current_set[cpu];
478 478
479 /* Make sure callin-map entry is 0 (can be left over from a CPU 479 /* Make sure callin-map entry is 0 (can be left over from a CPU
480 * hotplug) 480 * hotplug)
481 */ 481 */
482 cpu_callin_map[cpu] = 0; 482 cpu_callin_map[cpu] = 0;
483 483
484 /* The information for processor bringup must 484 /* The information for processor bringup must
485 * be written out to main store before we release 485 * be written out to main store before we release
486 * the processor. 486 * the processor.
487 */ 487 */
488 smp_mb(); 488 smp_mb();
489 489
490 /* wake up cpus */ 490 /* wake up cpus */
491 DBG("smp: kicking cpu %d\n", cpu); 491 DBG("smp: kicking cpu %d\n", cpu);
492 rc = smp_ops->kick_cpu(cpu); 492 rc = smp_ops->kick_cpu(cpu);
493 if (rc) { 493 if (rc) {
494 pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc); 494 pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
495 return rc; 495 return rc;
496 } 496 }
497 497
498 /* 498 /*
499 * Wait to see if the cpu made a callin (i.e. is actually up). 499 * Wait to see if the cpu made a callin (i.e. is actually up).
500 * Use this value that I found through experimentation. 500 * Use this value that I found through experimentation.
501 * -- Cort 501 * -- Cort
502 */ 502 */
503 if (system_state < SYSTEM_RUNNING) 503 if (system_state < SYSTEM_RUNNING)
504 for (c = 50000; c && !cpu_callin_map[cpu]; c--) 504 for (c = 50000; c && !cpu_callin_map[cpu]; c--)
505 udelay(100); 505 udelay(100);
506 #ifdef CONFIG_HOTPLUG_CPU 506 #ifdef CONFIG_HOTPLUG_CPU
507 else 507 else
508 /* 508 /*
509 * CPUs can take much longer to come up in the 509 * CPUs can take much longer to come up in the
510 * hotplug case. Wait five seconds. 510 * hotplug case. Wait five seconds.
511 */ 511 */
512 for (c = 5000; c && !cpu_callin_map[cpu]; c--) 512 for (c = 5000; c && !cpu_callin_map[cpu]; c--)
513 msleep(1); 513 msleep(1);
514 #endif 514 #endif
515 515
516 if (!cpu_callin_map[cpu]) { 516 if (!cpu_callin_map[cpu]) {
517 printk(KERN_ERR "Processor %u is stuck.\n", cpu); 517 printk(KERN_ERR "Processor %u is stuck.\n", cpu);
518 return -ENOENT; 518 return -ENOENT;
519 } 519 }
520 520
521 DBG("Processor %u found.\n", cpu); 521 DBG("Processor %u found.\n", cpu);
522 522
523 if (smp_ops->give_timebase) 523 if (smp_ops->give_timebase)
524 smp_ops->give_timebase(); 524 smp_ops->give_timebase();
525 525
526 /* Wait until cpu puts itself in the online map */ 526 /* Wait until cpu puts itself in the online map */
527 while (!cpu_online(cpu)) 527 while (!cpu_online(cpu))
528 cpu_relax(); 528 cpu_relax();
529 529
530 return 0; 530 return 0;
531 } 531 }
532 532
533 /* Return the value of the reg property corresponding to the given 533 /* Return the value of the reg property corresponding to the given
534 * logical cpu. 534 * logical cpu.
535 */ 535 */
536 int cpu_to_core_id(int cpu) 536 int cpu_to_core_id(int cpu)
537 { 537 {
538 struct device_node *np; 538 struct device_node *np;
539 const int *reg; 539 const int *reg;
540 int id = -1; 540 int id = -1;
541 541
542 np = of_get_cpu_node(cpu, NULL); 542 np = of_get_cpu_node(cpu, NULL);
543 if (!np) 543 if (!np)
544 goto out; 544 goto out;
545 545
546 reg = of_get_property(np, "reg", NULL); 546 reg = of_get_property(np, "reg", NULL);
547 if (!reg) 547 if (!reg)
548 goto out; 548 goto out;
549 549
550 id = *reg; 550 id = *reg;
551 out: 551 out:
552 of_node_put(np); 552 of_node_put(np);
553 return id; 553 return id;
554 } 554 }
555 555
556 /* Helper routines for cpu to core mapping */ 556 /* Helper routines for cpu to core mapping */
557 int cpu_core_index_of_thread(int cpu) 557 int cpu_core_index_of_thread(int cpu)
558 { 558 {
559 return cpu >> threads_shift; 559 return cpu >> threads_shift;
560 } 560 }
561 EXPORT_SYMBOL_GPL(cpu_core_index_of_thread); 561 EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
562 562
563 int cpu_first_thread_of_core(int core) 563 int cpu_first_thread_of_core(int core)
564 { 564 {
565 return core << threads_shift; 565 return core << threads_shift;
566 } 566 }
567 EXPORT_SYMBOL_GPL(cpu_first_thread_of_core); 567 EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
568 568
569 /* Must be called when no change can occur to cpu_present_mask, 569 /* Must be called when no change can occur to cpu_present_mask,
570 * i.e. during cpu online or offline. 570 * i.e. during cpu online or offline.
571 */ 571 */
572 static struct device_node *cpu_to_l2cache(int cpu) 572 static struct device_node *cpu_to_l2cache(int cpu)
573 { 573 {
574 struct device_node *np; 574 struct device_node *np;
575 struct device_node *cache; 575 struct device_node *cache;
576 576
577 if (!cpu_present(cpu)) 577 if (!cpu_present(cpu))
578 return NULL; 578 return NULL;
579 579
580 np = of_get_cpu_node(cpu, NULL); 580 np = of_get_cpu_node(cpu, NULL);
581 if (np == NULL) 581 if (np == NULL)
582 return NULL; 582 return NULL;
583 583
584 cache = of_find_next_cache_node(np); 584 cache = of_find_next_cache_node(np);
585 585
586 of_node_put(np); 586 of_node_put(np);
587 587
588 return cache; 588 return cache;
589 } 589 }
590 590
591 /* Activate a secondary processor. */ 591 /* Activate a secondary processor. */
592 void __devinit start_secondary(void *unused) 592 void __devinit start_secondary(void *unused)
593 { 593 {
594 unsigned int cpu = smp_processor_id(); 594 unsigned int cpu = smp_processor_id();
595 struct device_node *l2_cache; 595 struct device_node *l2_cache;
596 int i, base; 596 int i, base;
597 597
598 atomic_inc(&init_mm.mm_count); 598 atomic_inc(&init_mm.mm_count);
599 current->active_mm = &init_mm; 599 current->active_mm = &init_mm;
600 600
601 smp_store_cpu_info(cpu); 601 smp_store_cpu_info(cpu);
602 set_dec(tb_ticks_per_jiffy); 602 set_dec(tb_ticks_per_jiffy);
603 preempt_disable(); 603 preempt_disable();
604 cpu_callin_map[cpu] = 1; 604 cpu_callin_map[cpu] = 1;
605 605
606 if (smp_ops->setup_cpu) 606 if (smp_ops->setup_cpu)
607 smp_ops->setup_cpu(cpu); 607 smp_ops->setup_cpu(cpu);
608 if (smp_ops->take_timebase) 608 if (smp_ops->take_timebase)
609 smp_ops->take_timebase(); 609 smp_ops->take_timebase();
610 610
611 secondary_cpu_time_init(); 611 secondary_cpu_time_init();
612 612
613 #ifdef CONFIG_PPC64 613 #ifdef CONFIG_PPC64
614 if (system_state == SYSTEM_RUNNING) 614 if (system_state == SYSTEM_RUNNING)
615 vdso_data->processorCount++; 615 vdso_data->processorCount++;
616 #endif 616 #endif
617 ipi_call_lock(); 617 ipi_call_lock();
618 notify_cpu_starting(cpu); 618 notify_cpu_starting(cpu);
619 set_cpu_online(cpu, true); 619 set_cpu_online(cpu, true);
620 /* Update sibling maps */ 620 /* Update sibling maps */
621 base = cpu_first_thread_sibling(cpu); 621 base = cpu_first_thread_sibling(cpu);
622 for (i = 0; i < threads_per_core; i++) { 622 for (i = 0; i < threads_per_core; i++) {
623 if (cpu_is_offline(base + i)) 623 if (cpu_is_offline(base + i))
624 continue; 624 continue;
625 cpumask_set_cpu(cpu, cpu_sibling_mask(base + i)); 625 cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
626 cpumask_set_cpu(base + i, cpu_sibling_mask(cpu)); 626 cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));
627 627
628 /* cpu_core_map should be a superset of 628 /* cpu_core_map should be a superset of
629 * cpu_sibling_map even if we don't have cache 629 * cpu_sibling_map even if we don't have cache
630 * information, so update the former here, too. 630 * information, so update the former here, too.
631 */ 631 */
632 cpumask_set_cpu(cpu, cpu_core_mask(base + i)); 632 cpumask_set_cpu(cpu, cpu_core_mask(base + i));
633 cpumask_set_cpu(base + i, cpu_core_mask(cpu)); 633 cpumask_set_cpu(base + i, cpu_core_mask(cpu));
634 } 634 }
635 l2_cache = cpu_to_l2cache(cpu); 635 l2_cache = cpu_to_l2cache(cpu);
636 for_each_online_cpu(i) { 636 for_each_online_cpu(i) {
637 struct device_node *np = cpu_to_l2cache(i); 637 struct device_node *np = cpu_to_l2cache(i);
638 if (!np) 638 if (!np)
639 continue; 639 continue;
640 if (np == l2_cache) { 640 if (np == l2_cache) {
641 cpumask_set_cpu(cpu, cpu_core_mask(i)); 641 cpumask_set_cpu(cpu, cpu_core_mask(i));
642 cpumask_set_cpu(i, cpu_core_mask(cpu)); 642 cpumask_set_cpu(i, cpu_core_mask(cpu));
643 } 643 }
644 of_node_put(np); 644 of_node_put(np);
645 } 645 }
646 of_node_put(l2_cache); 646 of_node_put(l2_cache);
647 ipi_call_unlock(); 647 ipi_call_unlock();
648 648
649 local_irq_enable(); 649 local_irq_enable();
650 650
651 cpu_idle(); 651 cpu_idle();
652 652
653 BUG(); 653 BUG();
654 } 654 }
655 655
656 int setup_profiling_timer(unsigned int multiplier) 656 int setup_profiling_timer(unsigned int multiplier)
657 { 657 {
658 return 0; 658 return 0;
659 } 659 }
660 660
661 void __init smp_cpus_done(unsigned int max_cpus) 661 void __init smp_cpus_done(unsigned int max_cpus)
662 { 662 {
663 cpumask_var_t old_mask; 663 cpumask_var_t old_mask;
664 664
665 /* We want the setup_cpu() here to be called from CPU 0, but our 665 /* We want the setup_cpu() here to be called from CPU 0, but our
666 * init thread may have been "borrowed" by another CPU in the meantime 666 * init thread may have been "borrowed" by another CPU in the meantime
667 * so we pin ourselves down to CPU 0 for a short while 667 * so we pin ourselves down to CPU 0 for a short while
668 */ 668 */
669 alloc_cpumask_var(&old_mask, GFP_NOWAIT); 669 alloc_cpumask_var(&old_mask, GFP_NOWAIT);
670 cpumask_copy(old_mask, tsk_cpus_allowed(current)); 670 cpumask_copy(old_mask, tsk_cpus_allowed(current));
671 set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid)); 671 set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
672 672
673 if (smp_ops && smp_ops->setup_cpu) 673 if (smp_ops && smp_ops->setup_cpu)
674 smp_ops->setup_cpu(boot_cpuid); 674 smp_ops->setup_cpu(boot_cpuid);
675 675
676 set_cpus_allowed_ptr(current, old_mask); 676 set_cpus_allowed_ptr(current, old_mask);
677 677
678 free_cpumask_var(old_mask); 678 free_cpumask_var(old_mask);
679 679
680 if (smp_ops && smp_ops->bringup_done) 680 if (smp_ops && smp_ops->bringup_done)
681 smp_ops->bringup_done(); 681 smp_ops->bringup_done();
682 682
683 dump_numa_cpu_topology(); 683 dump_numa_cpu_topology();
684 684
685 } 685 }
686 686
687 int arch_sd_sibling_asym_packing(void) 687 int arch_sd_sibling_asym_packing(void)
688 { 688 {
689 if (cpu_has_feature(CPU_FTR_ASYM_SMT)) { 689 if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
690 printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n"); 690 printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
691 return SD_ASYM_PACKING; 691 return SD_ASYM_PACKING;
692 } 692 }
693 return 0; 693 return 0;
694 } 694 }
695 695
696 #ifdef CONFIG_HOTPLUG_CPU 696 #ifdef CONFIG_HOTPLUG_CPU
697 int __cpu_disable(void) 697 int __cpu_disable(void)
698 { 698 {
699 struct device_node *l2_cache; 699 struct device_node *l2_cache;
700 int cpu = smp_processor_id(); 700 int cpu = smp_processor_id();
701 int base, i; 701 int base, i;
702 int err; 702 int err;
703 703
704 if (!smp_ops->cpu_disable) 704 if (!smp_ops->cpu_disable)
705 return -ENOSYS; 705 return -ENOSYS;
706 706
707 err = smp_ops->cpu_disable(); 707 err = smp_ops->cpu_disable();
708 if (err) 708 if (err)
709 return err; 709 return err;
710 710
711 /* Update sibling maps */ 711 /* Update sibling maps */
712 base = cpu_first_thread_sibling(cpu); 712 base = cpu_first_thread_sibling(cpu);
713 for (i = 0; i < threads_per_core; i++) { 713 for (i = 0; i < threads_per_core; i++) {
714 cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i)); 714 cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
715 cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu)); 715 cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
716 cpumask_clear_cpu(cpu, cpu_core_mask(base + i)); 716 cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
717 cpumask_clear_cpu(base + i, cpu_core_mask(cpu)); 717 cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
718 } 718 }
719 719
720 l2_cache = cpu_to_l2cache(cpu); 720 l2_cache = cpu_to_l2cache(cpu);
721 for_each_present_cpu(i) { 721 for_each_present_cpu(i) {
722 struct device_node *np = cpu_to_l2cache(i); 722 struct device_node *np = cpu_to_l2cache(i);
723 if (!np) 723 if (!np)
724 continue; 724 continue;
725 if (np == l2_cache) { 725 if (np == l2_cache) {
726 cpumask_clear_cpu(cpu, cpu_core_mask(i)); 726 cpumask_clear_cpu(cpu, cpu_core_mask(i));
727 cpumask_clear_cpu(i, cpu_core_mask(cpu)); 727 cpumask_clear_cpu(i, cpu_core_mask(cpu));
728 } 728 }
729 of_node_put(np); 729 of_node_put(np);
730 } 730 }
731 of_node_put(l2_cache); 731 of_node_put(l2_cache);
732 732
733 733
734 return 0; 734 return 0;
735 } 735 }
736 736
737 void __cpu_die(unsigned int cpu) 737 void __cpu_die(unsigned int cpu)
738 { 738 {
739 if (smp_ops->cpu_die) 739 if (smp_ops->cpu_die)
740 smp_ops->cpu_die(cpu); 740 smp_ops->cpu_die(cpu);
741 } 741 }
742 742
743 static DEFINE_MUTEX(powerpc_cpu_hotplug_driver_mutex); 743 static DEFINE_MUTEX(powerpc_cpu_hotplug_driver_mutex);
744 744
745 void cpu_hotplug_driver_lock(void) 745 void cpu_hotplug_driver_lock(void)
746 { 746 {
747 mutex_lock(&powerpc_cpu_hotplug_driver_mutex); 747 mutex_lock(&powerpc_cpu_hotplug_driver_mutex);
748 } 748 }
749 749
750 void cpu_hotplug_driver_unlock(void) 750 void cpu_hotplug_driver_unlock(void)
751 { 751 {
752 mutex_unlock(&powerpc_cpu_hotplug_driver_mutex); 752 mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
753 } 753 }
754 754
755 void cpu_die(void) 755 void cpu_die(void)
756 { 756 {
757 if (ppc_md.cpu_die) 757 if (ppc_md.cpu_die)
758 ppc_md.cpu_die(); 758 ppc_md.cpu_die();
759 759
760 /* If we return, we re-enter start_secondary */ 760 /* If we return, we re-enter start_secondary */
761 start_secondary_resume(); 761 start_secondary_resume();
762 } 762 }
763 763
764 #endif 764 #endif
765 765
arch/powerpc/kernel/stacktrace.c
1 /* 1 /*
2 * Stack trace utility 2 * Stack trace utility
3 * 3 *
4 * Copyright 2008 Christoph Hellwig, IBM Corp. 4 * Copyright 2008 Christoph Hellwig, IBM Corp.
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 */ 11 */
12 12
13 #include <linux/module.h> 13 #include <linux/export.h>
14 #include <linux/sched.h> 14 #include <linux/sched.h>
15 #include <linux/stacktrace.h> 15 #include <linux/stacktrace.h>
16 #include <asm/ptrace.h> 16 #include <asm/ptrace.h>
17 #include <asm/processor.h> 17 #include <asm/processor.h>
18 18
19 /* 19 /*
20 * Save stack-backtrace addresses into a stack_trace buffer. 20 * Save stack-backtrace addresses into a stack_trace buffer.
21 */ 21 */
22 static void save_context_stack(struct stack_trace *trace, unsigned long sp, 22 static void save_context_stack(struct stack_trace *trace, unsigned long sp,
23 struct task_struct *tsk, int savesched) 23 struct task_struct *tsk, int savesched)
24 { 24 {
25 for (;;) { 25 for (;;) {
26 unsigned long *stack = (unsigned long *) sp; 26 unsigned long *stack = (unsigned long *) sp;
27 unsigned long newsp, ip; 27 unsigned long newsp, ip;
28 28
29 if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD)) 29 if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
30 return; 30 return;
31 31
32 newsp = stack[0]; 32 newsp = stack[0];
33 ip = stack[STACK_FRAME_LR_SAVE]; 33 ip = stack[STACK_FRAME_LR_SAVE];
34 34
35 if (savesched || !in_sched_functions(ip)) { 35 if (savesched || !in_sched_functions(ip)) {
36 if (!trace->skip) 36 if (!trace->skip)
37 trace->entries[trace->nr_entries++] = ip; 37 trace->entries[trace->nr_entries++] = ip;
38 else 38 else
39 trace->skip--; 39 trace->skip--;
40 } 40 }
41 41
42 if (trace->nr_entries >= trace->max_entries) 42 if (trace->nr_entries >= trace->max_entries)
43 return; 43 return;
44 44
45 sp = newsp; 45 sp = newsp;
46 } 46 }
47 } 47 }
48 48
49 void save_stack_trace(struct stack_trace *trace) 49 void save_stack_trace(struct stack_trace *trace)
50 { 50 {
51 unsigned long sp; 51 unsigned long sp;
52 52
53 asm("mr %0,1" : "=r" (sp)); 53 asm("mr %0,1" : "=r" (sp));
54 54
55 save_context_stack(trace, sp, current, 1); 55 save_context_stack(trace, sp, current, 1);
56 } 56 }
57 EXPORT_SYMBOL_GPL(save_stack_trace); 57 EXPORT_SYMBOL_GPL(save_stack_trace);
58 58
59 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) 59 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
60 { 60 {
61 save_context_stack(trace, tsk->thread.ksp, tsk, 0); 61 save_context_stack(trace, tsk->thread.ksp, tsk, 0);
62 } 62 }
63 EXPORT_SYMBOL_GPL(save_stack_trace_tsk); 63 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
64 64
arch/powerpc/kernel/sysfs.c
1 #include <linux/sysdev.h> 1 #include <linux/sysdev.h>
2 #include <linux/cpu.h> 2 #include <linux/cpu.h>
3 #include <linux/smp.h> 3 #include <linux/smp.h>
4 #include <linux/percpu.h> 4 #include <linux/percpu.h>
5 #include <linux/init.h> 5 #include <linux/init.h>
6 #include <linux/sched.h> 6 #include <linux/sched.h>
7 #include <linux/module.h> 7 #include <linux/export.h>
8 #include <linux/nodemask.h> 8 #include <linux/nodemask.h>
9 #include <linux/cpumask.h> 9 #include <linux/cpumask.h>
10 #include <linux/notifier.h> 10 #include <linux/notifier.h>
11 11
12 #include <asm/current.h> 12 #include <asm/current.h>
13 #include <asm/processor.h> 13 #include <asm/processor.h>
14 #include <asm/cputable.h> 14 #include <asm/cputable.h>
15 #include <asm/firmware.h> 15 #include <asm/firmware.h>
16 #include <asm/hvcall.h> 16 #include <asm/hvcall.h>
17 #include <asm/prom.h> 17 #include <asm/prom.h>
18 #include <asm/machdep.h> 18 #include <asm/machdep.h>
19 #include <asm/smp.h> 19 #include <asm/smp.h>
20 #include <asm/pmc.h> 20 #include <asm/pmc.h>
21 21
22 #include "cacheinfo.h" 22 #include "cacheinfo.h"
23 23
24 #ifdef CONFIG_PPC64 24 #ifdef CONFIG_PPC64
25 #include <asm/paca.h> 25 #include <asm/paca.h>
26 #include <asm/lppaca.h> 26 #include <asm/lppaca.h>
27 #endif 27 #endif
28 28
29 static DEFINE_PER_CPU(struct cpu, cpu_devices); 29 static DEFINE_PER_CPU(struct cpu, cpu_devices);
30 30
31 /* 31 /*
32 * SMT snooze delay stuff, 64-bit only for now 32 * SMT snooze delay stuff, 64-bit only for now
33 */ 33 */
34 34
35 #ifdef CONFIG_PPC64 35 #ifdef CONFIG_PPC64
36 36
37 /* Time in microseconds we delay before sleeping in the idle loop */ 37 /* Time in microseconds we delay before sleeping in the idle loop */
38 DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 }; 38 DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };
39 39
40 static ssize_t store_smt_snooze_delay(struct sys_device *dev, 40 static ssize_t store_smt_snooze_delay(struct sys_device *dev,
41 struct sysdev_attribute *attr, 41 struct sysdev_attribute *attr,
42 const char *buf, 42 const char *buf,
43 size_t count) 43 size_t count)
44 { 44 {
45 struct cpu *cpu = container_of(dev, struct cpu, sysdev); 45 struct cpu *cpu = container_of(dev, struct cpu, sysdev);
46 ssize_t ret; 46 ssize_t ret;
47 long snooze; 47 long snooze;
48 48
49 ret = sscanf(buf, "%ld", &snooze); 49 ret = sscanf(buf, "%ld", &snooze);
50 if (ret != 1) 50 if (ret != 1)
51 return -EINVAL; 51 return -EINVAL;
52 52
53 per_cpu(smt_snooze_delay, cpu->sysdev.id) = snooze; 53 per_cpu(smt_snooze_delay, cpu->sysdev.id) = snooze;
54 54
55 return count; 55 return count;
56 } 56 }
57 57
58 static ssize_t show_smt_snooze_delay(struct sys_device *dev, 58 static ssize_t show_smt_snooze_delay(struct sys_device *dev,
59 struct sysdev_attribute *attr, 59 struct sysdev_attribute *attr,
60 char *buf) 60 char *buf)
61 { 61 {
62 struct cpu *cpu = container_of(dev, struct cpu, sysdev); 62 struct cpu *cpu = container_of(dev, struct cpu, sysdev);
63 63
64 return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->sysdev.id)); 64 return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->sysdev.id));
65 } 65 }
66 66
67 static SYSDEV_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay, 67 static SYSDEV_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
68 store_smt_snooze_delay); 68 store_smt_snooze_delay);
69 69
70 static int __init setup_smt_snooze_delay(char *str) 70 static int __init setup_smt_snooze_delay(char *str)
71 { 71 {
72 unsigned int cpu; 72 unsigned int cpu;
73 long snooze; 73 long snooze;
74 74
75 if (!cpu_has_feature(CPU_FTR_SMT)) 75 if (!cpu_has_feature(CPU_FTR_SMT))
76 return 1; 76 return 1;
77 77
78 snooze = simple_strtol(str, NULL, 10); 78 snooze = simple_strtol(str, NULL, 10);
79 for_each_possible_cpu(cpu) 79 for_each_possible_cpu(cpu)
80 per_cpu(smt_snooze_delay, cpu) = snooze; 80 per_cpu(smt_snooze_delay, cpu) = snooze;
81 81
82 return 1; 82 return 1;
83 } 83 }
84 __setup("smt-snooze-delay=", setup_smt_snooze_delay); 84 __setup("smt-snooze-delay=", setup_smt_snooze_delay);
85 85
86 #endif /* CONFIG_PPC64 */ 86 #endif /* CONFIG_PPC64 */
87 87
88 /* 88 /*
89 * Enabling PMCs will slow partition context switch times so we only do 89 * Enabling PMCs will slow partition context switch times so we only do
90 * it the first time we write to the PMCs. 90 * it the first time we write to the PMCs.
91 */ 91 */
92 92
93 static DEFINE_PER_CPU(char, pmcs_enabled); 93 static DEFINE_PER_CPU(char, pmcs_enabled);
94 94
95 void ppc_enable_pmcs(void) 95 void ppc_enable_pmcs(void)
96 { 96 {
97 ppc_set_pmu_inuse(1); 97 ppc_set_pmu_inuse(1);
98 98
99 /* Only need to enable them once */ 99 /* Only need to enable them once */
100 if (__get_cpu_var(pmcs_enabled)) 100 if (__get_cpu_var(pmcs_enabled))
101 return; 101 return;
102 102
103 __get_cpu_var(pmcs_enabled) = 1; 103 __get_cpu_var(pmcs_enabled) = 1;
104 104
105 if (ppc_md.enable_pmcs) 105 if (ppc_md.enable_pmcs)
106 ppc_md.enable_pmcs(); 106 ppc_md.enable_pmcs();
107 } 107 }
108 EXPORT_SYMBOL(ppc_enable_pmcs); 108 EXPORT_SYMBOL(ppc_enable_pmcs);
109 109
110 #define SYSFS_PMCSETUP(NAME, ADDRESS) \ 110 #define SYSFS_PMCSETUP(NAME, ADDRESS) \
111 static void read_##NAME(void *val) \ 111 static void read_##NAME(void *val) \
112 { \ 112 { \
113 *(unsigned long *)val = mfspr(ADDRESS); \ 113 *(unsigned long *)val = mfspr(ADDRESS); \
114 } \ 114 } \
115 static void write_##NAME(void *val) \ 115 static void write_##NAME(void *val) \
116 { \ 116 { \
117 ppc_enable_pmcs(); \ 117 ppc_enable_pmcs(); \
118 mtspr(ADDRESS, *(unsigned long *)val); \ 118 mtspr(ADDRESS, *(unsigned long *)val); \
119 } \ 119 } \
120 static ssize_t show_##NAME(struct sys_device *dev, \ 120 static ssize_t show_##NAME(struct sys_device *dev, \
121 struct sysdev_attribute *attr, \ 121 struct sysdev_attribute *attr, \
122 char *buf) \ 122 char *buf) \
123 { \ 123 { \
124 struct cpu *cpu = container_of(dev, struct cpu, sysdev); \ 124 struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
125 unsigned long val; \ 125 unsigned long val; \
126 smp_call_function_single(cpu->sysdev.id, read_##NAME, &val, 1); \ 126 smp_call_function_single(cpu->sysdev.id, read_##NAME, &val, 1); \
127 return sprintf(buf, "%lx\n", val); \ 127 return sprintf(buf, "%lx\n", val); \
128 } \ 128 } \
129 static ssize_t __used \ 129 static ssize_t __used \
130 store_##NAME(struct sys_device *dev, struct sysdev_attribute *attr, \ 130 store_##NAME(struct sys_device *dev, struct sysdev_attribute *attr, \
131 const char *buf, size_t count) \ 131 const char *buf, size_t count) \
132 { \ 132 { \
133 struct cpu *cpu = container_of(dev, struct cpu, sysdev); \ 133 struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
134 unsigned long val; \ 134 unsigned long val; \
135 int ret = sscanf(buf, "%lx", &val); \ 135 int ret = sscanf(buf, "%lx", &val); \
136 if (ret != 1) \ 136 if (ret != 1) \
137 return -EINVAL; \ 137 return -EINVAL; \
138 smp_call_function_single(cpu->sysdev.id, write_##NAME, &val, 1); \ 138 smp_call_function_single(cpu->sysdev.id, write_##NAME, &val, 1); \
139 return count; \ 139 return count; \
140 } 140 }
141 141
142 142
143 /* Let's define all possible registers; we'll only hook up the ones 143 /* Let's define all possible registers; we'll only hook up the ones
144 * that are implemented on the current processor 144 * that are implemented on the current processor
145 */ 145 */
146 146
147 #if defined(CONFIG_PPC64) 147 #if defined(CONFIG_PPC64)
148 #define HAS_PPC_PMC_CLASSIC 1 148 #define HAS_PPC_PMC_CLASSIC 1
149 #define HAS_PPC_PMC_IBM 1 149 #define HAS_PPC_PMC_IBM 1
150 #define HAS_PPC_PMC_PA6T 1 150 #define HAS_PPC_PMC_PA6T 1
151 #elif defined(CONFIG_6xx) 151 #elif defined(CONFIG_6xx)
152 #define HAS_PPC_PMC_CLASSIC 1 152 #define HAS_PPC_PMC_CLASSIC 1
153 #define HAS_PPC_PMC_IBM 1 153 #define HAS_PPC_PMC_IBM 1
154 #define HAS_PPC_PMC_G4 1 154 #define HAS_PPC_PMC_G4 1
155 #endif 155 #endif
156 156
157 157
158 #ifdef HAS_PPC_PMC_CLASSIC 158 #ifdef HAS_PPC_PMC_CLASSIC
159 SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0); 159 SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0);
160 SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1); 160 SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1);
161 SYSFS_PMCSETUP(pmc1, SPRN_PMC1); 161 SYSFS_PMCSETUP(pmc1, SPRN_PMC1);
162 SYSFS_PMCSETUP(pmc2, SPRN_PMC2); 162 SYSFS_PMCSETUP(pmc2, SPRN_PMC2);
163 SYSFS_PMCSETUP(pmc3, SPRN_PMC3); 163 SYSFS_PMCSETUP(pmc3, SPRN_PMC3);
164 SYSFS_PMCSETUP(pmc4, SPRN_PMC4); 164 SYSFS_PMCSETUP(pmc4, SPRN_PMC4);
165 SYSFS_PMCSETUP(pmc5, SPRN_PMC5); 165 SYSFS_PMCSETUP(pmc5, SPRN_PMC5);
166 SYSFS_PMCSETUP(pmc6, SPRN_PMC6); 166 SYSFS_PMCSETUP(pmc6, SPRN_PMC6);
167 167
168 #ifdef HAS_PPC_PMC_G4 168 #ifdef HAS_PPC_PMC_G4
169 SYSFS_PMCSETUP(mmcr2, SPRN_MMCR2); 169 SYSFS_PMCSETUP(mmcr2, SPRN_MMCR2);
170 #endif 170 #endif
171 171
172 #ifdef CONFIG_PPC64 172 #ifdef CONFIG_PPC64
173 SYSFS_PMCSETUP(pmc7, SPRN_PMC7); 173 SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
174 SYSFS_PMCSETUP(pmc8, SPRN_PMC8); 174 SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
175 175
176 SYSFS_PMCSETUP(mmcra, SPRN_MMCRA); 176 SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
177 SYSFS_PMCSETUP(purr, SPRN_PURR); 177 SYSFS_PMCSETUP(purr, SPRN_PURR);
178 SYSFS_PMCSETUP(spurr, SPRN_SPURR); 178 SYSFS_PMCSETUP(spurr, SPRN_SPURR);
179 SYSFS_PMCSETUP(dscr, SPRN_DSCR); 179 SYSFS_PMCSETUP(dscr, SPRN_DSCR);
180 180
181 static SYSDEV_ATTR(mmcra, 0600, show_mmcra, store_mmcra); 181 static SYSDEV_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
182 static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL); 182 static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL);
183 static SYSDEV_ATTR(dscr, 0600, show_dscr, store_dscr); 183 static SYSDEV_ATTR(dscr, 0600, show_dscr, store_dscr);
184 static SYSDEV_ATTR(purr, 0600, show_purr, store_purr); 184 static SYSDEV_ATTR(purr, 0600, show_purr, store_purr);
185 185
186 unsigned long dscr_default = 0; 186 unsigned long dscr_default = 0;
187 EXPORT_SYMBOL(dscr_default); 187 EXPORT_SYMBOL(dscr_default);
188 188
189 static ssize_t show_dscr_default(struct sysdev_class *class, 189 static ssize_t show_dscr_default(struct sysdev_class *class,
190 struct sysdev_class_attribute *attr, char *buf) 190 struct sysdev_class_attribute *attr, char *buf)
191 { 191 {
192 return sprintf(buf, "%lx\n", dscr_default); 192 return sprintf(buf, "%lx\n", dscr_default);
193 } 193 }
194 194
195 static ssize_t __used store_dscr_default(struct sysdev_class *class, 195 static ssize_t __used store_dscr_default(struct sysdev_class *class,
196 struct sysdev_class_attribute *attr, const char *buf, 196 struct sysdev_class_attribute *attr, const char *buf,
197 size_t count) 197 size_t count)
198 { 198 {
199 unsigned long val; 199 unsigned long val;
200 int ret = 0; 200 int ret = 0;
201 201
202 ret = sscanf(buf, "%lx", &val); 202 ret = sscanf(buf, "%lx", &val);
203 if (ret != 1) 203 if (ret != 1)
204 return -EINVAL; 204 return -EINVAL;
205 dscr_default = val; 205 dscr_default = val;
206 206
207 return count; 207 return count;
208 } 208 }
209 209
210 static SYSDEV_CLASS_ATTR(dscr_default, 0600, 210 static SYSDEV_CLASS_ATTR(dscr_default, 0600,
211 show_dscr_default, store_dscr_default); 211 show_dscr_default, store_dscr_default);
212 212
213 static void sysfs_create_dscr_default(void) 213 static void sysfs_create_dscr_default(void)
214 { 214 {
215 int err = 0; 215 int err = 0;
216 if (cpu_has_feature(CPU_FTR_DSCR)) 216 if (cpu_has_feature(CPU_FTR_DSCR))
217 err = sysfs_create_file(&cpu_sysdev_class.kset.kobj, 217 err = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
218 &attr_dscr_default.attr); 218 &attr_dscr_default.attr);
219 } 219 }
220 #endif /* CONFIG_PPC64 */ 220 #endif /* CONFIG_PPC64 */
221 221
222 #ifdef HAS_PPC_PMC_PA6T 222 #ifdef HAS_PPC_PMC_PA6T
223 SYSFS_PMCSETUP(pa6t_pmc0, SPRN_PA6T_PMC0); 223 SYSFS_PMCSETUP(pa6t_pmc0, SPRN_PA6T_PMC0);
224 SYSFS_PMCSETUP(pa6t_pmc1, SPRN_PA6T_PMC1); 224 SYSFS_PMCSETUP(pa6t_pmc1, SPRN_PA6T_PMC1);
225 SYSFS_PMCSETUP(pa6t_pmc2, SPRN_PA6T_PMC2); 225 SYSFS_PMCSETUP(pa6t_pmc2, SPRN_PA6T_PMC2);
226 SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3); 226 SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3);
227 SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4); 227 SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4);
228 SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5); 228 SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5);
229 #ifdef CONFIG_DEBUG_KERNEL 229 #ifdef CONFIG_DEBUG_KERNEL
230 SYSFS_PMCSETUP(hid0, SPRN_HID0); 230 SYSFS_PMCSETUP(hid0, SPRN_HID0);
231 SYSFS_PMCSETUP(hid1, SPRN_HID1); 231 SYSFS_PMCSETUP(hid1, SPRN_HID1);
232 SYSFS_PMCSETUP(hid4, SPRN_HID4); 232 SYSFS_PMCSETUP(hid4, SPRN_HID4);
233 SYSFS_PMCSETUP(hid5, SPRN_HID5); 233 SYSFS_PMCSETUP(hid5, SPRN_HID5);
234 SYSFS_PMCSETUP(ima0, SPRN_PA6T_IMA0); 234 SYSFS_PMCSETUP(ima0, SPRN_PA6T_IMA0);
235 SYSFS_PMCSETUP(ima1, SPRN_PA6T_IMA1); 235 SYSFS_PMCSETUP(ima1, SPRN_PA6T_IMA1);
236 SYSFS_PMCSETUP(ima2, SPRN_PA6T_IMA2); 236 SYSFS_PMCSETUP(ima2, SPRN_PA6T_IMA2);
237 SYSFS_PMCSETUP(ima3, SPRN_PA6T_IMA3); 237 SYSFS_PMCSETUP(ima3, SPRN_PA6T_IMA3);
238 SYSFS_PMCSETUP(ima4, SPRN_PA6T_IMA4); 238 SYSFS_PMCSETUP(ima4, SPRN_PA6T_IMA4);
239 SYSFS_PMCSETUP(ima5, SPRN_PA6T_IMA5); 239 SYSFS_PMCSETUP(ima5, SPRN_PA6T_IMA5);
240 SYSFS_PMCSETUP(ima6, SPRN_PA6T_IMA6); 240 SYSFS_PMCSETUP(ima6, SPRN_PA6T_IMA6);
241 SYSFS_PMCSETUP(ima7, SPRN_PA6T_IMA7); 241 SYSFS_PMCSETUP(ima7, SPRN_PA6T_IMA7);
242 SYSFS_PMCSETUP(ima8, SPRN_PA6T_IMA8); 242 SYSFS_PMCSETUP(ima8, SPRN_PA6T_IMA8);
243 SYSFS_PMCSETUP(ima9, SPRN_PA6T_IMA9); 243 SYSFS_PMCSETUP(ima9, SPRN_PA6T_IMA9);
244 SYSFS_PMCSETUP(imaat, SPRN_PA6T_IMAAT); 244 SYSFS_PMCSETUP(imaat, SPRN_PA6T_IMAAT);
245 SYSFS_PMCSETUP(btcr, SPRN_PA6T_BTCR); 245 SYSFS_PMCSETUP(btcr, SPRN_PA6T_BTCR);
246 SYSFS_PMCSETUP(pccr, SPRN_PA6T_PCCR); 246 SYSFS_PMCSETUP(pccr, SPRN_PA6T_PCCR);
247 SYSFS_PMCSETUP(rpccr, SPRN_PA6T_RPCCR); 247 SYSFS_PMCSETUP(rpccr, SPRN_PA6T_RPCCR);
248 SYSFS_PMCSETUP(der, SPRN_PA6T_DER); 248 SYSFS_PMCSETUP(der, SPRN_PA6T_DER);
249 SYSFS_PMCSETUP(mer, SPRN_PA6T_MER); 249 SYSFS_PMCSETUP(mer, SPRN_PA6T_MER);
250 SYSFS_PMCSETUP(ber, SPRN_PA6T_BER); 250 SYSFS_PMCSETUP(ber, SPRN_PA6T_BER);
251 SYSFS_PMCSETUP(ier, SPRN_PA6T_IER); 251 SYSFS_PMCSETUP(ier, SPRN_PA6T_IER);
252 SYSFS_PMCSETUP(sier, SPRN_PA6T_SIER); 252 SYSFS_PMCSETUP(sier, SPRN_PA6T_SIER);
253 SYSFS_PMCSETUP(siar, SPRN_PA6T_SIAR); 253 SYSFS_PMCSETUP(siar, SPRN_PA6T_SIAR);
254 SYSFS_PMCSETUP(tsr0, SPRN_PA6T_TSR0); 254 SYSFS_PMCSETUP(tsr0, SPRN_PA6T_TSR0);
255 SYSFS_PMCSETUP(tsr1, SPRN_PA6T_TSR1); 255 SYSFS_PMCSETUP(tsr1, SPRN_PA6T_TSR1);
256 SYSFS_PMCSETUP(tsr2, SPRN_PA6T_TSR2); 256 SYSFS_PMCSETUP(tsr2, SPRN_PA6T_TSR2);
257 SYSFS_PMCSETUP(tsr3, SPRN_PA6T_TSR3); 257 SYSFS_PMCSETUP(tsr3, SPRN_PA6T_TSR3);
258 #endif /* CONFIG_DEBUG_KERNEL */ 258 #endif /* CONFIG_DEBUG_KERNEL */
259 #endif /* HAS_PPC_PMC_PA6T */ 259 #endif /* HAS_PPC_PMC_PA6T */
260 260
261 #ifdef HAS_PPC_PMC_IBM 261 #ifdef HAS_PPC_PMC_IBM
262 static struct sysdev_attribute ibm_common_attrs[] = { 262 static struct sysdev_attribute ibm_common_attrs[] = {
263 _SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0), 263 _SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
264 _SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1), 264 _SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
265 }; 265 };
266 #endif /* HAS_PPC_PMC_IBM */ 266 #endif /* HAS_PPC_PMC_IBM */
267 267
268 #ifdef HAS_PPC_PMC_G4 268 #ifdef HAS_PPC_PMC_G4
269 static struct sysdev_attribute g4_common_attrs[] = { 269 static struct sysdev_attribute g4_common_attrs[] = {
270 _SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0), 270 _SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
271 _SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1), 271 _SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
272 _SYSDEV_ATTR(mmcr2, 0600, show_mmcr2, store_mmcr2), 272 _SYSDEV_ATTR(mmcr2, 0600, show_mmcr2, store_mmcr2),
273 }; 273 };
274 #endif /* HAS_PPC_PMC_G4 */ 274 #endif /* HAS_PPC_PMC_G4 */
275 275
276 static struct sysdev_attribute classic_pmc_attrs[] = { 276 static struct sysdev_attribute classic_pmc_attrs[] = {
277 _SYSDEV_ATTR(pmc1, 0600, show_pmc1, store_pmc1), 277 _SYSDEV_ATTR(pmc1, 0600, show_pmc1, store_pmc1),
278 _SYSDEV_ATTR(pmc2, 0600, show_pmc2, store_pmc2), 278 _SYSDEV_ATTR(pmc2, 0600, show_pmc2, store_pmc2),
279 _SYSDEV_ATTR(pmc3, 0600, show_pmc3, store_pmc3), 279 _SYSDEV_ATTR(pmc3, 0600, show_pmc3, store_pmc3),
280 _SYSDEV_ATTR(pmc4, 0600, show_pmc4, store_pmc4), 280 _SYSDEV_ATTR(pmc4, 0600, show_pmc4, store_pmc4),
281 _SYSDEV_ATTR(pmc5, 0600, show_pmc5, store_pmc5), 281 _SYSDEV_ATTR(pmc5, 0600, show_pmc5, store_pmc5),
282 _SYSDEV_ATTR(pmc6, 0600, show_pmc6, store_pmc6), 282 _SYSDEV_ATTR(pmc6, 0600, show_pmc6, store_pmc6),
283 #ifdef CONFIG_PPC64 283 #ifdef CONFIG_PPC64
284 _SYSDEV_ATTR(pmc7, 0600, show_pmc7, store_pmc7), 284 _SYSDEV_ATTR(pmc7, 0600, show_pmc7, store_pmc7),
285 _SYSDEV_ATTR(pmc8, 0600, show_pmc8, store_pmc8), 285 _SYSDEV_ATTR(pmc8, 0600, show_pmc8, store_pmc8),
286 #endif 286 #endif
287 }; 287 };
288 288
289 #ifdef HAS_PPC_PMC_PA6T 289 #ifdef HAS_PPC_PMC_PA6T
290 static struct sysdev_attribute pa6t_attrs[] = { 290 static struct sysdev_attribute pa6t_attrs[] = {
291 _SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0), 291 _SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
292 _SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1), 292 _SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
293 _SYSDEV_ATTR(pmc0, 0600, show_pa6t_pmc0, store_pa6t_pmc0), 293 _SYSDEV_ATTR(pmc0, 0600, show_pa6t_pmc0, store_pa6t_pmc0),
294 _SYSDEV_ATTR(pmc1, 0600, show_pa6t_pmc1, store_pa6t_pmc1), 294 _SYSDEV_ATTR(pmc1, 0600, show_pa6t_pmc1, store_pa6t_pmc1),
295 _SYSDEV_ATTR(pmc2, 0600, show_pa6t_pmc2, store_pa6t_pmc2), 295 _SYSDEV_ATTR(pmc2, 0600, show_pa6t_pmc2, store_pa6t_pmc2),
296 _SYSDEV_ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3), 296 _SYSDEV_ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3),
297 _SYSDEV_ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4), 297 _SYSDEV_ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4),
298 _SYSDEV_ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5), 298 _SYSDEV_ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5),
299 #ifdef CONFIG_DEBUG_KERNEL 299 #ifdef CONFIG_DEBUG_KERNEL
300 _SYSDEV_ATTR(hid0, 0600, show_hid0, store_hid0), 300 _SYSDEV_ATTR(hid0, 0600, show_hid0, store_hid0),
301 _SYSDEV_ATTR(hid1, 0600, show_hid1, store_hid1), 301 _SYSDEV_ATTR(hid1, 0600, show_hid1, store_hid1),
302 _SYSDEV_ATTR(hid4, 0600, show_hid4, store_hid4), 302 _SYSDEV_ATTR(hid4, 0600, show_hid4, store_hid4),
303 _SYSDEV_ATTR(hid5, 0600, show_hid5, store_hid5), 303 _SYSDEV_ATTR(hid5, 0600, show_hid5, store_hid5),
304 _SYSDEV_ATTR(ima0, 0600, show_ima0, store_ima0), 304 _SYSDEV_ATTR(ima0, 0600, show_ima0, store_ima0),
305 _SYSDEV_ATTR(ima1, 0600, show_ima1, store_ima1), 305 _SYSDEV_ATTR(ima1, 0600, show_ima1, store_ima1),
306 _SYSDEV_ATTR(ima2, 0600, show_ima2, store_ima2), 306 _SYSDEV_ATTR(ima2, 0600, show_ima2, store_ima2),
307 _SYSDEV_ATTR(ima3, 0600, show_ima3, store_ima3), 307 _SYSDEV_ATTR(ima3, 0600, show_ima3, store_ima3),
308 _SYSDEV_ATTR(ima4, 0600, show_ima4, store_ima4), 308 _SYSDEV_ATTR(ima4, 0600, show_ima4, store_ima4),
309 _SYSDEV_ATTR(ima5, 0600, show_ima5, store_ima5), 309 _SYSDEV_ATTR(ima5, 0600, show_ima5, store_ima5),
310 _SYSDEV_ATTR(ima6, 0600, show_ima6, store_ima6), 310 _SYSDEV_ATTR(ima6, 0600, show_ima6, store_ima6),
311 _SYSDEV_ATTR(ima7, 0600, show_ima7, store_ima7), 311 _SYSDEV_ATTR(ima7, 0600, show_ima7, store_ima7),
312 _SYSDEV_ATTR(ima8, 0600, show_ima8, store_ima8), 312 _SYSDEV_ATTR(ima8, 0600, show_ima8, store_ima8),
313 _SYSDEV_ATTR(ima9, 0600, show_ima9, store_ima9), 313 _SYSDEV_ATTR(ima9, 0600, show_ima9, store_ima9),
314 _SYSDEV_ATTR(imaat, 0600, show_imaat, store_imaat), 314 _SYSDEV_ATTR(imaat, 0600, show_imaat, store_imaat),
315 _SYSDEV_ATTR(btcr, 0600, show_btcr, store_btcr), 315 _SYSDEV_ATTR(btcr, 0600, show_btcr, store_btcr),
316 _SYSDEV_ATTR(pccr, 0600, show_pccr, store_pccr), 316 _SYSDEV_ATTR(pccr, 0600, show_pccr, store_pccr),
317 _SYSDEV_ATTR(rpccr, 0600, show_rpccr, store_rpccr), 317 _SYSDEV_ATTR(rpccr, 0600, show_rpccr, store_rpccr),
318 _SYSDEV_ATTR(der, 0600, show_der, store_der), 318 _SYSDEV_ATTR(der, 0600, show_der, store_der),
319 _SYSDEV_ATTR(mer, 0600, show_mer, store_mer), 319 _SYSDEV_ATTR(mer, 0600, show_mer, store_mer),
320 _SYSDEV_ATTR(ber, 0600, show_ber, store_ber), 320 _SYSDEV_ATTR(ber, 0600, show_ber, store_ber),
321 _SYSDEV_ATTR(ier, 0600, show_ier, store_ier), 321 _SYSDEV_ATTR(ier, 0600, show_ier, store_ier),
322 _SYSDEV_ATTR(sier, 0600, show_sier, store_sier), 322 _SYSDEV_ATTR(sier, 0600, show_sier, store_sier),
323 _SYSDEV_ATTR(siar, 0600, show_siar, store_siar), 323 _SYSDEV_ATTR(siar, 0600, show_siar, store_siar),
324 _SYSDEV_ATTR(tsr0, 0600, show_tsr0, store_tsr0), 324 _SYSDEV_ATTR(tsr0, 0600, show_tsr0, store_tsr0),
325 _SYSDEV_ATTR(tsr1, 0600, show_tsr1, store_tsr1), 325 _SYSDEV_ATTR(tsr1, 0600, show_tsr1, store_tsr1),
326 _SYSDEV_ATTR(tsr2, 0600, show_tsr2, store_tsr2), 326 _SYSDEV_ATTR(tsr2, 0600, show_tsr2, store_tsr2),
327 _SYSDEV_ATTR(tsr3, 0600, show_tsr3, store_tsr3), 327 _SYSDEV_ATTR(tsr3, 0600, show_tsr3, store_tsr3),
328 #endif /* CONFIG_DEBUG_KERNEL */ 328 #endif /* CONFIG_DEBUG_KERNEL */
329 }; 329 };
330 #endif /* HAS_PPC_PMC_PA6T */ 330 #endif /* HAS_PPC_PMC_PA6T */
331 #endif /* HAS_PPC_PMC_CLASSIC */ 331 #endif /* HAS_PPC_PMC_CLASSIC */
332 332
333 static void __cpuinit register_cpu_online(unsigned int cpu) 333 static void __cpuinit register_cpu_online(unsigned int cpu)
334 { 334 {
335 struct cpu *c = &per_cpu(cpu_devices, cpu); 335 struct cpu *c = &per_cpu(cpu_devices, cpu);
336 struct sys_device *s = &c->sysdev; 336 struct sys_device *s = &c->sysdev;
337 struct sysdev_attribute *attrs, *pmc_attrs; 337 struct sysdev_attribute *attrs, *pmc_attrs;
338 int i, nattrs; 338 int i, nattrs;
339 339
340 #ifdef CONFIG_PPC64 340 #ifdef CONFIG_PPC64
341 if (!firmware_has_feature(FW_FEATURE_ISERIES) && 341 if (!firmware_has_feature(FW_FEATURE_ISERIES) &&
342 cpu_has_feature(CPU_FTR_SMT)) 342 cpu_has_feature(CPU_FTR_SMT))
343 sysdev_create_file(s, &attr_smt_snooze_delay); 343 sysdev_create_file(s, &attr_smt_snooze_delay);
344 #endif 344 #endif
345 345
346 /* PMC stuff */ 346 /* PMC stuff */
347 switch (cur_cpu_spec->pmc_type) { 347 switch (cur_cpu_spec->pmc_type) {
348 #ifdef HAS_PPC_PMC_IBM 348 #ifdef HAS_PPC_PMC_IBM
349 case PPC_PMC_IBM: 349 case PPC_PMC_IBM:
350 attrs = ibm_common_attrs; 350 attrs = ibm_common_attrs;
351 nattrs = sizeof(ibm_common_attrs) / sizeof(struct sysdev_attribute); 351 nattrs = sizeof(ibm_common_attrs) / sizeof(struct sysdev_attribute);
352 pmc_attrs = classic_pmc_attrs; 352 pmc_attrs = classic_pmc_attrs;
353 break; 353 break;
354 #endif /* HAS_PPC_PMC_IBM */ 354 #endif /* HAS_PPC_PMC_IBM */
355 #ifdef HAS_PPC_PMC_G4 355 #ifdef HAS_PPC_PMC_G4
356 case PPC_PMC_G4: 356 case PPC_PMC_G4:
357 attrs = g4_common_attrs; 357 attrs = g4_common_attrs;
358 nattrs = sizeof(g4_common_attrs) / sizeof(struct sysdev_attribute); 358 nattrs = sizeof(g4_common_attrs) / sizeof(struct sysdev_attribute);
359 pmc_attrs = classic_pmc_attrs; 359 pmc_attrs = classic_pmc_attrs;
360 break; 360 break;
361 #endif /* HAS_PPC_PMC_G4 */ 361 #endif /* HAS_PPC_PMC_G4 */
362 #ifdef HAS_PPC_PMC_PA6T 362 #ifdef HAS_PPC_PMC_PA6T
363 case PPC_PMC_PA6T: 363 case PPC_PMC_PA6T:
364 /* PA Semi starts counting at PMC0 */ 364 /* PA Semi starts counting at PMC0 */
365 attrs = pa6t_attrs; 365 attrs = pa6t_attrs;
366 nattrs = sizeof(pa6t_attrs) / sizeof(struct sysdev_attribute); 366 nattrs = sizeof(pa6t_attrs) / sizeof(struct sysdev_attribute);
367 pmc_attrs = NULL; 367 pmc_attrs = NULL;
368 break; 368 break;
369 #endif /* HAS_PPC_PMC_PA6T */ 369 #endif /* HAS_PPC_PMC_PA6T */
370 default: 370 default:
371 attrs = NULL; 371 attrs = NULL;
372 nattrs = 0; 372 nattrs = 0;
373 pmc_attrs = NULL; 373 pmc_attrs = NULL;
374 } 374 }
375 375
376 for (i = 0; i < nattrs; i++) 376 for (i = 0; i < nattrs; i++)
377 sysdev_create_file(s, &attrs[i]); 377 sysdev_create_file(s, &attrs[i]);
378 378
379 if (pmc_attrs) 379 if (pmc_attrs)
380 for (i = 0; i < cur_cpu_spec->num_pmcs; i++) 380 for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
381 sysdev_create_file(s, &pmc_attrs[i]); 381 sysdev_create_file(s, &pmc_attrs[i]);
382 382
383 #ifdef CONFIG_PPC64 383 #ifdef CONFIG_PPC64
384 if (cpu_has_feature(CPU_FTR_MMCRA)) 384 if (cpu_has_feature(CPU_FTR_MMCRA))
385 sysdev_create_file(s, &attr_mmcra); 385 sysdev_create_file(s, &attr_mmcra);
386 386
387 if (cpu_has_feature(CPU_FTR_PURR)) 387 if (cpu_has_feature(CPU_FTR_PURR))
388 sysdev_create_file(s, &attr_purr); 388 sysdev_create_file(s, &attr_purr);
389 389
390 if (cpu_has_feature(CPU_FTR_SPURR)) 390 if (cpu_has_feature(CPU_FTR_SPURR))
391 sysdev_create_file(s, &attr_spurr); 391 sysdev_create_file(s, &attr_spurr);
392 392
393 if (cpu_has_feature(CPU_FTR_DSCR)) 393 if (cpu_has_feature(CPU_FTR_DSCR))
394 sysdev_create_file(s, &attr_dscr); 394 sysdev_create_file(s, &attr_dscr);
395 #endif /* CONFIG_PPC64 */ 395 #endif /* CONFIG_PPC64 */
396 396
397 cacheinfo_cpu_online(cpu); 397 cacheinfo_cpu_online(cpu);
398 } 398 }
399 399
400 #ifdef CONFIG_HOTPLUG_CPU 400 #ifdef CONFIG_HOTPLUG_CPU
401 static void unregister_cpu_online(unsigned int cpu) 401 static void unregister_cpu_online(unsigned int cpu)
402 { 402 {
403 struct cpu *c = &per_cpu(cpu_devices, cpu); 403 struct cpu *c = &per_cpu(cpu_devices, cpu);
404 struct sys_device *s = &c->sysdev; 404 struct sys_device *s = &c->sysdev;
405 struct sysdev_attribute *attrs, *pmc_attrs; 405 struct sysdev_attribute *attrs, *pmc_attrs;
406 int i, nattrs; 406 int i, nattrs;
407 407
408 BUG_ON(!c->hotpluggable); 408 BUG_ON(!c->hotpluggable);
409 409
410 #ifdef CONFIG_PPC64 410 #ifdef CONFIG_PPC64
411 if (!firmware_has_feature(FW_FEATURE_ISERIES) && 411 if (!firmware_has_feature(FW_FEATURE_ISERIES) &&
412 cpu_has_feature(CPU_FTR_SMT)) 412 cpu_has_feature(CPU_FTR_SMT))
413 sysdev_remove_file(s, &attr_smt_snooze_delay); 413 sysdev_remove_file(s, &attr_smt_snooze_delay);
414 #endif 414 #endif
415 415
416 /* PMC stuff */ 416 /* PMC stuff */
417 switch (cur_cpu_spec->pmc_type) { 417 switch (cur_cpu_spec->pmc_type) {
418 #ifdef HAS_PPC_PMC_IBM 418 #ifdef HAS_PPC_PMC_IBM
419 case PPC_PMC_IBM: 419 case PPC_PMC_IBM:
420 attrs = ibm_common_attrs; 420 attrs = ibm_common_attrs;
421 nattrs = sizeof(ibm_common_attrs) / sizeof(struct sysdev_attribute); 421 nattrs = sizeof(ibm_common_attrs) / sizeof(struct sysdev_attribute);
422 pmc_attrs = classic_pmc_attrs; 422 pmc_attrs = classic_pmc_attrs;
423 break; 423 break;
424 #endif /* HAS_PPC_PMC_IBM */ 424 #endif /* HAS_PPC_PMC_IBM */
425 #ifdef HAS_PPC_PMC_G4 425 #ifdef HAS_PPC_PMC_G4
426 case PPC_PMC_G4: 426 case PPC_PMC_G4:
427 attrs = g4_common_attrs; 427 attrs = g4_common_attrs;
428 nattrs = sizeof(g4_common_attrs) / sizeof(struct sysdev_attribute); 428 nattrs = sizeof(g4_common_attrs) / sizeof(struct sysdev_attribute);
429 pmc_attrs = classic_pmc_attrs; 429 pmc_attrs = classic_pmc_attrs;
430 break; 430 break;
431 #endif /* HAS_PPC_PMC_G4 */ 431 #endif /* HAS_PPC_PMC_G4 */
432 #ifdef HAS_PPC_PMC_PA6T 432 #ifdef HAS_PPC_PMC_PA6T
433 case PPC_PMC_PA6T: 433 case PPC_PMC_PA6T:
434 /* PA Semi starts counting at PMC0 */ 434 /* PA Semi starts counting at PMC0 */
435 attrs = pa6t_attrs; 435 attrs = pa6t_attrs;
436 nattrs = sizeof(pa6t_attrs) / sizeof(struct sysdev_attribute); 436 nattrs = sizeof(pa6t_attrs) / sizeof(struct sysdev_attribute);
437 pmc_attrs = NULL; 437 pmc_attrs = NULL;
438 break; 438 break;
439 #endif /* HAS_PPC_PMC_PA6T */ 439 #endif /* HAS_PPC_PMC_PA6T */
440 default: 440 default:
441 attrs = NULL; 441 attrs = NULL;
442 nattrs = 0; 442 nattrs = 0;
443 pmc_attrs = NULL; 443 pmc_attrs = NULL;
444 } 444 }
445 445
446 for (i = 0; i < nattrs; i++) 446 for (i = 0; i < nattrs; i++)
447 sysdev_remove_file(s, &attrs[i]); 447 sysdev_remove_file(s, &attrs[i]);
448 448
449 if (pmc_attrs) 449 if (pmc_attrs)
450 for (i = 0; i < cur_cpu_spec->num_pmcs; i++) 450 for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
451 sysdev_remove_file(s, &pmc_attrs[i]); 451 sysdev_remove_file(s, &pmc_attrs[i]);
452 452
453 #ifdef CONFIG_PPC64 453 #ifdef CONFIG_PPC64
454 if (cpu_has_feature(CPU_FTR_MMCRA)) 454 if (cpu_has_feature(CPU_FTR_MMCRA))
455 sysdev_remove_file(s, &attr_mmcra); 455 sysdev_remove_file(s, &attr_mmcra);
456 456
457 if (cpu_has_feature(CPU_FTR_PURR)) 457 if (cpu_has_feature(CPU_FTR_PURR))
458 sysdev_remove_file(s, &attr_purr); 458 sysdev_remove_file(s, &attr_purr);
459 459
460 if (cpu_has_feature(CPU_FTR_SPURR)) 460 if (cpu_has_feature(CPU_FTR_SPURR))
461 sysdev_remove_file(s, &attr_spurr); 461 sysdev_remove_file(s, &attr_spurr);
462 462
463 if (cpu_has_feature(CPU_FTR_DSCR)) 463 if (cpu_has_feature(CPU_FTR_DSCR))
464 sysdev_remove_file(s, &attr_dscr); 464 sysdev_remove_file(s, &attr_dscr);
465 #endif /* CONFIG_PPC64 */ 465 #endif /* CONFIG_PPC64 */
466 466
467 cacheinfo_cpu_offline(cpu); 467 cacheinfo_cpu_offline(cpu);
468 } 468 }
469 469
470 #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE 470 #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
471 ssize_t arch_cpu_probe(const char *buf, size_t count) 471 ssize_t arch_cpu_probe(const char *buf, size_t count)
472 { 472 {
473 if (ppc_md.cpu_probe) 473 if (ppc_md.cpu_probe)
474 return ppc_md.cpu_probe(buf, count); 474 return ppc_md.cpu_probe(buf, count);
475 475
476 return -EINVAL; 476 return -EINVAL;
477 } 477 }
478 478
479 ssize_t arch_cpu_release(const char *buf, size_t count) 479 ssize_t arch_cpu_release(const char *buf, size_t count)
480 { 480 {
481 if (ppc_md.cpu_release) 481 if (ppc_md.cpu_release)
482 return ppc_md.cpu_release(buf, count); 482 return ppc_md.cpu_release(buf, count);
483 483
484 return -EINVAL; 484 return -EINVAL;
485 } 485 }
486 #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ 486 #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
487 487
488 #endif /* CONFIG_HOTPLUG_CPU */ 488 #endif /* CONFIG_HOTPLUG_CPU */
489 489
490 static int __cpuinit sysfs_cpu_notify(struct notifier_block *self, 490 static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
491 unsigned long action, void *hcpu) 491 unsigned long action, void *hcpu)
492 { 492 {
493 unsigned int cpu = (unsigned int)(long)hcpu; 493 unsigned int cpu = (unsigned int)(long)hcpu;
494 494
495 switch (action) { 495 switch (action) {
496 case CPU_ONLINE: 496 case CPU_ONLINE:
497 case CPU_ONLINE_FROZEN: 497 case CPU_ONLINE_FROZEN:
498 register_cpu_online(cpu); 498 register_cpu_online(cpu);
499 break; 499 break;
500 #ifdef CONFIG_HOTPLUG_CPU 500 #ifdef CONFIG_HOTPLUG_CPU
501 case CPU_DEAD: 501 case CPU_DEAD:
502 case CPU_DEAD_FROZEN: 502 case CPU_DEAD_FROZEN:
503 unregister_cpu_online(cpu); 503 unregister_cpu_online(cpu);
504 break; 504 break;
505 #endif 505 #endif
506 } 506 }
507 return NOTIFY_OK; 507 return NOTIFY_OK;
508 } 508 }
509 509
510 static struct notifier_block __cpuinitdata sysfs_cpu_nb = { 510 static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
511 .notifier_call = sysfs_cpu_notify, 511 .notifier_call = sysfs_cpu_notify,
512 }; 512 };
513 513
514 static DEFINE_MUTEX(cpu_mutex); 514 static DEFINE_MUTEX(cpu_mutex);
515 515
516 int cpu_add_sysdev_attr(struct sysdev_attribute *attr) 516 int cpu_add_sysdev_attr(struct sysdev_attribute *attr)
517 { 517 {
518 int cpu; 518 int cpu;
519 519
520 mutex_lock(&cpu_mutex); 520 mutex_lock(&cpu_mutex);
521 521
522 for_each_possible_cpu(cpu) { 522 for_each_possible_cpu(cpu) {
523 sysdev_create_file(get_cpu_sysdev(cpu), attr); 523 sysdev_create_file(get_cpu_sysdev(cpu), attr);
524 } 524 }
525 525
526 mutex_unlock(&cpu_mutex); 526 mutex_unlock(&cpu_mutex);
527 return 0; 527 return 0;
528 } 528 }
529 EXPORT_SYMBOL_GPL(cpu_add_sysdev_attr); 529 EXPORT_SYMBOL_GPL(cpu_add_sysdev_attr);
530 530
531 int cpu_add_sysdev_attr_group(struct attribute_group *attrs) 531 int cpu_add_sysdev_attr_group(struct attribute_group *attrs)
532 { 532 {
533 int cpu; 533 int cpu;
534 struct sys_device *sysdev; 534 struct sys_device *sysdev;
535 int ret; 535 int ret;
536 536
537 mutex_lock(&cpu_mutex); 537 mutex_lock(&cpu_mutex);
538 538
539 for_each_possible_cpu(cpu) { 539 for_each_possible_cpu(cpu) {
540 sysdev = get_cpu_sysdev(cpu); 540 sysdev = get_cpu_sysdev(cpu);
541 ret = sysfs_create_group(&sysdev->kobj, attrs); 541 ret = sysfs_create_group(&sysdev->kobj, attrs);
542 WARN_ON(ret != 0); 542 WARN_ON(ret != 0);
543 } 543 }
544 544
545 mutex_unlock(&cpu_mutex); 545 mutex_unlock(&cpu_mutex);
546 return 0; 546 return 0;
547 } 547 }
548 EXPORT_SYMBOL_GPL(cpu_add_sysdev_attr_group); 548 EXPORT_SYMBOL_GPL(cpu_add_sysdev_attr_group);
549 549
550 550
551 void cpu_remove_sysdev_attr(struct sysdev_attribute *attr) 551 void cpu_remove_sysdev_attr(struct sysdev_attribute *attr)
552 { 552 {
553 int cpu; 553 int cpu;
554 554
555 mutex_lock(&cpu_mutex); 555 mutex_lock(&cpu_mutex);
556 556
557 for_each_possible_cpu(cpu) { 557 for_each_possible_cpu(cpu) {
558 sysdev_remove_file(get_cpu_sysdev(cpu), attr); 558 sysdev_remove_file(get_cpu_sysdev(cpu), attr);
559 } 559 }
560 560
561 mutex_unlock(&cpu_mutex); 561 mutex_unlock(&cpu_mutex);
562 } 562 }
563 EXPORT_SYMBOL_GPL(cpu_remove_sysdev_attr); 563 EXPORT_SYMBOL_GPL(cpu_remove_sysdev_attr);
564 564
565 void cpu_remove_sysdev_attr_group(struct attribute_group *attrs) 565 void cpu_remove_sysdev_attr_group(struct attribute_group *attrs)
566 { 566 {
567 int cpu; 567 int cpu;
568 struct sys_device *sysdev; 568 struct sys_device *sysdev;
569 569
570 mutex_lock(&cpu_mutex); 570 mutex_lock(&cpu_mutex);
571 571
572 for_each_possible_cpu(cpu) { 572 for_each_possible_cpu(cpu) {
573 sysdev = get_cpu_sysdev(cpu); 573 sysdev = get_cpu_sysdev(cpu);
574 sysfs_remove_group(&sysdev->kobj, attrs); 574 sysfs_remove_group(&sysdev->kobj, attrs);
575 } 575 }
576 576
577 mutex_unlock(&cpu_mutex); 577 mutex_unlock(&cpu_mutex);
578 } 578 }
579 EXPORT_SYMBOL_GPL(cpu_remove_sysdev_attr_group); 579 EXPORT_SYMBOL_GPL(cpu_remove_sysdev_attr_group);
580 580
581 581
582 /* NUMA stuff */ 582 /* NUMA stuff */
583 583
584 #ifdef CONFIG_NUMA 584 #ifdef CONFIG_NUMA
585 static void register_nodes(void) 585 static void register_nodes(void)
586 { 586 {
587 int i; 587 int i;
588 588
589 for (i = 0; i < MAX_NUMNODES; i++) 589 for (i = 0; i < MAX_NUMNODES; i++)
590 register_one_node(i); 590 register_one_node(i);
591 } 591 }
592 592
593 int sysfs_add_device_to_node(struct sys_device *dev, int nid) 593 int sysfs_add_device_to_node(struct sys_device *dev, int nid)
594 { 594 {
595 struct node *node = &node_devices[nid]; 595 struct node *node = &node_devices[nid];
596 return sysfs_create_link(&node->sysdev.kobj, &dev->kobj, 596 return sysfs_create_link(&node->sysdev.kobj, &dev->kobj,
597 kobject_name(&dev->kobj)); 597 kobject_name(&dev->kobj));
598 } 598 }
599 EXPORT_SYMBOL_GPL(sysfs_add_device_to_node); 599 EXPORT_SYMBOL_GPL(sysfs_add_device_to_node);
600 600
601 void sysfs_remove_device_from_node(struct sys_device *dev, int nid) 601 void sysfs_remove_device_from_node(struct sys_device *dev, int nid)
602 { 602 {
603 struct node *node = &node_devices[nid]; 603 struct node *node = &node_devices[nid];
604 sysfs_remove_link(&node->sysdev.kobj, kobject_name(&dev->kobj)); 604 sysfs_remove_link(&node->sysdev.kobj, kobject_name(&dev->kobj));
605 } 605 }
606 EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node); 606 EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node);
607 607
608 #else 608 #else
609 static void register_nodes(void) 609 static void register_nodes(void)
610 { 610 {
611 return; 611 return;
612 } 612 }
613 613
614 #endif 614 #endif
615 615
616 /* Only valid if CPU is present. */ 616 /* Only valid if CPU is present. */
617 static ssize_t show_physical_id(struct sys_device *dev, 617 static ssize_t show_physical_id(struct sys_device *dev,
618 struct sysdev_attribute *attr, char *buf) 618 struct sysdev_attribute *attr, char *buf)
619 { 619 {
620 struct cpu *cpu = container_of(dev, struct cpu, sysdev); 620 struct cpu *cpu = container_of(dev, struct cpu, sysdev);
621 621
622 return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->sysdev.id)); 622 return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->sysdev.id));
623 } 623 }
624 static SYSDEV_ATTR(physical_id, 0444, show_physical_id, NULL); 624 static SYSDEV_ATTR(physical_id, 0444, show_physical_id, NULL);
625 625
626 static int __init topology_init(void) 626 static int __init topology_init(void)
627 { 627 {
628 int cpu; 628 int cpu;
629 629
630 register_nodes(); 630 register_nodes();
631 register_cpu_notifier(&sysfs_cpu_nb); 631 register_cpu_notifier(&sysfs_cpu_nb);
632 632
633 for_each_possible_cpu(cpu) { 633 for_each_possible_cpu(cpu) {
634 struct cpu *c = &per_cpu(cpu_devices, cpu); 634 struct cpu *c = &per_cpu(cpu_devices, cpu);
635 635
636 /* 636 /*
637 * For now, we just see if the system supports making 637 * For now, we just see if the system supports making
638 * the RTAS calls for CPU hotplug. But, there may be a 638 * the RTAS calls for CPU hotplug. But, there may be a
639 * more comprehensive way to do this for an individual 639 * more comprehensive way to do this for an individual
640 * CPU. For instance, the boot cpu might never be valid 640 * CPU. For instance, the boot cpu might never be valid
641 * for hotplugging. 641 * for hotplugging.
642 */ 642 */
643 if (ppc_md.cpu_die) 643 if (ppc_md.cpu_die)
644 c->hotpluggable = 1; 644 c->hotpluggable = 1;
645 645
646 if (cpu_online(cpu) || c->hotpluggable) { 646 if (cpu_online(cpu) || c->hotpluggable) {
647 register_cpu(c, cpu); 647 register_cpu(c, cpu);
648 648
649 sysdev_create_file(&c->sysdev, &attr_physical_id); 649 sysdev_create_file(&c->sysdev, &attr_physical_id);
650 } 650 }
651 651
652 if (cpu_online(cpu)) 652 if (cpu_online(cpu))
653 register_cpu_online(cpu); 653 register_cpu_online(cpu);
654 } 654 }
655 #ifdef CONFIG_PPC64 655 #ifdef CONFIG_PPC64
656 sysfs_create_dscr_default(); 656 sysfs_create_dscr_default();
657 #endif /* CONFIG_PPC64 */ 657 #endif /* CONFIG_PPC64 */
658 658
659 return 0; 659 return 0;
660 } 660 }
661 subsys_initcall(topology_init); 661 subsys_initcall(topology_init);
662 662
arch/powerpc/kernel/time.c
1 /* 1 /*
2 * Common time routines among all ppc machines. 2 * Common time routines among all ppc machines.
3 * 3 *
4 * Written by Cort Dougan (cort@cs.nmt.edu) to merge 4 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
5 * Paul Mackerras' version and mine for PReP and Pmac. 5 * Paul Mackerras' version and mine for PReP and Pmac.
6 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net). 6 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
7 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com) 7 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
8 * 8 *
9 * First round of bugfixes by Gabriel Paubert (paubert@iram.es) 9 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
10 * to make clock more stable (2.4.0-test5). The only thing 10 * to make clock more stable (2.4.0-test5). The only thing
11 * that this code assumes is that the timebases have been synchronized 11 * that this code assumes is that the timebases have been synchronized
12 * by firmware on SMP and are never stopped (never do sleep 12 * by firmware on SMP and are never stopped (never do sleep
13 * on SMP then, nap and doze are OK). 13 * on SMP then, nap and doze are OK).
14 * 14 *
15 * Speeded up do_gettimeofday by getting rid of references to 15 * Speeded up do_gettimeofday by getting rid of references to
16 * xtime (which required locks for consistency). (mikejc@us.ibm.com) 16 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
17 * 17 *
18 * TODO (not necessarily in this file): 18 * TODO (not necessarily in this file):
19 * - improve precision and reproducibility of timebase frequency 19 * - improve precision and reproducibility of timebase frequency
20 * measurement at boot time. (for iSeries, we calibrate the timebase 20 * measurement at boot time. (for iSeries, we calibrate the timebase
21 * against the Titan chip's clock.) 21 * against the Titan chip's clock.)
22 * - for astronomical applications: add a new function to get 22 * - for astronomical applications: add a new function to get
23 * non ambiguous timestamps even around leap seconds. This needs 23 * non ambiguous timestamps even around leap seconds. This needs
24 * a new timestamp format and a good name. 24 * a new timestamp format and a good name.
25 * 25 *
26 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96 26 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
27 * "A Kernel Model for Precision Timekeeping" by Dave Mills 27 * "A Kernel Model for Precision Timekeeping" by Dave Mills
28 * 28 *
29 * This program is free software; you can redistribute it and/or 29 * This program is free software; you can redistribute it and/or
30 * modify it under the terms of the GNU General Public License 30 * modify it under the terms of the GNU General Public License
31 * as published by the Free Software Foundation; either version 31 * as published by the Free Software Foundation; either version
32 * 2 of the License, or (at your option) any later version. 32 * 2 of the License, or (at your option) any later version.
33 */ 33 */
34 34
35 #include <linux/errno.h> 35 #include <linux/errno.h>
36 #include <linux/module.h> 36 #include <linux/export.h>
37 #include <linux/sched.h> 37 #include <linux/sched.h>
38 #include <linux/kernel.h> 38 #include <linux/kernel.h>
39 #include <linux/param.h> 39 #include <linux/param.h>
40 #include <linux/string.h> 40 #include <linux/string.h>
41 #include <linux/mm.h> 41 #include <linux/mm.h>
42 #include <linux/interrupt.h> 42 #include <linux/interrupt.h>
43 #include <linux/timex.h> 43 #include <linux/timex.h>
44 #include <linux/kernel_stat.h> 44 #include <linux/kernel_stat.h>
45 #include <linux/time.h> 45 #include <linux/time.h>
46 #include <linux/init.h> 46 #include <linux/init.h>
47 #include <linux/profile.h> 47 #include <linux/profile.h>
48 #include <linux/cpu.h> 48 #include <linux/cpu.h>
49 #include <linux/security.h> 49 #include <linux/security.h>
50 #include <linux/percpu.h> 50 #include <linux/percpu.h>
51 #include <linux/rtc.h> 51 #include <linux/rtc.h>
52 #include <linux/jiffies.h> 52 #include <linux/jiffies.h>
53 #include <linux/posix-timers.h> 53 #include <linux/posix-timers.h>
54 #include <linux/irq.h> 54 #include <linux/irq.h>
55 #include <linux/delay.h> 55 #include <linux/delay.h>
56 #include <linux/irq_work.h> 56 #include <linux/irq_work.h>
57 #include <asm/trace.h> 57 #include <asm/trace.h>
58 58
59 #include <asm/io.h> 59 #include <asm/io.h>
60 #include <asm/processor.h> 60 #include <asm/processor.h>
61 #include <asm/nvram.h> 61 #include <asm/nvram.h>
62 #include <asm/cache.h> 62 #include <asm/cache.h>
63 #include <asm/machdep.h> 63 #include <asm/machdep.h>
64 #include <asm/uaccess.h> 64 #include <asm/uaccess.h>
65 #include <asm/time.h> 65 #include <asm/time.h>
66 #include <asm/prom.h> 66 #include <asm/prom.h>
67 #include <asm/irq.h> 67 #include <asm/irq.h>
68 #include <asm/div64.h> 68 #include <asm/div64.h>
69 #include <asm/smp.h> 69 #include <asm/smp.h>
70 #include <asm/vdso_datapage.h> 70 #include <asm/vdso_datapage.h>
71 #include <asm/firmware.h> 71 #include <asm/firmware.h>
72 #include <asm/cputime.h> 72 #include <asm/cputime.h>
73 #ifdef CONFIG_PPC_ISERIES 73 #ifdef CONFIG_PPC_ISERIES
74 #include <asm/iseries/it_lp_queue.h> 74 #include <asm/iseries/it_lp_queue.h>
75 #include <asm/iseries/hv_call_xm.h> 75 #include <asm/iseries/hv_call_xm.h>
76 #endif 76 #endif
77 77
78 /* powerpc clocksource/clockevent code */ 78 /* powerpc clocksource/clockevent code */
79 79
80 #include <linux/clockchips.h> 80 #include <linux/clockchips.h>
81 #include <linux/clocksource.h> 81 #include <linux/clocksource.h>
82 82
83 static cycle_t rtc_read(struct clocksource *); 83 static cycle_t rtc_read(struct clocksource *);
84 static struct clocksource clocksource_rtc = { 84 static struct clocksource clocksource_rtc = {
85 .name = "rtc", 85 .name = "rtc",
86 .rating = 400, 86 .rating = 400,
87 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 87 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
88 .mask = CLOCKSOURCE_MASK(64), 88 .mask = CLOCKSOURCE_MASK(64),
89 .shift = 22, 89 .shift = 22,
90 .mult = 0, /* To be filled in */ 90 .mult = 0, /* To be filled in */
91 .read = rtc_read, 91 .read = rtc_read,
92 }; 92 };
93 93
94 static cycle_t timebase_read(struct clocksource *); 94 static cycle_t timebase_read(struct clocksource *);
95 static struct clocksource clocksource_timebase = { 95 static struct clocksource clocksource_timebase = {
96 .name = "timebase", 96 .name = "timebase",
97 .rating = 400, 97 .rating = 400,
98 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 98 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
99 .mask = CLOCKSOURCE_MASK(64), 99 .mask = CLOCKSOURCE_MASK(64),
100 .shift = 22, 100 .shift = 22,
101 .mult = 0, /* To be filled in */ 101 .mult = 0, /* To be filled in */
102 .read = timebase_read, 102 .read = timebase_read,
103 }; 103 };
104 104
105 #define DECREMENTER_MAX 0x7fffffff 105 #define DECREMENTER_MAX 0x7fffffff
106 106
107 static int decrementer_set_next_event(unsigned long evt, 107 static int decrementer_set_next_event(unsigned long evt,
108 struct clock_event_device *dev); 108 struct clock_event_device *dev);
109 static void decrementer_set_mode(enum clock_event_mode mode, 109 static void decrementer_set_mode(enum clock_event_mode mode,
110 struct clock_event_device *dev); 110 struct clock_event_device *dev);
111 111
112 static struct clock_event_device decrementer_clockevent = { 112 static struct clock_event_device decrementer_clockevent = {
113 .name = "decrementer", 113 .name = "decrementer",
114 .rating = 200, 114 .rating = 200,
115 .shift = 0, /* To be filled in */ 115 .shift = 0, /* To be filled in */
116 .mult = 0, /* To be filled in */ 116 .mult = 0, /* To be filled in */
117 .irq = 0, 117 .irq = 0,
118 .set_next_event = decrementer_set_next_event, 118 .set_next_event = decrementer_set_next_event,
119 .set_mode = decrementer_set_mode, 119 .set_mode = decrementer_set_mode,
120 .features = CLOCK_EVT_FEAT_ONESHOT, 120 .features = CLOCK_EVT_FEAT_ONESHOT,
121 }; 121 };
122 122
123 struct decrementer_clock { 123 struct decrementer_clock {
124 struct clock_event_device event; 124 struct clock_event_device event;
125 u64 next_tb; 125 u64 next_tb;
126 }; 126 };
127 127
128 static DEFINE_PER_CPU(struct decrementer_clock, decrementers); 128 static DEFINE_PER_CPU(struct decrementer_clock, decrementers);
129 129
130 #ifdef CONFIG_PPC_ISERIES 130 #ifdef CONFIG_PPC_ISERIES
131 static unsigned long __initdata iSeries_recal_titan; 131 static unsigned long __initdata iSeries_recal_titan;
132 static signed long __initdata iSeries_recal_tb; 132 static signed long __initdata iSeries_recal_tb;
133 133
134 /* Forward declaration is only needed for iSeries compiles */ 134 /* Forward declaration is only needed for iSeries compiles */
135 static void __init clocksource_init(void); 135 static void __init clocksource_init(void);
136 #endif 136 #endif
137 137
138 #define XSEC_PER_SEC (1024*1024) 138 #define XSEC_PER_SEC (1024*1024)
139 139
140 #ifdef CONFIG_PPC64 140 #ifdef CONFIG_PPC64
141 #define SCALE_XSEC(xsec, max) (((xsec) * max) / XSEC_PER_SEC) 141 #define SCALE_XSEC(xsec, max) (((xsec) * max) / XSEC_PER_SEC)
142 #else 142 #else
143 /* compute ((xsec << 12) * max) >> 32 */ 143 /* compute ((xsec << 12) * max) >> 32 */
144 #define SCALE_XSEC(xsec, max) mulhwu((xsec) << 12, max) 144 #define SCALE_XSEC(xsec, max) mulhwu((xsec) << 12, max)
145 #endif 145 #endif
146 146
147 unsigned long tb_ticks_per_jiffy; 147 unsigned long tb_ticks_per_jiffy;
148 unsigned long tb_ticks_per_usec = 100; /* sane default */ 148 unsigned long tb_ticks_per_usec = 100; /* sane default */
149 EXPORT_SYMBOL(tb_ticks_per_usec); 149 EXPORT_SYMBOL(tb_ticks_per_usec);
150 unsigned long tb_ticks_per_sec; 150 unsigned long tb_ticks_per_sec;
151 EXPORT_SYMBOL(tb_ticks_per_sec); /* for cputime_t conversions */ 151 EXPORT_SYMBOL(tb_ticks_per_sec); /* for cputime_t conversions */
152 152
153 DEFINE_SPINLOCK(rtc_lock); 153 DEFINE_SPINLOCK(rtc_lock);
154 EXPORT_SYMBOL_GPL(rtc_lock); 154 EXPORT_SYMBOL_GPL(rtc_lock);
155 155
156 static u64 tb_to_ns_scale __read_mostly; 156 static u64 tb_to_ns_scale __read_mostly;
157 static unsigned tb_to_ns_shift __read_mostly; 157 static unsigned tb_to_ns_shift __read_mostly;
158 static u64 boot_tb __read_mostly; 158 static u64 boot_tb __read_mostly;
159 159
160 extern struct timezone sys_tz; 160 extern struct timezone sys_tz;
161 static long timezone_offset; 161 static long timezone_offset;
162 162
163 unsigned long ppc_proc_freq; 163 unsigned long ppc_proc_freq;
164 EXPORT_SYMBOL_GPL(ppc_proc_freq); 164 EXPORT_SYMBOL_GPL(ppc_proc_freq);
165 unsigned long ppc_tb_freq; 165 unsigned long ppc_tb_freq;
166 EXPORT_SYMBOL_GPL(ppc_tb_freq); 166 EXPORT_SYMBOL_GPL(ppc_tb_freq);
167 167
168 #ifdef CONFIG_VIRT_CPU_ACCOUNTING 168 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
169 /* 169 /*
170 * Factors for converting from cputime_t (timebase ticks) to 170 * Factors for converting from cputime_t (timebase ticks) to
171 * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds). 171 * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
172 * These are all stored as 0.64 fixed-point binary fractions. 172 * These are all stored as 0.64 fixed-point binary fractions.
173 */ 173 */
174 u64 __cputime_jiffies_factor; 174 u64 __cputime_jiffies_factor;
175 EXPORT_SYMBOL(__cputime_jiffies_factor); 175 EXPORT_SYMBOL(__cputime_jiffies_factor);
176 u64 __cputime_msec_factor; 176 u64 __cputime_msec_factor;
177 EXPORT_SYMBOL(__cputime_msec_factor); 177 EXPORT_SYMBOL(__cputime_msec_factor);
178 u64 __cputime_sec_factor; 178 u64 __cputime_sec_factor;
179 EXPORT_SYMBOL(__cputime_sec_factor); 179 EXPORT_SYMBOL(__cputime_sec_factor);
180 u64 __cputime_clockt_factor; 180 u64 __cputime_clockt_factor;
181 EXPORT_SYMBOL(__cputime_clockt_factor); 181 EXPORT_SYMBOL(__cputime_clockt_factor);
182 DEFINE_PER_CPU(unsigned long, cputime_last_delta); 182 DEFINE_PER_CPU(unsigned long, cputime_last_delta);
183 DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta); 183 DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);
184 184
185 cputime_t cputime_one_jiffy; 185 cputime_t cputime_one_jiffy;
186 186
187 void (*dtl_consumer)(struct dtl_entry *, u64); 187 void (*dtl_consumer)(struct dtl_entry *, u64);
188 188
189 static void calc_cputime_factors(void) 189 static void calc_cputime_factors(void)
190 { 190 {
191 struct div_result res; 191 struct div_result res;
192 192
193 div128_by_32(HZ, 0, tb_ticks_per_sec, &res); 193 div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
194 __cputime_jiffies_factor = res.result_low; 194 __cputime_jiffies_factor = res.result_low;
195 div128_by_32(1000, 0, tb_ticks_per_sec, &res); 195 div128_by_32(1000, 0, tb_ticks_per_sec, &res);
196 __cputime_msec_factor = res.result_low; 196 __cputime_msec_factor = res.result_low;
197 div128_by_32(1, 0, tb_ticks_per_sec, &res); 197 div128_by_32(1, 0, tb_ticks_per_sec, &res);
198 __cputime_sec_factor = res.result_low; 198 __cputime_sec_factor = res.result_low;
199 div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res); 199 div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
200 __cputime_clockt_factor = res.result_low; 200 __cputime_clockt_factor = res.result_low;
201 } 201 }
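
The factors computed above are 0.64 fixed-point fractions: multiplying a tick count by one of them and keeping only the high 64 bits of the product performs the corresponding division by tb_ticks_per_sec. A minimal userspace sketch of that idea, with an assumed 512 MHz timebase and GCC's __int128 standing in for the kernel's div128_by_32()/mulhdu() helpers:

#include <stdio.h>
#include <stdint.h>

/* high 64 bits of a 64x64-bit multiply, in the spirit of the kernel's mulhdu() */
static uint64_t mulhi64(uint64_t a, uint64_t b)
{
	return (uint64_t)(((unsigned __int128)a * b) >> 64);
}

int main(void)
{
	uint64_t tb_ticks_per_sec = 512000000;	/* assumed timebase frequency */
	/* 1/tb_ticks_per_sec as a 0.64 fraction, i.e. 2^64 / tb_ticks_per_sec */
	uint64_t sec_factor = (uint64_t)(((unsigned __int128)1 << 64) / tb_ticks_per_sec);
	uint64_t ticks = 7 * tb_ticks_per_sec / 2;	/* 3.5 seconds worth of ticks */

	/* high half of ticks * (1/tb_ticks_per_sec) gives whole seconds: prints 3 */
	printf("seconds = %llu\n", (unsigned long long)mulhi64(ticks, sec_factor));
	return 0;
}
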
202 202
203 /* 203 /*
204 * Read the SPURR on systems that have it, otherwise the PURR, 204 * Read the SPURR on systems that have it, otherwise the PURR,
205 * or if that doesn't exist return the timebase value passed in. 205 * or if that doesn't exist return the timebase value passed in.
206 */ 206 */
207 static u64 read_spurr(u64 tb) 207 static u64 read_spurr(u64 tb)
208 { 208 {
209 if (cpu_has_feature(CPU_FTR_SPURR)) 209 if (cpu_has_feature(CPU_FTR_SPURR))
210 return mfspr(SPRN_SPURR); 210 return mfspr(SPRN_SPURR);
211 if (cpu_has_feature(CPU_FTR_PURR)) 211 if (cpu_has_feature(CPU_FTR_PURR))
212 return mfspr(SPRN_PURR); 212 return mfspr(SPRN_PURR);
213 return tb; 213 return tb;
214 } 214 }
215 215
216 #ifdef CONFIG_PPC_SPLPAR 216 #ifdef CONFIG_PPC_SPLPAR
217 217
218 /* 218 /*
219 * Scan the dispatch trace log and count up the stolen time. 219 * Scan the dispatch trace log and count up the stolen time.
220 * Should be called with interrupts disabled. 220 * Should be called with interrupts disabled.
221 */ 221 */
222 static u64 scan_dispatch_log(u64 stop_tb) 222 static u64 scan_dispatch_log(u64 stop_tb)
223 { 223 {
224 u64 i = local_paca->dtl_ridx; 224 u64 i = local_paca->dtl_ridx;
225 struct dtl_entry *dtl = local_paca->dtl_curr; 225 struct dtl_entry *dtl = local_paca->dtl_curr;
226 struct dtl_entry *dtl_end = local_paca->dispatch_log_end; 226 struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
227 struct lppaca *vpa = local_paca->lppaca_ptr; 227 struct lppaca *vpa = local_paca->lppaca_ptr;
228 u64 tb_delta; 228 u64 tb_delta;
229 u64 stolen = 0; 229 u64 stolen = 0;
230 u64 dtb; 230 u64 dtb;
231 231
232 if (!dtl) 232 if (!dtl)
233 return 0; 233 return 0;
234 234
235 if (i == vpa->dtl_idx) 235 if (i == vpa->dtl_idx)
236 return 0; 236 return 0;
237 while (i < vpa->dtl_idx) { 237 while (i < vpa->dtl_idx) {
238 if (dtl_consumer) 238 if (dtl_consumer)
239 dtl_consumer(dtl, i); 239 dtl_consumer(dtl, i);
240 dtb = dtl->timebase; 240 dtb = dtl->timebase;
241 tb_delta = dtl->enqueue_to_dispatch_time + 241 tb_delta = dtl->enqueue_to_dispatch_time +
242 dtl->ready_to_enqueue_time; 242 dtl->ready_to_enqueue_time;
243 barrier(); 243 barrier();
244 if (i + N_DISPATCH_LOG < vpa->dtl_idx) { 244 if (i + N_DISPATCH_LOG < vpa->dtl_idx) {
245 /* buffer has overflowed */ 245 /* buffer has overflowed */
246 i = vpa->dtl_idx - N_DISPATCH_LOG; 246 i = vpa->dtl_idx - N_DISPATCH_LOG;
247 dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG); 247 dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
248 continue; 248 continue;
249 } 249 }
250 if (dtb > stop_tb) 250 if (dtb > stop_tb)
251 break; 251 break;
252 stolen += tb_delta; 252 stolen += tb_delta;
253 ++i; 253 ++i;
254 ++dtl; 254 ++dtl;
255 if (dtl == dtl_end) 255 if (dtl == dtl_end)
256 dtl = local_paca->dispatch_log; 256 dtl = local_paca->dispatch_log;
257 } 257 }
258 local_paca->dtl_ridx = i; 258 local_paca->dtl_ridx = i;
259 local_paca->dtl_curr = dtl; 259 local_paca->dtl_curr = dtl;
260 return stolen; 260 return stolen;
261 } 261 }
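
The overflow check in the loop above keeps the reader within one ring's worth of entries of the hypervisor's write index. A toy sketch of just that index arithmetic, with an invented ring size and indices:

#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 128			/* stands in for N_DISPATCH_LOG */

int main(void)
{
	uint64_t i = 10;		/* reader index, like local_paca->dtl_ridx */
	uint64_t write_idx = 300;	/* writer index, like vpa->dtl_idx */

	if (i + RING_SIZE < write_idx) {
		/* everything older than one full ring has been overwritten,
		 * so resume at the oldest entry still present */
		i = write_idx - RING_SIZE;
	}
	printf("resume at %llu, ring slot %llu\n",
	       (unsigned long long)i, (unsigned long long)(i % RING_SIZE));
	return 0;
}
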
262 262
263 /* 263 /*
264 * Accumulate stolen time by scanning the dispatch trace log. 264 * Accumulate stolen time by scanning the dispatch trace log.
265 * Called on entry from user mode. 265 * Called on entry from user mode.
266 */ 266 */
267 void accumulate_stolen_time(void) 267 void accumulate_stolen_time(void)
268 { 268 {
269 u64 sst, ust; 269 u64 sst, ust;
270 270
271 u8 save_soft_enabled = local_paca->soft_enabled; 271 u8 save_soft_enabled = local_paca->soft_enabled;
272 u8 save_hard_enabled = local_paca->hard_enabled; 272 u8 save_hard_enabled = local_paca->hard_enabled;
273 273
274 /* We are called early in the exception entry, before 274 /* We are called early in the exception entry, before
275 * soft/hard_enabled are sync'ed to the expected state 275 * soft/hard_enabled are sync'ed to the expected state
276 * for the exception. We are hard disabled but the PACA 276 * for the exception. We are hard disabled but the PACA
277 * needs to reflect that so various debug stuff doesn't 277 * needs to reflect that so various debug stuff doesn't
278 * complain 278 * complain
279 */ 279 */
280 local_paca->soft_enabled = 0; 280 local_paca->soft_enabled = 0;
281 local_paca->hard_enabled = 0; 281 local_paca->hard_enabled = 0;
282 282
283 sst = scan_dispatch_log(local_paca->starttime_user); 283 sst = scan_dispatch_log(local_paca->starttime_user);
284 ust = scan_dispatch_log(local_paca->starttime); 284 ust = scan_dispatch_log(local_paca->starttime);
285 local_paca->system_time -= sst; 285 local_paca->system_time -= sst;
286 local_paca->user_time -= ust; 286 local_paca->user_time -= ust;
287 local_paca->stolen_time += ust + sst; 287 local_paca->stolen_time += ust + sst;
288 288
289 local_paca->soft_enabled = save_soft_enabled; 289 local_paca->soft_enabled = save_soft_enabled;
290 local_paca->hard_enabled = save_hard_enabled; 290 local_paca->hard_enabled = save_hard_enabled;
291 } 291 }
292 292
293 static inline u64 calculate_stolen_time(u64 stop_tb) 293 static inline u64 calculate_stolen_time(u64 stop_tb)
294 { 294 {
295 u64 stolen = 0; 295 u64 stolen = 0;
296 296
297 if (get_paca()->dtl_ridx != get_paca()->lppaca_ptr->dtl_idx) { 297 if (get_paca()->dtl_ridx != get_paca()->lppaca_ptr->dtl_idx) {
298 stolen = scan_dispatch_log(stop_tb); 298 stolen = scan_dispatch_log(stop_tb);
299 get_paca()->system_time -= stolen; 299 get_paca()->system_time -= stolen;
300 } 300 }
301 301
302 stolen += get_paca()->stolen_time; 302 stolen += get_paca()->stolen_time;
303 get_paca()->stolen_time = 0; 303 get_paca()->stolen_time = 0;
304 return stolen; 304 return stolen;
305 } 305 }
306 306
307 #else /* CONFIG_PPC_SPLPAR */ 307 #else /* CONFIG_PPC_SPLPAR */
308 static inline u64 calculate_stolen_time(u64 stop_tb) 308 static inline u64 calculate_stolen_time(u64 stop_tb)
309 { 309 {
310 return 0; 310 return 0;
311 } 311 }
312 312
313 #endif /* CONFIG_PPC_SPLPAR */ 313 #endif /* CONFIG_PPC_SPLPAR */
314 314
315 /* 315 /*
316 * Account time for a transition between system, hard irq 316 * Account time for a transition between system, hard irq
317 * or soft irq state. 317 * or soft irq state.
318 */ 318 */
319 void account_system_vtime(struct task_struct *tsk) 319 void account_system_vtime(struct task_struct *tsk)
320 { 320 {
321 u64 now, nowscaled, delta, deltascaled; 321 u64 now, nowscaled, delta, deltascaled;
322 unsigned long flags; 322 unsigned long flags;
323 u64 stolen, udelta, sys_scaled, user_scaled; 323 u64 stolen, udelta, sys_scaled, user_scaled;
324 324
325 local_irq_save(flags); 325 local_irq_save(flags);
326 now = mftb(); 326 now = mftb();
327 nowscaled = read_spurr(now); 327 nowscaled = read_spurr(now);
328 get_paca()->system_time += now - get_paca()->starttime; 328 get_paca()->system_time += now - get_paca()->starttime;
329 get_paca()->starttime = now; 329 get_paca()->starttime = now;
330 deltascaled = nowscaled - get_paca()->startspurr; 330 deltascaled = nowscaled - get_paca()->startspurr;
331 get_paca()->startspurr = nowscaled; 331 get_paca()->startspurr = nowscaled;
332 332
333 stolen = calculate_stolen_time(now); 333 stolen = calculate_stolen_time(now);
334 334
335 delta = get_paca()->system_time; 335 delta = get_paca()->system_time;
336 get_paca()->system_time = 0; 336 get_paca()->system_time = 0;
337 udelta = get_paca()->user_time - get_paca()->utime_sspurr; 337 udelta = get_paca()->user_time - get_paca()->utime_sspurr;
338 get_paca()->utime_sspurr = get_paca()->user_time; 338 get_paca()->utime_sspurr = get_paca()->user_time;
339 339
340 /* 340 /*
341 * Because we don't read the SPURR on every kernel entry/exit, 341 * Because we don't read the SPURR on every kernel entry/exit,
342 * deltascaled includes both user and system SPURR ticks. 342 * deltascaled includes both user and system SPURR ticks.
343 * Apportion these ticks to system SPURR ticks and user 343 * Apportion these ticks to system SPURR ticks and user
344 * SPURR ticks in the same ratio as the system time (delta) 344 * SPURR ticks in the same ratio as the system time (delta)
345 * and user time (udelta) values obtained from the timebase 345 * and user time (udelta) values obtained from the timebase
346 * over the same interval. The system ticks get accounted here; 346 * over the same interval. The system ticks get accounted here;
347 * the user ticks get saved up in paca->user_time_scaled to be 347 * the user ticks get saved up in paca->user_time_scaled to be
348 * used by account_process_tick. 348 * used by account_process_tick.
349 */ 349 */
350 sys_scaled = delta; 350 sys_scaled = delta;
351 user_scaled = udelta; 351 user_scaled = udelta;
352 if (deltascaled != delta + udelta) { 352 if (deltascaled != delta + udelta) {
353 if (udelta) { 353 if (udelta) {
354 sys_scaled = deltascaled * delta / (delta + udelta); 354 sys_scaled = deltascaled * delta / (delta + udelta);
355 user_scaled = deltascaled - sys_scaled; 355 user_scaled = deltascaled - sys_scaled;
356 } else { 356 } else {
357 sys_scaled = deltascaled; 357 sys_scaled = deltascaled;
358 } 358 }
359 } 359 }
360 get_paca()->user_time_scaled += user_scaled; 360 get_paca()->user_time_scaled += user_scaled;
361 361
362 if (in_interrupt() || idle_task(smp_processor_id()) != tsk) { 362 if (in_interrupt() || idle_task(smp_processor_id()) != tsk) {
363 account_system_time(tsk, 0, delta, sys_scaled); 363 account_system_time(tsk, 0, delta, sys_scaled);
364 if (stolen) 364 if (stolen)
365 account_steal_time(stolen); 365 account_steal_time(stolen);
366 } else { 366 } else {
367 account_idle_time(delta + stolen); 367 account_idle_time(delta + stolen);
368 } 368 }
369 local_irq_restore(flags); 369 local_irq_restore(flags);
370 } 370 }
371 EXPORT_SYMBOL_GPL(account_system_vtime); 371 EXPORT_SYMBOL_GPL(account_system_vtime);
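
The SPURR apportioning described in the comment above is a plain proportional split of the combined SPURR delta in the ratio of the timebase deltas. A worked sketch with invented numbers (3000 system ticks and 1000 user ticks against 3600 SPURR ticks comes out as 2700 and 900):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t delta = 3000;		/* system time, in timebase ticks */
	uint64_t udelta = 1000;		/* user time, in timebase ticks */
	uint64_t deltascaled = 3600;	/* combined SPURR ticks over the same span */
	uint64_t sys_scaled = delta, user_scaled = udelta;

	if (deltascaled != delta + udelta) {
		if (udelta) {
			/* split 3600 in the ratio 3000:1000 */
			sys_scaled = deltascaled * delta / (delta + udelta);
			user_scaled = deltascaled - sys_scaled;
		} else {
			sys_scaled = deltascaled;
		}
	}
	printf("sys_scaled=%llu user_scaled=%llu\n",
	       (unsigned long long)sys_scaled, (unsigned long long)user_scaled);
	return 0;
}
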
372 372
373 /* 373 /*
374 * Transfer the user and system times accumulated in the paca 374 * Transfer the user and system times accumulated in the paca
375 * by the exception entry and exit code to the generic process 375 * by the exception entry and exit code to the generic process
376 * user and system time records. 376 * user and system time records.
377 * Must be called with interrupts disabled. 377 * Must be called with interrupts disabled.
378 * Assumes that account_system_vtime() has been called recently 378 * Assumes that account_system_vtime() has been called recently
379 * (i.e. since the last entry from usermode) so that 379 * (i.e. since the last entry from usermode) so that
380 * get_paca()->user_time_scaled is up to date. 380 * get_paca()->user_time_scaled is up to date.
381 */ 381 */
382 void account_process_tick(struct task_struct *tsk, int user_tick) 382 void account_process_tick(struct task_struct *tsk, int user_tick)
383 { 383 {
384 cputime_t utime, utimescaled; 384 cputime_t utime, utimescaled;
385 385
386 utime = get_paca()->user_time; 386 utime = get_paca()->user_time;
387 utimescaled = get_paca()->user_time_scaled; 387 utimescaled = get_paca()->user_time_scaled;
388 get_paca()->user_time = 0; 388 get_paca()->user_time = 0;
389 get_paca()->user_time_scaled = 0; 389 get_paca()->user_time_scaled = 0;
390 get_paca()->utime_sspurr = 0; 390 get_paca()->utime_sspurr = 0;
391 account_user_time(tsk, utime, utimescaled); 391 account_user_time(tsk, utime, utimescaled);
392 } 392 }
393 393
394 #else /* ! CONFIG_VIRT_CPU_ACCOUNTING */ 394 #else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
395 #define calc_cputime_factors() 395 #define calc_cputime_factors()
396 #endif 396 #endif
397 397
398 void __delay(unsigned long loops) 398 void __delay(unsigned long loops)
399 { 399 {
400 unsigned long start; 400 unsigned long start;
401 int diff; 401 int diff;
402 402
403 if (__USE_RTC()) { 403 if (__USE_RTC()) {
404 start = get_rtcl(); 404 start = get_rtcl();
405 do { 405 do {
406 /* the RTCL register wraps at 1000000000 */ 406 /* the RTCL register wraps at 1000000000 */
407 diff = get_rtcl() - start; 407 diff = get_rtcl() - start;
408 if (diff < 0) 408 if (diff < 0)
409 diff += 1000000000; 409 diff += 1000000000;
410 } while (diff < loops); 410 } while (diff < loops);
411 } else { 411 } else {
412 start = get_tbl(); 412 start = get_tbl();
413 while (get_tbl() - start < loops) 413 while (get_tbl() - start < loops)
414 HMT_low(); 414 HMT_low();
415 HMT_medium(); 415 HMT_medium();
416 } 416 }
417 } 417 }
418 EXPORT_SYMBOL(__delay); 418 EXPORT_SYMBOL(__delay);
419 419
420 void udelay(unsigned long usecs) 420 void udelay(unsigned long usecs)
421 { 421 {
422 __delay(tb_ticks_per_usec * usecs); 422 __delay(tb_ticks_per_usec * usecs);
423 } 423 }
424 EXPORT_SYMBOL(udelay); 424 EXPORT_SYMBOL(udelay);
425 425
426 #ifdef CONFIG_SMP 426 #ifdef CONFIG_SMP
427 unsigned long profile_pc(struct pt_regs *regs) 427 unsigned long profile_pc(struct pt_regs *regs)
428 { 428 {
429 unsigned long pc = instruction_pointer(regs); 429 unsigned long pc = instruction_pointer(regs);
430 430
431 if (in_lock_functions(pc)) 431 if (in_lock_functions(pc))
432 return regs->link; 432 return regs->link;
433 433
434 return pc; 434 return pc;
435 } 435 }
436 EXPORT_SYMBOL(profile_pc); 436 EXPORT_SYMBOL(profile_pc);
437 #endif 437 #endif
438 438
439 #ifdef CONFIG_PPC_ISERIES 439 #ifdef CONFIG_PPC_ISERIES
440 440
441 /* 441 /*
442 * This function recalibrates the timebase based on the 49-bit time-of-day 442 * This function recalibrates the timebase based on the 49-bit time-of-day
443 * value in the Titan chip. The Titan is much more accurate than the value 443 * value in the Titan chip. The Titan is much more accurate than the value
444 * returned by the service processor for the timebase frequency. 444 * returned by the service processor for the timebase frequency.
445 */ 445 */
446 446
447 static int __init iSeries_tb_recal(void) 447 static int __init iSeries_tb_recal(void)
448 { 448 {
449 unsigned long titan, tb; 449 unsigned long titan, tb;
450 450
451 /* Make sure we only run on iSeries */ 451 /* Make sure we only run on iSeries */
452 if (!firmware_has_feature(FW_FEATURE_ISERIES)) 452 if (!firmware_has_feature(FW_FEATURE_ISERIES))
453 return -ENODEV; 453 return -ENODEV;
454 454
455 tb = get_tb(); 455 tb = get_tb();
456 titan = HvCallXm_loadTod(); 456 titan = HvCallXm_loadTod();
457 if ( iSeries_recal_titan ) { 457 if ( iSeries_recal_titan ) {
458 unsigned long tb_ticks = tb - iSeries_recal_tb; 458 unsigned long tb_ticks = tb - iSeries_recal_tb;
459 unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12; 459 unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
460 unsigned long new_tb_ticks_per_sec = (tb_ticks * USEC_PER_SEC)/titan_usec; 460 unsigned long new_tb_ticks_per_sec = (tb_ticks * USEC_PER_SEC)/titan_usec;
461 unsigned long new_tb_ticks_per_jiffy = 461 unsigned long new_tb_ticks_per_jiffy =
462 DIV_ROUND_CLOSEST(new_tb_ticks_per_sec, HZ); 462 DIV_ROUND_CLOSEST(new_tb_ticks_per_sec, HZ);
463 long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy; 463 long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
464 char sign = '+'; 464 char sign = '+';
465 /* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */ 465 /* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
466 new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ; 466 new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;
467 467
468 if ( tick_diff < 0 ) { 468 if ( tick_diff < 0 ) {
469 tick_diff = -tick_diff; 469 tick_diff = -tick_diff;
470 sign = '-'; 470 sign = '-';
471 } 471 }
472 if ( tick_diff ) { 472 if ( tick_diff ) {
473 if ( tick_diff < tb_ticks_per_jiffy/25 ) { 473 if ( tick_diff < tb_ticks_per_jiffy/25 ) {
474 printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n", 474 printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
475 new_tb_ticks_per_jiffy, sign, tick_diff ); 475 new_tb_ticks_per_jiffy, sign, tick_diff );
476 tb_ticks_per_jiffy = new_tb_ticks_per_jiffy; 476 tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
477 tb_ticks_per_sec = new_tb_ticks_per_sec; 477 tb_ticks_per_sec = new_tb_ticks_per_sec;
478 calc_cputime_factors(); 478 calc_cputime_factors();
479 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; 479 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
480 setup_cputime_one_jiffy(); 480 setup_cputime_one_jiffy();
481 } 481 }
482 else { 482 else {
483 printk( "Titan recalibrate: FAILED (difference > 4 percent)\n" 483 printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
484 " new tb_ticks_per_jiffy = %lu\n" 484 " new tb_ticks_per_jiffy = %lu\n"
485 " old tb_ticks_per_jiffy = %lu\n", 485 " old tb_ticks_per_jiffy = %lu\n",
486 new_tb_ticks_per_jiffy, tb_ticks_per_jiffy ); 486 new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
487 } 487 }
488 } 488 }
489 } 489 }
490 iSeries_recal_titan = titan; 490 iSeries_recal_titan = titan;
491 iSeries_recal_tb = tb; 491 iSeries_recal_tb = tb;
492 492
493 /* Called here as now we know accurate values for the timebase */ 493 /* Called here as now we know accurate values for the timebase */
494 clocksource_init(); 494 clocksource_init();
495 return 0; 495 return 0;
496 } 496 }
497 late_initcall(iSeries_tb_recal); 497 late_initcall(iSeries_tb_recal);
498 498
499 /* Called from platform early init */ 499 /* Called from platform early init */
500 void __init iSeries_time_init_early(void) 500 void __init iSeries_time_init_early(void)
501 { 501 {
502 iSeries_recal_tb = get_tb(); 502 iSeries_recal_tb = get_tb();
503 iSeries_recal_titan = HvCallXm_loadTod(); 503 iSeries_recal_titan = HvCallXm_loadTod();
504 } 504 }
505 #endif /* CONFIG_PPC_ISERIES */ 505 #endif /* CONFIG_PPC_ISERIES */
506 506
507 #ifdef CONFIG_IRQ_WORK 507 #ifdef CONFIG_IRQ_WORK
508 508
509 /* 509 /*
510 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable... 510 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
511 */ 511 */
512 #ifdef CONFIG_PPC64 512 #ifdef CONFIG_PPC64
513 static inline unsigned long test_irq_work_pending(void) 513 static inline unsigned long test_irq_work_pending(void)
514 { 514 {
515 unsigned long x; 515 unsigned long x;
516 516
517 asm volatile("lbz %0,%1(13)" 517 asm volatile("lbz %0,%1(13)"
518 : "=r" (x) 518 : "=r" (x)
519 : "i" (offsetof(struct paca_struct, irq_work_pending))); 519 : "i" (offsetof(struct paca_struct, irq_work_pending)));
520 return x; 520 return x;
521 } 521 }
522 522
523 static inline void set_irq_work_pending_flag(void) 523 static inline void set_irq_work_pending_flag(void)
524 { 524 {
525 asm volatile("stb %0,%1(13)" : : 525 asm volatile("stb %0,%1(13)" : :
526 "r" (1), 526 "r" (1),
527 "i" (offsetof(struct paca_struct, irq_work_pending))); 527 "i" (offsetof(struct paca_struct, irq_work_pending)));
528 } 528 }
529 529
530 static inline void clear_irq_work_pending(void) 530 static inline void clear_irq_work_pending(void)
531 { 531 {
532 asm volatile("stb %0,%1(13)" : : 532 asm volatile("stb %0,%1(13)" : :
533 "r" (0), 533 "r" (0),
534 "i" (offsetof(struct paca_struct, irq_work_pending))); 534 "i" (offsetof(struct paca_struct, irq_work_pending)));
535 } 535 }
536 536
537 #else /* 32-bit */ 537 #else /* 32-bit */
538 538
539 DEFINE_PER_CPU(u8, irq_work_pending); 539 DEFINE_PER_CPU(u8, irq_work_pending);
540 540
541 #define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1 541 #define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1
542 #define test_irq_work_pending() __get_cpu_var(irq_work_pending) 542 #define test_irq_work_pending() __get_cpu_var(irq_work_pending)
543 #define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0 543 #define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0
544 544
545 #endif /* 32 vs 64 bit */ 545 #endif /* 32 vs 64 bit */
546 546
547 void arch_irq_work_raise(void) 547 void arch_irq_work_raise(void)
548 { 548 {
549 preempt_disable(); 549 preempt_disable();
550 set_irq_work_pending_flag(); 550 set_irq_work_pending_flag();
551 set_dec(1); 551 set_dec(1);
552 preempt_enable(); 552 preempt_enable();
553 } 553 }
554 554
555 #else /* CONFIG_IRQ_WORK */ 555 #else /* CONFIG_IRQ_WORK */
556 556
557 #define test_irq_work_pending() 0 557 #define test_irq_work_pending() 0
558 #define clear_irq_work_pending() 558 #define clear_irq_work_pending()
559 559
560 #endif /* CONFIG_IRQ_WORK */ 560 #endif /* CONFIG_IRQ_WORK */
561 561
562 /* 562 /*
563 * For iSeries shared processors, we have to let the hypervisor 563 * For iSeries shared processors, we have to let the hypervisor
564 * set the hardware decrementer. We set a virtual decrementer 564 * set the hardware decrementer. We set a virtual decrementer
565 * in the lppaca and call the hypervisor if the virtual 565 * in the lppaca and call the hypervisor if the virtual
566 * decrementer is less than the current value in the hardware 566 * decrementer is less than the current value in the hardware
567 * decrementer. (almost always the new decrementer value will 567 * decrementer. (almost always the new decrementer value will
568 * be greater than the current hardware decrementer so the hypervisor 568 * be greater than the current hardware decrementer so the hypervisor
569 * call will not be needed) 569 * call will not be needed)
570 */ 570 */
571 571
572 /* 572 /*
573 * timer_interrupt - gets called when the decrementer overflows, 573 * timer_interrupt - gets called when the decrementer overflows,
574 * with interrupts disabled. 574 * with interrupts disabled.
575 */ 575 */
576 void timer_interrupt(struct pt_regs * regs) 576 void timer_interrupt(struct pt_regs * regs)
577 { 577 {
578 struct pt_regs *old_regs; 578 struct pt_regs *old_regs;
579 struct decrementer_clock *decrementer = &__get_cpu_var(decrementers); 579 struct decrementer_clock *decrementer = &__get_cpu_var(decrementers);
580 struct clock_event_device *evt = &decrementer->event; 580 struct clock_event_device *evt = &decrementer->event;
581 u64 now; 581 u64 now;
582 582
583 /* Ensure a positive value is written to the decrementer, or else 583 /* Ensure a positive value is written to the decrementer, or else
584 * some CPUs will continue to take decrementer exceptions. 584 * some CPUs will continue to take decrementer exceptions.
585 */ 585 */
586 set_dec(DECREMENTER_MAX); 586 set_dec(DECREMENTER_MAX);
587 587
588 /* Some implementations of hotplug will get timer interrupts while 588 /* Some implementations of hotplug will get timer interrupts while
589 * offline; just ignore these 589 * offline; just ignore these
590 */ 590 */
591 if (!cpu_online(smp_processor_id())) 591 if (!cpu_online(smp_processor_id()))
592 return; 592 return;
593 593
594 trace_timer_interrupt_entry(regs); 594 trace_timer_interrupt_entry(regs);
595 595
596 __get_cpu_var(irq_stat).timer_irqs++; 596 __get_cpu_var(irq_stat).timer_irqs++;
597 597
598 #if defined(CONFIG_PPC32) && defined(CONFIG_PMAC) 598 #if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
599 if (atomic_read(&ppc_n_lost_interrupts) != 0) 599 if (atomic_read(&ppc_n_lost_interrupts) != 0)
600 do_IRQ(regs); 600 do_IRQ(regs);
601 #endif 601 #endif
602 602
603 old_regs = set_irq_regs(regs); 603 old_regs = set_irq_regs(regs);
604 irq_enter(); 604 irq_enter();
605 605
606 if (test_irq_work_pending()) { 606 if (test_irq_work_pending()) {
607 clear_irq_work_pending(); 607 clear_irq_work_pending();
608 irq_work_run(); 608 irq_work_run();
609 } 609 }
610 610
611 #ifdef CONFIG_PPC_ISERIES 611 #ifdef CONFIG_PPC_ISERIES
612 if (firmware_has_feature(FW_FEATURE_ISERIES)) 612 if (firmware_has_feature(FW_FEATURE_ISERIES))
613 get_lppaca()->int_dword.fields.decr_int = 0; 613 get_lppaca()->int_dword.fields.decr_int = 0;
614 #endif 614 #endif
615 615
616 now = get_tb_or_rtc(); 616 now = get_tb_or_rtc();
617 if (now >= decrementer->next_tb) { 617 if (now >= decrementer->next_tb) {
618 decrementer->next_tb = ~(u64)0; 618 decrementer->next_tb = ~(u64)0;
619 if (evt->event_handler) 619 if (evt->event_handler)
620 evt->event_handler(evt); 620 evt->event_handler(evt);
621 } else { 621 } else {
622 now = decrementer->next_tb - now; 622 now = decrementer->next_tb - now;
623 if (now <= DECREMENTER_MAX) 623 if (now <= DECREMENTER_MAX)
624 set_dec((int)now); 624 set_dec((int)now);
625 } 625 }
626 626
627 #ifdef CONFIG_PPC_ISERIES 627 #ifdef CONFIG_PPC_ISERIES
628 if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending()) 628 if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
629 process_hvlpevents(); 629 process_hvlpevents();
630 #endif 630 #endif
631 631
632 #ifdef CONFIG_PPC64 632 #ifdef CONFIG_PPC64
633 /* collect purr register values often, for accurate calculations */ 633 /* collect purr register values often, for accurate calculations */
634 if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 634 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
635 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); 635 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
636 cu->current_tb = mfspr(SPRN_PURR); 636 cu->current_tb = mfspr(SPRN_PURR);
637 } 637 }
638 #endif 638 #endif
639 639
640 irq_exit(); 640 irq_exit();
641 set_irq_regs(old_regs); 641 set_irq_regs(old_regs);
642 642
643 trace_timer_interrupt_exit(regs); 643 trace_timer_interrupt_exit(regs);
644 } 644 }
645 645
646 #ifdef CONFIG_SUSPEND 646 #ifdef CONFIG_SUSPEND
647 static void generic_suspend_disable_irqs(void) 647 static void generic_suspend_disable_irqs(void)
648 { 648 {
649 /* Disable the decrementer, so that it doesn't interfere 649 /* Disable the decrementer, so that it doesn't interfere
650 * with suspending. 650 * with suspending.
651 */ 651 */
652 652
653 set_dec(0x7fffffff); 653 set_dec(0x7fffffff);
654 local_irq_disable(); 654 local_irq_disable();
655 set_dec(0x7fffffff); 655 set_dec(0x7fffffff);
656 } 656 }
657 657
658 static void generic_suspend_enable_irqs(void) 658 static void generic_suspend_enable_irqs(void)
659 { 659 {
660 local_irq_enable(); 660 local_irq_enable();
661 } 661 }
662 662
663 /* Overrides the weak version in kernel/power/main.c */ 663 /* Overrides the weak version in kernel/power/main.c */
664 void arch_suspend_disable_irqs(void) 664 void arch_suspend_disable_irqs(void)
665 { 665 {
666 if (ppc_md.suspend_disable_irqs) 666 if (ppc_md.suspend_disable_irqs)
667 ppc_md.suspend_disable_irqs(); 667 ppc_md.suspend_disable_irqs();
668 generic_suspend_disable_irqs(); 668 generic_suspend_disable_irqs();
669 } 669 }
670 670
671 /* Overrides the weak version in kernel/power/main.c */ 671 /* Overrides the weak version in kernel/power/main.c */
672 void arch_suspend_enable_irqs(void) 672 void arch_suspend_enable_irqs(void)
673 { 673 {
674 generic_suspend_enable_irqs(); 674 generic_suspend_enable_irqs();
675 if (ppc_md.suspend_enable_irqs) 675 if (ppc_md.suspend_enable_irqs)
676 ppc_md.suspend_enable_irqs(); 676 ppc_md.suspend_enable_irqs();
677 } 677 }
678 #endif 678 #endif
679 679
680 /* 680 /*
681 * Scheduler clock - returns current time in nanosec units. 681 * Scheduler clock - returns current time in nanosec units.
682 * 682 *
683 * Note: mulhdu(a, b) (multiply high double unsigned) returns 683 * Note: mulhdu(a, b) (multiply high double unsigned) returns
684 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b 684 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
685 * are 64-bit unsigned numbers. 685 * are 64-bit unsigned numbers.
686 */ 686 */
687 unsigned long long sched_clock(void) 687 unsigned long long sched_clock(void)
688 { 688 {
689 if (__USE_RTC()) 689 if (__USE_RTC())
690 return get_rtc(); 690 return get_rtc();
691 return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift; 691 return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
692 } 692 }
693 693
694 static int __init get_freq(char *name, int cells, unsigned long *val) 694 static int __init get_freq(char *name, int cells, unsigned long *val)
695 { 695 {
696 struct device_node *cpu; 696 struct device_node *cpu;
697 const unsigned int *fp; 697 const unsigned int *fp;
698 int found = 0; 698 int found = 0;
699 699
700 /* The cpu node should have timebase and clock frequency properties */ 700 /* The cpu node should have timebase and clock frequency properties */
701 cpu = of_find_node_by_type(NULL, "cpu"); 701 cpu = of_find_node_by_type(NULL, "cpu");
702 702
703 if (cpu) { 703 if (cpu) {
704 fp = of_get_property(cpu, name, NULL); 704 fp = of_get_property(cpu, name, NULL);
705 if (fp) { 705 if (fp) {
706 found = 1; 706 found = 1;
707 *val = of_read_ulong(fp, cells); 707 *val = of_read_ulong(fp, cells);
708 } 708 }
709 709
710 of_node_put(cpu); 710 of_node_put(cpu);
711 } 711 }
712 712
713 return found; 713 return found;
714 } 714 }
715 715
716 /* should become __cpuinit when secondary_cpu_time_init also is */ 716 /* should become __cpuinit when secondary_cpu_time_init also is */
717 void start_cpu_decrementer(void) 717 void start_cpu_decrementer(void)
718 { 718 {
719 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) 719 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
720 /* Clear any pending timer interrupts */ 720 /* Clear any pending timer interrupts */
721 mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS); 721 mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
722 722
723 /* Enable decrementer interrupt */ 723 /* Enable decrementer interrupt */
724 mtspr(SPRN_TCR, TCR_DIE); 724 mtspr(SPRN_TCR, TCR_DIE);
725 #endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */ 725 #endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */
726 } 726 }
727 727
728 void __init generic_calibrate_decr(void) 728 void __init generic_calibrate_decr(void)
729 { 729 {
730 ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */ 730 ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */
731 731
732 if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) && 732 if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
733 !get_freq("timebase-frequency", 1, &ppc_tb_freq)) { 733 !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {
734 734
735 printk(KERN_ERR "WARNING: Estimating decrementer frequency " 735 printk(KERN_ERR "WARNING: Estimating decrementer frequency "
736 "(not found)\n"); 736 "(not found)\n");
737 } 737 }
738 738
739 ppc_proc_freq = DEFAULT_PROC_FREQ; /* hardcoded default */ 739 ppc_proc_freq = DEFAULT_PROC_FREQ; /* hardcoded default */
740 740
741 if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) && 741 if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
742 !get_freq("clock-frequency", 1, &ppc_proc_freq)) { 742 !get_freq("clock-frequency", 1, &ppc_proc_freq)) {
743 743
744 printk(KERN_ERR "WARNING: Estimating processor frequency " 744 printk(KERN_ERR "WARNING: Estimating processor frequency "
745 "(not found)\n"); 745 "(not found)\n");
746 } 746 }
747 } 747 }
748 748
749 int update_persistent_clock(struct timespec now) 749 int update_persistent_clock(struct timespec now)
750 { 750 {
751 struct rtc_time tm; 751 struct rtc_time tm;
752 752
753 if (!ppc_md.set_rtc_time) 753 if (!ppc_md.set_rtc_time)
754 return 0; 754 return 0;
755 755
756 to_tm(now.tv_sec + 1 + timezone_offset, &tm); 756 to_tm(now.tv_sec + 1 + timezone_offset, &tm);
757 tm.tm_year -= 1900; 757 tm.tm_year -= 1900;
758 tm.tm_mon -= 1; 758 tm.tm_mon -= 1;
759 759
760 return ppc_md.set_rtc_time(&tm); 760 return ppc_md.set_rtc_time(&tm);
761 } 761 }
762 762
763 static void __read_persistent_clock(struct timespec *ts) 763 static void __read_persistent_clock(struct timespec *ts)
764 { 764 {
765 struct rtc_time tm; 765 struct rtc_time tm;
766 static int first = 1; 766 static int first = 1;
767 767
768 ts->tv_nsec = 0; 768 ts->tv_nsec = 0;
769 /* XXX this is a little fragile but will work okay in the short term */ 769 /* XXX this is a little fragile but will work okay in the short term */
770 if (first) { 770 if (first) {
771 first = 0; 771 first = 0;
772 if (ppc_md.time_init) 772 if (ppc_md.time_init)
773 timezone_offset = ppc_md.time_init(); 773 timezone_offset = ppc_md.time_init();
774 774
775 /* get_boot_time() isn't guaranteed to be safe to call late */ 775 /* get_boot_time() isn't guaranteed to be safe to call late */
776 if (ppc_md.get_boot_time) { 776 if (ppc_md.get_boot_time) {
777 ts->tv_sec = ppc_md.get_boot_time() - timezone_offset; 777 ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
778 return; 778 return;
779 } 779 }
780 } 780 }
781 if (!ppc_md.get_rtc_time) { 781 if (!ppc_md.get_rtc_time) {
782 ts->tv_sec = 0; 782 ts->tv_sec = 0;
783 return; 783 return;
784 } 784 }
785 ppc_md.get_rtc_time(&tm); 785 ppc_md.get_rtc_time(&tm);
786 786
787 ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, 787 ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
788 tm.tm_hour, tm.tm_min, tm.tm_sec); 788 tm.tm_hour, tm.tm_min, tm.tm_sec);
789 } 789 }
790 790
791 void read_persistent_clock(struct timespec *ts) 791 void read_persistent_clock(struct timespec *ts)
792 { 792 {
793 __read_persistent_clock(ts); 793 __read_persistent_clock(ts);
794 794
795 /* Sanitize it in case real time clock is set below EPOCH */ 795 /* Sanitize it in case real time clock is set below EPOCH */
796 if (ts->tv_sec < 0) { 796 if (ts->tv_sec < 0) {
797 ts->tv_sec = 0; 797 ts->tv_sec = 0;
798 ts->tv_nsec = 0; 798 ts->tv_nsec = 0;
799 } 799 }
800 800
801 } 801 }
802 802
803 /* clocksource code */ 803 /* clocksource code */
804 static cycle_t rtc_read(struct clocksource *cs) 804 static cycle_t rtc_read(struct clocksource *cs)
805 { 805 {
806 return (cycle_t)get_rtc(); 806 return (cycle_t)get_rtc();
807 } 807 }
808 808
809 static cycle_t timebase_read(struct clocksource *cs) 809 static cycle_t timebase_read(struct clocksource *cs)
810 { 810 {
811 return (cycle_t)get_tb(); 811 return (cycle_t)get_tb();
812 } 812 }
813 813
814 void update_vsyscall(struct timespec *wall_time, struct timespec *wtm, 814 void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
815 struct clocksource *clock, u32 mult) 815 struct clocksource *clock, u32 mult)
816 { 816 {
817 u64 new_tb_to_xs, new_stamp_xsec; 817 u64 new_tb_to_xs, new_stamp_xsec;
818 u32 frac_sec; 818 u32 frac_sec;
819 819
820 if (clock != &clocksource_timebase) 820 if (clock != &clocksource_timebase)
821 return; 821 return;
822 822
823 /* Make userspace gettimeofday spin until we're done. */ 823 /* Make userspace gettimeofday spin until we're done. */
824 ++vdso_data->tb_update_count; 824 ++vdso_data->tb_update_count;
825 smp_mb(); 825 smp_mb();
826 826
827 /* XXX this assumes clock->shift == 22 */ 827 /* XXX this assumes clock->shift == 22 */
828 /* 4611686018 ~= 2^(20+64-22) / 1e9 */ 828 /* 4611686018 ~= 2^(20+64-22) / 1e9 */
829 new_tb_to_xs = (u64) mult * 4611686018ULL; 829 new_tb_to_xs = (u64) mult * 4611686018ULL;
830 new_stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC; 830 new_stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
831 do_div(new_stamp_xsec, 1000000000); 831 do_div(new_stamp_xsec, 1000000000);
832 new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC; 832 new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;
833 833
834 BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC); 834 BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC);
835 /* this is tv_nsec / 1e9 as a 0.32 fraction */ 835 /* this is tv_nsec / 1e9 as a 0.32 fraction */
836 frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32; 836 frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32;
837 837
838 /* 838 /*
839 * tb_update_count is used to allow the userspace gettimeofday code 839 * tb_update_count is used to allow the userspace gettimeofday code
840 * to assure itself that it sees a consistent view of the tb_to_xs and 840 * to assure itself that it sees a consistent view of the tb_to_xs and
841 * stamp_xsec variables. It reads the tb_update_count, then reads 841 * stamp_xsec variables. It reads the tb_update_count, then reads
842 * tb_to_xs and stamp_xsec and then reads tb_update_count again. If 842 * tb_to_xs and stamp_xsec and then reads tb_update_count again. If
843 * the two values of tb_update_count match and are even then the 843 * the two values of tb_update_count match and are even then the
844 * tb_to_xs and stamp_xsec values are consistent. If not, then it 844 * tb_to_xs and stamp_xsec values are consistent. If not, then it
845 * loops back and reads them again until this criterion is met. 845 * loops back and reads them again until this criterion is met.
846 * We expect the caller to have done the first increment of 846 * We expect the caller to have done the first increment of
847 * vdso_data->tb_update_count already. 847 * vdso_data->tb_update_count already.
848 */ 848 */
849 vdso_data->tb_orig_stamp = clock->cycle_last; 849 vdso_data->tb_orig_stamp = clock->cycle_last;
850 vdso_data->stamp_xsec = new_stamp_xsec; 850 vdso_data->stamp_xsec = new_stamp_xsec;
851 vdso_data->tb_to_xs = new_tb_to_xs; 851 vdso_data->tb_to_xs = new_tb_to_xs;
852 vdso_data->wtom_clock_sec = wtm->tv_sec; 852 vdso_data->wtom_clock_sec = wtm->tv_sec;
853 vdso_data->wtom_clock_nsec = wtm->tv_nsec; 853 vdso_data->wtom_clock_nsec = wtm->tv_nsec;
854 vdso_data->stamp_xtime = *wall_time; 854 vdso_data->stamp_xtime = *wall_time;
855 vdso_data->stamp_sec_fraction = frac_sec; 855 vdso_data->stamp_sec_fraction = frac_sec;
856 smp_wmb(); 856 smp_wmb();
857 ++(vdso_data->tb_update_count); 857 ++(vdso_data->tb_update_count);
858 } 858 }
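
The two fixed-point quantities computed above can be reproduced in isolation: stamp_xsec is the wall time scaled by 2^20, and frac_sec is tv_nsec/1e9 as a 0.32 binary fraction (18446744073 is roughly 2^64/1e9, so the shift by 32 leaves a 0.32 value). A rough standalone illustration with assumed wall-clock inputs:

#include <stdio.h>
#include <stdint.h>

#define XSEC_PER_SEC (1024*1024)	/* 2^20 "xsec" per second, as above */

int main(void)
{
	uint64_t tv_sec = 1234567;	/* assumed wall-clock seconds */
	uint64_t tv_nsec = 500000000;	/* assumed nanoseconds (0.5 s) */

	/* wall time in xsec: seconds and the nanosecond part, both scaled by 2^20 */
	uint64_t stamp_xsec = tv_nsec * XSEC_PER_SEC / 1000000000 +
			      tv_sec * XSEC_PER_SEC;

	/* tv_nsec / 1e9 as a 0.32 fraction; prints ~0x80000000 for half a second */
	uint32_t frac_sec = (uint32_t)(((uint64_t)tv_nsec * 18446744073ULL) >> 32);

	printf("stamp_xsec=%llu frac_sec=0x%08x\n",
	       (unsigned long long)stamp_xsec, (unsigned)frac_sec);
	return 0;
}
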
859 859
860 void update_vsyscall_tz(void) 860 void update_vsyscall_tz(void)
861 { 861 {
862 /* Make userspace gettimeofday spin until we're done. */ 862 /* Make userspace gettimeofday spin until we're done. */
863 ++vdso_data->tb_update_count; 863 ++vdso_data->tb_update_count;
864 smp_mb(); 864 smp_mb();
865 vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; 865 vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
866 vdso_data->tz_dsttime = sys_tz.tz_dsttime; 866 vdso_data->tz_dsttime = sys_tz.tz_dsttime;
867 smp_mb(); 867 smp_mb();
868 ++vdso_data->tb_update_count; 868 ++vdso_data->tb_update_count;
869 } 869 }
870 870
871 static void __init clocksource_init(void) 871 static void __init clocksource_init(void)
872 { 872 {
873 struct clocksource *clock; 873 struct clocksource *clock;
874 874
875 if (__USE_RTC()) 875 if (__USE_RTC())
876 clock = &clocksource_rtc; 876 clock = &clocksource_rtc;
877 else 877 else
878 clock = &clocksource_timebase; 878 clock = &clocksource_timebase;
879 879
880 clock->mult = clocksource_hz2mult(tb_ticks_per_sec, clock->shift); 880 clock->mult = clocksource_hz2mult(tb_ticks_per_sec, clock->shift);
881 881
882 if (clocksource_register(clock)) { 882 if (clocksource_register(clock)) {
883 printk(KERN_ERR "clocksource: %s is already registered\n", 883 printk(KERN_ERR "clocksource: %s is already registered\n",
884 clock->name); 884 clock->name);
885 return; 885 return;
886 } 886 }
887 887
888 printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n", 888 printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
889 clock->name, clock->mult, clock->shift); 889 clock->name, clock->mult, clock->shift);
890 } 890 }
891 891
892 static int decrementer_set_next_event(unsigned long evt, 892 static int decrementer_set_next_event(unsigned long evt,
893 struct clock_event_device *dev) 893 struct clock_event_device *dev)
894 { 894 {
895 __get_cpu_var(decrementers).next_tb = get_tb_or_rtc() + evt; 895 __get_cpu_var(decrementers).next_tb = get_tb_or_rtc() + evt;
896 set_dec(evt); 896 set_dec(evt);
897 return 0; 897 return 0;
898 } 898 }
899 899
900 static void decrementer_set_mode(enum clock_event_mode mode, 900 static void decrementer_set_mode(enum clock_event_mode mode,
901 struct clock_event_device *dev) 901 struct clock_event_device *dev)
902 { 902 {
903 if (mode != CLOCK_EVT_MODE_ONESHOT) 903 if (mode != CLOCK_EVT_MODE_ONESHOT)
904 decrementer_set_next_event(DECREMENTER_MAX, dev); 904 decrementer_set_next_event(DECREMENTER_MAX, dev);
905 } 905 }
906 906
907 static inline uint64_t div_sc64(unsigned long ticks, unsigned long nsec, 907 static inline uint64_t div_sc64(unsigned long ticks, unsigned long nsec,
908 int shift) 908 int shift)
909 { 909 {
910 uint64_t tmp = ((uint64_t)ticks) << shift; 910 uint64_t tmp = ((uint64_t)ticks) << shift;
911 911
912 do_div(tmp, nsec); 912 do_div(tmp, nsec);
913 return tmp; 913 return tmp;
914 } 914 }
915 915
916 static void __init setup_clockevent_multiplier(unsigned long hz) 916 static void __init setup_clockevent_multiplier(unsigned long hz)
917 { 917 {
918 u64 mult, shift = 32; 918 u64 mult, shift = 32;
919 919
920 while (1) { 920 while (1) {
921 mult = div_sc64(hz, NSEC_PER_SEC, shift); 921 mult = div_sc64(hz, NSEC_PER_SEC, shift);
922 if (mult && (mult >> 32UL) == 0UL) 922 if (mult && (mult >> 32UL) == 0UL)
923 break; 923 break;
924 924
925 shift--; 925 shift--;
926 } 926 }
927 927
928 decrementer_clockevent.shift = shift; 928 decrementer_clockevent.shift = shift;
929 decrementer_clockevent.mult = mult; 929 decrementer_clockevent.mult = mult;
930 } 930 }
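
The loop above looks for the largest shift, starting at 32, for which hz * 2^shift / NSEC_PER_SEC still fits in 32 bits; the resulting mult/shift pair is what the clockevents core later uses to convert between nanoseconds and decrementer ticks. A standalone sketch with an assumed frequency:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t hz = 512000000;	/* assumed decrementer/timebase frequency */
	uint64_t shift = 32, mult;

	for (;;) {
		mult = (hz << shift) / 1000000000;	/* what div_sc64() computes */
		if (mult && (mult >> 32) == 0)
			break;
		shift--;
	}
	/* a clockevent delta of d nanoseconds then maps to roughly d * mult >> shift ticks */
	printf("mult=%llu shift=%llu\n",
	       (unsigned long long)mult, (unsigned long long)shift);
	return 0;
}
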
931 931
932 static void register_decrementer_clockevent(int cpu) 932 static void register_decrementer_clockevent(int cpu)
933 { 933 {
934 struct clock_event_device *dec = &per_cpu(decrementers, cpu).event; 934 struct clock_event_device *dec = &per_cpu(decrementers, cpu).event;
935 935
936 *dec = decrementer_clockevent; 936 *dec = decrementer_clockevent;
937 dec->cpumask = cpumask_of(cpu); 937 dec->cpumask = cpumask_of(cpu);
938 938
939 printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n", 939 printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
940 dec->name, dec->mult, dec->shift, cpu); 940 dec->name, dec->mult, dec->shift, cpu);
941 941
942 clockevents_register_device(dec); 942 clockevents_register_device(dec);
943 } 943 }
944 944
945 static void __init init_decrementer_clockevent(void) 945 static void __init init_decrementer_clockevent(void)
946 { 946 {
947 int cpu = smp_processor_id(); 947 int cpu = smp_processor_id();
948 948
949 setup_clockevent_multiplier(ppc_tb_freq); 949 setup_clockevent_multiplier(ppc_tb_freq);
950 decrementer_clockevent.max_delta_ns = 950 decrementer_clockevent.max_delta_ns =
951 clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent); 951 clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
952 decrementer_clockevent.min_delta_ns = 952 decrementer_clockevent.min_delta_ns =
953 clockevent_delta2ns(2, &decrementer_clockevent); 953 clockevent_delta2ns(2, &decrementer_clockevent);
954 954
955 register_decrementer_clockevent(cpu); 955 register_decrementer_clockevent(cpu);
956 } 956 }
957 957
958 void secondary_cpu_time_init(void) 958 void secondary_cpu_time_init(void)
959 { 959 {
960 /* Start the decrementer on CPUs that have manual control 960 /* Start the decrementer on CPUs that have manual control
961 * such as BookE 961 * such as BookE
962 */ 962 */
963 start_cpu_decrementer(); 963 start_cpu_decrementer();
964 964
965 /* FIXME: Should make unrelated change to move snapshot_timebase 965 /* FIXME: Should make unrelated change to move snapshot_timebase
966 * call here ! */ 966 * call here ! */
967 register_decrementer_clockevent(smp_processor_id()); 967 register_decrementer_clockevent(smp_processor_id());
968 } 968 }
969 969
970 /* This function is only called on the boot processor */ 970 /* This function is only called on the boot processor */
971 void __init time_init(void) 971 void __init time_init(void)
972 { 972 {
973 struct div_result res; 973 struct div_result res;
974 u64 scale; 974 u64 scale;
975 unsigned shift; 975 unsigned shift;
976 976
977 if (__USE_RTC()) { 977 if (__USE_RTC()) {
978 /* 601 processor: dec counts down by 128 every 128ns */ 978 /* 601 processor: dec counts down by 128 every 128ns */
979 ppc_tb_freq = 1000000000; 979 ppc_tb_freq = 1000000000;
980 } else { 980 } else {
981 /* Normal PowerPC with timebase register */ 981 /* Normal PowerPC with timebase register */
982 ppc_md.calibrate_decr(); 982 ppc_md.calibrate_decr();
983 printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n", 983 printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
984 ppc_tb_freq / 1000000, ppc_tb_freq % 1000000); 984 ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
985 printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n", 985 printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
986 ppc_proc_freq / 1000000, ppc_proc_freq % 1000000); 986 ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
987 } 987 }
988 988
989 tb_ticks_per_jiffy = ppc_tb_freq / HZ; 989 tb_ticks_per_jiffy = ppc_tb_freq / HZ;
990 tb_ticks_per_sec = ppc_tb_freq; 990 tb_ticks_per_sec = ppc_tb_freq;
991 tb_ticks_per_usec = ppc_tb_freq / 1000000; 991 tb_ticks_per_usec = ppc_tb_freq / 1000000;
992 calc_cputime_factors(); 992 calc_cputime_factors();
993 setup_cputime_one_jiffy(); 993 setup_cputime_one_jiffy();
994 994
995 /* 995 /*
996 * Compute scale factor for sched_clock. 996 * Compute scale factor for sched_clock.
997 * The calibrate_decr() function has set tb_ticks_per_sec, 997 * The calibrate_decr() function has set tb_ticks_per_sec,
998 * which is the timebase frequency. 998 * which is the timebase frequency.
999 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret 999 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
1000 * the 128-bit result as a 64.64 fixed-point number. 1000 * the 128-bit result as a 64.64 fixed-point number.
1001 * We then shift that number right until it is less than 1.0, 1001 * We then shift that number right until it is less than 1.0,
1002 * giving us the scale factor and shift count to use in 1002 * giving us the scale factor and shift count to use in
1003 * sched_clock(). 1003 * sched_clock().
1004 */ 1004 */
1005 div128_by_32(1000000000, 0, tb_ticks_per_sec, &res); 1005 div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
1006 scale = res.result_low; 1006 scale = res.result_low;
1007 for (shift = 0; res.result_high != 0; ++shift) { 1007 for (shift = 0; res.result_high != 0; ++shift) {
1008 scale = (scale >> 1) | (res.result_high << 63); 1008 scale = (scale >> 1) | (res.result_high << 63);
1009 res.result_high >>= 1; 1009 res.result_high >>= 1;
1010 } 1010 }
1011 tb_to_ns_scale = scale; 1011 tb_to_ns_scale = scale;
1012 tb_to_ns_shift = shift; 1012 tb_to_ns_shift = shift;
1013 /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */ 1013 /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
1014 boot_tb = get_tb_or_rtc(); 1014 boot_tb = get_tb_or_rtc();
1015 1015
1016 /* If platform provided a timezone (pmac), we correct the time */ 1016 /* If platform provided a timezone (pmac), we correct the time */
1017 if (timezone_offset) { 1017 if (timezone_offset) {
1018 sys_tz.tz_minuteswest = -timezone_offset / 60; 1018 sys_tz.tz_minuteswest = -timezone_offset / 60;
1019 sys_tz.tz_dsttime = 0; 1019 sys_tz.tz_dsttime = 0;
1020 } 1020 }
1021 1021
1022 vdso_data->tb_update_count = 0; 1022 vdso_data->tb_update_count = 0;
1023 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; 1023 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
1024 1024
1025 /* Start the decrementer on CPUs that have manual control 1025 /* Start the decrementer on CPUs that have manual control
1026 * such as BookE 1026 * such as BookE
1027 */ 1027 */
1028 start_cpu_decrementer(); 1028 start_cpu_decrementer();
1029 1029
1030 /* Register the clocksource, if we're not running on iSeries */ 1030 /* Register the clocksource, if we're not running on iSeries */
1031 if (!firmware_has_feature(FW_FEATURE_ISERIES)) 1031 if (!firmware_has_feature(FW_FEATURE_ISERIES))
1032 clocksource_init(); 1032 clocksource_init();
1033 1033
1034 init_decrementer_clockevent(); 1034 init_decrementer_clockevent();
1035 } 1035 }
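
The scale/shift derivation in time_init() above, and its use in sched_clock(), can be mimicked in a few lines of userspace C. A sketch with an assumed timebase frequency, using GCC's __int128 in place of div128_by_32() and mulhdu():

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t tb_ticks_per_sec = 512000000;	/* assumed timebase frequency */
	/* 1e9 / tb_ticks_per_sec as a 64.64 fixed-point number */
	unsigned __int128 ratio = ((unsigned __int128)1000000000 << 64) / tb_ticks_per_sec;
	unsigned int shift = 0;
	uint64_t scale, ns;

	/* shift right until the value is below 1.0, i.e. the high 64 bits are zero */
	while (ratio >> 64) {
		ratio >>= 1;
		shift++;
	}
	scale = (uint64_t)ratio;

	/* sched_clock()-style conversion: one second's worth of ticks -> ~1e9 ns */
	ns = ((uint64_t)(((unsigned __int128)tb_ticks_per_sec * scale) >> 64)) << shift;
	printf("scale=%llu shift=%u ns=%llu\n",
	       (unsigned long long)scale, shift, (unsigned long long)ns);
	return 0;
}
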
1036 1036
1037 1037
1038 #define FEBRUARY 2 1038 #define FEBRUARY 2
1039 #define STARTOFTIME 1970 1039 #define STARTOFTIME 1970
1040 #define SECDAY 86400L 1040 #define SECDAY 86400L
1041 #define SECYR (SECDAY * 365) 1041 #define SECYR (SECDAY * 365)
1042 #define leapyear(year) ((year) % 4 == 0 && \ 1042 #define leapyear(year) ((year) % 4 == 0 && \
1043 ((year) % 100 != 0 || (year) % 400 == 0)) 1043 ((year) % 100 != 0 || (year) % 400 == 0))
1044 #define days_in_year(a) (leapyear(a) ? 366 : 365) 1044 #define days_in_year(a) (leapyear(a) ? 366 : 365)
1045 #define days_in_month(a) (month_days[(a) - 1]) 1045 #define days_in_month(a) (month_days[(a) - 1])
1046 1046
1047 static int month_days[12] = { 1047 static int month_days[12] = {
1048 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 1048 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
1049 }; 1049 };
1050 1050
1051 /* 1051 /*
1052 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK) 1052 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
1053 */ 1053 */
1054 void GregorianDay(struct rtc_time * tm) 1054 void GregorianDay(struct rtc_time * tm)
1055 { 1055 {
1056 int leapsToDate; 1056 int leapsToDate;
1057 int lastYear; 1057 int lastYear;
1058 int day; 1058 int day;
1059 int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 }; 1059 int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
1060 1060
1061 lastYear = tm->tm_year - 1; 1061 lastYear = tm->tm_year - 1;
1062 1062
1063 /* 1063 /*
1064 * Number of leap corrections to apply up to end of last year 1064 * Number of leap corrections to apply up to end of last year
1065 */ 1065 */
1066 leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400; 1066 leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;
1067 1067
1068 /* 1068 /*
1069 * This year is a leap year if it is divisible by 4 except when it is 1069 * This year is a leap year if it is divisible by 4 except when it is
1070 * divisible by 100 unless it is divisible by 400 1070 * divisible by 100 unless it is divisible by 400
1071 * 1071 *
1072 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was 1072 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
1073 */ 1073 */
1074 day = tm->tm_mon > 2 && leapyear(tm->tm_year); 1074 day = tm->tm_mon > 2 && leapyear(tm->tm_year);
1075 1075
1076 day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] + 1076 day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
1077 tm->tm_mday; 1077 tm->tm_mday;
1078 1078
1079 tm->tm_wday = day % 7; 1079 tm->tm_wday = day % 7;
1080 } 1080 }
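
Editor's note, as a quick sanity check of the arithmetic above: take 1 January 1970 (tm_year = 1970, tm_mon = 1, tm_mday = 1). Then lastYear = 1969, leapsToDate = 1969/4 - 1969/100 + 1969/400 = 492 - 19 + 4 = 477, the leap-day correction is 0 because January is not past February, and day = 1969*365 + 477 + 0 + 1 = 719163. Since 719163 mod 7 = 4, tm_wday becomes 4, i.e. Thursday, which is indeed the weekday of the Unix epoch.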
1081 1081
1082 void to_tm(int tim, struct rtc_time * tm) 1082 void to_tm(int tim, struct rtc_time * tm)
1083 { 1083 {
1084 register int i; 1084 register int i;
1085 register long hms, day; 1085 register long hms, day;
1086 1086
1087 day = tim / SECDAY; 1087 day = tim / SECDAY;
1088 hms = tim % SECDAY; 1088 hms = tim % SECDAY;
1089 1089
1090 /* Hours, minutes, seconds are easy */ 1090 /* Hours, minutes, seconds are easy */
1091 tm->tm_hour = hms / 3600; 1091 tm->tm_hour = hms / 3600;
1092 tm->tm_min = (hms % 3600) / 60; 1092 tm->tm_min = (hms % 3600) / 60;
1093 tm->tm_sec = (hms % 3600) % 60; 1093 tm->tm_sec = (hms % 3600) % 60;
1094 1094
1095 /* Number of years in days */ 1095 /* Number of years in days */
1096 for (i = STARTOFTIME; day >= days_in_year(i); i++) 1096 for (i = STARTOFTIME; day >= days_in_year(i); i++)
1097 day -= days_in_year(i); 1097 day -= days_in_year(i);
1098 tm->tm_year = i; 1098 tm->tm_year = i;
1099 1099
1100 /* Number of months in days left */ 1100 /* Number of months in days left */
1101 if (leapyear(tm->tm_year)) 1101 if (leapyear(tm->tm_year))
1102 days_in_month(FEBRUARY) = 29; 1102 days_in_month(FEBRUARY) = 29;
1103 for (i = 1; day >= days_in_month(i); i++) 1103 for (i = 1; day >= days_in_month(i); i++)
1104 day -= days_in_month(i); 1104 day -= days_in_month(i);
1105 days_in_month(FEBRUARY) = 28; 1105 days_in_month(FEBRUARY) = 28;
1106 tm->tm_mon = i; 1106 tm->tm_mon = i;
1107 1107
1108 /* Days are what is left over (+1) from all that. */ 1108 /* Days are what is left over (+1) from all that. */
1109 tm->tm_mday = day + 1; 1109 tm->tm_mday = day + 1;
1110 1110
1111 /* 1111 /*
1112 * Determine the day of week 1112 * Determine the day of week
1113 */ 1113 */
1114 GregorianDay(tm); 1114 GregorianDay(tm);
1115 } 1115 }
1116 1116
1117 /* 1117 /*
1118 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit 1118 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
1119 * result. 1119 * result.
1120 */ 1120 */
1121 void div128_by_32(u64 dividend_high, u64 dividend_low, 1121 void div128_by_32(u64 dividend_high, u64 dividend_low,
1122 unsigned divisor, struct div_result *dr) 1122 unsigned divisor, struct div_result *dr)
1123 { 1123 {
1124 unsigned long a, b, c, d; 1124 unsigned long a, b, c, d;
1125 unsigned long w, x, y, z; 1125 unsigned long w, x, y, z;
1126 u64 ra, rb, rc; 1126 u64 ra, rb, rc;
1127 1127
1128 a = dividend_high >> 32; 1128 a = dividend_high >> 32;
1129 b = dividend_high & 0xffffffff; 1129 b = dividend_high & 0xffffffff;
1130 c = dividend_low >> 32; 1130 c = dividend_low >> 32;
1131 d = dividend_low & 0xffffffff; 1131 d = dividend_low & 0xffffffff;
1132 1132
1133 w = a / divisor; 1133 w = a / divisor;
1134 ra = ((u64)(a - (w * divisor)) << 32) + b; 1134 ra = ((u64)(a - (w * divisor)) << 32) + b;
1135 1135
1136 rb = ((u64) do_div(ra, divisor) << 32) + c; 1136 rb = ((u64) do_div(ra, divisor) << 32) + c;
1137 x = ra; 1137 x = ra;
1138 1138
1139 rc = ((u64) do_div(rb, divisor) << 32) + d; 1139 rc = ((u64) do_div(rb, divisor) << 32) + d;
1140 y = rb; 1140 y = rb;
1141 1141
1142 do_div(rc, divisor); 1142 do_div(rc, divisor);
1143 z = rc; 1143 z = rc;
1144 1144
1145 dr->result_high = ((u64)w << 32) + x; 1145 dr->result_high = ((u64)w << 32) + x;
1146 dr->result_low = ((u64)y << 32) + z; 1146 dr->result_low = ((u64)y << 32) + z;
1147 1147
1148 } 1148 }
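
Editor's note: the routine above is ordinary long division in base 2^32; a, b, c, d are the four 32-bit digits of the dividend and w, x, y, z become the four digits of the 128-bit quotient (any remainder is discarded). A minimal cross-check, illustrative only and not kernel code, assuming a compiler with the unsigned __int128 extension and a hypothetical dividend of 10^9 * 2^64 with divisor 10^9:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t hi = 1000000000, lo = 0;   /* dividend = 10^9 * 2^64 */
            uint32_t divisor = 1000000000;
            unsigned __int128 dividend = ((unsigned __int128)hi << 64) | lo;
            unsigned __int128 q = dividend / divisor;

            /* expect high = 1 and low = 0, i.e. exactly 2^64, matching what
             * div128_by_32(hi, lo, divisor, &dr) stores in dr above */
            printf("high=%llu low=%llu\n",
                   (unsigned long long)(q >> 64), (unsigned long long)q);
            return 0;
    }

This mirrors the call made in time_init() above, div128_by_32(1000000000, 0, tb_ticks_per_sec, &res), just with a divisor chosen so the expected quotient is obvious.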
1149 1149
1150 /* We don't need to calibrate delay, we use the CPU timebase for that */ 1150 /* We don't need to calibrate delay, we use the CPU timebase for that */
1151 void calibrate_delay(void) 1151 void calibrate_delay(void)
1152 { 1152 {
1153 /* Some generic code (such as spinlock debug) use loops_per_jiffy 1153 /* Some generic code (such as spinlock debug) use loops_per_jiffy
1154 * as the number of __delay(1) in a jiffy, so make it so 1154 * as the number of __delay(1) in a jiffy, so make it so
1155 */ 1155 */
1156 loops_per_jiffy = tb_ticks_per_jiffy; 1156 loops_per_jiffy = tb_ticks_per_jiffy;
1157 } 1157 }
1158 1158
1159 static int __init rtc_init(void) 1159 static int __init rtc_init(void)
1160 { 1160 {
1161 struct platform_device *pdev; 1161 struct platform_device *pdev;
1162 1162
1163 if (!ppc_md.get_rtc_time) 1163 if (!ppc_md.get_rtc_time)
1164 return -ENODEV; 1164 return -ENODEV;
1165 1165
1166 pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0); 1166 pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
1167 if (IS_ERR(pdev)) 1167 if (IS_ERR(pdev))
1168 return PTR_ERR(pdev); 1168 return PTR_ERR(pdev);
1169 1169
1170 return 0; 1170 return 0;
1171 } 1171 }
1172 1172
1173 module_init(rtc_init); 1173 module_init(rtc_init);
1174 1174
arch/powerpc/kernel/vio.c
1 /* 1 /*
2 * IBM PowerPC Virtual I/O Infrastructure Support. 2 * IBM PowerPC Virtual I/O Infrastructure Support.
3 * 3 *
4 * Copyright (c) 2003,2008 IBM Corp. 4 * Copyright (c) 2003,2008 IBM Corp.
5 * Dave Engebretsen engebret@us.ibm.com 5 * Dave Engebretsen engebret@us.ibm.com
6 * Santiago Leon santil@us.ibm.com 6 * Santiago Leon santil@us.ibm.com
7 * Hollis Blanchard <hollisb@us.ibm.com> 7 * Hollis Blanchard <hollisb@us.ibm.com>
8 * Stephen Rothwell 8 * Stephen Rothwell
9 * Robert Jennings <rcjenn@us.ibm.com> 9 * Robert Jennings <rcjenn@us.ibm.com>
10 * 10 *
11 * This program is free software; you can redistribute it and/or 11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License 12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version. 14 * 2 of the License, or (at your option) any later version.
15 */ 15 */
16 16
17 #include <linux/types.h> 17 #include <linux/types.h>
18 #include <linux/stat.h> 18 #include <linux/stat.h>
19 #include <linux/device.h> 19 #include <linux/device.h>
20 #include <linux/init.h> 20 #include <linux/init.h>
21 #include <linux/slab.h> 21 #include <linux/slab.h>
22 #include <linux/console.h> 22 #include <linux/console.h>
23 #include <linux/module.h> 23 #include <linux/export.h>
24 #include <linux/mm.h> 24 #include <linux/mm.h>
25 #include <linux/dma-mapping.h> 25 #include <linux/dma-mapping.h>
26 #include <linux/kobject.h> 26 #include <linux/kobject.h>
27 27
28 #include <asm/iommu.h> 28 #include <asm/iommu.h>
29 #include <asm/dma.h> 29 #include <asm/dma.h>
30 #include <asm/vio.h> 30 #include <asm/vio.h>
31 #include <asm/prom.h> 31 #include <asm/prom.h>
32 #include <asm/firmware.h> 32 #include <asm/firmware.h>
33 #include <asm/tce.h> 33 #include <asm/tce.h>
34 #include <asm/abs_addr.h> 34 #include <asm/abs_addr.h>
35 #include <asm/page.h> 35 #include <asm/page.h>
36 #include <asm/hvcall.h> 36 #include <asm/hvcall.h>
37 #include <asm/iseries/vio.h> 37 #include <asm/iseries/vio.h>
38 #include <asm/iseries/hv_types.h> 38 #include <asm/iseries/hv_types.h>
39 #include <asm/iseries/hv_lp_config.h> 39 #include <asm/iseries/hv_lp_config.h>
40 #include <asm/iseries/hv_call_xm.h> 40 #include <asm/iseries/hv_call_xm.h>
41 #include <asm/iseries/iommu.h> 41 #include <asm/iseries/iommu.h>
42 42
43 static struct bus_type vio_bus_type; 43 static struct bus_type vio_bus_type;
44 44
45 static struct vio_dev vio_bus_device = { /* fake "parent" device */ 45 static struct vio_dev vio_bus_device = { /* fake "parent" device */
46 .name = "vio", 46 .name = "vio",
47 .type = "", 47 .type = "",
48 .dev.init_name = "vio", 48 .dev.init_name = "vio",
49 .dev.bus = &vio_bus_type, 49 .dev.bus = &vio_bus_type,
50 }; 50 };
51 51
52 #ifdef CONFIG_PPC_SMLPAR 52 #ifdef CONFIG_PPC_SMLPAR
53 /** 53 /**
54 * vio_cmo_pool - A pool of IO memory for CMO use 54 * vio_cmo_pool - A pool of IO memory for CMO use
55 * 55 *
56 * @size: The size of the pool in bytes 56 * @size: The size of the pool in bytes
57 * @free: The amount of free memory in the pool 57 * @free: The amount of free memory in the pool
58 */ 58 */
59 struct vio_cmo_pool { 59 struct vio_cmo_pool {
60 size_t size; 60 size_t size;
61 size_t free; 61 size_t free;
62 }; 62 };
63 63
64 /* How many ms to delay queued balance work */ 64 /* How many ms to delay queued balance work */
65 #define VIO_CMO_BALANCE_DELAY 100 65 #define VIO_CMO_BALANCE_DELAY 100
66 66
67 /* Portion out IO memory to CMO devices by this chunk size */ 67 /* Portion out IO memory to CMO devices by this chunk size */
68 #define VIO_CMO_BALANCE_CHUNK 131072 68 #define VIO_CMO_BALANCE_CHUNK 131072
69 69
70 /** 70 /**
71 * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement 71 * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
72 * 72 *
73 * @vio_dev: struct vio_dev pointer 73 * @vio_dev: struct vio_dev pointer
74 * @list: pointer to other devices on bus that are being tracked 74 * @list: pointer to other devices on bus that are being tracked
75 */ 75 */
76 struct vio_cmo_dev_entry { 76 struct vio_cmo_dev_entry {
77 struct vio_dev *viodev; 77 struct vio_dev *viodev;
78 struct list_head list; 78 struct list_head list;
79 }; 79 };
80 80
81 /** 81 /**
82 * vio_cmo - VIO bus accounting structure for CMO entitlement 82 * vio_cmo - VIO bus accounting structure for CMO entitlement
83 * 83 *
84 * @lock: spinlock for entire structure 84 * @lock: spinlock for entire structure
85 * @balance_q: work queue for balancing system entitlement 85 * @balance_q: work queue for balancing system entitlement
86 * @device_list: list of CMO-enabled devices requiring entitlement 86 * @device_list: list of CMO-enabled devices requiring entitlement
87 * @entitled: total system entitlement in bytes 87 * @entitled: total system entitlement in bytes
88 * @reserve: pool of memory from which devices reserve entitlement, incl. spare 88 * @reserve: pool of memory from which devices reserve entitlement, incl. spare
89 * @excess: pool of excess entitlement not needed for device reserves or spare 89 * @excess: pool of excess entitlement not needed for device reserves or spare
90 * @spare: IO memory for device hotplug functionality 90 * @spare: IO memory for device hotplug functionality
91 * @min: minimum necessary for system operation 91 * @min: minimum necessary for system operation
92 * @desired: desired memory for system operation 92 * @desired: desired memory for system operation
93 * @curr: bytes currently allocated 93 * @curr: bytes currently allocated
94 * @high: high water mark for IO data usage 94 * @high: high water mark for IO data usage
95 */ 95 */
96 struct vio_cmo { 96 struct vio_cmo {
97 spinlock_t lock; 97 spinlock_t lock;
98 struct delayed_work balance_q; 98 struct delayed_work balance_q;
99 struct list_head device_list; 99 struct list_head device_list;
100 size_t entitled; 100 size_t entitled;
101 struct vio_cmo_pool reserve; 101 struct vio_cmo_pool reserve;
102 struct vio_cmo_pool excess; 102 struct vio_cmo_pool excess;
103 size_t spare; 103 size_t spare;
104 size_t min; 104 size_t min;
105 size_t desired; 105 size_t desired;
106 size_t curr; 106 size_t curr;
107 size_t high; 107 size_t high;
108 } vio_cmo; 108 } vio_cmo;
109 109
110 /** 110 /**
111 * vio_cmo_OF_devices - Count the number of OF devices that have DMA windows 111 * vio_cmo_OF_devices - Count the number of OF devices that have DMA windows
112 */ 112 */
113 static int vio_cmo_num_OF_devs(void) 113 static int vio_cmo_num_OF_devs(void)
114 { 114 {
115 struct device_node *node_vroot; 115 struct device_node *node_vroot;
116 int count = 0; 116 int count = 0;
117 117
118 /* 118 /*
119 * Count the number of vdevice entries with an 119 * Count the number of vdevice entries with an
120 * ibm,my-dma-window OF property 120 * ibm,my-dma-window OF property
121 */ 121 */
122 node_vroot = of_find_node_by_name(NULL, "vdevice"); 122 node_vroot = of_find_node_by_name(NULL, "vdevice");
123 if (node_vroot) { 123 if (node_vroot) {
124 struct device_node *of_node; 124 struct device_node *of_node;
125 struct property *prop; 125 struct property *prop;
126 126
127 for_each_child_of_node(node_vroot, of_node) { 127 for_each_child_of_node(node_vroot, of_node) {
128 prop = of_find_property(of_node, "ibm,my-dma-window", 128 prop = of_find_property(of_node, "ibm,my-dma-window",
129 NULL); 129 NULL);
130 if (prop) 130 if (prop)
131 count++; 131 count++;
132 } 132 }
133 } 133 }
134 of_node_put(node_vroot); 134 of_node_put(node_vroot);
135 return count; 135 return count;
136 } 136 }
137 137
138 /** 138 /**
139 * vio_cmo_alloc - allocate IO memory for CMO-enable devices 139 * vio_cmo_alloc - allocate IO memory for CMO-enable devices
140 * 140 *
141 * @viodev: VIO device requesting IO memory 141 * @viodev: VIO device requesting IO memory
142 * @size: size of allocation requested 142 * @size: size of allocation requested
143 * 143 *
144 * Allocations come from memory reserved for the devices and any excess 144 * Allocations come from memory reserved for the devices and any excess
145 * IO memory available to all devices. The spare pool used to service 145 * IO memory available to all devices. The spare pool used to service
146 * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be 146 * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
147 * made available. 147 * made available.
148 * 148 *
149 * Return codes: 149 * Return codes:
150 * 0 for successful allocation and -ENOMEM for a failure 150 * 0 for successful allocation and -ENOMEM for a failure
151 */ 151 */
152 static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size) 152 static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
153 { 153 {
154 unsigned long flags; 154 unsigned long flags;
155 size_t reserve_free = 0; 155 size_t reserve_free = 0;
156 size_t excess_free = 0; 156 size_t excess_free = 0;
157 int ret = -ENOMEM; 157 int ret = -ENOMEM;
158 158
159 spin_lock_irqsave(&vio_cmo.lock, flags); 159 spin_lock_irqsave(&vio_cmo.lock, flags);
160 160
161 /* Determine the amount of free entitlement available in reserve */ 161 /* Determine the amount of free entitlement available in reserve */
162 if (viodev->cmo.entitled > viodev->cmo.allocated) 162 if (viodev->cmo.entitled > viodev->cmo.allocated)
163 reserve_free = viodev->cmo.entitled - viodev->cmo.allocated; 163 reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;
164 164
165 /* If spare is not fulfilled, the excess pool can not be used. */ 165 /* If spare is not fulfilled, the excess pool can not be used. */
166 if (vio_cmo.spare >= VIO_CMO_MIN_ENT) 166 if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
167 excess_free = vio_cmo.excess.free; 167 excess_free = vio_cmo.excess.free;
168 168
169 /* The request can be satisfied */ 169 /* The request can be satisfied */
170 if ((reserve_free + excess_free) >= size) { 170 if ((reserve_free + excess_free) >= size) {
171 vio_cmo.curr += size; 171 vio_cmo.curr += size;
172 if (vio_cmo.curr > vio_cmo.high) 172 if (vio_cmo.curr > vio_cmo.high)
173 vio_cmo.high = vio_cmo.curr; 173 vio_cmo.high = vio_cmo.curr;
174 viodev->cmo.allocated += size; 174 viodev->cmo.allocated += size;
175 size -= min(reserve_free, size); 175 size -= min(reserve_free, size);
176 vio_cmo.excess.free -= size; 176 vio_cmo.excess.free -= size;
177 ret = 0; 177 ret = 0;
178 } 178 }
179 179
180 spin_unlock_irqrestore(&vio_cmo.lock, flags); 180 spin_unlock_irqrestore(&vio_cmo.lock, flags);
181 return ret; 181 return ret;
182 } 182 }
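
Editor's note, a hypothetical walk through the accounting above: suppose a device has 64 KiB of entitlement with 48 KiB already allocated, requests 32 KiB, and the spare pool is fully funded. reserve_free is 16 KiB, so the request succeeds only if excess_free is at least 16 KiB; on success the device's allocated count rises to 80 KiB, the first 16 KiB is absorbed by the device's own reserve headroom, and the remaining 16 KiB is debited from vio_cmo.excess.free. If the spare pool were below VIO_CMO_MIN_ENT, excess_free would be treated as zero and the same request would fail with -ENOMEM.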
183 183
184 /** 184 /**
185 * vio_cmo_dealloc - deallocate IO memory from CMO-enable devices 185 * vio_cmo_dealloc - deallocate IO memory from CMO-enable devices
186 * @viodev: VIO device freeing IO memory 186 * @viodev: VIO device freeing IO memory
187 * @size: size of deallocation 187 * @size: size of deallocation
188 * 188 *
189 * IO memory is freed by the device back to the correct memory pools. 189 * IO memory is freed by the device back to the correct memory pools.
190 * The spare pool is replenished first from either memory pool, then 190 * The spare pool is replenished first from either memory pool, then
191 * the reserve pool is used to reduce device entitlement, the excess 191 * the reserve pool is used to reduce device entitlement, the excess
192 * pool is used to increase the reserve pool toward the desired entitlement 192 * pool is used to increase the reserve pool toward the desired entitlement
193 * target, and then the remaining memory is returned to the pools. 193 * target, and then the remaining memory is returned to the pools.
194 * 194 *
195 */ 195 */
196 static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size) 196 static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
197 { 197 {
198 unsigned long flags; 198 unsigned long flags;
199 size_t spare_needed = 0; 199 size_t spare_needed = 0;
200 size_t excess_freed = 0; 200 size_t excess_freed = 0;
201 size_t reserve_freed = size; 201 size_t reserve_freed = size;
202 size_t tmp; 202 size_t tmp;
203 int balance = 0; 203 int balance = 0;
204 204
205 spin_lock_irqsave(&vio_cmo.lock, flags); 205 spin_lock_irqsave(&vio_cmo.lock, flags);
206 vio_cmo.curr -= size; 206 vio_cmo.curr -= size;
207 207
208 /* Amount of memory freed from the excess pool */ 208 /* Amount of memory freed from the excess pool */
209 if (viodev->cmo.allocated > viodev->cmo.entitled) { 209 if (viodev->cmo.allocated > viodev->cmo.entitled) {
210 excess_freed = min(reserve_freed, (viodev->cmo.allocated - 210 excess_freed = min(reserve_freed, (viodev->cmo.allocated -
211 viodev->cmo.entitled)); 211 viodev->cmo.entitled));
212 reserve_freed -= excess_freed; 212 reserve_freed -= excess_freed;
213 } 213 }
214 214
215 /* Remove allocation from device */ 215 /* Remove allocation from device */
216 viodev->cmo.allocated -= (reserve_freed + excess_freed); 216 viodev->cmo.allocated -= (reserve_freed + excess_freed);
217 217
218 /* Spare is a subset of the reserve pool, replenish it first. */ 218 /* Spare is a subset of the reserve pool, replenish it first. */
219 spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare; 219 spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;
220 220
221 /* 221 /*
222 * Replenish the spare in the reserve pool from the excess pool. 222 * Replenish the spare in the reserve pool from the excess pool.
223 * This moves entitlement into the reserve pool. 223 * This moves entitlement into the reserve pool.
224 */ 224 */
225 if (spare_needed && excess_freed) { 225 if (spare_needed && excess_freed) {
226 tmp = min(excess_freed, spare_needed); 226 tmp = min(excess_freed, spare_needed);
227 vio_cmo.excess.size -= tmp; 227 vio_cmo.excess.size -= tmp;
228 vio_cmo.reserve.size += tmp; 228 vio_cmo.reserve.size += tmp;
229 vio_cmo.spare += tmp; 229 vio_cmo.spare += tmp;
230 excess_freed -= tmp; 230 excess_freed -= tmp;
231 spare_needed -= tmp; 231 spare_needed -= tmp;
232 balance = 1; 232 balance = 1;
233 } 233 }
234 234
235 /* 235 /*
236 * Replenish the spare in the reserve pool from the reserve pool. 236 * Replenish the spare in the reserve pool from the reserve pool.
237 * This removes entitlement from the device down to VIO_CMO_MIN_ENT, 237 * This removes entitlement from the device down to VIO_CMO_MIN_ENT,
238 * if needed, and gives it to the spare pool. The amount of used 238 * if needed, and gives it to the spare pool. The amount of used
239 * memory in this pool does not change. 239 * memory in this pool does not change.
240 */ 240 */
241 if (spare_needed && reserve_freed) { 241 if (spare_needed && reserve_freed) {
242 tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT)); 242 tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT));
243 243
244 vio_cmo.spare += tmp; 244 vio_cmo.spare += tmp;
245 viodev->cmo.entitled -= tmp; 245 viodev->cmo.entitled -= tmp;
246 reserve_freed -= tmp; 246 reserve_freed -= tmp;
247 spare_needed -= tmp; 247 spare_needed -= tmp;
248 balance = 1; 248 balance = 1;
249 } 249 }
250 250
251 /* 251 /*
252 * Increase the reserve pool until the desired allocation is met. 252 * Increase the reserve pool until the desired allocation is met.
253 * Move an allocation freed from the excess pool into the reserve 253 * Move an allocation freed from the excess pool into the reserve
254 * pool and schedule a balance operation. 254 * pool and schedule a balance operation.
255 */ 255 */
256 if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) { 256 if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
257 tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size)); 257 tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));
258 258
259 vio_cmo.excess.size -= tmp; 259 vio_cmo.excess.size -= tmp;
260 vio_cmo.reserve.size += tmp; 260 vio_cmo.reserve.size += tmp;
261 excess_freed -= tmp; 261 excess_freed -= tmp;
262 balance = 1; 262 balance = 1;
263 } 263 }
264 264
265 /* Return memory from the excess pool to that pool */ 265 /* Return memory from the excess pool to that pool */
266 if (excess_freed) 266 if (excess_freed)
267 vio_cmo.excess.free += excess_freed; 267 vio_cmo.excess.free += excess_freed;
268 268
269 if (balance) 269 if (balance)
270 schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY); 270 schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
271 spin_unlock_irqrestore(&vio_cmo.lock, flags); 271 spin_unlock_irqrestore(&vio_cmo.lock, flags);
272 } 272 }
273 273
274 /** 274 /**
275 * vio_cmo_entitlement_update - Manage system entitlement changes 275 * vio_cmo_entitlement_update - Manage system entitlement changes
276 * 276 *
277 * @new_entitlement: new system entitlement to attempt to accommodate 277 * @new_entitlement: new system entitlement to attempt to accommodate
278 * 278 *
279 * Increases in entitlement will be used to fulfill the spare entitlement 279 * Increases in entitlement will be used to fulfill the spare entitlement
280 * and the rest is given to the excess pool. Decreases, if they are 280 * and the rest is given to the excess pool. Decreases, if they are
281 * possible, come from the excess pool and from unused device entitlement 281 * possible, come from the excess pool and from unused device entitlement
282 * 282 *
283 * Returns: 0 on success, -ENOMEM when change can not be made 283 * Returns: 0 on success, -ENOMEM when change can not be made
284 */ 284 */
285 int vio_cmo_entitlement_update(size_t new_entitlement) 285 int vio_cmo_entitlement_update(size_t new_entitlement)
286 { 286 {
287 struct vio_dev *viodev; 287 struct vio_dev *viodev;
288 struct vio_cmo_dev_entry *dev_ent; 288 struct vio_cmo_dev_entry *dev_ent;
289 unsigned long flags; 289 unsigned long flags;
290 size_t avail, delta, tmp; 290 size_t avail, delta, tmp;
291 291
292 spin_lock_irqsave(&vio_cmo.lock, flags); 292 spin_lock_irqsave(&vio_cmo.lock, flags);
293 293
294 /* Entitlement increases */ 294 /* Entitlement increases */
295 if (new_entitlement > vio_cmo.entitled) { 295 if (new_entitlement > vio_cmo.entitled) {
296 delta = new_entitlement - vio_cmo.entitled; 296 delta = new_entitlement - vio_cmo.entitled;
297 297
298 /* Fulfill spare allocation */ 298 /* Fulfill spare allocation */
299 if (vio_cmo.spare < VIO_CMO_MIN_ENT) { 299 if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
300 tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare)); 300 tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
301 vio_cmo.spare += tmp; 301 vio_cmo.spare += tmp;
302 vio_cmo.reserve.size += tmp; 302 vio_cmo.reserve.size += tmp;
303 delta -= tmp; 303 delta -= tmp;
304 } 304 }
305 305
306 /* Remaining new allocation goes to the excess pool */ 306 /* Remaining new allocation goes to the excess pool */
307 vio_cmo.entitled += delta; 307 vio_cmo.entitled += delta;
308 vio_cmo.excess.size += delta; 308 vio_cmo.excess.size += delta;
309 vio_cmo.excess.free += delta; 309 vio_cmo.excess.free += delta;
310 310
311 goto out; 311 goto out;
312 } 312 }
313 313
314 /* Entitlement decreases */ 314 /* Entitlement decreases */
315 delta = vio_cmo.entitled - new_entitlement; 315 delta = vio_cmo.entitled - new_entitlement;
316 avail = vio_cmo.excess.free; 316 avail = vio_cmo.excess.free;
317 317
318 /* 318 /*
319 * Need to check how much unused entitlement each device can 319 * Need to check how much unused entitlement each device can
320 * sacrifice to fulfill entitlement change. 320 * sacrifice to fulfill entitlement change.
321 */ 321 */
322 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) { 322 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
323 if (avail >= delta) 323 if (avail >= delta)
324 break; 324 break;
325 325
326 viodev = dev_ent->viodev; 326 viodev = dev_ent->viodev;
327 if ((viodev->cmo.entitled > viodev->cmo.allocated) && 327 if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
328 (viodev->cmo.entitled > VIO_CMO_MIN_ENT)) 328 (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
329 avail += viodev->cmo.entitled - 329 avail += viodev->cmo.entitled -
330 max_t(size_t, viodev->cmo.allocated, 330 max_t(size_t, viodev->cmo.allocated,
331 VIO_CMO_MIN_ENT); 331 VIO_CMO_MIN_ENT);
332 } 332 }
333 333
334 if (delta <= avail) { 334 if (delta <= avail) {
335 vio_cmo.entitled -= delta; 335 vio_cmo.entitled -= delta;
336 336
337 /* Take entitlement from the excess pool first */ 337 /* Take entitlement from the excess pool first */
338 tmp = min(vio_cmo.excess.free, delta); 338 tmp = min(vio_cmo.excess.free, delta);
339 vio_cmo.excess.size -= tmp; 339 vio_cmo.excess.size -= tmp;
340 vio_cmo.excess.free -= tmp; 340 vio_cmo.excess.free -= tmp;
341 delta -= tmp; 341 delta -= tmp;
342 342
343 /* 343 /*
344 * Remove all but VIO_CMO_MIN_ENT bytes from devices 344 * Remove all but VIO_CMO_MIN_ENT bytes from devices
345 * until entitlement change is served 345 * until entitlement change is served
346 */ 346 */
347 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) { 347 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
348 if (!delta) 348 if (!delta)
349 break; 349 break;
350 350
351 viodev = dev_ent->viodev; 351 viodev = dev_ent->viodev;
352 tmp = 0; 352 tmp = 0;
353 if ((viodev->cmo.entitled > viodev->cmo.allocated) && 353 if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
354 (viodev->cmo.entitled > VIO_CMO_MIN_ENT)) 354 (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
355 tmp = viodev->cmo.entitled - 355 tmp = viodev->cmo.entitled -
356 max_t(size_t, viodev->cmo.allocated, 356 max_t(size_t, viodev->cmo.allocated,
357 VIO_CMO_MIN_ENT); 357 VIO_CMO_MIN_ENT);
358 viodev->cmo.entitled -= min(tmp, delta); 358 viodev->cmo.entitled -= min(tmp, delta);
359 delta -= min(tmp, delta); 359 delta -= min(tmp, delta);
360 } 360 }
361 } else { 361 } else {
362 spin_unlock_irqrestore(&vio_cmo.lock, flags); 362 spin_unlock_irqrestore(&vio_cmo.lock, flags);
363 return -ENOMEM; 363 return -ENOMEM;
364 } 364 }
365 365
366 out: 366 out:
367 schedule_delayed_work(&vio_cmo.balance_q, 0); 367 schedule_delayed_work(&vio_cmo.balance_q, 0);
368 spin_unlock_irqrestore(&vio_cmo.lock, flags); 368 spin_unlock_irqrestore(&vio_cmo.lock, flags);
369 return 0; 369 return 0;
370 } 370 }
371 371
372 /** 372 /**
373 * vio_cmo_balance - Balance entitlement among devices 373 * vio_cmo_balance - Balance entitlement among devices
374 * 374 *
375 * @work: work queue structure for this operation 375 * @work: work queue structure for this operation
376 * 376 *
377 * Any system entitlement above the minimum needed for devices, or 377 * Any system entitlement above the minimum needed for devices, or
378 * already allocated to devices, can be distributed to the devices. 378 * already allocated to devices, can be distributed to the devices.
379 * The list of devices is iterated through to recalculate the desired 379 * The list of devices is iterated through to recalculate the desired
380 * entitlement level and to determine how much entitlement above the 380 * entitlement level and to determine how much entitlement above the
381 * minimum entitlement is allocated to devices. 381 * minimum entitlement is allocated to devices.
382 * 382 *
383 * Small chunks of the available entitlement are given to devices until 383 * Small chunks of the available entitlement are given to devices until
384 * their requirements are fulfilled or there is no entitlement left to give. 384 * their requirements are fulfilled or there is no entitlement left to give.
385 * Upon completion sizes of the reserve and excess pools are calculated. 385 * Upon completion sizes of the reserve and excess pools are calculated.
386 * 386 *
387 * The system minimum entitlement level is also recalculated here. 387 * The system minimum entitlement level is also recalculated here.
388 * Entitlement will be reserved for devices even after vio_bus_remove to 388 * Entitlement will be reserved for devices even after vio_bus_remove to
389 * accommodate reloading the driver. The OF tree is walked to count the 389 * accommodate reloading the driver. The OF tree is walked to count the
390 * number of devices present and this will remove entitlement for devices 390 * number of devices present and this will remove entitlement for devices
391 * that have actually left the system after having vio_bus_remove called. 391 * that have actually left the system after having vio_bus_remove called.
392 */ 392 */
393 static void vio_cmo_balance(struct work_struct *work) 393 static void vio_cmo_balance(struct work_struct *work)
394 { 394 {
395 struct vio_cmo *cmo; 395 struct vio_cmo *cmo;
396 struct vio_dev *viodev; 396 struct vio_dev *viodev;
397 struct vio_cmo_dev_entry *dev_ent; 397 struct vio_cmo_dev_entry *dev_ent;
398 unsigned long flags; 398 unsigned long flags;
399 size_t avail = 0, level, chunk, need; 399 size_t avail = 0, level, chunk, need;
400 int devcount = 0, fulfilled; 400 int devcount = 0, fulfilled;
401 401
402 cmo = container_of(work, struct vio_cmo, balance_q.work); 402 cmo = container_of(work, struct vio_cmo, balance_q.work);
403 403
404 spin_lock_irqsave(&vio_cmo.lock, flags); 404 spin_lock_irqsave(&vio_cmo.lock, flags);
405 405
406 /* Calculate minimum entitlement and fulfill spare */ 406 /* Calculate minimum entitlement and fulfill spare */
407 cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT; 407 cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
408 BUG_ON(cmo->min > cmo->entitled); 408 BUG_ON(cmo->min > cmo->entitled);
409 cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min)); 409 cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
410 cmo->min += cmo->spare; 410 cmo->min += cmo->spare;
411 cmo->desired = cmo->min; 411 cmo->desired = cmo->min;
412 412
413 /* 413 /*
414 * Determine how much entitlement is available and reset device 414 * Determine how much entitlement is available and reset device
415 * entitlements 415 * entitlements
416 */ 416 */
417 avail = cmo->entitled - cmo->spare; 417 avail = cmo->entitled - cmo->spare;
418 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) { 418 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
419 viodev = dev_ent->viodev; 419 viodev = dev_ent->viodev;
420 devcount++; 420 devcount++;
421 viodev->cmo.entitled = VIO_CMO_MIN_ENT; 421 viodev->cmo.entitled = VIO_CMO_MIN_ENT;
422 cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT); 422 cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
423 avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT); 423 avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
424 } 424 }
425 425
426 /* 426 /*
427 * Having provided each device with the minimum entitlement, loop 427 * Having provided each device with the minimum entitlement, loop
428 * over the devices portioning out the remaining entitlement 428 * over the devices portioning out the remaining entitlement
429 * until there is nothing left. 429 * until there is nothing left.
430 */ 430 */
431 level = VIO_CMO_MIN_ENT; 431 level = VIO_CMO_MIN_ENT;
432 while (avail) { 432 while (avail) {
433 fulfilled = 0; 433 fulfilled = 0;
434 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) { 434 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
435 viodev = dev_ent->viodev; 435 viodev = dev_ent->viodev;
436 436
437 if (viodev->cmo.desired <= level) { 437 if (viodev->cmo.desired <= level) {
438 fulfilled++; 438 fulfilled++;
439 continue; 439 continue;
440 } 440 }
441 441
442 /* 442 /*
443 * Give the device up to VIO_CMO_BALANCE_CHUNK 443 * Give the device up to VIO_CMO_BALANCE_CHUNK
444 * bytes of entitlement, but do not exceed the 444 * bytes of entitlement, but do not exceed the
445 * desired level of entitlement for the device. 445 * desired level of entitlement for the device.
446 */ 446 */
447 chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK); 447 chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
448 chunk = min(chunk, (viodev->cmo.desired - 448 chunk = min(chunk, (viodev->cmo.desired -
449 viodev->cmo.entitled)); 449 viodev->cmo.entitled));
450 viodev->cmo.entitled += chunk; 450 viodev->cmo.entitled += chunk;
451 451
452 /* 452 /*
453 * If the memory for this entitlement increase was 453 * If the memory for this entitlement increase was
454 * already allocated to the device it does not come 454 * already allocated to the device it does not come
455 * from the available pool being portioned out. 455 * from the available pool being portioned out.
456 */ 456 */
457 need = max(viodev->cmo.allocated, viodev->cmo.entitled)- 457 need = max(viodev->cmo.allocated, viodev->cmo.entitled)-
458 max(viodev->cmo.allocated, level); 458 max(viodev->cmo.allocated, level);
459 avail -= need; 459 avail -= need;
460 460
461 } 461 }
462 if (fulfilled == devcount) 462 if (fulfilled == devcount)
463 break; 463 break;
464 level += VIO_CMO_BALANCE_CHUNK; 464 level += VIO_CMO_BALANCE_CHUNK;
465 } 465 }
466 466
467 /* Calculate new reserve and excess pool sizes */ 467 /* Calculate new reserve and excess pool sizes */
468 cmo->reserve.size = cmo->min; 468 cmo->reserve.size = cmo->min;
469 cmo->excess.free = 0; 469 cmo->excess.free = 0;
470 cmo->excess.size = 0; 470 cmo->excess.size = 0;
471 need = 0; 471 need = 0;
472 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) { 472 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
473 viodev = dev_ent->viodev; 473 viodev = dev_ent->viodev;
474 /* Calculated reserve size above the minimum entitlement */ 474 /* Calculated reserve size above the minimum entitlement */
475 if (viodev->cmo.entitled) 475 if (viodev->cmo.entitled)
476 cmo->reserve.size += (viodev->cmo.entitled - 476 cmo->reserve.size += (viodev->cmo.entitled -
477 VIO_CMO_MIN_ENT); 477 VIO_CMO_MIN_ENT);
478 /* Calculated used excess entitlement */ 478 /* Calculated used excess entitlement */
479 if (viodev->cmo.allocated > viodev->cmo.entitled) 479 if (viodev->cmo.allocated > viodev->cmo.entitled)
480 need += viodev->cmo.allocated - viodev->cmo.entitled; 480 need += viodev->cmo.allocated - viodev->cmo.entitled;
481 } 481 }
482 cmo->excess.size = cmo->entitled - cmo->reserve.size; 482 cmo->excess.size = cmo->entitled - cmo->reserve.size;
483 cmo->excess.free = cmo->excess.size - need; 483 cmo->excess.free = cmo->excess.size - need;
484 484
485 cancel_delayed_work(to_delayed_work(work)); 485 cancel_delayed_work(to_delayed_work(work));
486 spin_unlock_irqrestore(&vio_cmo.lock, flags); 486 spin_unlock_irqrestore(&vio_cmo.lock, flags);
487 } 487 }
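
Editor's note, a hypothetical run of the distribution loop above, assuming neither device currently has more allocated than its minimum: two devices want 300 KiB and 50 KiB above their minimum entitlement and 200 KiB is available. In the first pass device A receives a full VIO_CMO_BALANCE_CHUNK of 128 KiB and device B receives the 50 KiB it wants, leaving 22 KiB; in the second pass B is already fulfilled and A takes the remaining 22 KiB, ending 150 KiB above its minimum. The loop then exits because avail is zero, and the reserve and excess pool sizes are recomputed from the per-device results.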
488 488
489 static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size, 489 static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
490 dma_addr_t *dma_handle, gfp_t flag) 490 dma_addr_t *dma_handle, gfp_t flag)
491 { 491 {
492 struct vio_dev *viodev = to_vio_dev(dev); 492 struct vio_dev *viodev = to_vio_dev(dev);
493 void *ret; 493 void *ret;
494 494
495 if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) { 495 if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
496 atomic_inc(&viodev->cmo.allocs_failed); 496 atomic_inc(&viodev->cmo.allocs_failed);
497 return NULL; 497 return NULL;
498 } 498 }
499 499
500 ret = dma_iommu_ops.alloc_coherent(dev, size, dma_handle, flag); 500 ret = dma_iommu_ops.alloc_coherent(dev, size, dma_handle, flag);
501 if (unlikely(ret == NULL)) { 501 if (unlikely(ret == NULL)) {
502 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); 502 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
503 atomic_inc(&viodev->cmo.allocs_failed); 503 atomic_inc(&viodev->cmo.allocs_failed);
504 } 504 }
505 505
506 return ret; 506 return ret;
507 } 507 }
508 508
509 static void vio_dma_iommu_free_coherent(struct device *dev, size_t size, 509 static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
510 void *vaddr, dma_addr_t dma_handle) 510 void *vaddr, dma_addr_t dma_handle)
511 { 511 {
512 struct vio_dev *viodev = to_vio_dev(dev); 512 struct vio_dev *viodev = to_vio_dev(dev);
513 513
514 dma_iommu_ops.free_coherent(dev, size, vaddr, dma_handle); 514 dma_iommu_ops.free_coherent(dev, size, vaddr, dma_handle);
515 515
516 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); 516 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
517 } 517 }
518 518
519 static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page, 519 static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
520 unsigned long offset, size_t size, 520 unsigned long offset, size_t size,
521 enum dma_data_direction direction, 521 enum dma_data_direction direction,
522 struct dma_attrs *attrs) 522 struct dma_attrs *attrs)
523 { 523 {
524 struct vio_dev *viodev = to_vio_dev(dev); 524 struct vio_dev *viodev = to_vio_dev(dev);
525 dma_addr_t ret = DMA_ERROR_CODE; 525 dma_addr_t ret = DMA_ERROR_CODE;
526 526
527 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) { 527 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
528 atomic_inc(&viodev->cmo.allocs_failed); 528 atomic_inc(&viodev->cmo.allocs_failed);
529 return ret; 529 return ret;
530 } 530 }
531 531
532 ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs); 532 ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
533 if (unlikely(dma_mapping_error(dev, ret))) { 533 if (unlikely(dma_mapping_error(dev, ret))) {
534 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE)); 534 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
535 atomic_inc(&viodev->cmo.allocs_failed); 535 atomic_inc(&viodev->cmo.allocs_failed);
536 } 536 }
537 537
538 return ret; 538 return ret;
539 } 539 }
540 540
541 static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, 541 static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
542 size_t size, 542 size_t size,
543 enum dma_data_direction direction, 543 enum dma_data_direction direction,
544 struct dma_attrs *attrs) 544 struct dma_attrs *attrs)
545 { 545 {
546 struct vio_dev *viodev = to_vio_dev(dev); 546 struct vio_dev *viodev = to_vio_dev(dev);
547 547
548 dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs); 548 dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);
549 549
550 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE)); 550 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
551 } 551 }
552 552
553 static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, 553 static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
554 int nelems, enum dma_data_direction direction, 554 int nelems, enum dma_data_direction direction,
555 struct dma_attrs *attrs) 555 struct dma_attrs *attrs)
556 { 556 {
557 struct vio_dev *viodev = to_vio_dev(dev); 557 struct vio_dev *viodev = to_vio_dev(dev);
558 struct scatterlist *sgl; 558 struct scatterlist *sgl;
559 int ret, count = 0; 559 int ret, count = 0;
560 size_t alloc_size = 0; 560 size_t alloc_size = 0;
561 561
562 for (sgl = sglist; count < nelems; count++, sgl++) 562 for (sgl = sglist; count < nelems; count++, sgl++)
563 alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE); 563 alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE);
564 564
565 if (vio_cmo_alloc(viodev, alloc_size)) { 565 if (vio_cmo_alloc(viodev, alloc_size)) {
566 atomic_inc(&viodev->cmo.allocs_failed); 566 atomic_inc(&viodev->cmo.allocs_failed);
567 return 0; 567 return 0;
568 } 568 }
569 569
570 ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs); 570 ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs);
571 571
572 if (unlikely(!ret)) { 572 if (unlikely(!ret)) {
573 vio_cmo_dealloc(viodev, alloc_size); 573 vio_cmo_dealloc(viodev, alloc_size);
574 atomic_inc(&viodev->cmo.allocs_failed); 574 atomic_inc(&viodev->cmo.allocs_failed);
575 return ret; 575 return ret;
576 } 576 }
577 577
578 for (sgl = sglist, count = 0; count < ret; count++, sgl++) 578 for (sgl = sglist, count = 0; count < ret; count++, sgl++)
579 alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE); 579 alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
580 if (alloc_size) 580 if (alloc_size)
581 vio_cmo_dealloc(viodev, alloc_size); 581 vio_cmo_dealloc(viodev, alloc_size);
582 582
583 return ret; 583 return ret;
584 } 584 }
585 585
586 static void vio_dma_iommu_unmap_sg(struct device *dev, 586 static void vio_dma_iommu_unmap_sg(struct device *dev,
587 struct scatterlist *sglist, int nelems, 587 struct scatterlist *sglist, int nelems,
588 enum dma_data_direction direction, 588 enum dma_data_direction direction,
589 struct dma_attrs *attrs) 589 struct dma_attrs *attrs)
590 { 590 {
591 struct vio_dev *viodev = to_vio_dev(dev); 591 struct vio_dev *viodev = to_vio_dev(dev);
592 struct scatterlist *sgl; 592 struct scatterlist *sgl;
593 size_t alloc_size = 0; 593 size_t alloc_size = 0;
594 int count = 0; 594 int count = 0;
595 595
596 for (sgl = sglist; count < nelems; count++, sgl++) 596 for (sgl = sglist; count < nelems; count++, sgl++)
597 alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE); 597 alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
598 598
599 dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs); 599 dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);
600 600
601 vio_cmo_dealloc(viodev, alloc_size); 601 vio_cmo_dealloc(viodev, alloc_size);
602 } 602 }
603 603
604 static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask) 604 static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
605 { 605 {
606 return dma_iommu_ops.dma_supported(dev, mask); 606 return dma_iommu_ops.dma_supported(dev, mask);
607 } 607 }
608 608
609 struct dma_map_ops vio_dma_mapping_ops = { 609 struct dma_map_ops vio_dma_mapping_ops = {
610 .alloc_coherent = vio_dma_iommu_alloc_coherent, 610 .alloc_coherent = vio_dma_iommu_alloc_coherent,
611 .free_coherent = vio_dma_iommu_free_coherent, 611 .free_coherent = vio_dma_iommu_free_coherent,
612 .map_sg = vio_dma_iommu_map_sg, 612 .map_sg = vio_dma_iommu_map_sg,
613 .unmap_sg = vio_dma_iommu_unmap_sg, 613 .unmap_sg = vio_dma_iommu_unmap_sg,
614 .map_page = vio_dma_iommu_map_page, 614 .map_page = vio_dma_iommu_map_page,
615 .unmap_page = vio_dma_iommu_unmap_page, 615 .unmap_page = vio_dma_iommu_unmap_page,
616 .dma_supported = vio_dma_iommu_dma_supported, 616 .dma_supported = vio_dma_iommu_dma_supported,
617 617
618 }; 618 };
619 619
620 /** 620 /**
621 * vio_cmo_set_dev_desired - Set desired entitlement for a device 621 * vio_cmo_set_dev_desired - Set desired entitlement for a device
622 * 622 *
623 * @viodev: struct vio_dev for device to alter 623 * @viodev: struct vio_dev for device to alter
624 * @new_desired: new desired entitlement level in bytes 624 * @new_desired: new desired entitlement level in bytes
625 * 625 *
626 * For use by devices to request a change to their entitlement at runtime or 626 * For use by devices to request a change to their entitlement at runtime or
627 * through sysfs. The desired entitlement level is changed and a balancing 627 * through sysfs. The desired entitlement level is changed and a balancing
628 * of system resources is scheduled to run in the future. 628 * of system resources is scheduled to run in the future.
629 */ 629 */
630 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) 630 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
631 { 631 {
632 unsigned long flags; 632 unsigned long flags;
633 struct vio_cmo_dev_entry *dev_ent; 633 struct vio_cmo_dev_entry *dev_ent;
634 int found = 0; 634 int found = 0;
635 635
636 if (!firmware_has_feature(FW_FEATURE_CMO)) 636 if (!firmware_has_feature(FW_FEATURE_CMO))
637 return; 637 return;
638 638
639 spin_lock_irqsave(&vio_cmo.lock, flags); 639 spin_lock_irqsave(&vio_cmo.lock, flags);
640 if (desired < VIO_CMO_MIN_ENT) 640 if (desired < VIO_CMO_MIN_ENT)
641 desired = VIO_CMO_MIN_ENT; 641 desired = VIO_CMO_MIN_ENT;
642 642
643 /* 643 /*
644 * Changes will not be made for devices not in the device list. 644 * Changes will not be made for devices not in the device list.
645 * If it is not in the device list, then no driver is loaded 645 * If it is not in the device list, then no driver is loaded
646 * for the device and it can not receive entitlement. 646 * for the device and it can not receive entitlement.
647 */ 647 */
648 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) 648 list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
649 if (viodev == dev_ent->viodev) { 649 if (viodev == dev_ent->viodev) {
650 found = 1; 650 found = 1;
651 break; 651 break;
652 } 652 }
653 if (!found) { 653 if (!found) {
654 spin_unlock_irqrestore(&vio_cmo.lock, flags); 654 spin_unlock_irqrestore(&vio_cmo.lock, flags);
655 return; 655 return;
656 } 656 }
657 657
658 /* Increase/decrease in desired device entitlement */ 658 /* Increase/decrease in desired device entitlement */
659 if (desired >= viodev->cmo.desired) { 659 if (desired >= viodev->cmo.desired) {
660 /* Just bump the bus and device values prior to a balance*/ 660 /* Just bump the bus and device values prior to a balance*/
661 vio_cmo.desired += desired - viodev->cmo.desired; 661 vio_cmo.desired += desired - viodev->cmo.desired;
662 viodev->cmo.desired = desired; 662 viodev->cmo.desired = desired;
663 } else { 663 } else {
664 /* Decrease bus and device values for desired entitlement */ 664 /* Decrease bus and device values for desired entitlement */
665 vio_cmo.desired -= viodev->cmo.desired - desired; 665 vio_cmo.desired -= viodev->cmo.desired - desired;
666 viodev->cmo.desired = desired; 666 viodev->cmo.desired = desired;
667 /* 667 /*
668 * If less entitlement is desired than current entitlement, move 668 * If less entitlement is desired than current entitlement, move
669 * any reserve memory in the change region to the excess pool. 669 * any reserve memory in the change region to the excess pool.
670 */ 670 */
671 if (viodev->cmo.entitled > desired) { 671 if (viodev->cmo.entitled > desired) {
672 vio_cmo.reserve.size -= viodev->cmo.entitled - desired; 672 vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
673 vio_cmo.excess.size += viodev->cmo.entitled - desired; 673 vio_cmo.excess.size += viodev->cmo.entitled - desired;
674 /* 674 /*
675 * If entitlement moving from the reserve pool to the 675 * If entitlement moving from the reserve pool to the
676 * excess pool is currently unused, add to the excess 676 * excess pool is currently unused, add to the excess
677 * free counter. 677 * free counter.
678 */ 678 */
679 if (viodev->cmo.allocated < viodev->cmo.entitled) 679 if (viodev->cmo.allocated < viodev->cmo.entitled)
680 vio_cmo.excess.free += viodev->cmo.entitled - 680 vio_cmo.excess.free += viodev->cmo.entitled -
681 max(viodev->cmo.allocated, desired); 681 max(viodev->cmo.allocated, desired);
682 viodev->cmo.entitled = desired; 682 viodev->cmo.entitled = desired;
683 } 683 }
684 } 684 }
685 schedule_delayed_work(&vio_cmo.balance_q, 0); 685 schedule_delayed_work(&vio_cmo.balance_q, 0);
686 spin_unlock_irqrestore(&vio_cmo.lock, flags); 686 spin_unlock_irqrestore(&vio_cmo.lock, flags);
687 } 687 }
688 688
689 /** 689 /**
690 * vio_cmo_bus_probe - Handle CMO specific bus probe activities 690 * vio_cmo_bus_probe - Handle CMO specific bus probe activities
691 * 691 *
692 * @viodev - Pointer to struct vio_dev for device 692 * @viodev - Pointer to struct vio_dev for device
693 * 693 *
694 * Determine the devices IO memory entitlement needs, attempting 694 * Determine the devices IO memory entitlement needs, attempting
695 * to satisfy the system minimum entitlement at first and scheduling 695 * to satisfy the system minimum entitlement at first and scheduling
696 * a balance operation to take care of the rest at a later time. 696 * a balance operation to take care of the rest at a later time.
697 * 697 *
698 * Returns: 0 on success, -EINVAL when device doesn't support CMO, and 698 * Returns: 0 on success, -EINVAL when device doesn't support CMO, and
699 * -ENOMEM when entitlement is not available for device or 699 * -ENOMEM when entitlement is not available for device or
700 * device entry. 700 * device entry.
701 * 701 *
702 */ 702 */
703 static int vio_cmo_bus_probe(struct vio_dev *viodev) 703 static int vio_cmo_bus_probe(struct vio_dev *viodev)
704 { 704 {
705 struct vio_cmo_dev_entry *dev_ent; 705 struct vio_cmo_dev_entry *dev_ent;
706 struct device *dev = &viodev->dev; 706 struct device *dev = &viodev->dev;
707 struct vio_driver *viodrv = to_vio_driver(dev->driver); 707 struct vio_driver *viodrv = to_vio_driver(dev->driver);
708 unsigned long flags; 708 unsigned long flags;
709 size_t size; 709 size_t size;
710 710
711 /* 711 /*
712 * Check to see that device has a DMA window and configure 712 * Check to see that device has a DMA window and configure
713 * entitlement for the device. 713 * entitlement for the device.
714 */ 714 */
715 if (of_get_property(viodev->dev.of_node, 715 if (of_get_property(viodev->dev.of_node,
716 "ibm,my-dma-window", NULL)) { 716 "ibm,my-dma-window", NULL)) {
717 /* Check that the driver is CMO enabled and get desired DMA */ 717 /* Check that the driver is CMO enabled and get desired DMA */
718 if (!viodrv->get_desired_dma) { 718 if (!viodrv->get_desired_dma) {
719 dev_err(dev, "%s: device driver does not support CMO\n", 719 dev_err(dev, "%s: device driver does not support CMO\n",
720 __func__); 720 __func__);
721 return -EINVAL; 721 return -EINVAL;
722 } 722 }
723 723
724 viodev->cmo.desired = IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev)); 724 viodev->cmo.desired = IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev));
725 if (viodev->cmo.desired < VIO_CMO_MIN_ENT) 725 if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
726 viodev->cmo.desired = VIO_CMO_MIN_ENT; 726 viodev->cmo.desired = VIO_CMO_MIN_ENT;
727 size = VIO_CMO_MIN_ENT; 727 size = VIO_CMO_MIN_ENT;
728 728
729 dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry), 729 dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
730 GFP_KERNEL); 730 GFP_KERNEL);
731 if (!dev_ent) 731 if (!dev_ent)
732 return -ENOMEM; 732 return -ENOMEM;
733 733
734 dev_ent->viodev = viodev; 734 dev_ent->viodev = viodev;
735 spin_lock_irqsave(&vio_cmo.lock, flags); 735 spin_lock_irqsave(&vio_cmo.lock, flags);
736 list_add(&dev_ent->list, &vio_cmo.device_list); 736 list_add(&dev_ent->list, &vio_cmo.device_list);
737 } else { 737 } else {
738 viodev->cmo.desired = 0; 738 viodev->cmo.desired = 0;
739 size = 0; 739 size = 0;
740 spin_lock_irqsave(&vio_cmo.lock, flags); 740 spin_lock_irqsave(&vio_cmo.lock, flags);
741 } 741 }
742 742
743 /* 743 /*
744 * If the needs for vio_cmo.min have not changed since they 744 * If the needs for vio_cmo.min have not changed since they
745 * were last set, the number of devices in the OF tree has 745 * were last set, the number of devices in the OF tree has
746 * been constant and the IO memory for this is already in 746 * been constant and the IO memory for this is already in
747 * the reserve pool. 747 * the reserve pool.
748 */ 748 */
749 if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) * 749 if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
750 VIO_CMO_MIN_ENT)) { 750 VIO_CMO_MIN_ENT)) {
751 /* Updated desired entitlement if device requires it */ 751 /* Updated desired entitlement if device requires it */
752 if (size) 752 if (size)
753 vio_cmo.desired += (viodev->cmo.desired - 753 vio_cmo.desired += (viodev->cmo.desired -
754 VIO_CMO_MIN_ENT); 754 VIO_CMO_MIN_ENT);
755 } else { 755 } else {
756 size_t tmp; 756 size_t tmp;
757 757
758 tmp = vio_cmo.spare + vio_cmo.excess.free; 758 tmp = vio_cmo.spare + vio_cmo.excess.free;
759 if (tmp < size) { 759 if (tmp < size) {
760 dev_err(dev, "%s: insufficient free " 760 dev_err(dev, "%s: insufficient free "
761 "entitlement to add device. " 761 "entitlement to add device. "
762 "Need %lu, have %lu\n", __func__, 762 "Need %lu, have %lu\n", __func__,
763 size, (vio_cmo.spare + tmp)); 763 size, (vio_cmo.spare + tmp));
764 spin_unlock_irqrestore(&vio_cmo.lock, flags); 764 spin_unlock_irqrestore(&vio_cmo.lock, flags);
765 return -ENOMEM; 765 return -ENOMEM;
766 } 766 }
767 767
768 /* Use excess pool first to fulfill request */ 768 /* Use excess pool first to fulfill request */
769 tmp = min(size, vio_cmo.excess.free); 769 tmp = min(size, vio_cmo.excess.free);
770 vio_cmo.excess.free -= tmp; 770 vio_cmo.excess.free -= tmp;
771 vio_cmo.excess.size -= tmp; 771 vio_cmo.excess.size -= tmp;
772 vio_cmo.reserve.size += tmp; 772 vio_cmo.reserve.size += tmp;
773 773
774 /* Use spare if excess pool was insufficient */ 774 /* Use spare if excess pool was insufficient */
775 vio_cmo.spare -= size - tmp; 775 vio_cmo.spare -= size - tmp;
776 776
777 /* Update bus accounting */ 777 /* Update bus accounting */
778 vio_cmo.min += size; 778 vio_cmo.min += size;
779 vio_cmo.desired += viodev->cmo.desired; 779 vio_cmo.desired += viodev->cmo.desired;
780 } 780 }
781 spin_unlock_irqrestore(&vio_cmo.lock, flags); 781 spin_unlock_irqrestore(&vio_cmo.lock, flags);
782 return 0; 782 return 0;
783 } 783 }
784 784
785 /** 785 /**
786 * vio_cmo_bus_remove - Handle CMO specific bus removal activities 786 * vio_cmo_bus_remove - Handle CMO specific bus removal activities
787 * 787 *
788 * @viodev - Pointer to struct vio_dev for device 788 * @viodev - Pointer to struct vio_dev for device
789 * 789 *
790 * Remove the device from the cmo device list. The minimum entitlement 790 * Remove the device from the cmo device list. The minimum entitlement
791 * will be reserved for the device as long as it is in the system. The 791 * will be reserved for the device as long as it is in the system. The
792 * rest of the entitlement the device had been allocated will be returned 792 * rest of the entitlement the device had been allocated will be returned
793 * to the system. 793 * to the system.
794 */ 794 */
795 static void vio_cmo_bus_remove(struct vio_dev *viodev) 795 static void vio_cmo_bus_remove(struct vio_dev *viodev)
796 { 796 {
797 struct vio_cmo_dev_entry *dev_ent; 797 struct vio_cmo_dev_entry *dev_ent;
798 unsigned long flags; 798 unsigned long flags;
799 size_t tmp; 799 size_t tmp;
800 800
801 spin_lock_irqsave(&vio_cmo.lock, flags); 801 spin_lock_irqsave(&vio_cmo.lock, flags);
802 if (viodev->cmo.allocated) { 802 if (viodev->cmo.allocated) {
803 dev_err(&viodev->dev, "%s: device had %lu bytes of IO " 803 dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
804 "allocated after remove operation.\n", 804 "allocated after remove operation.\n",
805 __func__, viodev->cmo.allocated); 805 __func__, viodev->cmo.allocated);
806 BUG(); 806 BUG();
807 } 807 }
808 808
809 /* 809 /*
810 * Remove the device from the device list being maintained for 810 * Remove the device from the device list being maintained for
811 * CMO enabled devices. 811 * CMO enabled devices.
812 */ 812 */
813 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) 813 list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
814 if (viodev == dev_ent->viodev) { 814 if (viodev == dev_ent->viodev) {
815 list_del(&dev_ent->list); 815 list_del(&dev_ent->list);
816 kfree(dev_ent); 816 kfree(dev_ent);
817 break; 817 break;
818 } 818 }
819 819
820 /* 820 /*
821 * Devices may not require any entitlement and they do not need 821 * Devices may not require any entitlement and they do not need
822 * to be processed. Otherwise, return the device's entitlement 822 * to be processed. Otherwise, return the device's entitlement
823 * back to the pools. 823 * back to the pools.
824 */ 824 */
825 if (viodev->cmo.entitled) { 825 if (viodev->cmo.entitled) {
826 /* 826 /*
827 * This device has not yet left the OF tree, it's 827 * This device has not yet left the OF tree, it's
828 * minimum entitlement remains in vio_cmo.min and 828 * minimum entitlement remains in vio_cmo.min and
829 * vio_cmo.desired 829 * vio_cmo.desired
830 */ 830 */
831 vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT); 831 vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);
832 832
833 /* 833 /*
834 * Save min allocation for device in reserve as long 834 * Save min allocation for device in reserve as long
835 * as it exists in OF tree as determined by later 835 * as it exists in OF tree as determined by later
836 * balance operation 836 * balance operation
837 */ 837 */
838 viodev->cmo.entitled -= VIO_CMO_MIN_ENT; 838 viodev->cmo.entitled -= VIO_CMO_MIN_ENT;
839 839
840 /* Replenish spare from freed reserve pool */ 840 /* Replenish spare from freed reserve pool */
841 if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) { 841 if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
842 tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT - 842 tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
843 vio_cmo.spare)); 843 vio_cmo.spare));
844 vio_cmo.spare += tmp; 844 vio_cmo.spare += tmp;
845 viodev->cmo.entitled -= tmp; 845 viodev->cmo.entitled -= tmp;
846 } 846 }
847 847
848 /* Remaining reserve goes to excess pool */ 848 /* Remaining reserve goes to excess pool */
849 vio_cmo.excess.size += viodev->cmo.entitled; 849 vio_cmo.excess.size += viodev->cmo.entitled;
850 vio_cmo.excess.free += viodev->cmo.entitled; 850 vio_cmo.excess.free += viodev->cmo.entitled;
851 vio_cmo.reserve.size -= viodev->cmo.entitled; 851 vio_cmo.reserve.size -= viodev->cmo.entitled;
852 852
853 /* 853 /*
854 * Until the device is removed it will keep a 854 * Until the device is removed it will keep a
855 * minimum entitlement; this will guarantee that 855 * minimum entitlement; this will guarantee that
856 * a module unload/load will succeed. 856 * a module unload/load will succeed.
857 */ 857 */
858 viodev->cmo.entitled = VIO_CMO_MIN_ENT; 858 viodev->cmo.entitled = VIO_CMO_MIN_ENT;
859 viodev->cmo.desired = VIO_CMO_MIN_ENT; 859 viodev->cmo.desired = VIO_CMO_MIN_ENT;
860 atomic_set(&viodev->cmo.allocs_failed, 0); 860 atomic_set(&viodev->cmo.allocs_failed, 0);
861 } 861 }
862 862
863 spin_unlock_irqrestore(&vio_cmo.lock, flags); 863 spin_unlock_irqrestore(&vio_cmo.lock, flags);
864 } 864 }
865 865
866 static void vio_cmo_set_dma_ops(struct vio_dev *viodev) 866 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
867 { 867 {
868 set_dma_ops(&viodev->dev, &vio_dma_mapping_ops); 868 set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
869 } 869 }
870 870
871 /** 871 /**
872 * vio_cmo_bus_init - CMO entitlement initialization at bus init time 872 * vio_cmo_bus_init - CMO entitlement initialization at bus init time
873 * 873 *
874 * Set up the reserve and excess entitlement pools based on available 874 * Set up the reserve and excess entitlement pools based on available
875 * system entitlement and the number of devices in the OF tree that 875 * system entitlement and the number of devices in the OF tree that
876 * require entitlement in the reserve pool. 876 * require entitlement in the reserve pool.
877 */ 877 */
878 static void vio_cmo_bus_init(void) 878 static void vio_cmo_bus_init(void)
879 { 879 {
880 struct hvcall_mpp_data mpp_data; 880 struct hvcall_mpp_data mpp_data;
881 int err; 881 int err;
882 882
883 memset(&vio_cmo, 0, sizeof(struct vio_cmo)); 883 memset(&vio_cmo, 0, sizeof(struct vio_cmo));
884 spin_lock_init(&vio_cmo.lock); 884 spin_lock_init(&vio_cmo.lock);
885 INIT_LIST_HEAD(&vio_cmo.device_list); 885 INIT_LIST_HEAD(&vio_cmo.device_list);
886 INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance); 886 INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);
887 887
888 /* Get current system entitlement */ 888 /* Get current system entitlement */
889 err = h_get_mpp(&mpp_data); 889 err = h_get_mpp(&mpp_data);
890 890
891 /* 891 /*
892 * On failure, continue with entitlement set to 0, will panic() 892 * On failure, continue with entitlement set to 0, will panic()
893 * later when spare is reserved. 893 * later when spare is reserved.
894 */ 894 */
895 if (err != H_SUCCESS) { 895 if (err != H_SUCCESS) {
896 printk(KERN_ERR "%s: unable to determine system IO "\ 896 printk(KERN_ERR "%s: unable to determine system IO "\
897 "entitlement. (%d)\n", __func__, err); 897 "entitlement. (%d)\n", __func__, err);
898 vio_cmo.entitled = 0; 898 vio_cmo.entitled = 0;
899 } else { 899 } else {
900 vio_cmo.entitled = mpp_data.entitled_mem; 900 vio_cmo.entitled = mpp_data.entitled_mem;
901 } 901 }
902 902
903 /* Set reservation and check against entitlement */ 903 /* Set reservation and check against entitlement */
904 vio_cmo.spare = VIO_CMO_MIN_ENT; 904 vio_cmo.spare = VIO_CMO_MIN_ENT;
905 vio_cmo.reserve.size = vio_cmo.spare; 905 vio_cmo.reserve.size = vio_cmo.spare;
906 vio_cmo.reserve.size += (vio_cmo_num_OF_devs() * 906 vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
907 VIO_CMO_MIN_ENT); 907 VIO_CMO_MIN_ENT);
908 if (vio_cmo.reserve.size > vio_cmo.entitled) { 908 if (vio_cmo.reserve.size > vio_cmo.entitled) {
909 printk(KERN_ERR "%s: insufficient system entitlement\n", 909 printk(KERN_ERR "%s: insufficient system entitlement\n",
910 __func__); 910 __func__);
911 panic("%s: Insufficient system entitlement", __func__); 911 panic("%s: Insufficient system entitlement", __func__);
912 } 912 }
913 913
914 /* Set the remaining accounting variables */ 914 /* Set the remaining accounting variables */
915 vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size; 915 vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
916 vio_cmo.excess.free = vio_cmo.excess.size; 916 vio_cmo.excess.free = vio_cmo.excess.size;
917 vio_cmo.min = vio_cmo.reserve.size; 917 vio_cmo.min = vio_cmo.reserve.size;
918 vio_cmo.desired = vio_cmo.reserve.size; 918 vio_cmo.desired = vio_cmo.reserve.size;
919 } 919 }
920 920
921 /* sysfs device functions and data structures for CMO */ 921 /* sysfs device functions and data structures for CMO */
922 922
923 #define viodev_cmo_rd_attr(name) \ 923 #define viodev_cmo_rd_attr(name) \
924 static ssize_t viodev_cmo_##name##_show(struct device *dev, \ 924 static ssize_t viodev_cmo_##name##_show(struct device *dev, \
925 struct device_attribute *attr, \ 925 struct device_attribute *attr, \
926 char *buf) \ 926 char *buf) \
927 { \ 927 { \
928 return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name); \ 928 return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name); \
929 } 929 }
930 930
931 static ssize_t viodev_cmo_allocs_failed_show(struct device *dev, 931 static ssize_t viodev_cmo_allocs_failed_show(struct device *dev,
932 struct device_attribute *attr, char *buf) 932 struct device_attribute *attr, char *buf)
933 { 933 {
934 struct vio_dev *viodev = to_vio_dev(dev); 934 struct vio_dev *viodev = to_vio_dev(dev);
935 return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed)); 935 return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
936 } 936 }
937 937
938 static ssize_t viodev_cmo_allocs_failed_reset(struct device *dev, 938 static ssize_t viodev_cmo_allocs_failed_reset(struct device *dev,
939 struct device_attribute *attr, const char *buf, size_t count) 939 struct device_attribute *attr, const char *buf, size_t count)
940 { 940 {
941 struct vio_dev *viodev = to_vio_dev(dev); 941 struct vio_dev *viodev = to_vio_dev(dev);
942 atomic_set(&viodev->cmo.allocs_failed, 0); 942 atomic_set(&viodev->cmo.allocs_failed, 0);
943 return count; 943 return count;
944 } 944 }
945 945
946 static ssize_t viodev_cmo_desired_set(struct device *dev, 946 static ssize_t viodev_cmo_desired_set(struct device *dev,
947 struct device_attribute *attr, const char *buf, size_t count) 947 struct device_attribute *attr, const char *buf, size_t count)
948 { 948 {
949 struct vio_dev *viodev = to_vio_dev(dev); 949 struct vio_dev *viodev = to_vio_dev(dev);
950 size_t new_desired; 950 size_t new_desired;
951 int ret; 951 int ret;
952 952
953 ret = strict_strtoul(buf, 10, &new_desired); 953 ret = strict_strtoul(buf, 10, &new_desired);
954 if (ret) 954 if (ret)
955 return ret; 955 return ret;
956 956
957 vio_cmo_set_dev_desired(viodev, new_desired); 957 vio_cmo_set_dev_desired(viodev, new_desired);
958 return count; 958 return count;
959 } 959 }
960 960
961 viodev_cmo_rd_attr(desired); 961 viodev_cmo_rd_attr(desired);
962 viodev_cmo_rd_attr(entitled); 962 viodev_cmo_rd_attr(entitled);
963 viodev_cmo_rd_attr(allocated); 963 viodev_cmo_rd_attr(allocated);
964 964
965 static ssize_t name_show(struct device *, struct device_attribute *, char *); 965 static ssize_t name_show(struct device *, struct device_attribute *, char *);
966 static ssize_t devspec_show(struct device *, struct device_attribute *, char *); 966 static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
967 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, 967 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
968 char *buf); 968 char *buf);
969 static struct device_attribute vio_cmo_dev_attrs[] = { 969 static struct device_attribute vio_cmo_dev_attrs[] = {
970 __ATTR_RO(name), 970 __ATTR_RO(name),
971 __ATTR_RO(devspec), 971 __ATTR_RO(devspec),
972 __ATTR_RO(modalias), 972 __ATTR_RO(modalias),
973 __ATTR(cmo_desired, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH, 973 __ATTR(cmo_desired, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
974 viodev_cmo_desired_show, viodev_cmo_desired_set), 974 viodev_cmo_desired_show, viodev_cmo_desired_set),
975 __ATTR(cmo_entitled, S_IRUGO, viodev_cmo_entitled_show, NULL), 975 __ATTR(cmo_entitled, S_IRUGO, viodev_cmo_entitled_show, NULL),
976 __ATTR(cmo_allocated, S_IRUGO, viodev_cmo_allocated_show, NULL), 976 __ATTR(cmo_allocated, S_IRUGO, viodev_cmo_allocated_show, NULL),
977 __ATTR(cmo_allocs_failed, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH, 977 __ATTR(cmo_allocs_failed, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
978 viodev_cmo_allocs_failed_show, viodev_cmo_allocs_failed_reset), 978 viodev_cmo_allocs_failed_show, viodev_cmo_allocs_failed_reset),
979 __ATTR_NULL 979 __ATTR_NULL
980 }; 980 };
981 981
982 /* sysfs bus functions and data structures for CMO */ 982 /* sysfs bus functions and data structures for CMO */
983 983
984 #define viobus_cmo_rd_attr(name) \ 984 #define viobus_cmo_rd_attr(name) \
985 static ssize_t \ 985 static ssize_t \
986 viobus_cmo_##name##_show(struct bus_type *bt, char *buf) \ 986 viobus_cmo_##name##_show(struct bus_type *bt, char *buf) \
987 { \ 987 { \
988 return sprintf(buf, "%lu\n", vio_cmo.name); \ 988 return sprintf(buf, "%lu\n", vio_cmo.name); \
989 } 989 }
990 990
991 #define viobus_cmo_pool_rd_attr(name, var) \ 991 #define viobus_cmo_pool_rd_attr(name, var) \
992 static ssize_t \ 992 static ssize_t \
993 viobus_cmo_##name##_pool_show_##var(struct bus_type *bt, char *buf) \ 993 viobus_cmo_##name##_pool_show_##var(struct bus_type *bt, char *buf) \
994 { \ 994 { \
995 return sprintf(buf, "%lu\n", vio_cmo.name.var); \ 995 return sprintf(buf, "%lu\n", vio_cmo.name.var); \
996 } 996 }
997 997
998 static ssize_t viobus_cmo_high_reset(struct bus_type *bt, const char *buf, 998 static ssize_t viobus_cmo_high_reset(struct bus_type *bt, const char *buf,
999 size_t count) 999 size_t count)
1000 { 1000 {
1001 unsigned long flags; 1001 unsigned long flags;
1002 1002
1003 spin_lock_irqsave(&vio_cmo.lock, flags); 1003 spin_lock_irqsave(&vio_cmo.lock, flags);
1004 vio_cmo.high = vio_cmo.curr; 1004 vio_cmo.high = vio_cmo.curr;
1005 spin_unlock_irqrestore(&vio_cmo.lock, flags); 1005 spin_unlock_irqrestore(&vio_cmo.lock, flags);
1006 1006
1007 return count; 1007 return count;
1008 } 1008 }
1009 1009
1010 viobus_cmo_rd_attr(entitled); 1010 viobus_cmo_rd_attr(entitled);
1011 viobus_cmo_pool_rd_attr(reserve, size); 1011 viobus_cmo_pool_rd_attr(reserve, size);
1012 viobus_cmo_pool_rd_attr(excess, size); 1012 viobus_cmo_pool_rd_attr(excess, size);
1013 viobus_cmo_pool_rd_attr(excess, free); 1013 viobus_cmo_pool_rd_attr(excess, free);
1014 viobus_cmo_rd_attr(spare); 1014 viobus_cmo_rd_attr(spare);
1015 viobus_cmo_rd_attr(min); 1015 viobus_cmo_rd_attr(min);
1016 viobus_cmo_rd_attr(desired); 1016 viobus_cmo_rd_attr(desired);
1017 viobus_cmo_rd_attr(curr); 1017 viobus_cmo_rd_attr(curr);
1018 viobus_cmo_rd_attr(high); 1018 viobus_cmo_rd_attr(high);
1019 1019
1020 static struct bus_attribute vio_cmo_bus_attrs[] = { 1020 static struct bus_attribute vio_cmo_bus_attrs[] = {
1021 __ATTR(cmo_entitled, S_IRUGO, viobus_cmo_entitled_show, NULL), 1021 __ATTR(cmo_entitled, S_IRUGO, viobus_cmo_entitled_show, NULL),
1022 __ATTR(cmo_reserve_size, S_IRUGO, viobus_cmo_reserve_pool_show_size, NULL), 1022 __ATTR(cmo_reserve_size, S_IRUGO, viobus_cmo_reserve_pool_show_size, NULL),
1023 __ATTR(cmo_excess_size, S_IRUGO, viobus_cmo_excess_pool_show_size, NULL), 1023 __ATTR(cmo_excess_size, S_IRUGO, viobus_cmo_excess_pool_show_size, NULL),
1024 __ATTR(cmo_excess_free, S_IRUGO, viobus_cmo_excess_pool_show_free, NULL), 1024 __ATTR(cmo_excess_free, S_IRUGO, viobus_cmo_excess_pool_show_free, NULL),
1025 __ATTR(cmo_spare, S_IRUGO, viobus_cmo_spare_show, NULL), 1025 __ATTR(cmo_spare, S_IRUGO, viobus_cmo_spare_show, NULL),
1026 __ATTR(cmo_min, S_IRUGO, viobus_cmo_min_show, NULL), 1026 __ATTR(cmo_min, S_IRUGO, viobus_cmo_min_show, NULL),
1027 __ATTR(cmo_desired, S_IRUGO, viobus_cmo_desired_show, NULL), 1027 __ATTR(cmo_desired, S_IRUGO, viobus_cmo_desired_show, NULL),
1028 __ATTR(cmo_curr, S_IRUGO, viobus_cmo_curr_show, NULL), 1028 __ATTR(cmo_curr, S_IRUGO, viobus_cmo_curr_show, NULL),
1029 __ATTR(cmo_high, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH, 1029 __ATTR(cmo_high, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
1030 viobus_cmo_high_show, viobus_cmo_high_reset), 1030 viobus_cmo_high_show, viobus_cmo_high_reset),
1031 __ATTR_NULL 1031 __ATTR_NULL
1032 }; 1032 };
1033 1033
1034 static void vio_cmo_sysfs_init(void) 1034 static void vio_cmo_sysfs_init(void)
1035 { 1035 {
1036 vio_bus_type.dev_attrs = vio_cmo_dev_attrs; 1036 vio_bus_type.dev_attrs = vio_cmo_dev_attrs;
1037 vio_bus_type.bus_attrs = vio_cmo_bus_attrs; 1037 vio_bus_type.bus_attrs = vio_cmo_bus_attrs;
1038 } 1038 }
1039 #else /* CONFIG_PPC_SMLPAR */ 1039 #else /* CONFIG_PPC_SMLPAR */
1040 /* Dummy functions for iSeries platform */ 1040 /* Dummy functions for iSeries platform */
1041 int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; } 1041 int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
1042 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {} 1042 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
1043 static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; } 1043 static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
1044 static void vio_cmo_bus_remove(struct vio_dev *viodev) {} 1044 static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
1045 static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {} 1045 static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
1046 static void vio_cmo_bus_init(void) {} 1046 static void vio_cmo_bus_init(void) {}
1047 static void vio_cmo_sysfs_init(void) { } 1047 static void vio_cmo_sysfs_init(void) { }
1048 #endif /* CONFIG_PPC_SMLPAR */ 1048 #endif /* CONFIG_PPC_SMLPAR */
1049 EXPORT_SYMBOL(vio_cmo_entitlement_update); 1049 EXPORT_SYMBOL(vio_cmo_entitlement_update);
1050 EXPORT_SYMBOL(vio_cmo_set_dev_desired); 1050 EXPORT_SYMBOL(vio_cmo_set_dev_desired);
1051 1051
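The two exported CMO entry points above are the driver-visible part of the entitlement machinery: a pSeries driver (or the cmo_desired sysfs attribute) uses vio_cmo_set_dev_desired() to tell the balancer what it would like to hold. A minimal, hedged sketch of such a call follows; the helper name and the receive-pool sizing are hypothetical, only vio_cmo_set_dev_desired() and struct vio_dev come from this file.

#include <linux/kernel.h>
#include <asm/vio.h>

/*
 * Hypothetical helper: ask the CMO balancer for enough entitlement to back
 * a larger receive pool.  vio_cmo_set_dev_desired() only records a target;
 * the balance work queued in this file moves excess entitlement toward it.
 */
static void example_grow_entitlement(struct vio_dev *viodev, size_t rx_pool_bytes)
{
	vio_cmo_set_dev_desired(viodev, rx_pool_bytes);
}
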
1052 static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) 1052 static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
1053 { 1053 {
1054 const unsigned char *dma_window; 1054 const unsigned char *dma_window;
1055 struct iommu_table *tbl; 1055 struct iommu_table *tbl;
1056 unsigned long offset, size; 1056 unsigned long offset, size;
1057 1057
1058 if (firmware_has_feature(FW_FEATURE_ISERIES)) 1058 if (firmware_has_feature(FW_FEATURE_ISERIES))
1059 return vio_build_iommu_table_iseries(dev); 1059 return vio_build_iommu_table_iseries(dev);
1060 1060
1061 dma_window = of_get_property(dev->dev.of_node, 1061 dma_window = of_get_property(dev->dev.of_node,
1062 "ibm,my-dma-window", NULL); 1062 "ibm,my-dma-window", NULL);
1063 if (!dma_window) 1063 if (!dma_window)
1064 return NULL; 1064 return NULL;
1065 1065
1066 tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); 1066 tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
1067 if (tbl == NULL) 1067 if (tbl == NULL)
1068 return NULL; 1068 return NULL;
1069 1069
1070 of_parse_dma_window(dev->dev.of_node, dma_window, 1070 of_parse_dma_window(dev->dev.of_node, dma_window,
1071 &tbl->it_index, &offset, &size); 1071 &tbl->it_index, &offset, &size);
1072 1072
1073 /* TCE table size - measured in tce entries */ 1073 /* TCE table size - measured in tce entries */
1074 tbl->it_size = size >> IOMMU_PAGE_SHIFT; 1074 tbl->it_size = size >> IOMMU_PAGE_SHIFT;
1075 /* offset for VIO should always be 0 */ 1075 /* offset for VIO should always be 0 */
1076 tbl->it_offset = offset >> IOMMU_PAGE_SHIFT; 1076 tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
1077 tbl->it_busno = 0; 1077 tbl->it_busno = 0;
1078 tbl->it_type = TCE_VB; 1078 tbl->it_type = TCE_VB;
1079 tbl->it_blocksize = 16; 1079 tbl->it_blocksize = 16;
1080 1080
1081 return iommu_init_table(tbl, -1); 1081 return iommu_init_table(tbl, -1);
1082 } 1082 }
1083 1083
1084 /** 1084 /**
1085 * vio_match_device: - Tell if a VIO device has a matching 1085 * vio_match_device: - Tell if a VIO device has a matching
1086 * VIO device id structure. 1086 * VIO device id structure.
1087 * @ids: array of VIO device id structures to search in 1087 * @ids: array of VIO device id structures to search in
1088 * @dev: the VIO device structure to match against 1088 * @dev: the VIO device structure to match against
1089 * 1089 *
1090 * Used by a driver to check whether a VIO device present in the 1090 * Used by a driver to check whether a VIO device present in the
1091 * system is in its list of supported devices. Returns the matching 1091 * system is in its list of supported devices. Returns the matching
1092 * vio_device_id structure or NULL if there is no match. 1092 * vio_device_id structure or NULL if there is no match.
1093 */ 1093 */
1094 static const struct vio_device_id *vio_match_device( 1094 static const struct vio_device_id *vio_match_device(
1095 const struct vio_device_id *ids, const struct vio_dev *dev) 1095 const struct vio_device_id *ids, const struct vio_dev *dev)
1096 { 1096 {
1097 while (ids->type[0] != '\0') { 1097 while (ids->type[0] != '\0') {
1098 if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) && 1098 if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
1099 of_device_is_compatible(dev->dev.of_node, 1099 of_device_is_compatible(dev->dev.of_node,
1100 ids->compat)) 1100 ids->compat))
1101 return ids; 1101 return ids;
1102 ids++; 1102 ids++;
1103 } 1103 }
1104 return NULL; 1104 return NULL;
1105 } 1105 }
1106 1106
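vio_match_device() walks the driver's id table until it reaches the empty-string sentinel, comparing each entry's type against the node's device_type prefix and its compat string against the node's "compatible" property. A sketch of what such a table looks like for a hypothetical driver; the "network"/"IBM,example-vnic" strings are purely illustrative.

#include <asm/vio.h>

/*
 * Hypothetical id table: matching stops at the all-zero sentinel entry,
 * exactly as the while (ids->type[0] != '\0') loop above expects.
 */
static struct vio_device_id example_vio_ids[] = {
	{ "network", "IBM,example-vnic" },	/* device_type prefix + compatible */
	{ "", "" },				/* sentinel terminates the walk */
};
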
1107 /* 1107 /*
1108 * Convert from struct device to struct vio_dev and pass to driver. 1108 * Convert from struct device to struct vio_dev and pass to driver.
1109 * dev->driver has already been set by generic code because vio_bus_match 1109 * dev->driver has already been set by generic code because vio_bus_match
1110 * succeeded. 1110 * succeeded.
1111 */ 1111 */
1112 static int vio_bus_probe(struct device *dev) 1112 static int vio_bus_probe(struct device *dev)
1113 { 1113 {
1114 struct vio_dev *viodev = to_vio_dev(dev); 1114 struct vio_dev *viodev = to_vio_dev(dev);
1115 struct vio_driver *viodrv = to_vio_driver(dev->driver); 1115 struct vio_driver *viodrv = to_vio_driver(dev->driver);
1116 const struct vio_device_id *id; 1116 const struct vio_device_id *id;
1117 int error = -ENODEV; 1117 int error = -ENODEV;
1118 1118
1119 if (!viodrv->probe) 1119 if (!viodrv->probe)
1120 return error; 1120 return error;
1121 1121
1122 id = vio_match_device(viodrv->id_table, viodev); 1122 id = vio_match_device(viodrv->id_table, viodev);
1123 if (id) { 1123 if (id) {
1124 memset(&viodev->cmo, 0, sizeof(viodev->cmo)); 1124 memset(&viodev->cmo, 0, sizeof(viodev->cmo));
1125 if (firmware_has_feature(FW_FEATURE_CMO)) { 1125 if (firmware_has_feature(FW_FEATURE_CMO)) {
1126 error = vio_cmo_bus_probe(viodev); 1126 error = vio_cmo_bus_probe(viodev);
1127 if (error) 1127 if (error)
1128 return error; 1128 return error;
1129 } 1129 }
1130 error = viodrv->probe(viodev, id); 1130 error = viodrv->probe(viodev, id);
1131 if (error && firmware_has_feature(FW_FEATURE_CMO)) 1131 if (error && firmware_has_feature(FW_FEATURE_CMO))
1132 vio_cmo_bus_remove(viodev); 1132 vio_cmo_bus_remove(viodev);
1133 } 1133 }
1134 1134
1135 return error; 1135 return error;
1136 } 1136 }
1137 1137
1138 /* convert from struct device to struct vio_dev and pass to driver. */ 1138 /* convert from struct device to struct vio_dev and pass to driver. */
1139 static int vio_bus_remove(struct device *dev) 1139 static int vio_bus_remove(struct device *dev)
1140 { 1140 {
1141 struct vio_dev *viodev = to_vio_dev(dev); 1141 struct vio_dev *viodev = to_vio_dev(dev);
1142 struct vio_driver *viodrv = to_vio_driver(dev->driver); 1142 struct vio_driver *viodrv = to_vio_driver(dev->driver);
1143 struct device *devptr; 1143 struct device *devptr;
1144 int ret = 1; 1144 int ret = 1;
1145 1145
1146 /* 1146 /*
1147 * Hold a reference to the device after the remove function is called 1147 * Hold a reference to the device after the remove function is called
1148 * to allow for CMO accounting cleanup for the device. 1148 * to allow for CMO accounting cleanup for the device.
1149 */ 1149 */
1150 devptr = get_device(dev); 1150 devptr = get_device(dev);
1151 1151
1152 if (viodrv->remove) 1152 if (viodrv->remove)
1153 ret = viodrv->remove(viodev); 1153 ret = viodrv->remove(viodev);
1154 1154
1155 if (!ret && firmware_has_feature(FW_FEATURE_CMO)) 1155 if (!ret && firmware_has_feature(FW_FEATURE_CMO))
1156 vio_cmo_bus_remove(viodev); 1156 vio_cmo_bus_remove(viodev);
1157 1157
1158 put_device(devptr); 1158 put_device(devptr);
1159 return ret; 1159 return ret;
1160 } 1160 }
1161 1161
1162 /** 1162 /**
1163 * vio_register_driver: - Register a new vio driver 1163 * vio_register_driver: - Register a new vio driver
1164 * @drv: The vio_driver structure to be registered. 1164 * @drv: The vio_driver structure to be registered.
1165 */ 1165 */
1166 int vio_register_driver(struct vio_driver *viodrv) 1166 int vio_register_driver(struct vio_driver *viodrv)
1167 { 1167 {
1168 printk(KERN_DEBUG "%s: driver %s registering\n", __func__, 1168 printk(KERN_DEBUG "%s: driver %s registering\n", __func__,
1169 viodrv->driver.name); 1169 viodrv->driver.name);
1170 1170
1171 /* fill in 'struct driver' fields */ 1171 /* fill in 'struct driver' fields */
1172 viodrv->driver.bus = &vio_bus_type; 1172 viodrv->driver.bus = &vio_bus_type;
1173 1173
1174 return driver_register(&viodrv->driver); 1174 return driver_register(&viodrv->driver);
1175 } 1175 }
1176 EXPORT_SYMBOL(vio_register_driver); 1176 EXPORT_SYMBOL(vio_register_driver);
1177 1177
1178 /** 1178 /**
1179 * vio_unregister_driver - Remove registration of vio driver. 1179 * vio_unregister_driver - Remove registration of vio driver.
1180 * @driver: The vio_driver struct to be removed from registration 1180 * @driver: The vio_driver struct to be removed from registration
1181 */ 1181 */
1182 void vio_unregister_driver(struct vio_driver *viodrv) 1182 void vio_unregister_driver(struct vio_driver *viodrv)
1183 { 1183 {
1184 driver_unregister(&viodrv->driver); 1184 driver_unregister(&viodrv->driver);
1185 } 1185 }
1186 EXPORT_SYMBOL(vio_unregister_driver); 1186 EXPORT_SYMBOL(vio_unregister_driver);
1187 1187
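Taken together, vio_bus_probe()/vio_bus_remove() above and these two registration helpers define the whole driver-side contract of the bus. A minimal, hypothetical driver skeleton is sketched below, assuming only the struct vio_driver fields this file already dereferences (id_table, probe, remove, driver.name); every example_* name is invented.

#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/vio.h>

static int example_vio_probe(struct vio_dev *viodev, const struct vio_device_id *id)
{
	/*
	 * When FW_FEATURE_CMO is present, vio_bus_probe() has already set up
	 * CMO accounting before calling here; just claim the device.
	 */
	dev_info(&viodev->dev, "example: probed %s\n", viodev->name);
	return 0;
}

static int example_vio_remove(struct vio_dev *viodev)
{
	/* Returning 0 lets vio_bus_remove() release the CMO entitlement. */
	return 0;
}

static struct vio_device_id example_ids[] = {
	{ "example", "IBM,example" },
	{ "", "" },
};

static struct vio_driver example_driver = {
	.id_table	= example_ids,
	.probe		= example_vio_probe,
	.remove		= example_vio_remove,
	.driver		= {
		.name	= "example_vio",
		.owner	= THIS_MODULE,
	},
};

static int __init example_init(void)
{
	return vio_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
	vio_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
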
1188 /* vio_dev refcount hit 0 */ 1188 /* vio_dev refcount hit 0 */
1189 static void __devinit vio_dev_release(struct device *dev) 1189 static void __devinit vio_dev_release(struct device *dev)
1190 { 1190 {
1191 struct iommu_table *tbl = get_iommu_table_base(dev); 1191 struct iommu_table *tbl = get_iommu_table_base(dev);
1192 1192
1193 /* iSeries uses a common table for all vio devices */ 1193 /* iSeries uses a common table for all vio devices */
1194 if (!firmware_has_feature(FW_FEATURE_ISERIES) && tbl) 1194 if (!firmware_has_feature(FW_FEATURE_ISERIES) && tbl)
1195 iommu_free_table(tbl, dev->of_node ? 1195 iommu_free_table(tbl, dev->of_node ?
1196 dev->of_node->full_name : dev_name(dev)); 1196 dev->of_node->full_name : dev_name(dev));
1197 of_node_put(dev->of_node); 1197 of_node_put(dev->of_node);
1198 kfree(to_vio_dev(dev)); 1198 kfree(to_vio_dev(dev));
1199 } 1199 }
1200 1200
1201 /** 1201 /**
1202 * vio_register_device_node: - Register a new vio device. 1202 * vio_register_device_node: - Register a new vio device.
1203 * @of_node: The OF node for this device. 1203 * @of_node: The OF node for this device.
1204 * 1204 *
1205 * Creates and initializes a vio_dev structure from the data in 1205 * Creates and initializes a vio_dev structure from the data in
1206 * of_node and adds it to the list of virtual devices. 1206 * of_node and adds it to the list of virtual devices.
1207 * Returns a pointer to the created vio_dev or NULL if node has 1207 * Returns a pointer to the created vio_dev or NULL if node has
1208 * NULL device_type or compatible fields. 1208 * NULL device_type or compatible fields.
1209 */ 1209 */
1210 struct vio_dev *vio_register_device_node(struct device_node *of_node) 1210 struct vio_dev *vio_register_device_node(struct device_node *of_node)
1211 { 1211 {
1212 struct vio_dev *viodev; 1212 struct vio_dev *viodev;
1213 const unsigned int *unit_address; 1213 const unsigned int *unit_address;
1214 1214
1215 /* we need the 'device_type' property, in order to match with drivers */ 1215 /* we need the 'device_type' property, in order to match with drivers */
1216 if (of_node->type == NULL) { 1216 if (of_node->type == NULL) {
1217 printk(KERN_WARNING "%s: node %s missing 'device_type'\n", 1217 printk(KERN_WARNING "%s: node %s missing 'device_type'\n",
1218 __func__, 1218 __func__,
1219 of_node->name ? of_node->name : "<unknown>"); 1219 of_node->name ? of_node->name : "<unknown>");
1220 return NULL; 1220 return NULL;
1221 } 1221 }
1222 1222
1223 unit_address = of_get_property(of_node, "reg", NULL); 1223 unit_address = of_get_property(of_node, "reg", NULL);
1224 if (unit_address == NULL) { 1224 if (unit_address == NULL) {
1225 printk(KERN_WARNING "%s: node %s missing 'reg'\n", 1225 printk(KERN_WARNING "%s: node %s missing 'reg'\n",
1226 __func__, 1226 __func__,
1227 of_node->name ? of_node->name : "<unknown>"); 1227 of_node->name ? of_node->name : "<unknown>");
1228 return NULL; 1228 return NULL;
1229 } 1229 }
1230 1230
1231 /* allocate a vio_dev for this node */ 1231 /* allocate a vio_dev for this node */
1232 viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL); 1232 viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
1233 if (viodev == NULL) 1233 if (viodev == NULL)
1234 return NULL; 1234 return NULL;
1235 1235
1236 viodev->irq = irq_of_parse_and_map(of_node, 0); 1236 viodev->irq = irq_of_parse_and_map(of_node, 0);
1237 1237
1238 dev_set_name(&viodev->dev, "%x", *unit_address); 1238 dev_set_name(&viodev->dev, "%x", *unit_address);
1239 viodev->name = of_node->name; 1239 viodev->name = of_node->name;
1240 viodev->type = of_node->type; 1240 viodev->type = of_node->type;
1241 viodev->unit_address = *unit_address; 1241 viodev->unit_address = *unit_address;
1242 if (firmware_has_feature(FW_FEATURE_ISERIES)) { 1242 if (firmware_has_feature(FW_FEATURE_ISERIES)) {
1243 unit_address = of_get_property(of_node, 1243 unit_address = of_get_property(of_node,
1244 "linux,unit_address", NULL); 1244 "linux,unit_address", NULL);
1245 if (unit_address != NULL) 1245 if (unit_address != NULL)
1246 viodev->unit_address = *unit_address; 1246 viodev->unit_address = *unit_address;
1247 } 1247 }
1248 viodev->dev.of_node = of_node_get(of_node); 1248 viodev->dev.of_node = of_node_get(of_node);
1249 1249
1250 if (firmware_has_feature(FW_FEATURE_CMO)) 1250 if (firmware_has_feature(FW_FEATURE_CMO))
1251 vio_cmo_set_dma_ops(viodev); 1251 vio_cmo_set_dma_ops(viodev);
1252 else 1252 else
1253 set_dma_ops(&viodev->dev, &dma_iommu_ops); 1253 set_dma_ops(&viodev->dev, &dma_iommu_ops);
1254 set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev)); 1254 set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev));
1255 set_dev_node(&viodev->dev, of_node_to_nid(of_node)); 1255 set_dev_node(&viodev->dev, of_node_to_nid(of_node));
1256 1256
1257 /* init generic 'struct device' fields: */ 1257 /* init generic 'struct device' fields: */
1258 viodev->dev.parent = &vio_bus_device.dev; 1258 viodev->dev.parent = &vio_bus_device.dev;
1259 viodev->dev.bus = &vio_bus_type; 1259 viodev->dev.bus = &vio_bus_type;
1260 viodev->dev.release = vio_dev_release; 1260 viodev->dev.release = vio_dev_release;
1261 /* needed to ensure proper operation of coherent allocations 1261 /* needed to ensure proper operation of coherent allocations
1262 * later, in case driver doesn't set it explicitly */ 1262 * later, in case driver doesn't set it explicitly */
1263 dma_set_mask(&viodev->dev, DMA_BIT_MASK(64)); 1263 dma_set_mask(&viodev->dev, DMA_BIT_MASK(64));
1264 dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64)); 1264 dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));
1265 1265
1266 /* register with generic device framework */ 1266 /* register with generic device framework */
1267 if (device_register(&viodev->dev)) { 1267 if (device_register(&viodev->dev)) {
1268 printk(KERN_ERR "%s: failed to register device %s\n", 1268 printk(KERN_ERR "%s: failed to register device %s\n",
1269 __func__, dev_name(&viodev->dev)); 1269 __func__, dev_name(&viodev->dev));
1270 put_device(&viodev->dev); 1270 put_device(&viodev->dev);
1271 return NULL; 1271 return NULL;
1272 } 1272 }
1273 1273
1274 return viodev; 1274 return viodev;
1275 } 1275 }
1276 EXPORT_SYMBOL(vio_register_device_node); 1276 EXPORT_SYMBOL(vio_register_device_node);
1277 1277
1278 /** 1278 /**
1279 * vio_bus_init: - Initialize the virtual IO bus 1279 * vio_bus_init: - Initialize the virtual IO bus
1280 */ 1280 */
1281 static int __init vio_bus_init(void) 1281 static int __init vio_bus_init(void)
1282 { 1282 {
1283 int err; 1283 int err;
1284 struct device_node *node_vroot; 1284 struct device_node *node_vroot;
1285 1285
1286 if (firmware_has_feature(FW_FEATURE_CMO)) 1286 if (firmware_has_feature(FW_FEATURE_CMO))
1287 vio_cmo_sysfs_init(); 1287 vio_cmo_sysfs_init();
1288 1288
1289 err = bus_register(&vio_bus_type); 1289 err = bus_register(&vio_bus_type);
1290 if (err) { 1290 if (err) {
1291 printk(KERN_ERR "failed to register VIO bus\n"); 1291 printk(KERN_ERR "failed to register VIO bus\n");
1292 return err; 1292 return err;
1293 } 1293 }
1294 1294
1295 /* 1295 /*
1296 * The fake parent of all vio devices, just to give us 1296 * The fake parent of all vio devices, just to give us
1297 * a nice directory 1297 * a nice directory
1298 */ 1298 */
1299 err = device_register(&vio_bus_device.dev); 1299 err = device_register(&vio_bus_device.dev);
1300 if (err) { 1300 if (err) {
1301 printk(KERN_WARNING "%s: device_register returned %i\n", 1301 printk(KERN_WARNING "%s: device_register returned %i\n",
1302 __func__, err); 1302 __func__, err);
1303 return err; 1303 return err;
1304 } 1304 }
1305 1305
1306 if (firmware_has_feature(FW_FEATURE_CMO)) 1306 if (firmware_has_feature(FW_FEATURE_CMO))
1307 vio_cmo_bus_init(); 1307 vio_cmo_bus_init();
1308 1308
1309 node_vroot = of_find_node_by_name(NULL, "vdevice"); 1309 node_vroot = of_find_node_by_name(NULL, "vdevice");
1310 if (node_vroot) { 1310 if (node_vroot) {
1311 struct device_node *of_node; 1311 struct device_node *of_node;
1312 1312
1313 /* 1313 /*
1314 * Create struct vio_devices for each virtual device in 1314 * Create struct vio_devices for each virtual device in
1315 * the device tree. Drivers will associate with them later. 1315 * the device tree. Drivers will associate with them later.
1316 */ 1316 */
1317 for (of_node = node_vroot->child; of_node != NULL; 1317 for (of_node = node_vroot->child; of_node != NULL;
1318 of_node = of_node->sibling) 1318 of_node = of_node->sibling)
1319 vio_register_device_node(of_node); 1319 vio_register_device_node(of_node);
1320 of_node_put(node_vroot); 1320 of_node_put(node_vroot);
1321 } 1321 }
1322 1322
1323 return 0; 1323 return 0;
1324 } 1324 }
1325 __initcall(vio_bus_init); 1325 __initcall(vio_bus_init);
1326 1326
1327 static ssize_t name_show(struct device *dev, 1327 static ssize_t name_show(struct device *dev,
1328 struct device_attribute *attr, char *buf) 1328 struct device_attribute *attr, char *buf)
1329 { 1329 {
1330 return sprintf(buf, "%s\n", to_vio_dev(dev)->name); 1330 return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
1331 } 1331 }
1332 1332
1333 static ssize_t devspec_show(struct device *dev, 1333 static ssize_t devspec_show(struct device *dev,
1334 struct device_attribute *attr, char *buf) 1334 struct device_attribute *attr, char *buf)
1335 { 1335 {
1336 struct device_node *of_node = dev->of_node; 1336 struct device_node *of_node = dev->of_node;
1337 1337
1338 return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none"); 1338 return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none");
1339 } 1339 }
1340 1340
1341 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, 1341 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
1342 char *buf) 1342 char *buf)
1343 { 1343 {
1344 const struct vio_dev *vio_dev = to_vio_dev(dev); 1344 const struct vio_dev *vio_dev = to_vio_dev(dev);
1345 struct device_node *dn; 1345 struct device_node *dn;
1346 const char *cp; 1346 const char *cp;
1347 1347
1348 dn = dev->of_node; 1348 dn = dev->of_node;
1349 if (!dn) 1349 if (!dn)
1350 return -ENODEV; 1350 return -ENODEV;
1351 cp = of_get_property(dn, "compatible", NULL); 1351 cp = of_get_property(dn, "compatible", NULL);
1352 if (!cp) 1352 if (!cp)
1353 return -ENODEV; 1353 return -ENODEV;
1354 1354
1355 return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp); 1355 return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
1356 } 1356 }
1357 1357
1358 static struct device_attribute vio_dev_attrs[] = { 1358 static struct device_attribute vio_dev_attrs[] = {
1359 __ATTR_RO(name), 1359 __ATTR_RO(name),
1360 __ATTR_RO(devspec), 1360 __ATTR_RO(devspec),
1361 __ATTR_RO(modalias), 1361 __ATTR_RO(modalias),
1362 __ATTR_NULL 1362 __ATTR_NULL
1363 }; 1363 };
1364 1364
1365 void __devinit vio_unregister_device(struct vio_dev *viodev) 1365 void __devinit vio_unregister_device(struct vio_dev *viodev)
1366 { 1366 {
1367 device_unregister(&viodev->dev); 1367 device_unregister(&viodev->dev);
1368 } 1368 }
1369 EXPORT_SYMBOL(vio_unregister_device); 1369 EXPORT_SYMBOL(vio_unregister_device);
1370 1370
1371 static int vio_bus_match(struct device *dev, struct device_driver *drv) 1371 static int vio_bus_match(struct device *dev, struct device_driver *drv)
1372 { 1372 {
1373 const struct vio_dev *vio_dev = to_vio_dev(dev); 1373 const struct vio_dev *vio_dev = to_vio_dev(dev);
1374 struct vio_driver *vio_drv = to_vio_driver(drv); 1374 struct vio_driver *vio_drv = to_vio_driver(drv);
1375 const struct vio_device_id *ids = vio_drv->id_table; 1375 const struct vio_device_id *ids = vio_drv->id_table;
1376 1376
1377 return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL); 1377 return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
1378 } 1378 }
1379 1379
1380 static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env) 1380 static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
1381 { 1381 {
1382 const struct vio_dev *vio_dev = to_vio_dev(dev); 1382 const struct vio_dev *vio_dev = to_vio_dev(dev);
1383 struct device_node *dn; 1383 struct device_node *dn;
1384 const char *cp; 1384 const char *cp;
1385 1385
1386 dn = dev->of_node; 1386 dn = dev->of_node;
1387 if (!dn) 1387 if (!dn)
1388 return -ENODEV; 1388 return -ENODEV;
1389 cp = of_get_property(dn, "compatible", NULL); 1389 cp = of_get_property(dn, "compatible", NULL);
1390 if (!cp) 1390 if (!cp)
1391 return -ENODEV; 1391 return -ENODEV;
1392 1392
1393 add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp); 1393 add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
1394 return 0; 1394 return 0;
1395 } 1395 }
1396 1396
1397 static struct bus_type vio_bus_type = { 1397 static struct bus_type vio_bus_type = {
1398 .name = "vio", 1398 .name = "vio",
1399 .dev_attrs = vio_dev_attrs, 1399 .dev_attrs = vio_dev_attrs,
1400 .uevent = vio_hotplug, 1400 .uevent = vio_hotplug,
1401 .match = vio_bus_match, 1401 .match = vio_bus_match,
1402 .probe = vio_bus_probe, 1402 .probe = vio_bus_probe,
1403 .remove = vio_bus_remove, 1403 .remove = vio_bus_remove,
1404 .pm = GENERIC_SUBSYS_PM_OPS, 1404 .pm = GENERIC_SUBSYS_PM_OPS,
1405 }; 1405 };
1406 1406
1407 /** 1407 /**
1408 * vio_get_attribute: - get attribute for virtual device 1408 * vio_get_attribute: - get attribute for virtual device
1409 * @vdev: The vio device to get property. 1409 * @vdev: The vio device to get property.
1410 * @which: The property/attribute to be extracted. 1410 * @which: The property/attribute to be extracted.
1411 * @length: Pointer to length of returned data size (unused if NULL). 1411 * @length: Pointer to length of returned data size (unused if NULL).
1412 * 1412 *
1413 * Calls prom.c's of_get_property() to return the value of the 1413 * Calls prom.c's of_get_property() to return the value of the
1414 * attribute specified by @which 1414 * attribute specified by @which
1415 */ 1415 */
1416 const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length) 1416 const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
1417 { 1417 {
1418 return of_get_property(vdev->dev.of_node, which, length); 1418 return of_get_property(vdev->dev.of_node, which, length);
1419 } 1419 }
1420 EXPORT_SYMBOL(vio_get_attribute); 1420 EXPORT_SYMBOL(vio_get_attribute);
1421 1421
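vio_get_attribute() is just a convenience wrapper around of_get_property() on the device's OF node, typically called from a driver's probe to read firmware-provided configuration. A hedged sketch follows; the "local-mac-address" property and the helper name are illustrative, not something this file defines.

#include <linux/errno.h>
#include <linux/string.h>
#include <asm/vio.h>

/*
 * Hypothetical probe fragment: read a 6-byte property from the device tree
 * node behind this vio_dev.  The returned pointer aliases the OF property
 * data, so it must not be modified or freed.
 */
static int example_read_mac(struct vio_dev *vdev, unsigned char *mac)
{
	const void *prop;
	int len;

	prop = vio_get_attribute(vdev, "local-mac-address", &len);
	if (!prop || len < 6)
		return -ENODEV;

	memcpy(mac, prop, 6);
	return 0;
}
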
1422 #ifdef CONFIG_PPC_PSERIES 1422 #ifdef CONFIG_PPC_PSERIES
1423 /* vio_find_name() - internal because only vio.c knows how we formatted the 1423 /* vio_find_name() - internal because only vio.c knows how we formatted the
1424 * kobject name 1424 * kobject name
1425 */ 1425 */
1426 static struct vio_dev *vio_find_name(const char *name) 1426 static struct vio_dev *vio_find_name(const char *name)
1427 { 1427 {
1428 struct device *found; 1428 struct device *found;
1429 1429
1430 found = bus_find_device_by_name(&vio_bus_type, NULL, name); 1430 found = bus_find_device_by_name(&vio_bus_type, NULL, name);
1431 if (!found) 1431 if (!found)
1432 return NULL; 1432 return NULL;
1433 1433
1434 return to_vio_dev(found); 1434 return to_vio_dev(found);
1435 } 1435 }
1436 1436
1437 /** 1437 /**
1438 * vio_find_node - find an already-registered vio_dev 1438 * vio_find_node - find an already-registered vio_dev
1439 * @vnode: device_node of the virtual device we're looking for 1439 * @vnode: device_node of the virtual device we're looking for
1440 */ 1440 */
1441 struct vio_dev *vio_find_node(struct device_node *vnode) 1441 struct vio_dev *vio_find_node(struct device_node *vnode)
1442 { 1442 {
1443 const uint32_t *unit_address; 1443 const uint32_t *unit_address;
1444 char kobj_name[20]; 1444 char kobj_name[20];
1445 1445
1446 /* construct the kobject name from the device node */ 1446 /* construct the kobject name from the device node */
1447 unit_address = of_get_property(vnode, "reg", NULL); 1447 unit_address = of_get_property(vnode, "reg", NULL);
1448 if (!unit_address) 1448 if (!unit_address)
1449 return NULL; 1449 return NULL;
1450 snprintf(kobj_name, sizeof(kobj_name), "%x", *unit_address); 1450 snprintf(kobj_name, sizeof(kobj_name), "%x", *unit_address);
1451 1451
1452 return vio_find_name(kobj_name); 1452 return vio_find_name(kobj_name);
1453 } 1453 }
1454 EXPORT_SYMBOL(vio_find_node); 1454 EXPORT_SYMBOL(vio_find_node);
1455 1455
1456 int vio_enable_interrupts(struct vio_dev *dev) 1456 int vio_enable_interrupts(struct vio_dev *dev)
1457 { 1457 {
1458 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE); 1458 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
1459 if (rc != H_SUCCESS) 1459 if (rc != H_SUCCESS)
1460 printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc); 1460 printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
1461 return rc; 1461 return rc;
1462 } 1462 }
1463 EXPORT_SYMBOL(vio_enable_interrupts); 1463 EXPORT_SYMBOL(vio_enable_interrupts);
1464 1464
1465 int vio_disable_interrupts(struct vio_dev *dev) 1465 int vio_disable_interrupts(struct vio_dev *dev)
1466 { 1466 {
1467 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE); 1467 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
1468 if (rc != H_SUCCESS) 1468 if (rc != H_SUCCESS)
1469 printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc); 1469 printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
1470 return rc; 1470 return rc;
1471 } 1471 }
1472 EXPORT_SYMBOL(vio_disable_interrupts); 1472 EXPORT_SYMBOL(vio_disable_interrupts);
1473 #endif /* CONFIG_PPC_PSERIES */ 1473 #endif /* CONFIG_PPC_PSERIES */
1474 1474
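The CONFIG_PPC_PSERIES block above exports helpers that callers typically chain: look up the vio_dev registered for an OF node, then toggle its interrupt signalling through h_vio_signal(). A hypothetical sketch of that pattern, with only vio_find_node() and vio_enable_interrupts() taken from this file; the error handling and function name are invented.

#include <linux/errno.h>
#include <linux/of.h>
#include <asm/hvcall.h>
#include <asm/vio.h>

/*
 * Hypothetical: given a device_node located elsewhere, find the vio_dev the
 * bus created for it and enable its hypervisor interrupts.
 */
static int example_enable_vio_irqs(struct device_node *vnode)
{
	struct vio_dev *vdev;

	vdev = vio_find_node(vnode);	/* matches on the "reg" unit address */
	if (!vdev)
		return -ENODEV;

	if (vio_enable_interrupts(vdev) != H_SUCCESS)
		return -EIO;

	return 0;
}
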
arch/powerpc/kvm/book3s_exports.c
1 /* 1 /*
2 * This program is free software; you can redistribute it and/or modify 2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as 3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation. 4 * published by the Free Software Foundation.
5 * 5 *
6 * This program is distributed in the hope that it will be useful, 6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of 7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details. 9 * GNU General Public License for more details.
10 * 10 *
11 * You should have received a copy of the GNU General Public License 11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software 12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 * 14 *
15 * Copyright SUSE Linux Products GmbH 2009 15 * Copyright SUSE Linux Products GmbH 2009
16 * 16 *
17 * Authors: Alexander Graf <agraf@suse.de> 17 * Authors: Alexander Graf <agraf@suse.de>
18 */ 18 */
19 19
20 #include <linux/module.h> 20 #include <linux/export.h>
21 #include <asm/kvm_book3s.h> 21 #include <asm/kvm_book3s.h>
22 22
23 #ifdef CONFIG_KVM_BOOK3S_64_HV 23 #ifdef CONFIG_KVM_BOOK3S_64_HV
24 EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline); 24 EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline);
25 #else 25 #else
26 EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline); 26 EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline);
27 EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu); 27 EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
28 #ifdef CONFIG_ALTIVEC 28 #ifdef CONFIG_ALTIVEC
29 EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec); 29 EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec);
30 #endif 30 #endif
31 #ifdef CONFIG_VSX 31 #ifdef CONFIG_VSX
32 EXPORT_SYMBOL_GPL(kvmppc_load_up_vsx); 32 EXPORT_SYMBOL_GPL(kvmppc_load_up_vsx);
33 #endif 33 #endif
34 #endif 34 #endif
35 35
36 36
arch/powerpc/lib/checksum_wrappers_64.c
1 /* 1 /*
2 * This program is free software; you can redistribute it and/or modify 2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by 3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or 4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version. 5 * (at your option) any later version.
6 * 6 *
7 * This program is distributed in the hope that it will be useful, 7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of 8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details. 10 * GNU General Public License for more details.
11 * 11 *
12 * You should have received a copy of the GNU General Public License 12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software 13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 14 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
15 * 15 *
16 * Copyright (C) IBM Corporation, 2010 16 * Copyright (C) IBM Corporation, 2010
17 * 17 *
18 * Author: Anton Blanchard <anton@au.ibm.com> 18 * Author: Anton Blanchard <anton@au.ibm.com>
19 */ 19 */
20 #include <linux/module.h> 20 #include <linux/export.h>
21 #include <linux/compiler.h> 21 #include <linux/compiler.h>
22 #include <linux/types.h> 22 #include <linux/types.h>
23 #include <asm/checksum.h> 23 #include <asm/checksum.h>
24 #include <asm/uaccess.h> 24 #include <asm/uaccess.h>
25 25
26 __wsum csum_and_copy_from_user(const void __user *src, void *dst, 26 __wsum csum_and_copy_from_user(const void __user *src, void *dst,
27 int len, __wsum sum, int *err_ptr) 27 int len, __wsum sum, int *err_ptr)
28 { 28 {
29 unsigned int csum; 29 unsigned int csum;
30 30
31 might_sleep(); 31 might_sleep();
32 32
33 *err_ptr = 0; 33 *err_ptr = 0;
34 34
35 if (!len) { 35 if (!len) {
36 csum = 0; 36 csum = 0;
37 goto out; 37 goto out;
38 } 38 }
39 39
40 if (unlikely((len < 0) || !access_ok(VERIFY_READ, src, len))) { 40 if (unlikely((len < 0) || !access_ok(VERIFY_READ, src, len))) {
41 *err_ptr = -EFAULT; 41 *err_ptr = -EFAULT;
42 csum = (__force unsigned int)sum; 42 csum = (__force unsigned int)sum;
43 goto out; 43 goto out;
44 } 44 }
45 45
46 csum = csum_partial_copy_generic((void __force *)src, dst, 46 csum = csum_partial_copy_generic((void __force *)src, dst,
47 len, sum, err_ptr, NULL); 47 len, sum, err_ptr, NULL);
48 48
49 if (unlikely(*err_ptr)) { 49 if (unlikely(*err_ptr)) {
50 int missing = __copy_from_user(dst, src, len); 50 int missing = __copy_from_user(dst, src, len);
51 51
52 if (missing) { 52 if (missing) {
53 memset(dst + len - missing, 0, missing); 53 memset(dst + len - missing, 0, missing);
54 *err_ptr = -EFAULT; 54 *err_ptr = -EFAULT;
55 } else { 55 } else {
56 *err_ptr = 0; 56 *err_ptr = 0;
57 } 57 }
58 58
59 csum = csum_partial(dst, len, sum); 59 csum = csum_partial(dst, len, sum);
60 } 60 }
61 61
62 out: 62 out:
63 return (__force __wsum)csum; 63 return (__force __wsum)csum;
64 } 64 }
65 EXPORT_SYMBOL(csum_and_copy_from_user); 65 EXPORT_SYMBOL(csum_and_copy_from_user);
66 66
67 __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, 67 __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
68 __wsum sum, int *err_ptr) 68 __wsum sum, int *err_ptr)
69 { 69 {
70 unsigned int csum; 70 unsigned int csum;
71 71
72 might_sleep(); 72 might_sleep();
73 73
74 *err_ptr = 0; 74 *err_ptr = 0;
75 75
76 if (!len) { 76 if (!len) {
77 csum = 0; 77 csum = 0;
78 goto out; 78 goto out;
79 } 79 }
80 80
81 if (unlikely((len < 0) || !access_ok(VERIFY_WRITE, dst, len))) { 81 if (unlikely((len < 0) || !access_ok(VERIFY_WRITE, dst, len))) {
82 *err_ptr = -EFAULT; 82 *err_ptr = -EFAULT;
83 csum = -1; /* invalid checksum */ 83 csum = -1; /* invalid checksum */
84 goto out; 84 goto out;
85 } 85 }
86 86
87 csum = csum_partial_copy_generic(src, (void __force *)dst, 87 csum = csum_partial_copy_generic(src, (void __force *)dst,
88 len, sum, NULL, err_ptr); 88 len, sum, NULL, err_ptr);
89 89
90 if (unlikely(*err_ptr)) { 90 if (unlikely(*err_ptr)) {
91 csum = csum_partial(src, len, sum); 91 csum = csum_partial(src, len, sum);
92 92
93 if (copy_to_user(dst, src, len)) { 93 if (copy_to_user(dst, src, len)) {
94 *err_ptr = -EFAULT; 94 *err_ptr = -EFAULT;
95 csum = -1; /* invalid checksum */ 95 csum = -1; /* invalid checksum */
96 } 96 }
97 } 97 }
98 98
99 out: 99 out:
100 return (__force __wsum)csum; 100 return (__force __wsum)csum;
101 } 101 }
102 EXPORT_SYMBOL(csum_and_copy_to_user); 102 EXPORT_SYMBOL(csum_and_copy_to_user);
103 103
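Both wrappers fold the user copy and the Internet checksum into a single pass, falling back to a plain copy plus csum_partial() if the combined primitive faults. A hedged caller-side sketch of csum_and_copy_from_user() follows; the wrapper and buffer names are illustrative only.

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/checksum.h>

/*
 * Hypothetical: copy 'len' bytes from userspace into a kernel buffer while
 * folding them into the running checksum; *sum is updated in place.
 */
static int example_copy_and_csum(const void __user *ubuf, void *kbuf,
				 int len, __wsum *sum)
{
	int err = 0;

	*sum = csum_and_copy_from_user(ubuf, kbuf, len, *sum, &err);
	return err;	/* 0 on success, -EFAULT if the copy faulted */
}
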
arch/powerpc/lib/devres.c
1 /* 1 /*
2 * Copyright (C) 2008 Freescale Semiconductor, Inc. 2 * Copyright (C) 2008 Freescale Semiconductor, Inc.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 */ 8 */
9 9
10 #include <linux/device.h> /* devres_*(), devm_ioremap_release() */ 10 #include <linux/device.h> /* devres_*(), devm_ioremap_release() */
11 #include <linux/gfp.h> 11 #include <linux/gfp.h>
12 #include <linux/io.h> /* ioremap_prot() */ 12 #include <linux/io.h> /* ioremap_prot() */
13 #include <linux/module.h> /* EXPORT_SYMBOL() */ 13 #include <linux/export.h> /* EXPORT_SYMBOL() */
14 14
15 /** 15 /**
16 * devm_ioremap_prot - Managed ioremap_prot() 16 * devm_ioremap_prot - Managed ioremap_prot()
17 * @dev: Generic device to remap IO address for 17 * @dev: Generic device to remap IO address for
18 * @offset: BUS offset to map 18 * @offset: BUS offset to map
19 * @size: Size of map 19 * @size: Size of map
20 * @flags: Page flags 20 * @flags: Page flags
21 * 21 *
22 * Managed ioremap_prot(). Map is automatically unmapped on driver 22 * Managed ioremap_prot(). Map is automatically unmapped on driver
23 * detach. 23 * detach.
24 */ 24 */
25 void __iomem *devm_ioremap_prot(struct device *dev, resource_size_t offset, 25 void __iomem *devm_ioremap_prot(struct device *dev, resource_size_t offset,
26 size_t size, unsigned long flags) 26 size_t size, unsigned long flags)
27 { 27 {
28 void __iomem **ptr, *addr; 28 void __iomem **ptr, *addr;
29 29
30 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL); 30 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
31 if (!ptr) 31 if (!ptr)
32 return NULL; 32 return NULL;
33 33
34 addr = ioremap_prot(offset, size, flags); 34 addr = ioremap_prot(offset, size, flags);
35 if (addr) { 35 if (addr) {
36 *ptr = addr; 36 *ptr = addr;
37 devres_add(dev, ptr); 37 devres_add(dev, ptr);
38 } else 38 } else
39 devres_free(ptr); 39 devres_free(ptr);
40 40
41 return addr; 41 return addr;
42 } 42 }
43 EXPORT_SYMBOL(devm_ioremap_prot); 43 EXPORT_SYMBOL(devm_ioremap_prot);
44 44
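devm_ioremap_prot() behaves like devm_ioremap() but lets the caller pick the page protection flags, and the mapping is torn down automatically on driver detach. A hypothetical platform-driver probe fragment is sketched below; the resource lookup and flag choice are illustrative, only devm_ioremap_prot() itself comes from this file.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <asm/pgtable.h>

/*
 * Hypothetical probe: map a register window with explicit protection bits;
 * no matching unmap is needed thanks to devres.
 */
static int example_mmio_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	regs = devm_ioremap_prot(&pdev->dev, res->start, resource_size(res),
				 _PAGE_NO_CACHE | _PAGE_GUARDED);
	if (!regs)
		return -ENOMEM;

	/* ... use readl()/writel() on regs ... */
	return 0;
}
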
arch/powerpc/lib/locks.c
1 /* 1 /*
2 * Spin and read/write lock operations. 2 * Spin and read/write lock operations.
3 * 3 *
4 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM 4 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
5 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM 5 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
6 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM 6 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
7 * Rework to support virtual processors 7 * Rework to support virtual processors
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version. 12 * 2 of the License, or (at your option) any later version.
13 */ 13 */
14 14
15 #include <linux/kernel.h> 15 #include <linux/kernel.h>
16 #include <linux/spinlock.h> 16 #include <linux/spinlock.h>
17 #include <linux/module.h> 17 #include <linux/export.h>
18 #include <linux/stringify.h> 18 #include <linux/stringify.h>
19 #include <linux/smp.h> 19 #include <linux/smp.h>
20 20
21 /* waiting for a spinlock... */ 21 /* waiting for a spinlock... */
22 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) 22 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
23 #include <asm/hvcall.h> 23 #include <asm/hvcall.h>
24 #include <asm/iseries/hv_call.h> 24 #include <asm/iseries/hv_call.h>
25 #include <asm/smp.h> 25 #include <asm/smp.h>
26 #include <asm/firmware.h> 26 #include <asm/firmware.h>
27 27
28 void __spin_yield(arch_spinlock_t *lock) 28 void __spin_yield(arch_spinlock_t *lock)
29 { 29 {
30 unsigned int lock_value, holder_cpu, yield_count; 30 unsigned int lock_value, holder_cpu, yield_count;
31 31
32 lock_value = lock->slock; 32 lock_value = lock->slock;
33 if (lock_value == 0) 33 if (lock_value == 0)
34 return; 34 return;
35 holder_cpu = lock_value & 0xffff; 35 holder_cpu = lock_value & 0xffff;
36 BUG_ON(holder_cpu >= NR_CPUS); 36 BUG_ON(holder_cpu >= NR_CPUS);
37 yield_count = lppaca_of(holder_cpu).yield_count; 37 yield_count = lppaca_of(holder_cpu).yield_count;
38 if ((yield_count & 1) == 0) 38 if ((yield_count & 1) == 0)
39 return; /* virtual cpu is currently running */ 39 return; /* virtual cpu is currently running */
40 rmb(); 40 rmb();
41 if (lock->slock != lock_value) 41 if (lock->slock != lock_value)
42 return; /* something has changed */ 42 return; /* something has changed */
43 if (firmware_has_feature(FW_FEATURE_ISERIES)) 43 if (firmware_has_feature(FW_FEATURE_ISERIES))
44 HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc, 44 HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
45 ((u64)holder_cpu << 32) | yield_count); 45 ((u64)holder_cpu << 32) | yield_count);
46 #ifdef CONFIG_PPC_SPLPAR 46 #ifdef CONFIG_PPC_SPLPAR
47 else 47 else
48 plpar_hcall_norets(H_CONFER, 48 plpar_hcall_norets(H_CONFER,
49 get_hard_smp_processor_id(holder_cpu), yield_count); 49 get_hard_smp_processor_id(holder_cpu), yield_count);
50 #endif 50 #endif
51 } 51 }
52 52
53 /* 53 /*
54 * Waiting for a read lock or a write lock on a rwlock... 54 * Waiting for a read lock or a write lock on a rwlock...
55 * This turns out to be the same for read and write locks, since 55 * This turns out to be the same for read and write locks, since
56 * we only know the holder if it is write-locked. 56 * we only know the holder if it is write-locked.
57 */ 57 */
58 void __rw_yield(arch_rwlock_t *rw) 58 void __rw_yield(arch_rwlock_t *rw)
59 { 59 {
60 int lock_value; 60 int lock_value;
61 unsigned int holder_cpu, yield_count; 61 unsigned int holder_cpu, yield_count;
62 62
63 lock_value = rw->lock; 63 lock_value = rw->lock;
64 if (lock_value >= 0) 64 if (lock_value >= 0)
65 return; /* no write lock at present */ 65 return; /* no write lock at present */
66 holder_cpu = lock_value & 0xffff; 66 holder_cpu = lock_value & 0xffff;
67 BUG_ON(holder_cpu >= NR_CPUS); 67 BUG_ON(holder_cpu >= NR_CPUS);
68 yield_count = lppaca_of(holder_cpu).yield_count; 68 yield_count = lppaca_of(holder_cpu).yield_count;
69 if ((yield_count & 1) == 0) 69 if ((yield_count & 1) == 0)
70 return; /* virtual cpu is currently running */ 70 return; /* virtual cpu is currently running */
71 rmb(); 71 rmb();
72 if (rw->lock != lock_value) 72 if (rw->lock != lock_value)
73 return; /* something has changed */ 73 return; /* something has changed */
74 if (firmware_has_feature(FW_FEATURE_ISERIES)) 74 if (firmware_has_feature(FW_FEATURE_ISERIES))
75 HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc, 75 HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
76 ((u64)holder_cpu << 32) | yield_count); 76 ((u64)holder_cpu << 32) | yield_count);
77 #ifdef CONFIG_PPC_SPLPAR 77 #ifdef CONFIG_PPC_SPLPAR
78 else 78 else
79 plpar_hcall_norets(H_CONFER, 79 plpar_hcall_norets(H_CONFER,
80 get_hard_smp_processor_id(holder_cpu), yield_count); 80 get_hard_smp_processor_id(holder_cpu), yield_count);
81 #endif 81 #endif
82 } 82 }
83 #endif 83 #endif
84 84
85 void arch_spin_unlock_wait(arch_spinlock_t *lock) 85 void arch_spin_unlock_wait(arch_spinlock_t *lock)
86 { 86 {
87 while (lock->slock) { 87 while (lock->slock) {
88 HMT_low(); 88 HMT_low();
89 if (SHARED_PROCESSOR) 89 if (SHARED_PROCESSOR)
90 __spin_yield(lock); 90 __spin_yield(lock);
91 } 91 }
92 HMT_medium(); 92 HMT_medium();
93 } 93 }
94 94
95 EXPORT_SYMBOL(arch_spin_unlock_wait); 95 EXPORT_SYMBOL(arch_spin_unlock_wait);
96 96
arch/powerpc/lib/rheap.c
1 /* 1 /*
2 * A Remote Heap. Remote means that we don't touch the memory that the 2 * A Remote Heap. Remote means that we don't touch the memory that the
3 * heap points to. Normal heap implementations use the memory they manage 3 * heap points to. Normal heap implementations use the memory they manage
4 * to place their list. We cannot do that because the memory we manage may 4 * to place their list. We cannot do that because the memory we manage may
5 * have special properties, for example it is uncachable or of different 5 * have special properties, for example it is uncachable or of different
6 * endianness. 6 * endianness.
7 * 7 *
8 * Author: Pantelis Antoniou <panto@intracom.gr> 8 * Author: Pantelis Antoniou <panto@intracom.gr>
9 * 9 *
10 * 2004 (c) INTRACOM S.A. Greece. This file is licensed under 10 * 2004 (c) INTRACOM S.A. Greece. This file is licensed under
11 * the terms of the GNU General Public License version 2. This program 11 * the terms of the GNU General Public License version 2. This program
12 * is licensed "as is" without any warranty of any kind, whether express 12 * is licensed "as is" without any warranty of any kind, whether express
13 * or implied. 13 * or implied.
14 */ 14 */
15 #include <linux/types.h> 15 #include <linux/types.h>
16 #include <linux/errno.h> 16 #include <linux/errno.h>
17 #include <linux/kernel.h> 17 #include <linux/kernel.h>
18 #include <linux/module.h> 18 #include <linux/export.h>
19 #include <linux/mm.h> 19 #include <linux/mm.h>
20 #include <linux/err.h> 20 #include <linux/err.h>
21 #include <linux/slab.h> 21 #include <linux/slab.h>
22 22
23 #include <asm/rheap.h> 23 #include <asm/rheap.h>
24 24
25 /* 25 /*
26 * Fixup a list_head, needed when copying lists. If the pointers fall 26 * Fixup a list_head, needed when copying lists. If the pointers fall
27 * between s and e, apply the delta. This assumes that 27 * between s and e, apply the delta. This assumes that
28 * sizeof(struct list_head *) == sizeof(unsigned long *). 28 * sizeof(struct list_head *) == sizeof(unsigned long *).
29 */ 29 */
30 static inline void fixup(unsigned long s, unsigned long e, int d, 30 static inline void fixup(unsigned long s, unsigned long e, int d,
31 struct list_head *l) 31 struct list_head *l)
32 { 32 {
33 unsigned long *pp; 33 unsigned long *pp;
34 34
35 pp = (unsigned long *)&l->next; 35 pp = (unsigned long *)&l->next;
36 if (*pp >= s && *pp < e) 36 if (*pp >= s && *pp < e)
37 *pp += d; 37 *pp += d;
38 38
39 pp = (unsigned long *)&l->prev; 39 pp = (unsigned long *)&l->prev;
40 if (*pp >= s && *pp < e) 40 if (*pp >= s && *pp < e)
41 *pp += d; 41 *pp += d;
42 } 42 }
43 43
44 /* Grow the allocated blocks */ 44 /* Grow the allocated blocks */
45 static int grow(rh_info_t * info, int max_blocks) 45 static int grow(rh_info_t * info, int max_blocks)
46 { 46 {
47 rh_block_t *block, *blk; 47 rh_block_t *block, *blk;
48 int i, new_blocks; 48 int i, new_blocks;
49 int delta; 49 int delta;
50 unsigned long blks, blke; 50 unsigned long blks, blke;
51 51
52 if (max_blocks <= info->max_blocks) 52 if (max_blocks <= info->max_blocks)
53 return -EINVAL; 53 return -EINVAL;
54 54
55 new_blocks = max_blocks - info->max_blocks; 55 new_blocks = max_blocks - info->max_blocks;
56 56
57 block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_ATOMIC); 57 block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_ATOMIC);
58 if (block == NULL) 58 if (block == NULL)
59 return -ENOMEM; 59 return -ENOMEM;
60 60
61 if (info->max_blocks > 0) { 61 if (info->max_blocks > 0) {
62 62
63 /* copy old block area */ 63 /* copy old block area */
64 memcpy(block, info->block, 64 memcpy(block, info->block,
65 sizeof(rh_block_t) * info->max_blocks); 65 sizeof(rh_block_t) * info->max_blocks);
66 66
67 delta = (char *)block - (char *)info->block; 67 delta = (char *)block - (char *)info->block;
68 68
69 /* and fixup list pointers */ 69 /* and fixup list pointers */
70 blks = (unsigned long)info->block; 70 blks = (unsigned long)info->block;
71 blke = (unsigned long)(info->block + info->max_blocks); 71 blke = (unsigned long)(info->block + info->max_blocks);
72 72
73 for (i = 0, blk = block; i < info->max_blocks; i++, blk++) 73 for (i = 0, blk = block; i < info->max_blocks; i++, blk++)
74 fixup(blks, blke, delta, &blk->list); 74 fixup(blks, blke, delta, &blk->list);
75 75
76 fixup(blks, blke, delta, &info->empty_list); 76 fixup(blks, blke, delta, &info->empty_list);
77 fixup(blks, blke, delta, &info->free_list); 77 fixup(blks, blke, delta, &info->free_list);
78 fixup(blks, blke, delta, &info->taken_list); 78 fixup(blks, blke, delta, &info->taken_list);
79 79
80 /* free the old allocated memory */ 80 /* free the old allocated memory */
81 if ((info->flags & RHIF_STATIC_BLOCK) == 0) 81 if ((info->flags & RHIF_STATIC_BLOCK) == 0)
82 kfree(info->block); 82 kfree(info->block);
83 } 83 }
84 84
85 info->block = block; 85 info->block = block;
86 info->empty_slots += new_blocks; 86 info->empty_slots += new_blocks;
87 info->max_blocks = max_blocks; 87 info->max_blocks = max_blocks;
88 info->flags &= ~RHIF_STATIC_BLOCK; 88 info->flags &= ~RHIF_STATIC_BLOCK;
89 89
90 /* add all new blocks to the free list */ 90 /* add all new blocks to the free list */
91 blk = block + info->max_blocks - new_blocks; 91 blk = block + info->max_blocks - new_blocks;
92 for (i = 0; i < new_blocks; i++, blk++) 92 for (i = 0; i < new_blocks; i++, blk++)
93 list_add(&blk->list, &info->empty_list); 93 list_add(&blk->list, &info->empty_list);
94 94
95 return 0; 95 return 0;
96 } 96 }
97 97
98 /* 98 /*
99 * Assure at least the required amount of empty slots. If this function 99 * Assure at least the required amount of empty slots. If this function
100 * causes a grow in the block area then all pointers kept to the block 100 * causes a grow in the block area then all pointers kept to the block
101 * area are invalid! 101 * area are invalid!
102 */ 102 */
103 static int assure_empty(rh_info_t * info, int slots) 103 static int assure_empty(rh_info_t * info, int slots)
104 { 104 {
105 int max_blocks; 105 int max_blocks;
106 106
107 /* This function is not meant to be used to grow uncontrollably */ 107 /* This function is not meant to be used to grow uncontrollably */
108 if (slots >= 4) 108 if (slots >= 4)
109 return -EINVAL; 109 return -EINVAL;
110 110
111 /* Enough space */ 111 /* Enough space */
112 if (info->empty_slots >= slots) 112 if (info->empty_slots >= slots)
113 return 0; 113 return 0;
114 114
115 /* Next 16 sized block */ 115 /* Next 16 sized block */
116 max_blocks = ((info->max_blocks + slots) + 15) & ~15; 116 max_blocks = ((info->max_blocks + slots) + 15) & ~15;
117 117
118 return grow(info, max_blocks); 118 return grow(info, max_blocks);
119 } 119 }
120 120
121 static rh_block_t *get_slot(rh_info_t * info) 121 static rh_block_t *get_slot(rh_info_t * info)
122 { 122 {
123 rh_block_t *blk; 123 rh_block_t *blk;
124 124
125 /* If no more free slots, and failure to extend. */ 125 /* If no more free slots, and failure to extend. */
126 /* XXX: You should have called assure_empty before */ 126 /* XXX: You should have called assure_empty before */
127 if (info->empty_slots == 0) { 127 if (info->empty_slots == 0) {
128 printk(KERN_ERR "rh: out of slots; crash is imminent.\n"); 128 printk(KERN_ERR "rh: out of slots; crash is imminent.\n");
129 return NULL; 129 return NULL;
130 } 130 }
131 131
132 /* Get empty slot to use */ 132 /* Get empty slot to use */
133 blk = list_entry(info->empty_list.next, rh_block_t, list); 133 blk = list_entry(info->empty_list.next, rh_block_t, list);
134 list_del_init(&blk->list); 134 list_del_init(&blk->list);
135 info->empty_slots--; 135 info->empty_slots--;
136 136
137 /* Initialize */ 137 /* Initialize */
138 blk->start = 0; 138 blk->start = 0;
139 blk->size = 0; 139 blk->size = 0;
140 blk->owner = NULL; 140 blk->owner = NULL;
141 141
142 return blk; 142 return blk;
143 } 143 }
144 144
145 static inline void release_slot(rh_info_t * info, rh_block_t * blk) 145 static inline void release_slot(rh_info_t * info, rh_block_t * blk)
146 { 146 {
147 list_add(&blk->list, &info->empty_list); 147 list_add(&blk->list, &info->empty_list);
148 info->empty_slots++; 148 info->empty_slots++;
149 } 149 }
150 150
151 static void attach_free_block(rh_info_t * info, rh_block_t * blkn) 151 static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
152 { 152 {
153 rh_block_t *blk; 153 rh_block_t *blk;
154 rh_block_t *before; 154 rh_block_t *before;
155 rh_block_t *after; 155 rh_block_t *after;
156 rh_block_t *next; 156 rh_block_t *next;
157 int size; 157 int size;
158 unsigned long s, e, bs, be; 158 unsigned long s, e, bs, be;
159 struct list_head *l; 159 struct list_head *l;
160 160
161 /* We assume that they are aligned properly */ 161 /* We assume that they are aligned properly */
162 size = blkn->size; 162 size = blkn->size;
163 s = blkn->start; 163 s = blkn->start;
164 e = s + size; 164 e = s + size;
165 165
166 /* Find the blocks immediately before and after the given one 166 /* Find the blocks immediately before and after the given one
167 * (if any) */ 167 * (if any) */
168 before = NULL; 168 before = NULL;
169 after = NULL; 169 after = NULL;
170 next = NULL; 170 next = NULL;
171 171
172 list_for_each(l, &info->free_list) { 172 list_for_each(l, &info->free_list) {
173 blk = list_entry(l, rh_block_t, list); 173 blk = list_entry(l, rh_block_t, list);
174 174
175 bs = blk->start; 175 bs = blk->start;
176 be = bs + blk->size; 176 be = bs + blk->size;
177 177
178 if (next == NULL && s >= bs) 178 if (next == NULL && s >= bs)
179 next = blk; 179 next = blk;
180 180
181 if (be == s) 181 if (be == s)
182 before = blk; 182 before = blk;
183 183
184 if (e == bs) 184 if (e == bs)
185 after = blk; 185 after = blk;
186 186
187 /* If both are not null, break now */ 187 /* If both are not null, break now */
188 if (before != NULL && after != NULL) 188 if (before != NULL && after != NULL)
189 break; 189 break;
190 } 190 }
191 191
192 /* Now check if they are really adjacent */ 192 /* Now check if they are really adjacent */
193 if (before && s != (before->start + before->size)) 193 if (before && s != (before->start + before->size))
194 before = NULL; 194 before = NULL;
195 195
196 if (after && e != after->start) 196 if (after && e != after->start)
197 after = NULL; 197 after = NULL;
198 198
199 /* No coalescing; list insert and return */ 199 /* No coalescing; list insert and return */
200 if (before == NULL && after == NULL) { 200 if (before == NULL && after == NULL) {
201 201
202 if (next != NULL) 202 if (next != NULL)
203 list_add(&blkn->list, &next->list); 203 list_add(&blkn->list, &next->list);
204 else 204 else
205 list_add(&blkn->list, &info->free_list); 205 list_add(&blkn->list, &info->free_list);
206 206
207 return; 207 return;
208 } 208 }
209 209
210 /* We don't need it anymore */ 210 /* We don't need it anymore */
211 release_slot(info, blkn); 211 release_slot(info, blkn);
212 212
213 /* Grow the before block */ 213 /* Grow the before block */
214 if (before != NULL && after == NULL) { 214 if (before != NULL && after == NULL) {
215 before->size += size; 215 before->size += size;
216 return; 216 return;
217 } 217 }
218 218
219 /* Grow the after block backwards */ 219 /* Grow the after block backwards */
220 if (before == NULL && after != NULL) { 220 if (before == NULL && after != NULL) {
221 after->start -= size; 221 after->start -= size;
222 after->size += size; 222 after->size += size;
223 return; 223 return;
224 } 224 }
225 225
226 /* Grow the before block, and release the after block */ 226 /* Grow the before block, and release the after block */
227 before->size += size + after->size; 227 before->size += size + after->size;
228 list_del(&after->list); 228 list_del(&after->list);
229 release_slot(info, after); 229 release_slot(info, after);
230 } 230 }
231 231
232 static void attach_taken_block(rh_info_t * info, rh_block_t * blkn) 232 static void attach_taken_block(rh_info_t * info, rh_block_t * blkn)
233 { 233 {
234 rh_block_t *blk; 234 rh_block_t *blk;
235 struct list_head *l; 235 struct list_head *l;
236 236
237 /* Find the block immediately before the given one (if any) */ 237 /* Find the block immediately before the given one (if any) */
238 list_for_each(l, &info->taken_list) { 238 list_for_each(l, &info->taken_list) {
239 blk = list_entry(l, rh_block_t, list); 239 blk = list_entry(l, rh_block_t, list);
240 if (blk->start > blkn->start) { 240 if (blk->start > blkn->start) {
241 list_add_tail(&blkn->list, &blk->list); 241 list_add_tail(&blkn->list, &blk->list);
242 return; 242 return;
243 } 243 }
244 } 244 }
245 245
246 list_add_tail(&blkn->list, &info->taken_list); 246 list_add_tail(&blkn->list, &info->taken_list);
247 } 247 }
248 248
249 /* 249 /*
250 * Create a remote heap dynamically. Note that no memory for the blocks 250 * Create a remote heap dynamically. Note that no memory for the blocks
251 * is allocated; it will be allocated upon the first allocation. 251 * is allocated; it will be allocated upon the first allocation.
252 */ 252 */
253 rh_info_t *rh_create(unsigned int alignment) 253 rh_info_t *rh_create(unsigned int alignment)
254 { 254 {
255 rh_info_t *info; 255 rh_info_t *info;
256 256
257 /* Alignment must be a power of two */ 257 /* Alignment must be a power of two */
258 if ((alignment & (alignment - 1)) != 0) 258 if ((alignment & (alignment - 1)) != 0)
259 return ERR_PTR(-EINVAL); 259 return ERR_PTR(-EINVAL);
260 260
261 info = kmalloc(sizeof(*info), GFP_ATOMIC); 261 info = kmalloc(sizeof(*info), GFP_ATOMIC);
262 if (info == NULL) 262 if (info == NULL)
263 return ERR_PTR(-ENOMEM); 263 return ERR_PTR(-ENOMEM);
264 264
265 info->alignment = alignment; 265 info->alignment = alignment;
266 266
267 /* Initially everything is empty */ 267 /* Initially everything is empty */
268 info->block = NULL; 268 info->block = NULL;
269 info->max_blocks = 0; 269 info->max_blocks = 0;
270 info->empty_slots = 0; 270 info->empty_slots = 0;
271 info->flags = 0; 271 info->flags = 0;
272 272
273 INIT_LIST_HEAD(&info->empty_list); 273 INIT_LIST_HEAD(&info->empty_list);
274 INIT_LIST_HEAD(&info->free_list); 274 INIT_LIST_HEAD(&info->free_list);
275 INIT_LIST_HEAD(&info->taken_list); 275 INIT_LIST_HEAD(&info->taken_list);
276 276
277 return info; 277 return info;
278 } 278 }
279 EXPORT_SYMBOL_GPL(rh_create); 279 EXPORT_SYMBOL_GPL(rh_create);
280 280
281 /* 281 /*
282 * Destroy a dynamically created remote heap. Deallocate only if the areas 282 * Destroy a dynamically created remote heap. Deallocate only if the areas
283 * are not static 283 * are not static
284 */ 284 */
285 void rh_destroy(rh_info_t * info) 285 void rh_destroy(rh_info_t * info)
286 { 286 {
287 if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL) 287 if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL)
288 kfree(info->block); 288 kfree(info->block);
289 289
290 if ((info->flags & RHIF_STATIC_INFO) == 0) 290 if ((info->flags & RHIF_STATIC_INFO) == 0)
291 kfree(info); 291 kfree(info);
292 } 292 }
293 EXPORT_SYMBOL_GPL(rh_destroy); 293 EXPORT_SYMBOL_GPL(rh_destroy);
294 294
295 /* 295 /*
296 * Initialize in place a remote heap info block. This is needed to support 296 * Initialize in place a remote heap info block. This is needed to support
297 * operation very early in the startup of the kernel, when it is not yet safe 297 * operation very early in the startup of the kernel, when it is not yet safe
298 * to call kmalloc. 298 * to call kmalloc.
299 */ 299 */
300 void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks, 300 void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
301 rh_block_t * block) 301 rh_block_t * block)
302 { 302 {
303 int i; 303 int i;
304 rh_block_t *blk; 304 rh_block_t *blk;
305 305
306 /* Alignment must be a power of two */ 306 /* Alignment must be a power of two */
307 if ((alignment & (alignment - 1)) != 0) 307 if ((alignment & (alignment - 1)) != 0)
308 return; 308 return;
309 309
310 info->alignment = alignment; 310 info->alignment = alignment;
311 311
312 /* Initially everything is empty */ 312 /* Initially everything is empty */
313 info->block = block; 313 info->block = block;
314 info->max_blocks = max_blocks; 314 info->max_blocks = max_blocks;
315 info->empty_slots = max_blocks; 315 info->empty_slots = max_blocks;
316 info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK; 316 info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK;
317 317
318 INIT_LIST_HEAD(&info->empty_list); 318 INIT_LIST_HEAD(&info->empty_list);
319 INIT_LIST_HEAD(&info->free_list); 319 INIT_LIST_HEAD(&info->free_list);
320 INIT_LIST_HEAD(&info->taken_list); 320 INIT_LIST_HEAD(&info->taken_list);
321 321
322 /* Add all new blocks to the free list */ 322 /* Add all new blocks to the free list */
323 for (i = 0, blk = block; i < max_blocks; i++, blk++) 323 for (i = 0, blk = block; i < max_blocks; i++, blk++)
324 list_add(&blk->list, &info->empty_list); 324 list_add(&blk->list, &info->empty_list);
325 } 325 }
326 EXPORT_SYMBOL_GPL(rh_init); 326 EXPORT_SYMBOL_GPL(rh_init);
327 327
328 /* Attach a free memory region, coalescing adjacent regions */ 328 /* Attach a free memory region, coalescing adjacent regions */
329 int rh_attach_region(rh_info_t * info, unsigned long start, int size) 329 int rh_attach_region(rh_info_t * info, unsigned long start, int size)
330 { 330 {
331 rh_block_t *blk; 331 rh_block_t *blk;
332 unsigned long s, e, m; 332 unsigned long s, e, m;
333 int r; 333 int r;
334 334
335 /* The region must be aligned */ 335 /* The region must be aligned */
336 s = start; 336 s = start;
337 e = s + size; 337 e = s + size;
338 m = info->alignment - 1; 338 m = info->alignment - 1;
339 339
340 /* Round start up */ 340 /* Round start up */
341 s = (s + m) & ~m; 341 s = (s + m) & ~m;
342 342
343 /* Round end down */ 343 /* Round end down */
344 e = e & ~m; 344 e = e & ~m;
345 345
346 if (IS_ERR_VALUE(e) || (e < s)) 346 if (IS_ERR_VALUE(e) || (e < s))
347 return -ERANGE; 347 return -ERANGE;
348 348
349 /* Take final values */ 349 /* Take final values */
350 start = s; 350 start = s;
351 size = e - s; 351 size = e - s;
352 352
353 /* Grow the blocks, if needed */ 353 /* Grow the blocks, if needed */
354 r = assure_empty(info, 1); 354 r = assure_empty(info, 1);
355 if (r < 0) 355 if (r < 0)
356 return r; 356 return r;
357 357
358 blk = get_slot(info); 358 blk = get_slot(info);
359 blk->start = start; 359 blk->start = start;
360 blk->size = size; 360 blk->size = size;
361 blk->owner = NULL; 361 blk->owner = NULL;
362 362
363 attach_free_block(info, blk); 363 attach_free_block(info, blk);
364 364
365 return 0; 365 return 0;
366 } 366 }
367 EXPORT_SYMBOL_GPL(rh_attach_region); 367 EXPORT_SYMBOL_GPL(rh_attach_region);
368 368
369 /* Detach the given address range, splitting a free block if needed. */ 369 /* Detach the given address range, splitting a free block if needed. */
370 unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size) 370 unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size)
371 { 371 {
372 struct list_head *l; 372 struct list_head *l;
373 rh_block_t *blk, *newblk; 373 rh_block_t *blk, *newblk;
374 unsigned long s, e, m, bs, be; 374 unsigned long s, e, m, bs, be;
375 375
376 /* Validate size */ 376 /* Validate size */
377 if (size <= 0) 377 if (size <= 0)
378 return (unsigned long) -EINVAL; 378 return (unsigned long) -EINVAL;
379 379
380 /* The region must be aligned */ 380 /* The region must be aligned */
381 s = start; 381 s = start;
382 e = s + size; 382 e = s + size;
383 m = info->alignment - 1; 383 m = info->alignment - 1;
384 384
385 /* Round start up */ 385 /* Round start up */
386 s = (s + m) & ~m; 386 s = (s + m) & ~m;
387 387
388 /* Round end down */ 388 /* Round end down */
389 e = e & ~m; 389 e = e & ~m;
390 390
391 if (assure_empty(info, 1) < 0) 391 if (assure_empty(info, 1) < 0)
392 return (unsigned long) -ENOMEM; 392 return (unsigned long) -ENOMEM;
393 393
394 blk = NULL; 394 blk = NULL;
395 list_for_each(l, &info->free_list) { 395 list_for_each(l, &info->free_list) {
396 blk = list_entry(l, rh_block_t, list); 396 blk = list_entry(l, rh_block_t, list);
397 /* The range must lie entirely inside one free block */ 397 /* The range must lie entirely inside one free block */
398 bs = blk->start; 398 bs = blk->start;
399 be = blk->start + blk->size; 399 be = blk->start + blk->size;
400 if (s >= bs && e <= be) 400 if (s >= bs && e <= be)
401 break; 401 break;
402 blk = NULL; 402 blk = NULL;
403 } 403 }
404 404
405 if (blk == NULL) 405 if (blk == NULL)
406 return (unsigned long) -ENOMEM; 406 return (unsigned long) -ENOMEM;
407 407
408 /* Perfect fit */ 408 /* Perfect fit */
409 if (bs == s && be == e) { 409 if (bs == s && be == e) {
410 /* Delete from free list, release slot */ 410 /* Delete from free list, release slot */
411 list_del(&blk->list); 411 list_del(&blk->list);
412 release_slot(info, blk); 412 release_slot(info, blk);
413 return s; 413 return s;
414 } 414 }
415 415
416 /* blk still in free list, with updated start and/or size */ 416 /* blk still in free list, with updated start and/or size */
417 if (bs == s || be == e) { 417 if (bs == s || be == e) {
418 if (bs == s) 418 if (bs == s)
419 blk->start += size; 419 blk->start += size;
420 blk->size -= size; 420 blk->size -= size;
421 421
422 } else { 422 } else {
423 /* The front free fragment */ 423 /* The front free fragment */
424 blk->size = s - bs; 424 blk->size = s - bs;
425 425
426 /* the back free fragment */ 426 /* the back free fragment */
427 newblk = get_slot(info); 427 newblk = get_slot(info);
428 newblk->start = e; 428 newblk->start = e;
429 newblk->size = be - e; 429 newblk->size = be - e;
430 430
431 list_add(&newblk->list, &blk->list); 431 list_add(&newblk->list, &blk->list);
432 } 432 }
433 433
434 return s; 434 return s;
435 } 435 }
436 EXPORT_SYMBOL_GPL(rh_detach_region); 436 EXPORT_SYMBOL_GPL(rh_detach_region);
437 437
438 /* Allocate a block of memory at the specified alignment. The value returned 438 /* Allocate a block of memory at the specified alignment. The value returned
439 * is an offset into the buffer initialized by rh_init(), or a negative number 439 * is an offset into the buffer initialized by rh_init(), or a negative number
440 * if there is an error. 440 * if there is an error.
441 */ 441 */
442 unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const char *owner) 442 unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const char *owner)
443 { 443 {
444 struct list_head *l; 444 struct list_head *l;
445 rh_block_t *blk; 445 rh_block_t *blk;
446 rh_block_t *newblk; 446 rh_block_t *newblk;
447 unsigned long start, sp_size; 447 unsigned long start, sp_size;
448 448
449 /* Validate size, and alignment must be power of two */ 449 /* Validate size, and alignment must be power of two */
450 if (size <= 0 || (alignment & (alignment - 1)) != 0) 450 if (size <= 0 || (alignment & (alignment - 1)) != 0)
451 return (unsigned long) -EINVAL; 451 return (unsigned long) -EINVAL;
452 452
453 /* Align to configured alignment */ 453 /* Align to configured alignment */
454 size = (size + (info->alignment - 1)) & ~(info->alignment - 1); 454 size = (size + (info->alignment - 1)) & ~(info->alignment - 1);
455 455
456 if (assure_empty(info, 2) < 0) 456 if (assure_empty(info, 2) < 0)
457 return (unsigned long) -ENOMEM; 457 return (unsigned long) -ENOMEM;
458 458
459 blk = NULL; 459 blk = NULL;
460 list_for_each(l, &info->free_list) { 460 list_for_each(l, &info->free_list) {
461 blk = list_entry(l, rh_block_t, list); 461 blk = list_entry(l, rh_block_t, list);
462 if (size <= blk->size) { 462 if (size <= blk->size) {
463 start = (blk->start + alignment - 1) & ~(alignment - 1); 463 start = (blk->start + alignment - 1) & ~(alignment - 1);
464 if (start + size <= blk->start + blk->size) 464 if (start + size <= blk->start + blk->size)
465 break; 465 break;
466 } 466 }
467 blk = NULL; 467 blk = NULL;
468 } 468 }
469 469
470 if (blk == NULL) 470 if (blk == NULL)
471 return (unsigned long) -ENOMEM; 471 return (unsigned long) -ENOMEM;
472 472
473 /* Just fits */ 473 /* Just fits */
474 if (blk->size == size) { 474 if (blk->size == size) {
475 /* Move from free list to taken list */ 475 /* Move from free list to taken list */
476 list_del(&blk->list); 476 list_del(&blk->list);
477 newblk = blk; 477 newblk = blk;
478 } else { 478 } else {
479 /* Fragment caused, split if needed */ 479 /* Fragment caused, split if needed */
480 /* Create block for fragment in the beginning */ 480 /* Create block for fragment in the beginning */
481 sp_size = start - blk->start; 481 sp_size = start - blk->start;
482 if (sp_size) { 482 if (sp_size) {
483 rh_block_t *spblk; 483 rh_block_t *spblk;
484 484
485 spblk = get_slot(info); 485 spblk = get_slot(info);
486 spblk->start = blk->start; 486 spblk->start = blk->start;
487 spblk->size = sp_size; 487 spblk->size = sp_size;
488 /* add before the blk */ 488 /* add before the blk */
489 list_add(&spblk->list, blk->list.prev); 489 list_add(&spblk->list, blk->list.prev);
490 } 490 }
491 newblk = get_slot(info); 491 newblk = get_slot(info);
492 newblk->start = start; 492 newblk->start = start;
493 newblk->size = size; 493 newblk->size = size;
494 494
495 /* blk still in free list, with updated start and size 495 /* blk still in free list, with updated start and size
496 * for fragment in the end */ 496 * for fragment in the end */
497 blk->start = start + size; 497 blk->start = start + size;
498 blk->size -= sp_size + size; 498 blk->size -= sp_size + size;
499 /* No fragment in the end, remove blk */ 499 /* No fragment in the end, remove blk */
500 if (blk->size == 0) { 500 if (blk->size == 0) {
501 list_del(&blk->list); 501 list_del(&blk->list);
502 release_slot(info, blk); 502 release_slot(info, blk);
503 } 503 }
504 } 504 }
505 505
506 newblk->owner = owner; 506 newblk->owner = owner;
507 attach_taken_block(info, newblk); 507 attach_taken_block(info, newblk);
508 508
509 return start; 509 return start;
510 } 510 }
511 EXPORT_SYMBOL_GPL(rh_alloc_align); 511 EXPORT_SYMBOL_GPL(rh_alloc_align);
512 512
513 /* Allocate a block of memory at the default alignment. The value returned is 513 /* Allocate a block of memory at the default alignment. The value returned is
514 * an offset into the buffer initialized by rh_init(), or a negative number if 514 * an offset into the buffer initialized by rh_init(), or a negative number if
515 * there is an error. 515 * there is an error.
516 */ 516 */
517 unsigned long rh_alloc(rh_info_t * info, int size, const char *owner) 517 unsigned long rh_alloc(rh_info_t * info, int size, const char *owner)
518 { 518 {
519 return rh_alloc_align(info, size, info->alignment, owner); 519 return rh_alloc_align(info, size, info->alignment, owner);
520 } 520 }
521 EXPORT_SYMBOL_GPL(rh_alloc); 521 EXPORT_SYMBOL_GPL(rh_alloc);
522 522
523 /* Allocate a block of memory at the given offset, rounded up to the default 523 /* Allocate a block of memory at the given offset, rounded up to the default
524 * alignment. The value returned is an offset into the buffer initialized by 524 * alignment. The value returned is an offset into the buffer initialized by
525 * rh_init(), or a negative number if there is an error. 525 * rh_init(), or a negative number if there is an error.
526 */ 526 */
527 unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size, const char *owner) 527 unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size, const char *owner)
528 { 528 {
529 struct list_head *l; 529 struct list_head *l;
530 rh_block_t *blk, *newblk1, *newblk2; 530 rh_block_t *blk, *newblk1, *newblk2;
531 unsigned long s, e, m, bs = 0, be = 0; 531 unsigned long s, e, m, bs = 0, be = 0;
532 532
533 /* Validate size */ 533 /* Validate size */
534 if (size <= 0) 534 if (size <= 0)
535 return (unsigned long) -EINVAL; 535 return (unsigned long) -EINVAL;
536 536
537 /* The region must be aligned */ 537 /* The region must be aligned */
538 s = start; 538 s = start;
539 e = s + size; 539 e = s + size;
540 m = info->alignment - 1; 540 m = info->alignment - 1;
541 541
542 /* Round start up */ 542 /* Round start up */
543 s = (s + m) & ~m; 543 s = (s + m) & ~m;
544 544
545 /* Round end down */ 545 /* Round end down */
546 e = e & ~m; 546 e = e & ~m;
547 547
548 if (assure_empty(info, 2) < 0) 548 if (assure_empty(info, 2) < 0)
549 return (unsigned long) -ENOMEM; 549 return (unsigned long) -ENOMEM;
550 550
551 blk = NULL; 551 blk = NULL;
552 list_for_each(l, &info->free_list) { 552 list_for_each(l, &info->free_list) {
553 blk = list_entry(l, rh_block_t, list); 553 blk = list_entry(l, rh_block_t, list);
554 /* The range must lie entirely inside one free block */ 554 /* The range must lie entirely inside one free block */
555 bs = blk->start; 555 bs = blk->start;
556 be = blk->start + blk->size; 556 be = blk->start + blk->size;
557 if (s >= bs && e <= be) 557 if (s >= bs && e <= be)
558 break; 558 break;
559 blk = NULL; 559 blk = NULL;
560 } 560 }
561 561
562 if (blk == NULL) 562 if (blk == NULL)
563 return (unsigned long) -ENOMEM; 563 return (unsigned long) -ENOMEM;
564 564
565 /* Perfect fit */ 565 /* Perfect fit */
566 if (bs == s && be == e) { 566 if (bs == s && be == e) {
567 /* Move from free list to taken list */ 567 /* Move from free list to taken list */
568 list_del(&blk->list); 568 list_del(&blk->list);
569 blk->owner = owner; 569 blk->owner = owner;
570 570
571 start = blk->start; 571 start = blk->start;
572 attach_taken_block(info, blk); 572 attach_taken_block(info, blk);
573 573
574 return start; 574 return start;
575 575
576 } 576 }
577 577
578 /* blk still in free list, with updated start and/or size */ 578 /* blk still in free list, with updated start and/or size */
579 if (bs == s || be == e) { 579 if (bs == s || be == e) {
580 if (bs == s) 580 if (bs == s)
581 blk->start += size; 581 blk->start += size;
582 blk->size -= size; 582 blk->size -= size;
583 583
584 } else { 584 } else {
585 /* The front free fragment */ 585 /* The front free fragment */
586 blk->size = s - bs; 586 blk->size = s - bs;
587 587
588 /* The back free fragment */ 588 /* The back free fragment */
589 newblk2 = get_slot(info); 589 newblk2 = get_slot(info);
590 newblk2->start = e; 590 newblk2->start = e;
591 newblk2->size = be - e; 591 newblk2->size = be - e;
592 592
593 list_add(&newblk2->list, &blk->list); 593 list_add(&newblk2->list, &blk->list);
594 } 594 }
595 595
596 newblk1 = get_slot(info); 596 newblk1 = get_slot(info);
597 newblk1->start = s; 597 newblk1->start = s;
598 newblk1->size = e - s; 598 newblk1->size = e - s;
599 newblk1->owner = owner; 599 newblk1->owner = owner;
600 600
601 start = newblk1->start; 601 start = newblk1->start;
602 attach_taken_block(info, newblk1); 602 attach_taken_block(info, newblk1);
603 603
604 return start; 604 return start;
605 } 605 }
606 EXPORT_SYMBOL_GPL(rh_alloc_fixed); 606 EXPORT_SYMBOL_GPL(rh_alloc_fixed);
607 607
608 /* Deallocate the memory previously allocated by one of the rh_alloc functions. 608 /* Deallocate the memory previously allocated by one of the rh_alloc functions.
609 * The return value is the size of the deallocated block, or a negative number 609 * The return value is the size of the deallocated block, or a negative number
610 * if there is an error. 610 * if there is an error.
611 */ 611 */
612 int rh_free(rh_info_t * info, unsigned long start) 612 int rh_free(rh_info_t * info, unsigned long start)
613 { 613 {
614 rh_block_t *blk, *blk2; 614 rh_block_t *blk, *blk2;
615 struct list_head *l; 615 struct list_head *l;
616 int size; 616 int size;
617 617
618 /* Linear search for block */ 618 /* Linear search for block */
619 blk = NULL; 619 blk = NULL;
620 list_for_each(l, &info->taken_list) { 620 list_for_each(l, &info->taken_list) {
621 blk2 = list_entry(l, rh_block_t, list); 621 blk2 = list_entry(l, rh_block_t, list);
622 if (start < blk2->start) 622 if (start < blk2->start)
623 break; 623 break;
624 blk = blk2; 624 blk = blk2;
625 } 625 }
626 626
627 if (blk == NULL || start > (blk->start + blk->size)) 627 if (blk == NULL || start > (blk->start + blk->size))
628 return -EINVAL; 628 return -EINVAL;
629 629
630 /* Remove from taken list */ 630 /* Remove from taken list */
631 list_del(&blk->list); 631 list_del(&blk->list);
632 632
633 /* Get size of freed block */ 633 /* Get size of freed block */
634 size = blk->size; 634 size = blk->size;
635 attach_free_block(info, blk); 635 attach_free_block(info, blk);
636 636
637 return size; 637 return size;
638 } 638 }
639 EXPORT_SYMBOL_GPL(rh_free); 639 EXPORT_SYMBOL_GPL(rh_free);
640 640
641 int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats) 641 int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats)
642 { 642 {
643 rh_block_t *blk; 643 rh_block_t *blk;
644 struct list_head *l; 644 struct list_head *l;
645 struct list_head *h; 645 struct list_head *h;
646 int nr; 646 int nr;
647 647
648 switch (what) { 648 switch (what) {
649 649
650 case RHGS_FREE: 650 case RHGS_FREE:
651 h = &info->free_list; 651 h = &info->free_list;
652 break; 652 break;
653 653
654 case RHGS_TAKEN: 654 case RHGS_TAKEN:
655 h = &info->taken_list; 655 h = &info->taken_list;
656 break; 656 break;
657 657
658 default: 658 default:
659 return -EINVAL; 659 return -EINVAL;
660 } 660 }
661 661
662 /* Linear search for block */ 662 /* Linear search for block */
663 nr = 0; 663 nr = 0;
664 list_for_each(l, h) { 664 list_for_each(l, h) {
665 blk = list_entry(l, rh_block_t, list); 665 blk = list_entry(l, rh_block_t, list);
666 if (stats != NULL && nr < max_stats) { 666 if (stats != NULL && nr < max_stats) {
667 stats->start = blk->start; 667 stats->start = blk->start;
668 stats->size = blk->size; 668 stats->size = blk->size;
669 stats->owner = blk->owner; 669 stats->owner = blk->owner;
670 stats++; 670 stats++;
671 } 671 }
672 nr++; 672 nr++;
673 } 673 }
674 674
675 return nr; 675 return nr;
676 } 676 }
677 EXPORT_SYMBOL_GPL(rh_get_stats); 677 EXPORT_SYMBOL_GPL(rh_get_stats);
678 678
679 int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner) 679 int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner)
680 { 680 {
681 rh_block_t *blk, *blk2; 681 rh_block_t *blk, *blk2;
682 struct list_head *l; 682 struct list_head *l;
683 int size; 683 int size;
684 684
685 /* Linear search for block */ 685 /* Linear search for block */
686 blk = NULL; 686 blk = NULL;
687 list_for_each(l, &info->taken_list) { 687 list_for_each(l, &info->taken_list) {
688 blk2 = list_entry(l, rh_block_t, list); 688 blk2 = list_entry(l, rh_block_t, list);
689 if (start < blk2->start) 689 if (start < blk2->start)
690 break; 690 break;
691 blk = blk2; 691 blk = blk2;
692 } 692 }
693 693
694 if (blk == NULL || start > (blk->start + blk->size)) 694 if (blk == NULL || start > (blk->start + blk->size))
695 return -EINVAL; 695 return -EINVAL;
696 696
697 blk->owner = owner; 697 blk->owner = owner;
698 size = blk->size; 698 size = blk->size;
699 699
700 return size; 700 return size;
701 } 701 }
702 EXPORT_SYMBOL_GPL(rh_set_owner); 702 EXPORT_SYMBOL_GPL(rh_set_owner);
703 703
704 void rh_dump(rh_info_t * info) 704 void rh_dump(rh_info_t * info)
705 { 705 {
706 static rh_stats_t st[32]; /* XXX maximum 32 blocks */ 706 static rh_stats_t st[32]; /* XXX maximum 32 blocks */
707 int maxnr; 707 int maxnr;
708 int i, nr; 708 int i, nr;
709 709
710 maxnr = ARRAY_SIZE(st); 710 maxnr = ARRAY_SIZE(st);
711 711
712 printk(KERN_INFO 712 printk(KERN_INFO
713 "info @0x%p (%d slots empty / %d max)\n", 713 "info @0x%p (%d slots empty / %d max)\n",
714 info, info->empty_slots, info->max_blocks); 714 info, info->empty_slots, info->max_blocks);
715 715
716 printk(KERN_INFO " Free:\n"); 716 printk(KERN_INFO " Free:\n");
717 nr = rh_get_stats(info, RHGS_FREE, maxnr, st); 717 nr = rh_get_stats(info, RHGS_FREE, maxnr, st);
718 if (nr > maxnr) 718 if (nr > maxnr)
719 nr = maxnr; 719 nr = maxnr;
720 for (i = 0; i < nr; i++) 720 for (i = 0; i < nr; i++)
721 printk(KERN_INFO 721 printk(KERN_INFO
722 " 0x%lx-0x%lx (%u)\n", 722 " 0x%lx-0x%lx (%u)\n",
723 st[i].start, st[i].start + st[i].size, 723 st[i].start, st[i].start + st[i].size,
724 st[i].size); 724 st[i].size);
725 printk(KERN_INFO "\n"); 725 printk(KERN_INFO "\n");
726 726
727 printk(KERN_INFO " Taken:\n"); 727 printk(KERN_INFO " Taken:\n");
728 nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st); 728 nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st);
729 if (nr > maxnr) 729 if (nr > maxnr)
730 nr = maxnr; 730 nr = maxnr;
731 for (i = 0; i < nr; i++) 731 for (i = 0; i < nr; i++)
732 printk(KERN_INFO 732 printk(KERN_INFO
733 " 0x%lx-0x%lx (%u) %s\n", 733 " 0x%lx-0x%lx (%u) %s\n",
734 st[i].start, st[i].start + st[i].size, 734 st[i].start, st[i].start + st[i].size,
735 st[i].size, st[i].owner != NULL ? st[i].owner : ""); 735 st[i].size, st[i].owner != NULL ? st[i].owner : "");
736 printk(KERN_INFO "\n"); 736 printk(KERN_INFO "\n");
737 } 737 }
738 EXPORT_SYMBOL_GPL(rh_dump); 738 EXPORT_SYMBOL_GPL(rh_dump);
739 739
740 void rh_dump_blk(rh_info_t * info, rh_block_t * blk) 740 void rh_dump_blk(rh_info_t * info, rh_block_t * blk)
741 { 741 {
742 printk(KERN_INFO 742 printk(KERN_INFO
743 "blk @0x%p: 0x%lx-0x%lx (%u)\n", 743 "blk @0x%p: 0x%lx-0x%lx (%u)\n",
744 blk, blk->start, blk->start + blk->size, blk->size); 744 blk, blk->start, blk->start + blk->size, blk->size);
745 } 745 }
746 EXPORT_SYMBOL_GPL(rh_dump_blk); 746 EXPORT_SYMBOL_GPL(rh_dump_blk);
747 747
748 748
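
Taken together, the rheap entry points exported above (rh_create(), rh_attach_region(), rh_alloc(), rh_free(), rh_destroy()) are used along the following lines. This is a hedged, hypothetical sketch based only on the signatures shown in this file; the region size and the "my driver" owner string are invented, and it is not code from this commit.

	#include <linux/err.h>
	#include <asm/rheap.h>

	static int example_rheap_use(void)
	{
		rh_info_t *heap;
		unsigned long off;

		heap = rh_create(8);			/* alignment must be a power of two */
		if (IS_ERR(heap))
			return PTR_ERR(heap);

		/* hand the heap a region to manage; it deals in offsets, not pointers */
		rh_attach_region(heap, 0, 0x4000);

		off = rh_alloc(heap, 256, "my driver");	/* offset, or a negative errno value */
		if (!IS_ERR_VALUE(off))
			rh_free(heap, off);

		rh_destroy(heap);
		return 0;
	}

Note that rh_alloc() and friends return offsets into the attached region; callers translate them to addresses in the uncached or otherwise special memory themselves, which is the point of the "remote" heap.
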
arch/powerpc/mm/mem.c
1 /* 1 /*
2 * PowerPC version 2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * 4 *
5 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) 5 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
6 * and Cort Dougan (PReP) (cort@cs.nmt.edu) 6 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
7 * Copyright (C) 1996 Paul Mackerras 7 * Copyright (C) 1996 Paul Mackerras
8 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com) 8 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
9 * 9 *
10 * Derived from "arch/i386/mm/init.c" 10 * Derived from "arch/i386/mm/init.c"
11 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 11 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
12 * 12 *
13 * This program is free software; you can redistribute it and/or 13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License 14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version 15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version. 16 * 2 of the License, or (at your option) any later version.
17 * 17 *
18 */ 18 */
19 19
20 #include <linux/module.h> 20 #include <linux/export.h>
21 #include <linux/sched.h> 21 #include <linux/sched.h>
22 #include <linux/kernel.h> 22 #include <linux/kernel.h>
23 #include <linux/errno.h> 23 #include <linux/errno.h>
24 #include <linux/string.h> 24 #include <linux/string.h>
25 #include <linux/gfp.h> 25 #include <linux/gfp.h>
26 #include <linux/types.h> 26 #include <linux/types.h>
27 #include <linux/mm.h> 27 #include <linux/mm.h>
28 #include <linux/stddef.h> 28 #include <linux/stddef.h>
29 #include <linux/init.h> 29 #include <linux/init.h>
30 #include <linux/bootmem.h> 30 #include <linux/bootmem.h>
31 #include <linux/highmem.h> 31 #include <linux/highmem.h>
32 #include <linux/initrd.h> 32 #include <linux/initrd.h>
33 #include <linux/pagemap.h> 33 #include <linux/pagemap.h>
34 #include <linux/suspend.h> 34 #include <linux/suspend.h>
35 #include <linux/memblock.h> 35 #include <linux/memblock.h>
36 #include <linux/hugetlb.h> 36 #include <linux/hugetlb.h>
37 37
38 #include <asm/pgalloc.h> 38 #include <asm/pgalloc.h>
39 #include <asm/prom.h> 39 #include <asm/prom.h>
40 #include <asm/io.h> 40 #include <asm/io.h>
41 #include <asm/mmu_context.h> 41 #include <asm/mmu_context.h>
42 #include <asm/pgtable.h> 42 #include <asm/pgtable.h>
43 #include <asm/mmu.h> 43 #include <asm/mmu.h>
44 #include <asm/smp.h> 44 #include <asm/smp.h>
45 #include <asm/machdep.h> 45 #include <asm/machdep.h>
46 #include <asm/btext.h> 46 #include <asm/btext.h>
47 #include <asm/tlb.h> 47 #include <asm/tlb.h>
48 #include <asm/sections.h> 48 #include <asm/sections.h>
49 #include <asm/sparsemem.h> 49 #include <asm/sparsemem.h>
50 #include <asm/vdso.h> 50 #include <asm/vdso.h>
51 #include <asm/fixmap.h> 51 #include <asm/fixmap.h>
52 #include <asm/swiotlb.h> 52 #include <asm/swiotlb.h>
53 53
54 #include "mmu_decl.h" 54 #include "mmu_decl.h"
55 55
56 #ifndef CPU_FTR_COHERENT_ICACHE 56 #ifndef CPU_FTR_COHERENT_ICACHE
57 #define CPU_FTR_COHERENT_ICACHE 0 /* XXX for now */ 57 #define CPU_FTR_COHERENT_ICACHE 0 /* XXX for now */
58 #define CPU_FTR_NOEXECUTE 0 58 #define CPU_FTR_NOEXECUTE 0
59 #endif 59 #endif
60 60
61 int init_bootmem_done; 61 int init_bootmem_done;
62 int mem_init_done; 62 int mem_init_done;
63 phys_addr_t memory_limit; 63 phys_addr_t memory_limit;
64 64
65 #ifdef CONFIG_HIGHMEM 65 #ifdef CONFIG_HIGHMEM
66 pte_t *kmap_pte; 66 pte_t *kmap_pte;
67 pgprot_t kmap_prot; 67 pgprot_t kmap_prot;
68 68
69 EXPORT_SYMBOL(kmap_prot); 69 EXPORT_SYMBOL(kmap_prot);
70 EXPORT_SYMBOL(kmap_pte); 70 EXPORT_SYMBOL(kmap_pte);
71 71
72 static inline pte_t *virt_to_kpte(unsigned long vaddr) 72 static inline pte_t *virt_to_kpte(unsigned long vaddr)
73 { 73 {
74 return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), 74 return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
75 vaddr), vaddr), vaddr); 75 vaddr), vaddr), vaddr);
76 } 76 }
77 #endif 77 #endif
78 78
79 int page_is_ram(unsigned long pfn) 79 int page_is_ram(unsigned long pfn)
80 { 80 {
81 #ifndef CONFIG_PPC64 /* XXX for now */ 81 #ifndef CONFIG_PPC64 /* XXX for now */
82 return pfn < max_pfn; 82 return pfn < max_pfn;
83 #else 83 #else
84 unsigned long paddr = (pfn << PAGE_SHIFT); 84 unsigned long paddr = (pfn << PAGE_SHIFT);
85 struct memblock_region *reg; 85 struct memblock_region *reg;
86 86
87 for_each_memblock(memory, reg) 87 for_each_memblock(memory, reg)
88 if (paddr >= reg->base && paddr < (reg->base + reg->size)) 88 if (paddr >= reg->base && paddr < (reg->base + reg->size))
89 return 1; 89 return 1;
90 return 0; 90 return 0;
91 #endif 91 #endif
92 } 92 }
93 93
94 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 94 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
95 unsigned long size, pgprot_t vma_prot) 95 unsigned long size, pgprot_t vma_prot)
96 { 96 {
97 if (ppc_md.phys_mem_access_prot) 97 if (ppc_md.phys_mem_access_prot)
98 return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot); 98 return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);
99 99
100 if (!page_is_ram(pfn)) 100 if (!page_is_ram(pfn))
101 vma_prot = pgprot_noncached(vma_prot); 101 vma_prot = pgprot_noncached(vma_prot);
102 102
103 return vma_prot; 103 return vma_prot;
104 } 104 }
105 EXPORT_SYMBOL(phys_mem_access_prot); 105 EXPORT_SYMBOL(phys_mem_access_prot);
106 106
107 #ifdef CONFIG_MEMORY_HOTPLUG 107 #ifdef CONFIG_MEMORY_HOTPLUG
108 108
109 #ifdef CONFIG_NUMA 109 #ifdef CONFIG_NUMA
110 int memory_add_physaddr_to_nid(u64 start) 110 int memory_add_physaddr_to_nid(u64 start)
111 { 111 {
112 return hot_add_scn_to_nid(start); 112 return hot_add_scn_to_nid(start);
113 } 113 }
114 #endif 114 #endif
115 115
116 int arch_add_memory(int nid, u64 start, u64 size) 116 int arch_add_memory(int nid, u64 start, u64 size)
117 { 117 {
118 struct pglist_data *pgdata; 118 struct pglist_data *pgdata;
119 struct zone *zone; 119 struct zone *zone;
120 unsigned long start_pfn = start >> PAGE_SHIFT; 120 unsigned long start_pfn = start >> PAGE_SHIFT;
121 unsigned long nr_pages = size >> PAGE_SHIFT; 121 unsigned long nr_pages = size >> PAGE_SHIFT;
122 122
123 pgdata = NODE_DATA(nid); 123 pgdata = NODE_DATA(nid);
124 124
125 start = (unsigned long)__va(start); 125 start = (unsigned long)__va(start);
126 create_section_mapping(start, start + size); 126 create_section_mapping(start, start + size);
127 127
128 /* this should work for most non-highmem platforms */ 128 /* this should work for most non-highmem platforms */
129 zone = pgdata->node_zones; 129 zone = pgdata->node_zones;
130 130
131 return __add_pages(nid, zone, start_pfn, nr_pages); 131 return __add_pages(nid, zone, start_pfn, nr_pages);
132 } 132 }
133 #endif /* CONFIG_MEMORY_HOTPLUG */ 133 #endif /* CONFIG_MEMORY_HOTPLUG */
134 134
135 /* 135 /*
136 * walk_memory_resource() needs to make sure there are no holes in a given 136 * walk_memory_resource() needs to make sure there are no holes in a given
137 * memory range. PPC64 does not maintain the memory layout in /proc/iomem. 137 * memory range. PPC64 does not maintain the memory layout in /proc/iomem.
138 * Instead it maintains it in memblock.memory structures. Walk through the 138 * Instead it maintains it in memblock.memory structures. Walk through the
139 * memory regions, find holes and callback for contiguous regions. 139 * memory regions, find holes and callback for contiguous regions.
140 */ 140 */
141 int 141 int
142 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, 142 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
143 void *arg, int (*func)(unsigned long, unsigned long, void *)) 143 void *arg, int (*func)(unsigned long, unsigned long, void *))
144 { 144 {
145 struct memblock_region *reg; 145 struct memblock_region *reg;
146 unsigned long end_pfn = start_pfn + nr_pages; 146 unsigned long end_pfn = start_pfn + nr_pages;
147 unsigned long tstart, tend; 147 unsigned long tstart, tend;
148 int ret = -1; 148 int ret = -1;
149 149
150 for_each_memblock(memory, reg) { 150 for_each_memblock(memory, reg) {
151 tstart = max(start_pfn, memblock_region_memory_base_pfn(reg)); 151 tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
152 tend = min(end_pfn, memblock_region_memory_end_pfn(reg)); 152 tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
153 if (tstart >= tend) 153 if (tstart >= tend)
154 continue; 154 continue;
155 ret = (*func)(tstart, tend - tstart, arg); 155 ret = (*func)(tstart, tend - tstart, arg);
156 if (ret) 156 if (ret)
157 break; 157 break;
158 } 158 }
159 return ret; 159 return ret;
160 } 160 }
161 EXPORT_SYMBOL_GPL(walk_system_ram_range); 161 EXPORT_SYMBOL_GPL(walk_system_ram_range);
162 162
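	/* Editor's sketch (hypothetical, not part of this commit): walk_system_ram_range()
	 * above invokes the supplied callback once per contiguous chunk of RAM that
	 * overlaps [start_pfn, start_pfn + nr_pages), passing (chunk_start_pfn,
	 * chunk_nr_pages, arg); a non-zero return from the callback stops the walk.
	 * count_ram_pages() below is an invented example callback.
	 */
	static int count_ram_pages(unsigned long start_pfn, unsigned long nr_pages, void *arg)
	{
		unsigned long *total = arg;

		*total += nr_pages;
		return 0;	/* keep walking */
	}

	static unsigned long ram_pages_in(unsigned long start_pfn, unsigned long nr_pages)
	{
		unsigned long total = 0;

		walk_system_ram_range(start_pfn, nr_pages, &total, count_ram_pages);
		return total;
	}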
163 /* 163 /*
164 * Initialize the bootmem system and give it all the memory we 164 * Initialize the bootmem system and give it all the memory we
165 * have available. If we are using highmem, we only put the 165 * have available. If we are using highmem, we only put the
166 * lowmem into the bootmem system. 166 * lowmem into the bootmem system.
167 */ 167 */
168 #ifndef CONFIG_NEED_MULTIPLE_NODES 168 #ifndef CONFIG_NEED_MULTIPLE_NODES
169 void __init do_init_bootmem(void) 169 void __init do_init_bootmem(void)
170 { 170 {
171 unsigned long start, bootmap_pages; 171 unsigned long start, bootmap_pages;
172 unsigned long total_pages; 172 unsigned long total_pages;
173 struct memblock_region *reg; 173 struct memblock_region *reg;
174 int boot_mapsize; 174 int boot_mapsize;
175 175
176 max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; 176 max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
177 total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT; 177 total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
178 #ifdef CONFIG_HIGHMEM 178 #ifdef CONFIG_HIGHMEM
179 total_pages = total_lowmem >> PAGE_SHIFT; 179 total_pages = total_lowmem >> PAGE_SHIFT;
180 max_low_pfn = lowmem_end_addr >> PAGE_SHIFT; 180 max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
181 #endif 181 #endif
182 182
183 /* 183 /*
184 * Find an area to use for the bootmem bitmap. Calculate the size of 184 * Find an area to use for the bootmem bitmap. Calculate the size of
185 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE. 185 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
186 * Add 1 additional page in case the address isn't page-aligned. 186 * Add 1 additional page in case the address isn't page-aligned.
187 */ 187 */
188 bootmap_pages = bootmem_bootmap_pages(total_pages); 188 bootmap_pages = bootmem_bootmap_pages(total_pages);
189 189
190 start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE); 190 start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
191 191
192 min_low_pfn = MEMORY_START >> PAGE_SHIFT; 192 min_low_pfn = MEMORY_START >> PAGE_SHIFT;
193 boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn); 193 boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);
194 194
195 /* Add active regions with valid PFNs */ 195 /* Add active regions with valid PFNs */
196 for_each_memblock(memory, reg) { 196 for_each_memblock(memory, reg) {
197 unsigned long start_pfn, end_pfn; 197 unsigned long start_pfn, end_pfn;
198 start_pfn = memblock_region_memory_base_pfn(reg); 198 start_pfn = memblock_region_memory_base_pfn(reg);
199 end_pfn = memblock_region_memory_end_pfn(reg); 199 end_pfn = memblock_region_memory_end_pfn(reg);
200 add_active_range(0, start_pfn, end_pfn); 200 add_active_range(0, start_pfn, end_pfn);
201 } 201 }
202 202
203 /* Add all physical memory to the bootmem map, mark each area 203 /* Add all physical memory to the bootmem map, mark each area
204 * present. 204 * present.
205 */ 205 */
206 #ifdef CONFIG_HIGHMEM 206 #ifdef CONFIG_HIGHMEM
207 free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT); 207 free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);
208 208
209 /* reserve the sections we're already using */ 209 /* reserve the sections we're already using */
210 for_each_memblock(reserved, reg) { 210 for_each_memblock(reserved, reg) {
211 unsigned long top = reg->base + reg->size - 1; 211 unsigned long top = reg->base + reg->size - 1;
212 if (top < lowmem_end_addr) 212 if (top < lowmem_end_addr)
213 reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); 213 reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
214 else if (reg->base < lowmem_end_addr) { 214 else if (reg->base < lowmem_end_addr) {
215 unsigned long trunc_size = lowmem_end_addr - reg->base; 215 unsigned long trunc_size = lowmem_end_addr - reg->base;
216 reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT); 216 reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
217 } 217 }
218 } 218 }
219 #else 219 #else
220 free_bootmem_with_active_regions(0, max_pfn); 220 free_bootmem_with_active_regions(0, max_pfn);
221 221
222 /* reserve the sections we're already using */ 222 /* reserve the sections we're already using */
223 for_each_memblock(reserved, reg) 223 for_each_memblock(reserved, reg)
224 reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); 224 reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
225 #endif 225 #endif
226 /* XXX need to clip this if using highmem? */ 226 /* XXX need to clip this if using highmem? */
227 sparse_memory_present_with_active_regions(0); 227 sparse_memory_present_with_active_regions(0);
228 228
229 init_bootmem_done = 1; 229 init_bootmem_done = 1;
230 } 230 }
231 231
232 /* mark pages that don't exist as nosave */ 232 /* mark pages that don't exist as nosave */
233 static int __init mark_nonram_nosave(void) 233 static int __init mark_nonram_nosave(void)
234 { 234 {
235 struct memblock_region *reg, *prev = NULL; 235 struct memblock_region *reg, *prev = NULL;
236 236
237 for_each_memblock(memory, reg) { 237 for_each_memblock(memory, reg) {
238 if (prev && 238 if (prev &&
239 memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg)) 239 memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
240 register_nosave_region(memblock_region_memory_end_pfn(prev), 240 register_nosave_region(memblock_region_memory_end_pfn(prev),
241 memblock_region_memory_base_pfn(reg)); 241 memblock_region_memory_base_pfn(reg));
242 prev = reg; 242 prev = reg;
243 } 243 }
244 return 0; 244 return 0;
245 } 245 }
246 246
247 /* 247 /*
248 * paging_init() sets up the page tables - in fact we've already done this. 248 * paging_init() sets up the page tables - in fact we've already done this.
249 */ 249 */
250 void __init paging_init(void) 250 void __init paging_init(void)
251 { 251 {
252 unsigned long long total_ram = memblock_phys_mem_size(); 252 unsigned long long total_ram = memblock_phys_mem_size();
253 phys_addr_t top_of_ram = memblock_end_of_DRAM(); 253 phys_addr_t top_of_ram = memblock_end_of_DRAM();
254 unsigned long max_zone_pfns[MAX_NR_ZONES]; 254 unsigned long max_zone_pfns[MAX_NR_ZONES];
255 255
256 #ifdef CONFIG_PPC32 256 #ifdef CONFIG_PPC32
257 unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1); 257 unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
258 unsigned long end = __fix_to_virt(FIX_HOLE); 258 unsigned long end = __fix_to_virt(FIX_HOLE);
259 259
260 for (; v < end; v += PAGE_SIZE) 260 for (; v < end; v += PAGE_SIZE)
261 map_page(v, 0, 0); /* XXX gross */ 261 map_page(v, 0, 0); /* XXX gross */
262 #endif 262 #endif
263 263
264 #ifdef CONFIG_HIGHMEM 264 #ifdef CONFIG_HIGHMEM
265 map_page(PKMAP_BASE, 0, 0); /* XXX gross */ 265 map_page(PKMAP_BASE, 0, 0); /* XXX gross */
266 pkmap_page_table = virt_to_kpte(PKMAP_BASE); 266 pkmap_page_table = virt_to_kpte(PKMAP_BASE);
267 267
268 kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN)); 268 kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
269 kmap_prot = PAGE_KERNEL; 269 kmap_prot = PAGE_KERNEL;
270 #endif /* CONFIG_HIGHMEM */ 270 #endif /* CONFIG_HIGHMEM */
271 271
272 printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n", 272 printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
273 (unsigned long long)top_of_ram, total_ram); 273 (unsigned long long)top_of_ram, total_ram);
274 printk(KERN_DEBUG "Memory hole size: %ldMB\n", 274 printk(KERN_DEBUG "Memory hole size: %ldMB\n",
275 (long int)((top_of_ram - total_ram) >> 20)); 275 (long int)((top_of_ram - total_ram) >> 20));
276 memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); 276 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
277 #ifdef CONFIG_HIGHMEM 277 #ifdef CONFIG_HIGHMEM
278 max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT; 278 max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
279 max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT; 279 max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
280 #else 280 #else
281 max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT; 281 max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
282 #endif 282 #endif
283 free_area_init_nodes(max_zone_pfns); 283 free_area_init_nodes(max_zone_pfns);
284 284
285 mark_nonram_nosave(); 285 mark_nonram_nosave();
286 } 286 }
287 #endif /* ! CONFIG_NEED_MULTIPLE_NODES */ 287 #endif /* ! CONFIG_NEED_MULTIPLE_NODES */
288 288
289 void __init mem_init(void) 289 void __init mem_init(void)
290 { 290 {
291 #ifdef CONFIG_NEED_MULTIPLE_NODES 291 #ifdef CONFIG_NEED_MULTIPLE_NODES
292 int nid; 292 int nid;
293 #endif 293 #endif
294 pg_data_t *pgdat; 294 pg_data_t *pgdat;
295 unsigned long i; 295 unsigned long i;
296 struct page *page; 296 struct page *page;
297 unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize; 297 unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
298 298
299 #ifdef CONFIG_SWIOTLB 299 #ifdef CONFIG_SWIOTLB
300 if (ppc_swiotlb_enable) 300 if (ppc_swiotlb_enable)
301 swiotlb_init(1); 301 swiotlb_init(1);
302 #endif 302 #endif
303 303
304 num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT; 304 num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT;
305 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); 305 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
306 306
307 #ifdef CONFIG_NEED_MULTIPLE_NODES 307 #ifdef CONFIG_NEED_MULTIPLE_NODES
308 for_each_online_node(nid) { 308 for_each_online_node(nid) {
309 if (NODE_DATA(nid)->node_spanned_pages != 0) { 309 if (NODE_DATA(nid)->node_spanned_pages != 0) {
310 printk("freeing bootmem node %d\n", nid); 310 printk("freeing bootmem node %d\n", nid);
311 totalram_pages += 311 totalram_pages +=
312 free_all_bootmem_node(NODE_DATA(nid)); 312 free_all_bootmem_node(NODE_DATA(nid));
313 } 313 }
314 } 314 }
315 #else 315 #else
316 max_mapnr = max_pfn; 316 max_mapnr = max_pfn;
317 totalram_pages += free_all_bootmem(); 317 totalram_pages += free_all_bootmem();
318 #endif 318 #endif
319 for_each_online_pgdat(pgdat) { 319 for_each_online_pgdat(pgdat) {
320 for (i = 0; i < pgdat->node_spanned_pages; i++) { 320 for (i = 0; i < pgdat->node_spanned_pages; i++) {
321 if (!pfn_valid(pgdat->node_start_pfn + i)) 321 if (!pfn_valid(pgdat->node_start_pfn + i))
322 continue; 322 continue;
323 page = pgdat_page_nr(pgdat, i); 323 page = pgdat_page_nr(pgdat, i);
324 if (PageReserved(page)) 324 if (PageReserved(page))
325 reservedpages++; 325 reservedpages++;
326 } 326 }
327 } 327 }
328 328
329 codesize = (unsigned long)&_sdata - (unsigned long)&_stext; 329 codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
330 datasize = (unsigned long)&_edata - (unsigned long)&_sdata; 330 datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
331 initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin; 331 initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
332 bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start; 332 bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
333 333
334 #ifdef CONFIG_HIGHMEM 334 #ifdef CONFIG_HIGHMEM
335 { 335 {
336 unsigned long pfn, highmem_mapnr; 336 unsigned long pfn, highmem_mapnr;
337 337
338 highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT; 338 highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
339 for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) { 339 for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
340 phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT; 340 phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
341 struct page *page = pfn_to_page(pfn); 341 struct page *page = pfn_to_page(pfn);
342 if (memblock_is_reserved(paddr)) 342 if (memblock_is_reserved(paddr))
343 continue; 343 continue;
344 ClearPageReserved(page); 344 ClearPageReserved(page);
345 init_page_count(page); 345 init_page_count(page);
346 __free_page(page); 346 __free_page(page);
347 totalhigh_pages++; 347 totalhigh_pages++;
348 reservedpages--; 348 reservedpages--;
349 } 349 }
350 totalram_pages += totalhigh_pages; 350 totalram_pages += totalhigh_pages;
351 printk(KERN_DEBUG "High memory: %luk\n", 351 printk(KERN_DEBUG "High memory: %luk\n",
352 totalhigh_pages << (PAGE_SHIFT-10)); 352 totalhigh_pages << (PAGE_SHIFT-10));
353 } 353 }
354 #endif /* CONFIG_HIGHMEM */ 354 #endif /* CONFIG_HIGHMEM */
355 355
356 #if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP) 356 #if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
357 /* 357 /*
358 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up 358 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
 359 * functions; do it here for the non-SMP case. 359 * functions; do it here for the non-SMP case.
360 */ 360 */
361 per_cpu(next_tlbcam_idx, smp_processor_id()) = 361 per_cpu(next_tlbcam_idx, smp_processor_id()) =
362 (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1; 362 (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
363 #endif 363 #endif
364 364
365 printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, " 365 printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
366 "%luk reserved, %luk data, %luk bss, %luk init)\n", 366 "%luk reserved, %luk data, %luk bss, %luk init)\n",
367 nr_free_pages() << (PAGE_SHIFT-10), 367 nr_free_pages() << (PAGE_SHIFT-10),
368 num_physpages << (PAGE_SHIFT-10), 368 num_physpages << (PAGE_SHIFT-10),
369 codesize >> 10, 369 codesize >> 10,
370 reservedpages << (PAGE_SHIFT-10), 370 reservedpages << (PAGE_SHIFT-10),
371 datasize >> 10, 371 datasize >> 10,
372 bsssize >> 10, 372 bsssize >> 10,
373 initsize >> 10); 373 initsize >> 10);
374 374
375 #ifdef CONFIG_PPC32 375 #ifdef CONFIG_PPC32
376 pr_info("Kernel virtual memory layout:\n"); 376 pr_info("Kernel virtual memory layout:\n");
377 pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP); 377 pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
378 #ifdef CONFIG_HIGHMEM 378 #ifdef CONFIG_HIGHMEM
379 pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n", 379 pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
380 PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP)); 380 PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
381 #endif /* CONFIG_HIGHMEM */ 381 #endif /* CONFIG_HIGHMEM */
382 #ifdef CONFIG_NOT_COHERENT_CACHE 382 #ifdef CONFIG_NOT_COHERENT_CACHE
383 pr_info(" * 0x%08lx..0x%08lx : consistent mem\n", 383 pr_info(" * 0x%08lx..0x%08lx : consistent mem\n",
384 IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE); 384 IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
385 #endif /* CONFIG_NOT_COHERENT_CACHE */ 385 #endif /* CONFIG_NOT_COHERENT_CACHE */
386 pr_info(" * 0x%08lx..0x%08lx : early ioremap\n", 386 pr_info(" * 0x%08lx..0x%08lx : early ioremap\n",
387 ioremap_bot, IOREMAP_TOP); 387 ioremap_bot, IOREMAP_TOP);
388 pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n", 388 pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
389 VMALLOC_START, VMALLOC_END); 389 VMALLOC_START, VMALLOC_END);
390 #endif /* CONFIG_PPC32 */ 390 #endif /* CONFIG_PPC32 */
391 391
392 mem_init_done = 1; 392 mem_init_done = 1;
393 } 393 }
394 394
395 void free_initmem(void) 395 void free_initmem(void)
396 { 396 {
397 unsigned long addr; 397 unsigned long addr;
398 398
399 ppc_md.progress = ppc_printk_progress; 399 ppc_md.progress = ppc_printk_progress;
400 400
401 addr = (unsigned long)__init_begin; 401 addr = (unsigned long)__init_begin;
402 for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) { 402 for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
403 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); 403 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
404 ClearPageReserved(virt_to_page(addr)); 404 ClearPageReserved(virt_to_page(addr));
405 init_page_count(virt_to_page(addr)); 405 init_page_count(virt_to_page(addr));
406 free_page(addr); 406 free_page(addr);
407 totalram_pages++; 407 totalram_pages++;
408 } 408 }
409 pr_info("Freeing unused kernel memory: %luk freed\n", 409 pr_info("Freeing unused kernel memory: %luk freed\n",
410 ((unsigned long)__init_end - 410 ((unsigned long)__init_end -
411 (unsigned long)__init_begin) >> 10); 411 (unsigned long)__init_begin) >> 10);
412 } 412 }
413 413
414 #ifdef CONFIG_BLK_DEV_INITRD 414 #ifdef CONFIG_BLK_DEV_INITRD
415 void __init free_initrd_mem(unsigned long start, unsigned long end) 415 void __init free_initrd_mem(unsigned long start, unsigned long end)
416 { 416 {
417 if (start >= end) 417 if (start >= end)
418 return; 418 return;
419 419
420 start = _ALIGN_DOWN(start, PAGE_SIZE); 420 start = _ALIGN_DOWN(start, PAGE_SIZE);
421 end = _ALIGN_UP(end, PAGE_SIZE); 421 end = _ALIGN_UP(end, PAGE_SIZE);
422 pr_info("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); 422 pr_info("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
423 423
424 for (; start < end; start += PAGE_SIZE) { 424 for (; start < end; start += PAGE_SIZE) {
425 ClearPageReserved(virt_to_page(start)); 425 ClearPageReserved(virt_to_page(start));
426 init_page_count(virt_to_page(start)); 426 init_page_count(virt_to_page(start));
427 free_page(start); 427 free_page(start);
428 totalram_pages++; 428 totalram_pages++;
429 } 429 }
430 } 430 }
431 #endif 431 #endif
432 432
433 /* 433 /*
434 * This is called when a page has been modified by the kernel. 434 * This is called when a page has been modified by the kernel.
435 * It just marks the page as not i-cache clean. We do the i-cache 435 * It just marks the page as not i-cache clean. We do the i-cache
436 * flush later when the page is given to a user process, if necessary. 436 * flush later when the page is given to a user process, if necessary.
437 */ 437 */
438 void flush_dcache_page(struct page *page) 438 void flush_dcache_page(struct page *page)
439 { 439 {
440 if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) 440 if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
441 return; 441 return;
442 /* avoid an atomic op if possible */ 442 /* avoid an atomic op if possible */
443 if (test_bit(PG_arch_1, &page->flags)) 443 if (test_bit(PG_arch_1, &page->flags))
444 clear_bit(PG_arch_1, &page->flags); 444 clear_bit(PG_arch_1, &page->flags);
445 } 445 }
446 EXPORT_SYMBOL(flush_dcache_page); 446 EXPORT_SYMBOL(flush_dcache_page);
447 447
448 void flush_dcache_icache_page(struct page *page) 448 void flush_dcache_icache_page(struct page *page)
449 { 449 {
450 #ifdef CONFIG_HUGETLB_PAGE 450 #ifdef CONFIG_HUGETLB_PAGE
451 if (PageCompound(page)) { 451 if (PageCompound(page)) {
452 flush_dcache_icache_hugepage(page); 452 flush_dcache_icache_hugepage(page);
453 return; 453 return;
454 } 454 }
455 #endif 455 #endif
456 #ifdef CONFIG_BOOKE 456 #ifdef CONFIG_BOOKE
457 { 457 {
458 void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE); 458 void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
459 __flush_dcache_icache(start); 459 __flush_dcache_icache(start);
460 kunmap_atomic(start, KM_PPC_SYNC_ICACHE); 460 kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
461 } 461 }
462 #elif defined(CONFIG_8xx) || defined(CONFIG_PPC64) 462 #elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
463 /* On 8xx there is no need to kmap since highmem is not supported */ 463 /* On 8xx there is no need to kmap since highmem is not supported */
464 __flush_dcache_icache(page_address(page)); 464 __flush_dcache_icache(page_address(page));
465 #else 465 #else
466 __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT); 466 __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
467 #endif 467 #endif
468 } 468 }
469 469
470 void clear_user_page(void *page, unsigned long vaddr, struct page *pg) 470 void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
471 { 471 {
472 clear_page(page); 472 clear_page(page);
473 473
474 /* 474 /*
475 * We shouldn't have to do this, but some versions of glibc 475 * We shouldn't have to do this, but some versions of glibc
476 * require it (ld.so assumes zero filled pages are icache clean) 476 * require it (ld.so assumes zero filled pages are icache clean)
477 * - Anton 477 * - Anton
478 */ 478 */
479 flush_dcache_page(pg); 479 flush_dcache_page(pg);
480 } 480 }
481 EXPORT_SYMBOL(clear_user_page); 481 EXPORT_SYMBOL(clear_user_page);
482 482
483 void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, 483 void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
484 struct page *pg) 484 struct page *pg)
485 { 485 {
486 copy_page(vto, vfrom); 486 copy_page(vto, vfrom);
487 487
488 /* 488 /*
489 * We should be able to use the following optimisation, however 489 * We should be able to use the following optimisation, however
490 * there are two problems. 490 * there are two problems.
491 * Firstly a bug in some versions of binutils meant PLT sections 491 * Firstly a bug in some versions of binutils meant PLT sections
492 * were not marked executable. 492 * were not marked executable.
493 * Secondly the first word in the GOT section is blrl, used 493 * Secondly the first word in the GOT section is blrl, used
494 * to establish the GOT address. Until recently the GOT was 494 * to establish the GOT address. Until recently the GOT was
495 * not marked executable. 495 * not marked executable.
496 * - Anton 496 * - Anton
497 */ 497 */
498 #if 0 498 #if 0
499 if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0)) 499 if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
500 return; 500 return;
501 #endif 501 #endif
502 502
503 flush_dcache_page(pg); 503 flush_dcache_page(pg);
504 } 504 }
505 505
506 void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, 506 void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
507 unsigned long addr, int len) 507 unsigned long addr, int len)
508 { 508 {
509 unsigned long maddr; 509 unsigned long maddr;
510 510
511 maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK); 511 maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
512 flush_icache_range(maddr, maddr + len); 512 flush_icache_range(maddr, maddr + len);
513 kunmap(page); 513 kunmap(page);
514 } 514 }
515 EXPORT_SYMBOL(flush_icache_user_range); 515 EXPORT_SYMBOL(flush_icache_user_range);
516 516
517 /* 517 /*
518 * This is called at the end of handling a user page fault, when the 518 * This is called at the end of handling a user page fault, when the
519 * fault has been handled by updating a PTE in the linux page tables. 519 * fault has been handled by updating a PTE in the linux page tables.
520 * We use it to preload an HPTE into the hash table corresponding to 520 * We use it to preload an HPTE into the hash table corresponding to
521 * the updated linux PTE. 521 * the updated linux PTE.
522 * 522 *
523 * This must always be called with the pte lock held. 523 * This must always be called with the pte lock held.
524 */ 524 */
525 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, 525 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
526 pte_t *ptep) 526 pte_t *ptep)
527 { 527 {
528 #ifdef CONFIG_PPC_STD_MMU 528 #ifdef CONFIG_PPC_STD_MMU
529 unsigned long access = 0, trap; 529 unsigned long access = 0, trap;
530 530
531 /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */ 531 /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
532 if (!pte_young(*ptep) || address >= TASK_SIZE) 532 if (!pte_young(*ptep) || address >= TASK_SIZE)
533 return; 533 return;
534 534
535 /* We try to figure out if we are coming from an instruction 535 /* We try to figure out if we are coming from an instruction
536 * access fault and pass that down to __hash_page so we avoid 536 * access fault and pass that down to __hash_page so we avoid
537 * double-faulting on execution of fresh text. We have to test 537 * double-faulting on execution of fresh text. We have to test
538 * for regs NULL since init will get here first thing at boot 538 * for regs NULL since init will get here first thing at boot
539 * 539 *
540 * We also avoid filling the hash if not coming from a fault 540 * We also avoid filling the hash if not coming from a fault
541 */ 541 */
542 if (current->thread.regs == NULL) 542 if (current->thread.regs == NULL)
543 return; 543 return;
544 trap = TRAP(current->thread.regs); 544 trap = TRAP(current->thread.regs);
545 if (trap == 0x400) 545 if (trap == 0x400)
546 access |= _PAGE_EXEC; 546 access |= _PAGE_EXEC;
547 else if (trap != 0x300) 547 else if (trap != 0x300)
548 return; 548 return;
549 hash_preload(vma->vm_mm, address, access, trap); 549 hash_preload(vma->vm_mm, address, access, trap);
550 #endif /* CONFIG_PPC_STD_MMU */ 550 #endif /* CONFIG_PPC_STD_MMU */
551 } 551 }
552 552
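flush_icache_user_range() in the file above is exported for code that modifies instructions in user pages. As a minimal sketch, not part of this commit, a hypothetical caller that patches a single user instruction could look like the following; only flush_icache_user_range(), kmap()/kunmap(), memcpy() and PAGE_MASK are existing kernel interfaces, the rest is invented for illustration.

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>

/* Hypothetical helper: write one instruction into a user page and make
 * the store visible to instruction fetches. */
static void patch_user_insn(struct vm_area_struct *vma, struct page *page,
                            unsigned long uaddr, u32 insn)
{
        void *kaddr = kmap(page);

        /* store the new instruction at the page offset of uaddr */
        memcpy(kaddr + (uaddr & ~PAGE_MASK), &insn, sizeof(insn));
        kunmap(page);

        /* flush the range so the CPU does not execute stale icache lines */
        flush_icache_user_range(vma, page, uaddr, sizeof(insn));
}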
arch/powerpc/mm/mmu_context_hash64.c
1 /* 1 /*
2 * MMU context allocation for 64-bit kernels. 2 * MMU context allocation for 64-bit kernels.
3 * 3 *
4 * Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org> 4 * Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 * 10 *
11 */ 11 */
12 12
13 #include <linux/sched.h> 13 #include <linux/sched.h>
14 #include <linux/kernel.h> 14 #include <linux/kernel.h>
15 #include <linux/errno.h> 15 #include <linux/errno.h>
16 #include <linux/string.h> 16 #include <linux/string.h>
17 #include <linux/types.h> 17 #include <linux/types.h>
18 #include <linux/mm.h> 18 #include <linux/mm.h>
19 #include <linux/spinlock.h> 19 #include <linux/spinlock.h>
20 #include <linux/idr.h> 20 #include <linux/idr.h>
21 #include <linux/module.h> 21 #include <linux/export.h>
22 #include <linux/gfp.h> 22 #include <linux/gfp.h>
23 #include <linux/slab.h> 23 #include <linux/slab.h>
24 24
25 #include <asm/mmu_context.h> 25 #include <asm/mmu_context.h>
26 26
27 #ifdef CONFIG_PPC_ICSWX 27 #ifdef CONFIG_PPC_ICSWX
28 /* 28 /*
29 * The processor and its L2 cache cause the icswx instruction to 29 * The processor and its L2 cache cause the icswx instruction to
30 * generate a COP_REQ transaction on PowerBus. The transaction has 30 * generate a COP_REQ transaction on PowerBus. The transaction has
31 * no address, and the processor does not perform an MMU access 31 * no address, and the processor does not perform an MMU access
32 * to authenticate the transaction. The command portion of the 32 * to authenticate the transaction. The command portion of the
33 * PowerBus COP_REQ transaction includes the LPAR_ID (LPID) and 33 * PowerBus COP_REQ transaction includes the LPAR_ID (LPID) and
34 * the coprocessor Process ID (PID), which the coprocessor compares 34 * the coprocessor Process ID (PID), which the coprocessor compares
35 * to the authorized LPID and PID held in the coprocessor, to determine 35 * to the authorized LPID and PID held in the coprocessor, to determine
36 * if the process is authorized to generate the transaction. 36 * if the process is authorized to generate the transaction.
 37 * The data of the COP_REQ transaction is 128 bytes or less and is 37 * The data of the COP_REQ transaction is 128 bytes or less and is
38 * placed in cacheable memory on a 128-byte cache line boundary. 38 * placed in cacheable memory on a 128-byte cache line boundary.
39 * 39 *
 40 * A task that wants to use a coprocessor should call use_cop() to allocate 40 * A task that wants to use a coprocessor should call use_cop() to allocate
 41 * a coprocessor PID before executing the icswx instruction. use_cop() 41 * a coprocessor PID before executing the icswx instruction. use_cop()
 42 * also enables coprocessor context switching. drop_cop() is 42 * also enables coprocessor context switching. drop_cop() is
43 * used to free the coprocessor PID. 43 * used to free the coprocessor PID.
44 * 44 *
45 * Example: 45 * Example:
46 * Host Fabric Interface (HFI) is a PowerPC network coprocessor. 46 * Host Fabric Interface (HFI) is a PowerPC network coprocessor.
 47 * Each HFI has multiple windows. Each HFI window serves as a 47 * Each HFI has multiple windows. Each HFI window serves as a
 48 * network device sending to and receiving from the HFI network. 48 * network device sending to and receiving from the HFI network.
 49 * The HFI immediate send function uses the icswx instruction. The immediate 49 * The HFI immediate send function uses the icswx instruction. The immediate
 50 * send function allows small (single cache-line) packets to be sent 50 * send function allows small (single cache-line) packets to be sent
51 * without using the regular HFI send FIFO and doorbell, which are 51 * without using the regular HFI send FIFO and doorbell, which are
52 * much slower than immediate send. 52 * much slower than immediate send.
53 * 53 *
54 * For each task intending to use HFI immediate send, the HFI driver 54 * For each task intending to use HFI immediate send, the HFI driver
55 * calls use_cop() to obtain a coprocessor PID for the task. 55 * calls use_cop() to obtain a coprocessor PID for the task.
56 * The HFI driver then allocate a free HFI window and save the 56 * The HFI driver then allocate a free HFI window and save the
57 * coprocessor PID to the HFI window to allow the task to use the 57 * coprocessor PID to the HFI window to allow the task to use the
58 * HFI window. 58 * HFI window.
59 * 59 *
60 * The HFI driver repeatedly creates immediate send packets and 60 * The HFI driver repeatedly creates immediate send packets and
61 * issues icswx instruction to send data through the HFI window. 61 * issues icswx instruction to send data through the HFI window.
62 * The HFI compares the coprocessor PID in the CPU PID register 62 * The HFI compares the coprocessor PID in the CPU PID register
63 * to the PID held in the HFI window to determine if the transaction 63 * to the PID held in the HFI window to determine if the transaction
64 * is allowed. 64 * is allowed.
65 * 65 *
 66 * When the task wants to release the HFI window, the HFI driver calls 66 * When the task wants to release the HFI window, the HFI driver calls
67 * drop_cop() to release the coprocessor PID. 67 * drop_cop() to release the coprocessor PID.
68 */ 68 */
69 69
70 #define COP_PID_NONE 0 70 #define COP_PID_NONE 0
71 #define COP_PID_MIN (COP_PID_NONE + 1) 71 #define COP_PID_MIN (COP_PID_NONE + 1)
72 #define COP_PID_MAX (0xFFFF) 72 #define COP_PID_MAX (0xFFFF)
73 73
74 static DEFINE_SPINLOCK(mmu_context_acop_lock); 74 static DEFINE_SPINLOCK(mmu_context_acop_lock);
75 static DEFINE_IDA(cop_ida); 75 static DEFINE_IDA(cop_ida);
76 76
77 void switch_cop(struct mm_struct *next) 77 void switch_cop(struct mm_struct *next)
78 { 78 {
79 mtspr(SPRN_PID, next->context.cop_pid); 79 mtspr(SPRN_PID, next->context.cop_pid);
80 mtspr(SPRN_ACOP, next->context.acop); 80 mtspr(SPRN_ACOP, next->context.acop);
81 } 81 }
82 82
83 static int new_cop_pid(struct ida *ida, int min_id, int max_id, 83 static int new_cop_pid(struct ida *ida, int min_id, int max_id,
84 spinlock_t *lock) 84 spinlock_t *lock)
85 { 85 {
86 int index; 86 int index;
87 int err; 87 int err;
88 88
89 again: 89 again:
90 if (!ida_pre_get(ida, GFP_KERNEL)) 90 if (!ida_pre_get(ida, GFP_KERNEL))
91 return -ENOMEM; 91 return -ENOMEM;
92 92
93 spin_lock(lock); 93 spin_lock(lock);
94 err = ida_get_new_above(ida, min_id, &index); 94 err = ida_get_new_above(ida, min_id, &index);
95 spin_unlock(lock); 95 spin_unlock(lock);
96 96
97 if (err == -EAGAIN) 97 if (err == -EAGAIN)
98 goto again; 98 goto again;
99 else if (err) 99 else if (err)
100 return err; 100 return err;
101 101
102 if (index > max_id) { 102 if (index > max_id) {
103 spin_lock(lock); 103 spin_lock(lock);
104 ida_remove(ida, index); 104 ida_remove(ida, index);
105 spin_unlock(lock); 105 spin_unlock(lock);
106 return -ENOMEM; 106 return -ENOMEM;
107 } 107 }
108 108
109 return index; 109 return index;
110 } 110 }
111 111
112 static void sync_cop(void *arg) 112 static void sync_cop(void *arg)
113 { 113 {
114 struct mm_struct *mm = arg; 114 struct mm_struct *mm = arg;
115 115
116 if (mm == current->active_mm) 116 if (mm == current->active_mm)
117 switch_cop(current->active_mm); 117 switch_cop(current->active_mm);
118 } 118 }
119 119
120 /** 120 /**
121 * Start using a coprocessor. 121 * Start using a coprocessor.
122 * @acop: mask of coprocessor to be used. 122 * @acop: mask of coprocessor to be used.
 123 * @mm: The mm to associate the coprocessor with. Most likely the current mm. 123 * @mm: The mm to associate the coprocessor with. Most likely the current mm.
124 * 124 *
125 * Return a positive PID if successful. Negative errno otherwise. 125 * Return a positive PID if successful. Negative errno otherwise.
126 * The returned PID will be fed to the coprocessor to determine if an 126 * The returned PID will be fed to the coprocessor to determine if an
127 * icswx transaction is authenticated. 127 * icswx transaction is authenticated.
128 */ 128 */
129 int use_cop(unsigned long acop, struct mm_struct *mm) 129 int use_cop(unsigned long acop, struct mm_struct *mm)
130 { 130 {
131 int ret; 131 int ret;
132 132
133 if (!cpu_has_feature(CPU_FTR_ICSWX)) 133 if (!cpu_has_feature(CPU_FTR_ICSWX))
134 return -ENODEV; 134 return -ENODEV;
135 135
136 if (!mm || !acop) 136 if (!mm || !acop)
137 return -EINVAL; 137 return -EINVAL;
138 138
139 /* We need to make sure mm_users doesn't change */ 139 /* We need to make sure mm_users doesn't change */
140 down_read(&mm->mmap_sem); 140 down_read(&mm->mmap_sem);
141 spin_lock(mm->context.cop_lockp); 141 spin_lock(mm->context.cop_lockp);
142 142
143 if (mm->context.cop_pid == COP_PID_NONE) { 143 if (mm->context.cop_pid == COP_PID_NONE) {
144 ret = new_cop_pid(&cop_ida, COP_PID_MIN, COP_PID_MAX, 144 ret = new_cop_pid(&cop_ida, COP_PID_MIN, COP_PID_MAX,
145 &mmu_context_acop_lock); 145 &mmu_context_acop_lock);
146 if (ret < 0) 146 if (ret < 0)
147 goto out; 147 goto out;
148 148
149 mm->context.cop_pid = ret; 149 mm->context.cop_pid = ret;
150 } 150 }
151 mm->context.acop |= acop; 151 mm->context.acop |= acop;
152 152
153 sync_cop(mm); 153 sync_cop(mm);
154 154
155 /* 155 /*
156 * If this is a threaded process then there might be other threads 156 * If this is a threaded process then there might be other threads
157 * running. We need to send an IPI to force them to pick up any 157 * running. We need to send an IPI to force them to pick up any
158 * change in PID and ACOP. 158 * change in PID and ACOP.
159 */ 159 */
160 if (atomic_read(&mm->mm_users) > 1) 160 if (atomic_read(&mm->mm_users) > 1)
161 smp_call_function(sync_cop, mm, 1); 161 smp_call_function(sync_cop, mm, 1);
162 162
163 ret = mm->context.cop_pid; 163 ret = mm->context.cop_pid;
164 164
165 out: 165 out:
166 spin_unlock(mm->context.cop_lockp); 166 spin_unlock(mm->context.cop_lockp);
167 up_read(&mm->mmap_sem); 167 up_read(&mm->mmap_sem);
168 168
169 return ret; 169 return ret;
170 } 170 }
171 EXPORT_SYMBOL_GPL(use_cop); 171 EXPORT_SYMBOL_GPL(use_cop);
172 172
173 /** 173 /**
174 * Stop using a coprocessor. 174 * Stop using a coprocessor.
175 * @acop: mask of coprocessor to be stopped. 175 * @acop: mask of coprocessor to be stopped.
 176 * @mm: The mm the coprocessor is associated with. 176 * @mm: The mm the coprocessor is associated with.
177 */ 177 */
178 void drop_cop(unsigned long acop, struct mm_struct *mm) 178 void drop_cop(unsigned long acop, struct mm_struct *mm)
179 { 179 {
180 int free_pid = COP_PID_NONE; 180 int free_pid = COP_PID_NONE;
181 181
182 if (!cpu_has_feature(CPU_FTR_ICSWX)) 182 if (!cpu_has_feature(CPU_FTR_ICSWX))
183 return; 183 return;
184 184
185 if (WARN_ON_ONCE(!mm)) 185 if (WARN_ON_ONCE(!mm))
186 return; 186 return;
187 187
188 /* We need to make sure mm_users doesn't change */ 188 /* We need to make sure mm_users doesn't change */
189 down_read(&mm->mmap_sem); 189 down_read(&mm->mmap_sem);
190 spin_lock(mm->context.cop_lockp); 190 spin_lock(mm->context.cop_lockp);
191 191
192 mm->context.acop &= ~acop; 192 mm->context.acop &= ~acop;
193 193
194 if ((!mm->context.acop) && (mm->context.cop_pid != COP_PID_NONE)) { 194 if ((!mm->context.acop) && (mm->context.cop_pid != COP_PID_NONE)) {
195 free_pid = mm->context.cop_pid; 195 free_pid = mm->context.cop_pid;
196 mm->context.cop_pid = COP_PID_NONE; 196 mm->context.cop_pid = COP_PID_NONE;
197 } 197 }
198 198
199 sync_cop(mm); 199 sync_cop(mm);
200 200
201 /* 201 /*
202 * If this is a threaded process then there might be other threads 202 * If this is a threaded process then there might be other threads
203 * running. We need to send an IPI to force them to pick up any 203 * running. We need to send an IPI to force them to pick up any
204 * change in PID and ACOP. 204 * change in PID and ACOP.
205 */ 205 */
206 if (atomic_read(&mm->mm_users) > 1) 206 if (atomic_read(&mm->mm_users) > 1)
207 smp_call_function(sync_cop, mm, 1); 207 smp_call_function(sync_cop, mm, 1);
208 208
209 if (free_pid != COP_PID_NONE) { 209 if (free_pid != COP_PID_NONE) {
210 spin_lock(&mmu_context_acop_lock); 210 spin_lock(&mmu_context_acop_lock);
211 ida_remove(&cop_ida, free_pid); 211 ida_remove(&cop_ida, free_pid);
212 spin_unlock(&mmu_context_acop_lock); 212 spin_unlock(&mmu_context_acop_lock);
213 } 213 }
214 214
215 spin_unlock(mm->context.cop_lockp); 215 spin_unlock(mm->context.cop_lockp);
216 up_read(&mm->mmap_sem); 216 up_read(&mm->mmap_sem);
217 } 217 }
218 EXPORT_SYMBOL_GPL(drop_cop); 218 EXPORT_SYMBOL_GPL(drop_cop);
219 219
220 #endif /* CONFIG_PPC_ICSWX */ 220 #endif /* CONFIG_PPC_ICSWX */
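Following the use_cop()/drop_cop() protocol documented above, a driver obtains a coprocessor PID for a task's mm before issuing icswx and releases it when done. A minimal hypothetical sketch of such a driver follows; only use_cop() and drop_cop() come from the code above, the ACOP bit and helper names are invented.

#include <linux/sched.h>
#include <asm/mmu_context.h>

#define MY_COP_ACOP_BIT (1UL << 3)      /* invented ACOP bit for this driver */

static int my_cop_attach(struct mm_struct *mm)
{
        int pid = use_cop(MY_COP_ACOP_BIT, mm); /* positive PID or -errno */

        if (pid < 0)
                return pid;

        /* program 'pid' into the coprocessor window here ... */
        return 0;
}

static void my_cop_detach(struct mm_struct *mm)
{
        /* the PID is freed once no ACOP bits remain set for this mm */
        drop_cop(MY_COP_ACOP_BIT, mm);
}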
221 221
222 static DEFINE_SPINLOCK(mmu_context_lock); 222 static DEFINE_SPINLOCK(mmu_context_lock);
223 static DEFINE_IDA(mmu_context_ida); 223 static DEFINE_IDA(mmu_context_ida);
224 224
225 /* 225 /*
226 * The proto-VSID space has 2^35 - 1 segments available for user mappings. 226 * The proto-VSID space has 2^35 - 1 segments available for user mappings.
227 * Each segment contains 2^28 bytes. Each context maps 2^44 bytes, 227 * Each segment contains 2^28 bytes. Each context maps 2^44 bytes,
228 * so we can support 2^19-1 contexts (19 == 35 + 28 - 44). 228 * so we can support 2^19-1 contexts (19 == 35 + 28 - 44).
229 */ 229 */
230 #define MAX_CONTEXT ((1UL << 19) - 1) 230 #define MAX_CONTEXT ((1UL << 19) - 1)
231 231
232 int __init_new_context(void) 232 int __init_new_context(void)
233 { 233 {
234 int index; 234 int index;
235 int err; 235 int err;
236 236
237 again: 237 again:
238 if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL)) 238 if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
239 return -ENOMEM; 239 return -ENOMEM;
240 240
241 spin_lock(&mmu_context_lock); 241 spin_lock(&mmu_context_lock);
242 err = ida_get_new_above(&mmu_context_ida, 1, &index); 242 err = ida_get_new_above(&mmu_context_ida, 1, &index);
243 spin_unlock(&mmu_context_lock); 243 spin_unlock(&mmu_context_lock);
244 244
245 if (err == -EAGAIN) 245 if (err == -EAGAIN)
246 goto again; 246 goto again;
247 else if (err) 247 else if (err)
248 return err; 248 return err;
249 249
250 if (index > MAX_CONTEXT) { 250 if (index > MAX_CONTEXT) {
251 spin_lock(&mmu_context_lock); 251 spin_lock(&mmu_context_lock);
252 ida_remove(&mmu_context_ida, index); 252 ida_remove(&mmu_context_ida, index);
253 spin_unlock(&mmu_context_lock); 253 spin_unlock(&mmu_context_lock);
254 return -ENOMEM; 254 return -ENOMEM;
255 } 255 }
256 256
257 return index; 257 return index;
258 } 258 }
259 EXPORT_SYMBOL_GPL(__init_new_context); 259 EXPORT_SYMBOL_GPL(__init_new_context);
260 260
261 int init_new_context(struct task_struct *tsk, struct mm_struct *mm) 261 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
262 { 262 {
263 int index; 263 int index;
264 264
265 index = __init_new_context(); 265 index = __init_new_context();
266 if (index < 0) 266 if (index < 0)
267 return index; 267 return index;
268 268
 269 /* The old code would re-promote on fork; we don't do that 269 /* The old code would re-promote on fork; we don't do that
 270 * when using slices as it could cause problems promoting slices 270 * when using slices as it could cause problems promoting slices
271 * that have been forced down to 4K 271 * that have been forced down to 4K
272 */ 272 */
273 if (slice_mm_new_context(mm)) 273 if (slice_mm_new_context(mm))
274 slice_set_user_psize(mm, mmu_virtual_psize); 274 slice_set_user_psize(mm, mmu_virtual_psize);
275 subpage_prot_init_new_context(mm); 275 subpage_prot_init_new_context(mm);
276 mm->context.id = index; 276 mm->context.id = index;
277 #ifdef CONFIG_PPC_ICSWX 277 #ifdef CONFIG_PPC_ICSWX
278 mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL); 278 mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
279 if (!mm->context.cop_lockp) { 279 if (!mm->context.cop_lockp) {
280 __destroy_context(index); 280 __destroy_context(index);
281 subpage_prot_free(mm); 281 subpage_prot_free(mm);
282 mm->context.id = MMU_NO_CONTEXT; 282 mm->context.id = MMU_NO_CONTEXT;
283 return -ENOMEM; 283 return -ENOMEM;
284 } 284 }
285 spin_lock_init(mm->context.cop_lockp); 285 spin_lock_init(mm->context.cop_lockp);
286 #endif /* CONFIG_PPC_ICSWX */ 286 #endif /* CONFIG_PPC_ICSWX */
287 287
288 return 0; 288 return 0;
289 } 289 }
290 290
291 void __destroy_context(int context_id) 291 void __destroy_context(int context_id)
292 { 292 {
293 spin_lock(&mmu_context_lock); 293 spin_lock(&mmu_context_lock);
294 ida_remove(&mmu_context_ida, context_id); 294 ida_remove(&mmu_context_ida, context_id);
295 spin_unlock(&mmu_context_lock); 295 spin_unlock(&mmu_context_lock);
296 } 296 }
297 EXPORT_SYMBOL_GPL(__destroy_context); 297 EXPORT_SYMBOL_GPL(__destroy_context);
298 298
299 void destroy_context(struct mm_struct *mm) 299 void destroy_context(struct mm_struct *mm)
300 { 300 {
301 #ifdef CONFIG_PPC_ICSWX 301 #ifdef CONFIG_PPC_ICSWX
302 drop_cop(mm->context.acop, mm); 302 drop_cop(mm->context.acop, mm);
303 kfree(mm->context.cop_lockp); 303 kfree(mm->context.cop_lockp);
304 mm->context.cop_lockp = NULL; 304 mm->context.cop_lockp = NULL;
305 #endif /* CONFIG_PPC_ICSWX */ 305 #endif /* CONFIG_PPC_ICSWX */
306 __destroy_context(mm->context.id); 306 __destroy_context(mm->context.id);
307 subpage_prot_free(mm); 307 subpage_prot_free(mm);
308 mm->context.id = MMU_NO_CONTEXT; 308 mm->context.id = MMU_NO_CONTEXT;
309 } 309 }
310 310
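Since __init_new_context() and __destroy_context() are exported above, code outside this file can reserve and release an id from the same 2^19 - 1 context space. A minimal hypothetical sketch of such a consumer; the function name is invented.

#include <asm/mmu_context.h>

static int reserve_extra_context(void)
{
        int id = __init_new_context();  /* id >= 1 on success, -ENOMEM on failure */

        if (id < 0)
                return id;

        /* ... hand 'id' to whatever needs a private MMU context ... */

        __destroy_context(id);          /* give the id back when done */
        return 0;
}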
arch/powerpc/mm/numa.c
1 /* 1 /*
2 * pSeries NUMA support 2 * pSeries NUMA support
3 * 3 *
4 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM 4 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 #include <linux/threads.h> 11 #include <linux/threads.h>
12 #include <linux/bootmem.h> 12 #include <linux/bootmem.h>
13 #include <linux/init.h> 13 #include <linux/init.h>
14 #include <linux/mm.h> 14 #include <linux/mm.h>
15 #include <linux/mmzone.h> 15 #include <linux/mmzone.h>
16 #include <linux/module.h> 16 #include <linux/export.h>
17 #include <linux/nodemask.h> 17 #include <linux/nodemask.h>
18 #include <linux/cpu.h> 18 #include <linux/cpu.h>
19 #include <linux/notifier.h> 19 #include <linux/notifier.h>
20 #include <linux/memblock.h> 20 #include <linux/memblock.h>
21 #include <linux/of.h> 21 #include <linux/of.h>
22 #include <linux/pfn.h> 22 #include <linux/pfn.h>
23 #include <linux/cpuset.h> 23 #include <linux/cpuset.h>
24 #include <linux/node.h> 24 #include <linux/node.h>
25 #include <asm/sparsemem.h> 25 #include <asm/sparsemem.h>
26 #include <asm/prom.h> 26 #include <asm/prom.h>
27 #include <asm/system.h> 27 #include <asm/system.h>
28 #include <asm/smp.h> 28 #include <asm/smp.h>
29 #include <asm/firmware.h> 29 #include <asm/firmware.h>
30 #include <asm/paca.h> 30 #include <asm/paca.h>
31 #include <asm/hvcall.h> 31 #include <asm/hvcall.h>
32 32
33 static int numa_enabled = 1; 33 static int numa_enabled = 1;
34 34
35 static char *cmdline __initdata; 35 static char *cmdline __initdata;
36 36
37 static int numa_debug; 37 static int numa_debug;
38 #define dbg(args...) if (numa_debug) { printk(KERN_INFO args); } 38 #define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
39 39
40 int numa_cpu_lookup_table[NR_CPUS]; 40 int numa_cpu_lookup_table[NR_CPUS];
41 cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; 41 cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
42 struct pglist_data *node_data[MAX_NUMNODES]; 42 struct pglist_data *node_data[MAX_NUMNODES];
43 43
44 EXPORT_SYMBOL(numa_cpu_lookup_table); 44 EXPORT_SYMBOL(numa_cpu_lookup_table);
45 EXPORT_SYMBOL(node_to_cpumask_map); 45 EXPORT_SYMBOL(node_to_cpumask_map);
46 EXPORT_SYMBOL(node_data); 46 EXPORT_SYMBOL(node_data);
47 47
48 static int min_common_depth; 48 static int min_common_depth;
49 static int n_mem_addr_cells, n_mem_size_cells; 49 static int n_mem_addr_cells, n_mem_size_cells;
50 static int form1_affinity; 50 static int form1_affinity;
51 51
52 #define MAX_DISTANCE_REF_POINTS 4 52 #define MAX_DISTANCE_REF_POINTS 4
53 static int distance_ref_points_depth; 53 static int distance_ref_points_depth;
54 static const unsigned int *distance_ref_points; 54 static const unsigned int *distance_ref_points;
55 static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS]; 55 static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
56 56
57 /* 57 /*
58 * Allocate node_to_cpumask_map based on number of available nodes 58 * Allocate node_to_cpumask_map based on number of available nodes
59 * Requires node_possible_map to be valid. 59 * Requires node_possible_map to be valid.
60 * 60 *
61 * Note: node_to_cpumask() is not valid until after this is done. 61 * Note: node_to_cpumask() is not valid until after this is done.
62 */ 62 */
63 static void __init setup_node_to_cpumask_map(void) 63 static void __init setup_node_to_cpumask_map(void)
64 { 64 {
65 unsigned int node, num = 0; 65 unsigned int node, num = 0;
66 66
67 /* setup nr_node_ids if not done yet */ 67 /* setup nr_node_ids if not done yet */
68 if (nr_node_ids == MAX_NUMNODES) { 68 if (nr_node_ids == MAX_NUMNODES) {
69 for_each_node_mask(node, node_possible_map) 69 for_each_node_mask(node, node_possible_map)
70 num = node; 70 num = node;
71 nr_node_ids = num + 1; 71 nr_node_ids = num + 1;
72 } 72 }
73 73
74 /* allocate the map */ 74 /* allocate the map */
75 for (node = 0; node < nr_node_ids; node++) 75 for (node = 0; node < nr_node_ids; node++)
76 alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); 76 alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
77 77
78 /* cpumask_of_node() will now work */ 78 /* cpumask_of_node() will now work */
79 dbg("Node to cpumask map for %d nodes\n", nr_node_ids); 79 dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
80 } 80 }
81 81
82 static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn, 82 static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
83 unsigned int *nid) 83 unsigned int *nid)
84 { 84 {
85 unsigned long long mem; 85 unsigned long long mem;
86 char *p = cmdline; 86 char *p = cmdline;
87 static unsigned int fake_nid; 87 static unsigned int fake_nid;
88 static unsigned long long curr_boundary; 88 static unsigned long long curr_boundary;
89 89
90 /* 90 /*
91 * Modify node id, iff we started creating NUMA nodes 91 * Modify node id, iff we started creating NUMA nodes
 92 * We want to continue from where we left off the last time 92 * We want to continue from where we left off the last time
93 */ 93 */
94 if (fake_nid) 94 if (fake_nid)
95 *nid = fake_nid; 95 *nid = fake_nid;
96 /* 96 /*
97 * In case there are no more arguments to parse, the 97 * In case there are no more arguments to parse, the
98 * node_id should be the same as the last fake node id 98 * node_id should be the same as the last fake node id
99 * (we've handled this above). 99 * (we've handled this above).
100 */ 100 */
101 if (!p) 101 if (!p)
102 return 0; 102 return 0;
103 103
104 mem = memparse(p, &p); 104 mem = memparse(p, &p);
105 if (!mem) 105 if (!mem)
106 return 0; 106 return 0;
107 107
108 if (mem < curr_boundary) 108 if (mem < curr_boundary)
109 return 0; 109 return 0;
110 110
111 curr_boundary = mem; 111 curr_boundary = mem;
112 112
113 if ((end_pfn << PAGE_SHIFT) > mem) { 113 if ((end_pfn << PAGE_SHIFT) > mem) {
114 /* 114 /*
115 * Skip commas and spaces 115 * Skip commas and spaces
116 */ 116 */
117 while (*p == ',' || *p == ' ' || *p == '\t') 117 while (*p == ',' || *p == ' ' || *p == '\t')
118 p++; 118 p++;
119 119
120 cmdline = p; 120 cmdline = p;
121 fake_nid++; 121 fake_nid++;
122 *nid = fake_nid; 122 *nid = fake_nid;
123 dbg("created new fake_node with id %d\n", fake_nid); 123 dbg("created new fake_node with id %d\n", fake_nid);
124 return 1; 124 return 1;
125 } 125 }
126 return 0; 126 return 0;
127 } 127 }
128 128
129 /* 129 /*
130 * get_active_region_work_fn - A helper function for get_node_active_region 130 * get_active_region_work_fn - A helper function for get_node_active_region
131 * Returns datax set to the start_pfn and end_pfn if they contain 131 * Returns datax set to the start_pfn and end_pfn if they contain
132 * the initial value of datax->start_pfn between them 132 * the initial value of datax->start_pfn between them
 133 * @start_pfn: start page (inclusive) of region to check 133 * @start_pfn: start page (inclusive) of region to check
 134 * @end_pfn: end page (exclusive) of region to check 134 * @end_pfn: end page (exclusive) of region to check
135 * @datax: comes in with ->start_pfn set to value to search for and 135 * @datax: comes in with ->start_pfn set to value to search for and
136 * goes out with active range if it contains it 136 * goes out with active range if it contains it
137 * Returns 1 if search value is in range else 0 137 * Returns 1 if search value is in range else 0
138 */ 138 */
139 static int __init get_active_region_work_fn(unsigned long start_pfn, 139 static int __init get_active_region_work_fn(unsigned long start_pfn,
140 unsigned long end_pfn, void *datax) 140 unsigned long end_pfn, void *datax)
141 { 141 {
142 struct node_active_region *data; 142 struct node_active_region *data;
143 data = (struct node_active_region *)datax; 143 data = (struct node_active_region *)datax;
144 144
145 if (start_pfn <= data->start_pfn && end_pfn > data->start_pfn) { 145 if (start_pfn <= data->start_pfn && end_pfn > data->start_pfn) {
146 data->start_pfn = start_pfn; 146 data->start_pfn = start_pfn;
147 data->end_pfn = end_pfn; 147 data->end_pfn = end_pfn;
148 return 1; 148 return 1;
149 } 149 }
150 return 0; 150 return 0;
151 151
152 } 152 }
153 153
154 /* 154 /*
155 * get_node_active_region - Return active region containing start_pfn 155 * get_node_active_region - Return active region containing start_pfn
156 * Active range returned is empty if none found. 156 * Active range returned is empty if none found.
157 * @start_pfn: The page to return the region for. 157 * @start_pfn: The page to return the region for.
158 * @node_ar: Returned set to the active region containing start_pfn 158 * @node_ar: Returned set to the active region containing start_pfn
159 */ 159 */
160 static void __init get_node_active_region(unsigned long start_pfn, 160 static void __init get_node_active_region(unsigned long start_pfn,
161 struct node_active_region *node_ar) 161 struct node_active_region *node_ar)
162 { 162 {
163 int nid = early_pfn_to_nid(start_pfn); 163 int nid = early_pfn_to_nid(start_pfn);
164 164
165 node_ar->nid = nid; 165 node_ar->nid = nid;
166 node_ar->start_pfn = start_pfn; 166 node_ar->start_pfn = start_pfn;
167 node_ar->end_pfn = start_pfn; 167 node_ar->end_pfn = start_pfn;
168 work_with_active_regions(nid, get_active_region_work_fn, node_ar); 168 work_with_active_regions(nid, get_active_region_work_fn, node_ar);
169 } 169 }
170 170
171 static void map_cpu_to_node(int cpu, int node) 171 static void map_cpu_to_node(int cpu, int node)
172 { 172 {
173 numa_cpu_lookup_table[cpu] = node; 173 numa_cpu_lookup_table[cpu] = node;
174 174
175 dbg("adding cpu %d to node %d\n", cpu, node); 175 dbg("adding cpu %d to node %d\n", cpu, node);
176 176
177 if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node]))) 177 if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
178 cpumask_set_cpu(cpu, node_to_cpumask_map[node]); 178 cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
179 } 179 }
180 180
181 #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR) 181 #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
182 static void unmap_cpu_from_node(unsigned long cpu) 182 static void unmap_cpu_from_node(unsigned long cpu)
183 { 183 {
184 int node = numa_cpu_lookup_table[cpu]; 184 int node = numa_cpu_lookup_table[cpu];
185 185
186 dbg("removing cpu %lu from node %d\n", cpu, node); 186 dbg("removing cpu %lu from node %d\n", cpu, node);
187 187
188 if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) { 188 if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
189 cpumask_clear_cpu(cpu, node_to_cpumask_map[node]); 189 cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
190 } else { 190 } else {
191 printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n", 191 printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
192 cpu, node); 192 cpu, node);
193 } 193 }
194 } 194 }
195 #endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */ 195 #endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
196 196
197 /* must hold reference to node during call */ 197 /* must hold reference to node during call */
198 static const int *of_get_associativity(struct device_node *dev) 198 static const int *of_get_associativity(struct device_node *dev)
199 { 199 {
200 return of_get_property(dev, "ibm,associativity", NULL); 200 return of_get_property(dev, "ibm,associativity", NULL);
201 } 201 }
202 202
203 /* 203 /*
204 * Returns the property linux,drconf-usable-memory if 204 * Returns the property linux,drconf-usable-memory if
205 * it exists (the property exists only in kexec/kdump kernels, 205 * it exists (the property exists only in kexec/kdump kernels,
206 * added by kexec-tools) 206 * added by kexec-tools)
207 */ 207 */
208 static const u32 *of_get_usable_memory(struct device_node *memory) 208 static const u32 *of_get_usable_memory(struct device_node *memory)
209 { 209 {
210 const u32 *prop; 210 const u32 *prop;
211 u32 len; 211 u32 len;
212 prop = of_get_property(memory, "linux,drconf-usable-memory", &len); 212 prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
213 if (!prop || len < sizeof(unsigned int)) 213 if (!prop || len < sizeof(unsigned int))
214 return 0; 214 return 0;
215 return prop; 215 return prop;
216 } 216 }
217 217
218 int __node_distance(int a, int b) 218 int __node_distance(int a, int b)
219 { 219 {
220 int i; 220 int i;
221 int distance = LOCAL_DISTANCE; 221 int distance = LOCAL_DISTANCE;
222 222
223 if (!form1_affinity) 223 if (!form1_affinity)
224 return distance; 224 return distance;
225 225
226 for (i = 0; i < distance_ref_points_depth; i++) { 226 for (i = 0; i < distance_ref_points_depth; i++) {
227 if (distance_lookup_table[a][i] == distance_lookup_table[b][i]) 227 if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
228 break; 228 break;
229 229
230 /* Double the distance for each NUMA level */ 230 /* Double the distance for each NUMA level */
231 distance *= 2; 231 distance *= 2;
232 } 232 }
233 233
234 return distance; 234 return distance;
235 } 235 }
236 236
237 static void initialize_distance_lookup_table(int nid, 237 static void initialize_distance_lookup_table(int nid,
238 const unsigned int *associativity) 238 const unsigned int *associativity)
239 { 239 {
240 int i; 240 int i;
241 241
242 if (!form1_affinity) 242 if (!form1_affinity)
243 return; 243 return;
244 244
245 for (i = 0; i < distance_ref_points_depth; i++) { 245 for (i = 0; i < distance_ref_points_depth; i++) {
246 distance_lookup_table[nid][i] = 246 distance_lookup_table[nid][i] =
247 associativity[distance_ref_points[i]]; 247 associativity[distance_ref_points[i]];
248 } 248 }
249 } 249 }
250 250
251 /* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa 251 /* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
252 * info is found. 252 * info is found.
253 */ 253 */
254 static int associativity_to_nid(const unsigned int *associativity) 254 static int associativity_to_nid(const unsigned int *associativity)
255 { 255 {
256 int nid = -1; 256 int nid = -1;
257 257
258 if (min_common_depth == -1) 258 if (min_common_depth == -1)
259 goto out; 259 goto out;
260 260
261 if (associativity[0] >= min_common_depth) 261 if (associativity[0] >= min_common_depth)
262 nid = associativity[min_common_depth]; 262 nid = associativity[min_common_depth];
263 263
264 /* POWER4 LPAR uses 0xffff as invalid node */ 264 /* POWER4 LPAR uses 0xffff as invalid node */
265 if (nid == 0xffff || nid >= MAX_NUMNODES) 265 if (nid == 0xffff || nid >= MAX_NUMNODES)
266 nid = -1; 266 nid = -1;
267 267
268 if (nid > 0 && associativity[0] >= distance_ref_points_depth) 268 if (nid > 0 && associativity[0] >= distance_ref_points_depth)
269 initialize_distance_lookup_table(nid, associativity); 269 initialize_distance_lookup_table(nid, associativity);
270 270
271 out: 271 out:
272 return nid; 272 return nid;
273 } 273 }
274 274
275 /* Returns the nid associated with the given device tree node, 275 /* Returns the nid associated with the given device tree node,
276 * or -1 if not found. 276 * or -1 if not found.
277 */ 277 */
278 static int of_node_to_nid_single(struct device_node *device) 278 static int of_node_to_nid_single(struct device_node *device)
279 { 279 {
280 int nid = -1; 280 int nid = -1;
281 const unsigned int *tmp; 281 const unsigned int *tmp;
282 282
283 tmp = of_get_associativity(device); 283 tmp = of_get_associativity(device);
284 if (tmp) 284 if (tmp)
285 nid = associativity_to_nid(tmp); 285 nid = associativity_to_nid(tmp);
286 return nid; 286 return nid;
287 } 287 }
288 288
289 /* Walk the device tree upwards, looking for an associativity id */ 289 /* Walk the device tree upwards, looking for an associativity id */
290 int of_node_to_nid(struct device_node *device) 290 int of_node_to_nid(struct device_node *device)
291 { 291 {
292 struct device_node *tmp; 292 struct device_node *tmp;
293 int nid = -1; 293 int nid = -1;
294 294
295 of_node_get(device); 295 of_node_get(device);
296 while (device) { 296 while (device) {
297 nid = of_node_to_nid_single(device); 297 nid = of_node_to_nid_single(device);
298 if (nid != -1) 298 if (nid != -1)
299 break; 299 break;
300 300
301 tmp = device; 301 tmp = device;
302 device = of_get_parent(tmp); 302 device = of_get_parent(tmp);
303 of_node_put(tmp); 303 of_node_put(tmp);
304 } 304 }
305 of_node_put(device); 305 of_node_put(device);
306 306
307 return nid; 307 return nid;
308 } 308 }
309 EXPORT_SYMBOL_GPL(of_node_to_nid); 309 EXPORT_SYMBOL_GPL(of_node_to_nid);
310 310
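of_node_to_nid() walks up the device tree until it finds usable associativity information, so a driver can ask for node-local memory based on its device-tree node. A small hypothetical sketch; the helper is invented and the exact headers are an assumption, but of_node_to_nid(), kmalloc_node() and NUMA_NO_NODE are existing interfaces.

#include <linux/of.h>
#include <linux/slab.h>
#include <linux/numa.h>

/* Hypothetical: allocate memory close to the NUMA node of a DT device. */
static void *alloc_near_device(struct device_node *np, size_t size)
{
        int nid = of_node_to_nid(np);   /* -1 when no NUMA info is found */

        if (nid == -1)
                nid = NUMA_NO_NODE;
        return kmalloc_node(size, GFP_KERNEL, nid);
}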
311 static int __init find_min_common_depth(void) 311 static int __init find_min_common_depth(void)
312 { 312 {
313 int depth; 313 int depth;
314 struct device_node *chosen; 314 struct device_node *chosen;
315 struct device_node *root; 315 struct device_node *root;
316 const char *vec5; 316 const char *vec5;
317 317
318 root = of_find_node_by_path("/rtas"); 318 root = of_find_node_by_path("/rtas");
319 if (!root) 319 if (!root)
320 root = of_find_node_by_path("/"); 320 root = of_find_node_by_path("/");
321 321
322 /* 322 /*
323 * This property is a set of 32-bit integers, each representing 323 * This property is a set of 32-bit integers, each representing
324 * an index into the ibm,associativity nodes. 324 * an index into the ibm,associativity nodes.
325 * 325 *
326 * With form 0 affinity the first integer is for an SMP configuration 326 * With form 0 affinity the first integer is for an SMP configuration
327 * (should be all 0's) and the second is for a normal NUMA 327 * (should be all 0's) and the second is for a normal NUMA
328 * configuration. We have only one level of NUMA. 328 * configuration. We have only one level of NUMA.
329 * 329 *
330 * With form 1 affinity the first integer is the most significant 330 * With form 1 affinity the first integer is the most significant
331 * NUMA boundary and the following are progressively less significant 331 * NUMA boundary and the following are progressively less significant
332 * boundaries. There can be more than one level of NUMA. 332 * boundaries. There can be more than one level of NUMA.
333 */ 333 */
334 distance_ref_points = of_get_property(root, 334 distance_ref_points = of_get_property(root,
335 "ibm,associativity-reference-points", 335 "ibm,associativity-reference-points",
336 &distance_ref_points_depth); 336 &distance_ref_points_depth);
337 337
338 if (!distance_ref_points) { 338 if (!distance_ref_points) {
339 dbg("NUMA: ibm,associativity-reference-points not found.\n"); 339 dbg("NUMA: ibm,associativity-reference-points not found.\n");
340 goto err; 340 goto err;
341 } 341 }
342 342
343 distance_ref_points_depth /= sizeof(int); 343 distance_ref_points_depth /= sizeof(int);
344 344
345 #define VEC5_AFFINITY_BYTE 5 345 #define VEC5_AFFINITY_BYTE 5
346 #define VEC5_AFFINITY 0x80 346 #define VEC5_AFFINITY 0x80
347 chosen = of_find_node_by_path("/chosen"); 347 chosen = of_find_node_by_path("/chosen");
348 if (chosen) { 348 if (chosen) {
349 vec5 = of_get_property(chosen, "ibm,architecture-vec-5", NULL); 349 vec5 = of_get_property(chosen, "ibm,architecture-vec-5", NULL);
350 if (vec5 && (vec5[VEC5_AFFINITY_BYTE] & VEC5_AFFINITY)) { 350 if (vec5 && (vec5[VEC5_AFFINITY_BYTE] & VEC5_AFFINITY)) {
351 dbg("Using form 1 affinity\n"); 351 dbg("Using form 1 affinity\n");
352 form1_affinity = 1; 352 form1_affinity = 1;
353 } 353 }
354 } 354 }
355 355
356 if (form1_affinity) { 356 if (form1_affinity) {
357 depth = distance_ref_points[0]; 357 depth = distance_ref_points[0];
358 } else { 358 } else {
359 if (distance_ref_points_depth < 2) { 359 if (distance_ref_points_depth < 2) {
360 printk(KERN_WARNING "NUMA: " 360 printk(KERN_WARNING "NUMA: "
361 "short ibm,associativity-reference-points\n"); 361 "short ibm,associativity-reference-points\n");
362 goto err; 362 goto err;
363 } 363 }
364 364
365 depth = distance_ref_points[1]; 365 depth = distance_ref_points[1];
366 } 366 }
367 367
368 /* 368 /*
369 * Warn and cap if the hardware supports more than 369 * Warn and cap if the hardware supports more than
370 * MAX_DISTANCE_REF_POINTS domains. 370 * MAX_DISTANCE_REF_POINTS domains.
371 */ 371 */
372 if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) { 372 if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
373 printk(KERN_WARNING "NUMA: distance array capped at " 373 printk(KERN_WARNING "NUMA: distance array capped at "
374 "%d entries\n", MAX_DISTANCE_REF_POINTS); 374 "%d entries\n", MAX_DISTANCE_REF_POINTS);
375 distance_ref_points_depth = MAX_DISTANCE_REF_POINTS; 375 distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
376 } 376 }
377 377
378 of_node_put(root); 378 of_node_put(root);
379 return depth; 379 return depth;
380 380
381 err: 381 err:
382 of_node_put(root); 382 of_node_put(root);
383 return -1; 383 return -1;
384 } 384 }
385 385
386 static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells) 386 static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
387 { 387 {
388 struct device_node *memory = NULL; 388 struct device_node *memory = NULL;
389 389
390 memory = of_find_node_by_type(memory, "memory"); 390 memory = of_find_node_by_type(memory, "memory");
391 if (!memory) 391 if (!memory)
392 panic("numa.c: No memory nodes found!"); 392 panic("numa.c: No memory nodes found!");
393 393
394 *n_addr_cells = of_n_addr_cells(memory); 394 *n_addr_cells = of_n_addr_cells(memory);
395 *n_size_cells = of_n_size_cells(memory); 395 *n_size_cells = of_n_size_cells(memory);
396 of_node_put(memory); 396 of_node_put(memory);
397 } 397 }
398 398
399 static unsigned long __devinit read_n_cells(int n, const unsigned int **buf) 399 static unsigned long __devinit read_n_cells(int n, const unsigned int **buf)
400 { 400 {
401 unsigned long result = 0; 401 unsigned long result = 0;
402 402
403 while (n--) { 403 while (n--) {
404 result = (result << 32) | **buf; 404 result = (result << 32) | **buf;
405 (*buf)++; 405 (*buf)++;
406 } 406 }
407 return result; 407 return result;
408 } 408 }
409 409
410 struct of_drconf_cell { 410 struct of_drconf_cell {
411 u64 base_addr; 411 u64 base_addr;
412 u32 drc_index; 412 u32 drc_index;
413 u32 reserved; 413 u32 reserved;
414 u32 aa_index; 414 u32 aa_index;
415 u32 flags; 415 u32 flags;
416 }; 416 };
417 417
418 #define DRCONF_MEM_ASSIGNED 0x00000008 418 #define DRCONF_MEM_ASSIGNED 0x00000008
419 #define DRCONF_MEM_AI_INVALID 0x00000040 419 #define DRCONF_MEM_AI_INVALID 0x00000040
420 #define DRCONF_MEM_RESERVED 0x00000080 420 #define DRCONF_MEM_RESERVED 0x00000080
421 421
422 /* 422 /*
423 * Read the next memblock list entry from the ibm,dynamic-memory property 423 * Read the next memblock list entry from the ibm,dynamic-memory property
424 * and return the information in the provided of_drconf_cell structure. 424 * and return the information in the provided of_drconf_cell structure.
425 */ 425 */
426 static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp) 426 static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
427 { 427 {
428 const u32 *cp; 428 const u32 *cp;
429 429
430 drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp); 430 drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);
431 431
432 cp = *cellp; 432 cp = *cellp;
433 drmem->drc_index = cp[0]; 433 drmem->drc_index = cp[0];
434 drmem->reserved = cp[1]; 434 drmem->reserved = cp[1];
435 drmem->aa_index = cp[2]; 435 drmem->aa_index = cp[2];
436 drmem->flags = cp[3]; 436 drmem->flags = cp[3];
437 437
438 *cellp = cp + 4; 438 *cellp = cp + 4;
439 } 439 }
440 440
441 /* 441 /*
442 * Retrieve and validate the ibm,dynamic-memory property of the device tree. 442 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
443 * 443 *
444 * The layout of the ibm,dynamic-memory property is a count N of memblock 444 * The layout of the ibm,dynamic-memory property is a count N of memblock
445 * list entries followed by the N entries themselves. Each memblock list entry 445 * list entries followed by the N entries themselves. Each memblock list entry
446 * contains information as laid out in the of_drconf_cell struct above. 446 * contains information as laid out in the of_drconf_cell struct above.
447 */ 447 */
448 static int of_get_drconf_memory(struct device_node *memory, const u32 **dm) 448 static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
449 { 449 {
450 const u32 *prop; 450 const u32 *prop;
451 u32 len, entries; 451 u32 len, entries;
452 452
453 prop = of_get_property(memory, "ibm,dynamic-memory", &len); 453 prop = of_get_property(memory, "ibm,dynamic-memory", &len);
454 if (!prop || len < sizeof(unsigned int)) 454 if (!prop || len < sizeof(unsigned int))
455 return 0; 455 return 0;
456 456
457 entries = *prop++; 457 entries = *prop++;
458 458
459 /* Now that we know the number of entries, revalidate the size 459 /* Now that we know the number of entries, revalidate the size
460 * of the property read in to ensure we have everything 460 * of the property read in to ensure we have everything
461 */ 461 */
462 if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int)) 462 if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
463 return 0; 463 return 0;
464 464
465 *dm = prop; 465 *dm = prop;
466 return entries; 466 return entries;
467 } 467 }
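
As an illustration of the layout checked above, the following host-side sketch (not kernel code; values invented, and n_mem_addr_cells == 2 is assumed) walks a hypothetical ibm,dynamic-memory property the same way of_get_drconf_memory() and read_drconf_cell() do.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: one count cell, then per entry n_mem_addr_cells
 * address cells followed by drc_index, reserved, aa_index and flags.
 * All values below are made up. */
int main(void)
{
	const uint32_t prop[] = {
		2,                                      /* number of entries */
		0x0, 0x00000000, 0x80000000, 0, 0, 0x8, /* entry 0 */
		0x0, 0x10000000, 0x80000001, 0, 1, 0x8, /* entry 1 */
	};
	const int n_mem_addr_cells = 2;
	const uint32_t *p = prop;
	uint32_t n = *p++;

	while (n--) {
		uint64_t base = 0;
		int i;

		for (i = 0; i < n_mem_addr_cells; i++)
			base = (base << 32) | *p++;     /* as in read_n_cells() */

		printf("base=0x%llx drc_index=0x%x aa_index=%u flags=0x%x\n",
		       (unsigned long long)base, p[0], p[2], p[3]);
		p += 4;                                 /* as in read_drconf_cell() */
	}
	return 0;
}
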
468 468
469 /* 469 /*
470 * Retrieve and validate the ibm,lmb-size property for drconf memory 470 * Retrieve and validate the ibm,lmb-size property for drconf memory
471 * from the device tree. 471 * from the device tree.
472 */ 472 */
473 static u64 of_get_lmb_size(struct device_node *memory) 473 static u64 of_get_lmb_size(struct device_node *memory)
474 { 474 {
475 const u32 *prop; 475 const u32 *prop;
476 u32 len; 476 u32 len;
477 477
478 prop = of_get_property(memory, "ibm,lmb-size", &len); 478 prop = of_get_property(memory, "ibm,lmb-size", &len);
479 if (!prop || len < sizeof(unsigned int)) 479 if (!prop || len < sizeof(unsigned int))
480 return 0; 480 return 0;
481 481
482 return read_n_cells(n_mem_size_cells, &prop); 482 return read_n_cells(n_mem_size_cells, &prop);
483 } 483 }
484 484
485 struct assoc_arrays { 485 struct assoc_arrays {
486 u32 n_arrays; 486 u32 n_arrays;
487 u32 array_sz; 487 u32 array_sz;
488 const u32 *arrays; 488 const u32 *arrays;
489 }; 489 };
490 490
491 /* 491 /*
492 * Retrieve and validate the list of associativity arrays for drconf 492 * Retrieve and validate the list of associativity arrays for drconf
493 * memory from the ibm,associativity-lookup-arrays property of the 493 * memory from the ibm,associativity-lookup-arrays property of the
494 * device tree. 494 * device tree.
495 * 495 *
496 * The layout of the ibm,associativity-lookup-arrays property is a number N 496 * The layout of the ibm,associativity-lookup-arrays property is a number N
497 * indicating the number of associativity arrays, followed by a number M 497 * indicating the number of associativity arrays, followed by a number M
498 * indicating the size of each associativity array, followed by a list 498 * indicating the size of each associativity array, followed by a list
499 * of N associativity arrays. 499 * of N associativity arrays.
500 */ 500 */
501 static int of_get_assoc_arrays(struct device_node *memory, 501 static int of_get_assoc_arrays(struct device_node *memory,
502 struct assoc_arrays *aa) 502 struct assoc_arrays *aa)
503 { 503 {
504 const u32 *prop; 504 const u32 *prop;
505 u32 len; 505 u32 len;
506 506
507 prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len); 507 prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
508 if (!prop || len < 2 * sizeof(unsigned int)) 508 if (!prop || len < 2 * sizeof(unsigned int))
509 return -1; 509 return -1;
510 510
511 aa->n_arrays = *prop++; 511 aa->n_arrays = *prop++;
512 aa->array_sz = *prop++; 512 aa->array_sz = *prop++;
513 513
514 /* Now that we know the number of arrays and size of each array, 514 /* Now that we know the number of arrays and size of each array,
515 * revalidate the size of the property read in. 515 * revalidate the size of the property read in.
516 */ 516 */
517 if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int)) 517 if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
518 return -1; 518 return -1;
519 519
520 aa->arrays = prop; 520 aa->arrays = prop;
521 return 0; 521 return 0;
522 } 522 }
523 523
524 /* 524 /*
525 * This is like of_node_to_nid_single() for memory represented in the 525 * This is like of_node_to_nid_single() for memory represented in the
526 * ibm,dynamic-reconfiguration-memory node. 526 * ibm,dynamic-reconfiguration-memory node.
527 */ 527 */
528 static int of_drconf_to_nid_single(struct of_drconf_cell *drmem, 528 static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
529 struct assoc_arrays *aa) 529 struct assoc_arrays *aa)
530 { 530 {
531 int default_nid = 0; 531 int default_nid = 0;
532 int nid = default_nid; 532 int nid = default_nid;
533 int index; 533 int index;
534 534
535 if (min_common_depth > 0 && min_common_depth <= aa->array_sz && 535 if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
536 !(drmem->flags & DRCONF_MEM_AI_INVALID) && 536 !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
537 drmem->aa_index < aa->n_arrays) { 537 drmem->aa_index < aa->n_arrays) {
538 index = drmem->aa_index * aa->array_sz + min_common_depth - 1; 538 index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
539 nid = aa->arrays[index]; 539 nid = aa->arrays[index];
540 540
541 if (nid == 0xffff || nid >= MAX_NUMNODES) 541 if (nid == 0xffff || nid >= MAX_NUMNODES)
542 nid = default_nid; 542 nid = default_nid;
543 } 543 }
544 544
545 return nid; 545 return nid;
546 } 546 }
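
As a worked example of the indexing above: with two lookup arrays of four entries each and min_common_depth == 4, the node id for a block is the fourth entry of the array selected by its aa_index. The sketch below (not kernel code; values invented) reproduces just that calculation.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors how of_drconf_to_nid_single() indexes the
 * ibm,associativity-lookup-arrays data. Made-up domain numbers. */
int main(void)
{
	const uint32_t n_arrays = 2, array_sz = 4;
	const uint32_t arrays[] = {
		0, 0, 0, 0,	/* aa_index 0 -> node 0 */
		0, 0, 1, 1,	/* aa_index 1 -> node 1 */
	};
	const int min_common_depth = 4;
	uint32_t aa_index;

	for (aa_index = 0; aa_index < n_arrays; aa_index++) {
		uint32_t index = aa_index * array_sz + min_common_depth - 1;
		printf("aa_index %u -> nid %u\n", aa_index, arrays[index]);
	}
	return 0;
}
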
547 547
548 /* 548 /*
549 * Figure out to which domain a cpu belongs and stick it there. 549 * Figure out to which domain a cpu belongs and stick it there.
550 * Return the id of the domain used. 550 * Return the id of the domain used.
551 */ 551 */
552 static int __cpuinit numa_setup_cpu(unsigned long lcpu) 552 static int __cpuinit numa_setup_cpu(unsigned long lcpu)
553 { 553 {
554 int nid = 0; 554 int nid = 0;
555 struct device_node *cpu = of_get_cpu_node(lcpu, NULL); 555 struct device_node *cpu = of_get_cpu_node(lcpu, NULL);
556 556
557 if (!cpu) { 557 if (!cpu) {
558 WARN_ON(1); 558 WARN_ON(1);
559 goto out; 559 goto out;
560 } 560 }
561 561
562 nid = of_node_to_nid_single(cpu); 562 nid = of_node_to_nid_single(cpu);
563 563
564 if (nid < 0 || !node_online(nid)) 564 if (nid < 0 || !node_online(nid))
565 nid = first_online_node; 565 nid = first_online_node;
566 out: 566 out:
567 map_cpu_to_node(lcpu, nid); 567 map_cpu_to_node(lcpu, nid);
568 568
569 of_node_put(cpu); 569 of_node_put(cpu);
570 570
571 return nid; 571 return nid;
572 } 572 }
573 573
574 static int __cpuinit cpu_numa_callback(struct notifier_block *nfb, 574 static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
575 unsigned long action, 575 unsigned long action,
576 void *hcpu) 576 void *hcpu)
577 { 577 {
578 unsigned long lcpu = (unsigned long)hcpu; 578 unsigned long lcpu = (unsigned long)hcpu;
579 int ret = NOTIFY_DONE; 579 int ret = NOTIFY_DONE;
580 580
581 switch (action) { 581 switch (action) {
582 case CPU_UP_PREPARE: 582 case CPU_UP_PREPARE:
583 case CPU_UP_PREPARE_FROZEN: 583 case CPU_UP_PREPARE_FROZEN:
584 numa_setup_cpu(lcpu); 584 numa_setup_cpu(lcpu);
585 ret = NOTIFY_OK; 585 ret = NOTIFY_OK;
586 break; 586 break;
587 #ifdef CONFIG_HOTPLUG_CPU 587 #ifdef CONFIG_HOTPLUG_CPU
588 case CPU_DEAD: 588 case CPU_DEAD:
589 case CPU_DEAD_FROZEN: 589 case CPU_DEAD_FROZEN:
590 case CPU_UP_CANCELED: 590 case CPU_UP_CANCELED:
591 case CPU_UP_CANCELED_FROZEN: 591 case CPU_UP_CANCELED_FROZEN:
592 unmap_cpu_from_node(lcpu); 592 unmap_cpu_from_node(lcpu);
593 ret = NOTIFY_OK; 593 ret = NOTIFY_OK;
594 break; 594 break;
595 #endif 595 #endif
596 } 596 }
597 return ret; 597 return ret;
598 } 598 }
599 599
600 /* 600 /*
601 * Check and possibly modify a memory region to enforce the memory limit. 601 * Check and possibly modify a memory region to enforce the memory limit.
602 * 602 *
603 * Returns the size the region should have to enforce the memory limit. 603 * Returns the size the region should have to enforce the memory limit.
604 * This will either be the original value of size, a truncated value, 604 * This will either be the original value of size, a truncated value,
605 * or zero. If the returned value of size is 0 the region should be 605 * or zero. If the returned value of size is 0 the region should be
606 * discarded as it lies wholly above the memory limit. 606 * discarded as it lies wholly above the memory limit.
607 */ 607 */
608 static unsigned long __init numa_enforce_memory_limit(unsigned long start, 608 static unsigned long __init numa_enforce_memory_limit(unsigned long start,
609 unsigned long size) 609 unsigned long size)
610 { 610 {
611 /* 611 /*
612 * We use memblock_end_of_DRAM() in here instead of memory_limit because 612 * We use memblock_end_of_DRAM() in here instead of memory_limit because
613 * we've already adjusted it for the limit and it takes care of 613 * we've already adjusted it for the limit and it takes care of
614 * having memory holes below the limit. Also, in the case of 614 * having memory holes below the limit. Also, in the case of
615 * iommu_is_off, memory_limit is not set but is implicitly enforced. 615 * iommu_is_off, memory_limit is not set but is implicitly enforced.
616 */ 616 */
617 617
618 if (start + size <= memblock_end_of_DRAM()) 618 if (start + size <= memblock_end_of_DRAM())
619 return size; 619 return size;
620 620
621 if (start >= memblock_end_of_DRAM()) 621 if (start >= memblock_end_of_DRAM())
622 return 0; 622 return 0;
623 623
624 return memblock_end_of_DRAM() - start; 624 return memblock_end_of_DRAM() - start;
625 } 625 }
626 626
627 /* 627 /*
628 * Reads the counter for a given entry in 628 * Reads the counter for a given entry in
629 * linux,drconf-usable-memory property 629 * linux,drconf-usable-memory property
630 */ 630 */
631 static inline int __init read_usm_ranges(const u32 **usm) 631 static inline int __init read_usm_ranges(const u32 **usm)
632 { 632 {
633 /* 633 /*
634 * For each lmb in ibm,dynamic-memory a corresponding 634 * For each lmb in ibm,dynamic-memory a corresponding
635 * entry in the linux,drconf-usable-memory property contains 635 * entry in the linux,drconf-usable-memory property contains
636 * a counter followed by that many (base, size) duples. 636 * a counter followed by that many (base, size) duples.
637 * Read the counter from linux,drconf-usable-memory. 637 * Read the counter from linux,drconf-usable-memory.
638 */ 638 */
639 return read_n_cells(n_mem_size_cells, usm); 639 return read_n_cells(n_mem_size_cells, usm);
640 } 640 }
641 641
642 /* 642 /*
643 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory 643 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
644 * node. This assumes n_mem_{addr,size}_cells have been set. 644 * node. This assumes n_mem_{addr,size}_cells have been set.
645 */ 645 */
646 static void __init parse_drconf_memory(struct device_node *memory) 646 static void __init parse_drconf_memory(struct device_node *memory)
647 { 647 {
648 const u32 *dm, *usm; 648 const u32 *dm, *usm;
649 unsigned int n, rc, ranges, is_kexec_kdump = 0; 649 unsigned int n, rc, ranges, is_kexec_kdump = 0;
650 unsigned long lmb_size, base, size, sz; 650 unsigned long lmb_size, base, size, sz;
651 int nid; 651 int nid;
652 struct assoc_arrays aa; 652 struct assoc_arrays aa;
653 653
654 n = of_get_drconf_memory(memory, &dm); 654 n = of_get_drconf_memory(memory, &dm);
655 if (!n) 655 if (!n)
656 return; 656 return;
657 657
658 lmb_size = of_get_lmb_size(memory); 658 lmb_size = of_get_lmb_size(memory);
659 if (!lmb_size) 659 if (!lmb_size)
660 return; 660 return;
661 661
662 rc = of_get_assoc_arrays(memory, &aa); 662 rc = of_get_assoc_arrays(memory, &aa);
663 if (rc) 663 if (rc)
664 return; 664 return;
665 665
666 /* check if this is a kexec/kdump kernel */ 666 /* check if this is a kexec/kdump kernel */
667 usm = of_get_usable_memory(memory); 667 usm = of_get_usable_memory(memory);
668 if (usm != NULL) 668 if (usm != NULL)
669 is_kexec_kdump = 1; 669 is_kexec_kdump = 1;
670 670
671 for (; n != 0; --n) { 671 for (; n != 0; --n) {
672 struct of_drconf_cell drmem; 672 struct of_drconf_cell drmem;
673 673
674 read_drconf_cell(&drmem, &dm); 674 read_drconf_cell(&drmem, &dm);
675 675
676 /* skip this block if the reserved bit is set in flags (0x80) 676 /* skip this block if the reserved bit is set in flags (0x80)
677 or if the block is not assigned to this partition (0x8) */ 677 or if the block is not assigned to this partition (0x8) */
678 if ((drmem.flags & DRCONF_MEM_RESERVED) 678 if ((drmem.flags & DRCONF_MEM_RESERVED)
679 || !(drmem.flags & DRCONF_MEM_ASSIGNED)) 679 || !(drmem.flags & DRCONF_MEM_ASSIGNED))
680 continue; 680 continue;
681 681
682 base = drmem.base_addr; 682 base = drmem.base_addr;
683 size = lmb_size; 683 size = lmb_size;
684 ranges = 1; 684 ranges = 1;
685 685
686 if (is_kexec_kdump) { 686 if (is_kexec_kdump) {
687 ranges = read_usm_ranges(&usm); 687 ranges = read_usm_ranges(&usm);
688 if (!ranges) /* there are no (base, size) duples */ 688 if (!ranges) /* there are no (base, size) duples */
689 continue; 689 continue;
690 } 690 }
691 do { 691 do {
692 if (is_kexec_kdump) { 692 if (is_kexec_kdump) {
693 base = read_n_cells(n_mem_addr_cells, &usm); 693 base = read_n_cells(n_mem_addr_cells, &usm);
694 size = read_n_cells(n_mem_size_cells, &usm); 694 size = read_n_cells(n_mem_size_cells, &usm);
695 } 695 }
696 nid = of_drconf_to_nid_single(&drmem, &aa); 696 nid = of_drconf_to_nid_single(&drmem, &aa);
697 fake_numa_create_new_node( 697 fake_numa_create_new_node(
698 ((base + size) >> PAGE_SHIFT), 698 ((base + size) >> PAGE_SHIFT),
699 &nid); 699 &nid);
700 node_set_online(nid); 700 node_set_online(nid);
701 sz = numa_enforce_memory_limit(base, size); 701 sz = numa_enforce_memory_limit(base, size);
702 if (sz) 702 if (sz)
703 add_active_range(nid, base >> PAGE_SHIFT, 703 add_active_range(nid, base >> PAGE_SHIFT,
704 (base >> PAGE_SHIFT) 704 (base >> PAGE_SHIFT)
705 + (sz >> PAGE_SHIFT)); 705 + (sz >> PAGE_SHIFT));
706 } while (--ranges); 706 } while (--ranges);
707 } 707 }
708 } 708 }
709 709
710 static int __init parse_numa_properties(void) 710 static int __init parse_numa_properties(void)
711 { 711 {
712 struct device_node *cpu = NULL; 712 struct device_node *cpu = NULL;
713 struct device_node *memory = NULL; 713 struct device_node *memory = NULL;
714 int default_nid = 0; 714 int default_nid = 0;
715 unsigned long i; 715 unsigned long i;
716 716
717 if (numa_enabled == 0) { 717 if (numa_enabled == 0) {
718 printk(KERN_WARNING "NUMA disabled by user\n"); 718 printk(KERN_WARNING "NUMA disabled by user\n");
719 return -1; 719 return -1;
720 } 720 }
721 721
722 min_common_depth = find_min_common_depth(); 722 min_common_depth = find_min_common_depth();
723 723
724 if (min_common_depth < 0) 724 if (min_common_depth < 0)
725 return min_common_depth; 725 return min_common_depth;
726 726
727 dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth); 727 dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
728 728
729 /* 729 /*
730 * Even though we connect cpus to numa domains later in SMP 730 * Even though we connect cpus to numa domains later in SMP
731 * init, we need to know the node ids now. This is because 731 * init, we need to know the node ids now. This is because
732 * each node to be onlined must have NODE_DATA etc backing it. 732 * each node to be onlined must have NODE_DATA etc backing it.
733 */ 733 */
734 for_each_present_cpu(i) { 734 for_each_present_cpu(i) {
735 int nid; 735 int nid;
736 736
737 cpu = of_get_cpu_node(i, NULL); 737 cpu = of_get_cpu_node(i, NULL);
738 BUG_ON(!cpu); 738 BUG_ON(!cpu);
739 nid = of_node_to_nid_single(cpu); 739 nid = of_node_to_nid_single(cpu);
740 of_node_put(cpu); 740 of_node_put(cpu);
741 741
742 /* 742 /*
743 * Don't fall back to default_nid yet -- we will plug 743 * Don't fall back to default_nid yet -- we will plug
744 * cpus into nodes once the memory scan has discovered 744 * cpus into nodes once the memory scan has discovered
745 * the topology. 745 * the topology.
746 */ 746 */
747 if (nid < 0) 747 if (nid < 0)
748 continue; 748 continue;
749 node_set_online(nid); 749 node_set_online(nid);
750 } 750 }
751 751
752 get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells); 752 get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
753 memory = NULL; 753 memory = NULL;
754 while ((memory = of_find_node_by_type(memory, "memory")) != NULL) { 754 while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
755 unsigned long start; 755 unsigned long start;
756 unsigned long size; 756 unsigned long size;
757 int nid; 757 int nid;
758 int ranges; 758 int ranges;
759 const unsigned int *memcell_buf; 759 const unsigned int *memcell_buf;
760 unsigned int len; 760 unsigned int len;
761 761
762 memcell_buf = of_get_property(memory, 762 memcell_buf = of_get_property(memory,
763 "linux,usable-memory", &len); 763 "linux,usable-memory", &len);
764 if (!memcell_buf || len <= 0) 764 if (!memcell_buf || len <= 0)
765 memcell_buf = of_get_property(memory, "reg", &len); 765 memcell_buf = of_get_property(memory, "reg", &len);
766 if (!memcell_buf || len <= 0) 766 if (!memcell_buf || len <= 0)
767 continue; 767 continue;
768 768
769 /* ranges in cell */ 769 /* ranges in cell */
770 ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells); 770 ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
771 new_range: 771 new_range:
772 /* these are order-sensitive, and modify the buffer pointer */ 772 /* these are order-sensitive, and modify the buffer pointer */
773 start = read_n_cells(n_mem_addr_cells, &memcell_buf); 773 start = read_n_cells(n_mem_addr_cells, &memcell_buf);
774 size = read_n_cells(n_mem_size_cells, &memcell_buf); 774 size = read_n_cells(n_mem_size_cells, &memcell_buf);
775 775
776 /* 776 /*
777 * Assumption: either all memory nodes or none will 777 * Assumption: either all memory nodes or none will
778 * have associativity properties. If none, then 778 * have associativity properties. If none, then
779 * everything goes to default_nid. 779 * everything goes to default_nid.
780 */ 780 */
781 nid = of_node_to_nid_single(memory); 781 nid = of_node_to_nid_single(memory);
782 if (nid < 0) 782 if (nid < 0)
783 nid = default_nid; 783 nid = default_nid;
784 784
785 fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid); 785 fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
786 node_set_online(nid); 786 node_set_online(nid);
787 787
788 if (!(size = numa_enforce_memory_limit(start, size))) { 788 if (!(size = numa_enforce_memory_limit(start, size))) {
789 if (--ranges) 789 if (--ranges)
790 goto new_range; 790 goto new_range;
791 else 791 else
792 continue; 792 continue;
793 } 793 }
794 794
795 add_active_range(nid, start >> PAGE_SHIFT, 795 add_active_range(nid, start >> PAGE_SHIFT,
796 (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT)); 796 (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));
797 797
798 if (--ranges) 798 if (--ranges)
799 goto new_range; 799 goto new_range;
800 } 800 }
801 801
802 /* 802 /*
803 * Now do the same thing for each MEMBLOCK listed in the ibm,dynamic-memory 803 * Now do the same thing for each MEMBLOCK listed in the ibm,dynamic-memory
804 * property in the ibm,dynamic-reconfiguration-memory node. 804 * property in the ibm,dynamic-reconfiguration-memory node.
805 */ 805 */
806 memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); 806 memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
807 if (memory) 807 if (memory)
808 parse_drconf_memory(memory); 808 parse_drconf_memory(memory);
809 809
810 return 0; 810 return 0;
811 } 811 }
812 812
813 static void __init setup_nonnuma(void) 813 static void __init setup_nonnuma(void)
814 { 814 {
815 unsigned long top_of_ram = memblock_end_of_DRAM(); 815 unsigned long top_of_ram = memblock_end_of_DRAM();
816 unsigned long total_ram = memblock_phys_mem_size(); 816 unsigned long total_ram = memblock_phys_mem_size();
817 unsigned long start_pfn, end_pfn; 817 unsigned long start_pfn, end_pfn;
818 unsigned int nid = 0; 818 unsigned int nid = 0;
819 struct memblock_region *reg; 819 struct memblock_region *reg;
820 820
821 printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", 821 printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
822 top_of_ram, total_ram); 822 top_of_ram, total_ram);
823 printk(KERN_DEBUG "Memory hole size: %ldMB\n", 823 printk(KERN_DEBUG "Memory hole size: %ldMB\n",
824 (top_of_ram - total_ram) >> 20); 824 (top_of_ram - total_ram) >> 20);
825 825
826 for_each_memblock(memory, reg) { 826 for_each_memblock(memory, reg) {
827 start_pfn = memblock_region_memory_base_pfn(reg); 827 start_pfn = memblock_region_memory_base_pfn(reg);
828 end_pfn = memblock_region_memory_end_pfn(reg); 828 end_pfn = memblock_region_memory_end_pfn(reg);
829 829
830 fake_numa_create_new_node(end_pfn, &nid); 830 fake_numa_create_new_node(end_pfn, &nid);
831 add_active_range(nid, start_pfn, end_pfn); 831 add_active_range(nid, start_pfn, end_pfn);
832 node_set_online(nid); 832 node_set_online(nid);
833 } 833 }
834 } 834 }
835 835
836 void __init dump_numa_cpu_topology(void) 836 void __init dump_numa_cpu_topology(void)
837 { 837 {
838 unsigned int node; 838 unsigned int node;
839 unsigned int cpu, count; 839 unsigned int cpu, count;
840 840
841 if (min_common_depth == -1 || !numa_enabled) 841 if (min_common_depth == -1 || !numa_enabled)
842 return; 842 return;
843 843
844 for_each_online_node(node) { 844 for_each_online_node(node) {
845 printk(KERN_DEBUG "Node %d CPUs:", node); 845 printk(KERN_DEBUG "Node %d CPUs:", node);
846 846
847 count = 0; 847 count = 0;
848 /* 848 /*
849 * If we used a CPU iterator here we would miss printing 849 * If we used a CPU iterator here we would miss printing
850 * the holes in the cpumap. 850 * the holes in the cpumap.
851 */ 851 */
852 for (cpu = 0; cpu < nr_cpu_ids; cpu++) { 852 for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
853 if (cpumask_test_cpu(cpu, 853 if (cpumask_test_cpu(cpu,
854 node_to_cpumask_map[node])) { 854 node_to_cpumask_map[node])) {
855 if (count == 0) 855 if (count == 0)
856 printk(" %u", cpu); 856 printk(" %u", cpu);
857 ++count; 857 ++count;
858 } else { 858 } else {
859 if (count > 1) 859 if (count > 1)
860 printk("-%u", cpu - 1); 860 printk("-%u", cpu - 1);
861 count = 0; 861 count = 0;
862 } 862 }
863 } 863 }
864 864
865 if (count > 1) 865 if (count > 1)
866 printk("-%u", nr_cpu_ids - 1); 866 printk("-%u", nr_cpu_ids - 1);
867 printk("\n"); 867 printk("\n");
868 } 868 }
869 } 869 }
870 870
871 static void __init dump_numa_memory_topology(void) 871 static void __init dump_numa_memory_topology(void)
872 { 872 {
873 unsigned int node; 873 unsigned int node;
874 unsigned int count; 874 unsigned int count;
875 875
876 if (min_common_depth == -1 || !numa_enabled) 876 if (min_common_depth == -1 || !numa_enabled)
877 return; 877 return;
878 878
879 for_each_online_node(node) { 879 for_each_online_node(node) {
880 unsigned long i; 880 unsigned long i;
881 881
882 printk(KERN_DEBUG "Node %d Memory:", node); 882 printk(KERN_DEBUG "Node %d Memory:", node);
883 883
884 count = 0; 884 count = 0;
885 885
886 for (i = 0; i < memblock_end_of_DRAM(); 886 for (i = 0; i < memblock_end_of_DRAM();
887 i += (1 << SECTION_SIZE_BITS)) { 887 i += (1 << SECTION_SIZE_BITS)) {
888 if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) { 888 if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
889 if (count == 0) 889 if (count == 0)
890 printk(" 0x%lx", i); 890 printk(" 0x%lx", i);
891 ++count; 891 ++count;
892 } else { 892 } else {
893 if (count > 0) 893 if (count > 0)
894 printk("-0x%lx", i); 894 printk("-0x%lx", i);
895 count = 0; 895 count = 0;
896 } 896 }
897 } 897 }
898 898
899 if (count > 0) 899 if (count > 0)
900 printk("-0x%lx", i); 900 printk("-0x%lx", i);
901 printk("\n"); 901 printk("\n");
902 } 902 }
903 } 903 }
904 904
905 /* 905 /*
906 * Allocate some memory, satisfying the memblock or bootmem allocator where 906 * Allocate some memory, satisfying the memblock or bootmem allocator where
907 * required. nid is the preferred node and end_pfn is the highest page 907 * required. nid is the preferred node and end_pfn is the highest page
908 * frame number in the node. 908 * frame number in the node.
909 * 909 *
910 * Returns the virtual address of the memory. 910 * Returns the virtual address of the memory.
911 */ 911 */
912 static void __init *careful_zallocation(int nid, unsigned long size, 912 static void __init *careful_zallocation(int nid, unsigned long size,
913 unsigned long align, 913 unsigned long align,
914 unsigned long end_pfn) 914 unsigned long end_pfn)
915 { 915 {
916 void *ret; 916 void *ret;
917 int new_nid; 917 int new_nid;
918 unsigned long ret_paddr; 918 unsigned long ret_paddr;
919 919
920 ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT); 920 ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);
921 921
922 /* retry over all memory */ 922 /* retry over all memory */
923 if (!ret_paddr) 923 if (!ret_paddr)
924 ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM()); 924 ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());
925 925
926 if (!ret_paddr) 926 if (!ret_paddr)
927 panic("numa.c: cannot allocate %lu bytes for node %d", 927 panic("numa.c: cannot allocate %lu bytes for node %d",
928 size, nid); 928 size, nid);
929 929
930 ret = __va(ret_paddr); 930 ret = __va(ret_paddr);
931 931
932 /* 932 /*
933 * We initialize the nodes in numeric order: 0, 1, 2... 933 * We initialize the nodes in numeric order: 0, 1, 2...
934 * and hand over control from the MEMBLOCK allocator to the 934 * and hand over control from the MEMBLOCK allocator to the
935 * bootmem allocator. If this function is called for 935 * bootmem allocator. If this function is called for
936 * node 5, then we know that all nodes <5 are using the 936 * node 5, then we know that all nodes <5 are using the
937 * bootmem allocator instead of the MEMBLOCK allocator. 937 * bootmem allocator instead of the MEMBLOCK allocator.
938 * 938 *
939 * So, check the nid from which this allocation came 939 * So, check the nid from which this allocation came
940 * and double check to see if we need to use bootmem 940 * and double check to see if we need to use bootmem
941 * instead of the MEMBLOCK. We don't free the MEMBLOCK memory 941 * instead of the MEMBLOCK. We don't free the MEMBLOCK memory
942 * since it would be useless. 942 * since it would be useless.
943 */ 943 */
944 new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT); 944 new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
945 if (new_nid < nid) { 945 if (new_nid < nid) {
946 ret = __alloc_bootmem_node(NODE_DATA(new_nid), 946 ret = __alloc_bootmem_node(NODE_DATA(new_nid),
947 size, align, 0); 947 size, align, 0);
948 948
949 dbg("alloc_bootmem %p %lx\n", ret, size); 949 dbg("alloc_bootmem %p %lx\n", ret, size);
950 } 950 }
951 951
952 memset(ret, 0, size); 952 memset(ret, 0, size);
953 return ret; 953 return ret;
954 } 954 }
955 955
956 static struct notifier_block __cpuinitdata ppc64_numa_nb = { 956 static struct notifier_block __cpuinitdata ppc64_numa_nb = {
957 .notifier_call = cpu_numa_callback, 957 .notifier_call = cpu_numa_callback,
958 .priority = 1 /* Must run before sched domains notifier. */ 958 .priority = 1 /* Must run before sched domains notifier. */
959 }; 959 };
960 960
961 static void mark_reserved_regions_for_nid(int nid) 961 static void mark_reserved_regions_for_nid(int nid)
962 { 962 {
963 struct pglist_data *node = NODE_DATA(nid); 963 struct pglist_data *node = NODE_DATA(nid);
964 struct memblock_region *reg; 964 struct memblock_region *reg;
965 965
966 for_each_memblock(reserved, reg) { 966 for_each_memblock(reserved, reg) {
967 unsigned long physbase = reg->base; 967 unsigned long physbase = reg->base;
968 unsigned long size = reg->size; 968 unsigned long size = reg->size;
969 unsigned long start_pfn = physbase >> PAGE_SHIFT; 969 unsigned long start_pfn = physbase >> PAGE_SHIFT;
970 unsigned long end_pfn = PFN_UP(physbase + size); 970 unsigned long end_pfn = PFN_UP(physbase + size);
971 struct node_active_region node_ar; 971 struct node_active_region node_ar;
972 unsigned long node_end_pfn = node->node_start_pfn + 972 unsigned long node_end_pfn = node->node_start_pfn +
973 node->node_spanned_pages; 973 node->node_spanned_pages;
974 974
975 /* 975 /*
976 * Check to make sure that this memblock.reserved area is 976 * Check to make sure that this memblock.reserved area is
977 * within the bounds of the node that we care about. 977 * within the bounds of the node that we care about.
978 * Checking the nid of the start and end points is not 978 * Checking the nid of the start and end points is not
979 * sufficient because the reserved area could span the 979 * sufficient because the reserved area could span the
980 * entire node. 980 * entire node.
981 */ 981 */
982 if (end_pfn <= node->node_start_pfn || 982 if (end_pfn <= node->node_start_pfn ||
983 start_pfn >= node_end_pfn) 983 start_pfn >= node_end_pfn)
984 continue; 984 continue;
985 985
986 get_node_active_region(start_pfn, &node_ar); 986 get_node_active_region(start_pfn, &node_ar);
987 while (start_pfn < end_pfn && 987 while (start_pfn < end_pfn &&
988 node_ar.start_pfn < node_ar.end_pfn) { 988 node_ar.start_pfn < node_ar.end_pfn) {
989 unsigned long reserve_size = size; 989 unsigned long reserve_size = size;
990 /* 990 /*
991 * if reserved region extends past active region 991 * if reserved region extends past active region
992 * then trim size to active region 992 * then trim size to active region
993 */ 993 */
994 if (end_pfn > node_ar.end_pfn) 994 if (end_pfn > node_ar.end_pfn)
995 reserve_size = (node_ar.end_pfn << PAGE_SHIFT) 995 reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
996 - physbase; 996 - physbase;
997 /* 997 /*
998 * Only worry about *this* node, others may not 998 * Only worry about *this* node, others may not
999 * yet have valid NODE_DATA(). 999 * yet have valid NODE_DATA().
1000 */ 1000 */
1001 if (node_ar.nid == nid) { 1001 if (node_ar.nid == nid) {
1002 dbg("reserve_bootmem %lx %lx nid=%d\n", 1002 dbg("reserve_bootmem %lx %lx nid=%d\n",
1003 physbase, reserve_size, node_ar.nid); 1003 physbase, reserve_size, node_ar.nid);
1004 reserve_bootmem_node(NODE_DATA(node_ar.nid), 1004 reserve_bootmem_node(NODE_DATA(node_ar.nid),
1005 physbase, reserve_size, 1005 physbase, reserve_size,
1006 BOOTMEM_DEFAULT); 1006 BOOTMEM_DEFAULT);
1007 } 1007 }
1008 /* 1008 /*
1009 * if reserved region is contained in the active region 1009 * if reserved region is contained in the active region
1010 * then done. 1010 * then done.
1011 */ 1011 */
1012 if (end_pfn <= node_ar.end_pfn) 1012 if (end_pfn <= node_ar.end_pfn)
1013 break; 1013 break;
1014 1014
1015 /* 1015 /*
1016 * reserved region extends past the active region 1016 * reserved region extends past the active region
1017 * get next active region that contains this 1017 * get next active region that contains this
1018 * reserved region 1018 * reserved region
1019 */ 1019 */
1020 start_pfn = node_ar.end_pfn; 1020 start_pfn = node_ar.end_pfn;
1021 physbase = start_pfn << PAGE_SHIFT; 1021 physbase = start_pfn << PAGE_SHIFT;
1022 size = size - reserve_size; 1022 size = size - reserve_size;
1023 get_node_active_region(start_pfn, &node_ar); 1023 get_node_active_region(start_pfn, &node_ar);
1024 } 1024 }
1025 } 1025 }
1026 } 1026 }
1027 1027
1028 1028
1029 void __init do_init_bootmem(void) 1029 void __init do_init_bootmem(void)
1030 { 1030 {
1031 int nid; 1031 int nid;
1032 1032
1033 min_low_pfn = 0; 1033 min_low_pfn = 0;
1034 max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; 1034 max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
1035 max_pfn = max_low_pfn; 1035 max_pfn = max_low_pfn;
1036 1036
1037 if (parse_numa_properties()) 1037 if (parse_numa_properties())
1038 setup_nonnuma(); 1038 setup_nonnuma();
1039 else 1039 else
1040 dump_numa_memory_topology(); 1040 dump_numa_memory_topology();
1041 1041
1042 for_each_online_node(nid) { 1042 for_each_online_node(nid) {
1043 unsigned long start_pfn, end_pfn; 1043 unsigned long start_pfn, end_pfn;
1044 void *bootmem_vaddr; 1044 void *bootmem_vaddr;
1045 unsigned long bootmap_pages; 1045 unsigned long bootmap_pages;
1046 1046
1047 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 1047 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
1048 1048
1049 /* 1049 /*
1050 * Allocate the node structure node local if possible 1050 * Allocate the node structure node local if possible
1051 * 1051 *
1052 * Be careful moving this around, as it relies on all 1052 * Be careful moving this around, as it relies on all
1053 * previous nodes' bootmem to be initialized and have 1053 * previous nodes' bootmem to be initialized and have
1054 * all reserved areas marked. 1054 * all reserved areas marked.
1055 */ 1055 */
1056 NODE_DATA(nid) = careful_zallocation(nid, 1056 NODE_DATA(nid) = careful_zallocation(nid,
1057 sizeof(struct pglist_data), 1057 sizeof(struct pglist_data),
1058 SMP_CACHE_BYTES, end_pfn); 1058 SMP_CACHE_BYTES, end_pfn);
1059 1059
1060 dbg("node %d\n", nid); 1060 dbg("node %d\n", nid);
1061 dbg("NODE_DATA() = %p\n", NODE_DATA(nid)); 1061 dbg("NODE_DATA() = %p\n", NODE_DATA(nid));
1062 1062
1063 NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; 1063 NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
1064 NODE_DATA(nid)->node_start_pfn = start_pfn; 1064 NODE_DATA(nid)->node_start_pfn = start_pfn;
1065 NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; 1065 NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
1066 1066
1067 if (NODE_DATA(nid)->node_spanned_pages == 0) 1067 if (NODE_DATA(nid)->node_spanned_pages == 0)
1068 continue; 1068 continue;
1069 1069
1070 dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT); 1070 dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
1071 dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT); 1071 dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);
1072 1072
1073 bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn); 1073 bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
1074 bootmem_vaddr = careful_zallocation(nid, 1074 bootmem_vaddr = careful_zallocation(nid,
1075 bootmap_pages << PAGE_SHIFT, 1075 bootmap_pages << PAGE_SHIFT,
1076 PAGE_SIZE, end_pfn); 1076 PAGE_SIZE, end_pfn);
1077 1077
1078 dbg("bootmap_vaddr = %p\n", bootmem_vaddr); 1078 dbg("bootmap_vaddr = %p\n", bootmem_vaddr);
1079 1079
1080 init_bootmem_node(NODE_DATA(nid), 1080 init_bootmem_node(NODE_DATA(nid),
1081 __pa(bootmem_vaddr) >> PAGE_SHIFT, 1081 __pa(bootmem_vaddr) >> PAGE_SHIFT,
1082 start_pfn, end_pfn); 1082 start_pfn, end_pfn);
1083 1083
1084 free_bootmem_with_active_regions(nid, end_pfn); 1084 free_bootmem_with_active_regions(nid, end_pfn);
1085 /* 1085 /*
1086 * Be very careful about moving this around. Future 1086 * Be very careful about moving this around. Future
1087 * calls to careful_zallocation() depend on this getting 1087 * calls to careful_zallocation() depend on this getting
1088 * done correctly. 1088 * done correctly.
1089 */ 1089 */
1090 mark_reserved_regions_for_nid(nid); 1090 mark_reserved_regions_for_nid(nid);
1091 sparse_memory_present_with_active_regions(nid); 1091 sparse_memory_present_with_active_regions(nid);
1092 } 1092 }
1093 1093
1094 init_bootmem_done = 1; 1094 init_bootmem_done = 1;
1095 1095
1096 /* 1096 /*
1097 * Now bootmem is initialised we can create the node to cpumask 1097 * Now bootmem is initialised we can create the node to cpumask
1098 * lookup tables and setup the cpu callback to populate them. 1098 * lookup tables and setup the cpu callback to populate them.
1099 */ 1099 */
1100 setup_node_to_cpumask_map(); 1100 setup_node_to_cpumask_map();
1101 1101
1102 register_cpu_notifier(&ppc64_numa_nb); 1102 register_cpu_notifier(&ppc64_numa_nb);
1103 cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE, 1103 cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
1104 (void *)(unsigned long)boot_cpuid); 1104 (void *)(unsigned long)boot_cpuid);
1105 } 1105 }
1106 1106
1107 void __init paging_init(void) 1107 void __init paging_init(void)
1108 { 1108 {
1109 unsigned long max_zone_pfns[MAX_NR_ZONES]; 1109 unsigned long max_zone_pfns[MAX_NR_ZONES];
1110 memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); 1110 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
1111 max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT; 1111 max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
1112 free_area_init_nodes(max_zone_pfns); 1112 free_area_init_nodes(max_zone_pfns);
1113 } 1113 }
1114 1114
1115 static int __init early_numa(char *p) 1115 static int __init early_numa(char *p)
1116 { 1116 {
1117 if (!p) 1117 if (!p)
1118 return 0; 1118 return 0;
1119 1119
1120 if (strstr(p, "off")) 1120 if (strstr(p, "off"))
1121 numa_enabled = 0; 1121 numa_enabled = 0;
1122 1122
1123 if (strstr(p, "debug")) 1123 if (strstr(p, "debug"))
1124 numa_debug = 1; 1124 numa_debug = 1;
1125 1125
1126 p = strstr(p, "fake="); 1126 p = strstr(p, "fake=");
1127 if (p) 1127 if (p)
1128 cmdline = p + strlen("fake="); 1128 cmdline = p + strlen("fake=");
1129 1129
1130 return 0; 1130 return 0;
1131 } 1131 }
1132 early_param("numa", early_numa); 1132 early_param("numa", early_numa);
1133 1133
1134 #ifdef CONFIG_MEMORY_HOTPLUG 1134 #ifdef CONFIG_MEMORY_HOTPLUG
1135 /* 1135 /*
1136 * Find the node associated with a hot added memory section for 1136 * Find the node associated with a hot added memory section for
1137 * memory represented in the device tree by the property 1137 * memory represented in the device tree by the property
1138 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory. 1138 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
1139 */ 1139 */
1140 static int hot_add_drconf_scn_to_nid(struct device_node *memory, 1140 static int hot_add_drconf_scn_to_nid(struct device_node *memory,
1141 unsigned long scn_addr) 1141 unsigned long scn_addr)
1142 { 1142 {
1143 const u32 *dm; 1143 const u32 *dm;
1144 unsigned int drconf_cell_cnt, rc; 1144 unsigned int drconf_cell_cnt, rc;
1145 unsigned long lmb_size; 1145 unsigned long lmb_size;
1146 struct assoc_arrays aa; 1146 struct assoc_arrays aa;
1147 int nid = -1; 1147 int nid = -1;
1148 1148
1149 drconf_cell_cnt = of_get_drconf_memory(memory, &dm); 1149 drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
1150 if (!drconf_cell_cnt) 1150 if (!drconf_cell_cnt)
1151 return -1; 1151 return -1;
1152 1152
1153 lmb_size = of_get_lmb_size(memory); 1153 lmb_size = of_get_lmb_size(memory);
1154 if (!lmb_size) 1154 if (!lmb_size)
1155 return -1; 1155 return -1;
1156 1156
1157 rc = of_get_assoc_arrays(memory, &aa); 1157 rc = of_get_assoc_arrays(memory, &aa);
1158 if (rc) 1158 if (rc)
1159 return -1; 1159 return -1;
1160 1160
1161 for (; drconf_cell_cnt != 0; --drconf_cell_cnt) { 1161 for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
1162 struct of_drconf_cell drmem; 1162 struct of_drconf_cell drmem;
1163 1163
1164 read_drconf_cell(&drmem, &dm); 1164 read_drconf_cell(&drmem, &dm);
1165 1165
1166 /* skip this block if it is reserved or not assigned to 1166 /* skip this block if it is reserved or not assigned to
1167 * this partition */ 1167 * this partition */
1168 if ((drmem.flags & DRCONF_MEM_RESERVED) 1168 if ((drmem.flags & DRCONF_MEM_RESERVED)
1169 || !(drmem.flags & DRCONF_MEM_ASSIGNED)) 1169 || !(drmem.flags & DRCONF_MEM_ASSIGNED))
1170 continue; 1170 continue;
1171 1171
1172 if ((scn_addr < drmem.base_addr) 1172 if ((scn_addr < drmem.base_addr)
1173 || (scn_addr >= (drmem.base_addr + lmb_size))) 1173 || (scn_addr >= (drmem.base_addr + lmb_size)))
1174 continue; 1174 continue;
1175 1175
1176 nid = of_drconf_to_nid_single(&drmem, &aa); 1176 nid = of_drconf_to_nid_single(&drmem, &aa);
1177 break; 1177 break;
1178 } 1178 }
1179 1179
1180 return nid; 1180 return nid;
1181 } 1181 }
1182 1182
1183 /* 1183 /*
1184 * Find the node associated with a hot added memory section for memory 1184 * Find the node associated with a hot added memory section for memory
1185 * represented in the device tree as a node (i.e. memory@XXXX) for 1185 * represented in the device tree as a node (i.e. memory@XXXX) for
1186 * each memblock. 1186 * each memblock.
1187 */ 1187 */
1188 int hot_add_node_scn_to_nid(unsigned long scn_addr) 1188 int hot_add_node_scn_to_nid(unsigned long scn_addr)
1189 { 1189 {
1190 struct device_node *memory = NULL; 1190 struct device_node *memory = NULL;
1191 int nid = -1; 1191 int nid = -1;
1192 1192
1193 while ((memory = of_find_node_by_type(memory, "memory")) != NULL) { 1193 while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
1194 unsigned long start, size; 1194 unsigned long start, size;
1195 int ranges; 1195 int ranges;
1196 const unsigned int *memcell_buf; 1196 const unsigned int *memcell_buf;
1197 unsigned int len; 1197 unsigned int len;
1198 1198
1199 memcell_buf = of_get_property(memory, "reg", &len); 1199 memcell_buf = of_get_property(memory, "reg", &len);
1200 if (!memcell_buf || len <= 0) 1200 if (!memcell_buf || len <= 0)
1201 continue; 1201 continue;
1202 1202
1203 /* ranges in cell */ 1203 /* ranges in cell */
1204 ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells); 1204 ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
1205 1205
1206 while (ranges--) { 1206 while (ranges--) {
1207 start = read_n_cells(n_mem_addr_cells, &memcell_buf); 1207 start = read_n_cells(n_mem_addr_cells, &memcell_buf);
1208 size = read_n_cells(n_mem_size_cells, &memcell_buf); 1208 size = read_n_cells(n_mem_size_cells, &memcell_buf);
1209 1209
1210 if ((scn_addr < start) || (scn_addr >= (start + size))) 1210 if ((scn_addr < start) || (scn_addr >= (start + size)))
1211 continue; 1211 continue;
1212 1212
1213 nid = of_node_to_nid_single(memory); 1213 nid = of_node_to_nid_single(memory);
1214 break; 1214 break;
1215 } 1215 }
1216 1216
1217 of_node_put(memory); 1217 of_node_put(memory);
1218 if (nid >= 0) 1218 if (nid >= 0)
1219 break; 1219 break;
1220 } 1220 }
1221 1221
1222 return nid; 1222 return nid;
1223 } 1223 }
1224 1224
1225 /* 1225 /*
1226 * Find the node associated with a hot added memory section. Section 1226 * Find the node associated with a hot added memory section. Section
1227 * corresponds to a SPARSEMEM section, not a MEMBLOCK. It is assumed that 1227 * corresponds to a SPARSEMEM section, not a MEMBLOCK. It is assumed that
1228 * sections are fully contained within a single MEMBLOCK. 1228 * sections are fully contained within a single MEMBLOCK.
1229 */ 1229 */
1230 int hot_add_scn_to_nid(unsigned long scn_addr) 1230 int hot_add_scn_to_nid(unsigned long scn_addr)
1231 { 1231 {
1232 struct device_node *memory = NULL; 1232 struct device_node *memory = NULL;
1233 int nid, found = 0; 1233 int nid, found = 0;
1234 1234
1235 if (!numa_enabled || (min_common_depth < 0)) 1235 if (!numa_enabled || (min_common_depth < 0))
1236 return first_online_node; 1236 return first_online_node;
1237 1237
1238 memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); 1238 memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
1239 if (memory) { 1239 if (memory) {
1240 nid = hot_add_drconf_scn_to_nid(memory, scn_addr); 1240 nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
1241 of_node_put(memory); 1241 of_node_put(memory);
1242 } else { 1242 } else {
1243 nid = hot_add_node_scn_to_nid(scn_addr); 1243 nid = hot_add_node_scn_to_nid(scn_addr);
1244 } 1244 }
1245 1245
1246 if (nid < 0 || !node_online(nid)) 1246 if (nid < 0 || !node_online(nid))
1247 nid = first_online_node; 1247 nid = first_online_node;
1248 1248
1249 if (NODE_DATA(nid)->node_spanned_pages) 1249 if (NODE_DATA(nid)->node_spanned_pages)
1250 return nid; 1250 return nid;
1251 1251
1252 for_each_online_node(nid) { 1252 for_each_online_node(nid) {
1253 if (NODE_DATA(nid)->node_spanned_pages) { 1253 if (NODE_DATA(nid)->node_spanned_pages) {
1254 found = 1; 1254 found = 1;
1255 break; 1255 break;
1256 } 1256 }
1257 } 1257 }
1258 1258
1259 BUG_ON(!found); 1259 BUG_ON(!found);
1260 return nid; 1260 return nid;
1261 } 1261 }
1262 1262
1263 static u64 hot_add_drconf_memory_max(void) 1263 static u64 hot_add_drconf_memory_max(void)
1264 { 1264 {
1265 struct device_node *memory = NULL; 1265 struct device_node *memory = NULL;
1266 unsigned int drconf_cell_cnt = 0; 1266 unsigned int drconf_cell_cnt = 0;
1267 u64 lmb_size = 0; 1267 u64 lmb_size = 0;
1268 const u32 *dm = 0; 1268 const u32 *dm = 0;
1269 1269
1270 memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); 1270 memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
1271 if (memory) { 1271 if (memory) {
1272 drconf_cell_cnt = of_get_drconf_memory(memory, &dm); 1272 drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
1273 lmb_size = of_get_lmb_size(memory); 1273 lmb_size = of_get_lmb_size(memory);
1274 of_node_put(memory); 1274 of_node_put(memory);
1275 } 1275 }
1276 return lmb_size * drconf_cell_cnt; 1276 return lmb_size * drconf_cell_cnt;
1277 } 1277 }
1278 1278
1279 /* 1279 /*
1280 * memory_hotplug_max - return max address of memory that may be added 1280 * memory_hotplug_max - return max address of memory that may be added
1281 * 1281 *
1282 * This is currently only used on systems that support drconfig memory 1282 * This is currently only used on systems that support drconfig memory
1283 * hotplug. 1283 * hotplug.
1284 */ 1284 */
1285 u64 memory_hotplug_max(void) 1285 u64 memory_hotplug_max(void)
1286 { 1286 {
1287 return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM()); 1287 return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
1288 } 1288 }
1289 #endif /* CONFIG_MEMORY_HOTPLUG */ 1289 #endif /* CONFIG_MEMORY_HOTPLUG */
1290 1290
1291 /* Virtual Processor Home Node (VPHN) support */ 1291 /* Virtual Processor Home Node (VPHN) support */
1292 #ifdef CONFIG_PPC_SPLPAR 1292 #ifdef CONFIG_PPC_SPLPAR
1293 static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS]; 1293 static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
1294 static cpumask_t cpu_associativity_changes_mask; 1294 static cpumask_t cpu_associativity_changes_mask;
1295 static int vphn_enabled; 1295 static int vphn_enabled;
1296 static void set_topology_timer(void); 1296 static void set_topology_timer(void);
1297 1297
1298 /* 1298 /*
1299 * Store the current values of the associativity change counters 1299 * Store the current values of the associativity change counters
1300 * maintained by the hypervisor. 1300 * maintained by the hypervisor.
1301 */ 1301 */
1302 static void setup_cpu_associativity_change_counters(void) 1302 static void setup_cpu_associativity_change_counters(void)
1303 { 1303 {
1304 int cpu; 1304 int cpu;
1305 1305
1306 /* The VPHN feature supports a maximum of 8 reference points */ 1306 /* The VPHN feature supports a maximum of 8 reference points */
1307 BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8); 1307 BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);
1308 1308
1309 for_each_possible_cpu(cpu) { 1309 for_each_possible_cpu(cpu) {
1310 int i; 1310 int i;
1311 u8 *counts = vphn_cpu_change_counts[cpu]; 1311 u8 *counts = vphn_cpu_change_counts[cpu];
1312 volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts; 1312 volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
1313 1313
1314 for (i = 0; i < distance_ref_points_depth; i++) 1314 for (i = 0; i < distance_ref_points_depth; i++)
1315 counts[i] = hypervisor_counts[i]; 1315 counts[i] = hypervisor_counts[i];
1316 } 1316 }
1317 } 1317 }
1318 1318
1319 /* 1319 /*
1320 * The hypervisor maintains a set of 8 associativity change counters in 1320 * The hypervisor maintains a set of 8 associativity change counters in
1321 * the VPA of each cpu that correspond to the associativity levels in the 1321 * the VPA of each cpu that correspond to the associativity levels in the
1322 * ibm,associativity-reference-points property. When an associativity 1322 * ibm,associativity-reference-points property. When an associativity
1323 * level changes, the corresponding counter is incremented. 1323 * level changes, the corresponding counter is incremented.
1324 * 1324 *
1325 * Set a bit in cpu_associativity_changes_mask for each cpu whose home 1325 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
1326 * node associativity levels have changed. 1326 * node associativity levels have changed.
1327 * 1327 *
1328 * Returns the number of cpus with unhandled associativity changes. 1328 * Returns the number of cpus with unhandled associativity changes.
1329 */ 1329 */
1330 static int update_cpu_associativity_changes_mask(void) 1330 static int update_cpu_associativity_changes_mask(void)
1331 { 1331 {
1332 int cpu, nr_cpus = 0; 1332 int cpu, nr_cpus = 0;
1333 cpumask_t *changes = &cpu_associativity_changes_mask; 1333 cpumask_t *changes = &cpu_associativity_changes_mask;
1334 1334
1335 cpumask_clear(changes); 1335 cpumask_clear(changes);
1336 1336
1337 for_each_possible_cpu(cpu) { 1337 for_each_possible_cpu(cpu) {
1338 int i, changed = 0; 1338 int i, changed = 0;
1339 u8 *counts = vphn_cpu_change_counts[cpu]; 1339 u8 *counts = vphn_cpu_change_counts[cpu];
1340 volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts; 1340 volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
1341 1341
1342 for (i = 0; i < distance_ref_points_depth; i++) { 1342 for (i = 0; i < distance_ref_points_depth; i++) {
1343 if (hypervisor_counts[i] != counts[i]) { 1343 if (hypervisor_counts[i] != counts[i]) {
1344 counts[i] = hypervisor_counts[i]; 1344 counts[i] = hypervisor_counts[i];
1345 changed = 1; 1345 changed = 1;
1346 } 1346 }
1347 } 1347 }
1348 if (changed) { 1348 if (changed) {
1349 cpumask_set_cpu(cpu, changes); 1349 cpumask_set_cpu(cpu, changes);
1350 nr_cpus++; 1350 nr_cpus++;
1351 } 1351 }
1352 } 1352 }
1353 1353
1354 return nr_cpus; 1354 return nr_cpus;
1355 } 1355 }
1356 1356
1357 /* 1357 /*
1358 * 6 64-bit registers unpacked into 12 32-bit associativity values. To form 1358 * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
1359 * the complete property we have to add the length in the first cell. 1359 * the complete property we have to add the length in the first cell.
1360 */ 1360 */
1361 #define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1) 1361 #define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)
1362 1362
1363 /* 1363 /*
1364 * Convert the associativity domain numbers returned from the hypervisor 1364 * Convert the associativity domain numbers returned from the hypervisor
1365 * to the sequence they would appear in the ibm,associativity property. 1365 * to the sequence they would appear in the ibm,associativity property.
1366 */ 1366 */
1367 static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked) 1367 static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
1368 { 1368 {
1369 int i, nr_assoc_doms = 0; 1369 int i, nr_assoc_doms = 0;
1370 const u16 *field = (const u16*) packed; 1370 const u16 *field = (const u16*) packed;
1371 1371
1372 #define VPHN_FIELD_UNUSED (0xffff) 1372 #define VPHN_FIELD_UNUSED (0xffff)
1373 #define VPHN_FIELD_MSB (0x8000) 1373 #define VPHN_FIELD_MSB (0x8000)
1374 #define VPHN_FIELD_MASK (~VPHN_FIELD_MSB) 1374 #define VPHN_FIELD_MASK (~VPHN_FIELD_MSB)
1375 1375
1376 for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) { 1376 for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
1377 if (*field == VPHN_FIELD_UNUSED) { 1377 if (*field == VPHN_FIELD_UNUSED) {
1378 /* All significant fields processed, and remaining 1378 /* All significant fields processed, and remaining
1379 * fields contain the reserved value of all 1's. 1379 * fields contain the reserved value of all 1's.
1380 * Just store them. 1380 * Just store them.
1381 */ 1381 */
1382 unpacked[i] = *((u32*)field); 1382 unpacked[i] = *((u32*)field);
1383 field += 2; 1383 field += 2;
1384 } else if (*field & VPHN_FIELD_MSB) { 1384 } else if (*field & VPHN_FIELD_MSB) {
1385 /* Data is in the lower 15 bits of this field */ 1385 /* Data is in the lower 15 bits of this field */
1386 unpacked[i] = *field & VPHN_FIELD_MASK; 1386 unpacked[i] = *field & VPHN_FIELD_MASK;
1387 field++; 1387 field++;
1388 nr_assoc_doms++; 1388 nr_assoc_doms++;
1389 } else { 1389 } else {
1390 /* Data is in the lower 15 bits of this field 1390 /* Data is in the lower 15 bits of this field
1391 * concatenated with the next 16 bit field 1391 * concatenated with the next 16 bit field
1392 */ 1392 */
1393 unpacked[i] = *((u32*)field); 1393 unpacked[i] = *((u32*)field);
1394 field += 2; 1394 field += 2;
1395 nr_assoc_doms++; 1395 nr_assoc_doms++;
1396 } 1396 }
1397 } 1397 }
1398 1398
1399 /* The first cell contains the length of the property */ 1399 /* The first cell contains the length of the property */
1400 unpacked[0] = nr_assoc_doms; 1400 unpacked[0] = nr_assoc_doms;
1401 1401
1402 return nr_assoc_doms; 1402 return nr_assoc_doms;
1403 } 1403 }
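
The field rules above can be illustrated with a short host-side sketch (not kernel code; the values are invented, and the real code walks the big-endian hcall return buffer in place rather than a u16 array).

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the per-field rules of vphn_unpack_associativity().
 * A field with the MSB set carries a 15-bit domain number; 0xffff marks
 * unused trailing fields; anything else is the high half of a 32-bit
 * domain number spanning two fields. */
int main(void)
{
	const uint16_t field[] = {
		0x8002,		/* 15-bit domain: 2 */
		0x0001, 0x0004,	/* 32-bit domain: 0x00010004 */
		0x8005,		/* 15-bit domain: 5 */
		0xffff, 0xffff,	/* unused */
	};
	unsigned int i = 0, nr_assoc_doms = 0;

	while (i < sizeof(field) / sizeof(field[0])) {
		if (field[i] == 0xffff) {
			i += 2;			/* reserved, skip two fields */
		} else if (field[i] & 0x8000) {
			printf("domain %u\n", field[i] & 0x7fff);
			i += 1;
			nr_assoc_doms++;
		} else {
			printf("domain %u\n",
			       ((uint32_t)field[i] << 16) | field[i + 1]);
			i += 2;
			nr_assoc_doms++;
		}
	}
	printf("nr_assoc_doms = %u\n", nr_assoc_doms);
	return 0;
}
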
1404 1404
1405 /* 1405 /*
1406 * Retrieve the new associativity information for a virtual processor's 1406 * Retrieve the new associativity information for a virtual processor's
1407 * home node. 1407 * home node.
1408 */ 1408 */
1409 static long hcall_vphn(unsigned long cpu, unsigned int *associativity) 1409 static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
1410 { 1410 {
1411 long rc; 1411 long rc;
1412 long retbuf[PLPAR_HCALL9_BUFSIZE] = {0}; 1412 long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
1413 u64 flags = 1; 1413 u64 flags = 1;
1414 int hwcpu = get_hard_smp_processor_id(cpu); 1414 int hwcpu = get_hard_smp_processor_id(cpu);
1415 1415
1416 rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu); 1416 rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
1417 vphn_unpack_associativity(retbuf, associativity); 1417 vphn_unpack_associativity(retbuf, associativity);
1418 1418
1419 return rc; 1419 return rc;
1420 } 1420 }
1421 1421
1422 static long vphn_get_associativity(unsigned long cpu, 1422 static long vphn_get_associativity(unsigned long cpu,
1423 unsigned int *associativity) 1423 unsigned int *associativity)
1424 { 1424 {
1425 long rc; 1425 long rc;
1426 1426
1427 rc = hcall_vphn(cpu, associativity); 1427 rc = hcall_vphn(cpu, associativity);
1428 1428
1429 switch (rc) { 1429 switch (rc) {
1430 case H_FUNCTION: 1430 case H_FUNCTION:
1431 printk(KERN_INFO 1431 printk(KERN_INFO
1432 "VPHN is not supported. Disabling polling...\n"); 1432 "VPHN is not supported. Disabling polling...\n");
1433 stop_topology_update(); 1433 stop_topology_update();
1434 break; 1434 break;
1435 case H_HARDWARE: 1435 case H_HARDWARE:
1436 printk(KERN_ERR 1436 printk(KERN_ERR
1437 "hcall_vphn() experienced a hardware fault " 1437 "hcall_vphn() experienced a hardware fault "
1438 "preventing VPHN. Disabling polling...\n"); 1438 "preventing VPHN. Disabling polling...\n");
1439 stop_topology_update(); 1439 stop_topology_update();
1440 } 1440 }
1441 1441
1442 return rc; 1442 return rc;
1443 } 1443 }
1444 1444
1445 /* 1445 /*
1446 * Update the node maps and sysfs entries for each cpu whose home node 1446 * Update the node maps and sysfs entries for each cpu whose home node
1447 * has changed. 1447 * has changed.
1448 */ 1448 */
1449 int arch_update_cpu_topology(void) 1449 int arch_update_cpu_topology(void)
1450 { 1450 {
1451 int cpu, nid, old_nid; 1451 int cpu, nid, old_nid;
1452 unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0}; 1452 unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
1453 struct sys_device *sysdev; 1453 struct sys_device *sysdev;
1454 1454
1455 for_each_cpu(cpu,&cpu_associativity_changes_mask) { 1455 for_each_cpu(cpu,&cpu_associativity_changes_mask) {
1456 vphn_get_associativity(cpu, associativity); 1456 vphn_get_associativity(cpu, associativity);
1457 nid = associativity_to_nid(associativity); 1457 nid = associativity_to_nid(associativity);
1458 1458
1459 if (nid < 0 || !node_online(nid)) 1459 if (nid < 0 || !node_online(nid))
1460 nid = first_online_node; 1460 nid = first_online_node;
1461 1461
1462 old_nid = numa_cpu_lookup_table[cpu]; 1462 old_nid = numa_cpu_lookup_table[cpu];
1463 1463
1464 /* Disable hotplug while we update the cpu 1464 /* Disable hotplug while we update the cpu
1465 * masks and sysfs. 1465 * masks and sysfs.
1466 */ 1466 */
1467 get_online_cpus(); 1467 get_online_cpus();
1468 unregister_cpu_under_node(cpu, old_nid); 1468 unregister_cpu_under_node(cpu, old_nid);
1469 unmap_cpu_from_node(cpu); 1469 unmap_cpu_from_node(cpu);
1470 map_cpu_to_node(cpu, nid); 1470 map_cpu_to_node(cpu, nid);
1471 register_cpu_under_node(cpu, nid); 1471 register_cpu_under_node(cpu, nid);
1472 put_online_cpus(); 1472 put_online_cpus();
1473 1473
1474 sysdev = get_cpu_sysdev(cpu); 1474 sysdev = get_cpu_sysdev(cpu);
1475 if (sysdev) 1475 if (sysdev)
1476 kobject_uevent(&sysdev->kobj, KOBJ_CHANGE); 1476 kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
1477 } 1477 }
1478 1478
1479 return 1; 1479 return 1;
1480 } 1480 }
1481 1481
1482 static void topology_work_fn(struct work_struct *work) 1482 static void topology_work_fn(struct work_struct *work)
1483 { 1483 {
1484 rebuild_sched_domains(); 1484 rebuild_sched_domains();
1485 } 1485 }
1486 static DECLARE_WORK(topology_work, topology_work_fn); 1486 static DECLARE_WORK(topology_work, topology_work_fn);
1487 1487
1488 void topology_schedule_update(void) 1488 void topology_schedule_update(void)
1489 { 1489 {
1490 schedule_work(&topology_work); 1490 schedule_work(&topology_work);
1491 } 1491 }
1492 1492
1493 static void topology_timer_fn(unsigned long ignored) 1493 static void topology_timer_fn(unsigned long ignored)
1494 { 1494 {
1495 if (!vphn_enabled) 1495 if (!vphn_enabled)
1496 return; 1496 return;
1497 if (update_cpu_associativity_changes_mask() > 0) 1497 if (update_cpu_associativity_changes_mask() > 0)
1498 topology_schedule_update(); 1498 topology_schedule_update();
1499 set_topology_timer(); 1499 set_topology_timer();
1500 } 1500 }
1501 static struct timer_list topology_timer = 1501 static struct timer_list topology_timer =
1502 TIMER_INITIALIZER(topology_timer_fn, 0, 0); 1502 TIMER_INITIALIZER(topology_timer_fn, 0, 0);
1503 1503
1504 static void set_topology_timer(void) 1504 static void set_topology_timer(void)
1505 { 1505 {
1506 topology_timer.data = 0; 1506 topology_timer.data = 0;
1507 topology_timer.expires = jiffies + 60 * HZ; 1507 topology_timer.expires = jiffies + 60 * HZ;
1508 add_timer(&topology_timer); 1508 add_timer(&topology_timer);
1509 } 1509 }
1510 1510
1511 /* 1511 /*
1512 * Start polling for VPHN associativity changes. 1512 * Start polling for VPHN associativity changes.
1513 */ 1513 */
1514 int start_topology_update(void) 1514 int start_topology_update(void)
1515 { 1515 {
1516 int rc = 0; 1516 int rc = 0;
1517 1517
1518 /* Disabled until races with load balancing are fixed */ 1518 /* Disabled until races with load balancing are fixed */
1519 if (0 && firmware_has_feature(FW_FEATURE_VPHN) && 1519 if (0 && firmware_has_feature(FW_FEATURE_VPHN) &&
1520 get_lppaca()->shared_proc) { 1520 get_lppaca()->shared_proc) {
1521 vphn_enabled = 1; 1521 vphn_enabled = 1;
1522 setup_cpu_associativity_change_counters(); 1522 setup_cpu_associativity_change_counters();
1523 init_timer_deferrable(&topology_timer); 1523 init_timer_deferrable(&topology_timer);
1524 set_topology_timer(); 1524 set_topology_timer();
1525 rc = 1; 1525 rc = 1;
1526 } 1526 }
1527 1527
1528 return rc; 1528 return rc;
1529 } 1529 }
1530 __initcall(start_topology_update); 1530 __initcall(start_topology_update);
1531 1531
1532 /* 1532 /*
1533 * Disable polling for VPHN associativity changes. 1533 * Disable polling for VPHN associativity changes.
1534 */ 1534 */
1535 int stop_topology_update(void) 1535 int stop_topology_update(void)
1536 { 1536 {
1537 vphn_enabled = 0; 1537 vphn_enabled = 0;
1538 return del_timer_sync(&topology_timer); 1538 return del_timer_sync(&topology_timer);
1539 } 1539 }
1540 #endif /* CONFIG_PPC_SPLPAR */ 1540 #endif /* CONFIG_PPC_SPLPAR */
1541 1541
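For reference, the unpacking loop above relies on a simple field encoding: each 16-bit cell returned by H_HOME_NODE_ASSOCIATIVITY either carries a 15-bit value on its own (MSB set), pairs with the following cell to form a 32-bit value (MSB clear), or is the all-ones marker for unused trailing cells. The stand-alone sketch below is illustration only, not part of this patch; the sample values are invented, and it assumes the big-endian pairing used on powerpc.

/* Illustration only -- mirrors the VPHN field decoding shown above. */
#include <stdio.h>
#include <stdint.h>

#define VPHN_FIELD_UNUSED	0xffff
#define VPHN_FIELD_MSB		0x8000
#define VPHN_FIELD_MASK		0x7fff	/* low 15 bits of a 16-bit field */

int main(void)
{
	uint16_t single  = 0x8002;			/* made-up: MSB set, 15-bit value   */
	uint16_t pair[2] = { 0x0001, 0x0005 };		/* made-up: MSB clear, 32-bit value */

	printf("15-bit field  -> %u\n", single & VPHN_FIELD_MASK);
	printf("32-bit field  -> %u\n",
	       ((uint32_t)pair[0] << 16) | pair[1]);	/* big-endian pairing */
	printf("0x%04x marks unused trailing fields\n", VPHN_FIELD_UNUSED);
	return 0;
}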
arch/powerpc/mm/slice.c
1 /* 1 /*
2 * address space "slices" (meta-segments) support 2 * address space "slices" (meta-segments) support
3 * 3 *
4 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation. 4 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
5 * 5 *
6 * Based on hugetlb implementation 6 * Based on hugetlb implementation
7 * 7 *
8 * Copyright (C) 2003 David Gibson, IBM Corporation. 8 * Copyright (C) 2003 David Gibson, IBM Corporation.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or 12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version. 13 * (at your option) any later version.
14 * 14 *
15 * This program is distributed in the hope that it will be useful, 15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 * 19 *
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */ 23 */
24 24
25 #undef DEBUG 25 #undef DEBUG
26 26
27 #include <linux/kernel.h> 27 #include <linux/kernel.h>
28 #include <linux/mm.h> 28 #include <linux/mm.h>
29 #include <linux/pagemap.h> 29 #include <linux/pagemap.h>
30 #include <linux/err.h> 30 #include <linux/err.h>
31 #include <linux/spinlock.h> 31 #include <linux/spinlock.h>
32 #include <linux/module.h> 32 #include <linux/export.h>
33 #include <asm/mman.h> 33 #include <asm/mman.h>
34 #include <asm/mmu.h> 34 #include <asm/mmu.h>
35 #include <asm/spu.h> 35 #include <asm/spu.h>
36 36
37 static DEFINE_SPINLOCK(slice_convert_lock); 37 static DEFINE_SPINLOCK(slice_convert_lock);
38 38
39 39
40 #ifdef DEBUG 40 #ifdef DEBUG
41 int _slice_debug = 1; 41 int _slice_debug = 1;
42 42
43 static void slice_print_mask(const char *label, struct slice_mask mask) 43 static void slice_print_mask(const char *label, struct slice_mask mask)
44 { 44 {
45 char *p, buf[16 + 3 + 16 + 1]; 45 char *p, buf[16 + 3 + 16 + 1];
46 int i; 46 int i;
47 47
48 if (!_slice_debug) 48 if (!_slice_debug)
49 return; 49 return;
50 p = buf; 50 p = buf;
51 for (i = 0; i < SLICE_NUM_LOW; i++) 51 for (i = 0; i < SLICE_NUM_LOW; i++)
52 *(p++) = (mask.low_slices & (1 << i)) ? '1' : '0'; 52 *(p++) = (mask.low_slices & (1 << i)) ? '1' : '0';
53 *(p++) = ' '; 53 *(p++) = ' ';
54 *(p++) = '-'; 54 *(p++) = '-';
55 *(p++) = ' '; 55 *(p++) = ' ';
56 for (i = 0; i < SLICE_NUM_HIGH; i++) 56 for (i = 0; i < SLICE_NUM_HIGH; i++)
57 *(p++) = (mask.high_slices & (1 << i)) ? '1' : '0'; 57 *(p++) = (mask.high_slices & (1 << i)) ? '1' : '0';
58 *(p++) = 0; 58 *(p++) = 0;
59 59
60 printk(KERN_DEBUG "%s:%s\n", label, buf); 60 printk(KERN_DEBUG "%s:%s\n", label, buf);
61 } 61 }
62 62
63 #define slice_dbg(fmt...) do { if (_slice_debug) pr_debug(fmt); } while(0) 63 #define slice_dbg(fmt...) do { if (_slice_debug) pr_debug(fmt); } while(0)
64 64
65 #else 65 #else
66 66
67 static void slice_print_mask(const char *label, struct slice_mask mask) {} 67 static void slice_print_mask(const char *label, struct slice_mask mask) {}
68 #define slice_dbg(fmt...) 68 #define slice_dbg(fmt...)
69 69
70 #endif 70 #endif
71 71
72 static struct slice_mask slice_range_to_mask(unsigned long start, 72 static struct slice_mask slice_range_to_mask(unsigned long start,
73 unsigned long len) 73 unsigned long len)
74 { 74 {
75 unsigned long end = start + len - 1; 75 unsigned long end = start + len - 1;
76 struct slice_mask ret = { 0, 0 }; 76 struct slice_mask ret = { 0, 0 };
77 77
78 if (start < SLICE_LOW_TOP) { 78 if (start < SLICE_LOW_TOP) {
79 unsigned long mend = min(end, SLICE_LOW_TOP); 79 unsigned long mend = min(end, SLICE_LOW_TOP);
80 unsigned long mstart = min(start, SLICE_LOW_TOP); 80 unsigned long mstart = min(start, SLICE_LOW_TOP);
81 81
82 ret.low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1)) 82 ret.low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
83 - (1u << GET_LOW_SLICE_INDEX(mstart)); 83 - (1u << GET_LOW_SLICE_INDEX(mstart));
84 } 84 }
85 85
86 if ((start + len) > SLICE_LOW_TOP) 86 if ((start + len) > SLICE_LOW_TOP)
87 ret.high_slices = (1u << (GET_HIGH_SLICE_INDEX(end) + 1)) 87 ret.high_slices = (1u << (GET_HIGH_SLICE_INDEX(end) + 1))
88 - (1u << GET_HIGH_SLICE_INDEX(start)); 88 - (1u << GET_HIGH_SLICE_INDEX(start));
89 89
90 return ret; 90 return ret;
91 } 91 }
92 92
93 static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, 93 static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
94 unsigned long len) 94 unsigned long len)
95 { 95 {
96 struct vm_area_struct *vma; 96 struct vm_area_struct *vma;
97 97
98 if ((mm->task_size - len) < addr) 98 if ((mm->task_size - len) < addr)
99 return 0; 99 return 0;
100 vma = find_vma(mm, addr); 100 vma = find_vma(mm, addr);
101 return (!vma || (addr + len) <= vma->vm_start); 101 return (!vma || (addr + len) <= vma->vm_start);
102 } 102 }
103 103
104 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) 104 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
105 { 105 {
106 return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT, 106 return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
107 1ul << SLICE_LOW_SHIFT); 107 1ul << SLICE_LOW_SHIFT);
108 } 108 }
109 109
110 static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice) 110 static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
111 { 111 {
112 unsigned long start = slice << SLICE_HIGH_SHIFT; 112 unsigned long start = slice << SLICE_HIGH_SHIFT;
113 unsigned long end = start + (1ul << SLICE_HIGH_SHIFT); 113 unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);
114 114
115 /* Hack, so that each address is controlled by exactly one 115 /* Hack, so that each address is controlled by exactly one
116 * of the high or low area bitmaps, the first high area starts 116 * of the high or low area bitmaps, the first high area starts
117 * at 4GB, not 0 */ 117 * at 4GB, not 0 */
118 if (start == 0) 118 if (start == 0)
119 start = SLICE_LOW_TOP; 119 start = SLICE_LOW_TOP;
120 120
121 return !slice_area_is_free(mm, start, end - start); 121 return !slice_area_is_free(mm, start, end - start);
122 } 122 }
123 123
124 static struct slice_mask slice_mask_for_free(struct mm_struct *mm) 124 static struct slice_mask slice_mask_for_free(struct mm_struct *mm)
125 { 125 {
126 struct slice_mask ret = { 0, 0 }; 126 struct slice_mask ret = { 0, 0 };
127 unsigned long i; 127 unsigned long i;
128 128
129 for (i = 0; i < SLICE_NUM_LOW; i++) 129 for (i = 0; i < SLICE_NUM_LOW; i++)
130 if (!slice_low_has_vma(mm, i)) 130 if (!slice_low_has_vma(mm, i))
131 ret.low_slices |= 1u << i; 131 ret.low_slices |= 1u << i;
132 132
133 if (mm->task_size <= SLICE_LOW_TOP) 133 if (mm->task_size <= SLICE_LOW_TOP)
134 return ret; 134 return ret;
135 135
136 for (i = 0; i < SLICE_NUM_HIGH; i++) 136 for (i = 0; i < SLICE_NUM_HIGH; i++)
137 if (!slice_high_has_vma(mm, i)) 137 if (!slice_high_has_vma(mm, i))
138 ret.high_slices |= 1u << i; 138 ret.high_slices |= 1u << i;
139 139
140 return ret; 140 return ret;
141 } 141 }
142 142
143 static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize) 143 static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize)
144 { 144 {
145 struct slice_mask ret = { 0, 0 }; 145 struct slice_mask ret = { 0, 0 };
146 unsigned long i; 146 unsigned long i;
147 u64 psizes; 147 u64 psizes;
148 148
149 psizes = mm->context.low_slices_psize; 149 psizes = mm->context.low_slices_psize;
150 for (i = 0; i < SLICE_NUM_LOW; i++) 150 for (i = 0; i < SLICE_NUM_LOW; i++)
151 if (((psizes >> (i * 4)) & 0xf) == psize) 151 if (((psizes >> (i * 4)) & 0xf) == psize)
152 ret.low_slices |= 1u << i; 152 ret.low_slices |= 1u << i;
153 153
154 psizes = mm->context.high_slices_psize; 154 psizes = mm->context.high_slices_psize;
155 for (i = 0; i < SLICE_NUM_HIGH; i++) 155 for (i = 0; i < SLICE_NUM_HIGH; i++)
156 if (((psizes >> (i * 4)) & 0xf) == psize) 156 if (((psizes >> (i * 4)) & 0xf) == psize)
157 ret.high_slices |= 1u << i; 157 ret.high_slices |= 1u << i;
158 158
159 return ret; 159 return ret;
160 } 160 }
161 161
162 static int slice_check_fit(struct slice_mask mask, struct slice_mask available) 162 static int slice_check_fit(struct slice_mask mask, struct slice_mask available)
163 { 163 {
164 return (mask.low_slices & available.low_slices) == mask.low_slices && 164 return (mask.low_slices & available.low_slices) == mask.low_slices &&
165 (mask.high_slices & available.high_slices) == mask.high_slices; 165 (mask.high_slices & available.high_slices) == mask.high_slices;
166 } 166 }
167 167
168 static void slice_flush_segments(void *parm) 168 static void slice_flush_segments(void *parm)
169 { 169 {
170 struct mm_struct *mm = parm; 170 struct mm_struct *mm = parm;
171 unsigned long flags; 171 unsigned long flags;
172 172
173 if (mm != current->active_mm) 173 if (mm != current->active_mm)
174 return; 174 return;
175 175
176 /* update the paca copy of the context struct */ 176 /* update the paca copy of the context struct */
177 get_paca()->context = current->active_mm->context; 177 get_paca()->context = current->active_mm->context;
178 178
179 local_irq_save(flags); 179 local_irq_save(flags);
180 slb_flush_and_rebolt(); 180 slb_flush_and_rebolt();
181 local_irq_restore(flags); 181 local_irq_restore(flags);
182 } 182 }
183 183
184 static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize) 184 static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
185 { 185 {
186 /* Write the new slice psize bits */ 186 /* Write the new slice psize bits */
187 u64 lpsizes, hpsizes; 187 u64 lpsizes, hpsizes;
188 unsigned long i, flags; 188 unsigned long i, flags;
189 189
190 slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize); 190 slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
191 slice_print_mask(" mask", mask); 191 slice_print_mask(" mask", mask);
192 192
193 /* We need to use a spinlock here to protect against 193 /* We need to use a spinlock here to protect against
194 * concurrent 64k -> 4k demotion ... 194 * concurrent 64k -> 4k demotion ...
195 */ 195 */
196 spin_lock_irqsave(&slice_convert_lock, flags); 196 spin_lock_irqsave(&slice_convert_lock, flags);
197 197
198 lpsizes = mm->context.low_slices_psize; 198 lpsizes = mm->context.low_slices_psize;
199 for (i = 0; i < SLICE_NUM_LOW; i++) 199 for (i = 0; i < SLICE_NUM_LOW; i++)
200 if (mask.low_slices & (1u << i)) 200 if (mask.low_slices & (1u << i))
201 lpsizes = (lpsizes & ~(0xful << (i * 4))) | 201 lpsizes = (lpsizes & ~(0xful << (i * 4))) |
202 (((unsigned long)psize) << (i * 4)); 202 (((unsigned long)psize) << (i * 4));
203 203
204 hpsizes = mm->context.high_slices_psize; 204 hpsizes = mm->context.high_slices_psize;
205 for (i = 0; i < SLICE_NUM_HIGH; i++) 205 for (i = 0; i < SLICE_NUM_HIGH; i++)
206 if (mask.high_slices & (1u << i)) 206 if (mask.high_slices & (1u << i))
207 hpsizes = (hpsizes & ~(0xful << (i * 4))) | 207 hpsizes = (hpsizes & ~(0xful << (i * 4))) |
208 (((unsigned long)psize) << (i * 4)); 208 (((unsigned long)psize) << (i * 4));
209 209
210 mm->context.low_slices_psize = lpsizes; 210 mm->context.low_slices_psize = lpsizes;
211 mm->context.high_slices_psize = hpsizes; 211 mm->context.high_slices_psize = hpsizes;
212 212
213 slice_dbg(" lsps=%lx, hsps=%lx\n", 213 slice_dbg(" lsps=%lx, hsps=%lx\n",
214 mm->context.low_slices_psize, 214 mm->context.low_slices_psize,
215 mm->context.high_slices_psize); 215 mm->context.high_slices_psize);
216 216
217 spin_unlock_irqrestore(&slice_convert_lock, flags); 217 spin_unlock_irqrestore(&slice_convert_lock, flags);
218 218
219 #ifdef CONFIG_SPU_BASE 219 #ifdef CONFIG_SPU_BASE
220 spu_flush_all_slbs(mm); 220 spu_flush_all_slbs(mm);
221 #endif 221 #endif
222 } 222 }
223 223
224 static unsigned long slice_find_area_bottomup(struct mm_struct *mm, 224 static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
225 unsigned long len, 225 unsigned long len,
226 struct slice_mask available, 226 struct slice_mask available,
227 int psize, int use_cache) 227 int psize, int use_cache)
228 { 228 {
229 struct vm_area_struct *vma; 229 struct vm_area_struct *vma;
230 unsigned long start_addr, addr; 230 unsigned long start_addr, addr;
231 struct slice_mask mask; 231 struct slice_mask mask;
232 int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT); 232 int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
233 233
234 if (use_cache) { 234 if (use_cache) {
235 if (len <= mm->cached_hole_size) { 235 if (len <= mm->cached_hole_size) {
236 start_addr = addr = TASK_UNMAPPED_BASE; 236 start_addr = addr = TASK_UNMAPPED_BASE;
237 mm->cached_hole_size = 0; 237 mm->cached_hole_size = 0;
238 } else 238 } else
239 start_addr = addr = mm->free_area_cache; 239 start_addr = addr = mm->free_area_cache;
240 } else 240 } else
241 start_addr = addr = TASK_UNMAPPED_BASE; 241 start_addr = addr = TASK_UNMAPPED_BASE;
242 242
243 full_search: 243 full_search:
244 for (;;) { 244 for (;;) {
245 addr = _ALIGN_UP(addr, 1ul << pshift); 245 addr = _ALIGN_UP(addr, 1ul << pshift);
246 if ((TASK_SIZE - len) < addr) 246 if ((TASK_SIZE - len) < addr)
247 break; 247 break;
248 vma = find_vma(mm, addr); 248 vma = find_vma(mm, addr);
249 BUG_ON(vma && (addr >= vma->vm_end)); 249 BUG_ON(vma && (addr >= vma->vm_end));
250 250
251 mask = slice_range_to_mask(addr, len); 251 mask = slice_range_to_mask(addr, len);
252 if (!slice_check_fit(mask, available)) { 252 if (!slice_check_fit(mask, available)) {
253 if (addr < SLICE_LOW_TOP) 253 if (addr < SLICE_LOW_TOP)
254 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_LOW_SHIFT); 254 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_LOW_SHIFT);
255 else 255 else
256 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT); 256 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
257 continue; 257 continue;
258 } 258 }
259 if (!vma || addr + len <= vma->vm_start) { 259 if (!vma || addr + len <= vma->vm_start) {
260 /* 260 /*
261 * Remember the place where we stopped the search: 261 * Remember the place where we stopped the search:
262 */ 262 */
263 if (use_cache) 263 if (use_cache)
264 mm->free_area_cache = addr + len; 264 mm->free_area_cache = addr + len;
265 return addr; 265 return addr;
266 } 266 }
267 if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start) 267 if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
268 mm->cached_hole_size = vma->vm_start - addr; 268 mm->cached_hole_size = vma->vm_start - addr;
269 addr = vma->vm_end; 269 addr = vma->vm_end;
270 } 270 }
271 271
272 /* Make sure we didn't miss any holes */ 272 /* Make sure we didn't miss any holes */
273 if (use_cache && start_addr != TASK_UNMAPPED_BASE) { 273 if (use_cache && start_addr != TASK_UNMAPPED_BASE) {
274 start_addr = addr = TASK_UNMAPPED_BASE; 274 start_addr = addr = TASK_UNMAPPED_BASE;
275 mm->cached_hole_size = 0; 275 mm->cached_hole_size = 0;
276 goto full_search; 276 goto full_search;
277 } 277 }
278 return -ENOMEM; 278 return -ENOMEM;
279 } 279 }
280 280
281 static unsigned long slice_find_area_topdown(struct mm_struct *mm, 281 static unsigned long slice_find_area_topdown(struct mm_struct *mm,
282 unsigned long len, 282 unsigned long len,
283 struct slice_mask available, 283 struct slice_mask available,
284 int psize, int use_cache) 284 int psize, int use_cache)
285 { 285 {
286 struct vm_area_struct *vma; 286 struct vm_area_struct *vma;
287 unsigned long addr; 287 unsigned long addr;
288 struct slice_mask mask; 288 struct slice_mask mask;
289 int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT); 289 int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
290 290
291 /* check if free_area_cache is useful for us */ 291 /* check if free_area_cache is useful for us */
292 if (use_cache) { 292 if (use_cache) {
293 if (len <= mm->cached_hole_size) { 293 if (len <= mm->cached_hole_size) {
294 mm->cached_hole_size = 0; 294 mm->cached_hole_size = 0;
295 mm->free_area_cache = mm->mmap_base; 295 mm->free_area_cache = mm->mmap_base;
296 } 296 }
297 297
298 /* either no address requested or can't fit in requested 298 /* either no address requested or can't fit in requested
299 * address hole 299 * address hole
300 */ 300 */
301 addr = mm->free_area_cache; 301 addr = mm->free_area_cache;
302 302
303 /* make sure it can fit in the remaining address space */ 303 /* make sure it can fit in the remaining address space */
304 if (addr > len) { 304 if (addr > len) {
305 addr = _ALIGN_DOWN(addr - len, 1ul << pshift); 305 addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
306 mask = slice_range_to_mask(addr, len); 306 mask = slice_range_to_mask(addr, len);
307 if (slice_check_fit(mask, available) && 307 if (slice_check_fit(mask, available) &&
308 slice_area_is_free(mm, addr, len)) 308 slice_area_is_free(mm, addr, len))
309 /* remember the address as a hint for 309 /* remember the address as a hint for
310 * next time 310 * next time
311 */ 311 */
312 return (mm->free_area_cache = addr); 312 return (mm->free_area_cache = addr);
313 } 313 }
314 } 314 }
315 315
316 addr = mm->mmap_base; 316 addr = mm->mmap_base;
317 while (addr > len) { 317 while (addr > len) {
318 /* Go down by chunk size */ 318 /* Go down by chunk size */
319 addr = _ALIGN_DOWN(addr - len, 1ul << pshift); 319 addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
320 320
321 /* Check for hit with different page size */ 321 /* Check for hit with different page size */
322 mask = slice_range_to_mask(addr, len); 322 mask = slice_range_to_mask(addr, len);
323 if (!slice_check_fit(mask, available)) { 323 if (!slice_check_fit(mask, available)) {
324 if (addr < SLICE_LOW_TOP) 324 if (addr < SLICE_LOW_TOP)
325 addr = _ALIGN_DOWN(addr, 1ul << SLICE_LOW_SHIFT); 325 addr = _ALIGN_DOWN(addr, 1ul << SLICE_LOW_SHIFT);
326 else if (addr < (1ul << SLICE_HIGH_SHIFT)) 326 else if (addr < (1ul << SLICE_HIGH_SHIFT))
327 addr = SLICE_LOW_TOP; 327 addr = SLICE_LOW_TOP;
328 else 328 else
329 addr = _ALIGN_DOWN(addr, 1ul << SLICE_HIGH_SHIFT); 329 addr = _ALIGN_DOWN(addr, 1ul << SLICE_HIGH_SHIFT);
330 continue; 330 continue;
331 } 331 }
332 332
333 /* 333 /*
334 * Lookup failure means no vma is above this address, 334 * Lookup failure means no vma is above this address,
335 * else if new region fits below vma->vm_start, 335 * else if new region fits below vma->vm_start,
336 * return with success: 336 * return with success:
337 */ 337 */
338 vma = find_vma(mm, addr); 338 vma = find_vma(mm, addr);
339 if (!vma || (addr + len) <= vma->vm_start) { 339 if (!vma || (addr + len) <= vma->vm_start) {
340 /* remember the address as a hint for next time */ 340 /* remember the address as a hint for next time */
341 if (use_cache) 341 if (use_cache)
342 mm->free_area_cache = addr; 342 mm->free_area_cache = addr;
343 return addr; 343 return addr;
344 } 344 }
345 345
346 /* remember the largest hole we saw so far */ 346 /* remember the largest hole we saw so far */
347 if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start) 347 if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
348 mm->cached_hole_size = vma->vm_start - addr; 348 mm->cached_hole_size = vma->vm_start - addr;
349 349
350 /* try just below the current vma->vm_start */ 350 /* try just below the current vma->vm_start */
351 addr = vma->vm_start; 351 addr = vma->vm_start;
352 } 352 }
353 353
354 /* 354 /*
355 * A failed mmap() very likely causes application failure, 355 * A failed mmap() very likely causes application failure,
356 * so fall back to the bottom-up function here. This scenario 356 * so fall back to the bottom-up function here. This scenario
357 * can happen with large stack limits and large mmap() 357 * can happen with large stack limits and large mmap()
358 * allocations. 358 * allocations.
359 */ 359 */
360 addr = slice_find_area_bottomup(mm, len, available, psize, 0); 360 addr = slice_find_area_bottomup(mm, len, available, psize, 0);
361 361
362 /* 362 /*
363 * Restore the topdown base: 363 * Restore the topdown base:
364 */ 364 */
365 if (use_cache) { 365 if (use_cache) {
366 mm->free_area_cache = mm->mmap_base; 366 mm->free_area_cache = mm->mmap_base;
367 mm->cached_hole_size = ~0UL; 367 mm->cached_hole_size = ~0UL;
368 } 368 }
369 369
370 return addr; 370 return addr;
371 } 371 }
372 372
373 373
374 static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len, 374 static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
375 struct slice_mask mask, int psize, 375 struct slice_mask mask, int psize,
376 int topdown, int use_cache) 376 int topdown, int use_cache)
377 { 377 {
378 if (topdown) 378 if (topdown)
379 return slice_find_area_topdown(mm, len, mask, psize, use_cache); 379 return slice_find_area_topdown(mm, len, mask, psize, use_cache);
380 else 380 else
381 return slice_find_area_bottomup(mm, len, mask, psize, use_cache); 381 return slice_find_area_bottomup(mm, len, mask, psize, use_cache);
382 } 382 }
383 383
384 #define or_mask(dst, src) do { \ 384 #define or_mask(dst, src) do { \
385 (dst).low_slices |= (src).low_slices; \ 385 (dst).low_slices |= (src).low_slices; \
386 (dst).high_slices |= (src).high_slices; \ 386 (dst).high_slices |= (src).high_slices; \
387 } while (0) 387 } while (0)
388 388
389 #define andnot_mask(dst, src) do { \ 389 #define andnot_mask(dst, src) do { \
390 (dst).low_slices &= ~(src).low_slices; \ 390 (dst).low_slices &= ~(src).low_slices; \
391 (dst).high_slices &= ~(src).high_slices; \ 391 (dst).high_slices &= ~(src).high_slices; \
392 } while (0) 392 } while (0)
393 393
394 #ifdef CONFIG_PPC_64K_PAGES 394 #ifdef CONFIG_PPC_64K_PAGES
395 #define MMU_PAGE_BASE MMU_PAGE_64K 395 #define MMU_PAGE_BASE MMU_PAGE_64K
396 #else 396 #else
397 #define MMU_PAGE_BASE MMU_PAGE_4K 397 #define MMU_PAGE_BASE MMU_PAGE_4K
398 #endif 398 #endif
399 399
400 unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, 400 unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
401 unsigned long flags, unsigned int psize, 401 unsigned long flags, unsigned int psize,
402 int topdown, int use_cache) 402 int topdown, int use_cache)
403 { 403 {
404 struct slice_mask mask = {0, 0}; 404 struct slice_mask mask = {0, 0};
405 struct slice_mask good_mask; 405 struct slice_mask good_mask;
406 struct slice_mask potential_mask = {0,0} /* silence stupid warning */; 406 struct slice_mask potential_mask = {0,0} /* silence stupid warning */;
407 struct slice_mask compat_mask = {0, 0}; 407 struct slice_mask compat_mask = {0, 0};
408 int fixed = (flags & MAP_FIXED); 408 int fixed = (flags & MAP_FIXED);
409 int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT); 409 int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
410 struct mm_struct *mm = current->mm; 410 struct mm_struct *mm = current->mm;
411 unsigned long newaddr; 411 unsigned long newaddr;
412 412
413 /* Sanity checks */ 413 /* Sanity checks */
414 BUG_ON(mm->task_size == 0); 414 BUG_ON(mm->task_size == 0);
415 415
416 slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize); 416 slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
417 slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d, use_cache=%d\n", 417 slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d, use_cache=%d\n",
418 addr, len, flags, topdown, use_cache); 418 addr, len, flags, topdown, use_cache);
419 419
420 if (len > mm->task_size) 420 if (len > mm->task_size)
421 return -ENOMEM; 421 return -ENOMEM;
422 if (len & ((1ul << pshift) - 1)) 422 if (len & ((1ul << pshift) - 1))
423 return -EINVAL; 423 return -EINVAL;
424 if (fixed && (addr & ((1ul << pshift) - 1))) 424 if (fixed && (addr & ((1ul << pshift) - 1)))
425 return -EINVAL; 425 return -EINVAL;
426 if (fixed && addr > (mm->task_size - len)) 426 if (fixed && addr > (mm->task_size - len))
427 return -EINVAL; 427 return -EINVAL;
428 428
429 /* If hint, make sure it matches our alignment restrictions */ 429 /* If hint, make sure it matches our alignment restrictions */
430 if (!fixed && addr) { 430 if (!fixed && addr) {
431 addr = _ALIGN_UP(addr, 1ul << pshift); 431 addr = _ALIGN_UP(addr, 1ul << pshift);
432 slice_dbg(" aligned addr=%lx\n", addr); 432 slice_dbg(" aligned addr=%lx\n", addr);
433 /* Ignore hint if it's too large or overlaps a VMA */ 433 /* Ignore hint if it's too large or overlaps a VMA */
434 if (addr > mm->task_size - len || 434 if (addr > mm->task_size - len ||
435 !slice_area_is_free(mm, addr, len)) 435 !slice_area_is_free(mm, addr, len))
436 addr = 0; 436 addr = 0;
437 } 437 }
438 438
439 /* First make up a "good" mask of slices that have the right size 439 /* First make up a "good" mask of slices that have the right size
440 * already 440 * already
441 */ 441 */
442 good_mask = slice_mask_for_size(mm, psize); 442 good_mask = slice_mask_for_size(mm, psize);
443 slice_print_mask(" good_mask", good_mask); 443 slice_print_mask(" good_mask", good_mask);
444 444
445 /* 445 /*
446 * Here "good" means slices that are already the right page size, 446 * Here "good" means slices that are already the right page size,
447 * "compat" means slices that have a compatible page size (i.e. 447 * "compat" means slices that have a compatible page size (i.e.
448 * 4k in a 64k pagesize kernel), and "free" means slices without 448 * 4k in a 64k pagesize kernel), and "free" means slices without
449 * any VMAs. 449 * any VMAs.
450 * 450 *
451 * If MAP_FIXED: 451 * If MAP_FIXED:
452 * check if fits in good | compat => OK 452 * check if fits in good | compat => OK
453 * check if fits in good | compat | free => convert free 453 * check if fits in good | compat | free => convert free
454 * else bad 454 * else bad
455 * If have hint: 455 * If have hint:
456 * check if hint fits in good => OK 456 * check if hint fits in good => OK
457 * check if hint fits in good | free => convert free 457 * check if hint fits in good | free => convert free
458 * Otherwise: 458 * Otherwise:
459 * search in good, found => OK 459 * search in good, found => OK
460 * search in good | free, found => convert free 460 * search in good | free, found => convert free
461 * search in good | compat | free, found => convert free. 461 * search in good | compat | free, found => convert free.
462 */ 462 */
463 463
464 #ifdef CONFIG_PPC_64K_PAGES 464 #ifdef CONFIG_PPC_64K_PAGES
465 /* If we support combo pages, we can allow 64k pages in 4k slices */ 465 /* If we support combo pages, we can allow 64k pages in 4k slices */
466 if (psize == MMU_PAGE_64K) { 466 if (psize == MMU_PAGE_64K) {
467 compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K); 467 compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
468 if (fixed) 468 if (fixed)
469 or_mask(good_mask, compat_mask); 469 or_mask(good_mask, compat_mask);
470 } 470 }
471 #endif 471 #endif
472 472
473 /* First check hint if it's valid or if we have MAP_FIXED */ 473 /* First check hint if it's valid or if we have MAP_FIXED */
474 if (addr != 0 || fixed) { 474 if (addr != 0 || fixed) {
475 /* Build a mask for the requested range */ 475 /* Build a mask for the requested range */
476 mask = slice_range_to_mask(addr, len); 476 mask = slice_range_to_mask(addr, len);
477 slice_print_mask(" mask", mask); 477 slice_print_mask(" mask", mask);
478 478
479 /* Check if we fit in the good mask. If we do, we just return, 479 /* Check if we fit in the good mask. If we do, we just return,
480 * nothing else to do 480 * nothing else to do
481 */ 481 */
482 if (slice_check_fit(mask, good_mask)) { 482 if (slice_check_fit(mask, good_mask)) {
483 slice_dbg(" fits good !\n"); 483 slice_dbg(" fits good !\n");
484 return addr; 484 return addr;
485 } 485 }
486 } else { 486 } else {
487 /* Now let's see if we can find something in the existing 487 /* Now let's see if we can find something in the existing
488 * slices for that size 488 * slices for that size
489 */ 489 */
490 newaddr = slice_find_area(mm, len, good_mask, psize, topdown, 490 newaddr = slice_find_area(mm, len, good_mask, psize, topdown,
491 use_cache); 491 use_cache);
492 if (newaddr != -ENOMEM) { 492 if (newaddr != -ENOMEM) {
493 /* Found within the good mask, we don't have to setup, 493 /* Found within the good mask, we don't have to setup,
494 * we thus return directly 494 * we thus return directly
495 */ 495 */
496 slice_dbg(" found area at 0x%lx\n", newaddr); 496 slice_dbg(" found area at 0x%lx\n", newaddr);
497 return newaddr; 497 return newaddr;
498 } 498 }
499 } 499 }
500 500
501 /* We don't fit in the good mask, check what other slices are 501 /* We don't fit in the good mask, check what other slices are
502 * empty and thus can be converted 502 * empty and thus can be converted
503 */ 503 */
504 potential_mask = slice_mask_for_free(mm); 504 potential_mask = slice_mask_for_free(mm);
505 or_mask(potential_mask, good_mask); 505 or_mask(potential_mask, good_mask);
506 slice_print_mask(" potential", potential_mask); 506 slice_print_mask(" potential", potential_mask);
507 507
508 if ((addr != 0 || fixed) && slice_check_fit(mask, potential_mask)) { 508 if ((addr != 0 || fixed) && slice_check_fit(mask, potential_mask)) {
509 slice_dbg(" fits potential !\n"); 509 slice_dbg(" fits potential !\n");
510 goto convert; 510 goto convert;
511 } 511 }
512 512
513 /* If we have MAP_FIXED and failed the above steps, then error out */ 513 /* If we have MAP_FIXED and failed the above steps, then error out */
514 if (fixed) 514 if (fixed)
515 return -EBUSY; 515 return -EBUSY;
516 516
517 slice_dbg(" search...\n"); 517 slice_dbg(" search...\n");
518 518
519 /* If we had a hint that didn't work out, see if we can fit 519 /* If we had a hint that didn't work out, see if we can fit
520 * anywhere in the good area. 520 * anywhere in the good area.
521 */ 521 */
522 if (addr) { 522 if (addr) {
523 addr = slice_find_area(mm, len, good_mask, psize, topdown, 523 addr = slice_find_area(mm, len, good_mask, psize, topdown,
524 use_cache); 524 use_cache);
525 if (addr != -ENOMEM) { 525 if (addr != -ENOMEM) {
526 slice_dbg(" found area at 0x%lx\n", addr); 526 slice_dbg(" found area at 0x%lx\n", addr);
527 return addr; 527 return addr;
528 } 528 }
529 } 529 }
530 530
531 /* Now let's see if we can find something in the existing slices 531 /* Now let's see if we can find something in the existing slices
532 * for that size plus free slices 532 * for that size plus free slices
533 */ 533 */
534 addr = slice_find_area(mm, len, potential_mask, psize, topdown, 534 addr = slice_find_area(mm, len, potential_mask, psize, topdown,
535 use_cache); 535 use_cache);
536 536
537 #ifdef CONFIG_PPC_64K_PAGES 537 #ifdef CONFIG_PPC_64K_PAGES
538 if (addr == -ENOMEM && psize == MMU_PAGE_64K) { 538 if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
539 /* retry the search with 4k-page slices included */ 539 /* retry the search with 4k-page slices included */
540 or_mask(potential_mask, compat_mask); 540 or_mask(potential_mask, compat_mask);
541 addr = slice_find_area(mm, len, potential_mask, psize, 541 addr = slice_find_area(mm, len, potential_mask, psize,
542 topdown, use_cache); 542 topdown, use_cache);
543 } 543 }
544 #endif 544 #endif
545 545
546 if (addr == -ENOMEM) 546 if (addr == -ENOMEM)
547 return -ENOMEM; 547 return -ENOMEM;
548 548
549 mask = slice_range_to_mask(addr, len); 549 mask = slice_range_to_mask(addr, len);
550 slice_dbg(" found potential area at 0x%lx\n", addr); 550 slice_dbg(" found potential area at 0x%lx\n", addr);
551 slice_print_mask(" mask", mask); 551 slice_print_mask(" mask", mask);
552 552
553 convert: 553 convert:
554 andnot_mask(mask, good_mask); 554 andnot_mask(mask, good_mask);
555 andnot_mask(mask, compat_mask); 555 andnot_mask(mask, compat_mask);
556 if (mask.low_slices || mask.high_slices) { 556 if (mask.low_slices || mask.high_slices) {
557 slice_convert(mm, mask, psize); 557 slice_convert(mm, mask, psize);
558 if (psize > MMU_PAGE_BASE) 558 if (psize > MMU_PAGE_BASE)
559 on_each_cpu(slice_flush_segments, mm, 1); 559 on_each_cpu(slice_flush_segments, mm, 1);
560 } 560 }
561 return addr; 561 return addr;
562 562
563 } 563 }
564 EXPORT_SYMBOL_GPL(slice_get_unmapped_area); 564 EXPORT_SYMBOL_GPL(slice_get_unmapped_area);
565 565
566 unsigned long arch_get_unmapped_area(struct file *filp, 566 unsigned long arch_get_unmapped_area(struct file *filp,
567 unsigned long addr, 567 unsigned long addr,
568 unsigned long len, 568 unsigned long len,
569 unsigned long pgoff, 569 unsigned long pgoff,
570 unsigned long flags) 570 unsigned long flags)
571 { 571 {
572 return slice_get_unmapped_area(addr, len, flags, 572 return slice_get_unmapped_area(addr, len, flags,
573 current->mm->context.user_psize, 573 current->mm->context.user_psize,
574 0, 1); 574 0, 1);
575 } 575 }
576 576
577 unsigned long arch_get_unmapped_area_topdown(struct file *filp, 577 unsigned long arch_get_unmapped_area_topdown(struct file *filp,
578 const unsigned long addr0, 578 const unsigned long addr0,
579 const unsigned long len, 579 const unsigned long len,
580 const unsigned long pgoff, 580 const unsigned long pgoff,
581 const unsigned long flags) 581 const unsigned long flags)
582 { 582 {
583 return slice_get_unmapped_area(addr0, len, flags, 583 return slice_get_unmapped_area(addr0, len, flags,
584 current->mm->context.user_psize, 584 current->mm->context.user_psize,
585 1, 1); 585 1, 1);
586 } 586 }
587 587
588 unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr) 588 unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
589 { 589 {
590 u64 psizes; 590 u64 psizes;
591 int index; 591 int index;
592 592
593 if (addr < SLICE_LOW_TOP) { 593 if (addr < SLICE_LOW_TOP) {
594 psizes = mm->context.low_slices_psize; 594 psizes = mm->context.low_slices_psize;
595 index = GET_LOW_SLICE_INDEX(addr); 595 index = GET_LOW_SLICE_INDEX(addr);
596 } else { 596 } else {
597 psizes = mm->context.high_slices_psize; 597 psizes = mm->context.high_slices_psize;
598 index = GET_HIGH_SLICE_INDEX(addr); 598 index = GET_HIGH_SLICE_INDEX(addr);
599 } 599 }
600 600
601 return (psizes >> (index * 4)) & 0xf; 601 return (psizes >> (index * 4)) & 0xf;
602 } 602 }
603 EXPORT_SYMBOL_GPL(get_slice_psize); 603 EXPORT_SYMBOL_GPL(get_slice_psize);
604 604
605 /* 605 /*
606 * This is called by hash_page when it needs to do a lazy conversion of 606 * This is called by hash_page when it needs to do a lazy conversion of
607 * an address space from real 64K pages to combo 4K pages (typically 607 * an address space from real 64K pages to combo 4K pages (typically
608 * when hitting a non cacheable mapping on a processor or hypervisor 608 * when hitting a non cacheable mapping on a processor or hypervisor
609 * that won't allow them for 64K pages). 609 * that won't allow them for 64K pages).
610 * 610 *
611 * This is also called in init_new_context() to change back the user 611 * This is also called in init_new_context() to change back the user
612 * psize from whatever the parent context had it set to 612 * psize from whatever the parent context had it set to
613 * N.B. This may be called before mm->context.id has been set. 613 * N.B. This may be called before mm->context.id has been set.
614 * 614 *
615 * This function will only change the content of the {low,high}_slice_psize 615 * This function will only change the content of the {low,high}_slice_psize
616 * masks, it will not flush SLBs as this shall be handled lazily by the 616 * masks, it will not flush SLBs as this shall be handled lazily by the
617 * caller. 617 * caller.
618 */ 618 */
619 void slice_set_user_psize(struct mm_struct *mm, unsigned int psize) 619 void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
620 { 620 {
621 unsigned long flags, lpsizes, hpsizes; 621 unsigned long flags, lpsizes, hpsizes;
622 unsigned int old_psize; 622 unsigned int old_psize;
623 int i; 623 int i;
624 624
625 slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize); 625 slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);
626 626
627 spin_lock_irqsave(&slice_convert_lock, flags); 627 spin_lock_irqsave(&slice_convert_lock, flags);
628 628
629 old_psize = mm->context.user_psize; 629 old_psize = mm->context.user_psize;
630 slice_dbg(" old_psize=%d\n", old_psize); 630 slice_dbg(" old_psize=%d\n", old_psize);
631 if (old_psize == psize) 631 if (old_psize == psize)
632 goto bail; 632 goto bail;
633 633
634 mm->context.user_psize = psize; 634 mm->context.user_psize = psize;
635 wmb(); 635 wmb();
636 636
637 lpsizes = mm->context.low_slices_psize; 637 lpsizes = mm->context.low_slices_psize;
638 for (i = 0; i < SLICE_NUM_LOW; i++) 638 for (i = 0; i < SLICE_NUM_LOW; i++)
639 if (((lpsizes >> (i * 4)) & 0xf) == old_psize) 639 if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
640 lpsizes = (lpsizes & ~(0xful << (i * 4))) | 640 lpsizes = (lpsizes & ~(0xful << (i * 4))) |
641 (((unsigned long)psize) << (i * 4)); 641 (((unsigned long)psize) << (i * 4));
642 642
643 hpsizes = mm->context.high_slices_psize; 643 hpsizes = mm->context.high_slices_psize;
644 for (i = 0; i < SLICE_NUM_HIGH; i++) 644 for (i = 0; i < SLICE_NUM_HIGH; i++)
645 if (((hpsizes >> (i * 4)) & 0xf) == old_psize) 645 if (((hpsizes >> (i * 4)) & 0xf) == old_psize)
646 hpsizes = (hpsizes & ~(0xful << (i * 4))) | 646 hpsizes = (hpsizes & ~(0xful << (i * 4))) |
647 (((unsigned long)psize) << (i * 4)); 647 (((unsigned long)psize) << (i * 4));
648 648
649 mm->context.low_slices_psize = lpsizes; 649 mm->context.low_slices_psize = lpsizes;
650 mm->context.high_slices_psize = hpsizes; 650 mm->context.high_slices_psize = hpsizes;
651 651
652 slice_dbg(" lsps=%lx, hsps=%lx\n", 652 slice_dbg(" lsps=%lx, hsps=%lx\n",
653 mm->context.low_slices_psize, 653 mm->context.low_slices_psize,
654 mm->context.high_slices_psize); 654 mm->context.high_slices_psize);
655 655
656 bail: 656 bail:
657 spin_unlock_irqrestore(&slice_convert_lock, flags); 657 spin_unlock_irqrestore(&slice_convert_lock, flags);
658 } 658 }
659 659
660 void slice_set_psize(struct mm_struct *mm, unsigned long address, 660 void slice_set_psize(struct mm_struct *mm, unsigned long address,
661 unsigned int psize) 661 unsigned int psize)
662 { 662 {
663 unsigned long i, flags; 663 unsigned long i, flags;
664 u64 *p; 664 u64 *p;
665 665
666 spin_lock_irqsave(&slice_convert_lock, flags); 666 spin_lock_irqsave(&slice_convert_lock, flags);
667 if (address < SLICE_LOW_TOP) { 667 if (address < SLICE_LOW_TOP) {
668 i = GET_LOW_SLICE_INDEX(address); 668 i = GET_LOW_SLICE_INDEX(address);
669 p = &mm->context.low_slices_psize; 669 p = &mm->context.low_slices_psize;
670 } else { 670 } else {
671 i = GET_HIGH_SLICE_INDEX(address); 671 i = GET_HIGH_SLICE_INDEX(address);
672 p = &mm->context.high_slices_psize; 672 p = &mm->context.high_slices_psize;
673 } 673 }
674 *p = (*p & ~(0xful << (i * 4))) | ((unsigned long) psize << (i * 4)); 674 *p = (*p & ~(0xful << (i * 4))) | ((unsigned long) psize << (i * 4));
675 spin_unlock_irqrestore(&slice_convert_lock, flags); 675 spin_unlock_irqrestore(&slice_convert_lock, flags);
676 676
677 #ifdef CONFIG_SPU_BASE 677 #ifdef CONFIG_SPU_BASE
678 spu_flush_all_slbs(mm); 678 spu_flush_all_slbs(mm);
679 #endif 679 #endif
680 } 680 }
681 681
682 void slice_set_range_psize(struct mm_struct *mm, unsigned long start, 682 void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
683 unsigned long len, unsigned int psize) 683 unsigned long len, unsigned int psize)
684 { 684 {
685 struct slice_mask mask = slice_range_to_mask(start, len); 685 struct slice_mask mask = slice_range_to_mask(start, len);
686 686
687 slice_convert(mm, mask, psize); 687 slice_convert(mm, mask, psize);
688 } 688 }
689 689
690 /* 690 /*
691 * is_hugepage_only_range() is used by generic code to verify whether 691 * is_hugepage_only_range() is used by generic code to verify whether
692 * a normal mmap mapping (non hugetlbfs) is valid on a given area. 692 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
693 * 693 *
694 * until the generic code provides a more generic hook and/or starts 694 * until the generic code provides a more generic hook and/or starts
695 * calling arch get_unmapped_area for MAP_FIXED (which our implementation 695 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
696 * here knows how to deal with), we hijack it to keep standard mappings 696 * here knows how to deal with), we hijack it to keep standard mappings
697 * away from us. 697 * away from us.
698 * 698 *
699 * because of that generic code limitation, MAP_FIXED mapping cannot 699 * because of that generic code limitation, MAP_FIXED mapping cannot
700 * "convert" back a slice with no VMAs to the standard page size, only 700 * "convert" back a slice with no VMAs to the standard page size, only
701 * get_unmapped_area() can. It would be possible to fix it here but I 701 * get_unmapped_area() can. It would be possible to fix it here but I
702 * prefer working on fixing the generic code instead. 702 * prefer working on fixing the generic code instead.
703 * 703 *
704 * WARNING: This will not work if hugetlbfs isn't enabled since the 704 * WARNING: This will not work if hugetlbfs isn't enabled since the
705 * generic code will redefine that function as 0 in that case. This is ok 705 * generic code will redefine that function as 0 in that case. This is ok
706 * for now as we only use slices with hugetlbfs enabled. This should 706 * for now as we only use slices with hugetlbfs enabled. This should
707 * be fixed as the generic code gets fixed. 707 * be fixed as the generic code gets fixed.
708 */ 708 */
709 int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, 709 int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
710 unsigned long len) 710 unsigned long len)
711 { 711 {
712 struct slice_mask mask, available; 712 struct slice_mask mask, available;
713 unsigned int psize = mm->context.user_psize; 713 unsigned int psize = mm->context.user_psize;
714 714
715 mask = slice_range_to_mask(addr, len); 715 mask = slice_range_to_mask(addr, len);
716 available = slice_mask_for_size(mm, psize); 716 available = slice_mask_for_size(mm, psize);
717 #ifdef CONFIG_PPC_64K_PAGES 717 #ifdef CONFIG_PPC_64K_PAGES
718 /* We need to account for 4k slices too */ 718 /* We need to account for 4k slices too */
719 if (psize == MMU_PAGE_64K) { 719 if (psize == MMU_PAGE_64K) {
720 struct slice_mask compat_mask; 720 struct slice_mask compat_mask;
721 compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K); 721 compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
722 or_mask(available, compat_mask); 722 or_mask(available, compat_mask);
723 } 723 }
724 #endif 724 #endif
725 725
726 #if 0 /* too verbose */ 726 #if 0 /* too verbose */
727 slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n", 727 slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
728 mm, addr, len); 728 mm, addr, len);
729 slice_print_mask(" mask", mask); 729 slice_print_mask(" mask", mask);
730 slice_print_mask(" available", available); 730 slice_print_mask(" available", available);
731 #endif 731 #endif
732 return !slice_check_fit(mask, available); 732 return !slice_check_fit(mask, available);
733 } 733 }
734 734
735 735
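The slice code above packs its per-slice bookkeeping into plain integers: one bit per slice in the low_slices/high_slices masks, and one 4-bit psize value per slice in the 64-bit {low,high}_slices_psize words (16 slices per word). The stand-alone sketch below is illustration only, not part of this patch; the slice index and psize values are invented. It reproduces the same read/update arithmetic that get_slice_psize() and slice_convert() apply to those nibbles.

/* Illustration only -- mirrors the 4-bit-per-slice packing shown above. */
#include <stdio.h>
#include <stdint.h>

static unsigned int read_psize(uint64_t psizes, unsigned int index)
{
	return (psizes >> (index * 4)) & 0xf;		/* as in get_slice_psize() */
}

static uint64_t write_psize(uint64_t psizes, unsigned int index,
			    unsigned int psize)
{
	/* clear the nibble, then OR in the new value, as slice_convert() does */
	return (psizes & ~(0xfULL << (index * 4))) |
	       ((uint64_t)psize << (index * 4));
}

int main(void)
{
	uint64_t psizes = 0;			/* 16 slices, all psize 0 */

	psizes = write_psize(psizes, 3, 5);	/* made-up: slice 3 -> psize 5 */
	printf("slice 3 psize = %u\n", read_psize(psizes, 3));
	printf("slice 0 psize = %u\n", read_psize(psizes, 0));
	return 0;
}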
arch/powerpc/platforms/cell/beat.c
1 /* 1 /*
2 * Simple routines for Celleb/Beat 2 * Simple routines for Celleb/Beat
3 * 3 *
4 * (C) Copyright 2006-2007 TOSHIBA CORPORATION 4 * (C) Copyright 2006-2007 TOSHIBA CORPORATION
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or 8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version. 9 * (at your option) any later version.
10 * 10 *
11 * This program is distributed in the hope that it will be useful, 11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 * 15 *
16 * You should have received a copy of the GNU General Public License along 16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc., 17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */ 19 */
20 20
21 #include <linux/module.h> 21 #include <linux/export.h>
22 #include <linux/init.h> 22 #include <linux/init.h>
23 #include <linux/err.h> 23 #include <linux/err.h>
24 #include <linux/rtc.h> 24 #include <linux/rtc.h>
25 #include <linux/interrupt.h> 25 #include <linux/interrupt.h>
26 #include <linux/irqreturn.h> 26 #include <linux/irqreturn.h>
27 #include <linux/reboot.h> 27 #include <linux/reboot.h>
28 28
29 #include <asm/hvconsole.h> 29 #include <asm/hvconsole.h>
30 #include <asm/time.h> 30 #include <asm/time.h>
31 #include <asm/machdep.h> 31 #include <asm/machdep.h>
32 #include <asm/firmware.h> 32 #include <asm/firmware.h>
33 33
34 #include "beat_wrapper.h" 34 #include "beat_wrapper.h"
35 #include "beat.h" 35 #include "beat.h"
36 #include "beat_interrupt.h" 36 #include "beat_interrupt.h"
37 37
38 static int beat_pm_poweroff_flag; 38 static int beat_pm_poweroff_flag;
39 39
40 void beat_restart(char *cmd) 40 void beat_restart(char *cmd)
41 { 41 {
42 beat_shutdown_logical_partition(!beat_pm_poweroff_flag); 42 beat_shutdown_logical_partition(!beat_pm_poweroff_flag);
43 } 43 }
44 44
45 void beat_power_off(void) 45 void beat_power_off(void)
46 { 46 {
47 beat_shutdown_logical_partition(0); 47 beat_shutdown_logical_partition(0);
48 } 48 }
49 49
50 u64 beat_halt_code = 0x1000000000000000UL; 50 u64 beat_halt_code = 0x1000000000000000UL;
51 EXPORT_SYMBOL(beat_halt_code); 51 EXPORT_SYMBOL(beat_halt_code);
52 52
53 void beat_halt(void) 53 void beat_halt(void)
54 { 54 {
55 beat_shutdown_logical_partition(beat_halt_code); 55 beat_shutdown_logical_partition(beat_halt_code);
56 } 56 }
57 57
58 int beat_set_rtc_time(struct rtc_time *rtc_time) 58 int beat_set_rtc_time(struct rtc_time *rtc_time)
59 { 59 {
60 u64 tim; 60 u64 tim;
61 tim = mktime(rtc_time->tm_year+1900, 61 tim = mktime(rtc_time->tm_year+1900,
62 rtc_time->tm_mon+1, rtc_time->tm_mday, 62 rtc_time->tm_mon+1, rtc_time->tm_mday,
63 rtc_time->tm_hour, rtc_time->tm_min, rtc_time->tm_sec); 63 rtc_time->tm_hour, rtc_time->tm_min, rtc_time->tm_sec);
64 if (beat_rtc_write(tim)) 64 if (beat_rtc_write(tim))
65 return -1; 65 return -1;
66 return 0; 66 return 0;
67 } 67 }
68 68
69 void beat_get_rtc_time(struct rtc_time *rtc_time) 69 void beat_get_rtc_time(struct rtc_time *rtc_time)
70 { 70 {
71 u64 tim; 71 u64 tim;
72 72
73 if (beat_rtc_read(&tim)) 73 if (beat_rtc_read(&tim))
74 tim = 0; 74 tim = 0;
75 to_tm(tim, rtc_time); 75 to_tm(tim, rtc_time);
76 rtc_time->tm_year -= 1900; 76 rtc_time->tm_year -= 1900;
77 rtc_time->tm_mon -= 1; 77 rtc_time->tm_mon -= 1;
78 } 78 }
79 79
80 #define BEAT_NVRAM_SIZE 4096 80 #define BEAT_NVRAM_SIZE 4096
81 81
82 ssize_t beat_nvram_read(char *buf, size_t count, loff_t *index) 82 ssize_t beat_nvram_read(char *buf, size_t count, loff_t *index)
83 { 83 {
84 unsigned int i; 84 unsigned int i;
85 unsigned long len; 85 unsigned long len;
86 char *p = buf; 86 char *p = buf;
87 87
88 if (*index >= BEAT_NVRAM_SIZE) 88 if (*index >= BEAT_NVRAM_SIZE)
89 return -ENODEV; 89 return -ENODEV;
90 i = *index; 90 i = *index;
91 if (i + count > BEAT_NVRAM_SIZE) 91 if (i + count > BEAT_NVRAM_SIZE)
92 count = BEAT_NVRAM_SIZE - i; 92 count = BEAT_NVRAM_SIZE - i;
93 93
94 for (; count != 0; count -= len) { 94 for (; count != 0; count -= len) {
95 len = count; 95 len = count;
96 if (len > BEAT_NVRW_CNT) 96 if (len > BEAT_NVRW_CNT)
97 len = BEAT_NVRW_CNT; 97 len = BEAT_NVRW_CNT;
98 if (beat_eeprom_read(i, len, p)) 98 if (beat_eeprom_read(i, len, p))
99 return -EIO; 99 return -EIO;
100 100
101 p += len; 101 p += len;
102 i += len; 102 i += len;
103 } 103 }
104 *index = i; 104 *index = i;
105 return p - buf; 105 return p - buf;
106 } 106 }
107 107
108 ssize_t beat_nvram_write(char *buf, size_t count, loff_t *index) 108 ssize_t beat_nvram_write(char *buf, size_t count, loff_t *index)
109 { 109 {
110 unsigned int i; 110 unsigned int i;
111 unsigned long len; 111 unsigned long len;
112 char *p = buf; 112 char *p = buf;
113 113
114 if (*index >= BEAT_NVRAM_SIZE) 114 if (*index >= BEAT_NVRAM_SIZE)
115 return -ENODEV; 115 return -ENODEV;
116 i = *index; 116 i = *index;
117 if (i + count > BEAT_NVRAM_SIZE) 117 if (i + count > BEAT_NVRAM_SIZE)
118 count = BEAT_NVRAM_SIZE - i; 118 count = BEAT_NVRAM_SIZE - i;
119 119
120 for (; count != 0; count -= len) { 120 for (; count != 0; count -= len) {
121 len = count; 121 len = count;
122 if (len > BEAT_NVRW_CNT) 122 if (len > BEAT_NVRW_CNT)
123 len = BEAT_NVRW_CNT; 123 len = BEAT_NVRW_CNT;
124 if (beat_eeprom_write(i, len, p)) 124 if (beat_eeprom_write(i, len, p))
125 return -EIO; 125 return -EIO;
126 126
127 p += len; 127 p += len;
128 i += len; 128 i += len;
129 } 129 }
130 *index = i; 130 *index = i;
131 return p - buf; 131 return p - buf;
132 } 132 }
133 133
134 ssize_t beat_nvram_get_size(void) 134 ssize_t beat_nvram_get_size(void)
135 { 135 {
136 return BEAT_NVRAM_SIZE; 136 return BEAT_NVRAM_SIZE;
137 } 137 }
138 138
139 int beat_set_xdabr(unsigned long dabr) 139 int beat_set_xdabr(unsigned long dabr)
140 { 140 {
141 if (beat_set_dabr(dabr, DABRX_KERNEL | DABRX_USER)) 141 if (beat_set_dabr(dabr, DABRX_KERNEL | DABRX_USER))
142 return -1; 142 return -1;
143 return 0; 143 return 0;
144 } 144 }
145 145
146 int64_t beat_get_term_char(u64 vterm, u64 *len, u64 *t1, u64 *t2) 146 int64_t beat_get_term_char(u64 vterm, u64 *len, u64 *t1, u64 *t2)
147 { 147 {
148 u64 db[2]; 148 u64 db[2];
149 s64 ret; 149 s64 ret;
150 150
151 ret = beat_get_characters_from_console(vterm, len, (u8 *)db); 151 ret = beat_get_characters_from_console(vterm, len, (u8 *)db);
152 if (ret == 0) { 152 if (ret == 0) {
153 *t1 = db[0]; 153 *t1 = db[0];
154 *t2 = db[1]; 154 *t2 = db[1];
155 } 155 }
156 return ret; 156 return ret;
157 } 157 }
158 EXPORT_SYMBOL(beat_get_term_char); 158 EXPORT_SYMBOL(beat_get_term_char);
159 159
160 int64_t beat_put_term_char(u64 vterm, u64 len, u64 t1, u64 t2) 160 int64_t beat_put_term_char(u64 vterm, u64 len, u64 t1, u64 t2)
161 { 161 {
162 u64 db[2]; 162 u64 db[2];
163 163
164 db[0] = t1; 164 db[0] = t1;
165 db[1] = t2; 165 db[1] = t2;
166 return beat_put_characters_to_console(vterm, len, (u8 *)db); 166 return beat_put_characters_to_console(vterm, len, (u8 *)db);
167 } 167 }
168 EXPORT_SYMBOL(beat_put_term_char); 168 EXPORT_SYMBOL(beat_put_term_char);
169 169
170 void beat_power_save(void) 170 void beat_power_save(void)
171 { 171 {
172 beat_pause(0); 172 beat_pause(0);
173 } 173 }
174 174
175 #ifdef CONFIG_KEXEC 175 #ifdef CONFIG_KEXEC
176 void beat_kexec_cpu_down(int crash, int secondary) 176 void beat_kexec_cpu_down(int crash, int secondary)
177 { 177 {
178 beatic_deinit_IRQ(); 178 beatic_deinit_IRQ();
179 } 179 }
180 #endif 180 #endif
181 181
182 static irqreturn_t beat_power_event(int virq, void *arg) 182 static irqreturn_t beat_power_event(int virq, void *arg)
183 { 183 {
184 printk(KERN_DEBUG "Beat: power button pressed\n"); 184 printk(KERN_DEBUG "Beat: power button pressed\n");
185 beat_pm_poweroff_flag = 1; 185 beat_pm_poweroff_flag = 1;
186 ctrl_alt_del(); 186 ctrl_alt_del();
187 return IRQ_HANDLED; 187 return IRQ_HANDLED;
188 } 188 }
189 189
190 static irqreturn_t beat_reset_event(int virq, void *arg) 190 static irqreturn_t beat_reset_event(int virq, void *arg)
191 { 191 {
192 printk(KERN_DEBUG "Beat: reset button pressed\n"); 192 printk(KERN_DEBUG "Beat: reset button pressed\n");
193 beat_pm_poweroff_flag = 0; 193 beat_pm_poweroff_flag = 0;
194 ctrl_alt_del(); 194 ctrl_alt_del();
195 return IRQ_HANDLED; 195 return IRQ_HANDLED;
196 } 196 }
197 197
198 static struct beat_event_list { 198 static struct beat_event_list {
199 const char *typecode; 199 const char *typecode;
200 irq_handler_t handler; 200 irq_handler_t handler;
201 unsigned int virq; 201 unsigned int virq;
202 } beat_event_list[] = { 202 } beat_event_list[] = {
203 { "power", beat_power_event, 0 }, 203 { "power", beat_power_event, 0 },
204 { "reset", beat_reset_event, 0 }, 204 { "reset", beat_reset_event, 0 },
205 }; 205 };
206 206
207 static int __init beat_register_event(void) 207 static int __init beat_register_event(void)
208 { 208 {
209 u64 path[4], data[2]; 209 u64 path[4], data[2];
210 int rc, i; 210 int rc, i;
211 unsigned int virq; 211 unsigned int virq;
212 212
213 for (i = 0; i < ARRAY_SIZE(beat_event_list); i++) { 213 for (i = 0; i < ARRAY_SIZE(beat_event_list); i++) {
214 struct beat_event_list *ev = &beat_event_list[i]; 214 struct beat_event_list *ev = &beat_event_list[i];
215 215
216 if (beat_construct_event_receive_port(data) != 0) { 216 if (beat_construct_event_receive_port(data) != 0) {
217 printk(KERN_ERR "Beat: " 217 printk(KERN_ERR "Beat: "
218 "cannot construct event receive port for %s\n", 218 "cannot construct event receive port for %s\n",
219 ev->typecode); 219 ev->typecode);
220 return -EINVAL; 220 return -EINVAL;
221 } 221 }
222 222
223 virq = irq_create_mapping(NULL, data[0]); 223 virq = irq_create_mapping(NULL, data[0]);
224 if (virq == NO_IRQ) { 224 if (virq == NO_IRQ) {
225 printk(KERN_ERR "Beat: failed to get virtual IRQ" 225 printk(KERN_ERR "Beat: failed to get virtual IRQ"
226 " for event receive port for %s\n", 226 " for event receive port for %s\n",
227 ev->typecode); 227 ev->typecode);
228 beat_destruct_event_receive_port(data[0]); 228 beat_destruct_event_receive_port(data[0]);
229 return -EIO; 229 return -EIO;
230 } 230 }
231 ev->virq = virq; 231 ev->virq = virq;
232 232
233 rc = request_irq(virq, ev->handler, IRQF_DISABLED, 233 rc = request_irq(virq, ev->handler, IRQF_DISABLED,
234 ev->typecode, NULL); 234 ev->typecode, NULL);
235 if (rc != 0) { 235 if (rc != 0) {
236 printk(KERN_ERR "Beat: failed to request virtual IRQ" 236 printk(KERN_ERR "Beat: failed to request virtual IRQ"
237 " for event receive port for %s\n", 237 " for event receive port for %s\n",
238 ev->typecode); 238 ev->typecode);
239 beat_destruct_event_receive_port(data[0]); 239 beat_destruct_event_receive_port(data[0]);
240 return rc; 240 return rc;
241 } 241 }
242 242
243 path[0] = 0x1000000065780000ul; /* 1,ex */ 243 path[0] = 0x1000000065780000ul; /* 1,ex */
244 path[1] = 0x627574746f6e0000ul; /* button */ 244 path[1] = 0x627574746f6e0000ul; /* button */
245 path[2] = 0; 245 path[2] = 0;
246 strncpy((char *)&path[2], ev->typecode, 8); 246 strncpy((char *)&path[2], ev->typecode, 8);
247 path[3] = 0; 247 path[3] = 0;
248 data[1] = 0; 248 data[1] = 0;
249 249
250 beat_create_repository_node(path, data); 250 beat_create_repository_node(path, data);
251 } 251 }
252 return 0; 252 return 0;
253 } 253 }
254 254
255 static int __init beat_event_init(void) 255 static int __init beat_event_init(void)
256 { 256 {
257 if (!firmware_has_feature(FW_FEATURE_BEAT)) 257 if (!firmware_has_feature(FW_FEATURE_BEAT))
258 return -EINVAL; 258 return -EINVAL;
259 259
260 beat_pm_poweroff_flag = 0; 260 beat_pm_poweroff_flag = 0;
261 return beat_register_event(); 261 return beat_register_event();
262 } 262 }
263 263
264 device_initcall(beat_event_init); 264 device_initcall(beat_event_init);
265 265
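The hunks in this and the following files all make the same one-line swap of <linux/module.h> for <linux/export.h> next to their EXPORT_SYMBOL users. A minimal sketch of such a file (the function name is hypothetical, purely for illustration):

/* Minimal sketch: a translation unit that only exports symbols needs
 * <linux/export.h>; nothing else from <linux/module.h> is referenced. */
#include <linux/export.h>

int example_exported_helper(void)       /* hypothetical name */
{
        return 0;
}
EXPORT_SYMBOL_GPL(example_exported_helper);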
arch/powerpc/platforms/cell/cbe_regs.c
1 /* 1 /*
2 * cbe_regs.c 2 * cbe_regs.c
3 * 3 *
4 * Accessor routines for the various MMIO register blocks of the CBE 4 * Accessor routines for the various MMIO register blocks of the CBE
5 * 5 *
6 * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp. 6 * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
7 */ 7 */
8 8
9 #include <linux/percpu.h> 9 #include <linux/percpu.h>
10 #include <linux/types.h> 10 #include <linux/types.h>
11 #include <linux/module.h> 11 #include <linux/export.h>
12 #include <linux/of_device.h> 12 #include <linux/of_device.h>
13 #include <linux/of_platform.h> 13 #include <linux/of_platform.h>
14 14
15 #include <asm/io.h> 15 #include <asm/io.h>
16 #include <asm/pgtable.h> 16 #include <asm/pgtable.h>
17 #include <asm/prom.h> 17 #include <asm/prom.h>
18 #include <asm/ptrace.h> 18 #include <asm/ptrace.h>
19 #include <asm/cell-regs.h> 19 #include <asm/cell-regs.h>
20 20
21 /* 21 /*
22 * Current implementation uses "cpu" nodes. We build our own mapping 22 * Current implementation uses "cpu" nodes. We build our own mapping
23 * array of cpu numbers to cpu nodes locally for now to allow interrupt 23 * array of cpu numbers to cpu nodes locally for now to allow interrupt
24 * time code to have a fast path rather than call of_get_cpu_node(). If 24 * time code to have a fast path rather than call of_get_cpu_node(). If
25 * we implement cpu hotplug, we'll have to install an appropriate notifier 25 * we implement cpu hotplug, we'll have to install an appropriate notifier
26 * in order to release references to the cpu going away 26 * in order to release references to the cpu going away
27 */ 27 */
28 static struct cbe_regs_map 28 static struct cbe_regs_map
29 { 29 {
30 struct device_node *cpu_node; 30 struct device_node *cpu_node;
31 struct device_node *be_node; 31 struct device_node *be_node;
32 struct cbe_pmd_regs __iomem *pmd_regs; 32 struct cbe_pmd_regs __iomem *pmd_regs;
33 struct cbe_iic_regs __iomem *iic_regs; 33 struct cbe_iic_regs __iomem *iic_regs;
34 struct cbe_mic_tm_regs __iomem *mic_tm_regs; 34 struct cbe_mic_tm_regs __iomem *mic_tm_regs;
35 struct cbe_pmd_shadow_regs pmd_shadow_regs; 35 struct cbe_pmd_shadow_regs pmd_shadow_regs;
36 } cbe_regs_maps[MAX_CBE]; 36 } cbe_regs_maps[MAX_CBE];
37 static int cbe_regs_map_count; 37 static int cbe_regs_map_count;
38 38
39 static struct cbe_thread_map 39 static struct cbe_thread_map
40 { 40 {
41 struct device_node *cpu_node; 41 struct device_node *cpu_node;
42 struct device_node *be_node; 42 struct device_node *be_node;
43 struct cbe_regs_map *regs; 43 struct cbe_regs_map *regs;
44 unsigned int thread_id; 44 unsigned int thread_id;
45 unsigned int cbe_id; 45 unsigned int cbe_id;
46 } cbe_thread_map[NR_CPUS]; 46 } cbe_thread_map[NR_CPUS];
47 47
48 static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = {CPU_BITS_NONE} }; 48 static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = {CPU_BITS_NONE} };
49 static cpumask_t cbe_first_online_cpu = { CPU_BITS_NONE }; 49 static cpumask_t cbe_first_online_cpu = { CPU_BITS_NONE };
50 50
51 static struct cbe_regs_map *cbe_find_map(struct device_node *np) 51 static struct cbe_regs_map *cbe_find_map(struct device_node *np)
52 { 52 {
53 int i; 53 int i;
54 struct device_node *tmp_np; 54 struct device_node *tmp_np;
55 55
56 if (strcasecmp(np->type, "spe")) { 56 if (strcasecmp(np->type, "spe")) {
57 for (i = 0; i < cbe_regs_map_count; i++) 57 for (i = 0; i < cbe_regs_map_count; i++)
58 if (cbe_regs_maps[i].cpu_node == np || 58 if (cbe_regs_maps[i].cpu_node == np ||
59 cbe_regs_maps[i].be_node == np) 59 cbe_regs_maps[i].be_node == np)
60 return &cbe_regs_maps[i]; 60 return &cbe_regs_maps[i];
61 return NULL; 61 return NULL;
62 } 62 }
63 63
64 if (np->data) 64 if (np->data)
65 return np->data; 65 return np->data;
66 66
67 /* walk up path until cpu or be node was found */ 67 /* walk up path until cpu or be node was found */
68 tmp_np = np; 68 tmp_np = np;
69 do { 69 do {
70 tmp_np = tmp_np->parent; 70 tmp_np = tmp_np->parent;
71 /* on a correct devicetree we won't get up to root */ 71 /* on a correct devicetree we won't get up to root */
72 BUG_ON(!tmp_np); 72 BUG_ON(!tmp_np);
73 } while (strcasecmp(tmp_np->type, "cpu") && 73 } while (strcasecmp(tmp_np->type, "cpu") &&
74 strcasecmp(tmp_np->type, "be")); 74 strcasecmp(tmp_np->type, "be"));
75 75
76 np->data = cbe_find_map(tmp_np); 76 np->data = cbe_find_map(tmp_np);
77 77
78 return np->data; 78 return np->data;
79 } 79 }
80 80
81 struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np) 81 struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np)
82 { 82 {
83 struct cbe_regs_map *map = cbe_find_map(np); 83 struct cbe_regs_map *map = cbe_find_map(np);
84 if (map == NULL) 84 if (map == NULL)
85 return NULL; 85 return NULL;
86 return map->pmd_regs; 86 return map->pmd_regs;
87 } 87 }
88 EXPORT_SYMBOL_GPL(cbe_get_pmd_regs); 88 EXPORT_SYMBOL_GPL(cbe_get_pmd_regs);
89 89
90 struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu) 90 struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
91 { 91 {
92 struct cbe_regs_map *map = cbe_thread_map[cpu].regs; 92 struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
93 if (map == NULL) 93 if (map == NULL)
94 return NULL; 94 return NULL;
95 return map->pmd_regs; 95 return map->pmd_regs;
96 } 96 }
97 EXPORT_SYMBOL_GPL(cbe_get_cpu_pmd_regs); 97 EXPORT_SYMBOL_GPL(cbe_get_cpu_pmd_regs);
98 98
99 struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np) 99 struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np)
100 { 100 {
101 struct cbe_regs_map *map = cbe_find_map(np); 101 struct cbe_regs_map *map = cbe_find_map(np);
102 if (map == NULL) 102 if (map == NULL)
103 return NULL; 103 return NULL;
104 return &map->pmd_shadow_regs; 104 return &map->pmd_shadow_regs;
105 } 105 }
106 106
107 struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu) 107 struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu)
108 { 108 {
109 struct cbe_regs_map *map = cbe_thread_map[cpu].regs; 109 struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
110 if (map == NULL) 110 if (map == NULL)
111 return NULL; 111 return NULL;
112 return &map->pmd_shadow_regs; 112 return &map->pmd_shadow_regs;
113 } 113 }
114 114
115 struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np) 115 struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
116 { 116 {
117 struct cbe_regs_map *map = cbe_find_map(np); 117 struct cbe_regs_map *map = cbe_find_map(np);
118 if (map == NULL) 118 if (map == NULL)
119 return NULL; 119 return NULL;
120 return map->iic_regs; 120 return map->iic_regs;
121 } 121 }
122 122
123 struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu) 123 struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
124 { 124 {
125 struct cbe_regs_map *map = cbe_thread_map[cpu].regs; 125 struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
126 if (map == NULL) 126 if (map == NULL)
127 return NULL; 127 return NULL;
128 return map->iic_regs; 128 return map->iic_regs;
129 } 129 }
130 130
131 struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np) 131 struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np)
132 { 132 {
133 struct cbe_regs_map *map = cbe_find_map(np); 133 struct cbe_regs_map *map = cbe_find_map(np);
134 if (map == NULL) 134 if (map == NULL)
135 return NULL; 135 return NULL;
136 return map->mic_tm_regs; 136 return map->mic_tm_regs;
137 } 137 }
138 138
139 struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu) 139 struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu)
140 { 140 {
141 struct cbe_regs_map *map = cbe_thread_map[cpu].regs; 141 struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
142 if (map == NULL) 142 if (map == NULL)
143 return NULL; 143 return NULL;
144 return map->mic_tm_regs; 144 return map->mic_tm_regs;
145 } 145 }
146 EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs); 146 EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs);
147 147
148 u32 cbe_get_hw_thread_id(int cpu) 148 u32 cbe_get_hw_thread_id(int cpu)
149 { 149 {
150 return cbe_thread_map[cpu].thread_id; 150 return cbe_thread_map[cpu].thread_id;
151 } 151 }
152 EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id); 152 EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id);
153 153
154 u32 cbe_cpu_to_node(int cpu) 154 u32 cbe_cpu_to_node(int cpu)
155 { 155 {
156 return cbe_thread_map[cpu].cbe_id; 156 return cbe_thread_map[cpu].cbe_id;
157 } 157 }
158 EXPORT_SYMBOL_GPL(cbe_cpu_to_node); 158 EXPORT_SYMBOL_GPL(cbe_cpu_to_node);
159 159
160 u32 cbe_node_to_cpu(int node) 160 u32 cbe_node_to_cpu(int node)
161 { 161 {
162 return cpumask_first(&cbe_local_mask[node]); 162 return cpumask_first(&cbe_local_mask[node]);
163 163
164 } 164 }
165 EXPORT_SYMBOL_GPL(cbe_node_to_cpu); 165 EXPORT_SYMBOL_GPL(cbe_node_to_cpu);
166 166
167 static struct device_node *cbe_get_be_node(int cpu_id) 167 static struct device_node *cbe_get_be_node(int cpu_id)
168 { 168 {
169 struct device_node *np; 169 struct device_node *np;
170 170
171 for_each_node_by_type (np, "be") { 171 for_each_node_by_type (np, "be") {
172 int len,i; 172 int len,i;
173 const phandle *cpu_handle; 173 const phandle *cpu_handle;
174 174
175 cpu_handle = of_get_property(np, "cpus", &len); 175 cpu_handle = of_get_property(np, "cpus", &len);
176 176
177 /* 177 /*
178 * the CAB SLOF tree is non-compliant, so we just assume 178 * the CAB SLOF tree is non-compliant, so we just assume
179 * there is only one node 179 * there is only one node
180 */ 180 */
181 if (WARN_ON_ONCE(!cpu_handle)) 181 if (WARN_ON_ONCE(!cpu_handle))
182 return np; 182 return np;
183 183
184 for (i=0; i<len; i++) 184 for (i=0; i<len; i++)
185 if (of_find_node_by_phandle(cpu_handle[i]) == of_get_cpu_node(cpu_id, NULL)) 185 if (of_find_node_by_phandle(cpu_handle[i]) == of_get_cpu_node(cpu_id, NULL))
186 return np; 186 return np;
187 } 187 }
188 188
189 return NULL; 189 return NULL;
190 } 190 }
191 191
192 void __init cbe_fill_regs_map(struct cbe_regs_map *map) 192 void __init cbe_fill_regs_map(struct cbe_regs_map *map)
193 { 193 {
194 if(map->be_node) { 194 if(map->be_node) {
195 struct device_node *be, *np; 195 struct device_node *be, *np;
196 196
197 be = map->be_node; 197 be = map->be_node;
198 198
199 for_each_node_by_type(np, "pervasive") 199 for_each_node_by_type(np, "pervasive")
200 if (of_get_parent(np) == be) 200 if (of_get_parent(np) == be)
201 map->pmd_regs = of_iomap(np, 0); 201 map->pmd_regs = of_iomap(np, 0);
202 202
203 for_each_node_by_type(np, "CBEA-Internal-Interrupt-Controller") 203 for_each_node_by_type(np, "CBEA-Internal-Interrupt-Controller")
204 if (of_get_parent(np) == be) 204 if (of_get_parent(np) == be)
205 map->iic_regs = of_iomap(np, 2); 205 map->iic_regs = of_iomap(np, 2);
206 206
207 for_each_node_by_type(np, "mic-tm") 207 for_each_node_by_type(np, "mic-tm")
208 if (of_get_parent(np) == be) 208 if (of_get_parent(np) == be)
209 map->mic_tm_regs = of_iomap(np, 0); 209 map->mic_tm_regs = of_iomap(np, 0);
210 } else { 210 } else {
211 struct device_node *cpu; 211 struct device_node *cpu;
212 /* That hack must die die die ! */ 212 /* That hack must die die die ! */
213 const struct address_prop { 213 const struct address_prop {
214 unsigned long address; 214 unsigned long address;
215 unsigned int len; 215 unsigned int len;
216 } __attribute__((packed)) *prop; 216 } __attribute__((packed)) *prop;
217 217
218 cpu = map->cpu_node; 218 cpu = map->cpu_node;
219 219
220 prop = of_get_property(cpu, "pervasive", NULL); 220 prop = of_get_property(cpu, "pervasive", NULL);
221 if (prop != NULL) 221 if (prop != NULL)
222 map->pmd_regs = ioremap(prop->address, prop->len); 222 map->pmd_regs = ioremap(prop->address, prop->len);
223 223
224 prop = of_get_property(cpu, "iic", NULL); 224 prop = of_get_property(cpu, "iic", NULL);
225 if (prop != NULL) 225 if (prop != NULL)
226 map->iic_regs = ioremap(prop->address, prop->len); 226 map->iic_regs = ioremap(prop->address, prop->len);
227 227
228 prop = of_get_property(cpu, "mic-tm", NULL); 228 prop = of_get_property(cpu, "mic-tm", NULL);
229 if (prop != NULL) 229 if (prop != NULL)
230 map->mic_tm_regs = ioremap(prop->address, prop->len); 230 map->mic_tm_regs = ioremap(prop->address, prop->len);
231 } 231 }
232 } 232 }
233 233
234 234
235 void __init cbe_regs_init(void) 235 void __init cbe_regs_init(void)
236 { 236 {
237 int i; 237 int i;
238 unsigned int thread_id; 238 unsigned int thread_id;
239 struct device_node *cpu; 239 struct device_node *cpu;
240 240
241 /* Build local fast map of CPUs */ 241 /* Build local fast map of CPUs */
242 for_each_possible_cpu(i) { 242 for_each_possible_cpu(i) {
243 cbe_thread_map[i].cpu_node = of_get_cpu_node(i, &thread_id); 243 cbe_thread_map[i].cpu_node = of_get_cpu_node(i, &thread_id);
244 cbe_thread_map[i].be_node = cbe_get_be_node(i); 244 cbe_thread_map[i].be_node = cbe_get_be_node(i);
245 cbe_thread_map[i].thread_id = thread_id; 245 cbe_thread_map[i].thread_id = thread_id;
246 } 246 }
247 247
248 /* Find maps for each device tree CPU */ 248 /* Find maps for each device tree CPU */
249 for_each_node_by_type(cpu, "cpu") { 249 for_each_node_by_type(cpu, "cpu") {
250 struct cbe_regs_map *map; 250 struct cbe_regs_map *map;
251 unsigned int cbe_id; 251 unsigned int cbe_id;
252 252
253 cbe_id = cbe_regs_map_count++; 253 cbe_id = cbe_regs_map_count++;
254 map = &cbe_regs_maps[cbe_id]; 254 map = &cbe_regs_maps[cbe_id];
255 255
256 if (cbe_regs_map_count > MAX_CBE) { 256 if (cbe_regs_map_count > MAX_CBE) {
257 printk(KERN_ERR "cbe_regs: More BE chips than supported" 257 printk(KERN_ERR "cbe_regs: More BE chips than supported"
258 "!\n"); 258 "!\n");
259 cbe_regs_map_count--; 259 cbe_regs_map_count--;
260 of_node_put(cpu); 260 of_node_put(cpu);
261 return; 261 return;
262 } 262 }
263 map->cpu_node = cpu; 263 map->cpu_node = cpu;
264 264
265 for_each_possible_cpu(i) { 265 for_each_possible_cpu(i) {
266 struct cbe_thread_map *thread = &cbe_thread_map[i]; 266 struct cbe_thread_map *thread = &cbe_thread_map[i];
267 267
268 if (thread->cpu_node == cpu) { 268 if (thread->cpu_node == cpu) {
269 thread->regs = map; 269 thread->regs = map;
270 thread->cbe_id = cbe_id; 270 thread->cbe_id = cbe_id;
271 map->be_node = thread->be_node; 271 map->be_node = thread->be_node;
272 cpumask_set_cpu(i, &cbe_local_mask[cbe_id]); 272 cpumask_set_cpu(i, &cbe_local_mask[cbe_id]);
273 if(thread->thread_id == 0) 273 if(thread->thread_id == 0)
274 cpumask_set_cpu(i, &cbe_first_online_cpu); 274 cpumask_set_cpu(i, &cbe_first_online_cpu);
275 } 275 }
276 } 276 }
277 277
278 cbe_fill_regs_map(map); 278 cbe_fill_regs_map(map);
279 } 279 }
280 } 280 }
281 281
282 282
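A hedged usage sketch for the cbe_regs accessors above (example_check_pmd is a hypothetical caller, not from the tree): every per-CPU lookup can return NULL when cbe_thread_map has no entry for that CPU, so callers are expected to check the pointer before touching the MMIO block.

/* Hedged sketch, assuming the usual kernel headers are available. */
#include <linux/errno.h>
#include <linux/io.h>
#include <asm/cell-regs.h>

static int example_check_pmd(int cpu)
{
        struct cbe_pmd_regs __iomem *pmd = cbe_get_cpu_pmd_regs(cpu);

        if (!pmd)
                return -ENODEV;
        /* accesses to the block would use in_be64()/out_be64() here */
        return 0;
}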
arch/powerpc/platforms/cell/interrupt.c
1 /* 1 /*
2 * Cell Internal Interrupt Controller 2 * Cell Internal Interrupt Controller
3 * 3 *
4 * Copyright (C) 2006 Benjamin Herrenschmidt (benh@kernel.crashing.org) 4 * Copyright (C) 2006 Benjamin Herrenschmidt (benh@kernel.crashing.org)
5 * IBM, Corp. 5 * IBM, Corp.
6 * 6 *
7 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 7 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
8 * 8 *
9 * Author: Arnd Bergmann <arndb@de.ibm.com> 9 * Author: Arnd Bergmann <arndb@de.ibm.com>
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option) 13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version. 14 * any later version.
15 * 15 *
16 * This program is distributed in the hope that it will be useful, 16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details. 19 * GNU General Public License for more details.
20 * 20 *
21 * You should have received a copy of the GNU General Public License 21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software 22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 * 24 *
25 * TODO: 25 * TODO:
26 * - Fix various assumptions related to HW CPU numbers vs. linux CPU numbers 26 * - Fix various assumptions related to HW CPU numbers vs. linux CPU numbers
27 * vs node numbers in the setup code 27 * vs node numbers in the setup code
28 * - Implement proper handling of maxcpus=1/2 (that is, routing of irqs from 28 * - Implement proper handling of maxcpus=1/2 (that is, routing of irqs from
29 * a non-active node to the active node) 29 * a non-active node to the active node)
30 */ 30 */
31 31
32 #include <linux/interrupt.h> 32 #include <linux/interrupt.h>
33 #include <linux/irq.h> 33 #include <linux/irq.h>
34 #include <linux/module.h> 34 #include <linux/export.h>
35 #include <linux/percpu.h> 35 #include <linux/percpu.h>
36 #include <linux/types.h> 36 #include <linux/types.h>
37 #include <linux/ioport.h> 37 #include <linux/ioport.h>
38 #include <linux/kernel_stat.h> 38 #include <linux/kernel_stat.h>
39 39
40 #include <asm/io.h> 40 #include <asm/io.h>
41 #include <asm/pgtable.h> 41 #include <asm/pgtable.h>
42 #include <asm/prom.h> 42 #include <asm/prom.h>
43 #include <asm/ptrace.h> 43 #include <asm/ptrace.h>
44 #include <asm/machdep.h> 44 #include <asm/machdep.h>
45 #include <asm/cell-regs.h> 45 #include <asm/cell-regs.h>
46 46
47 #include "interrupt.h" 47 #include "interrupt.h"
48 48
49 struct iic { 49 struct iic {
50 struct cbe_iic_thread_regs __iomem *regs; 50 struct cbe_iic_thread_regs __iomem *regs;
51 u8 target_id; 51 u8 target_id;
52 u8 eoi_stack[16]; 52 u8 eoi_stack[16];
53 int eoi_ptr; 53 int eoi_ptr;
54 struct device_node *node; 54 struct device_node *node;
55 }; 55 };
56 56
57 static DEFINE_PER_CPU(struct iic, cpu_iic); 57 static DEFINE_PER_CPU(struct iic, cpu_iic);
58 #define IIC_NODE_COUNT 2 58 #define IIC_NODE_COUNT 2
59 static struct irq_host *iic_host; 59 static struct irq_host *iic_host;
60 60
61 /* Convert between "pending" bits and hw irq number */ 61 /* Convert between "pending" bits and hw irq number */
62 static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits) 62 static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits)
63 { 63 {
64 unsigned char unit = bits.source & 0xf; 64 unsigned char unit = bits.source & 0xf;
65 unsigned char node = bits.source >> 4; 65 unsigned char node = bits.source >> 4;
66 unsigned char class = bits.class & 3; 66 unsigned char class = bits.class & 3;
67 67
68 /* Decode IPIs */ 68 /* Decode IPIs */
69 if (bits.flags & CBE_IIC_IRQ_IPI) 69 if (bits.flags & CBE_IIC_IRQ_IPI)
70 return IIC_IRQ_TYPE_IPI | (bits.prio >> 4); 70 return IIC_IRQ_TYPE_IPI | (bits.prio >> 4);
71 else 71 else
72 return (node << IIC_IRQ_NODE_SHIFT) | (class << 4) | unit; 72 return (node << IIC_IRQ_NODE_SHIFT) | (class << 4) | unit;
73 } 73 }
74 74
75 static void iic_mask(struct irq_data *d) 75 static void iic_mask(struct irq_data *d)
76 { 76 {
77 } 77 }
78 78
79 static void iic_unmask(struct irq_data *d) 79 static void iic_unmask(struct irq_data *d)
80 { 80 {
81 } 81 }
82 82
83 static void iic_eoi(struct irq_data *d) 83 static void iic_eoi(struct irq_data *d)
84 { 84 {
85 struct iic *iic = &__get_cpu_var(cpu_iic); 85 struct iic *iic = &__get_cpu_var(cpu_iic);
86 out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]); 86 out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
87 BUG_ON(iic->eoi_ptr < 0); 87 BUG_ON(iic->eoi_ptr < 0);
88 } 88 }
89 89
90 static struct irq_chip iic_chip = { 90 static struct irq_chip iic_chip = {
91 .name = "CELL-IIC", 91 .name = "CELL-IIC",
92 .irq_mask = iic_mask, 92 .irq_mask = iic_mask,
93 .irq_unmask = iic_unmask, 93 .irq_unmask = iic_unmask,
94 .irq_eoi = iic_eoi, 94 .irq_eoi = iic_eoi,
95 }; 95 };
96 96
97 97
98 static void iic_ioexc_eoi(struct irq_data *d) 98 static void iic_ioexc_eoi(struct irq_data *d)
99 { 99 {
100 } 100 }
101 101
102 static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc) 102 static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc)
103 { 103 {
104 struct irq_chip *chip = irq_desc_get_chip(desc); 104 struct irq_chip *chip = irq_desc_get_chip(desc);
105 struct cbe_iic_regs __iomem *node_iic = 105 struct cbe_iic_regs __iomem *node_iic =
106 (void __iomem *)irq_desc_get_handler_data(desc); 106 (void __iomem *)irq_desc_get_handler_data(desc);
107 unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC; 107 unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC;
108 unsigned long bits, ack; 108 unsigned long bits, ack;
109 int cascade; 109 int cascade;
110 110
111 for (;;) { 111 for (;;) {
112 bits = in_be64(&node_iic->iic_is); 112 bits = in_be64(&node_iic->iic_is);
113 if (bits == 0) 113 if (bits == 0)
114 break; 114 break;
115 /* pre-ack edge interrupts */ 115 /* pre-ack edge interrupts */
116 ack = bits & IIC_ISR_EDGE_MASK; 116 ack = bits & IIC_ISR_EDGE_MASK;
117 if (ack) 117 if (ack)
118 out_be64(&node_iic->iic_is, ack); 118 out_be64(&node_iic->iic_is, ack);
119 /* handle them */ 119 /* handle them */
120 for (cascade = 63; cascade >= 0; cascade--) 120 for (cascade = 63; cascade >= 0; cascade--)
121 if (bits & (0x8000000000000000UL >> cascade)) { 121 if (bits & (0x8000000000000000UL >> cascade)) {
122 unsigned int cirq = 122 unsigned int cirq =
123 irq_linear_revmap(iic_host, 123 irq_linear_revmap(iic_host,
124 base | cascade); 124 base | cascade);
125 if (cirq != NO_IRQ) 125 if (cirq != NO_IRQ)
126 generic_handle_irq(cirq); 126 generic_handle_irq(cirq);
127 } 127 }
128 /* post-ack level interrupts */ 128 /* post-ack level interrupts */
129 ack = bits & ~IIC_ISR_EDGE_MASK; 129 ack = bits & ~IIC_ISR_EDGE_MASK;
130 if (ack) 130 if (ack)
131 out_be64(&node_iic->iic_is, ack); 131 out_be64(&node_iic->iic_is, ack);
132 } 132 }
133 chip->irq_eoi(&desc->irq_data); 133 chip->irq_eoi(&desc->irq_data);
134 } 134 }
135 135
136 136
137 static struct irq_chip iic_ioexc_chip = { 137 static struct irq_chip iic_ioexc_chip = {
138 .name = "CELL-IOEX", 138 .name = "CELL-IOEX",
139 .irq_mask = iic_mask, 139 .irq_mask = iic_mask,
140 .irq_unmask = iic_unmask, 140 .irq_unmask = iic_unmask,
141 .irq_eoi = iic_ioexc_eoi, 141 .irq_eoi = iic_ioexc_eoi,
142 }; 142 };
143 143
144 /* Get an IRQ number from the pending state register of the IIC */ 144 /* Get an IRQ number from the pending state register of the IIC */
145 static unsigned int iic_get_irq(void) 145 static unsigned int iic_get_irq(void)
146 { 146 {
147 struct cbe_iic_pending_bits pending; 147 struct cbe_iic_pending_bits pending;
148 struct iic *iic; 148 struct iic *iic;
149 unsigned int virq; 149 unsigned int virq;
150 150
151 iic = &__get_cpu_var(cpu_iic); 151 iic = &__get_cpu_var(cpu_iic);
152 *(unsigned long *) &pending = 152 *(unsigned long *) &pending =
153 in_be64((u64 __iomem *) &iic->regs->pending_destr); 153 in_be64((u64 __iomem *) &iic->regs->pending_destr);
154 if (!(pending.flags & CBE_IIC_IRQ_VALID)) 154 if (!(pending.flags & CBE_IIC_IRQ_VALID))
155 return NO_IRQ; 155 return NO_IRQ;
156 virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending)); 156 virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending));
157 if (virq == NO_IRQ) 157 if (virq == NO_IRQ)
158 return NO_IRQ; 158 return NO_IRQ;
159 iic->eoi_stack[++iic->eoi_ptr] = pending.prio; 159 iic->eoi_stack[++iic->eoi_ptr] = pending.prio;
160 BUG_ON(iic->eoi_ptr > 15); 160 BUG_ON(iic->eoi_ptr > 15);
161 return virq; 161 return virq;
162 } 162 }
163 163
164 void iic_setup_cpu(void) 164 void iic_setup_cpu(void)
165 { 165 {
166 out_be64(&__get_cpu_var(cpu_iic).regs->prio, 0xff); 166 out_be64(&__get_cpu_var(cpu_iic).regs->prio, 0xff);
167 } 167 }
168 168
169 u8 iic_get_target_id(int cpu) 169 u8 iic_get_target_id(int cpu)
170 { 170 {
171 return per_cpu(cpu_iic, cpu).target_id; 171 return per_cpu(cpu_iic, cpu).target_id;
172 } 172 }
173 173
174 EXPORT_SYMBOL_GPL(iic_get_target_id); 174 EXPORT_SYMBOL_GPL(iic_get_target_id);
175 175
176 #ifdef CONFIG_SMP 176 #ifdef CONFIG_SMP
177 177
178 /* Use the highest interrupt priorities for IPI */ 178 /* Use the highest interrupt priorities for IPI */
179 static inline int iic_msg_to_irq(int msg) 179 static inline int iic_msg_to_irq(int msg)
180 { 180 {
181 return IIC_IRQ_TYPE_IPI + 0xf - msg; 181 return IIC_IRQ_TYPE_IPI + 0xf - msg;
182 } 182 }
183 183
184 void iic_message_pass(int cpu, int msg) 184 void iic_message_pass(int cpu, int msg)
185 { 185 {
186 out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4); 186 out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4);
187 } 187 }
188 188
189 struct irq_host *iic_get_irq_host(int node) 189 struct irq_host *iic_get_irq_host(int node)
190 { 190 {
191 return iic_host; 191 return iic_host;
192 } 192 }
193 EXPORT_SYMBOL_GPL(iic_get_irq_host); 193 EXPORT_SYMBOL_GPL(iic_get_irq_host);
194 194
195 static void iic_request_ipi(int msg) 195 static void iic_request_ipi(int msg)
196 { 196 {
197 int virq; 197 int virq;
198 198
199 virq = irq_create_mapping(iic_host, iic_msg_to_irq(msg)); 199 virq = irq_create_mapping(iic_host, iic_msg_to_irq(msg));
200 if (virq == NO_IRQ) { 200 if (virq == NO_IRQ) {
201 printk(KERN_ERR 201 printk(KERN_ERR
202 "iic: failed to map IPI %s\n", smp_ipi_name[msg]); 202 "iic: failed to map IPI %s\n", smp_ipi_name[msg]);
203 return; 203 return;
204 } 204 }
205 205
206 /* 206 /*
207 * If smp_request_message_ipi encounters an error it will report 207 * If smp_request_message_ipi encounters an error it will report
208 * the error. If a message is not needed it will return non-zero. 208 * the error. If a message is not needed it will return non-zero.
209 */ 209 */
210 if (smp_request_message_ipi(virq, msg)) 210 if (smp_request_message_ipi(virq, msg))
211 irq_dispose_mapping(virq); 211 irq_dispose_mapping(virq);
212 } 212 }
213 213
214 void iic_request_IPIs(void) 214 void iic_request_IPIs(void)
215 { 215 {
216 iic_request_ipi(PPC_MSG_CALL_FUNCTION); 216 iic_request_ipi(PPC_MSG_CALL_FUNCTION);
217 iic_request_ipi(PPC_MSG_RESCHEDULE); 217 iic_request_ipi(PPC_MSG_RESCHEDULE);
218 iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE); 218 iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE);
219 iic_request_ipi(PPC_MSG_DEBUGGER_BREAK); 219 iic_request_ipi(PPC_MSG_DEBUGGER_BREAK);
220 } 220 }
221 221
222 #endif /* CONFIG_SMP */ 222 #endif /* CONFIG_SMP */
223 223
224 224
225 static int iic_host_match(struct irq_host *h, struct device_node *node) 225 static int iic_host_match(struct irq_host *h, struct device_node *node)
226 { 226 {
227 return of_device_is_compatible(node, 227 return of_device_is_compatible(node,
228 "IBM,CBEA-Internal-Interrupt-Controller"); 228 "IBM,CBEA-Internal-Interrupt-Controller");
229 } 229 }
230 230
231 static int iic_host_map(struct irq_host *h, unsigned int virq, 231 static int iic_host_map(struct irq_host *h, unsigned int virq,
232 irq_hw_number_t hw) 232 irq_hw_number_t hw)
233 { 233 {
234 switch (hw & IIC_IRQ_TYPE_MASK) { 234 switch (hw & IIC_IRQ_TYPE_MASK) {
235 case IIC_IRQ_TYPE_IPI: 235 case IIC_IRQ_TYPE_IPI:
236 irq_set_chip_and_handler(virq, &iic_chip, handle_percpu_irq); 236 irq_set_chip_and_handler(virq, &iic_chip, handle_percpu_irq);
237 break; 237 break;
238 case IIC_IRQ_TYPE_IOEXC: 238 case IIC_IRQ_TYPE_IOEXC:
239 irq_set_chip_and_handler(virq, &iic_ioexc_chip, 239 irq_set_chip_and_handler(virq, &iic_ioexc_chip,
240 handle_edge_eoi_irq); 240 handle_edge_eoi_irq);
241 break; 241 break;
242 default: 242 default:
243 irq_set_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq); 243 irq_set_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq);
244 } 244 }
245 return 0; 245 return 0;
246 } 246 }
247 247
248 static int iic_host_xlate(struct irq_host *h, struct device_node *ct, 248 static int iic_host_xlate(struct irq_host *h, struct device_node *ct,
249 const u32 *intspec, unsigned int intsize, 249 const u32 *intspec, unsigned int intsize,
250 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 250 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
251 251
252 { 252 {
253 unsigned int node, ext, unit, class; 253 unsigned int node, ext, unit, class;
254 const u32 *val; 254 const u32 *val;
255 255
256 if (!of_device_is_compatible(ct, 256 if (!of_device_is_compatible(ct,
257 "IBM,CBEA-Internal-Interrupt-Controller")) 257 "IBM,CBEA-Internal-Interrupt-Controller"))
258 return -ENODEV; 258 return -ENODEV;
259 if (intsize != 1) 259 if (intsize != 1)
260 return -ENODEV; 260 return -ENODEV;
261 val = of_get_property(ct, "#interrupt-cells", NULL); 261 val = of_get_property(ct, "#interrupt-cells", NULL);
262 if (val == NULL || *val != 1) 262 if (val == NULL || *val != 1)
263 return -ENODEV; 263 return -ENODEV;
264 264
265 node = intspec[0] >> 24; 265 node = intspec[0] >> 24;
266 ext = (intspec[0] >> 16) & 0xff; 266 ext = (intspec[0] >> 16) & 0xff;
267 class = (intspec[0] >> 8) & 0xff; 267 class = (intspec[0] >> 8) & 0xff;
268 unit = intspec[0] & 0xff; 268 unit = intspec[0] & 0xff;
269 269
270 /* Check if node is in supported range */ 270 /* Check if node is in supported range */
271 if (node > 1) 271 if (node > 1)
272 return -EINVAL; 272 return -EINVAL;
273 273
274 /* Build up interrupt number, special case for IO exceptions */ 274 /* Build up interrupt number, special case for IO exceptions */
275 *out_hwirq = (node << IIC_IRQ_NODE_SHIFT); 275 *out_hwirq = (node << IIC_IRQ_NODE_SHIFT);
276 if (unit == IIC_UNIT_IIC && class == 1) 276 if (unit == IIC_UNIT_IIC && class == 1)
277 *out_hwirq |= IIC_IRQ_TYPE_IOEXC | ext; 277 *out_hwirq |= IIC_IRQ_TYPE_IOEXC | ext;
278 else 278 else
279 *out_hwirq |= IIC_IRQ_TYPE_NORMAL | 279 *out_hwirq |= IIC_IRQ_TYPE_NORMAL |
280 (class << IIC_IRQ_CLASS_SHIFT) | unit; 280 (class << IIC_IRQ_CLASS_SHIFT) | unit;
281 281
282 /* Dummy flags, ignored by iic code */ 282 /* Dummy flags, ignored by iic code */
283 *out_flags = IRQ_TYPE_EDGE_RISING; 283 *out_flags = IRQ_TYPE_EDGE_RISING;
284 284
285 return 0; 285 return 0;
286 } 286 }
287 287
288 static struct irq_host_ops iic_host_ops = { 288 static struct irq_host_ops iic_host_ops = {
289 .match = iic_host_match, 289 .match = iic_host_match,
290 .map = iic_host_map, 290 .map = iic_host_map,
291 .xlate = iic_host_xlate, 291 .xlate = iic_host_xlate,
292 }; 292 };
293 293
294 static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr, 294 static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
295 struct device_node *node) 295 struct device_node *node)
296 { 296 {
297 /* XXX FIXME: should locate the linux CPU number from the HW cpu 297 /* XXX FIXME: should locate the linux CPU number from the HW cpu
298 * number properly. We are lucky for now 298 * number properly. We are lucky for now
299 */ 299 */
300 struct iic *iic = &per_cpu(cpu_iic, hw_cpu); 300 struct iic *iic = &per_cpu(cpu_iic, hw_cpu);
301 301
302 iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs)); 302 iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs));
303 BUG_ON(iic->regs == NULL); 303 BUG_ON(iic->regs == NULL);
304 304
305 iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 0xf : 0xe); 305 iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 0xf : 0xe);
306 iic->eoi_stack[0] = 0xff; 306 iic->eoi_stack[0] = 0xff;
307 iic->node = of_node_get(node); 307 iic->node = of_node_get(node);
308 out_be64(&iic->regs->prio, 0); 308 out_be64(&iic->regs->prio, 0);
309 309
310 printk(KERN_INFO "IIC for CPU %d target id 0x%x : %s\n", 310 printk(KERN_INFO "IIC for CPU %d target id 0x%x : %s\n",
311 hw_cpu, iic->target_id, node->full_name); 311 hw_cpu, iic->target_id, node->full_name);
312 } 312 }
313 313
314 static int __init setup_iic(void) 314 static int __init setup_iic(void)
315 { 315 {
316 struct device_node *dn; 316 struct device_node *dn;
317 struct resource r0, r1; 317 struct resource r0, r1;
318 unsigned int node, cascade, found = 0; 318 unsigned int node, cascade, found = 0;
319 struct cbe_iic_regs __iomem *node_iic; 319 struct cbe_iic_regs __iomem *node_iic;
320 const u32 *np; 320 const u32 *np;
321 321
322 for (dn = NULL; 322 for (dn = NULL;
323 (dn = of_find_node_by_name(dn,"interrupt-controller")) != NULL;) { 323 (dn = of_find_node_by_name(dn,"interrupt-controller")) != NULL;) {
324 if (!of_device_is_compatible(dn, 324 if (!of_device_is_compatible(dn,
325 "IBM,CBEA-Internal-Interrupt-Controller")) 325 "IBM,CBEA-Internal-Interrupt-Controller"))
326 continue; 326 continue;
327 np = of_get_property(dn, "ibm,interrupt-server-ranges", NULL); 327 np = of_get_property(dn, "ibm,interrupt-server-ranges", NULL);
328 if (np == NULL) { 328 if (np == NULL) {
329 printk(KERN_WARNING "IIC: CPU association not found\n"); 329 printk(KERN_WARNING "IIC: CPU association not found\n");
330 of_node_put(dn); 330 of_node_put(dn);
331 return -ENODEV; 331 return -ENODEV;
332 } 332 }
333 if (of_address_to_resource(dn, 0, &r0) || 333 if (of_address_to_resource(dn, 0, &r0) ||
334 of_address_to_resource(dn, 1, &r1)) { 334 of_address_to_resource(dn, 1, &r1)) {
335 printk(KERN_WARNING "IIC: Can't resolve addresses\n"); 335 printk(KERN_WARNING "IIC: Can't resolve addresses\n");
336 of_node_put(dn); 336 of_node_put(dn);
337 return -ENODEV; 337 return -ENODEV;
338 } 338 }
339 found++; 339 found++;
340 init_one_iic(np[0], r0.start, dn); 340 init_one_iic(np[0], r0.start, dn);
341 init_one_iic(np[1], r1.start, dn); 341 init_one_iic(np[1], r1.start, dn);
342 342
343 /* Setup cascade for IO exceptions. XXX cleanup tricks to get 343 /* Setup cascade for IO exceptions. XXX cleanup tricks to get
344 * node vs CPU etc... 344 * node vs CPU etc...
345 * Note that we configure the IIC_IRR here with a hard coded 345 * Note that we configure the IIC_IRR here with a hard coded
346 * priority of 1. We might want to improve that later. 346 * priority of 1. We might want to improve that later.
347 */ 347 */
348 node = np[0] >> 1; 348 node = np[0] >> 1;
349 node_iic = cbe_get_cpu_iic_regs(np[0]); 349 node_iic = cbe_get_cpu_iic_regs(np[0]);
350 cascade = node << IIC_IRQ_NODE_SHIFT; 350 cascade = node << IIC_IRQ_NODE_SHIFT;
351 cascade |= 1 << IIC_IRQ_CLASS_SHIFT; 351 cascade |= 1 << IIC_IRQ_CLASS_SHIFT;
352 cascade |= IIC_UNIT_IIC; 352 cascade |= IIC_UNIT_IIC;
353 cascade = irq_create_mapping(iic_host, cascade); 353 cascade = irq_create_mapping(iic_host, cascade);
354 if (cascade == NO_IRQ) 354 if (cascade == NO_IRQ)
355 continue; 355 continue;
356 /* 356 /*
357 * irq_data is a generic pointer that gets passed back 357 * irq_data is a generic pointer that gets passed back
358 * to us later, so the forced cast is fine. 358 * to us later, so the forced cast is fine.
359 */ 359 */
360 irq_set_handler_data(cascade, (void __force *)node_iic); 360 irq_set_handler_data(cascade, (void __force *)node_iic);
361 irq_set_chained_handler(cascade, iic_ioexc_cascade); 361 irq_set_chained_handler(cascade, iic_ioexc_cascade);
362 out_be64(&node_iic->iic_ir, 362 out_be64(&node_iic->iic_ir,
363 (1 << 12) /* priority */ | 363 (1 << 12) /* priority */ |
364 (node << 4) /* dest node */ | 364 (node << 4) /* dest node */ |
365 IIC_UNIT_THREAD_0 /* route them to thread 0 */); 365 IIC_UNIT_THREAD_0 /* route them to thread 0 */);
366 /* Flush pending (make sure it triggers if there is 366 /* Flush pending (make sure it triggers if there is
367 * anything pending 367 * anything pending
368 */ 368 */
369 out_be64(&node_iic->iic_is, 0xfffffffffffffffful); 369 out_be64(&node_iic->iic_is, 0xfffffffffffffffful);
370 } 370 }
371 371
372 if (found) 372 if (found)
373 return 0; 373 return 0;
374 else 374 else
375 return -ENODEV; 375 return -ENODEV;
376 } 376 }
377 377
378 void __init iic_init_IRQ(void) 378 void __init iic_init_IRQ(void)
379 { 379 {
380 /* Setup an irq host data structure */ 380 /* Setup an irq host data structure */
381 iic_host = irq_alloc_host(NULL, IRQ_HOST_MAP_LINEAR, IIC_SOURCE_COUNT, 381 iic_host = irq_alloc_host(NULL, IRQ_HOST_MAP_LINEAR, IIC_SOURCE_COUNT,
382 &iic_host_ops, IIC_IRQ_INVALID); 382 &iic_host_ops, IIC_IRQ_INVALID);
383 BUG_ON(iic_host == NULL); 383 BUG_ON(iic_host == NULL);
384 irq_set_default_host(iic_host); 384 irq_set_default_host(iic_host);
385 385
386 /* Discover and initialize iics */ 386 /* Discover and initialize iics */
387 if (setup_iic() < 0) 387 if (setup_iic() < 0)
388 panic("IIC: Failed to initialize !\n"); 388 panic("IIC: Failed to initialize !\n");
389 389
390 /* Set master interrupt handling function */ 390 /* Set master interrupt handling function */
391 ppc_md.get_irq = iic_get_irq; 391 ppc_md.get_irq = iic_get_irq;
392 392
393 /* Enable on current CPU */ 393 /* Enable on current CPU */
394 iic_setup_cpu(); 394 iic_setup_cpu();
395 } 395 }
396 396
397 void iic_set_interrupt_routing(int cpu, int thread, int priority) 397 void iic_set_interrupt_routing(int cpu, int thread, int priority)
398 { 398 {
399 struct cbe_iic_regs __iomem *iic_regs = cbe_get_cpu_iic_regs(cpu); 399 struct cbe_iic_regs __iomem *iic_regs = cbe_get_cpu_iic_regs(cpu);
400 u64 iic_ir = 0; 400 u64 iic_ir = 0;
401 int node = cpu >> 1; 401 int node = cpu >> 1;
402 402
403 /* Set which node and thread will handle the next interrupt */ 403 /* Set which node and thread will handle the next interrupt */
404 iic_ir |= CBE_IIC_IR_PRIO(priority) | 404 iic_ir |= CBE_IIC_IR_PRIO(priority) |
405 CBE_IIC_IR_DEST_NODE(node); 405 CBE_IIC_IR_DEST_NODE(node);
406 if (thread == 0) 406 if (thread == 0)
407 iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_0); 407 iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_0);
408 else 408 else
409 iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_1); 409 iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_1);
410 out_be64(&iic_regs->iic_ir, iic_ir); 410 out_be64(&iic_regs->iic_ir, iic_ir);
411 } 411 }
412 412
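A hedged illustration of the hardware IRQ numbering that iic_pending_to_hwnum() above decodes (the helper is hypothetical and assumes the same includes as this file): the node sits above IIC_IRQ_NODE_SHIFT, the class occupies bits 4-5, and the unit the low nibble.

/* Hedged sketch: compose a hw irq number the way the decoder above takes it apart. */
static irq_hw_number_t example_make_hwnum(unsigned char node,
                                           unsigned char class,
                                           unsigned char unit)
{
        return (node << IIC_IRQ_NODE_SHIFT) | ((class & 3) << 4) | (unit & 0xf);
}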
arch/powerpc/platforms/cell/spu_callbacks.c
1 /* 1 /*
2 * System call callback functions for SPUs 2 * System call callback functions for SPUs
3 */ 3 */
4 4
5 #undef DEBUG 5 #undef DEBUG
6 6
7 #include <linux/kallsyms.h> 7 #include <linux/kallsyms.h>
8 #include <linux/module.h> 8 #include <linux/export.h>
9 #include <linux/syscalls.h> 9 #include <linux/syscalls.h>
10 10
11 #include <asm/spu.h> 11 #include <asm/spu.h>
12 #include <asm/syscalls.h> 12 #include <asm/syscalls.h>
13 #include <asm/unistd.h> 13 #include <asm/unistd.h>
14 14
15 /* 15 /*
16 * This table defines the system calls that an SPU can call. 16 * This table defines the system calls that an SPU can call.
17 * It is currently a subset of the 64 bit powerpc system calls, 17 * It is currently a subset of the 64 bit powerpc system calls,
18 * with the exact semantics. 18 * with the exact semantics.
19 * 19 *
20 * The reasons for disabling some of the system calls are: 20 * The reasons for disabling some of the system calls are:
21 * 1. They interact with the way SPU syscalls are handled 21 * 1. They interact with the way SPU syscalls are handled
22 * and we can't let them execute ever: 22 * and we can't let them execute ever:
23 * restart_syscall, exit, fork, execve, ptrace, ... 23 * restart_syscall, exit, fork, execve, ptrace, ...
24 * 2. They are deprecated and replaced by other means: 24 * 2. They are deprecated and replaced by other means:
25 * uselib, pciconfig_*, sysfs, ... 25 * uselib, pciconfig_*, sysfs, ...
26 * 3. They are somewhat interacting with the system in a way 26 * 3. They are somewhat interacting with the system in a way
27 * we don't want an SPU to: 27 * we don't want an SPU to:
28 * reboot, init_module, mount, kexec_load 28 * reboot, init_module, mount, kexec_load
29 * 4. They are optional and we can't rely on them being 29 * 4. They are optional and we can't rely on them being
30 * linked into the kernel. Unfortunately, the cond_syscall 30 * linked into the kernel. Unfortunately, the cond_syscall
31 * helper does not work here as it does not add the necessary 31 * helper does not work here as it does not add the necessary
32 * opd symbols: 32 * opd symbols:
33 * mbind, mq_open, ipc, ... 33 * mbind, mq_open, ipc, ...
34 */ 34 */
35 35
36 static void *spu_syscall_table[] = { 36 static void *spu_syscall_table[] = {
37 #define SYSCALL(func) sys_ni_syscall, 37 #define SYSCALL(func) sys_ni_syscall,
38 #define COMPAT_SYS(func) sys_ni_syscall, 38 #define COMPAT_SYS(func) sys_ni_syscall,
39 #define PPC_SYS(func) sys_ni_syscall, 39 #define PPC_SYS(func) sys_ni_syscall,
40 #define OLDSYS(func) sys_ni_syscall, 40 #define OLDSYS(func) sys_ni_syscall,
41 #define SYS32ONLY(func) sys_ni_syscall, 41 #define SYS32ONLY(func) sys_ni_syscall,
42 #define SYSX(f, f3264, f32) sys_ni_syscall, 42 #define SYSX(f, f3264, f32) sys_ni_syscall,
43 43
44 #define SYSCALL_SPU(func) sys_##func, 44 #define SYSCALL_SPU(func) sys_##func,
45 #define COMPAT_SYS_SPU(func) sys_##func, 45 #define COMPAT_SYS_SPU(func) sys_##func,
46 #define PPC_SYS_SPU(func) ppc_##func, 46 #define PPC_SYS_SPU(func) ppc_##func,
47 #define SYSX_SPU(f, f3264, f32) f, 47 #define SYSX_SPU(f, f3264, f32) f,
48 48
49 #include <asm/systbl.h> 49 #include <asm/systbl.h>
50 }; 50 };
51 51
52 long spu_sys_callback(struct spu_syscall_block *s) 52 long spu_sys_callback(struct spu_syscall_block *s)
53 { 53 {
54 long (*syscall)(u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6); 54 long (*syscall)(u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6);
55 55
56 if (s->nr_ret >= ARRAY_SIZE(spu_syscall_table)) { 56 if (s->nr_ret >= ARRAY_SIZE(spu_syscall_table)) {
57 pr_debug("%s: invalid syscall #%lld", __func__, s->nr_ret); 57 pr_debug("%s: invalid syscall #%lld", __func__, s->nr_ret);
58 return -ENOSYS; 58 return -ENOSYS;
59 } 59 }
60 60
61 syscall = spu_syscall_table[s->nr_ret]; 61 syscall = spu_syscall_table[s->nr_ret];
62 62
63 #ifdef DEBUG 63 #ifdef DEBUG
64 print_symbol(KERN_DEBUG "SPU-syscall %s:", (unsigned long)syscall); 64 print_symbol(KERN_DEBUG "SPU-syscall %s:", (unsigned long)syscall);
65 printk("syscall%ld(%lx, %lx, %lx, %lx, %lx, %lx)\n", 65 printk("syscall%ld(%lx, %lx, %lx, %lx, %lx, %lx)\n",
66 s->nr_ret, 66 s->nr_ret,
67 s->parm[0], s->parm[1], s->parm[2], 67 s->parm[0], s->parm[1], s->parm[2],
68 s->parm[3], s->parm[4], s->parm[5]); 68 s->parm[3], s->parm[4], s->parm[5]);
69 #endif 69 #endif
70 70
71 return syscall(s->parm[0], s->parm[1], s->parm[2], 71 return syscall(s->parm[0], s->parm[1], s->parm[2],
72 s->parm[3], s->parm[4], s->parm[5]); 72 s->parm[3], s->parm[4], s->parm[5]);
73 } 73 }
74 EXPORT_SYMBOL_GPL(spu_sys_callback); 74 EXPORT_SYMBOL_GPL(spu_sys_callback);
75 75
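The table above relies on an X-macro expansion of <asm/systbl.h>: entries tagged _SPU expand to the real handler, everything else collapses to sys_ni_syscall. A self-contained, hedged illustration of that pattern (the entry names and stub functions here are made up, not copied from systbl.h):

/* Userspace demo of the X-macro technique used by spu_syscall_table. */
#include <stdio.h>

static long sys_ni_syscall(void) { return -38; /* -ENOSYS */ }
static long sys_getpid(void)     { return 1234; }

#define SYSCALL(func)      sys_ni_syscall,
#define SYSCALL_SPU(func)  sys_##func,

static long (*example_table[])(void) = {
        SYSCALL(reboot)         /* disabled: expands to sys_ni_syscall, */
        SYSCALL_SPU(getpid)     /* allowed:  expands to sys_getpid,     */
};

int main(void)
{
        printf("%ld %ld\n", example_table[0](), example_table[1]());
        return 0;
}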
arch/powerpc/platforms/cell/spu_fault.c
1 /* 1 /*
2 * SPU mm fault handler 2 * SPU mm fault handler
3 * 3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2007 4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2007
5 * 5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com> 6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 * Author: Jeremy Kerr <jk@ozlabs.org> 7 * Author: Jeremy Kerr <jk@ozlabs.org>
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2, or (at your option) 11 * the Free Software Foundation; either version 2, or (at your option)
12 * any later version. 12 * any later version.
13 * 13 *
14 * This program is distributed in the hope that it will be useful, 14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details. 17 * GNU General Public License for more details.
18 * 18 *
19 * You should have received a copy of the GNU General Public License 19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software 20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */ 22 */
23 #include <linux/sched.h> 23 #include <linux/sched.h>
24 #include <linux/mm.h> 24 #include <linux/mm.h>
25 #include <linux/module.h> 25 #include <linux/export.h>
26 26
27 #include <asm/spu.h> 27 #include <asm/spu.h>
28 #include <asm/spu_csa.h> 28 #include <asm/spu_csa.h>
29 29
30 /* 30 /*
31 * This ought to be kept in sync with the powerpc specific do_page_fault 31 * This ought to be kept in sync with the powerpc specific do_page_fault
32 * function. Currently, there are a few corner cases that we haven't had 32 * function. Currently, there are a few corner cases that we haven't had
33 * to handle, fortunately. 33 * to handle, fortunately.
34 */ 34 */
35 int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea, 35 int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
36 unsigned long dsisr, unsigned *flt) 36 unsigned long dsisr, unsigned *flt)
37 { 37 {
38 struct vm_area_struct *vma; 38 struct vm_area_struct *vma;
39 unsigned long is_write; 39 unsigned long is_write;
40 int ret; 40 int ret;
41 41
42 if (mm == NULL) 42 if (mm == NULL)
43 return -EFAULT; 43 return -EFAULT;
44 44
45 if (mm->pgd == NULL) 45 if (mm->pgd == NULL)
46 return -EFAULT; 46 return -EFAULT;
47 47
48 down_read(&mm->mmap_sem); 48 down_read(&mm->mmap_sem);
49 ret = -EFAULT; 49 ret = -EFAULT;
50 vma = find_vma(mm, ea); 50 vma = find_vma(mm, ea);
51 if (!vma) 51 if (!vma)
52 goto out_unlock; 52 goto out_unlock;
53 53
54 if (ea < vma->vm_start) { 54 if (ea < vma->vm_start) {
55 if (!(vma->vm_flags & VM_GROWSDOWN)) 55 if (!(vma->vm_flags & VM_GROWSDOWN))
56 goto out_unlock; 56 goto out_unlock;
57 if (expand_stack(vma, ea)) 57 if (expand_stack(vma, ea))
58 goto out_unlock; 58 goto out_unlock;
59 } 59 }
60 60
61 is_write = dsisr & MFC_DSISR_ACCESS_PUT; 61 is_write = dsisr & MFC_DSISR_ACCESS_PUT;
62 if (is_write) { 62 if (is_write) {
63 if (!(vma->vm_flags & VM_WRITE)) 63 if (!(vma->vm_flags & VM_WRITE))
64 goto out_unlock; 64 goto out_unlock;
65 } else { 65 } else {
66 if (dsisr & MFC_DSISR_ACCESS_DENIED) 66 if (dsisr & MFC_DSISR_ACCESS_DENIED)
67 goto out_unlock; 67 goto out_unlock;
68 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) 68 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
69 goto out_unlock; 69 goto out_unlock;
70 } 70 }
71 71
72 ret = 0; 72 ret = 0;
73 *flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0); 73 *flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
74 if (unlikely(*flt & VM_FAULT_ERROR)) { 74 if (unlikely(*flt & VM_FAULT_ERROR)) {
75 if (*flt & VM_FAULT_OOM) { 75 if (*flt & VM_FAULT_OOM) {
76 ret = -ENOMEM; 76 ret = -ENOMEM;
77 goto out_unlock; 77 goto out_unlock;
78 } else if (*flt & VM_FAULT_SIGBUS) { 78 } else if (*flt & VM_FAULT_SIGBUS) {
79 ret = -EFAULT; 79 ret = -EFAULT;
80 goto out_unlock; 80 goto out_unlock;
81 } 81 }
82 BUG(); 82 BUG();
83 } 83 }
84 84
85 if (*flt & VM_FAULT_MAJOR) 85 if (*flt & VM_FAULT_MAJOR)
86 current->maj_flt++; 86 current->maj_flt++;
87 else 87 else
88 current->min_flt++; 88 current->min_flt++;
89 89
90 out_unlock: 90 out_unlock:
91 up_read(&mm->mmap_sem); 91 up_read(&mm->mmap_sem);
92 return ret; 92 return ret;
93 } 93 }
94 EXPORT_SYMBOL_GPL(spu_handle_mm_fault); 94 EXPORT_SYMBOL_GPL(spu_handle_mm_fault);
95 95
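A hedged sketch of how a caller might drive spu_handle_mm_fault() above (example_resolve_fault is hypothetical; the declaration is assumed to be visible, and VM_FAULT_MAJOR comes from <linux/mm.h>): the routine returns 0 or a -errno, and reports the VM fault flags through *flt.

/* Hedged sketch of a spufs-style caller, not part of this file. */
static int example_resolve_fault(struct mm_struct *mm, unsigned long ea,
                                 unsigned long dsisr)
{
        unsigned flt = 0;
        int ret;

        ret = spu_handle_mm_fault(mm, ea, dsisr, &flt);
        if (ret)
                return ret;                     /* -EFAULT or -ENOMEM */
        return (flt & VM_FAULT_MAJOR) ? 1 : 0;  /* flag major faults */
}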
arch/powerpc/platforms/cell/spu_manage.c
1 /* 1 /*
2 * spu management operations for of based platforms 2 * spu management operations for of based platforms
3 * 3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 * Copyright 2006 Sony Corp. 5 * Copyright 2006 Sony Corp.
6 * (C) Copyright 2007 TOSHIBA CORPORATION 6 * (C) Copyright 2007 TOSHIBA CORPORATION
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License. 10 * the Free Software Foundation; version 2 of the License.
11 * 11 *
12 * This program is distributed in the hope that it will be useful, 12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 15 * GNU General Public License for more details.
16 * 16 *
17 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 */ 20 */
21 21
22 #include <linux/interrupt.h> 22 #include <linux/interrupt.h>
23 #include <linux/list.h> 23 #include <linux/list.h>
24 #include <linux/module.h> 24 #include <linux/export.h>
25 #include <linux/ptrace.h> 25 #include <linux/ptrace.h>
26 #include <linux/wait.h> 26 #include <linux/wait.h>
27 #include <linux/mm.h> 27 #include <linux/mm.h>
28 #include <linux/io.h> 28 #include <linux/io.h>
29 #include <linux/mutex.h> 29 #include <linux/mutex.h>
30 #include <linux/device.h> 30 #include <linux/device.h>
31 31
32 #include <asm/spu.h> 32 #include <asm/spu.h>
33 #include <asm/spu_priv1.h> 33 #include <asm/spu_priv1.h>
34 #include <asm/firmware.h> 34 #include <asm/firmware.h>
35 #include <asm/prom.h> 35 #include <asm/prom.h>
36 36
37 #include "spufs/spufs.h" 37 #include "spufs/spufs.h"
38 #include "interrupt.h" 38 #include "interrupt.h"
39 39
40 struct device_node *spu_devnode(struct spu *spu) 40 struct device_node *spu_devnode(struct spu *spu)
41 { 41 {
42 return spu->devnode; 42 return spu->devnode;
43 } 43 }
44 44
45 EXPORT_SYMBOL_GPL(spu_devnode); 45 EXPORT_SYMBOL_GPL(spu_devnode);
46 46
47 static u64 __init find_spu_unit_number(struct device_node *spe) 47 static u64 __init find_spu_unit_number(struct device_node *spe)
48 { 48 {
49 const unsigned int *prop; 49 const unsigned int *prop;
50 int proplen; 50 int proplen;
51 51
52 /* new device trees should provide the physical-id attribute */ 52 /* new device trees should provide the physical-id attribute */
53 prop = of_get_property(spe, "physical-id", &proplen); 53 prop = of_get_property(spe, "physical-id", &proplen);
54 if (proplen == 4) 54 if (proplen == 4)
55 return (u64)*prop; 55 return (u64)*prop;
56 56
57 /* celleb device tree provides the unit-id */ 57 /* celleb device tree provides the unit-id */
58 prop = of_get_property(spe, "unit-id", &proplen); 58 prop = of_get_property(spe, "unit-id", &proplen);
59 if (proplen == 4) 59 if (proplen == 4)
60 return (u64)*prop; 60 return (u64)*prop;
61 61
62 /* legacy device trees provide the id in the reg attribute */ 62 /* legacy device trees provide the id in the reg attribute */
63 prop = of_get_property(spe, "reg", &proplen); 63 prop = of_get_property(spe, "reg", &proplen);
64 if (proplen == 4) 64 if (proplen == 4)
65 return (u64)*prop; 65 return (u64)*prop;
66 66
67 return 0; 67 return 0;
68 } 68 }
69 69
70 static void spu_unmap(struct spu *spu) 70 static void spu_unmap(struct spu *spu)
71 { 71 {
72 if (!firmware_has_feature(FW_FEATURE_LPAR)) 72 if (!firmware_has_feature(FW_FEATURE_LPAR))
73 iounmap(spu->priv1); 73 iounmap(spu->priv1);
74 iounmap(spu->priv2); 74 iounmap(spu->priv2);
75 iounmap(spu->problem); 75 iounmap(spu->problem);
76 iounmap((__force u8 __iomem *)spu->local_store); 76 iounmap((__force u8 __iomem *)spu->local_store);
77 } 77 }
78 78
79 static int __init spu_map_interrupts_old(struct spu *spu, 79 static int __init spu_map_interrupts_old(struct spu *spu,
80 struct device_node *np) 80 struct device_node *np)
81 { 81 {
82 unsigned int isrc; 82 unsigned int isrc;
83 const u32 *tmp; 83 const u32 *tmp;
84 int nid; 84 int nid;
85 85
86 /* Get the interrupt source unit from the device-tree */ 86 /* Get the interrupt source unit from the device-tree */
87 tmp = of_get_property(np, "isrc", NULL); 87 tmp = of_get_property(np, "isrc", NULL);
88 if (!tmp) 88 if (!tmp)
89 return -ENODEV; 89 return -ENODEV;
90 isrc = tmp[0]; 90 isrc = tmp[0];
91 91
92 tmp = of_get_property(np->parent->parent, "node-id", NULL); 92 tmp = of_get_property(np->parent->parent, "node-id", NULL);
93 if (!tmp) { 93 if (!tmp) {
94 printk(KERN_WARNING "%s: can't find node-id\n", __func__); 94 printk(KERN_WARNING "%s: can't find node-id\n", __func__);
95 nid = spu->node; 95 nid = spu->node;
96 } else 96 } else
97 nid = tmp[0]; 97 nid = tmp[0];
98 98
99 /* Add the node number */ 99 /* Add the node number */
100 isrc |= nid << IIC_IRQ_NODE_SHIFT; 100 isrc |= nid << IIC_IRQ_NODE_SHIFT;
101 101
102 /* Now map interrupts of all 3 classes */ 102 /* Now map interrupts of all 3 classes */
103 spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc); 103 spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
104 spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc); 104 spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
105 spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc); 105 spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);
106 106
107 /* Right now, we only fail if class 2 failed */ 107 /* Right now, we only fail if class 2 failed */
108 return spu->irqs[2] == NO_IRQ ? -EINVAL : 0; 108 return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
109 } 109 }
110 110
111 static void __iomem * __init spu_map_prop_old(struct spu *spu, 111 static void __iomem * __init spu_map_prop_old(struct spu *spu,
112 struct device_node *n, 112 struct device_node *n,
113 const char *name) 113 const char *name)
114 { 114 {
115 const struct address_prop { 115 const struct address_prop {
116 unsigned long address; 116 unsigned long address;
117 unsigned int len; 117 unsigned int len;
118 } __attribute__((packed)) *prop; 118 } __attribute__((packed)) *prop;
119 int proplen; 119 int proplen;
120 120
121 prop = of_get_property(n, name, &proplen); 121 prop = of_get_property(n, name, &proplen);
122 if (prop == NULL || proplen != sizeof (struct address_prop)) 122 if (prop == NULL || proplen != sizeof (struct address_prop))
123 return NULL; 123 return NULL;
124 124
125 return ioremap(prop->address, prop->len); 125 return ioremap(prop->address, prop->len);
126 } 126 }
127 127
128 static int __init spu_map_device_old(struct spu *spu) 128 static int __init spu_map_device_old(struct spu *spu)
129 { 129 {
130 struct device_node *node = spu->devnode; 130 struct device_node *node = spu->devnode;
131 const char *prop; 131 const char *prop;
132 int ret; 132 int ret;
133 133
134 ret = -ENODEV; 134 ret = -ENODEV;
135 spu->name = of_get_property(node, "name", NULL); 135 spu->name = of_get_property(node, "name", NULL);
136 if (!spu->name) 136 if (!spu->name)
137 goto out; 137 goto out;
138 138
139 prop = of_get_property(node, "local-store", NULL); 139 prop = of_get_property(node, "local-store", NULL);
140 if (!prop) 140 if (!prop)
141 goto out; 141 goto out;
142 spu->local_store_phys = *(unsigned long *)prop; 142 spu->local_store_phys = *(unsigned long *)prop;
143 143
144 /* we use local store as ram, not io memory */ 144 /* we use local store as ram, not io memory */
145 spu->local_store = (void __force *) 145 spu->local_store = (void __force *)
146 spu_map_prop_old(spu, node, "local-store"); 146 spu_map_prop_old(spu, node, "local-store");
147 if (!spu->local_store) 147 if (!spu->local_store)
148 goto out; 148 goto out;
149 149
150 prop = of_get_property(node, "problem", NULL); 150 prop = of_get_property(node, "problem", NULL);
151 if (!prop) 151 if (!prop)
152 goto out_unmap; 152 goto out_unmap;
153 spu->problem_phys = *(unsigned long *)prop; 153 spu->problem_phys = *(unsigned long *)prop;
154 154
155 spu->problem = spu_map_prop_old(spu, node, "problem"); 155 spu->problem = spu_map_prop_old(spu, node, "problem");
156 if (!spu->problem) 156 if (!spu->problem)
157 goto out_unmap; 157 goto out_unmap;
158 158
159 spu->priv2 = spu_map_prop_old(spu, node, "priv2"); 159 spu->priv2 = spu_map_prop_old(spu, node, "priv2");
160 if (!spu->priv2) 160 if (!spu->priv2)
161 goto out_unmap; 161 goto out_unmap;
162 162
163 if (!firmware_has_feature(FW_FEATURE_LPAR)) { 163 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
164 spu->priv1 = spu_map_prop_old(spu, node, "priv1"); 164 spu->priv1 = spu_map_prop_old(spu, node, "priv1");
165 if (!spu->priv1) 165 if (!spu->priv1)
166 goto out_unmap; 166 goto out_unmap;
167 } 167 }
168 168
169 ret = 0; 169 ret = 0;
170 goto out; 170 goto out;
171 171
172 out_unmap: 172 out_unmap:
173 spu_unmap(spu); 173 spu_unmap(spu);
174 out: 174 out:
175 return ret; 175 return ret;
176 } 176 }
177 177
178 static int __init spu_map_interrupts(struct spu *spu, struct device_node *np) 178 static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
179 { 179 {
180 struct of_irq oirq; 180 struct of_irq oirq;
181 int ret; 181 int ret;
182 int i; 182 int i;
183 183
184 for (i=0; i < 3; i++) { 184 for (i=0; i < 3; i++) {
185 ret = of_irq_map_one(np, i, &oirq); 185 ret = of_irq_map_one(np, i, &oirq);
186 if (ret) { 186 if (ret) {
187 pr_debug("spu_new: failed to get irq %d\n", i); 187 pr_debug("spu_new: failed to get irq %d\n", i);
188 goto err; 188 goto err;
189 } 189 }
190 ret = -EINVAL; 190 ret = -EINVAL;
191 pr_debug(" irq %d no 0x%x on %s\n", i, oirq.specifier[0], 191 pr_debug(" irq %d no 0x%x on %s\n", i, oirq.specifier[0],
192 oirq.controller->full_name); 192 oirq.controller->full_name);
193 spu->irqs[i] = irq_create_of_mapping(oirq.controller, 193 spu->irqs[i] = irq_create_of_mapping(oirq.controller,
194 oirq.specifier, oirq.size); 194 oirq.specifier, oirq.size);
195 if (spu->irqs[i] == NO_IRQ) { 195 if (spu->irqs[i] == NO_IRQ) {
196 pr_debug("spu_new: failed to map it !\n"); 196 pr_debug("spu_new: failed to map it !\n");
197 goto err; 197 goto err;
198 } 198 }
199 } 199 }
200 return 0; 200 return 0;
201 201
202 err: 202 err:
203 pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier, 203 pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier,
204 spu->name); 204 spu->name);
205 for (; i >= 0; i--) { 205 for (; i >= 0; i--) {
206 if (spu->irqs[i] != NO_IRQ) 206 if (spu->irqs[i] != NO_IRQ)
207 irq_dispose_mapping(spu->irqs[i]); 207 irq_dispose_mapping(spu->irqs[i]);
208 } 208 }
209 return ret; 209 return ret;
210 } 210 }
211 211
212 static int spu_map_resource(struct spu *spu, int nr, 212 static int spu_map_resource(struct spu *spu, int nr,
213 void __iomem** virt, unsigned long *phys) 213 void __iomem** virt, unsigned long *phys)
214 { 214 {
215 struct device_node *np = spu->devnode; 215 struct device_node *np = spu->devnode;
216 struct resource resource = { }; 216 struct resource resource = { };
217 unsigned long len; 217 unsigned long len;
218 int ret; 218 int ret;
219 219
220 ret = of_address_to_resource(np, nr, &resource); 220 ret = of_address_to_resource(np, nr, &resource);
221 if (ret) 221 if (ret)
222 return ret; 222 return ret;
223 if (phys) 223 if (phys)
224 *phys = resource.start; 224 *phys = resource.start;
225 len = resource_size(&resource); 225 len = resource_size(&resource);
226 *virt = ioremap(resource.start, len); 226 *virt = ioremap(resource.start, len);
227 if (!*virt) 227 if (!*virt)
228 return -EINVAL; 228 return -EINVAL;
229 return 0; 229 return 0;
230 } 230 }
231 231
232 static int __init spu_map_device(struct spu *spu) 232 static int __init spu_map_device(struct spu *spu)
233 { 233 {
234 struct device_node *np = spu->devnode; 234 struct device_node *np = spu->devnode;
235 int ret = -ENODEV; 235 int ret = -ENODEV;
236 236
237 spu->name = of_get_property(np, "name", NULL); 237 spu->name = of_get_property(np, "name", NULL);
238 if (!spu->name) 238 if (!spu->name)
239 goto out; 239 goto out;
240 240
241 ret = spu_map_resource(spu, 0, (void __iomem**)&spu->local_store, 241 ret = spu_map_resource(spu, 0, (void __iomem**)&spu->local_store,
242 &spu->local_store_phys); 242 &spu->local_store_phys);
243 if (ret) { 243 if (ret) {
244 pr_debug("spu_new: failed to map %s resource 0\n", 244 pr_debug("spu_new: failed to map %s resource 0\n",
245 np->full_name); 245 np->full_name);
246 goto out; 246 goto out;
247 } 247 }
248 ret = spu_map_resource(spu, 1, (void __iomem**)&spu->problem, 248 ret = spu_map_resource(spu, 1, (void __iomem**)&spu->problem,
249 &spu->problem_phys); 249 &spu->problem_phys);
250 if (ret) { 250 if (ret) {
251 pr_debug("spu_new: failed to map %s resource 1\n", 251 pr_debug("spu_new: failed to map %s resource 1\n",
252 np->full_name); 252 np->full_name);
253 goto out_unmap; 253 goto out_unmap;
254 } 254 }
255 ret = spu_map_resource(spu, 2, (void __iomem**)&spu->priv2, NULL); 255 ret = spu_map_resource(spu, 2, (void __iomem**)&spu->priv2, NULL);
256 if (ret) { 256 if (ret) {
257 pr_debug("spu_new: failed to map %s resource 2\n", 257 pr_debug("spu_new: failed to map %s resource 2\n",
258 np->full_name); 258 np->full_name);
259 goto out_unmap; 259 goto out_unmap;
260 } 260 }
261 if (!firmware_has_feature(FW_FEATURE_LPAR)) 261 if (!firmware_has_feature(FW_FEATURE_LPAR))
262 ret = spu_map_resource(spu, 3, 262 ret = spu_map_resource(spu, 3,
263 (void __iomem**)&spu->priv1, NULL); 263 (void __iomem**)&spu->priv1, NULL);
264 if (ret) { 264 if (ret) {
265 pr_debug("spu_new: failed to map %s resource 3\n", 265 pr_debug("spu_new: failed to map %s resource 3\n",
266 np->full_name); 266 np->full_name);
267 goto out_unmap; 267 goto out_unmap;
268 } 268 }
269 pr_debug("spu_new: %s maps:\n", np->full_name); 269 pr_debug("spu_new: %s maps:\n", np->full_name);
270 pr_debug(" local store : 0x%016lx -> 0x%p\n", 270 pr_debug(" local store : 0x%016lx -> 0x%p\n",
271 spu->local_store_phys, spu->local_store); 271 spu->local_store_phys, spu->local_store);
272 pr_debug(" problem state : 0x%016lx -> 0x%p\n", 272 pr_debug(" problem state : 0x%016lx -> 0x%p\n",
273 spu->problem_phys, spu->problem); 273 spu->problem_phys, spu->problem);
274 pr_debug(" priv2 : 0x%p\n", spu->priv2); 274 pr_debug(" priv2 : 0x%p\n", spu->priv2);
275 pr_debug(" priv1 : 0x%p\n", spu->priv1); 275 pr_debug(" priv1 : 0x%p\n", spu->priv1);
276 276
277 return 0; 277 return 0;
278 278
279 out_unmap: 279 out_unmap:
280 spu_unmap(spu); 280 spu_unmap(spu);
281 out: 281 out:
282 pr_debug("failed to map spe %s: %d\n", spu->name, ret); 282 pr_debug("failed to map spe %s: %d\n", spu->name, ret);
283 return ret; 283 return ret;
284 } 284 }
285 285
286 static int __init of_enumerate_spus(int (*fn)(void *data)) 286 static int __init of_enumerate_spus(int (*fn)(void *data))
287 { 287 {
288 int ret; 288 int ret;
289 struct device_node *node; 289 struct device_node *node;
290 unsigned int n = 0; 290 unsigned int n = 0;
291 291
292 ret = -ENODEV; 292 ret = -ENODEV;
293 for (node = of_find_node_by_type(NULL, "spe"); 293 for (node = of_find_node_by_type(NULL, "spe");
294 node; node = of_find_node_by_type(node, "spe")) { 294 node; node = of_find_node_by_type(node, "spe")) {
295 ret = fn(node); 295 ret = fn(node);
296 if (ret) { 296 if (ret) {
297 printk(KERN_WARNING "%s: Error initializing %s\n", 297 printk(KERN_WARNING "%s: Error initializing %s\n",
298 __func__, node->name); 298 __func__, node->name);
299 break; 299 break;
300 } 300 }
301 n++; 301 n++;
302 } 302 }
303 return ret ? ret : n; 303 return ret ? ret : n;
304 } 304 }
305 305
306 static int __init of_create_spu(struct spu *spu, void *data) 306 static int __init of_create_spu(struct spu *spu, void *data)
307 { 307 {
308 int ret; 308 int ret;
309 struct device_node *spe = (struct device_node *)data; 309 struct device_node *spe = (struct device_node *)data;
310 static int legacy_map = 0, legacy_irq = 0; 310 static int legacy_map = 0, legacy_irq = 0;
311 311
312 spu->devnode = of_node_get(spe); 312 spu->devnode = of_node_get(spe);
313 spu->spe_id = find_spu_unit_number(spe); 313 spu->spe_id = find_spu_unit_number(spe);
314 314
315 spu->node = of_node_to_nid(spe); 315 spu->node = of_node_to_nid(spe);
316 if (spu->node >= MAX_NUMNODES) { 316 if (spu->node >= MAX_NUMNODES) {
317 printk(KERN_WARNING "SPE %s on node %d ignored," 317 printk(KERN_WARNING "SPE %s on node %d ignored,"
318 " node number too big\n", spe->full_name, spu->node); 318 " node number too big\n", spe->full_name, spu->node);
319 printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n"); 319 printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
320 ret = -ENODEV; 320 ret = -ENODEV;
321 goto out; 321 goto out;
322 } 322 }
323 323
324 ret = spu_map_device(spu); 324 ret = spu_map_device(spu);
325 if (ret) { 325 if (ret) {
326 if (!legacy_map) { 326 if (!legacy_map) {
327 legacy_map = 1; 327 legacy_map = 1;
328 printk(KERN_WARNING "%s: Legacy device tree found, " 328 printk(KERN_WARNING "%s: Legacy device tree found, "
329 "trying to map old style\n", __func__); 329 "trying to map old style\n", __func__);
330 } 330 }
331 ret = spu_map_device_old(spu); 331 ret = spu_map_device_old(spu);
332 if (ret) { 332 if (ret) {
333 printk(KERN_ERR "Unable to map %s\n", 333 printk(KERN_ERR "Unable to map %s\n",
334 spu->name); 334 spu->name);
335 goto out; 335 goto out;
336 } 336 }
337 } 337 }
338 338
339 ret = spu_map_interrupts(spu, spe); 339 ret = spu_map_interrupts(spu, spe);
340 if (ret) { 340 if (ret) {
341 if (!legacy_irq) { 341 if (!legacy_irq) {
342 legacy_irq = 1; 342 legacy_irq = 1;
343 printk(KERN_WARNING "%s: Legacy device tree found, " 343 printk(KERN_WARNING "%s: Legacy device tree found, "
344 "trying old style irq\n", __func__); 344 "trying old style irq\n", __func__);
345 } 345 }
346 ret = spu_map_interrupts_old(spu, spe); 346 ret = spu_map_interrupts_old(spu, spe);
347 if (ret) { 347 if (ret) {
348 printk(KERN_ERR "%s: could not map interrupts\n", 348 printk(KERN_ERR "%s: could not map interrupts\n",
349 spu->name); 349 spu->name);
350 goto out_unmap; 350 goto out_unmap;
351 } 351 }
352 } 352 }
353 353
354 pr_debug("Using SPE %s %p %p %p %p %d\n", spu->name, 354 pr_debug("Using SPE %s %p %p %p %p %d\n", spu->name,
355 spu->local_store, spu->problem, spu->priv1, 355 spu->local_store, spu->problem, spu->priv1,
356 spu->priv2, spu->number); 356 spu->priv2, spu->number);
357 goto out; 357 goto out;
358 358
359 out_unmap: 359 out_unmap:
360 spu_unmap(spu); 360 spu_unmap(spu);
361 out: 361 out:
362 return ret; 362 return ret;
363 } 363 }
364 364
365 static int of_destroy_spu(struct spu *spu) 365 static int of_destroy_spu(struct spu *spu)
366 { 366 {
367 spu_unmap(spu); 367 spu_unmap(spu);
368 of_node_put(spu->devnode); 368 of_node_put(spu->devnode);
369 return 0; 369 return 0;
370 } 370 }
371 371
372 static void enable_spu_by_master_run(struct spu_context *ctx) 372 static void enable_spu_by_master_run(struct spu_context *ctx)
373 { 373 {
374 ctx->ops->master_start(ctx); 374 ctx->ops->master_start(ctx);
375 } 375 }
376 376
377 static void disable_spu_by_master_run(struct spu_context *ctx) 377 static void disable_spu_by_master_run(struct spu_context *ctx)
378 { 378 {
379 ctx->ops->master_stop(ctx); 379 ctx->ops->master_stop(ctx);
380 } 380 }
381 381
382 /* Hardcoded affinity idxs for qs20 */ 382 /* Hardcoded affinity idxs for qs20 */
383 #define QS20_SPES_PER_BE 8 383 #define QS20_SPES_PER_BE 8
384 static int qs20_reg_idxs[QS20_SPES_PER_BE] = { 0, 2, 4, 6, 7, 5, 3, 1 }; 384 static int qs20_reg_idxs[QS20_SPES_PER_BE] = { 0, 2, 4, 6, 7, 5, 3, 1 };
385 static int qs20_reg_memory[QS20_SPES_PER_BE] = { 1, 1, 0, 0, 0, 0, 0, 0 }; 385 static int qs20_reg_memory[QS20_SPES_PER_BE] = { 1, 1, 0, 0, 0, 0, 0, 0 };
386 386
387 static struct spu *spu_lookup_reg(int node, u32 reg) 387 static struct spu *spu_lookup_reg(int node, u32 reg)
388 { 388 {
389 struct spu *spu; 389 struct spu *spu;
390 const u32 *spu_reg; 390 const u32 *spu_reg;
391 391
392 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { 392 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
393 spu_reg = of_get_property(spu_devnode(spu), "reg", NULL); 393 spu_reg = of_get_property(spu_devnode(spu), "reg", NULL);
394 if (*spu_reg == reg) 394 if (*spu_reg == reg)
395 return spu; 395 return spu;
396 } 396 }
397 return NULL; 397 return NULL;
398 } 398 }
399 399
400 static void init_affinity_qs20_harcoded(void) 400 static void init_affinity_qs20_harcoded(void)
401 { 401 {
402 int node, i; 402 int node, i;
403 struct spu *last_spu, *spu; 403 struct spu *last_spu, *spu;
404 u32 reg; 404 u32 reg;
405 405
406 for (node = 0; node < MAX_NUMNODES; node++) { 406 for (node = 0; node < MAX_NUMNODES; node++) {
407 last_spu = NULL; 407 last_spu = NULL;
408 for (i = 0; i < QS20_SPES_PER_BE; i++) { 408 for (i = 0; i < QS20_SPES_PER_BE; i++) {
409 reg = qs20_reg_idxs[i]; 409 reg = qs20_reg_idxs[i];
410 spu = spu_lookup_reg(node, reg); 410 spu = spu_lookup_reg(node, reg);
411 if (!spu) 411 if (!spu)
412 continue; 412 continue;
413 spu->has_mem_affinity = qs20_reg_memory[reg]; 413 spu->has_mem_affinity = qs20_reg_memory[reg];
414 if (last_spu) 414 if (last_spu)
415 list_add_tail(&spu->aff_list, 415 list_add_tail(&spu->aff_list,
416 &last_spu->aff_list); 416 &last_spu->aff_list);
417 last_spu = spu; 417 last_spu = spu;
418 } 418 }
419 } 419 }
420 } 420 }
421 421
422 static int of_has_vicinity(void) 422 static int of_has_vicinity(void)
423 { 423 {
424 struct device_node *dn; 424 struct device_node *dn;
425 425
426 for_each_node_by_type(dn, "spe") { 426 for_each_node_by_type(dn, "spe") {
427 if (of_find_property(dn, "vicinity", NULL)) { 427 if (of_find_property(dn, "vicinity", NULL)) {
428 of_node_put(dn); 428 of_node_put(dn);
429 return 1; 429 return 1;
430 } 430 }
431 } 431 }
432 return 0; 432 return 0;
433 } 433 }
434 434
435 static struct spu *devnode_spu(int cbe, struct device_node *dn) 435 static struct spu *devnode_spu(int cbe, struct device_node *dn)
436 { 436 {
437 struct spu *spu; 437 struct spu *spu;
438 438
439 list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list) 439 list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list)
440 if (spu_devnode(spu) == dn) 440 if (spu_devnode(spu) == dn)
441 return spu; 441 return spu;
442 return NULL; 442 return NULL;
443 } 443 }
444 444
445 static struct spu * 445 static struct spu *
446 neighbour_spu(int cbe, struct device_node *target, struct device_node *avoid) 446 neighbour_spu(int cbe, struct device_node *target, struct device_node *avoid)
447 { 447 {
448 struct spu *spu; 448 struct spu *spu;
449 struct device_node *spu_dn; 449 struct device_node *spu_dn;
450 const phandle *vic_handles; 450 const phandle *vic_handles;
451 int lenp, i; 451 int lenp, i;
452 452
453 list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list) { 453 list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list) {
454 spu_dn = spu_devnode(spu); 454 spu_dn = spu_devnode(spu);
455 if (spu_dn == avoid) 455 if (spu_dn == avoid)
456 continue; 456 continue;
457 vic_handles = of_get_property(spu_dn, "vicinity", &lenp); 457 vic_handles = of_get_property(spu_dn, "vicinity", &lenp);
458 for (i=0; i < (lenp / sizeof(phandle)); i++) { 458 for (i=0; i < (lenp / sizeof(phandle)); i++) {
459 if (vic_handles[i] == target->phandle) 459 if (vic_handles[i] == target->phandle)
460 return spu; 460 return spu;
461 } 461 }
462 } 462 }
463 return NULL; 463 return NULL;
464 } 464 }
465 465
466 static void init_affinity_node(int cbe) 466 static void init_affinity_node(int cbe)
467 { 467 {
468 struct spu *spu, *last_spu; 468 struct spu *spu, *last_spu;
469 struct device_node *vic_dn, *last_spu_dn; 469 struct device_node *vic_dn, *last_spu_dn;
470 phandle avoid_ph; 470 phandle avoid_ph;
471 const phandle *vic_handles; 471 const phandle *vic_handles;
472 const char *name; 472 const char *name;
473 int lenp, i, added; 473 int lenp, i, added;
474 474
475 last_spu = list_first_entry(&cbe_spu_info[cbe].spus, struct spu, 475 last_spu = list_first_entry(&cbe_spu_info[cbe].spus, struct spu,
476 cbe_list); 476 cbe_list);
477 avoid_ph = 0; 477 avoid_ph = 0;
478 for (added = 1; added < cbe_spu_info[cbe].n_spus; added++) { 478 for (added = 1; added < cbe_spu_info[cbe].n_spus; added++) {
479 last_spu_dn = spu_devnode(last_spu); 479 last_spu_dn = spu_devnode(last_spu);
480 vic_handles = of_get_property(last_spu_dn, "vicinity", &lenp); 480 vic_handles = of_get_property(last_spu_dn, "vicinity", &lenp);
481 481
482 /* 482 /*
483 * Walk through each phandle in vicinity property of the spu 483 * Walk through each phandle in vicinity property of the spu
484 * (typically two vicinity phandles per spe node) 484 * (typically two vicinity phandles per spe node)
485 */ 485 */
486 for (i = 0; i < (lenp / sizeof(phandle)); i++) { 486 for (i = 0; i < (lenp / sizeof(phandle)); i++) {
487 if (vic_handles[i] == avoid_ph) 487 if (vic_handles[i] == avoid_ph)
488 continue; 488 continue;
489 489
490 vic_dn = of_find_node_by_phandle(vic_handles[i]); 490 vic_dn = of_find_node_by_phandle(vic_handles[i]);
491 if (!vic_dn) 491 if (!vic_dn)
492 continue; 492 continue;
493 493
494 /* a neighbour might be spe, mic-tm, or bif0 */ 494 /* a neighbour might be spe, mic-tm, or bif0 */
495 name = of_get_property(vic_dn, "name", NULL); 495 name = of_get_property(vic_dn, "name", NULL);
496 if (!name) 496 if (!name)
497 continue; 497 continue;
498 498
499 if (strcmp(name, "spe") == 0) { 499 if (strcmp(name, "spe") == 0) {
500 spu = devnode_spu(cbe, vic_dn); 500 spu = devnode_spu(cbe, vic_dn);
501 avoid_ph = last_spu_dn->phandle; 501 avoid_ph = last_spu_dn->phandle;
502 } else { 502 } else {
503 /* 503 /*
504 * "mic-tm" and "bif0" nodes do not have 504 * "mic-tm" and "bif0" nodes do not have
505 * vicinity property. So we need to find the 505 * vicinity property. So we need to find the
506 * spe which has vic_dn as neighbour, but 506 * spe which has vic_dn as neighbour, but
507 * skipping the one we came from (last_spu_dn) 507 * skipping the one we came from (last_spu_dn)
508 */ 508 */
509 spu = neighbour_spu(cbe, vic_dn, last_spu_dn); 509 spu = neighbour_spu(cbe, vic_dn, last_spu_dn);
510 if (!spu) 510 if (!spu)
511 continue; 511 continue;
512 if (!strcmp(name, "mic-tm")) { 512 if (!strcmp(name, "mic-tm")) {
513 last_spu->has_mem_affinity = 1; 513 last_spu->has_mem_affinity = 1;
514 spu->has_mem_affinity = 1; 514 spu->has_mem_affinity = 1;
515 } 515 }
516 avoid_ph = vic_dn->phandle; 516 avoid_ph = vic_dn->phandle;
517 } 517 }
518 518
519 list_add_tail(&spu->aff_list, &last_spu->aff_list); 519 list_add_tail(&spu->aff_list, &last_spu->aff_list);
520 last_spu = spu; 520 last_spu = spu;
521 break; 521 break;
522 } 522 }
523 } 523 }
524 } 524 }
525 525
526 static void init_affinity_fw(void) 526 static void init_affinity_fw(void)
527 { 527 {
528 int cbe; 528 int cbe;
529 529
530 for (cbe = 0; cbe < MAX_NUMNODES; cbe++) 530 for (cbe = 0; cbe < MAX_NUMNODES; cbe++)
531 init_affinity_node(cbe); 531 init_affinity_node(cbe);
532 } 532 }
533 533
534 static int __init init_affinity(void) 534 static int __init init_affinity(void)
535 { 535 {
536 if (of_has_vicinity()) { 536 if (of_has_vicinity()) {
537 init_affinity_fw(); 537 init_affinity_fw();
538 } else { 538 } else {
539 long root = of_get_flat_dt_root(); 539 long root = of_get_flat_dt_root();
540 if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0")) 540 if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
541 init_affinity_qs20_harcoded(); 541 init_affinity_qs20_harcoded();
542 else 542 else
543 printk("No affinity configuration found\n"); 543 printk("No affinity configuration found\n");
544 } 544 }
545 545
546 return 0; 546 return 0;
547 } 547 }
548 548
549 const struct spu_management_ops spu_management_of_ops = { 549 const struct spu_management_ops spu_management_of_ops = {
550 .enumerate_spus = of_enumerate_spus, 550 .enumerate_spus = of_enumerate_spus,
551 .create_spu = of_create_spu, 551 .create_spu = of_create_spu,
552 .destroy_spu = of_destroy_spu, 552 .destroy_spu = of_destroy_spu,
553 .enable_spu = enable_spu_by_master_run, 553 .enable_spu = enable_spu_by_master_run,
554 .disable_spu = disable_spu_by_master_run, 554 .disable_spu = disable_spu_by_master_run,
555 .init_affinity = init_affinity, 555 .init_affinity = init_affinity,
556 }; 556 };
557 557
arch/powerpc/platforms/cell/spufs/file.c
1 /* 1 /*
2 * SPU file system -- file contents 2 * SPU file system -- file contents
3 * 3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 * 5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com> 6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option) 10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version. 11 * any later version.
12 * 12 *
13 * This program is distributed in the hope that it will be useful, 13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 * 17 *
18 * You should have received a copy of the GNU General Public License 18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software 19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */ 21 */
22 22
23 #undef DEBUG 23 #undef DEBUG
24 24
25 #include <linux/fs.h> 25 #include <linux/fs.h>
26 #include <linux/ioctl.h> 26 #include <linux/ioctl.h>
27 #include <linux/module.h> 27 #include <linux/export.h>
28 #include <linux/pagemap.h> 28 #include <linux/pagemap.h>
29 #include <linux/poll.h> 29 #include <linux/poll.h>
30 #include <linux/ptrace.h> 30 #include <linux/ptrace.h>
31 #include <linux/seq_file.h> 31 #include <linux/seq_file.h>
32 #include <linux/slab.h> 32 #include <linux/slab.h>
33 33
34 #include <asm/io.h> 34 #include <asm/io.h>
35 #include <asm/time.h> 35 #include <asm/time.h>
36 #include <asm/spu.h> 36 #include <asm/spu.h>
37 #include <asm/spu_info.h> 37 #include <asm/spu_info.h>
38 #include <asm/uaccess.h> 38 #include <asm/uaccess.h>
39 39
40 #include "spufs.h" 40 #include "spufs.h"
41 #include "sputrace.h" 41 #include "sputrace.h"
42 42
43 #define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000) 43 #define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
44 44
45 /* Simple attribute files */ 45 /* Simple attribute files */
46 struct spufs_attr { 46 struct spufs_attr {
47 int (*get)(void *, u64 *); 47 int (*get)(void *, u64 *);
48 int (*set)(void *, u64); 48 int (*set)(void *, u64);
49 char get_buf[24]; /* enough to store a u64 and "\n\0" */ 49 char get_buf[24]; /* enough to store a u64 and "\n\0" */
50 char set_buf[24]; 50 char set_buf[24];
51 void *data; 51 void *data;
52 const char *fmt; /* format for read operation */ 52 const char *fmt; /* format for read operation */
53 struct mutex mutex; /* protects access to these buffers */ 53 struct mutex mutex; /* protects access to these buffers */
54 }; 54 };
55 55
56 static int spufs_attr_open(struct inode *inode, struct file *file, 56 static int spufs_attr_open(struct inode *inode, struct file *file,
57 int (*get)(void *, u64 *), int (*set)(void *, u64), 57 int (*get)(void *, u64 *), int (*set)(void *, u64),
58 const char *fmt) 58 const char *fmt)
59 { 59 {
60 struct spufs_attr *attr; 60 struct spufs_attr *attr;
61 61
62 attr = kmalloc(sizeof(*attr), GFP_KERNEL); 62 attr = kmalloc(sizeof(*attr), GFP_KERNEL);
63 if (!attr) 63 if (!attr)
64 return -ENOMEM; 64 return -ENOMEM;
65 65
66 attr->get = get; 66 attr->get = get;
67 attr->set = set; 67 attr->set = set;
68 attr->data = inode->i_private; 68 attr->data = inode->i_private;
69 attr->fmt = fmt; 69 attr->fmt = fmt;
70 mutex_init(&attr->mutex); 70 mutex_init(&attr->mutex);
71 file->private_data = attr; 71 file->private_data = attr;
72 72
73 return nonseekable_open(inode, file); 73 return nonseekable_open(inode, file);
74 } 74 }
75 75
76 static int spufs_attr_release(struct inode *inode, struct file *file) 76 static int spufs_attr_release(struct inode *inode, struct file *file)
77 { 77 {
78 kfree(file->private_data); 78 kfree(file->private_data);
79 return 0; 79 return 0;
80 } 80 }
81 81
82 static ssize_t spufs_attr_read(struct file *file, char __user *buf, 82 static ssize_t spufs_attr_read(struct file *file, char __user *buf,
83 size_t len, loff_t *ppos) 83 size_t len, loff_t *ppos)
84 { 84 {
85 struct spufs_attr *attr; 85 struct spufs_attr *attr;
86 size_t size; 86 size_t size;
87 ssize_t ret; 87 ssize_t ret;
88 88
89 attr = file->private_data; 89 attr = file->private_data;
90 if (!attr->get) 90 if (!attr->get)
91 return -EACCES; 91 return -EACCES;
92 92
93 ret = mutex_lock_interruptible(&attr->mutex); 93 ret = mutex_lock_interruptible(&attr->mutex);
94 if (ret) 94 if (ret)
95 return ret; 95 return ret;
96 96
97 if (*ppos) { /* continued read */ 97 if (*ppos) { /* continued read */
98 size = strlen(attr->get_buf); 98 size = strlen(attr->get_buf);
99 } else { /* first read */ 99 } else { /* first read */
100 u64 val; 100 u64 val;
101 ret = attr->get(attr->data, &val); 101 ret = attr->get(attr->data, &val);
102 if (ret) 102 if (ret)
103 goto out; 103 goto out;
104 104
105 size = scnprintf(attr->get_buf, sizeof(attr->get_buf), 105 size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
106 attr->fmt, (unsigned long long)val); 106 attr->fmt, (unsigned long long)val);
107 } 107 }
108 108
109 ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size); 109 ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
110 out: 110 out:
111 mutex_unlock(&attr->mutex); 111 mutex_unlock(&attr->mutex);
112 return ret; 112 return ret;
113 } 113 }
114 114
115 static ssize_t spufs_attr_write(struct file *file, const char __user *buf, 115 static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
116 size_t len, loff_t *ppos) 116 size_t len, loff_t *ppos)
117 { 117 {
118 struct spufs_attr *attr; 118 struct spufs_attr *attr;
119 u64 val; 119 u64 val;
120 size_t size; 120 size_t size;
121 ssize_t ret; 121 ssize_t ret;
122 122
123 attr = file->private_data; 123 attr = file->private_data;
124 if (!attr->set) 124 if (!attr->set)
125 return -EACCES; 125 return -EACCES;
126 126
127 ret = mutex_lock_interruptible(&attr->mutex); 127 ret = mutex_lock_interruptible(&attr->mutex);
128 if (ret) 128 if (ret)
129 return ret; 129 return ret;
130 130
131 ret = -EFAULT; 131 ret = -EFAULT;
132 size = min(sizeof(attr->set_buf) - 1, len); 132 size = min(sizeof(attr->set_buf) - 1, len);
133 if (copy_from_user(attr->set_buf, buf, size)) 133 if (copy_from_user(attr->set_buf, buf, size))
134 goto out; 134 goto out;
135 135
136 ret = len; /* claim we got the whole input */ 136 ret = len; /* claim we got the whole input */
137 attr->set_buf[size] = '\0'; 137 attr->set_buf[size] = '\0';
138 val = simple_strtol(attr->set_buf, NULL, 0); 138 val = simple_strtol(attr->set_buf, NULL, 0);
139 attr->set(attr->data, val); 139 attr->set(attr->data, val);
140 out: 140 out:
141 mutex_unlock(&attr->mutex); 141 mutex_unlock(&attr->mutex);
142 return ret; 142 return ret;
143 } 143 }
144 144
145 #define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \ 145 #define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
146 static int __fops ## _open(struct inode *inode, struct file *file) \ 146 static int __fops ## _open(struct inode *inode, struct file *file) \
147 { \ 147 { \
148 __simple_attr_check_format(__fmt, 0ull); \ 148 __simple_attr_check_format(__fmt, 0ull); \
149 return spufs_attr_open(inode, file, __get, __set, __fmt); \ 149 return spufs_attr_open(inode, file, __get, __set, __fmt); \
150 } \ 150 } \
151 static const struct file_operations __fops = { \ 151 static const struct file_operations __fops = { \
152 .owner = THIS_MODULE, \ 152 .owner = THIS_MODULE, \
153 .open = __fops ## _open, \ 153 .open = __fops ## _open, \
154 .release = spufs_attr_release, \ 154 .release = spufs_attr_release, \
155 .read = spufs_attr_read, \ 155 .read = spufs_attr_read, \
156 .write = spufs_attr_write, \ 156 .write = spufs_attr_write, \
157 .llseek = generic_file_llseek, \ 157 .llseek = generic_file_llseek, \
158 }; 158 };
159 159
160 160
161 static int 161 static int
162 spufs_mem_open(struct inode *inode, struct file *file) 162 spufs_mem_open(struct inode *inode, struct file *file)
163 { 163 {
164 struct spufs_inode_info *i = SPUFS_I(inode); 164 struct spufs_inode_info *i = SPUFS_I(inode);
165 struct spu_context *ctx = i->i_ctx; 165 struct spu_context *ctx = i->i_ctx;
166 166
167 mutex_lock(&ctx->mapping_lock); 167 mutex_lock(&ctx->mapping_lock);
168 file->private_data = ctx; 168 file->private_data = ctx;
169 if (!i->i_openers++) 169 if (!i->i_openers++)
170 ctx->local_store = inode->i_mapping; 170 ctx->local_store = inode->i_mapping;
171 mutex_unlock(&ctx->mapping_lock); 171 mutex_unlock(&ctx->mapping_lock);
172 return 0; 172 return 0;
173 } 173 }
174 174
175 static int 175 static int
176 spufs_mem_release(struct inode *inode, struct file *file) 176 spufs_mem_release(struct inode *inode, struct file *file)
177 { 177 {
178 struct spufs_inode_info *i = SPUFS_I(inode); 178 struct spufs_inode_info *i = SPUFS_I(inode);
179 struct spu_context *ctx = i->i_ctx; 179 struct spu_context *ctx = i->i_ctx;
180 180
181 mutex_lock(&ctx->mapping_lock); 181 mutex_lock(&ctx->mapping_lock);
182 if (!--i->i_openers) 182 if (!--i->i_openers)
183 ctx->local_store = NULL; 183 ctx->local_store = NULL;
184 mutex_unlock(&ctx->mapping_lock); 184 mutex_unlock(&ctx->mapping_lock);
185 return 0; 185 return 0;
186 } 186 }
187 187
188 static ssize_t 188 static ssize_t
189 __spufs_mem_read(struct spu_context *ctx, char __user *buffer, 189 __spufs_mem_read(struct spu_context *ctx, char __user *buffer,
190 size_t size, loff_t *pos) 190 size_t size, loff_t *pos)
191 { 191 {
192 char *local_store = ctx->ops->get_ls(ctx); 192 char *local_store = ctx->ops->get_ls(ctx);
193 return simple_read_from_buffer(buffer, size, pos, local_store, 193 return simple_read_from_buffer(buffer, size, pos, local_store,
194 LS_SIZE); 194 LS_SIZE);
195 } 195 }
196 196
197 static ssize_t 197 static ssize_t
198 spufs_mem_read(struct file *file, char __user *buffer, 198 spufs_mem_read(struct file *file, char __user *buffer,
199 size_t size, loff_t *pos) 199 size_t size, loff_t *pos)
200 { 200 {
201 struct spu_context *ctx = file->private_data; 201 struct spu_context *ctx = file->private_data;
202 ssize_t ret; 202 ssize_t ret;
203 203
204 ret = spu_acquire(ctx); 204 ret = spu_acquire(ctx);
205 if (ret) 205 if (ret)
206 return ret; 206 return ret;
207 ret = __spufs_mem_read(ctx, buffer, size, pos); 207 ret = __spufs_mem_read(ctx, buffer, size, pos);
208 spu_release(ctx); 208 spu_release(ctx);
209 209
210 return ret; 210 return ret;
211 } 211 }
212 212
213 static ssize_t 213 static ssize_t
214 spufs_mem_write(struct file *file, const char __user *buffer, 214 spufs_mem_write(struct file *file, const char __user *buffer,
215 size_t size, loff_t *ppos) 215 size_t size, loff_t *ppos)
216 { 216 {
217 struct spu_context *ctx = file->private_data; 217 struct spu_context *ctx = file->private_data;
218 char *local_store; 218 char *local_store;
219 loff_t pos = *ppos; 219 loff_t pos = *ppos;
220 int ret; 220 int ret;
221 221
222 if (pos > LS_SIZE) 222 if (pos > LS_SIZE)
223 return -EFBIG; 223 return -EFBIG;
224 224
225 ret = spu_acquire(ctx); 225 ret = spu_acquire(ctx);
226 if (ret) 226 if (ret)
227 return ret; 227 return ret;
228 228
229 local_store = ctx->ops->get_ls(ctx); 229 local_store = ctx->ops->get_ls(ctx);
230 size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size); 230 size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size);
231 spu_release(ctx); 231 spu_release(ctx);
232 232
233 return size; 233 return size;
234 } 234 }
235 235
236 static int 236 static int
237 spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 237 spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
238 { 238 {
239 struct spu_context *ctx = vma->vm_file->private_data; 239 struct spu_context *ctx = vma->vm_file->private_data;
240 unsigned long address = (unsigned long)vmf->virtual_address; 240 unsigned long address = (unsigned long)vmf->virtual_address;
241 unsigned long pfn, offset; 241 unsigned long pfn, offset;
242 242
243 #ifdef CONFIG_SPU_FS_64K_LS 243 #ifdef CONFIG_SPU_FS_64K_LS
244 struct spu_state *csa = &ctx->csa; 244 struct spu_state *csa = &ctx->csa;
245 int psize; 245 int psize;
246 246
247 /* Check what page size we are using */ 247 /* Check what page size we are using */
248 psize = get_slice_psize(vma->vm_mm, address); 248 psize = get_slice_psize(vma->vm_mm, address);
249 249
250 /* Some sanity checking */ 250 /* Some sanity checking */
251 BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K)); 251 BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));
252 252
253 /* Wow, 64K, cool, we need to align the address though */ 253 /* Wow, 64K, cool, we need to align the address though */
254 if (csa->use_big_pages) { 254 if (csa->use_big_pages) {
255 BUG_ON(vma->vm_start & 0xffff); 255 BUG_ON(vma->vm_start & 0xffff);
256 address &= ~0xfffful; 256 address &= ~0xfffful;
257 } 257 }
258 #endif /* CONFIG_SPU_FS_64K_LS */ 258 #endif /* CONFIG_SPU_FS_64K_LS */
259 259
260 offset = vmf->pgoff << PAGE_SHIFT; 260 offset = vmf->pgoff << PAGE_SHIFT;
261 if (offset >= LS_SIZE) 261 if (offset >= LS_SIZE)
262 return VM_FAULT_SIGBUS; 262 return VM_FAULT_SIGBUS;
263 263
264 pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n", 264 pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
265 address, offset); 265 address, offset);
266 266
267 if (spu_acquire(ctx)) 267 if (spu_acquire(ctx))
268 return VM_FAULT_NOPAGE; 268 return VM_FAULT_NOPAGE;
269 269
270 if (ctx->state == SPU_STATE_SAVED) { 270 if (ctx->state == SPU_STATE_SAVED) {
271 vma->vm_page_prot = pgprot_cached(vma->vm_page_prot); 271 vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
272 pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset); 272 pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
273 } else { 273 } else {
274 vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot); 274 vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
275 pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT; 275 pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
276 } 276 }
277 vm_insert_pfn(vma, address, pfn); 277 vm_insert_pfn(vma, address, pfn);
278 278
279 spu_release(ctx); 279 spu_release(ctx);
280 280
281 return VM_FAULT_NOPAGE; 281 return VM_FAULT_NOPAGE;
282 } 282 }
283 283
284 static int spufs_mem_mmap_access(struct vm_area_struct *vma, 284 static int spufs_mem_mmap_access(struct vm_area_struct *vma,
285 unsigned long address, 285 unsigned long address,
286 void *buf, int len, int write) 286 void *buf, int len, int write)
287 { 287 {
288 struct spu_context *ctx = vma->vm_file->private_data; 288 struct spu_context *ctx = vma->vm_file->private_data;
289 unsigned long offset = address - vma->vm_start; 289 unsigned long offset = address - vma->vm_start;
290 char *local_store; 290 char *local_store;
291 291
292 if (write && !(vma->vm_flags & VM_WRITE)) 292 if (write && !(vma->vm_flags & VM_WRITE))
293 return -EACCES; 293 return -EACCES;
294 if (spu_acquire(ctx)) 294 if (spu_acquire(ctx))
295 return -EINTR; 295 return -EINTR;
296 if ((offset + len) > vma->vm_end) 296 if ((offset + len) > vma->vm_end)
297 len = vma->vm_end - offset; 297 len = vma->vm_end - offset;
298 local_store = ctx->ops->get_ls(ctx); 298 local_store = ctx->ops->get_ls(ctx);
299 if (write) 299 if (write)
300 memcpy_toio(local_store + offset, buf, len); 300 memcpy_toio(local_store + offset, buf, len);
301 else 301 else
302 memcpy_fromio(buf, local_store + offset, len); 302 memcpy_fromio(buf, local_store + offset, len);
303 spu_release(ctx); 303 spu_release(ctx);
304 return len; 304 return len;
305 } 305 }
306 306
307 static const struct vm_operations_struct spufs_mem_mmap_vmops = { 307 static const struct vm_operations_struct spufs_mem_mmap_vmops = {
308 .fault = spufs_mem_mmap_fault, 308 .fault = spufs_mem_mmap_fault,
309 .access = spufs_mem_mmap_access, 309 .access = spufs_mem_mmap_access,
310 }; 310 };
311 311
312 static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma) 312 static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
313 { 313 {
314 #ifdef CONFIG_SPU_FS_64K_LS 314 #ifdef CONFIG_SPU_FS_64K_LS
315 struct spu_context *ctx = file->private_data; 315 struct spu_context *ctx = file->private_data;
316 struct spu_state *csa = &ctx->csa; 316 struct spu_state *csa = &ctx->csa;
317 317
318 /* Sanity check VMA alignment */ 318 /* Sanity check VMA alignment */
319 if (csa->use_big_pages) { 319 if (csa->use_big_pages) {
320 pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx," 320 pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
321 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end, 321 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
322 vma->vm_pgoff); 322 vma->vm_pgoff);
323 if (vma->vm_start & 0xffff) 323 if (vma->vm_start & 0xffff)
324 return -EINVAL; 324 return -EINVAL;
325 if (vma->vm_pgoff & 0xf) 325 if (vma->vm_pgoff & 0xf)
326 return -EINVAL; 326 return -EINVAL;
327 } 327 }
328 #endif /* CONFIG_SPU_FS_64K_LS */ 328 #endif /* CONFIG_SPU_FS_64K_LS */
329 329
330 if (!(vma->vm_flags & VM_SHARED)) 330 if (!(vma->vm_flags & VM_SHARED))
331 return -EINVAL; 331 return -EINVAL;
332 332
333 vma->vm_flags |= VM_IO | VM_PFNMAP; 333 vma->vm_flags |= VM_IO | VM_PFNMAP;
334 vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot); 334 vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
335 335
336 vma->vm_ops = &spufs_mem_mmap_vmops; 336 vma->vm_ops = &spufs_mem_mmap_vmops;
337 return 0; 337 return 0;
338 } 338 }
339 339
340 #ifdef CONFIG_SPU_FS_64K_LS 340 #ifdef CONFIG_SPU_FS_64K_LS
341 static unsigned long spufs_get_unmapped_area(struct file *file, 341 static unsigned long spufs_get_unmapped_area(struct file *file,
342 unsigned long addr, unsigned long len, unsigned long pgoff, 342 unsigned long addr, unsigned long len, unsigned long pgoff,
343 unsigned long flags) 343 unsigned long flags)
344 { 344 {
345 struct spu_context *ctx = file->private_data; 345 struct spu_context *ctx = file->private_data;
346 struct spu_state *csa = &ctx->csa; 346 struct spu_state *csa = &ctx->csa;
347 347
348 /* If not using big pages, fallback to normal MM g_u_a */ 348 /* If not using big pages, fallback to normal MM g_u_a */
349 if (!csa->use_big_pages) 349 if (!csa->use_big_pages)
350 return current->mm->get_unmapped_area(file, addr, len, 350 return current->mm->get_unmapped_area(file, addr, len,
351 pgoff, flags); 351 pgoff, flags);
352 352
353 /* Else, try to obtain a 64K pages slice */ 353 /* Else, try to obtain a 64K pages slice */
354 return slice_get_unmapped_area(addr, len, flags, 354 return slice_get_unmapped_area(addr, len, flags,
355 MMU_PAGE_64K, 1, 0); 355 MMU_PAGE_64K, 1, 0);
356 } 356 }
357 #endif /* CONFIG_SPU_FS_64K_LS */ 357 #endif /* CONFIG_SPU_FS_64K_LS */
358 358
359 static const struct file_operations spufs_mem_fops = { 359 static const struct file_operations spufs_mem_fops = {
360 .open = spufs_mem_open, 360 .open = spufs_mem_open,
361 .release = spufs_mem_release, 361 .release = spufs_mem_release,
362 .read = spufs_mem_read, 362 .read = spufs_mem_read,
363 .write = spufs_mem_write, 363 .write = spufs_mem_write,
364 .llseek = generic_file_llseek, 364 .llseek = generic_file_llseek,
365 .mmap = spufs_mem_mmap, 365 .mmap = spufs_mem_mmap,
366 #ifdef CONFIG_SPU_FS_64K_LS 366 #ifdef CONFIG_SPU_FS_64K_LS
367 .get_unmapped_area = spufs_get_unmapped_area, 367 .get_unmapped_area = spufs_get_unmapped_area,
368 #endif 368 #endif
369 }; 369 };
370 370
371 static int spufs_ps_fault(struct vm_area_struct *vma, 371 static int spufs_ps_fault(struct vm_area_struct *vma,
372 struct vm_fault *vmf, 372 struct vm_fault *vmf,
373 unsigned long ps_offs, 373 unsigned long ps_offs,
374 unsigned long ps_size) 374 unsigned long ps_size)
375 { 375 {
376 struct spu_context *ctx = vma->vm_file->private_data; 376 struct spu_context *ctx = vma->vm_file->private_data;
377 unsigned long area, offset = vmf->pgoff << PAGE_SHIFT; 377 unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
378 int ret = 0; 378 int ret = 0;
379 379
380 spu_context_nospu_trace(spufs_ps_fault__enter, ctx); 380 spu_context_nospu_trace(spufs_ps_fault__enter, ctx);
381 381
382 if (offset >= ps_size) 382 if (offset >= ps_size)
383 return VM_FAULT_SIGBUS; 383 return VM_FAULT_SIGBUS;
384 384
385 if (fatal_signal_pending(current)) 385 if (fatal_signal_pending(current))
386 return VM_FAULT_SIGBUS; 386 return VM_FAULT_SIGBUS;
387 387
388 /* 388 /*
389 * Because we release the mmap_sem, the context may be destroyed while 389 * Because we release the mmap_sem, the context may be destroyed while
390 * we're in spu_wait. Grab an extra reference so it isn't destroyed 390 * we're in spu_wait. Grab an extra reference so it isn't destroyed
391 * in the meantime. 391 * in the meantime.
392 */ 392 */
393 get_spu_context(ctx); 393 get_spu_context(ctx);
394 394
395 /* 395 /*
396 * We have to wait for context to be loaded before we have 396 * We have to wait for context to be loaded before we have
397 * pages to hand out to the user, but we don't want to wait 397 * pages to hand out to the user, but we don't want to wait
398 * with the mmap_sem held. 398 * with the mmap_sem held.
399 * It is possible to drop the mmap_sem here, but then we need 399 * It is possible to drop the mmap_sem here, but then we need
400 * to return VM_FAULT_NOPAGE because the mappings may have 400 * to return VM_FAULT_NOPAGE because the mappings may have
401 * changed. 401 * changed.
402 */ 402 */
403 if (spu_acquire(ctx)) 403 if (spu_acquire(ctx))
404 goto refault; 404 goto refault;
405 405
406 if (ctx->state == SPU_STATE_SAVED) { 406 if (ctx->state == SPU_STATE_SAVED) {
407 up_read(&current->mm->mmap_sem); 407 up_read(&current->mm->mmap_sem);
408 spu_context_nospu_trace(spufs_ps_fault__sleep, ctx); 408 spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
409 ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE); 409 ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
410 spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu); 410 spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
411 down_read(&current->mm->mmap_sem); 411 down_read(&current->mm->mmap_sem);
412 } else { 412 } else {
413 area = ctx->spu->problem_phys + ps_offs; 413 area = ctx->spu->problem_phys + ps_offs;
414 vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, 414 vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
415 (area + offset) >> PAGE_SHIFT); 415 (area + offset) >> PAGE_SHIFT);
416 spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu); 416 spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
417 } 417 }
418 418
419 if (!ret) 419 if (!ret)
420 spu_release(ctx); 420 spu_release(ctx);
421 421
422 refault: 422 refault:
423 put_spu_context(ctx); 423 put_spu_context(ctx);
424 return VM_FAULT_NOPAGE; 424 return VM_FAULT_NOPAGE;
425 } 425 }
426 426
427 #if SPUFS_MMAP_4K 427 #if SPUFS_MMAP_4K
428 static int spufs_cntl_mmap_fault(struct vm_area_struct *vma, 428 static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
429 struct vm_fault *vmf) 429 struct vm_fault *vmf)
430 { 430 {
431 return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE); 431 return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
432 } 432 }
433 433
434 static const struct vm_operations_struct spufs_cntl_mmap_vmops = { 434 static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
435 .fault = spufs_cntl_mmap_fault, 435 .fault = spufs_cntl_mmap_fault,
436 }; 436 };
437 437
438 /* 438 /*
439 * mmap support for problem state control area [0x4000 - 0x4fff]. 439 * mmap support for problem state control area [0x4000 - 0x4fff].
440 */ 440 */
441 static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma) 441 static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
442 { 442 {
443 if (!(vma->vm_flags & VM_SHARED)) 443 if (!(vma->vm_flags & VM_SHARED))
444 return -EINVAL; 444 return -EINVAL;
445 445
446 vma->vm_flags |= VM_IO | VM_PFNMAP; 446 vma->vm_flags |= VM_IO | VM_PFNMAP;
447 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 447 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
448 448
449 vma->vm_ops = &spufs_cntl_mmap_vmops; 449 vma->vm_ops = &spufs_cntl_mmap_vmops;
450 return 0; 450 return 0;
451 } 451 }
452 #else /* SPUFS_MMAP_4K */ 452 #else /* SPUFS_MMAP_4K */
453 #define spufs_cntl_mmap NULL 453 #define spufs_cntl_mmap NULL
454 #endif /* !SPUFS_MMAP_4K */ 454 #endif /* !SPUFS_MMAP_4K */
455 455
456 static int spufs_cntl_get(void *data, u64 *val) 456 static int spufs_cntl_get(void *data, u64 *val)
457 { 457 {
458 struct spu_context *ctx = data; 458 struct spu_context *ctx = data;
459 int ret; 459 int ret;
460 460
461 ret = spu_acquire(ctx); 461 ret = spu_acquire(ctx);
462 if (ret) 462 if (ret)
463 return ret; 463 return ret;
464 *val = ctx->ops->status_read(ctx); 464 *val = ctx->ops->status_read(ctx);
465 spu_release(ctx); 465 spu_release(ctx);
466 466
467 return 0; 467 return 0;
468 } 468 }
469 469
470 static int spufs_cntl_set(void *data, u64 val) 470 static int spufs_cntl_set(void *data, u64 val)
471 { 471 {
472 struct spu_context *ctx = data; 472 struct spu_context *ctx = data;
473 int ret; 473 int ret;
474 474
475 ret = spu_acquire(ctx); 475 ret = spu_acquire(ctx);
476 if (ret) 476 if (ret)
477 return ret; 477 return ret;
478 ctx->ops->runcntl_write(ctx, val); 478 ctx->ops->runcntl_write(ctx, val);
479 spu_release(ctx); 479 spu_release(ctx);
480 480
481 return 0; 481 return 0;
482 } 482 }
483 483
484 static int spufs_cntl_open(struct inode *inode, struct file *file) 484 static int spufs_cntl_open(struct inode *inode, struct file *file)
485 { 485 {
486 struct spufs_inode_info *i = SPUFS_I(inode); 486 struct spufs_inode_info *i = SPUFS_I(inode);
487 struct spu_context *ctx = i->i_ctx; 487 struct spu_context *ctx = i->i_ctx;
488 488
489 mutex_lock(&ctx->mapping_lock); 489 mutex_lock(&ctx->mapping_lock);
490 file->private_data = ctx; 490 file->private_data = ctx;
491 if (!i->i_openers++) 491 if (!i->i_openers++)
492 ctx->cntl = inode->i_mapping; 492 ctx->cntl = inode->i_mapping;
493 mutex_unlock(&ctx->mapping_lock); 493 mutex_unlock(&ctx->mapping_lock);
494 return simple_attr_open(inode, file, spufs_cntl_get, 494 return simple_attr_open(inode, file, spufs_cntl_get,
495 spufs_cntl_set, "0x%08lx"); 495 spufs_cntl_set, "0x%08lx");
496 } 496 }
497 497
498 static int 498 static int
499 spufs_cntl_release(struct inode *inode, struct file *file) 499 spufs_cntl_release(struct inode *inode, struct file *file)
500 { 500 {
501 struct spufs_inode_info *i = SPUFS_I(inode); 501 struct spufs_inode_info *i = SPUFS_I(inode);
502 struct spu_context *ctx = i->i_ctx; 502 struct spu_context *ctx = i->i_ctx;
503 503
504 simple_attr_release(inode, file); 504 simple_attr_release(inode, file);
505 505
506 mutex_lock(&ctx->mapping_lock); 506 mutex_lock(&ctx->mapping_lock);
507 if (!--i->i_openers) 507 if (!--i->i_openers)
508 ctx->cntl = NULL; 508 ctx->cntl = NULL;
509 mutex_unlock(&ctx->mapping_lock); 509 mutex_unlock(&ctx->mapping_lock);
510 return 0; 510 return 0;
511 } 511 }
512 512
513 static const struct file_operations spufs_cntl_fops = { 513 static const struct file_operations spufs_cntl_fops = {
514 .open = spufs_cntl_open, 514 .open = spufs_cntl_open,
515 .release = spufs_cntl_release, 515 .release = spufs_cntl_release,
516 .read = simple_attr_read, 516 .read = simple_attr_read,
517 .write = simple_attr_write, 517 .write = simple_attr_write,
518 .llseek = generic_file_llseek, 518 .llseek = generic_file_llseek,
519 .mmap = spufs_cntl_mmap, 519 .mmap = spufs_cntl_mmap,
520 }; 520 };
521 521
522 static int 522 static int
523 spufs_regs_open(struct inode *inode, struct file *file) 523 spufs_regs_open(struct inode *inode, struct file *file)
524 { 524 {
525 struct spufs_inode_info *i = SPUFS_I(inode); 525 struct spufs_inode_info *i = SPUFS_I(inode);
526 file->private_data = i->i_ctx; 526 file->private_data = i->i_ctx;
527 return 0; 527 return 0;
528 } 528 }
529 529
530 static ssize_t 530 static ssize_t
531 __spufs_regs_read(struct spu_context *ctx, char __user *buffer, 531 __spufs_regs_read(struct spu_context *ctx, char __user *buffer,
532 size_t size, loff_t *pos) 532 size_t size, loff_t *pos)
533 { 533 {
534 struct spu_lscsa *lscsa = ctx->csa.lscsa; 534 struct spu_lscsa *lscsa = ctx->csa.lscsa;
535 return simple_read_from_buffer(buffer, size, pos, 535 return simple_read_from_buffer(buffer, size, pos,
536 lscsa->gprs, sizeof lscsa->gprs); 536 lscsa->gprs, sizeof lscsa->gprs);
537 } 537 }
538 538
539 static ssize_t 539 static ssize_t
540 spufs_regs_read(struct file *file, char __user *buffer, 540 spufs_regs_read(struct file *file, char __user *buffer,
541 size_t size, loff_t *pos) 541 size_t size, loff_t *pos)
542 { 542 {
543 int ret; 543 int ret;
544 struct spu_context *ctx = file->private_data; 544 struct spu_context *ctx = file->private_data;
545 545
546 /* pre-check for file position: if we'd return EOF, there's no point 546 /* pre-check for file position: if we'd return EOF, there's no point
547 * causing a deschedule */ 547 * causing a deschedule */
548 if (*pos >= sizeof(ctx->csa.lscsa->gprs)) 548 if (*pos >= sizeof(ctx->csa.lscsa->gprs))
549 return 0; 549 return 0;
550 550
551 ret = spu_acquire_saved(ctx); 551 ret = spu_acquire_saved(ctx);
552 if (ret) 552 if (ret)
553 return ret; 553 return ret;
554 ret = __spufs_regs_read(ctx, buffer, size, pos); 554 ret = __spufs_regs_read(ctx, buffer, size, pos);
555 spu_release_saved(ctx); 555 spu_release_saved(ctx);
556 return ret; 556 return ret;
557 } 557 }
558 558
559 static ssize_t 559 static ssize_t
560 spufs_regs_write(struct file *file, const char __user *buffer, 560 spufs_regs_write(struct file *file, const char __user *buffer,
561 size_t size, loff_t *pos) 561 size_t size, loff_t *pos)
562 { 562 {
563 struct spu_context *ctx = file->private_data; 563 struct spu_context *ctx = file->private_data;
564 struct spu_lscsa *lscsa = ctx->csa.lscsa; 564 struct spu_lscsa *lscsa = ctx->csa.lscsa;
565 int ret; 565 int ret;
566 566
567 if (*pos >= sizeof(lscsa->gprs)) 567 if (*pos >= sizeof(lscsa->gprs))
568 return -EFBIG; 568 return -EFBIG;
569 569
570 ret = spu_acquire_saved(ctx); 570 ret = spu_acquire_saved(ctx);
571 if (ret) 571 if (ret)
572 return ret; 572 return ret;
573 573
574 size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos, 574 size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos,
575 buffer, size); 575 buffer, size);
576 576
577 spu_release_saved(ctx); 577 spu_release_saved(ctx);
578 return size; 578 return size;
579 } 579 }
580 580
581 static const struct file_operations spufs_regs_fops = { 581 static const struct file_operations spufs_regs_fops = {
582 .open = spufs_regs_open, 582 .open = spufs_regs_open,
583 .read = spufs_regs_read, 583 .read = spufs_regs_read,
584 .write = spufs_regs_write, 584 .write = spufs_regs_write,
585 .llseek = generic_file_llseek, 585 .llseek = generic_file_llseek,
586 }; 586 };
587 587
588 static ssize_t 588 static ssize_t
589 __spufs_fpcr_read(struct spu_context *ctx, char __user * buffer, 589 __spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
590 size_t size, loff_t * pos) 590 size_t size, loff_t * pos)
591 { 591 {
592 struct spu_lscsa *lscsa = ctx->csa.lscsa; 592 struct spu_lscsa *lscsa = ctx->csa.lscsa;
593 return simple_read_from_buffer(buffer, size, pos, 593 return simple_read_from_buffer(buffer, size, pos,
594 &lscsa->fpcr, sizeof(lscsa->fpcr)); 594 &lscsa->fpcr, sizeof(lscsa->fpcr));
595 } 595 }
596 596
597 static ssize_t 597 static ssize_t
598 spufs_fpcr_read(struct file *file, char __user * buffer, 598 spufs_fpcr_read(struct file *file, char __user * buffer,
599 size_t size, loff_t * pos) 599 size_t size, loff_t * pos)
600 { 600 {
601 int ret; 601 int ret;
602 struct spu_context *ctx = file->private_data; 602 struct spu_context *ctx = file->private_data;
603 603
604 ret = spu_acquire_saved(ctx); 604 ret = spu_acquire_saved(ctx);
605 if (ret) 605 if (ret)
606 return ret; 606 return ret;
607 ret = __spufs_fpcr_read(ctx, buffer, size, pos); 607 ret = __spufs_fpcr_read(ctx, buffer, size, pos);
608 spu_release_saved(ctx); 608 spu_release_saved(ctx);
609 return ret; 609 return ret;
610 } 610 }
611 611
612 static ssize_t 612 static ssize_t
613 spufs_fpcr_write(struct file *file, const char __user * buffer, 613 spufs_fpcr_write(struct file *file, const char __user * buffer,
614 size_t size, loff_t * pos) 614 size_t size, loff_t * pos)
615 { 615 {
616 struct spu_context *ctx = file->private_data; 616 struct spu_context *ctx = file->private_data;
617 struct spu_lscsa *lscsa = ctx->csa.lscsa; 617 struct spu_lscsa *lscsa = ctx->csa.lscsa;
618 int ret; 618 int ret;
619 619
620 if (*pos >= sizeof(lscsa->fpcr)) 620 if (*pos >= sizeof(lscsa->fpcr))
621 return -EFBIG; 621 return -EFBIG;
622 622
623 ret = spu_acquire_saved(ctx); 623 ret = spu_acquire_saved(ctx);
624 if (ret) 624 if (ret)
625 return ret; 625 return ret;
626 626
627 size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos, 627 size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos,
628 buffer, size); 628 buffer, size);
629 629
630 spu_release_saved(ctx); 630 spu_release_saved(ctx);
631 return size; 631 return size;
632 } 632 }
633 633
634 static const struct file_operations spufs_fpcr_fops = { 634 static const struct file_operations spufs_fpcr_fops = {
635 .open = spufs_regs_open, 635 .open = spufs_regs_open,
636 .read = spufs_fpcr_read, 636 .read = spufs_fpcr_read,
637 .write = spufs_fpcr_write, 637 .write = spufs_fpcr_write,
638 .llseek = generic_file_llseek, 638 .llseek = generic_file_llseek,
639 }; 639 };
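
The "regs" and "fpcr" files above expose the saved local-store context (lscsa) of an SPU context through simple_read_from_buffer()/simple_write_to_buffer(), with spu_acquire_saved() first forcing the context into its saved state so the image is consistent. A hedged user-space sketch follows; the 128 x 16-byte register-file size and the path layout under a spufs context directory are assumptions, not taken from the diff.

/* Hedged sketch: dump the saved GPRs of an SPU context via its "regs" file.
 * The 128 x 16-byte size of lscsa->gprs is an assumption here. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int dump_spu_gprs(const char *ctx_dir)
{
	char path[256];
	unsigned char gprs[128 * 16];	/* assumed size of lscsa->gprs */
	int fd;
	ssize_t n;

	snprintf(path, sizeof(path), "%s/regs", ctx_dir);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	/* reading past the end returns 0 (EOF), as in spufs_regs_read() */
	n = read(fd, gprs, sizeof(gprs));
	close(fd);
	return n < 0 ? -1 : (int)n;
}
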
640 640
641 /* generic open function for all pipe-like files */ 641 /* generic open function for all pipe-like files */
642 static int spufs_pipe_open(struct inode *inode, struct file *file) 642 static int spufs_pipe_open(struct inode *inode, struct file *file)
643 { 643 {
644 struct spufs_inode_info *i = SPUFS_I(inode); 644 struct spufs_inode_info *i = SPUFS_I(inode);
645 file->private_data = i->i_ctx; 645 file->private_data = i->i_ctx;
646 646
647 return nonseekable_open(inode, file); 647 return nonseekable_open(inode, file);
648 } 648 }
649 649
650 /* 650 /*
651 * Read as many bytes from the mailbox as possible, until 651 * Read as many bytes from the mailbox as possible, until
652 * one of the conditions becomes true: 652 * one of the conditions becomes true:
653 * 653 *
654 * - no more data available in the mailbox 654 * - no more data available in the mailbox
655 * - end of the user provided buffer 655 * - end of the user provided buffer
656 * - end of the mapped area 656 * - end of the mapped area
657 */ 657 */
658 static ssize_t spufs_mbox_read(struct file *file, char __user *buf, 658 static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
659 size_t len, loff_t *pos) 659 size_t len, loff_t *pos)
660 { 660 {
661 struct spu_context *ctx = file->private_data; 661 struct spu_context *ctx = file->private_data;
662 u32 mbox_data, __user *udata; 662 u32 mbox_data, __user *udata;
663 ssize_t count; 663 ssize_t count;
664 664
665 if (len < 4) 665 if (len < 4)
666 return -EINVAL; 666 return -EINVAL;
667 667
668 if (!access_ok(VERIFY_WRITE, buf, len)) 668 if (!access_ok(VERIFY_WRITE, buf, len))
669 return -EFAULT; 669 return -EFAULT;
670 670
671 udata = (void __user *)buf; 671 udata = (void __user *)buf;
672 672
673 count = spu_acquire(ctx); 673 count = spu_acquire(ctx);
674 if (count) 674 if (count)
675 return count; 675 return count;
676 676
677 for (count = 0; (count + 4) <= len; count += 4, udata++) { 677 for (count = 0; (count + 4) <= len; count += 4, udata++) {
678 int ret; 678 int ret;
679 ret = ctx->ops->mbox_read(ctx, &mbox_data); 679 ret = ctx->ops->mbox_read(ctx, &mbox_data);
680 if (ret == 0) 680 if (ret == 0)
681 break; 681 break;
682 682
683 /* 683 /*
684 * at the end of the mapped area, we can fault 684 * at the end of the mapped area, we can fault
685 * but still need to return the data we have 685 * but still need to return the data we have
686 * read successfully so far. 686 * read successfully so far.
687 */ 687 */
688 ret = __put_user(mbox_data, udata); 688 ret = __put_user(mbox_data, udata);
689 if (ret) { 689 if (ret) {
690 if (!count) 690 if (!count)
691 count = -EFAULT; 691 count = -EFAULT;
692 break; 692 break;
693 } 693 }
694 } 694 }
695 spu_release(ctx); 695 spu_release(ctx);
696 696
697 if (!count) 697 if (!count)
698 count = -EAGAIN; 698 count = -EAGAIN;
699 699
700 return count; 700 return count;
701 } 701 }
702 702
703 static const struct file_operations spufs_mbox_fops = { 703 static const struct file_operations spufs_mbox_fops = {
704 .open = spufs_pipe_open, 704 .open = spufs_pipe_open,
705 .read = spufs_mbox_read, 705 .read = spufs_mbox_read,
706 .llseek = no_llseek, 706 .llseek = no_llseek,
707 }; 707 };
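
spufs_mbox_read() above drains the SPU-to-PPE mailbox four bytes at a time and returns -EAGAIN when the mailbox is empty, so the "mbox" file behaves like a non-blocking pipe regardless of O_NONBLOCK. A hedged user-space sketch of that convention; the retry interval is arbitrary and the descriptor is assumed to be an already-open "mbox" file.

/* Hedged sketch: poll the "mbox" file until one 32-bit entry arrives.
 * spufs_mbox_read() returns -EAGAIN when nothing is queued, so retries
 * are expected. */
#include <errno.h>
#include <stdint.h>
#include <unistd.h>

static int read_one_mbox_word(int mbox_fd, uint32_t *val)
{
	for (;;) {
		ssize_t n = read(mbox_fd, val, sizeof(*val));

		if (n == sizeof(*val))
			return 0;		/* got one entry */
		if (n < 0 && errno != EAGAIN)
			return -1;		/* real error */
		usleep(1000);			/* mailbox empty, retry */
	}
}
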
708 708
709 static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf, 709 static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
710 size_t len, loff_t *pos) 710 size_t len, loff_t *pos)
711 { 711 {
712 struct spu_context *ctx = file->private_data; 712 struct spu_context *ctx = file->private_data;
713 ssize_t ret; 713 ssize_t ret;
714 u32 mbox_stat; 714 u32 mbox_stat;
715 715
716 if (len < 4) 716 if (len < 4)
717 return -EINVAL; 717 return -EINVAL;
718 718
719 ret = spu_acquire(ctx); 719 ret = spu_acquire(ctx);
720 if (ret) 720 if (ret)
721 return ret; 721 return ret;
722 722
723 mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff; 723 mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
724 724
725 spu_release(ctx); 725 spu_release(ctx);
726 726
727 if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat)) 727 if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
728 return -EFAULT; 728 return -EFAULT;
729 729
730 return 4; 730 return 4;
731 } 731 }
732 732
733 static const struct file_operations spufs_mbox_stat_fops = { 733 static const struct file_operations spufs_mbox_stat_fops = {
734 .open = spufs_pipe_open, 734 .open = spufs_pipe_open,
735 .read = spufs_mbox_stat_read, 735 .read = spufs_mbox_stat_read,
736 .llseek = no_llseek, 736 .llseek = no_llseek,
737 }; 737 };
738 738
739 /* low-level ibox access function */ 739 /* low-level ibox access function */
740 size_t spu_ibox_read(struct spu_context *ctx, u32 *data) 740 size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
741 { 741 {
742 return ctx->ops->ibox_read(ctx, data); 742 return ctx->ops->ibox_read(ctx, data);
743 } 743 }
744 744
745 static int spufs_ibox_fasync(int fd, struct file *file, int on) 745 static int spufs_ibox_fasync(int fd, struct file *file, int on)
746 { 746 {
747 struct spu_context *ctx = file->private_data; 747 struct spu_context *ctx = file->private_data;
748 748
749 return fasync_helper(fd, file, on, &ctx->ibox_fasync); 749 return fasync_helper(fd, file, on, &ctx->ibox_fasync);
750 } 750 }
751 751
752 /* interrupt-level ibox callback function. */ 752 /* interrupt-level ibox callback function. */
753 void spufs_ibox_callback(struct spu *spu) 753 void spufs_ibox_callback(struct spu *spu)
754 { 754 {
755 struct spu_context *ctx = spu->ctx; 755 struct spu_context *ctx = spu->ctx;
756 756
757 if (!ctx) 757 if (!ctx)
758 return; 758 return;
759 759
760 wake_up_all(&ctx->ibox_wq); 760 wake_up_all(&ctx->ibox_wq);
761 kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN); 761 kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
762 } 762 }
763 763
764 /* 764 /*
765 * Read as many bytes from the interrupt mailbox as possible, until 765 * Read as many bytes from the interrupt mailbox as possible, until
766 * one of the conditions becomes true: 766 * one of the conditions becomes true:
767 * 767 *
768 * - no more data available in the mailbox 768 * - no more data available in the mailbox
769 * - end of the user provided buffer 769 * - end of the user provided buffer
770 * - end of the mapped area 770 * - end of the mapped area
771 * 771 *
772 * If the file is opened without O_NONBLOCK, we wait here until 772 * If the file is opened without O_NONBLOCK, we wait here until
773 * any data is available, but return when we have been able to 773 * any data is available, but return when we have been able to
774 * read something. 774 * read something.
775 */ 775 */
776 static ssize_t spufs_ibox_read(struct file *file, char __user *buf, 776 static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
777 size_t len, loff_t *pos) 777 size_t len, loff_t *pos)
778 { 778 {
779 struct spu_context *ctx = file->private_data; 779 struct spu_context *ctx = file->private_data;
780 u32 ibox_data, __user *udata; 780 u32 ibox_data, __user *udata;
781 ssize_t count; 781 ssize_t count;
782 782
783 if (len < 4) 783 if (len < 4)
784 return -EINVAL; 784 return -EINVAL;
785 785
786 if (!access_ok(VERIFY_WRITE, buf, len)) 786 if (!access_ok(VERIFY_WRITE, buf, len))
787 return -EFAULT; 787 return -EFAULT;
788 788
789 udata = (void __user *)buf; 789 udata = (void __user *)buf;
790 790
791 count = spu_acquire(ctx); 791 count = spu_acquire(ctx);
792 if (count) 792 if (count)
793 goto out; 793 goto out;
794 794
795 /* wait only for the first element */ 795 /* wait only for the first element */
796 count = 0; 796 count = 0;
797 if (file->f_flags & O_NONBLOCK) { 797 if (file->f_flags & O_NONBLOCK) {
798 if (!spu_ibox_read(ctx, &ibox_data)) { 798 if (!spu_ibox_read(ctx, &ibox_data)) {
799 count = -EAGAIN; 799 count = -EAGAIN;
800 goto out_unlock; 800 goto out_unlock;
801 } 801 }
802 } else { 802 } else {
803 count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data)); 803 count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
804 if (count) 804 if (count)
805 goto out; 805 goto out;
806 } 806 }
807 807
808 /* if we can't write at all, return -EFAULT */ 808 /* if we can't write at all, return -EFAULT */
809 count = __put_user(ibox_data, udata); 809 count = __put_user(ibox_data, udata);
810 if (count) 810 if (count)
811 goto out_unlock; 811 goto out_unlock;
812 812
813 for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) { 813 for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
814 int ret; 814 int ret;
815 ret = ctx->ops->ibox_read(ctx, &ibox_data); 815 ret = ctx->ops->ibox_read(ctx, &ibox_data);
816 if (ret == 0) 816 if (ret == 0)
817 break; 817 break;
818 /* 818 /*
819 * at the end of the mapped area, we can fault 819 * at the end of the mapped area, we can fault
820 * but still need to return the data we have 820 * but still need to return the data we have
821 * read successfully so far. 821 * read successfully so far.
822 */ 822 */
823 ret = __put_user(ibox_data, udata); 823 ret = __put_user(ibox_data, udata);
824 if (ret) 824 if (ret)
825 break; 825 break;
826 } 826 }
827 827
828 out_unlock: 828 out_unlock:
829 spu_release(ctx); 829 spu_release(ctx);
830 out: 830 out:
831 return count; 831 return count;
832 } 832 }
833 833
834 static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait) 834 static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
835 { 835 {
836 struct spu_context *ctx = file->private_data; 836 struct spu_context *ctx = file->private_data;
837 unsigned int mask; 837 unsigned int mask;
838 838
839 poll_wait(file, &ctx->ibox_wq, wait); 839 poll_wait(file, &ctx->ibox_wq, wait);
840 840
841 /* 841 /*
842 * For now keep this uninterruptible and also ignore the rule 842 * For now keep this uninterruptible and also ignore the rule
843 * that poll should not sleep. Will be fixed later. 843 * that poll should not sleep. Will be fixed later.
844 */ 844 */
845 mutex_lock(&ctx->state_mutex); 845 mutex_lock(&ctx->state_mutex);
846 mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM); 846 mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
847 spu_release(ctx); 847 spu_release(ctx);
848 848
849 return mask; 849 return mask;
850 } 850 }
851 851
852 static const struct file_operations spufs_ibox_fops = { 852 static const struct file_operations spufs_ibox_fops = {
853 .open = spufs_pipe_open, 853 .open = spufs_pipe_open,
854 .read = spufs_ibox_read, 854 .read = spufs_ibox_read,
855 .poll = spufs_ibox_poll, 855 .poll = spufs_ibox_poll,
856 .fasync = spufs_ibox_fasync, 856 .fasync = spufs_ibox_fasync,
857 .llseek = no_llseek, 857 .llseek = no_llseek,
858 }; 858 };
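
Unlike "mbox", the interrupt-mailbox read above blocks (via spufs_wait() on ctx->ibox_wq) until at least one entry is available, unless the file was opened with O_NONBLOCK; it also supports poll() and SIGIO delivery through fasync. A hedged sketch using poll() on an already-open "ibox" descriptor:

/* Hedged sketch: wait for interrupt-mailbox data with poll(), matching the
 * POLLIN | POLLRDNORM mask used by spufs_ibox_poll() above. */
#include <poll.h>
#include <stdint.h>
#include <unistd.h>

static int wait_and_read_ibox(int ibox_fd, uint32_t *val, int timeout_ms)
{
	struct pollfd pfd = { .fd = ibox_fd, .events = POLLIN };
	int ret = poll(&pfd, 1, timeout_ms);

	if (ret <= 0)
		return ret;			/* timeout or error */
	/* a 4-byte read should now complete without blocking */
	return read(ibox_fd, val, sizeof(*val)) == sizeof(*val) ? 1 : -1;
}
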
859 859
860 static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf, 860 static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
861 size_t len, loff_t *pos) 861 size_t len, loff_t *pos)
862 { 862 {
863 struct spu_context *ctx = file->private_data; 863 struct spu_context *ctx = file->private_data;
864 ssize_t ret; 864 ssize_t ret;
865 u32 ibox_stat; 865 u32 ibox_stat;
866 866
867 if (len < 4) 867 if (len < 4)
868 return -EINVAL; 868 return -EINVAL;
869 869
870 ret = spu_acquire(ctx); 870 ret = spu_acquire(ctx);
871 if (ret) 871 if (ret)
872 return ret; 872 return ret;
873 ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff; 873 ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
874 spu_release(ctx); 874 spu_release(ctx);
875 875
876 if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat)) 876 if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
877 return -EFAULT; 877 return -EFAULT;
878 878
879 return 4; 879 return 4;
880 } 880 }
881 881
882 static const struct file_operations spufs_ibox_stat_fops = { 882 static const struct file_operations spufs_ibox_stat_fops = {
883 .open = spufs_pipe_open, 883 .open = spufs_pipe_open,
884 .read = spufs_ibox_stat_read, 884 .read = spufs_ibox_stat_read,
885 .llseek = no_llseek, 885 .llseek = no_llseek,
886 }; 886 };
887 887
888 /* low-level mailbox write */ 888 /* low-level mailbox write */
889 size_t spu_wbox_write(struct spu_context *ctx, u32 data) 889 size_t spu_wbox_write(struct spu_context *ctx, u32 data)
890 { 890 {
891 return ctx->ops->wbox_write(ctx, data); 891 return ctx->ops->wbox_write(ctx, data);
892 } 892 }
893 893
894 static int spufs_wbox_fasync(int fd, struct file *file, int on) 894 static int spufs_wbox_fasync(int fd, struct file *file, int on)
895 { 895 {
896 struct spu_context *ctx = file->private_data; 896 struct spu_context *ctx = file->private_data;
897 int ret; 897 int ret;
898 898
899 ret = fasync_helper(fd, file, on, &ctx->wbox_fasync); 899 ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
900 900
901 return ret; 901 return ret;
902 } 902 }
903 903
904 /* interrupt-level wbox callback function. */ 904 /* interrupt-level wbox callback function. */
905 void spufs_wbox_callback(struct spu *spu) 905 void spufs_wbox_callback(struct spu *spu)
906 { 906 {
907 struct spu_context *ctx = spu->ctx; 907 struct spu_context *ctx = spu->ctx;
908 908
909 if (!ctx) 909 if (!ctx)
910 return; 910 return;
911 911
912 wake_up_all(&ctx->wbox_wq); 912 wake_up_all(&ctx->wbox_wq);
913 kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT); 913 kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
914 } 914 }
915 915
916 /* 916 /*
917 * Write as many bytes to the interrupt mailbox as possible, until 917 * Write as many bytes to the interrupt mailbox as possible, until
918 * one of the conditions becomes true: 918 * one of the conditions becomes true:
919 * 919 *
920 * - the mailbox is full 920 * - the mailbox is full
921 * - end of the user provided buffer 921 * - end of the user provided buffer
922 * - end of the mapped area 922 * - end of the mapped area
923 * 923 *
924 * If the file is opened without O_NONBLOCK, we wait here until 924 * If the file is opened without O_NONBLOCK, we wait here until
925 * space is available, but return when we have been able to 925 * space is available, but return when we have been able to

926 * write something. 926 * write something.
927 */ 927 */
928 static ssize_t spufs_wbox_write(struct file *file, const char __user *buf, 928 static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
929 size_t len, loff_t *pos) 929 size_t len, loff_t *pos)
930 { 930 {
931 struct spu_context *ctx = file->private_data; 931 struct spu_context *ctx = file->private_data;
932 u32 wbox_data, __user *udata; 932 u32 wbox_data, __user *udata;
933 ssize_t count; 933 ssize_t count;
934 934
935 if (len < 4) 935 if (len < 4)
936 return -EINVAL; 936 return -EINVAL;
937 937
938 udata = (void __user *)buf; 938 udata = (void __user *)buf;
939 if (!access_ok(VERIFY_READ, buf, len)) 939 if (!access_ok(VERIFY_READ, buf, len))
940 return -EFAULT; 940 return -EFAULT;
941 941
942 if (__get_user(wbox_data, udata)) 942 if (__get_user(wbox_data, udata))
943 return -EFAULT; 943 return -EFAULT;
944 944
945 count = spu_acquire(ctx); 945 count = spu_acquire(ctx);
946 if (count) 946 if (count)
947 goto out; 947 goto out;
948 948
949 /* 949 /*
950 * make sure we can at least write one element, by waiting 950 * make sure we can at least write one element, by waiting
951 * in case of !O_NONBLOCK 951 * in case of !O_NONBLOCK
952 */ 952 */
953 count = 0; 953 count = 0;
954 if (file->f_flags & O_NONBLOCK) { 954 if (file->f_flags & O_NONBLOCK) {
955 if (!spu_wbox_write(ctx, wbox_data)) { 955 if (!spu_wbox_write(ctx, wbox_data)) {
956 count = -EAGAIN; 956 count = -EAGAIN;
957 goto out_unlock; 957 goto out_unlock;
958 } 958 }
959 } else { 959 } else {
960 count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data)); 960 count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
961 if (count) 961 if (count)
962 goto out; 962 goto out;
963 } 963 }
964 964
965 965
966 /* write as much as possible */ 966 /* write as much as possible */
967 for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) { 967 for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
968 int ret; 968 int ret;
969 ret = __get_user(wbox_data, udata); 969 ret = __get_user(wbox_data, udata);
970 if (ret) 970 if (ret)
971 break; 971 break;
972 972
973 ret = spu_wbox_write(ctx, wbox_data); 973 ret = spu_wbox_write(ctx, wbox_data);
974 if (ret == 0) 974 if (ret == 0)
975 break; 975 break;
976 } 976 }
977 977
978 out_unlock: 978 out_unlock:
979 spu_release(ctx); 979 spu_release(ctx);
980 out: 980 out:
981 return count; 981 return count;
982 } 982 }
983 983
984 static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait) 984 static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
985 { 985 {
986 struct spu_context *ctx = file->private_data; 986 struct spu_context *ctx = file->private_data;
987 unsigned int mask; 987 unsigned int mask;
988 988
989 poll_wait(file, &ctx->wbox_wq, wait); 989 poll_wait(file, &ctx->wbox_wq, wait);
990 990
991 /* 991 /*
992 * For now keep this uninterruptible and also ignore the rule 992 * For now keep this uninterruptible and also ignore the rule
993 * that poll should not sleep. Will be fixed later. 993 * that poll should not sleep. Will be fixed later.
994 */ 994 */
995 mutex_lock(&ctx->state_mutex); 995 mutex_lock(&ctx->state_mutex);
996 mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM); 996 mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
997 spu_release(ctx); 997 spu_release(ctx);
998 998
999 return mask; 999 return mask;
1000 } 1000 }
1001 1001
1002 static const struct file_operations spufs_wbox_fops = { 1002 static const struct file_operations spufs_wbox_fops = {
1003 .open = spufs_pipe_open, 1003 .open = spufs_pipe_open,
1004 .write = spufs_wbox_write, 1004 .write = spufs_wbox_write,
1005 .poll = spufs_wbox_poll, 1005 .poll = spufs_wbox_poll,
1006 .fasync = spufs_wbox_fasync, 1006 .fasync = spufs_wbox_fasync,
1007 .llseek = no_llseek, 1007 .llseek = no_llseek,
1008 }; 1008 };
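
The write path above mirrors the ibox read path: without O_NONBLOCK the first 32-bit word blocks until the PPE-to-SPU mailbox has space, and further words are accepted only while space remains, so the return value can be shorter than the request. A hedged sketch that resubmits the tail until a whole buffer has been queued:

/* Hedged sketch: push an array of 32-bit words into the "wbox" file,
 * resubmitting the remainder whenever spufs_wbox_write() accepts only
 * part of the buffer. */
#include <errno.h>
#include <stdint.h>
#include <unistd.h>

static int write_wbox_words(int wbox_fd, const uint32_t *words, size_t count)
{
	size_t done = 0;

	while (done < count) {
		ssize_t n = write(wbox_fd, &words[done],
				  (count - done) * sizeof(uint32_t));

		if (n < 0) {
			if (errno == EAGAIN)	/* O_NONBLOCK: mailbox full, retry (or poll()) */
				continue;
			return -1;
		}
		done += n / sizeof(uint32_t);	/* partial writes are normal */
	}
	return 0;
}
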
1009 1009
1010 static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf, 1010 static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
1011 size_t len, loff_t *pos) 1011 size_t len, loff_t *pos)
1012 { 1012 {
1013 struct spu_context *ctx = file->private_data; 1013 struct spu_context *ctx = file->private_data;
1014 ssize_t ret; 1014 ssize_t ret;
1015 u32 wbox_stat; 1015 u32 wbox_stat;
1016 1016
1017 if (len < 4) 1017 if (len < 4)
1018 return -EINVAL; 1018 return -EINVAL;
1019 1019
1020 ret = spu_acquire(ctx); 1020 ret = spu_acquire(ctx);
1021 if (ret) 1021 if (ret)
1022 return ret; 1022 return ret;
1023 wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff; 1023 wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
1024 spu_release(ctx); 1024 spu_release(ctx);
1025 1025
1026 if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat)) 1026 if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
1027 return -EFAULT; 1027 return -EFAULT;
1028 1028
1029 return 4; 1029 return 4;
1030 } 1030 }
1031 1031
1032 static const struct file_operations spufs_wbox_stat_fops = { 1032 static const struct file_operations spufs_wbox_stat_fops = {
1033 .open = spufs_pipe_open, 1033 .open = spufs_pipe_open,
1034 .read = spufs_wbox_stat_read, 1034 .read = spufs_wbox_stat_read,
1035 .llseek = no_llseek, 1035 .llseek = no_llseek,
1036 }; 1036 };
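
Taken together, the three *_stat_read() helpers above show how the single mailbox status word is packed: bits 0-7 are read by "mbox_stat", bits 8-15 by "wbox_stat" and bits 16-23 by "ibox_stat". The shifts and masks below are exactly the ones used above; the struct and field names are illustrative only, and the interpretation of each byte follows the hardware mailbox-status register.

/* Hedged sketch: split a raw mailbox status word the same way the
 * mbox_stat/wbox_stat/ibox_stat reads above do. */
#include <stdint.h>

struct spu_mbox_counts {		/* illustrative name, not a kernel type */
	uint8_t mbox_count;		/* stat & 0xff */
	uint8_t wbox_count;		/* (stat >> 8) & 0xff */
	uint8_t ibox_count;		/* (stat >> 16) & 0xff */
};

static struct spu_mbox_counts decode_mbox_stat(uint32_t stat)
{
	struct spu_mbox_counts c = {
		.mbox_count = stat & 0xff,
		.wbox_count = (stat >> 8) & 0xff,
		.ibox_count = (stat >> 16) & 0xff,
	};
	return c;
}
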
1037 1037
1038 static int spufs_signal1_open(struct inode *inode, struct file *file) 1038 static int spufs_signal1_open(struct inode *inode, struct file *file)
1039 { 1039 {
1040 struct spufs_inode_info *i = SPUFS_I(inode); 1040 struct spufs_inode_info *i = SPUFS_I(inode);
1041 struct spu_context *ctx = i->i_ctx; 1041 struct spu_context *ctx = i->i_ctx;
1042 1042
1043 mutex_lock(&ctx->mapping_lock); 1043 mutex_lock(&ctx->mapping_lock);
1044 file->private_data = ctx; 1044 file->private_data = ctx;
1045 if (!i->i_openers++) 1045 if (!i->i_openers++)
1046 ctx->signal1 = inode->i_mapping; 1046 ctx->signal1 = inode->i_mapping;
1047 mutex_unlock(&ctx->mapping_lock); 1047 mutex_unlock(&ctx->mapping_lock);
1048 return nonseekable_open(inode, file); 1048 return nonseekable_open(inode, file);
1049 } 1049 }
1050 1050
1051 static int 1051 static int
1052 spufs_signal1_release(struct inode *inode, struct file *file) 1052 spufs_signal1_release(struct inode *inode, struct file *file)
1053 { 1053 {
1054 struct spufs_inode_info *i = SPUFS_I(inode); 1054 struct spufs_inode_info *i = SPUFS_I(inode);
1055 struct spu_context *ctx = i->i_ctx; 1055 struct spu_context *ctx = i->i_ctx;
1056 1056
1057 mutex_lock(&ctx->mapping_lock); 1057 mutex_lock(&ctx->mapping_lock);
1058 if (!--i->i_openers) 1058 if (!--i->i_openers)
1059 ctx->signal1 = NULL; 1059 ctx->signal1 = NULL;
1060 mutex_unlock(&ctx->mapping_lock); 1060 mutex_unlock(&ctx->mapping_lock);
1061 return 0; 1061 return 0;
1062 } 1062 }
1063 1063
1064 static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf, 1064 static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
1065 size_t len, loff_t *pos) 1065 size_t len, loff_t *pos)
1066 { 1066 {
1067 int ret = 0; 1067 int ret = 0;
1068 u32 data; 1068 u32 data;
1069 1069
1070 if (len < 4) 1070 if (len < 4)
1071 return -EINVAL; 1071 return -EINVAL;
1072 1072
1073 if (ctx->csa.spu_chnlcnt_RW[3]) { 1073 if (ctx->csa.spu_chnlcnt_RW[3]) {
1074 data = ctx->csa.spu_chnldata_RW[3]; 1074 data = ctx->csa.spu_chnldata_RW[3];
1075 ret = 4; 1075 ret = 4;
1076 } 1076 }
1077 1077
1078 if (!ret) 1078 if (!ret)
1079 goto out; 1079 goto out;
1080 1080
1081 if (copy_to_user(buf, &data, 4)) 1081 if (copy_to_user(buf, &data, 4))
1082 return -EFAULT; 1082 return -EFAULT;
1083 1083
1084 out: 1084 out:
1085 return ret; 1085 return ret;
1086 } 1086 }
1087 1087
1088 static ssize_t spufs_signal1_read(struct file *file, char __user *buf, 1088 static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
1089 size_t len, loff_t *pos) 1089 size_t len, loff_t *pos)
1090 { 1090 {
1091 int ret; 1091 int ret;
1092 struct spu_context *ctx = file->private_data; 1092 struct spu_context *ctx = file->private_data;
1093 1093
1094 ret = spu_acquire_saved(ctx); 1094 ret = spu_acquire_saved(ctx);
1095 if (ret) 1095 if (ret)
1096 return ret; 1096 return ret;
1097 ret = __spufs_signal1_read(ctx, buf, len, pos); 1097 ret = __spufs_signal1_read(ctx, buf, len, pos);
1098 spu_release_saved(ctx); 1098 spu_release_saved(ctx);
1099 1099
1100 return ret; 1100 return ret;
1101 } 1101 }
1102 1102
1103 static ssize_t spufs_signal1_write(struct file *file, const char __user *buf, 1103 static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
1104 size_t len, loff_t *pos) 1104 size_t len, loff_t *pos)
1105 { 1105 {
1106 struct spu_context *ctx; 1106 struct spu_context *ctx;
1107 ssize_t ret; 1107 ssize_t ret;
1108 u32 data; 1108 u32 data;
1109 1109
1110 ctx = file->private_data; 1110 ctx = file->private_data;
1111 1111
1112 if (len < 4) 1112 if (len < 4)
1113 return -EINVAL; 1113 return -EINVAL;
1114 1114
1115 if (copy_from_user(&data, buf, 4)) 1115 if (copy_from_user(&data, buf, 4))
1116 return -EFAULT; 1116 return -EFAULT;
1117 1117
1118 ret = spu_acquire(ctx); 1118 ret = spu_acquire(ctx);
1119 if (ret) 1119 if (ret)
1120 return ret; 1120 return ret;
1121 ctx->ops->signal1_write(ctx, data); 1121 ctx->ops->signal1_write(ctx, data);
1122 spu_release(ctx); 1122 spu_release(ctx);
1123 1123
1124 return 4; 1124 return 4;
1125 } 1125 }
1126 1126
1127 static int 1127 static int
1128 spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1128 spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1129 { 1129 {
1130 #if SPUFS_SIGNAL_MAP_SIZE == 0x1000 1130 #if SPUFS_SIGNAL_MAP_SIZE == 0x1000
1131 return spufs_ps_fault(vma, vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE); 1131 return spufs_ps_fault(vma, vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
1132 #elif SPUFS_SIGNAL_MAP_SIZE == 0x10000 1132 #elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
1133 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole 1133 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
1134 * signal 1 and 2 area 1134 * signal 1 and 2 area
1135 */ 1135 */
1136 return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE); 1136 return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
1137 #else 1137 #else
1138 #error unsupported page size 1138 #error unsupported page size
1139 #endif 1139 #endif
1140 } 1140 }
1141 1141
1142 static const struct vm_operations_struct spufs_signal1_mmap_vmops = { 1142 static const struct vm_operations_struct spufs_signal1_mmap_vmops = {
1143 .fault = spufs_signal1_mmap_fault, 1143 .fault = spufs_signal1_mmap_fault,
1144 }; 1144 };
1145 1145
1146 static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma) 1146 static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
1147 { 1147 {
1148 if (!(vma->vm_flags & VM_SHARED)) 1148 if (!(vma->vm_flags & VM_SHARED))
1149 return -EINVAL; 1149 return -EINVAL;
1150 1150
1151 vma->vm_flags |= VM_IO | VM_PFNMAP; 1151 vma->vm_flags |= VM_IO | VM_PFNMAP;
1152 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 1152 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1153 1153
1154 vma->vm_ops = &spufs_signal1_mmap_vmops; 1154 vma->vm_ops = &spufs_signal1_mmap_vmops;
1155 return 0; 1155 return 0;
1156 } 1156 }
1157 1157
1158 static const struct file_operations spufs_signal1_fops = { 1158 static const struct file_operations spufs_signal1_fops = {
1159 .open = spufs_signal1_open, 1159 .open = spufs_signal1_open,
1160 .release = spufs_signal1_release, 1160 .release = spufs_signal1_release,
1161 .read = spufs_signal1_read, 1161 .read = spufs_signal1_read,
1162 .write = spufs_signal1_write, 1162 .write = spufs_signal1_write,
1163 .mmap = spufs_signal1_mmap, 1163 .mmap = spufs_signal1_mmap,
1164 .llseek = no_llseek, 1164 .llseek = no_llseek,
1165 }; 1165 };
1166 1166
1167 static const struct file_operations spufs_signal1_nosched_fops = { 1167 static const struct file_operations spufs_signal1_nosched_fops = {
1168 .open = spufs_signal1_open, 1168 .open = spufs_signal1_open,
1169 .release = spufs_signal1_release, 1169 .release = spufs_signal1_release,
1170 .write = spufs_signal1_write, 1170 .write = spufs_signal1_write,
1171 .mmap = spufs_signal1_mmap, 1171 .mmap = spufs_signal1_mmap,
1172 .llseek = no_llseek, 1172 .llseek = no_llseek,
1173 }; 1173 };
1174 1174
1175 static int spufs_signal2_open(struct inode *inode, struct file *file) 1175 static int spufs_signal2_open(struct inode *inode, struct file *file)
1176 { 1176 {
1177 struct spufs_inode_info *i = SPUFS_I(inode); 1177 struct spufs_inode_info *i = SPUFS_I(inode);
1178 struct spu_context *ctx = i->i_ctx; 1178 struct spu_context *ctx = i->i_ctx;
1179 1179
1180 mutex_lock(&ctx->mapping_lock); 1180 mutex_lock(&ctx->mapping_lock);
1181 file->private_data = ctx; 1181 file->private_data = ctx;
1182 if (!i->i_openers++) 1182 if (!i->i_openers++)
1183 ctx->signal2 = inode->i_mapping; 1183 ctx->signal2 = inode->i_mapping;
1184 mutex_unlock(&ctx->mapping_lock); 1184 mutex_unlock(&ctx->mapping_lock);
1185 return nonseekable_open(inode, file); 1185 return nonseekable_open(inode, file);
1186 } 1186 }
1187 1187
1188 static int 1188 static int
1189 spufs_signal2_release(struct inode *inode, struct file *file) 1189 spufs_signal2_release(struct inode *inode, struct file *file)
1190 { 1190 {
1191 struct spufs_inode_info *i = SPUFS_I(inode); 1191 struct spufs_inode_info *i = SPUFS_I(inode);
1192 struct spu_context *ctx = i->i_ctx; 1192 struct spu_context *ctx = i->i_ctx;
1193 1193
1194 mutex_lock(&ctx->mapping_lock); 1194 mutex_lock(&ctx->mapping_lock);
1195 if (!--i->i_openers) 1195 if (!--i->i_openers)
1196 ctx->signal2 = NULL; 1196 ctx->signal2 = NULL;
1197 mutex_unlock(&ctx->mapping_lock); 1197 mutex_unlock(&ctx->mapping_lock);
1198 return 0; 1198 return 0;
1199 } 1199 }
1200 1200
1201 static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf, 1201 static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
1202 size_t len, loff_t *pos) 1202 size_t len, loff_t *pos)
1203 { 1203 {
1204 int ret = 0; 1204 int ret = 0;
1205 u32 data; 1205 u32 data;
1206 1206
1207 if (len < 4) 1207 if (len < 4)
1208 return -EINVAL; 1208 return -EINVAL;
1209 1209
1210 if (ctx->csa.spu_chnlcnt_RW[4]) { 1210 if (ctx->csa.spu_chnlcnt_RW[4]) {
1211 data = ctx->csa.spu_chnldata_RW[4]; 1211 data = ctx->csa.spu_chnldata_RW[4];
1212 ret = 4; 1212 ret = 4;
1213 } 1213 }
1214 1214
1215 if (!ret) 1215 if (!ret)
1216 goto out; 1216 goto out;
1217 1217
1218 if (copy_to_user(buf, &data, 4)) 1218 if (copy_to_user(buf, &data, 4))
1219 return -EFAULT; 1219 return -EFAULT;
1220 1220
1221 out: 1221 out:
1222 return ret; 1222 return ret;
1223 } 1223 }
1224 1224
1225 static ssize_t spufs_signal2_read(struct file *file, char __user *buf, 1225 static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
1226 size_t len, loff_t *pos) 1226 size_t len, loff_t *pos)
1227 { 1227 {
1228 struct spu_context *ctx = file->private_data; 1228 struct spu_context *ctx = file->private_data;
1229 int ret; 1229 int ret;
1230 1230
1231 ret = spu_acquire_saved(ctx); 1231 ret = spu_acquire_saved(ctx);
1232 if (ret) 1232 if (ret)
1233 return ret; 1233 return ret;
1234 ret = __spufs_signal2_read(ctx, buf, len, pos); 1234 ret = __spufs_signal2_read(ctx, buf, len, pos);
1235 spu_release_saved(ctx); 1235 spu_release_saved(ctx);
1236 1236
1237 return ret; 1237 return ret;
1238 } 1238 }
1239 1239
1240 static ssize_t spufs_signal2_write(struct file *file, const char __user *buf, 1240 static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
1241 size_t len, loff_t *pos) 1241 size_t len, loff_t *pos)
1242 { 1242 {
1243 struct spu_context *ctx; 1243 struct spu_context *ctx;
1244 ssize_t ret; 1244 ssize_t ret;
1245 u32 data; 1245 u32 data;
1246 1246
1247 ctx = file->private_data; 1247 ctx = file->private_data;
1248 1248
1249 if (len < 4) 1249 if (len < 4)
1250 return -EINVAL; 1250 return -EINVAL;
1251 1251
1252 if (copy_from_user(&data, buf, 4)) 1252 if (copy_from_user(&data, buf, 4))
1253 return -EFAULT; 1253 return -EFAULT;
1254 1254
1255 ret = spu_acquire(ctx); 1255 ret = spu_acquire(ctx);
1256 if (ret) 1256 if (ret)
1257 return ret; 1257 return ret;
1258 ctx->ops->signal2_write(ctx, data); 1258 ctx->ops->signal2_write(ctx, data);
1259 spu_release(ctx); 1259 spu_release(ctx);
1260 1260
1261 return 4; 1261 return 4;
1262 } 1262 }
1263 1263
1264 #if SPUFS_MMAP_4K 1264 #if SPUFS_MMAP_4K
1265 static int 1265 static int
1266 spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1266 spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1267 { 1267 {
1268 #if SPUFS_SIGNAL_MAP_SIZE == 0x1000 1268 #if SPUFS_SIGNAL_MAP_SIZE == 0x1000
1269 return spufs_ps_fault(vma, vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE); 1269 return spufs_ps_fault(vma, vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
1270 #elif SPUFS_SIGNAL_MAP_SIZE == 0x10000 1270 #elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
1271 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole 1271 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
1272 * signal 1 and 2 area 1272 * signal 1 and 2 area
1273 */ 1273 */
1274 return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE); 1274 return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
1275 #else 1275 #else
1276 #error unsupported page size 1276 #error unsupported page size
1277 #endif 1277 #endif
1278 } 1278 }
1279 1279
1280 static const struct vm_operations_struct spufs_signal2_mmap_vmops = { 1280 static const struct vm_operations_struct spufs_signal2_mmap_vmops = {
1281 .fault = spufs_signal2_mmap_fault, 1281 .fault = spufs_signal2_mmap_fault,
1282 }; 1282 };
1283 1283
1284 static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma) 1284 static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
1285 { 1285 {
1286 if (!(vma->vm_flags & VM_SHARED)) 1286 if (!(vma->vm_flags & VM_SHARED))
1287 return -EINVAL; 1287 return -EINVAL;
1288 1288
1289 vma->vm_flags |= VM_IO | VM_PFNMAP; 1289 vma->vm_flags |= VM_IO | VM_PFNMAP;
1290 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 1290 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1291 1291
1292 vma->vm_ops = &spufs_signal2_mmap_vmops; 1292 vma->vm_ops = &spufs_signal2_mmap_vmops;
1293 return 0; 1293 return 0;
1294 } 1294 }
1295 #else /* SPUFS_MMAP_4K */ 1295 #else /* SPUFS_MMAP_4K */
1296 #define spufs_signal2_mmap NULL 1296 #define spufs_signal2_mmap NULL
1297 #endif /* !SPUFS_MMAP_4K */ 1297 #endif /* !SPUFS_MMAP_4K */
1298 1298
1299 static const struct file_operations spufs_signal2_fops = { 1299 static const struct file_operations spufs_signal2_fops = {
1300 .open = spufs_signal2_open, 1300 .open = spufs_signal2_open,
1301 .release = spufs_signal2_release, 1301 .release = spufs_signal2_release,
1302 .read = spufs_signal2_read, 1302 .read = spufs_signal2_read,
1303 .write = spufs_signal2_write, 1303 .write = spufs_signal2_write,
1304 .mmap = spufs_signal2_mmap, 1304 .mmap = spufs_signal2_mmap,
1305 .llseek = no_llseek, 1305 .llseek = no_llseek,
1306 }; 1306 };
1307 1307
1308 static const struct file_operations spufs_signal2_nosched_fops = { 1308 static const struct file_operations spufs_signal2_nosched_fops = {
1309 .open = spufs_signal2_open, 1309 .open = spufs_signal2_open,
1310 .release = spufs_signal2_release, 1310 .release = spufs_signal2_release,
1311 .write = spufs_signal2_write, 1311 .write = spufs_signal2_write,
1312 .mmap = spufs_signal2_mmap, 1312 .mmap = spufs_signal2_mmap,
1313 .llseek = no_llseek, 1313 .llseek = no_llseek,
1314 }; 1314 };
1315 1315
1316 /* 1316 /*
1317 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the 1317 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
1318 * work of acquiring (or not) the SPU context before calling through 1318 * work of acquiring (or not) the SPU context before calling through
1319 * to the actual get routine. The set routine is called directly. 1319 * to the actual get routine. The set routine is called directly.
1320 */ 1320 */
1321 #define SPU_ATTR_NOACQUIRE 0 1321 #define SPU_ATTR_NOACQUIRE 0
1322 #define SPU_ATTR_ACQUIRE 1 1322 #define SPU_ATTR_ACQUIRE 1
1323 #define SPU_ATTR_ACQUIRE_SAVED 2 1323 #define SPU_ATTR_ACQUIRE_SAVED 2
1324 1324
1325 #define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire) \ 1325 #define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire) \
1326 static int __##__get(void *data, u64 *val) \ 1326 static int __##__get(void *data, u64 *val) \
1327 { \ 1327 { \
1328 struct spu_context *ctx = data; \ 1328 struct spu_context *ctx = data; \
1329 int ret = 0; \ 1329 int ret = 0; \
1330 \ 1330 \
1331 if (__acquire == SPU_ATTR_ACQUIRE) { \ 1331 if (__acquire == SPU_ATTR_ACQUIRE) { \
1332 ret = spu_acquire(ctx); \ 1332 ret = spu_acquire(ctx); \
1333 if (ret) \ 1333 if (ret) \
1334 return ret; \ 1334 return ret; \
1335 *val = __get(ctx); \ 1335 *val = __get(ctx); \
1336 spu_release(ctx); \ 1336 spu_release(ctx); \
1337 } else if (__acquire == SPU_ATTR_ACQUIRE_SAVED) { \ 1337 } else if (__acquire == SPU_ATTR_ACQUIRE_SAVED) { \
1338 ret = spu_acquire_saved(ctx); \ 1338 ret = spu_acquire_saved(ctx); \
1339 if (ret) \ 1339 if (ret) \
1340 return ret; \ 1340 return ret; \
1341 *val = __get(ctx); \ 1341 *val = __get(ctx); \
1342 spu_release_saved(ctx); \ 1342 spu_release_saved(ctx); \
1343 } else \ 1343 } else \
1344 *val = __get(ctx); \ 1344 *val = __get(ctx); \
1345 \ 1345 \
1346 return 0; \ 1346 return 0; \
1347 } \ 1347 } \
1348 DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt); 1348 DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
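
For illustration only: applied as DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get, spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE), the macro above generates roughly the following getter wrapper, which is then handed to DEFINE_SPUFS_SIMPLE_ATTRIBUTE together with the unchanged setter. This is a paraphrased, constant-folded expansion, not code added by the commit.

/* Rough hand-expansion of the SPU_ATTR_ACQUIRE case, for illustration. */
static int __spufs_signal1_type_get(void *data, u64 *val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);		/* take the context before reading */
	if (ret)
		return ret;
	*val = spufs_signal1_type_get(ctx);
	spu_release(ctx);

	return 0;
}
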
1349 1349
1350 static int spufs_signal1_type_set(void *data, u64 val) 1350 static int spufs_signal1_type_set(void *data, u64 val)
1351 { 1351 {
1352 struct spu_context *ctx = data; 1352 struct spu_context *ctx = data;
1353 int ret; 1353 int ret;
1354 1354
1355 ret = spu_acquire(ctx); 1355 ret = spu_acquire(ctx);
1356 if (ret) 1356 if (ret)
1357 return ret; 1357 return ret;
1358 ctx->ops->signal1_type_set(ctx, val); 1358 ctx->ops->signal1_type_set(ctx, val);
1359 spu_release(ctx); 1359 spu_release(ctx);
1360 1360
1361 return 0; 1361 return 0;
1362 } 1362 }
1363 1363
1364 static u64 spufs_signal1_type_get(struct spu_context *ctx) 1364 static u64 spufs_signal1_type_get(struct spu_context *ctx)
1365 { 1365 {
1366 return ctx->ops->signal1_type_get(ctx); 1366 return ctx->ops->signal1_type_get(ctx);
1367 } 1367 }
1368 DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get, 1368 DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
1369 spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE); 1369 spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
1370 1370
1371 1371
1372 static int spufs_signal2_type_set(void *data, u64 val) 1372 static int spufs_signal2_type_set(void *data, u64 val)
1373 { 1373 {
1374 struct spu_context *ctx = data; 1374 struct spu_context *ctx = data;
1375 int ret; 1375 int ret;
1376 1376
1377 ret = spu_acquire(ctx); 1377 ret = spu_acquire(ctx);
1378 if (ret) 1378 if (ret)
1379 return ret; 1379 return ret;
1380 ctx->ops->signal2_type_set(ctx, val); 1380 ctx->ops->signal2_type_set(ctx, val);
1381 spu_release(ctx); 1381 spu_release(ctx);
1382 1382
1383 return 0; 1383 return 0;
1384 } 1384 }
1385 1385
1386 static u64 spufs_signal2_type_get(struct spu_context *ctx) 1386 static u64 spufs_signal2_type_get(struct spu_context *ctx)
1387 { 1387 {
1388 return ctx->ops->signal2_type_get(ctx); 1388 return ctx->ops->signal2_type_get(ctx);
1389 } 1389 }
1390 DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get, 1390 DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
1391 spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE); 1391 spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
1392 1392
1393 #if SPUFS_MMAP_4K 1393 #if SPUFS_MMAP_4K
1394 static int 1394 static int
1395 spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1395 spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1396 { 1396 {
1397 return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE); 1397 return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
1398 } 1398 }
1399 1399
1400 static const struct vm_operations_struct spufs_mss_mmap_vmops = { 1400 static const struct vm_operations_struct spufs_mss_mmap_vmops = {
1401 .fault = spufs_mss_mmap_fault, 1401 .fault = spufs_mss_mmap_fault,
1402 }; 1402 };
1403 1403
1404 /* 1404 /*
1405 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff]. 1405 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
1406 */ 1406 */
1407 static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma) 1407 static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
1408 { 1408 {
1409 if (!(vma->vm_flags & VM_SHARED)) 1409 if (!(vma->vm_flags & VM_SHARED))
1410 return -EINVAL; 1410 return -EINVAL;
1411 1411
1412 vma->vm_flags |= VM_IO | VM_PFNMAP; 1412 vma->vm_flags |= VM_IO | VM_PFNMAP;
1413 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 1413 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1414 1414
1415 vma->vm_ops = &spufs_mss_mmap_vmops; 1415 vma->vm_ops = &spufs_mss_mmap_vmops;
1416 return 0; 1416 return 0;
1417 } 1417 }
1418 #else /* SPUFS_MMAP_4K */ 1418 #else /* SPUFS_MMAP_4K */
1419 #define spufs_mss_mmap NULL 1419 #define spufs_mss_mmap NULL
1420 #endif /* !SPUFS_MMAP_4K */ 1420 #endif /* !SPUFS_MMAP_4K */
1421 1421
1422 static int spufs_mss_open(struct inode *inode, struct file *file) 1422 static int spufs_mss_open(struct inode *inode, struct file *file)
1423 { 1423 {
1424 struct spufs_inode_info *i = SPUFS_I(inode); 1424 struct spufs_inode_info *i = SPUFS_I(inode);
1425 struct spu_context *ctx = i->i_ctx; 1425 struct spu_context *ctx = i->i_ctx;
1426 1426
1427 file->private_data = i->i_ctx; 1427 file->private_data = i->i_ctx;
1428 1428
1429 mutex_lock(&ctx->mapping_lock); 1429 mutex_lock(&ctx->mapping_lock);
1430 if (!i->i_openers++) 1430 if (!i->i_openers++)
1431 ctx->mss = inode->i_mapping; 1431 ctx->mss = inode->i_mapping;
1432 mutex_unlock(&ctx->mapping_lock); 1432 mutex_unlock(&ctx->mapping_lock);
1433 return nonseekable_open(inode, file); 1433 return nonseekable_open(inode, file);
1434 } 1434 }
1435 1435
1436 static int 1436 static int
1437 spufs_mss_release(struct inode *inode, struct file *file) 1437 spufs_mss_release(struct inode *inode, struct file *file)
1438 { 1438 {
1439 struct spufs_inode_info *i = SPUFS_I(inode); 1439 struct spufs_inode_info *i = SPUFS_I(inode);
1440 struct spu_context *ctx = i->i_ctx; 1440 struct spu_context *ctx = i->i_ctx;
1441 1441
1442 mutex_lock(&ctx->mapping_lock); 1442 mutex_lock(&ctx->mapping_lock);
1443 if (!--i->i_openers) 1443 if (!--i->i_openers)
1444 ctx->mss = NULL; 1444 ctx->mss = NULL;
1445 mutex_unlock(&ctx->mapping_lock); 1445 mutex_unlock(&ctx->mapping_lock);
1446 return 0; 1446 return 0;
1447 } 1447 }
1448 1448
1449 static const struct file_operations spufs_mss_fops = { 1449 static const struct file_operations spufs_mss_fops = {
1450 .open = spufs_mss_open, 1450 .open = spufs_mss_open,
1451 .release = spufs_mss_release, 1451 .release = spufs_mss_release,
1452 .mmap = spufs_mss_mmap, 1452 .mmap = spufs_mss_mmap,
1453 .llseek = no_llseek, 1453 .llseek = no_llseek,
1454 }; 1454 };
1455 1455
1456 static int 1456 static int
1457 spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1457 spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1458 { 1458 {
1459 return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE); 1459 return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
1460 } 1460 }
1461 1461
1462 static const struct vm_operations_struct spufs_psmap_mmap_vmops = { 1462 static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
1463 .fault = spufs_psmap_mmap_fault, 1463 .fault = spufs_psmap_mmap_fault,
1464 }; 1464 };
1465 1465
1466 /* 1466 /*
1467 * mmap support for full problem state area [0x00000 - 0x1ffff]. 1467 * mmap support for full problem state area [0x00000 - 0x1ffff].
1468 */ 1468 */
1469 static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma) 1469 static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
1470 { 1470 {
1471 if (!(vma->vm_flags & VM_SHARED)) 1471 if (!(vma->vm_flags & VM_SHARED))
1472 return -EINVAL; 1472 return -EINVAL;
1473 1473
1474 vma->vm_flags |= VM_IO | VM_PFNMAP; 1474 vma->vm_flags |= VM_IO | VM_PFNMAP;
1475 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 1475 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1476 1476
1477 vma->vm_ops = &spufs_psmap_mmap_vmops; 1477 vma->vm_ops = &spufs_psmap_mmap_vmops;
1478 return 0; 1478 return 0;
1479 } 1479 }
1480 1480
1481 static int spufs_psmap_open(struct inode *inode, struct file *file) 1481 static int spufs_psmap_open(struct inode *inode, struct file *file)
1482 { 1482 {
1483 struct spufs_inode_info *i = SPUFS_I(inode); 1483 struct spufs_inode_info *i = SPUFS_I(inode);
1484 struct spu_context *ctx = i->i_ctx; 1484 struct spu_context *ctx = i->i_ctx;
1485 1485
1486 mutex_lock(&ctx->mapping_lock); 1486 mutex_lock(&ctx->mapping_lock);
1487 file->private_data = i->i_ctx; 1487 file->private_data = i->i_ctx;
1488 if (!i->i_openers++) 1488 if (!i->i_openers++)
1489 ctx->psmap = inode->i_mapping; 1489 ctx->psmap = inode->i_mapping;
1490 mutex_unlock(&ctx->mapping_lock); 1490 mutex_unlock(&ctx->mapping_lock);
1491 return nonseekable_open(inode, file); 1491 return nonseekable_open(inode, file);
1492 } 1492 }
1493 1493
1494 static int 1494 static int
1495 spufs_psmap_release(struct inode *inode, struct file *file) 1495 spufs_psmap_release(struct inode *inode, struct file *file)
1496 { 1496 {
1497 struct spufs_inode_info *i = SPUFS_I(inode); 1497 struct spufs_inode_info *i = SPUFS_I(inode);
1498 struct spu_context *ctx = i->i_ctx; 1498 struct spu_context *ctx = i->i_ctx;
1499 1499
1500 mutex_lock(&ctx->mapping_lock); 1500 mutex_lock(&ctx->mapping_lock);
1501 if (!--i->i_openers) 1501 if (!--i->i_openers)
1502 ctx->psmap = NULL; 1502 ctx->psmap = NULL;
1503 mutex_unlock(&ctx->mapping_lock); 1503 mutex_unlock(&ctx->mapping_lock);
1504 return 0; 1504 return 0;
1505 } 1505 }
1506 1506
1507 static const struct file_operations spufs_psmap_fops = { 1507 static const struct file_operations spufs_psmap_fops = {
1508 .open = spufs_psmap_open, 1508 .open = spufs_psmap_open,
1509 .release = spufs_psmap_release, 1509 .release = spufs_psmap_release,
1510 .mmap = spufs_psmap_mmap, 1510 .mmap = spufs_psmap_mmap,
1511 .llseek = no_llseek, 1511 .llseek = no_llseek,
1512 }; 1512 };
1513 1513
1514 1514
1515 #if SPUFS_MMAP_4K 1515 #if SPUFS_MMAP_4K
1516 static int 1516 static int
1517 spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1517 spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1518 { 1518 {
1519 return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE); 1519 return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
1520 } 1520 }
1521 1521
1522 static const struct vm_operations_struct spufs_mfc_mmap_vmops = { 1522 static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
1523 .fault = spufs_mfc_mmap_fault, 1523 .fault = spufs_mfc_mmap_fault,
1524 }; 1524 };
1525 1525
1526 /* 1526 /*
1527 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff]. 1527 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
1528 */ 1528 */
1529 static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma) 1529 static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
1530 { 1530 {
1531 if (!(vma->vm_flags & VM_SHARED)) 1531 if (!(vma->vm_flags & VM_SHARED))
1532 return -EINVAL; 1532 return -EINVAL;
1533 1533
1534 vma->vm_flags |= VM_IO | VM_PFNMAP; 1534 vma->vm_flags |= VM_IO | VM_PFNMAP;
1535 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 1535 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1536 1536
1537 vma->vm_ops = &spufs_mfc_mmap_vmops; 1537 vma->vm_ops = &spufs_mfc_mmap_vmops;
1538 return 0; 1538 return 0;
1539 } 1539 }
1540 #else /* SPUFS_MMAP_4K */ 1540 #else /* SPUFS_MMAP_4K */
1541 #define spufs_mfc_mmap NULL 1541 #define spufs_mfc_mmap NULL
1542 #endif /* !SPUFS_MMAP_4K */ 1542 #endif /* !SPUFS_MMAP_4K */
1543 1543
1544 static int spufs_mfc_open(struct inode *inode, struct file *file) 1544 static int spufs_mfc_open(struct inode *inode, struct file *file)
1545 { 1545 {
1546 struct spufs_inode_info *i = SPUFS_I(inode); 1546 struct spufs_inode_info *i = SPUFS_I(inode);
1547 struct spu_context *ctx = i->i_ctx; 1547 struct spu_context *ctx = i->i_ctx;
1548 1548
1549 /* we don't want to deal with DMA into other processes */ 1549 /* we don't want to deal with DMA into other processes */
1550 if (ctx->owner != current->mm) 1550 if (ctx->owner != current->mm)
1551 return -EINVAL; 1551 return -EINVAL;
1552 1552
1553 if (atomic_read(&inode->i_count) != 1) 1553 if (atomic_read(&inode->i_count) != 1)
1554 return -EBUSY; 1554 return -EBUSY;
1555 1555
1556 mutex_lock(&ctx->mapping_lock); 1556 mutex_lock(&ctx->mapping_lock);
1557 file->private_data = ctx; 1557 file->private_data = ctx;
1558 if (!i->i_openers++) 1558 if (!i->i_openers++)
1559 ctx->mfc = inode->i_mapping; 1559 ctx->mfc = inode->i_mapping;
1560 mutex_unlock(&ctx->mapping_lock); 1560 mutex_unlock(&ctx->mapping_lock);
1561 return nonseekable_open(inode, file); 1561 return nonseekable_open(inode, file);
1562 } 1562 }
1563 1563
1564 static int 1564 static int
1565 spufs_mfc_release(struct inode *inode, struct file *file) 1565 spufs_mfc_release(struct inode *inode, struct file *file)
1566 { 1566 {
1567 struct spufs_inode_info *i = SPUFS_I(inode); 1567 struct spufs_inode_info *i = SPUFS_I(inode);
1568 struct spu_context *ctx = i->i_ctx; 1568 struct spu_context *ctx = i->i_ctx;
1569 1569
1570 mutex_lock(&ctx->mapping_lock); 1570 mutex_lock(&ctx->mapping_lock);
1571 if (!--i->i_openers) 1571 if (!--i->i_openers)
1572 ctx->mfc = NULL; 1572 ctx->mfc = NULL;
1573 mutex_unlock(&ctx->mapping_lock); 1573 mutex_unlock(&ctx->mapping_lock);
1574 return 0; 1574 return 0;
1575 } 1575 }
1576 1576
1577 /* interrupt-level mfc callback function. */ 1577 /* interrupt-level mfc callback function. */
1578 void spufs_mfc_callback(struct spu *spu) 1578 void spufs_mfc_callback(struct spu *spu)
1579 { 1579 {
1580 struct spu_context *ctx = spu->ctx; 1580 struct spu_context *ctx = spu->ctx;
1581 1581
1582 if (!ctx) 1582 if (!ctx)
1583 return; 1583 return;
1584 1584
1585 wake_up_all(&ctx->mfc_wq); 1585 wake_up_all(&ctx->mfc_wq);
1586 1586
1587 pr_debug("%s %s\n", __func__, spu->name); 1587 pr_debug("%s %s\n", __func__, spu->name);
1588 if (ctx->mfc_fasync) { 1588 if (ctx->mfc_fasync) {
1589 u32 free_elements, tagstatus; 1589 u32 free_elements, tagstatus;
1590 unsigned int mask; 1590 unsigned int mask;
1591 1591
1592 /* no need for spu_acquire in interrupt context */ 1592 /* no need for spu_acquire in interrupt context */
1593 free_elements = ctx->ops->get_mfc_free_elements(ctx); 1593 free_elements = ctx->ops->get_mfc_free_elements(ctx);
1594 tagstatus = ctx->ops->read_mfc_tagstatus(ctx); 1594 tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
1595 1595
1596 mask = 0; 1596 mask = 0;
1597 if (free_elements & 0xffff) 1597 if (free_elements & 0xffff)
1598 mask |= POLLOUT; 1598 mask |= POLLOUT;
1599 if (tagstatus & ctx->tagwait) 1599 if (tagstatus & ctx->tagwait)
1600 mask |= POLLIN; 1600 mask |= POLLIN;
1601 1601
1602 kill_fasync(&ctx->mfc_fasync, SIGIO, mask); 1602 kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
1603 } 1603 }
1604 } 1604 }
1605 1605
1606 static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status) 1606 static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
1607 { 1607 {
1608 /* See if there is one tag group that is complete */ 1608 /* See if there is one tag group that is complete */
1609 /* FIXME we need locking around tagwait */ 1609 /* FIXME we need locking around tagwait */
1610 *status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait; 1610 *status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
1611 ctx->tagwait &= ~*status; 1611 ctx->tagwait &= ~*status;
1612 if (*status) 1612 if (*status)
1613 return 1; 1613 return 1;
1614 1614
1615 /* enable interrupt waiting for any tag group, 1615 /* enable interrupt waiting for any tag group,
1616 may silently fail if interrupts are already enabled */ 1616 may silently fail if interrupts are already enabled */
1617 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1); 1617 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
1618 return 0; 1618 return 0;
1619 } 1619 }
1620 1620
1621 static ssize_t spufs_mfc_read(struct file *file, char __user *buffer, 1621 static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
1622 size_t size, loff_t *pos) 1622 size_t size, loff_t *pos)
1623 { 1623 {
1624 struct spu_context *ctx = file->private_data; 1624 struct spu_context *ctx = file->private_data;
1625 int ret = -EINVAL; 1625 int ret = -EINVAL;
1626 u32 status; 1626 u32 status;
1627 1627
1628 if (size != 4) 1628 if (size != 4)
1629 goto out; 1629 goto out;
1630 1630
1631 ret = spu_acquire(ctx); 1631 ret = spu_acquire(ctx);
1632 if (ret) 1632 if (ret)
1633 return ret; 1633 return ret;
1634 1634
1635 ret = -EINVAL; 1635 ret = -EINVAL;
1636 if (file->f_flags & O_NONBLOCK) { 1636 if (file->f_flags & O_NONBLOCK) {
1637 status = ctx->ops->read_mfc_tagstatus(ctx); 1637 status = ctx->ops->read_mfc_tagstatus(ctx);
1638 if (!(status & ctx->tagwait)) 1638 if (!(status & ctx->tagwait))
1639 ret = -EAGAIN; 1639 ret = -EAGAIN;
1640 else 1640 else
1641 /* XXX(hch): shouldn't we clear ret here? */ 1641 /* XXX(hch): shouldn't we clear ret here? */
1642 ctx->tagwait &= ~status; 1642 ctx->tagwait &= ~status;
1643 } else { 1643 } else {
1644 ret = spufs_wait(ctx->mfc_wq, 1644 ret = spufs_wait(ctx->mfc_wq,
1645 spufs_read_mfc_tagstatus(ctx, &status)); 1645 spufs_read_mfc_tagstatus(ctx, &status));
1646 if (ret) 1646 if (ret)
1647 goto out; 1647 goto out;
1648 } 1648 }
1649 spu_release(ctx); 1649 spu_release(ctx);
1650 1650
1651 ret = 4; 1651 ret = 4;
1652 if (copy_to_user(buffer, &status, 4)) 1652 if (copy_to_user(buffer, &status, 4))
1653 ret = -EFAULT; 1653 ret = -EFAULT;
1654 1654
1655 out: 1655 out:
1656 return ret; 1656 return ret;
1657 } 1657 }
1658 1658
1659 static int spufs_check_valid_dma(struct mfc_dma_command *cmd) 1659 static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
1660 { 1660 {
1661 pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa, 1661 pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa,
1662 cmd->ea, cmd->size, cmd->tag, cmd->cmd); 1662 cmd->ea, cmd->size, cmd->tag, cmd->cmd);
1663 1663
1664 switch (cmd->cmd) { 1664 switch (cmd->cmd) {
1665 case MFC_PUT_CMD: 1665 case MFC_PUT_CMD:
1666 case MFC_PUTF_CMD: 1666 case MFC_PUTF_CMD:
1667 case MFC_PUTB_CMD: 1667 case MFC_PUTB_CMD:
1668 case MFC_GET_CMD: 1668 case MFC_GET_CMD:
1669 case MFC_GETF_CMD: 1669 case MFC_GETF_CMD:
1670 case MFC_GETB_CMD: 1670 case MFC_GETB_CMD:
1671 break; 1671 break;
1672 default: 1672 default:
1673 pr_debug("invalid DMA opcode %x\n", cmd->cmd); 1673 pr_debug("invalid DMA opcode %x\n", cmd->cmd);
1674 return -EIO; 1674 return -EIO;
1675 } 1675 }
1676 1676
1677 if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) { 1677 if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
1678 pr_debug("invalid DMA alignment, ea %llx lsa %x\n", 1678 pr_debug("invalid DMA alignment, ea %llx lsa %x\n",
1679 cmd->ea, cmd->lsa); 1679 cmd->ea, cmd->lsa);
1680 return -EIO; 1680 return -EIO;
1681 } 1681 }
1682 1682
1683 switch (cmd->size & 0xf) { 1683 switch (cmd->size & 0xf) {
1684 case 1: 1684 case 1:
1685 break; 1685 break;
1686 case 2: 1686 case 2:
1687 if (cmd->lsa & 1) 1687 if (cmd->lsa & 1)
1688 goto error; 1688 goto error;
1689 break; 1689 break;
1690 case 4: 1690 case 4:
1691 if (cmd->lsa & 3) 1691 if (cmd->lsa & 3)
1692 goto error; 1692 goto error;
1693 break; 1693 break;
1694 case 8: 1694 case 8:
1695 if (cmd->lsa & 7) 1695 if (cmd->lsa & 7)
1696 goto error; 1696 goto error;
1697 break; 1697 break;
1698 case 0: 1698 case 0:
1699 if (cmd->lsa & 15) 1699 if (cmd->lsa & 15)
1700 goto error; 1700 goto error;
1701 break; 1701 break;
1702 error: 1702 error:
1703 default: 1703 default:
1704 pr_debug("invalid DMA alignment %x for size %x\n", 1704 pr_debug("invalid DMA alignment %x for size %x\n",
1705 cmd->lsa & 0xf, cmd->size); 1705 cmd->lsa & 0xf, cmd->size);
1706 return -EIO; 1706 return -EIO;
1707 } 1707 }
1708 1708
1709 if (cmd->size > 16 * 1024) { 1709 if (cmd->size > 16 * 1024) {
1710 pr_debug("invalid DMA size %x\n", cmd->size); 1710 pr_debug("invalid DMA size %x\n", cmd->size);
1711 return -EIO; 1711 return -EIO;
1712 } 1712 }
1713 1713
1714 if (cmd->tag & 0xfff0) { 1714 if (cmd->tag & 0xfff0) {
1715 /* we reserve the higher tag numbers for kernel use */ 1715 /* we reserve the higher tag numbers for kernel use */
1716 pr_debug("invalid DMA tag\n"); 1716 pr_debug("invalid DMA tag\n");
1717 return -EIO; 1717 return -EIO;
1718 } 1718 }
1719 1719
1720 if (cmd->class) { 1720 if (cmd->class) {
1721 /* not supported in this version */ 1721 /* not supported in this version */
1722 pr_debug("invalid DMA class\n"); 1722 pr_debug("invalid DMA class\n");
1723 return -EIO; 1723 return -EIO;
1724 } 1724 }
1725 1725
1726 return 0; 1726 return 0;
1727 } 1727 }
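The checks above encode the MFC DMA restrictions visible to user space: the local-store and effective addresses must agree in their low four bits, 2/4/8-byte transfers must be naturally aligned, any other size must be a multiple of 16 bytes, a single command moves at most 16 KiB, and only tag groups 0-15 with class 0 are accepted. A stand-alone sketch of the same alignment rule, useful for pre-validating a command before writing it to the mfc file; the helper name and the test values are illustrative, not part of spufs:

#include <stdint.h>
#include <stdio.h>

/* Mirrors spufs_check_valid_dma()'s alignment logic: sizes whose low
 * nibble is 2, 4 or 8 must be naturally aligned, a low nibble of 0
 * needs 16-byte alignment, a low nibble of 1 is always accepted, and
 * everything else is rejected.  lsa and ea must share their low 4 bits. */
static int dma_alignment_ok(uint32_t lsa, uint64_t ea, uint32_t size)
{
	if ((lsa & 0xf) != (ea & 0xf))
		return 0;

	switch (size & 0xf) {
	case 1:	return 1;
	case 2:	return !(lsa & 1);
	case 4:	return !(lsa & 3);
	case 8:	return !(lsa & 7);
	case 0:	return !(lsa & 15);
	default: return 0;
	}
}

int main(void)
{
	/* 4 KiB transfer, both addresses 16-byte aligned: accepted */
	printf("%d\n", dma_alignment_ok(0x1000, 0x20000000ULL, 4096));
	/* 8-byte transfer from a 4-byte-aligned local-store address: rejected */
	printf("%d\n", dma_alignment_ok(0x1004, 0x20000004ULL, 8));
	return 0;
}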
1728 1728
1729 static int spu_send_mfc_command(struct spu_context *ctx, 1729 static int spu_send_mfc_command(struct spu_context *ctx,
1730 struct mfc_dma_command cmd, 1730 struct mfc_dma_command cmd,
1731 int *error) 1731 int *error)
1732 { 1732 {
1733 *error = ctx->ops->send_mfc_command(ctx, &cmd); 1733 *error = ctx->ops->send_mfc_command(ctx, &cmd);
1734 if (*error == -EAGAIN) { 1734 if (*error == -EAGAIN) {
1735 /* wait for any tag group to complete 1735 /* wait for any tag group to complete
1736 so we have space for the new command */ 1736 so we have space for the new command */
1737 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1); 1737 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
1738 /* try again, because the queue might be 1738 /* try again, because the queue might be
1739 empty again */ 1739 empty again */
1740 *error = ctx->ops->send_mfc_command(ctx, &cmd); 1740 *error = ctx->ops->send_mfc_command(ctx, &cmd);
1741 if (*error == -EAGAIN) 1741 if (*error == -EAGAIN)
1742 return 0; 1742 return 0;
1743 } 1743 }
1744 return 1; 1744 return 1;
1745 } 1745 }
1746 1746
1747 static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer, 1747 static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
1748 size_t size, loff_t *pos) 1748 size_t size, loff_t *pos)
1749 { 1749 {
1750 struct spu_context *ctx = file->private_data; 1750 struct spu_context *ctx = file->private_data;
1751 struct mfc_dma_command cmd; 1751 struct mfc_dma_command cmd;
1752 int ret = -EINVAL; 1752 int ret = -EINVAL;
1753 1753
1754 if (size != sizeof cmd) 1754 if (size != sizeof cmd)
1755 goto out; 1755 goto out;
1756 1756
1757 ret = -EFAULT; 1757 ret = -EFAULT;
1758 if (copy_from_user(&cmd, buffer, sizeof cmd)) 1758 if (copy_from_user(&cmd, buffer, sizeof cmd))
1759 goto out; 1759 goto out;
1760 1760
1761 ret = spufs_check_valid_dma(&cmd); 1761 ret = spufs_check_valid_dma(&cmd);
1762 if (ret) 1762 if (ret)
1763 goto out; 1763 goto out;
1764 1764
1765 ret = spu_acquire(ctx); 1765 ret = spu_acquire(ctx);
1766 if (ret) 1766 if (ret)
1767 goto out; 1767 goto out;
1768 1768
1769 ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE); 1769 ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
1770 if (ret) 1770 if (ret)
1771 goto out; 1771 goto out;
1772 1772
1773 if (file->f_flags & O_NONBLOCK) { 1773 if (file->f_flags & O_NONBLOCK) {
1774 ret = ctx->ops->send_mfc_command(ctx, &cmd); 1774 ret = ctx->ops->send_mfc_command(ctx, &cmd);
1775 } else { 1775 } else {
1776 int status; 1776 int status;
1777 ret = spufs_wait(ctx->mfc_wq, 1777 ret = spufs_wait(ctx->mfc_wq,
1778 spu_send_mfc_command(ctx, cmd, &status)); 1778 spu_send_mfc_command(ctx, cmd, &status));
1779 if (ret) 1779 if (ret)
1780 goto out; 1780 goto out;
1781 if (status) 1781 if (status)
1782 ret = status; 1782 ret = status;
1783 } 1783 }
1784 1784
1785 if (ret) 1785 if (ret)
1786 goto out_unlock; 1786 goto out_unlock;
1787 1787
1788 ctx->tagwait |= 1 << cmd.tag; 1788 ctx->tagwait |= 1 << cmd.tag;
1789 ret = size; 1789 ret = size;
1790 1790
1791 out_unlock: 1791 out_unlock:
1792 spu_release(ctx); 1792 spu_release(ctx);
1793 out: 1793 out:
1794 return ret; 1794 return ret;
1795 } 1795 }
1796 1796
1797 static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait) 1797 static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
1798 { 1798 {
1799 struct spu_context *ctx = file->private_data; 1799 struct spu_context *ctx = file->private_data;
1800 u32 free_elements, tagstatus; 1800 u32 free_elements, tagstatus;
1801 unsigned int mask; 1801 unsigned int mask;
1802 1802
1803 poll_wait(file, &ctx->mfc_wq, wait); 1803 poll_wait(file, &ctx->mfc_wq, wait);
1804 1804
1805 /* 1805 /*
1806 * For now keep this uninterruptible and also ignore the rule 1806 * For now keep this uninterruptible and also ignore the rule
1807 * that poll should not sleep. Will be fixed later. 1807 * that poll should not sleep. Will be fixed later.
1808 */ 1808 */
1809 mutex_lock(&ctx->state_mutex); 1809 mutex_lock(&ctx->state_mutex);
1810 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2); 1810 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
1811 free_elements = ctx->ops->get_mfc_free_elements(ctx); 1811 free_elements = ctx->ops->get_mfc_free_elements(ctx);
1812 tagstatus = ctx->ops->read_mfc_tagstatus(ctx); 1812 tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
1813 spu_release(ctx); 1813 spu_release(ctx);
1814 1814
1815 mask = 0; 1815 mask = 0;
1816 if (free_elements & 0xffff) 1816 if (free_elements & 0xffff)
1817 mask |= POLLOUT | POLLWRNORM; 1817 mask |= POLLOUT | POLLWRNORM;
1818 if (tagstatus & ctx->tagwait) 1818 if (tagstatus & ctx->tagwait)
1819 mask |= POLLIN | POLLRDNORM; 1819 mask |= POLLIN | POLLRDNORM;
1820 1820
1821 pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__, 1821 pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
1822 free_elements, tagstatus, ctx->tagwait); 1822 free_elements, tagstatus, ctx->tagwait);
1823 1823
1824 return mask; 1824 return mask;
1825 } 1825 }
1826 1826
1827 static int spufs_mfc_flush(struct file *file, fl_owner_t id) 1827 static int spufs_mfc_flush(struct file *file, fl_owner_t id)
1828 { 1828 {
1829 struct spu_context *ctx = file->private_data; 1829 struct spu_context *ctx = file->private_data;
1830 int ret; 1830 int ret;
1831 1831
1832 ret = spu_acquire(ctx); 1832 ret = spu_acquire(ctx);
1833 if (ret) 1833 if (ret)
1834 goto out; 1834 goto out;
1835 #if 0 1835 #if 0
1836 /* this currently hangs */ 1836 /* this currently hangs */
1837 ret = spufs_wait(ctx->mfc_wq, 1837 ret = spufs_wait(ctx->mfc_wq,
1838 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2)); 1838 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
1839 if (ret) 1839 if (ret)
1840 goto out; 1840 goto out;
1841 ret = spufs_wait(ctx->mfc_wq, 1841 ret = spufs_wait(ctx->mfc_wq,
1842 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait); 1842 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
1843 if (ret) 1843 if (ret)
1844 goto out; 1844 goto out;
1845 #else 1845 #else
1846 ret = 0; 1846 ret = 0;
1847 #endif 1847 #endif
1848 spu_release(ctx); 1848 spu_release(ctx);
1849 out: 1849 out:
1850 return ret; 1850 return ret;
1851 } 1851 }
1852 1852
1853 static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync) 1853 static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1854 { 1854 {
1855 struct inode *inode = file->f_path.dentry->d_inode; 1855 struct inode *inode = file->f_path.dentry->d_inode;
1856 int err = filemap_write_and_wait_range(inode->i_mapping, start, end); 1856 int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
1857 if (!err) { 1857 if (!err) {
1858 mutex_lock(&inode->i_mutex); 1858 mutex_lock(&inode->i_mutex);
1859 err = spufs_mfc_flush(file, NULL); 1859 err = spufs_mfc_flush(file, NULL);
1860 mutex_unlock(&inode->i_mutex); 1860 mutex_unlock(&inode->i_mutex);
1861 } 1861 }
1862 return err; 1862 return err;
1863 } 1863 }
1864 1864
1865 static int spufs_mfc_fasync(int fd, struct file *file, int on) 1865 static int spufs_mfc_fasync(int fd, struct file *file, int on)
1866 { 1866 {
1867 struct spu_context *ctx = file->private_data; 1867 struct spu_context *ctx = file->private_data;
1868 1868
1869 return fasync_helper(fd, file, on, &ctx->mfc_fasync); 1869 return fasync_helper(fd, file, on, &ctx->mfc_fasync);
1870 } 1870 }
1871 1871
1872 static const struct file_operations spufs_mfc_fops = { 1872 static const struct file_operations spufs_mfc_fops = {
1873 .open = spufs_mfc_open, 1873 .open = spufs_mfc_open,
1874 .release = spufs_mfc_release, 1874 .release = spufs_mfc_release,
1875 .read = spufs_mfc_read, 1875 .read = spufs_mfc_read,
1876 .write = spufs_mfc_write, 1876 .write = spufs_mfc_write,
1877 .poll = spufs_mfc_poll, 1877 .poll = spufs_mfc_poll,
1878 .flush = spufs_mfc_flush, 1878 .flush = spufs_mfc_flush,
1879 .fsync = spufs_mfc_fsync, 1879 .fsync = spufs_mfc_fsync,
1880 .fasync = spufs_mfc_fasync, 1880 .fasync = spufs_mfc_fasync,
1881 .mmap = spufs_mfc_mmap, 1881 .mmap = spufs_mfc_mmap,
1882 .llseek = no_llseek, 1882 .llseek = no_llseek,
1883 }; 1883 };
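Taken together these operations give the mfc file its user-space contract: write exactly one struct mfc_dma_command to queue a DMA, poll for POLLIN once the tag group named in that command is being waited on, and read exactly four bytes to collect the tag status. A hedged user-space sketch of that cycle follows; the mount point, the local copy of the command structure and the opcode value are illustrative assumptions, the authoritative definitions being the kernel's SPU headers:

#include <fcntl.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative local mirror of struct mfc_dma_command; the field names
 * match the kernel code above, the exact layout is an assumption. */
struct mfc_dma_command {
	int32_t  pad;
	uint32_t lsa;
	uint64_t ea;
	uint16_t size;
	uint16_t tag;
	uint16_t class;
	uint16_t cmd;
};

#define MFC_GET_CMD	0x40	/* assumed opcode value, for illustration */

int main(void)
{
	struct mfc_dma_command cmd = {
		.lsa = 0x0, .ea = 0x10000000ULL,
		.size = 4096, .tag = 1, .class = 0, .cmd = MFC_GET_CMD,
	};
	struct pollfd pfd;
	uint32_t tagstatus;
	int fd = open("/spu/example/mfc", O_RDWR);	/* hypothetical context */

	if (fd < 0)
		return 1;
	if (write(fd, &cmd, sizeof(cmd)) != sizeof(cmd))	/* queue one DMA */
		return 1;

	pfd.fd = fd;
	pfd.events = POLLIN;
	poll(&pfd, 1, -1);			/* wait for the tag group */

	if (read(fd, &tagstatus, 4) == 4)	/* reads must be exactly 4 bytes */
		printf("tag status 0x%x\n", tagstatus);
	close(fd);
	return 0;
}

In blocking mode the read itself waits for the tag group, so the poll() step is optional; with O_NONBLOCK both the write and the read return -EAGAIN until the hardware is ready, as the code above shows.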
1884 1884
1885 static int spufs_npc_set(void *data, u64 val) 1885 static int spufs_npc_set(void *data, u64 val)
1886 { 1886 {
1887 struct spu_context *ctx = data; 1887 struct spu_context *ctx = data;
1888 int ret; 1888 int ret;
1889 1889
1890 ret = spu_acquire(ctx); 1890 ret = spu_acquire(ctx);
1891 if (ret) 1891 if (ret)
1892 return ret; 1892 return ret;
1893 ctx->ops->npc_write(ctx, val); 1893 ctx->ops->npc_write(ctx, val);
1894 spu_release(ctx); 1894 spu_release(ctx);
1895 1895
1896 return 0; 1896 return 0;
1897 } 1897 }
1898 1898
1899 static u64 spufs_npc_get(struct spu_context *ctx) 1899 static u64 spufs_npc_get(struct spu_context *ctx)
1900 { 1900 {
1901 return ctx->ops->npc_read(ctx); 1901 return ctx->ops->npc_read(ctx);
1902 } 1902 }
1903 DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set, 1903 DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
1904 "0x%llx\n", SPU_ATTR_ACQUIRE); 1904 "0x%llx\n", SPU_ATTR_ACQUIRE);
1905 1905
1906 static int spufs_decr_set(void *data, u64 val) 1906 static int spufs_decr_set(void *data, u64 val)
1907 { 1907 {
1908 struct spu_context *ctx = data; 1908 struct spu_context *ctx = data;
1909 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1909 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1910 int ret; 1910 int ret;
1911 1911
1912 ret = spu_acquire_saved(ctx); 1912 ret = spu_acquire_saved(ctx);
1913 if (ret) 1913 if (ret)
1914 return ret; 1914 return ret;
1915 lscsa->decr.slot[0] = (u32) val; 1915 lscsa->decr.slot[0] = (u32) val;
1916 spu_release_saved(ctx); 1916 spu_release_saved(ctx);
1917 1917
1918 return 0; 1918 return 0;
1919 } 1919 }
1920 1920
1921 static u64 spufs_decr_get(struct spu_context *ctx) 1921 static u64 spufs_decr_get(struct spu_context *ctx)
1922 { 1922 {
1923 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1923 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1924 return lscsa->decr.slot[0]; 1924 return lscsa->decr.slot[0];
1925 } 1925 }
1926 DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set, 1926 DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
1927 "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED); 1927 "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);
1928 1928
1929 static int spufs_decr_status_set(void *data, u64 val) 1929 static int spufs_decr_status_set(void *data, u64 val)
1930 { 1930 {
1931 struct spu_context *ctx = data; 1931 struct spu_context *ctx = data;
1932 int ret; 1932 int ret;
1933 1933
1934 ret = spu_acquire_saved(ctx); 1934 ret = spu_acquire_saved(ctx);
1935 if (ret) 1935 if (ret)
1936 return ret; 1936 return ret;
1937 if (val) 1937 if (val)
1938 ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING; 1938 ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
1939 else 1939 else
1940 ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING; 1940 ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
1941 spu_release_saved(ctx); 1941 spu_release_saved(ctx);
1942 1942
1943 return 0; 1943 return 0;
1944 } 1944 }
1945 1945
1946 static u64 spufs_decr_status_get(struct spu_context *ctx) 1946 static u64 spufs_decr_status_get(struct spu_context *ctx)
1947 { 1947 {
1948 if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) 1948 if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
1949 return SPU_DECR_STATUS_RUNNING; 1949 return SPU_DECR_STATUS_RUNNING;
1950 else 1950 else
1951 return 0; 1951 return 0;
1952 } 1952 }
1953 DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get, 1953 DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
1954 spufs_decr_status_set, "0x%llx\n", 1954 spufs_decr_status_set, "0x%llx\n",
1955 SPU_ATTR_ACQUIRE_SAVED); 1955 SPU_ATTR_ACQUIRE_SAVED);
1956 1956
1957 static int spufs_event_mask_set(void *data, u64 val) 1957 static int spufs_event_mask_set(void *data, u64 val)
1958 { 1958 {
1959 struct spu_context *ctx = data; 1959 struct spu_context *ctx = data;
1960 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1960 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1961 int ret; 1961 int ret;
1962 1962
1963 ret = spu_acquire_saved(ctx); 1963 ret = spu_acquire_saved(ctx);
1964 if (ret) 1964 if (ret)
1965 return ret; 1965 return ret;
1966 lscsa->event_mask.slot[0] = (u32) val; 1966 lscsa->event_mask.slot[0] = (u32) val;
1967 spu_release_saved(ctx); 1967 spu_release_saved(ctx);
1968 1968
1969 return 0; 1969 return 0;
1970 } 1970 }
1971 1971
1972 static u64 spufs_event_mask_get(struct spu_context *ctx) 1972 static u64 spufs_event_mask_get(struct spu_context *ctx)
1973 { 1973 {
1974 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1974 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1975 return lscsa->event_mask.slot[0]; 1975 return lscsa->event_mask.slot[0];
1976 } 1976 }
1977 1977
1978 DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get, 1978 DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
1979 spufs_event_mask_set, "0x%llx\n", 1979 spufs_event_mask_set, "0x%llx\n",
1980 SPU_ATTR_ACQUIRE_SAVED); 1980 SPU_ATTR_ACQUIRE_SAVED);
1981 1981
1982 static u64 spufs_event_status_get(struct spu_context *ctx) 1982 static u64 spufs_event_status_get(struct spu_context *ctx)
1983 { 1983 {
1984 struct spu_state *state = &ctx->csa; 1984 struct spu_state *state = &ctx->csa;
1985 u64 stat; 1985 u64 stat;
1986 stat = state->spu_chnlcnt_RW[0]; 1986 stat = state->spu_chnlcnt_RW[0];
1987 if (stat) 1987 if (stat)
1988 return state->spu_chnldata_RW[0]; 1988 return state->spu_chnldata_RW[0];
1989 return 0; 1989 return 0;
1990 } 1990 }
1991 DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get, 1991 DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
1992 NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED) 1992 NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
1993 1993
1994 static int spufs_srr0_set(void *data, u64 val) 1994 static int spufs_srr0_set(void *data, u64 val)
1995 { 1995 {
1996 struct spu_context *ctx = data; 1996 struct spu_context *ctx = data;
1997 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1997 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1998 int ret; 1998 int ret;
1999 1999
2000 ret = spu_acquire_saved(ctx); 2000 ret = spu_acquire_saved(ctx);
2001 if (ret) 2001 if (ret)
2002 return ret; 2002 return ret;
2003 lscsa->srr0.slot[0] = (u32) val; 2003 lscsa->srr0.slot[0] = (u32) val;
2004 spu_release_saved(ctx); 2004 spu_release_saved(ctx);
2005 2005
2006 return 0; 2006 return 0;
2007 } 2007 }
2008 2008
2009 static u64 spufs_srr0_get(struct spu_context *ctx) 2009 static u64 spufs_srr0_get(struct spu_context *ctx)
2010 { 2010 {
2011 struct spu_lscsa *lscsa = ctx->csa.lscsa; 2011 struct spu_lscsa *lscsa = ctx->csa.lscsa;
2012 return lscsa->srr0.slot[0]; 2012 return lscsa->srr0.slot[0];
2013 } 2013 }
2014 DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set, 2014 DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
2015 "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED) 2015 "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
2016 2016
2017 static u64 spufs_id_get(struct spu_context *ctx) 2017 static u64 spufs_id_get(struct spu_context *ctx)
2018 { 2018 {
2019 u64 num; 2019 u64 num;
2020 2020
2021 if (ctx->state == SPU_STATE_RUNNABLE) 2021 if (ctx->state == SPU_STATE_RUNNABLE)
2022 num = ctx->spu->number; 2022 num = ctx->spu->number;
2023 else 2023 else
2024 num = (unsigned int)-1; 2024 num = (unsigned int)-1;
2025 2025
2026 return num; 2026 return num;
2027 } 2027 }
2028 DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n", 2028 DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
2029 SPU_ATTR_ACQUIRE) 2029 SPU_ATTR_ACQUIRE)
2030 2030
2031 static u64 spufs_object_id_get(struct spu_context *ctx) 2031 static u64 spufs_object_id_get(struct spu_context *ctx)
2032 { 2032 {
2033 /* FIXME: Should there really be no locking here? */ 2033 /* FIXME: Should there really be no locking here? */
2034 return ctx->object_id; 2034 return ctx->object_id;
2035 } 2035 }
2036 2036
2037 static int spufs_object_id_set(void *data, u64 id) 2037 static int spufs_object_id_set(void *data, u64 id)
2038 { 2038 {
2039 struct spu_context *ctx = data; 2039 struct spu_context *ctx = data;
2040 ctx->object_id = id; 2040 ctx->object_id = id;
2041 2041
2042 return 0; 2042 return 0;
2043 } 2043 }
2044 2044
2045 DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get, 2045 DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
2046 spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE); 2046 spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);
2047 2047
2048 static u64 spufs_lslr_get(struct spu_context *ctx) 2048 static u64 spufs_lslr_get(struct spu_context *ctx)
2049 { 2049 {
2050 return ctx->csa.priv2.spu_lslr_RW; 2050 return ctx->csa.priv2.spu_lslr_RW;
2051 } 2051 }
2052 DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n", 2052 DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
2053 SPU_ATTR_ACQUIRE_SAVED); 2053 SPU_ATTR_ACQUIRE_SAVED);
2054 2054
2055 static int spufs_info_open(struct inode *inode, struct file *file) 2055 static int spufs_info_open(struct inode *inode, struct file *file)
2056 { 2056 {
2057 struct spufs_inode_info *i = SPUFS_I(inode); 2057 struct spufs_inode_info *i = SPUFS_I(inode);
2058 struct spu_context *ctx = i->i_ctx; 2058 struct spu_context *ctx = i->i_ctx;
2059 file->private_data = ctx; 2059 file->private_data = ctx;
2060 return 0; 2060 return 0;
2061 } 2061 }
2062 2062
2063 static int spufs_caps_show(struct seq_file *s, void *private) 2063 static int spufs_caps_show(struct seq_file *s, void *private)
2064 { 2064 {
2065 struct spu_context *ctx = s->private; 2065 struct spu_context *ctx = s->private;
2066 2066
2067 if (!(ctx->flags & SPU_CREATE_NOSCHED)) 2067 if (!(ctx->flags & SPU_CREATE_NOSCHED))
2068 seq_puts(s, "sched\n"); 2068 seq_puts(s, "sched\n");
2069 if (!(ctx->flags & SPU_CREATE_ISOLATE)) 2069 if (!(ctx->flags & SPU_CREATE_ISOLATE))
2070 seq_puts(s, "step\n"); 2070 seq_puts(s, "step\n");
2071 return 0; 2071 return 0;
2072 } 2072 }
2073 2073
2074 static int spufs_caps_open(struct inode *inode, struct file *file) 2074 static int spufs_caps_open(struct inode *inode, struct file *file)
2075 { 2075 {
2076 return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx); 2076 return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
2077 } 2077 }
2078 2078
2079 static const struct file_operations spufs_caps_fops = { 2079 static const struct file_operations spufs_caps_fops = {
2080 .open = spufs_caps_open, 2080 .open = spufs_caps_open,
2081 .read = seq_read, 2081 .read = seq_read,
2082 .llseek = seq_lseek, 2082 .llseek = seq_lseek,
2083 .release = single_release, 2083 .release = single_release,
2084 }; 2084 };
2085 2085
2086 static ssize_t __spufs_mbox_info_read(struct spu_context *ctx, 2086 static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
2087 char __user *buf, size_t len, loff_t *pos) 2087 char __user *buf, size_t len, loff_t *pos)
2088 { 2088 {
2089 u32 data; 2089 u32 data;
2090 2090
2091 /* EOF if there's no entry in the mbox */ 2091 /* EOF if there's no entry in the mbox */
2092 if (!(ctx->csa.prob.mb_stat_R & 0x0000ff)) 2092 if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
2093 return 0; 2093 return 0;
2094 2094
2095 data = ctx->csa.prob.pu_mb_R; 2095 data = ctx->csa.prob.pu_mb_R;
2096 2096
2097 return simple_read_from_buffer(buf, len, pos, &data, sizeof data); 2097 return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
2098 } 2098 }
2099 2099
2100 static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf, 2100 static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
2101 size_t len, loff_t *pos) 2101 size_t len, loff_t *pos)
2102 { 2102 {
2103 int ret; 2103 int ret;
2104 struct spu_context *ctx = file->private_data; 2104 struct spu_context *ctx = file->private_data;
2105 2105
2106 if (!access_ok(VERIFY_WRITE, buf, len)) 2106 if (!access_ok(VERIFY_WRITE, buf, len))
2107 return -EFAULT; 2107 return -EFAULT;
2108 2108
2109 ret = spu_acquire_saved(ctx); 2109 ret = spu_acquire_saved(ctx);
2110 if (ret) 2110 if (ret)
2111 return ret; 2111 return ret;
2112 spin_lock(&ctx->csa.register_lock); 2112 spin_lock(&ctx->csa.register_lock);
2113 ret = __spufs_mbox_info_read(ctx, buf, len, pos); 2113 ret = __spufs_mbox_info_read(ctx, buf, len, pos);
2114 spin_unlock(&ctx->csa.register_lock); 2114 spin_unlock(&ctx->csa.register_lock);
2115 spu_release_saved(ctx); 2115 spu_release_saved(ctx);
2116 2116
2117 return ret; 2117 return ret;
2118 } 2118 }
2119 2119
2120 static const struct file_operations spufs_mbox_info_fops = { 2120 static const struct file_operations spufs_mbox_info_fops = {
2121 .open = spufs_info_open, 2121 .open = spufs_info_open,
2122 .read = spufs_mbox_info_read, 2122 .read = spufs_mbox_info_read,
2123 .llseek = generic_file_llseek, 2123 .llseek = generic_file_llseek,
2124 }; 2124 };
2125 2125
2126 static ssize_t __spufs_ibox_info_read(struct spu_context *ctx, 2126 static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
2127 char __user *buf, size_t len, loff_t *pos) 2127 char __user *buf, size_t len, loff_t *pos)
2128 { 2128 {
2129 u32 data; 2129 u32 data;
2130 2130
2131 /* EOF if there's no entry in the ibox */ 2131 /* EOF if there's no entry in the ibox */
2132 if (!(ctx->csa.prob.mb_stat_R & 0xff0000)) 2132 if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
2133 return 0; 2133 return 0;
2134 2134
2135 data = ctx->csa.priv2.puint_mb_R; 2135 data = ctx->csa.priv2.puint_mb_R;
2136 2136
2137 return simple_read_from_buffer(buf, len, pos, &data, sizeof data); 2137 return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
2138 } 2138 }
2139 2139
2140 static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf, 2140 static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
2141 size_t len, loff_t *pos) 2141 size_t len, loff_t *pos)
2142 { 2142 {
2143 struct spu_context *ctx = file->private_data; 2143 struct spu_context *ctx = file->private_data;
2144 int ret; 2144 int ret;
2145 2145
2146 if (!access_ok(VERIFY_WRITE, buf, len)) 2146 if (!access_ok(VERIFY_WRITE, buf, len))
2147 return -EFAULT; 2147 return -EFAULT;
2148 2148
2149 ret = spu_acquire_saved(ctx); 2149 ret = spu_acquire_saved(ctx);
2150 if (ret) 2150 if (ret)
2151 return ret; 2151 return ret;
2152 spin_lock(&ctx->csa.register_lock); 2152 spin_lock(&ctx->csa.register_lock);
2153 ret = __spufs_ibox_info_read(ctx, buf, len, pos); 2153 ret = __spufs_ibox_info_read(ctx, buf, len, pos);
2154 spin_unlock(&ctx->csa.register_lock); 2154 spin_unlock(&ctx->csa.register_lock);
2155 spu_release_saved(ctx); 2155 spu_release_saved(ctx);
2156 2156
2157 return ret; 2157 return ret;
2158 } 2158 }
2159 2159
2160 static const struct file_operations spufs_ibox_info_fops = { 2160 static const struct file_operations spufs_ibox_info_fops = {
2161 .open = spufs_info_open, 2161 .open = spufs_info_open,
2162 .read = spufs_ibox_info_read, 2162 .read = spufs_ibox_info_read,
2163 .llseek = generic_file_llseek, 2163 .llseek = generic_file_llseek,
2164 }; 2164 };
2165 2165
2166 static ssize_t __spufs_wbox_info_read(struct spu_context *ctx, 2166 static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
2167 char __user *buf, size_t len, loff_t *pos) 2167 char __user *buf, size_t len, loff_t *pos)
2168 { 2168 {
2169 int i, cnt; 2169 int i, cnt;
2170 u32 data[4]; 2170 u32 data[4];
2171 u32 wbox_stat; 2171 u32 wbox_stat;
2172 2172
2173 wbox_stat = ctx->csa.prob.mb_stat_R; 2173 wbox_stat = ctx->csa.prob.mb_stat_R;
2174 cnt = 4 - ((wbox_stat & 0x00ff00) >> 8); 2174 cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
2175 for (i = 0; i < cnt; i++) { 2175 for (i = 0; i < cnt; i++) {
2176 data[i] = ctx->csa.spu_mailbox_data[i]; 2176 data[i] = ctx->csa.spu_mailbox_data[i];
2177 } 2177 }
2178 2178
2179 return simple_read_from_buffer(buf, len, pos, &data, 2179 return simple_read_from_buffer(buf, len, pos, &data,
2180 cnt * sizeof(u32)); 2180 cnt * sizeof(u32));
2181 } 2181 }
2182 2182
2183 static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf, 2183 static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
2184 size_t len, loff_t *pos) 2184 size_t len, loff_t *pos)
2185 { 2185 {
2186 struct spu_context *ctx = file->private_data; 2186 struct spu_context *ctx = file->private_data;
2187 int ret; 2187 int ret;
2188 2188
2189 if (!access_ok(VERIFY_WRITE, buf, len)) 2189 if (!access_ok(VERIFY_WRITE, buf, len))
2190 return -EFAULT; 2190 return -EFAULT;
2191 2191
2192 ret = spu_acquire_saved(ctx); 2192 ret = spu_acquire_saved(ctx);
2193 if (ret) 2193 if (ret)
2194 return ret; 2194 return ret;
2195 spin_lock(&ctx->csa.register_lock); 2195 spin_lock(&ctx->csa.register_lock);
2196 ret = __spufs_wbox_info_read(ctx, buf, len, pos); 2196 ret = __spufs_wbox_info_read(ctx, buf, len, pos);
2197 spin_unlock(&ctx->csa.register_lock); 2197 spin_unlock(&ctx->csa.register_lock);
2198 spu_release_saved(ctx); 2198 spu_release_saved(ctx);
2199 2199
2200 return ret; 2200 return ret;
2201 } 2201 }
2202 2202
2203 static const struct file_operations spufs_wbox_info_fops = { 2203 static const struct file_operations spufs_wbox_info_fops = {
2204 .open = spufs_info_open, 2204 .open = spufs_info_open,
2205 .read = spufs_wbox_info_read, 2205 .read = spufs_wbox_info_read,
2206 .llseek = generic_file_llseek, 2206 .llseek = generic_file_llseek,
2207 }; 2207 };
2208 2208
2209 static ssize_t __spufs_dma_info_read(struct spu_context *ctx, 2209 static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
2210 char __user *buf, size_t len, loff_t *pos) 2210 char __user *buf, size_t len, loff_t *pos)
2211 { 2211 {
2212 struct spu_dma_info info; 2212 struct spu_dma_info info;
2213 struct mfc_cq_sr *qp, *spuqp; 2213 struct mfc_cq_sr *qp, *spuqp;
2214 int i; 2214 int i;
2215 2215
2216 info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW; 2216 info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
2217 info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0]; 2217 info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
2218 info.dma_info_status = ctx->csa.spu_chnldata_RW[24]; 2218 info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
2219 info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25]; 2219 info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
2220 info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27]; 2220 info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
2221 for (i = 0; i < 16; i++) { 2221 for (i = 0; i < 16; i++) {
2222 qp = &info.dma_info_command_data[i]; 2222 qp = &info.dma_info_command_data[i];
2223 spuqp = &ctx->csa.priv2.spuq[i]; 2223 spuqp = &ctx->csa.priv2.spuq[i];
2224 2224
2225 qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW; 2225 qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
2226 qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW; 2226 qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
2227 qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW; 2227 qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
2228 qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW; 2228 qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
2229 } 2229 }
2230 2230
2231 return simple_read_from_buffer(buf, len, pos, &info, 2231 return simple_read_from_buffer(buf, len, pos, &info,
2232 sizeof info); 2232 sizeof info);
2233 } 2233 }
2234 2234
2235 static ssize_t spufs_dma_info_read(struct file *file, char __user *buf, 2235 static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
2236 size_t len, loff_t *pos) 2236 size_t len, loff_t *pos)
2237 { 2237 {
2238 struct spu_context *ctx = file->private_data; 2238 struct spu_context *ctx = file->private_data;
2239 int ret; 2239 int ret;
2240 2240
2241 if (!access_ok(VERIFY_WRITE, buf, len)) 2241 if (!access_ok(VERIFY_WRITE, buf, len))
2242 return -EFAULT; 2242 return -EFAULT;
2243 2243
2244 ret = spu_acquire_saved(ctx); 2244 ret = spu_acquire_saved(ctx);
2245 if (ret) 2245 if (ret)
2246 return ret; 2246 return ret;
2247 spin_lock(&ctx->csa.register_lock); 2247 spin_lock(&ctx->csa.register_lock);
2248 ret = __spufs_dma_info_read(ctx, buf, len, pos); 2248 ret = __spufs_dma_info_read(ctx, buf, len, pos);
2249 spin_unlock(&ctx->csa.register_lock); 2249 spin_unlock(&ctx->csa.register_lock);
2250 spu_release_saved(ctx); 2250 spu_release_saved(ctx);
2251 2251
2252 return ret; 2252 return ret;
2253 } 2253 }
2254 2254
2255 static const struct file_operations spufs_dma_info_fops = { 2255 static const struct file_operations spufs_dma_info_fops = {
2256 .open = spufs_info_open, 2256 .open = spufs_info_open,
2257 .read = spufs_dma_info_read, 2257 .read = spufs_dma_info_read,
2258 .llseek = no_llseek, 2258 .llseek = no_llseek,
2259 }; 2259 };
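Unlike the attribute files, dma_info returns a raw binary snapshot of struct spu_dma_info assembled from the saved context above. A hedged sketch of consuming it from user space; the structure is mirrored locally here and its 64-bit field widths are an assumption, the real layout being the one in the kernel's SPU info header:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Local mirror of the snapshot filled in by __spufs_dma_info_read();
 * field names follow the kernel code, widths are assumed to be 64-bit. */
struct mfc_cq_sr {
	uint64_t mfc_cq_data0_RW;
	uint64_t mfc_cq_data1_RW;
	uint64_t mfc_cq_data2_RW;
	uint64_t mfc_cq_data3_RW;
};

struct spu_dma_info {
	uint64_t dma_info_type;
	uint64_t dma_info_mask;
	uint64_t dma_info_status;
	uint64_t dma_info_stall_and_notify;
	uint64_t dma_info_atomic_command_status;
	struct mfc_cq_sr dma_info_command_data[16];
};

int main(void)
{
	struct spu_dma_info info;
	int fd = open("/spu/example/dma_info", O_RDONLY);	/* illustrative path */

	if (fd < 0)
		return 1;
	if (read(fd, &info, sizeof(info)) == sizeof(info))
		printf("tag mask 0x%llx status 0x%llx\n",
		       (unsigned long long)info.dma_info_mask,
		       (unsigned long long)info.dma_info_status);
	close(fd);
	return 0;
}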
2260 2260
2261 static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx, 2261 static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
2262 char __user *buf, size_t len, loff_t *pos) 2262 char __user *buf, size_t len, loff_t *pos)
2263 { 2263 {
2264 struct spu_proxydma_info info; 2264 struct spu_proxydma_info info;
2265 struct mfc_cq_sr *qp, *puqp; 2265 struct mfc_cq_sr *qp, *puqp;
2266 int ret = sizeof info; 2266 int ret = sizeof info;
2267 int i; 2267 int i;
2268 2268
2269 if (len < ret) 2269 if (len < ret)
2270 return -EINVAL; 2270 return -EINVAL;
2271 2271
2272 if (!access_ok(VERIFY_WRITE, buf, len)) 2272 if (!access_ok(VERIFY_WRITE, buf, len))
2273 return -EFAULT; 2273 return -EFAULT;
2274 2274
2275 info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW; 2275 info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
2276 info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW; 2276 info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
2277 info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R; 2277 info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
2278 for (i = 0; i < 8; i++) { 2278 for (i = 0; i < 8; i++) {
2279 qp = &info.proxydma_info_command_data[i]; 2279 qp = &info.proxydma_info_command_data[i];
2280 puqp = &ctx->csa.priv2.puq[i]; 2280 puqp = &ctx->csa.priv2.puq[i];
2281 2281
2282 qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW; 2282 qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
2283 qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW; 2283 qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
2284 qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW; 2284 qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
2285 qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW; 2285 qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
2286 } 2286 }
2287 2287
2288 return simple_read_from_buffer(buf, len, pos, &info, 2288 return simple_read_from_buffer(buf, len, pos, &info,
2289 sizeof info); 2289 sizeof info);
2290 } 2290 }
2291 2291
2292 static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf, 2292 static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
2293 size_t len, loff_t *pos) 2293 size_t len, loff_t *pos)
2294 { 2294 {
2295 struct spu_context *ctx = file->private_data; 2295 struct spu_context *ctx = file->private_data;
2296 int ret; 2296 int ret;
2297 2297
2298 ret = spu_acquire_saved(ctx); 2298 ret = spu_acquire_saved(ctx);
2299 if (ret) 2299 if (ret)
2300 return ret; 2300 return ret;
2301 spin_lock(&ctx->csa.register_lock); 2301 spin_lock(&ctx->csa.register_lock);
2302 ret = __spufs_proxydma_info_read(ctx, buf, len, pos); 2302 ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
2303 spin_unlock(&ctx->csa.register_lock); 2303 spin_unlock(&ctx->csa.register_lock);
2304 spu_release_saved(ctx); 2304 spu_release_saved(ctx);
2305 2305
2306 return ret; 2306 return ret;
2307 } 2307 }
2308 2308
2309 static const struct file_operations spufs_proxydma_info_fops = { 2309 static const struct file_operations spufs_proxydma_info_fops = {
2310 .open = spufs_info_open, 2310 .open = spufs_info_open,
2311 .read = spufs_proxydma_info_read, 2311 .read = spufs_proxydma_info_read,
2312 .llseek = no_llseek, 2312 .llseek = no_llseek,
2313 }; 2313 };
2314 2314
2315 static int spufs_show_tid(struct seq_file *s, void *private) 2315 static int spufs_show_tid(struct seq_file *s, void *private)
2316 { 2316 {
2317 struct spu_context *ctx = s->private; 2317 struct spu_context *ctx = s->private;
2318 2318
2319 seq_printf(s, "%d\n", ctx->tid); 2319 seq_printf(s, "%d\n", ctx->tid);
2320 return 0; 2320 return 0;
2321 } 2321 }
2322 2322
2323 static int spufs_tid_open(struct inode *inode, struct file *file) 2323 static int spufs_tid_open(struct inode *inode, struct file *file)
2324 { 2324 {
2325 return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx); 2325 return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
2326 } 2326 }
2327 2327
2328 static const struct file_operations spufs_tid_fops = { 2328 static const struct file_operations spufs_tid_fops = {
2329 .open = spufs_tid_open, 2329 .open = spufs_tid_open,
2330 .read = seq_read, 2330 .read = seq_read,
2331 .llseek = seq_lseek, 2331 .llseek = seq_lseek,
2332 .release = single_release, 2332 .release = single_release,
2333 }; 2333 };
2334 2334
2335 static const char *ctx_state_names[] = { 2335 static const char *ctx_state_names[] = {
2336 "user", "system", "iowait", "loaded" 2336 "user", "system", "iowait", "loaded"
2337 }; 2337 };
2338 2338
2339 static unsigned long long spufs_acct_time(struct spu_context *ctx, 2339 static unsigned long long spufs_acct_time(struct spu_context *ctx,
2340 enum spu_utilization_state state) 2340 enum spu_utilization_state state)
2341 { 2341 {
2342 struct timespec ts; 2342 struct timespec ts;
2343 unsigned long long time = ctx->stats.times[state]; 2343 unsigned long long time = ctx->stats.times[state];
2344 2344
2345 /* 2345 /*
2346 * In general, utilization statistics are updated by the controlling 2346 * In general, utilization statistics are updated by the controlling
2347 * thread as the spu context moves through various well defined 2347 * thread as the spu context moves through various well defined
2348 * state transitions, but if the context is lazily loaded its 2348 * state transitions, but if the context is lazily loaded its
2349 * utilization statistics are not updated as the controlling thread 2349 * utilization statistics are not updated as the controlling thread
2350 * is not tightly coupled with the execution of the spu context. We 2350 * is not tightly coupled with the execution of the spu context. We
2351 * calculate and apply the time delta from the last recorded state 2351 * calculate and apply the time delta from the last recorded state
2352 * of the spu context. 2352 * of the spu context.
2353 */ 2353 */
2354 if (ctx->spu && ctx->stats.util_state == state) { 2354 if (ctx->spu && ctx->stats.util_state == state) {
2355 ktime_get_ts(&ts); 2355 ktime_get_ts(&ts);
2356 time += timespec_to_ns(&ts) - ctx->stats.tstamp; 2356 time += timespec_to_ns(&ts) - ctx->stats.tstamp;
2357 } 2357 }
2358 2358
2359 return time / NSEC_PER_MSEC; 2359 return time / NSEC_PER_MSEC;
2360 } 2360 }
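The comment above is the key to reading these numbers: the per-state counters in ctx->stats only advance on state transitions, so for the state the context currently occupies the span since the last timestamp has to be added on the fly, and the result is reported in milliseconds. A tiny stand-alone illustration of that arithmetic with made-up values:

#include <stdio.h>

/* Stand-alone illustration of the accounting rule above.  All inputs
 * are nanoseconds; the stat file reports milliseconds. */
int main(void)
{
	unsigned long long stored_ns = 1500000000ULL;	/* 1.5 s already accumulated */
	unsigned long long tstamp_ns = 8000000000ULL;	/* time of last transition   */
	unsigned long long now_ns    = 9200000000ULL;	/* "current" time            */
	int still_in_state = 1;

	unsigned long long t = stored_ns;
	if (still_in_state)
		t += now_ns - tstamp_ns;		/* add the live delta */

	printf("%llu ms\n", t / 1000000ULL);		/* prints 2700 ms */
	return 0;
}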
2361 2361
2362 static unsigned long long spufs_slb_flts(struct spu_context *ctx) 2362 static unsigned long long spufs_slb_flts(struct spu_context *ctx)
2363 { 2363 {
2364 unsigned long long slb_flts = ctx->stats.slb_flt; 2364 unsigned long long slb_flts = ctx->stats.slb_flt;
2365 2365
2366 if (ctx->state == SPU_STATE_RUNNABLE) { 2366 if (ctx->state == SPU_STATE_RUNNABLE) {
2367 slb_flts += (ctx->spu->stats.slb_flt - 2367 slb_flts += (ctx->spu->stats.slb_flt -
2368 ctx->stats.slb_flt_base); 2368 ctx->stats.slb_flt_base);
2369 } 2369 }
2370 2370
2371 return slb_flts; 2371 return slb_flts;
2372 } 2372 }
2373 2373
2374 static unsigned long long spufs_class2_intrs(struct spu_context *ctx) 2374 static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
2375 { 2375 {
2376 unsigned long long class2_intrs = ctx->stats.class2_intr; 2376 unsigned long long class2_intrs = ctx->stats.class2_intr;
2377 2377
2378 if (ctx->state == SPU_STATE_RUNNABLE) { 2378 if (ctx->state == SPU_STATE_RUNNABLE) {
2379 class2_intrs += (ctx->spu->stats.class2_intr - 2379 class2_intrs += (ctx->spu->stats.class2_intr -
2380 ctx->stats.class2_intr_base); 2380 ctx->stats.class2_intr_base);
2381 } 2381 }
2382 2382
2383 return class2_intrs; 2383 return class2_intrs;
2384 } 2384 }
2385 2385
2386 2386
2387 static int spufs_show_stat(struct seq_file *s, void *private) 2387 static int spufs_show_stat(struct seq_file *s, void *private)
2388 { 2388 {
2389 struct spu_context *ctx = s->private; 2389 struct spu_context *ctx = s->private;
2390 int ret; 2390 int ret;
2391 2391
2392 ret = spu_acquire(ctx); 2392 ret = spu_acquire(ctx);
2393 if (ret) 2393 if (ret)
2394 return ret; 2394 return ret;
2395 2395
2396 seq_printf(s, "%s %llu %llu %llu %llu " 2396 seq_printf(s, "%s %llu %llu %llu %llu "
2397 "%llu %llu %llu %llu %llu %llu %llu %llu\n", 2397 "%llu %llu %llu %llu %llu %llu %llu %llu\n",
2398 ctx_state_names[ctx->stats.util_state], 2398 ctx_state_names[ctx->stats.util_state],
2399 spufs_acct_time(ctx, SPU_UTIL_USER), 2399 spufs_acct_time(ctx, SPU_UTIL_USER),
2400 spufs_acct_time(ctx, SPU_UTIL_SYSTEM), 2400 spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
2401 spufs_acct_time(ctx, SPU_UTIL_IOWAIT), 2401 spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
2402 spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED), 2402 spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
2403 ctx->stats.vol_ctx_switch, 2403 ctx->stats.vol_ctx_switch,
2404 ctx->stats.invol_ctx_switch, 2404 ctx->stats.invol_ctx_switch,
2405 spufs_slb_flts(ctx), 2405 spufs_slb_flts(ctx),
2406 ctx->stats.hash_flt, 2406 ctx->stats.hash_flt,
2407 ctx->stats.min_flt, 2407 ctx->stats.min_flt,
2408 ctx->stats.maj_flt, 2408 ctx->stats.maj_flt,
2409 spufs_class2_intrs(ctx), 2409 spufs_class2_intrs(ctx),
2410 ctx->stats.libassist); 2410 ctx->stats.libassist);
2411 spu_release(ctx); 2411 spu_release(ctx);
2412 return 0; 2412 return 0;
2413 } 2413 }
2414 2414
2415 static int spufs_stat_open(struct inode *inode, struct file *file) 2415 static int spufs_stat_open(struct inode *inode, struct file *file)
2416 { 2416 {
2417 return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx); 2417 return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
2418 } 2418 }
2419 2419
2420 static const struct file_operations spufs_stat_fops = { 2420 static const struct file_operations spufs_stat_fops = {
2421 .open = spufs_stat_open, 2421 .open = spufs_stat_open,
2422 .read = seq_read, 2422 .read = seq_read,
2423 .llseek = seq_lseek, 2423 .llseek = seq_lseek,
2424 .release = single_release, 2424 .release = single_release,
2425 }; 2425 };
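The stat file therefore emits a single line: the utilization-state name followed by twelve 64-bit counters in the order given to seq_printf() above (the four accounted times in milliseconds, voluntary and involuntary context switches, SLB faults, hash/minor/major faults, class-2 interrupts and the libassist count). A hedged parser sketch, with an illustrative path:

#include <stdio.h>

/* Parse the single line written by spufs_show_stat(): a state name
 * followed by twelve 64-bit counters. */
int main(void)
{
	char state[16];
	unsigned long long v[12];
	FILE *f = fopen("/spu/example/stat", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%15s %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
		   state, &v[0], &v[1], &v[2], &v[3], &v[4], &v[5],
		   &v[6], &v[7], &v[8], &v[9], &v[10], &v[11]) == 13)
		printf("state=%s user_ms=%llu loaded_ms=%llu\n",
		       state, v[0], v[3]);
	fclose(f);
	return 0;
}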
2426 2426
2427 static inline int spufs_switch_log_used(struct spu_context *ctx) 2427 static inline int spufs_switch_log_used(struct spu_context *ctx)
2428 { 2428 {
2429 return (ctx->switch_log->head - ctx->switch_log->tail) % 2429 return (ctx->switch_log->head - ctx->switch_log->tail) %
2430 SWITCH_LOG_BUFSIZE; 2430 SWITCH_LOG_BUFSIZE;
2431 } 2431 }
2432 2432
2433 static inline int spufs_switch_log_avail(struct spu_context *ctx) 2433 static inline int spufs_switch_log_avail(struct spu_context *ctx)
2434 { 2434 {
2435 return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx); 2435 return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
2436 } 2436 }
2437 2437
2438 static int spufs_switch_log_open(struct inode *inode, struct file *file) 2438 static int spufs_switch_log_open(struct inode *inode, struct file *file)
2439 { 2439 {
2440 struct spu_context *ctx = SPUFS_I(inode)->i_ctx; 2440 struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2441 int rc; 2441 int rc;
2442 2442
2443 rc = spu_acquire(ctx); 2443 rc = spu_acquire(ctx);
2444 if (rc) 2444 if (rc)
2445 return rc; 2445 return rc;
2446 2446
2447 if (ctx->switch_log) { 2447 if (ctx->switch_log) {
2448 rc = -EBUSY; 2448 rc = -EBUSY;
2449 goto out; 2449 goto out;
2450 } 2450 }
2451 2451
2452 ctx->switch_log = kmalloc(sizeof(struct switch_log) + 2452 ctx->switch_log = kmalloc(sizeof(struct switch_log) +
2453 SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry), 2453 SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
2454 GFP_KERNEL); 2454 GFP_KERNEL);
2455 2455
2456 if (!ctx->switch_log) { 2456 if (!ctx->switch_log) {
2457 rc = -ENOMEM; 2457 rc = -ENOMEM;
2458 goto out; 2458 goto out;
2459 } 2459 }
2460 2460
2461 ctx->switch_log->head = ctx->switch_log->tail = 0; 2461 ctx->switch_log->head = ctx->switch_log->tail = 0;
2462 init_waitqueue_head(&ctx->switch_log->wait); 2462 init_waitqueue_head(&ctx->switch_log->wait);
2463 rc = 0; 2463 rc = 0;
2464 2464
2465 out: 2465 out:
2466 spu_release(ctx); 2466 spu_release(ctx);
2467 return rc; 2467 return rc;
2468 } 2468 }
2469 2469
2470 static int spufs_switch_log_release(struct inode *inode, struct file *file) 2470 static int spufs_switch_log_release(struct inode *inode, struct file *file)
2471 { 2471 {
2472 struct spu_context *ctx = SPUFS_I(inode)->i_ctx; 2472 struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2473 int rc; 2473 int rc;
2474 2474
2475 rc = spu_acquire(ctx); 2475 rc = spu_acquire(ctx);
2476 if (rc) 2476 if (rc)
2477 return rc; 2477 return rc;
2478 2478
2479 kfree(ctx->switch_log); 2479 kfree(ctx->switch_log);
2480 ctx->switch_log = NULL; 2480 ctx->switch_log = NULL;
2481 spu_release(ctx); 2481 spu_release(ctx);
2482 2482
2483 return 0; 2483 return 0;
2484 } 2484 }
2485 2485
2486 static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n) 2486 static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
2487 { 2487 {
2488 struct switch_log_entry *p; 2488 struct switch_log_entry *p;
2489 2489
2490 p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE; 2490 p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;
2491 2491
2492 return snprintf(tbuf, n, "%u.%09u %d %u %u %llu\n", 2492 return snprintf(tbuf, n, "%u.%09u %d %u %u %llu\n",
2493 (unsigned int) p->tstamp.tv_sec, 2493 (unsigned int) p->tstamp.tv_sec,
2494 (unsigned int) p->tstamp.tv_nsec, 2494 (unsigned int) p->tstamp.tv_nsec,
2495 p->spu_id, 2495 p->spu_id,
2496 (unsigned int) p->type, 2496 (unsigned int) p->type,
2497 (unsigned int) p->val, 2497 (unsigned int) p->val,
2498 (unsigned long long) p->timebase); 2498 (unsigned long long) p->timebase);
2499 } 2499 }
2500 2500
2501 static ssize_t spufs_switch_log_read(struct file *file, char __user *buf, 2501 static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
2502 size_t len, loff_t *ppos) 2502 size_t len, loff_t *ppos)
2503 { 2503 {
2504 struct inode *inode = file->f_path.dentry->d_inode; 2504 struct inode *inode = file->f_path.dentry->d_inode;
2505 struct spu_context *ctx = SPUFS_I(inode)->i_ctx; 2505 struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2506 int error = 0, cnt = 0; 2506 int error = 0, cnt = 0;
2507 2507
2508 if (!buf) 2508 if (!buf)
2509 return -EINVAL; 2509 return -EINVAL;
2510 2510
2511 error = spu_acquire(ctx); 2511 error = spu_acquire(ctx);
2512 if (error) 2512 if (error)
2513 return error; 2513 return error;
2514 2514
2515 while (cnt < len) { 2515 while (cnt < len) {
2516 char tbuf[128]; 2516 char tbuf[128];
2517 int width; 2517 int width;
2518 2518
2519 if (spufs_switch_log_used(ctx) == 0) { 2519 if (spufs_switch_log_used(ctx) == 0) {
2520 if (cnt > 0) { 2520 if (cnt > 0) {
2521 /* If there's data ready to go, we can 2521 /* If there's data ready to go, we can
2522 * just return straight away */ 2522 * just return straight away */
2523 break; 2523 break;
2524 2524
2525 } else if (file->f_flags & O_NONBLOCK) { 2525 } else if (file->f_flags & O_NONBLOCK) {
2526 error = -EAGAIN; 2526 error = -EAGAIN;
2527 break; 2527 break;
2528 2528
2529 } else { 2529 } else {
2530 /* spufs_wait will drop the mutex and 2530 /* spufs_wait will drop the mutex and
2531 * re-acquire, but since we're in read(), the 2531 * re-acquire, but since we're in read(), the
2532 * file cannot be _released (and so 2532 * file cannot be _released (and so
2533 * ctx->switch_log is stable). 2533 * ctx->switch_log is stable).
2534 */ 2534 */
2535 error = spufs_wait(ctx->switch_log->wait, 2535 error = spufs_wait(ctx->switch_log->wait,
2536 spufs_switch_log_used(ctx) > 0); 2536 spufs_switch_log_used(ctx) > 0);
2537 2537
2538 /* On error, spufs_wait returns without the 2538 /* On error, spufs_wait returns without the
2539 * state mutex held */ 2539 * state mutex held */
2540 if (error) 2540 if (error)
2541 return error; 2541 return error;
2542 2542
2543 /* We may have had entries read from underneath 2543 /* We may have had entries read from underneath
2544 * us while we dropped the mutex in spufs_wait, 2544 * us while we dropped the mutex in spufs_wait,
2545 * so re-check */ 2545 * so re-check */
2546 if (spufs_switch_log_used(ctx) == 0) 2546 if (spufs_switch_log_used(ctx) == 0)
2547 continue; 2547 continue;
2548 } 2548 }
2549 } 2549 }
2550 2550
2551 width = switch_log_sprint(ctx, tbuf, sizeof(tbuf)); 2551 width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
2552 if (width < len) 2552 if (width < len)
2553 ctx->switch_log->tail = 2553 ctx->switch_log->tail =
2554 (ctx->switch_log->tail + 1) % 2554 (ctx->switch_log->tail + 1) %
2555 SWITCH_LOG_BUFSIZE; 2555 SWITCH_LOG_BUFSIZE;
2556 else 2556 else
2557 /* If the record is greater than space available return 2557 /* If the record is greater than space available return
2558 * partial buffer (so far) */ 2558 * partial buffer (so far) */
2559 break; 2559 break;
2560 2560
2561 error = copy_to_user(buf + cnt, tbuf, width); 2561 error = copy_to_user(buf + cnt, tbuf, width);
2562 if (error) 2562 if (error)
2563 break; 2563 break;
2564 cnt += width; 2564 cnt += width;
2565 } 2565 }
2566 2566
2567 spu_release(ctx); 2567 spu_release(ctx);
2568 2568
2569 return cnt == 0 ? error : cnt; 2569 return cnt == 0 ? error : cnt;
2570 } 2570 }
2571 2571
2572 static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait) 2572 static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
2573 { 2573 {
2574 struct inode *inode = file->f_path.dentry->d_inode; 2574 struct inode *inode = file->f_path.dentry->d_inode;
2575 struct spu_context *ctx = SPUFS_I(inode)->i_ctx; 2575 struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2576 unsigned int mask = 0; 2576 unsigned int mask = 0;
2577 int rc; 2577 int rc;
2578 2578
2579 poll_wait(file, &ctx->switch_log->wait, wait); 2579 poll_wait(file, &ctx->switch_log->wait, wait);
2580 2580
2581 rc = spu_acquire(ctx); 2581 rc = spu_acquire(ctx);
2582 if (rc) 2582 if (rc)
2583 return rc; 2583 return rc;
2584 2584
2585 if (spufs_switch_log_used(ctx) > 0) 2585 if (spufs_switch_log_used(ctx) > 0)
2586 mask |= POLLIN; 2586 mask |= POLLIN;
2587 2587
2588 spu_release(ctx); 2588 spu_release(ctx);
2589 2589
2590 return mask; 2590 return mask;
2591 } 2591 }
2592 2592
2593 static const struct file_operations spufs_switch_log_fops = { 2593 static const struct file_operations spufs_switch_log_fops = {
2594 .owner = THIS_MODULE, 2594 .owner = THIS_MODULE,
2595 .open = spufs_switch_log_open, 2595 .open = spufs_switch_log_open,
2596 .read = spufs_switch_log_read, 2596 .read = spufs_switch_log_read,
2597 .poll = spufs_switch_log_poll, 2597 .poll = spufs_switch_log_poll,
2598 .release = spufs_switch_log_release, 2598 .release = spufs_switch_log_release,
2599 .llseek = no_llseek, 2599 .llseek = no_llseek,
2600 }; 2600 };
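Each record queued by spu_switch_log_notify() (below) is rendered by switch_log_sprint() as one text line, "sec.nsec spu_id type val timebase", and a blocking read on switch_log waits until at least one record is available; only a single opener is allowed at a time, since a second open returns -EBUSY. A minimal hedged reader, again with an illustrative path:

#include <stdio.h>

/* Minimal consumer of the switch_log text stream; each line is
 * "seconds.nanoseconds spu_id type val timebase" as printed by
 * switch_log_sprint(). */
int main(void)
{
	char line[128];
	FILE *f = fopen("/spu/example/switch_log", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))	/* blocks until a record arrives */
		fputs(line, stdout);
	fclose(f);
	return 0;
}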
2601 2601
2602 /** 2602 /**
2603 * Log a context switch event to a switch log reader. 2603 * Log a context switch event to a switch log reader.
2604 * 2604 *
2605 * Must be called with ctx->state_mutex held. 2605 * Must be called with ctx->state_mutex held.
2606 */ 2606 */
2607 void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx, 2607 void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
2608 u32 type, u32 val) 2608 u32 type, u32 val)
2609 { 2609 {
2610 if (!ctx->switch_log) 2610 if (!ctx->switch_log)
2611 return; 2611 return;
2612 2612
2613 if (spufs_switch_log_avail(ctx) > 1) { 2613 if (spufs_switch_log_avail(ctx) > 1) {
2614 struct switch_log_entry *p; 2614 struct switch_log_entry *p;
2615 2615
2616 p = ctx->switch_log->log + ctx->switch_log->head; 2616 p = ctx->switch_log->log + ctx->switch_log->head;
2617 ktime_get_ts(&p->tstamp); 2617 ktime_get_ts(&p->tstamp);
2618 p->timebase = get_tb(); 2618 p->timebase = get_tb();
2619 p->spu_id = spu ? spu->number : -1; 2619 p->spu_id = spu ? spu->number : -1;
2620 p->type = type; 2620 p->type = type;
2621 p->val = val; 2621 p->val = val;
2622 2622
2623 ctx->switch_log->head = 2623 ctx->switch_log->head =
2624 (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE; 2624 (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
2625 } 2625 }
2626 2626
2627 wake_up(&ctx->switch_log->wait); 2627 wake_up(&ctx->switch_log->wait);
2628 } 2628 }
2629 2629
2630 static int spufs_show_ctx(struct seq_file *s, void *private) 2630 static int spufs_show_ctx(struct seq_file *s, void *private)
2631 { 2631 {
2632 struct spu_context *ctx = s->private; 2632 struct spu_context *ctx = s->private;
2633 u64 mfc_control_RW; 2633 u64 mfc_control_RW;
2634 2634
2635 mutex_lock(&ctx->state_mutex); 2635 mutex_lock(&ctx->state_mutex);
2636 if (ctx->spu) { 2636 if (ctx->spu) {
2637 struct spu *spu = ctx->spu; 2637 struct spu *spu = ctx->spu;
2638 struct spu_priv2 __iomem *priv2 = spu->priv2; 2638 struct spu_priv2 __iomem *priv2 = spu->priv2;
2639 2639
2640 spin_lock_irq(&spu->register_lock); 2640 spin_lock_irq(&spu->register_lock);
2641 mfc_control_RW = in_be64(&priv2->mfc_control_RW); 2641 mfc_control_RW = in_be64(&priv2->mfc_control_RW);
2642 spin_unlock_irq(&spu->register_lock); 2642 spin_unlock_irq(&spu->register_lock);
2643 } else { 2643 } else {
2644 struct spu_state *csa = &ctx->csa; 2644 struct spu_state *csa = &ctx->csa;
2645 2645
2646 mfc_control_RW = csa->priv2.mfc_control_RW; 2646 mfc_control_RW = csa->priv2.mfc_control_RW;
2647 } 2647 }
2648 2648
2649 seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)" 2649 seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
2650 " %c %llx %llx %llx %llx %x %x\n", 2650 " %c %llx %llx %llx %llx %x %x\n",
2651 ctx->state == SPU_STATE_SAVED ? 'S' : 'R', 2651 ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
2652 ctx->flags, 2652 ctx->flags,
2653 ctx->sched_flags, 2653 ctx->sched_flags,
2654 ctx->prio, 2654 ctx->prio,
2655 ctx->time_slice, 2655 ctx->time_slice,
2656 ctx->spu ? ctx->spu->number : -1, 2656 ctx->spu ? ctx->spu->number : -1,
2657 !list_empty(&ctx->rq) ? 'q' : ' ', 2657 !list_empty(&ctx->rq) ? 'q' : ' ',
2658 ctx->csa.class_0_pending, 2658 ctx->csa.class_0_pending,
2659 ctx->csa.class_0_dar, 2659 ctx->csa.class_0_dar,
2660 ctx->csa.class_1_dsisr, 2660 ctx->csa.class_1_dsisr,
2661 mfc_control_RW, 2661 mfc_control_RW,
2662 ctx->ops->runcntl_read(ctx), 2662 ctx->ops->runcntl_read(ctx),
2663 ctx->ops->status_read(ctx)); 2663 ctx->ops->status_read(ctx));
2664 2664
2665 mutex_unlock(&ctx->state_mutex); 2665 mutex_unlock(&ctx->state_mutex);
2666 2666
2667 return 0; 2667 return 0;
2668 } 2668 }
2669 2669
2670 static int spufs_ctx_open(struct inode *inode, struct file *file) 2670 static int spufs_ctx_open(struct inode *inode, struct file *file)
2671 { 2671 {
2672 return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx); 2672 return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx);
2673 } 2673 }
2674 2674
2675 static const struct file_operations spufs_ctx_fops = { 2675 static const struct file_operations spufs_ctx_fops = {
2676 .open = spufs_ctx_open, 2676 .open = spufs_ctx_open,
2677 .read = seq_read, 2677 .read = seq_read,
2678 .llseek = seq_lseek, 2678 .llseek = seq_lseek,
2679 .release = single_release, 2679 .release = single_release,
2680 }; 2680 };
2681 2681
2682 const struct spufs_tree_descr spufs_dir_contents[] = { 2682 const struct spufs_tree_descr spufs_dir_contents[] = {
2683 { "capabilities", &spufs_caps_fops, 0444, }, 2683 { "capabilities", &spufs_caps_fops, 0444, },
2684 { "mem", &spufs_mem_fops, 0666, LS_SIZE, }, 2684 { "mem", &spufs_mem_fops, 0666, LS_SIZE, },
2685 { "regs", &spufs_regs_fops, 0666, sizeof(struct spu_reg128[128]), }, 2685 { "regs", &spufs_regs_fops, 0666, sizeof(struct spu_reg128[128]), },
2686 { "mbox", &spufs_mbox_fops, 0444, }, 2686 { "mbox", &spufs_mbox_fops, 0444, },
2687 { "ibox", &spufs_ibox_fops, 0444, }, 2687 { "ibox", &spufs_ibox_fops, 0444, },
2688 { "wbox", &spufs_wbox_fops, 0222, }, 2688 { "wbox", &spufs_wbox_fops, 0222, },
2689 { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), }, 2689 { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
2690 { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), }, 2690 { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
2691 { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), }, 2691 { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
2692 { "signal1", &spufs_signal1_fops, 0666, }, 2692 { "signal1", &spufs_signal1_fops, 0666, },
2693 { "signal2", &spufs_signal2_fops, 0666, }, 2693 { "signal2", &spufs_signal2_fops, 0666, },
2694 { "signal1_type", &spufs_signal1_type, 0666, }, 2694 { "signal1_type", &spufs_signal1_type, 0666, },
2695 { "signal2_type", &spufs_signal2_type, 0666, }, 2695 { "signal2_type", &spufs_signal2_type, 0666, },
2696 { "cntl", &spufs_cntl_fops, 0666, }, 2696 { "cntl", &spufs_cntl_fops, 0666, },
2697 { "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), }, 2697 { "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), },
2698 { "lslr", &spufs_lslr_ops, 0444, }, 2698 { "lslr", &spufs_lslr_ops, 0444, },
2699 { "mfc", &spufs_mfc_fops, 0666, }, 2699 { "mfc", &spufs_mfc_fops, 0666, },
2700 { "mss", &spufs_mss_fops, 0666, }, 2700 { "mss", &spufs_mss_fops, 0666, },
2701 { "npc", &spufs_npc_ops, 0666, }, 2701 { "npc", &spufs_npc_ops, 0666, },
2702 { "srr0", &spufs_srr0_ops, 0666, }, 2702 { "srr0", &spufs_srr0_ops, 0666, },
2703 { "decr", &spufs_decr_ops, 0666, }, 2703 { "decr", &spufs_decr_ops, 0666, },
2704 { "decr_status", &spufs_decr_status_ops, 0666, }, 2704 { "decr_status", &spufs_decr_status_ops, 0666, },
2705 { "event_mask", &spufs_event_mask_ops, 0666, }, 2705 { "event_mask", &spufs_event_mask_ops, 0666, },
2706 { "event_status", &spufs_event_status_ops, 0444, }, 2706 { "event_status", &spufs_event_status_ops, 0444, },
2707 { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, }, 2707 { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
2708 { "phys-id", &spufs_id_ops, 0666, }, 2708 { "phys-id", &spufs_id_ops, 0666, },
2709 { "object-id", &spufs_object_id_ops, 0666, }, 2709 { "object-id", &spufs_object_id_ops, 0666, },
2710 { "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), }, 2710 { "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), },
2711 { "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), }, 2711 { "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), },
2712 { "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), }, 2712 { "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), },
2713 { "dma_info", &spufs_dma_info_fops, 0444, 2713 { "dma_info", &spufs_dma_info_fops, 0444,
2714 sizeof(struct spu_dma_info), }, 2714 sizeof(struct spu_dma_info), },
2715 { "proxydma_info", &spufs_proxydma_info_fops, 0444, 2715 { "proxydma_info", &spufs_proxydma_info_fops, 0444,
2716 sizeof(struct spu_proxydma_info)}, 2716 sizeof(struct spu_proxydma_info)},
2717 { "tid", &spufs_tid_fops, 0444, }, 2717 { "tid", &spufs_tid_fops, 0444, },
2718 { "stat", &spufs_stat_fops, 0444, }, 2718 { "stat", &spufs_stat_fops, 0444, },
2719 { "switch_log", &spufs_switch_log_fops, 0444 }, 2719 { "switch_log", &spufs_switch_log_fops, 0444 },
2720 {}, 2720 {},
2721 }; 2721 };
2722 2722
2723 const struct spufs_tree_descr spufs_dir_nosched_contents[] = { 2723 const struct spufs_tree_descr spufs_dir_nosched_contents[] = {
2724 { "capabilities", &spufs_caps_fops, 0444, }, 2724 { "capabilities", &spufs_caps_fops, 0444, },
2725 { "mem", &spufs_mem_fops, 0666, LS_SIZE, }, 2725 { "mem", &spufs_mem_fops, 0666, LS_SIZE, },
2726 { "mbox", &spufs_mbox_fops, 0444, }, 2726 { "mbox", &spufs_mbox_fops, 0444, },
2727 { "ibox", &spufs_ibox_fops, 0444, }, 2727 { "ibox", &spufs_ibox_fops, 0444, },
2728 { "wbox", &spufs_wbox_fops, 0222, }, 2728 { "wbox", &spufs_wbox_fops, 0222, },
2729 { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), }, 2729 { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
2730 { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), }, 2730 { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
2731 { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), }, 2731 { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
2732 { "signal1", &spufs_signal1_nosched_fops, 0222, }, 2732 { "signal1", &spufs_signal1_nosched_fops, 0222, },
2733 { "signal2", &spufs_signal2_nosched_fops, 0222, }, 2733 { "signal2", &spufs_signal2_nosched_fops, 0222, },
2734 { "signal1_type", &spufs_signal1_type, 0666, }, 2734 { "signal1_type", &spufs_signal1_type, 0666, },
2735 { "signal2_type", &spufs_signal2_type, 0666, }, 2735 { "signal2_type", &spufs_signal2_type, 0666, },
2736 { "mss", &spufs_mss_fops, 0666, }, 2736 { "mss", &spufs_mss_fops, 0666, },
2737 { "mfc", &spufs_mfc_fops, 0666, }, 2737 { "mfc", &spufs_mfc_fops, 0666, },
2738 { "cntl", &spufs_cntl_fops, 0666, }, 2738 { "cntl", &spufs_cntl_fops, 0666, },
2739 { "npc", &spufs_npc_ops, 0666, }, 2739 { "npc", &spufs_npc_ops, 0666, },
2740 { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, }, 2740 { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
2741 { "phys-id", &spufs_id_ops, 0666, }, 2741 { "phys-id", &spufs_id_ops, 0666, },
2742 { "object-id", &spufs_object_id_ops, 0666, }, 2742 { "object-id", &spufs_object_id_ops, 0666, },
2743 { "tid", &spufs_tid_fops, 0444, }, 2743 { "tid", &spufs_tid_fops, 0444, },
2744 { "stat", &spufs_stat_fops, 0444, }, 2744 { "stat", &spufs_stat_fops, 0444, },
2745 {}, 2745 {},
2746 }; 2746 };
2747 2747
2748 const struct spufs_tree_descr spufs_dir_debug_contents[] = { 2748 const struct spufs_tree_descr spufs_dir_debug_contents[] = {
2749 { ".ctx", &spufs_ctx_fops, 0444, }, 2749 { ".ctx", &spufs_ctx_fops, 0444, },
2750 {}, 2750 {},
2751 }; 2751 };
2752 2752
2753 const struct spufs_coredump_reader spufs_coredump_read[] = { 2753 const struct spufs_coredump_reader spufs_coredump_read[] = {
2754 { "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])}, 2754 { "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
2755 { "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) }, 2755 { "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
2756 { "lslr", NULL, spufs_lslr_get, 19 }, 2756 { "lslr", NULL, spufs_lslr_get, 19 },
2757 { "decr", NULL, spufs_decr_get, 19 }, 2757 { "decr", NULL, spufs_decr_get, 19 },
2758 { "decr_status", NULL, spufs_decr_status_get, 19 }, 2758 { "decr_status", NULL, spufs_decr_status_get, 19 },
2759 { "mem", __spufs_mem_read, NULL, LS_SIZE, }, 2759 { "mem", __spufs_mem_read, NULL, LS_SIZE, },
2760 { "signal1", __spufs_signal1_read, NULL, sizeof(u32) }, 2760 { "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
2761 { "signal1_type", NULL, spufs_signal1_type_get, 19 }, 2761 { "signal1_type", NULL, spufs_signal1_type_get, 19 },
2762 { "signal2", __spufs_signal2_read, NULL, sizeof(u32) }, 2762 { "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
2763 { "signal2_type", NULL, spufs_signal2_type_get, 19 }, 2763 { "signal2_type", NULL, spufs_signal2_type_get, 19 },
2764 { "event_mask", NULL, spufs_event_mask_get, 19 }, 2764 { "event_mask", NULL, spufs_event_mask_get, 19 },
2765 { "event_status", NULL, spufs_event_status_get, 19 }, 2765 { "event_status", NULL, spufs_event_status_get, 19 },
2766 { "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) }, 2766 { "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
2767 { "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) }, 2767 { "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
2768 { "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)}, 2768 { "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
2769 { "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)}, 2769 { "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
2770 { "proxydma_info", __spufs_proxydma_info_read, 2770 { "proxydma_info", __spufs_proxydma_info_read,
2771 NULL, sizeof(struct spu_proxydma_info)}, 2771 NULL, sizeof(struct spu_proxydma_info)},
2772 { "object-id", NULL, spufs_object_id_get, 19 }, 2772 { "object-id", NULL, spufs_object_id_get, 19 },
2773 { "npc", NULL, spufs_npc_get, 19 }, 2773 { "npc", NULL, spufs_npc_get, 19 },
2774 { NULL }, 2774 { NULL },
2775 }; 2775 };
2776 2776
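The scalar entries in the coredump table above carry a size of 19 bytes, which matches a 64-bit value emitted as a hexadecimal string: 2 characters for the "0x" prefix + 16 hex digits + 1 newline = 19. (This is an inference about the dump format used by the corresponding get callbacks; the format string itself is not part of this hunk.)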
arch/powerpc/platforms/cell/spufs/switch.c
1 /* 1 /*
2 * spu_switch.c 2 * spu_switch.c
3 * 3 *
4 * (C) Copyright IBM Corp. 2005 4 * (C) Copyright IBM Corp. 2005
5 * 5 *
6 * Author: Mark Nutter <mnutter@us.ibm.com> 6 * Author: Mark Nutter <mnutter@us.ibm.com>
7 * 7 *
8 * Host-side part of SPU context switch sequence outlined in 8 * Host-side part of SPU context switch sequence outlined in
9 * Synergistic Processor Element, Book IV. 9 * Synergistic Processor Element, Book IV.
10 * 10 *
11 * A fully preemptive switch of an SPE is very expensive in terms 11 * A fully preemptive switch of an SPE is very expensive in terms
12 * of time and system resources. SPE Book IV indicates that SPE 12 * of time and system resources. SPE Book IV indicates that SPE
13 * allocation should follow a "serially reusable device" model, 13 * allocation should follow a "serially reusable device" model,
14 * in which the SPE is assigned a task until it completes. When 14 * in which the SPE is assigned a task until it completes. When
15 * this is not possible, this sequence may be used to preemptively 15 * this is not possible, this sequence may be used to preemptively
16 * save, and then later (optionally) restore the context of a 16 * save, and then later (optionally) restore the context of a
17 * program executing on an SPE. 17 * program executing on an SPE.
18 * 18 *
19 * 19 *
20 * This program is free software; you can redistribute it and/or modify 20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by 21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2, or (at your option) 22 * the Free Software Foundation; either version 2, or (at your option)
23 * any later version. 23 * any later version.
24 * 24 *
25 * This program is distributed in the hope that it will be useful, 25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of 26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details. 28 * GNU General Public License for more details.
29 * 29 *
30 * You should have received a copy of the GNU General Public License 30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software 31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 32 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
33 */ 33 */
34 34
35 #include <linux/module.h> 35 #include <linux/export.h>
36 #include <linux/errno.h> 36 #include <linux/errno.h>
37 #include <linux/hardirq.h> 37 #include <linux/hardirq.h>
38 #include <linux/sched.h> 38 #include <linux/sched.h>
39 #include <linux/kernel.h> 39 #include <linux/kernel.h>
40 #include <linux/mm.h> 40 #include <linux/mm.h>
41 #include <linux/vmalloc.h> 41 #include <linux/vmalloc.h>
42 #include <linux/smp.h> 42 #include <linux/smp.h>
43 #include <linux/stddef.h> 43 #include <linux/stddef.h>
44 #include <linux/unistd.h> 44 #include <linux/unistd.h>
45 45
46 #include <asm/io.h> 46 #include <asm/io.h>
47 #include <asm/spu.h> 47 #include <asm/spu.h>
48 #include <asm/spu_priv1.h> 48 #include <asm/spu_priv1.h>
49 #include <asm/spu_csa.h> 49 #include <asm/spu_csa.h>
50 #include <asm/mmu_context.h> 50 #include <asm/mmu_context.h>
51 51
52 #include "spufs.h" 52 #include "spufs.h"
53 53
54 #include "spu_save_dump.h" 54 #include "spu_save_dump.h"
55 #include "spu_restore_dump.h" 55 #include "spu_restore_dump.h"
56 56
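The only change to switch.c in this hunk is the include swap at line 35 above. For illustration, a minimal sketch of code that needs nothing more than <linux/export.h> (the function name is hypothetical and not taken from this file):

#include <linux/export.h>

/* hypothetical helper, shown only to illustrate the EXPORT_SYMBOL pattern */
int example_helper(int x)
{
	return x + 1;
}
EXPORT_SYMBOL_GPL(example_helper);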
57 #if 0 57 #if 0
58 #define POLL_WHILE_TRUE(_c) { \ 58 #define POLL_WHILE_TRUE(_c) { \
59 do { \ 59 do { \
60 } while (_c); \ 60 } while (_c); \
61 } 61 }
62 #else 62 #else
63 #define RELAX_SPIN_COUNT 1000 63 #define RELAX_SPIN_COUNT 1000
64 #define POLL_WHILE_TRUE(_c) { \ 64 #define POLL_WHILE_TRUE(_c) { \
65 do { \ 65 do { \
66 int _i; \ 66 int _i; \
67 for (_i=0; _i<RELAX_SPIN_COUNT && (_c); _i++) { \ 67 for (_i=0; _i<RELAX_SPIN_COUNT && (_c); _i++) { \
68 cpu_relax(); \ 68 cpu_relax(); \
69 } \ 69 } \
70 if (unlikely(_c)) yield(); \ 70 if (unlikely(_c)) yield(); \
71 else break; \ 71 else break; \
72 } while (_c); \ 72 } while (_c); \
73 } 73 }
74 #endif /* debug */ 74 #endif /* debug */
75 75
76 #define POLL_WHILE_FALSE(_c) POLL_WHILE_TRUE(!(_c)) 76 #define POLL_WHILE_FALSE(_c) POLL_WHILE_TRUE(!(_c))
77 77
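The non-debug POLL_WHILE_TRUE() above busy-waits with cpu_relax() for at most RELAX_SPIN_COUNT iterations, then yields the CPU if the condition still holds, and repeats until the condition clears. A rough equivalent written out as a helper function (a sketch for illustration only; the real code stays a macro so the condition expression is re-evaluated in place):

static void poll_while_true_sketch(int (*cond)(void *arg), void *arg)
{
	while (cond(arg)) {
		int i;

		/* bounded busy-wait, matching RELAX_SPIN_COUNT above */
		for (i = 0; i < RELAX_SPIN_COUNT && cond(arg); i++)
			cpu_relax();
		if (cond(arg))
			yield();	/* still busy: give up the CPU */
		else
			break;
	}
}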
78 static inline void acquire_spu_lock(struct spu *spu) 78 static inline void acquire_spu_lock(struct spu *spu)
79 { 79 {
80 /* Save, Step 1: 80 /* Save, Step 1:
81 * Restore, Step 1: 81 * Restore, Step 1:
82 * Acquire SPU-specific mutual exclusion lock. 82 * Acquire SPU-specific mutual exclusion lock.
83 * TBD. 83 * TBD.
84 */ 84 */
85 } 85 }
86 86
87 static inline void release_spu_lock(struct spu *spu) 87 static inline void release_spu_lock(struct spu *spu)
88 { 88 {
89 /* Restore, Step 76: 89 /* Restore, Step 76:
90 * Release SPU-specific mutual exclusion lock. 90 * Release SPU-specific mutual exclusion lock.
91 * TBD. 91 * TBD.
92 */ 92 */
93 } 93 }
94 94
95 static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu) 95 static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
96 { 96 {
97 struct spu_problem __iomem *prob = spu->problem; 97 struct spu_problem __iomem *prob = spu->problem;
98 u32 isolate_state; 98 u32 isolate_state;
99 99
100 /* Save, Step 2: 100 /* Save, Step 2:
101 * Save, Step 6: 101 * Save, Step 6:
102 * If SPU_Status[E,L,IS] any field is '1', this 102 * If SPU_Status[E,L,IS] any field is '1', this
103 * SPU is in isolate state and cannot be context 103 * SPU is in isolate state and cannot be context
104 * saved at this time. 104 * saved at this time.
105 */ 105 */
106 isolate_state = SPU_STATUS_ISOLATED_STATE | 106 isolate_state = SPU_STATUS_ISOLATED_STATE |
107 SPU_STATUS_ISOLATED_LOAD_STATUS | SPU_STATUS_ISOLATED_EXIT_STATUS; 107 SPU_STATUS_ISOLATED_LOAD_STATUS | SPU_STATUS_ISOLATED_EXIT_STATUS;
108 return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0; 108 return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
109 } 109 }
110 110
111 static inline void disable_interrupts(struct spu_state *csa, struct spu *spu) 111 static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
112 { 112 {
113 /* Save, Step 3: 113 /* Save, Step 3:
114 * Restore, Step 2: 114 * Restore, Step 2:
115 * Save INT_Mask_class0 in CSA. 115 * Save INT_Mask_class0 in CSA.
116 * Write INT_MASK_class0 with value of 0. 116 * Write INT_MASK_class0 with value of 0.
117 * Save INT_Mask_class1 in CSA. 117 * Save INT_Mask_class1 in CSA.
118 * Write INT_MASK_class1 with value of 0. 118 * Write INT_MASK_class1 with value of 0.
119 * Save INT_Mask_class2 in CSA. 119 * Save INT_Mask_class2 in CSA.
120 * Write INT_MASK_class2 with value of 0. 120 * Write INT_MASK_class2 with value of 0.
121 * Synchronize all three interrupts to be sure 121 * Synchronize all three interrupts to be sure
122 * we no longer execute a handler on another CPU. 122 * we no longer execute a handler on another CPU.
123 */ 123 */
124 spin_lock_irq(&spu->register_lock); 124 spin_lock_irq(&spu->register_lock);
125 if (csa) { 125 if (csa) {
126 csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0); 126 csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0);
127 csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1); 127 csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1);
128 csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2); 128 csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2);
129 } 129 }
130 spu_int_mask_set(spu, 0, 0ul); 130 spu_int_mask_set(spu, 0, 0ul);
131 spu_int_mask_set(spu, 1, 0ul); 131 spu_int_mask_set(spu, 1, 0ul);
132 spu_int_mask_set(spu, 2, 0ul); 132 spu_int_mask_set(spu, 2, 0ul);
133 eieio(); 133 eieio();
134 spin_unlock_irq(&spu->register_lock); 134 spin_unlock_irq(&spu->register_lock);
135 135
136 /* 136 /*
137 * This flag needs to be set before calling synchronize_irq so 137 * This flag needs to be set before calling synchronize_irq so
138 * that the update will be visible to the relevant handlers 138 * that the update will be visible to the relevant handlers
139 * via a simple load. 139 * via a simple load.
140 */ 140 */
141 set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags); 141 set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
142 clear_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags); 142 clear_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
143 synchronize_irq(spu->irqs[0]); 143 synchronize_irq(spu->irqs[0]);
144 synchronize_irq(spu->irqs[1]); 144 synchronize_irq(spu->irqs[1]);
145 synchronize_irq(spu->irqs[2]); 145 synchronize_irq(spu->irqs[2]);
146 } 146 }
147 147
148 static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu) 148 static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
149 { 149 {
150 /* Save, Step 4: 150 /* Save, Step 4:
151 * Restore, Step 25. 151 * Restore, Step 25.
152 * Set a software watchdog timer, which specifies the 152 * Set a software watchdog timer, which specifies the
153 * maximum allowable time for a context save sequence. 153 * maximum allowable time for a context save sequence.
154 * 154 *
155 * For the present, this implementation will not set a global 155 * For the present, this implementation will not set a global
156 * watchdog timer, as virtualization & variable system load 156 * watchdog timer, as virtualization & variable system load
157 * may cause unpredictable execution times. 157 * may cause unpredictable execution times.
158 */ 158 */
159 } 159 }
160 160
161 static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu) 161 static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu)
162 { 162 {
163 /* Save, Step 5: 163 /* Save, Step 5:
164 * Restore, Step 3: 164 * Restore, Step 3:
165 * Inhibit user-space access (if provided) to this 165 * Inhibit user-space access (if provided) to this
166 * SPU by unmapping the virtual pages assigned to 166 * SPU by unmapping the virtual pages assigned to
167 * the SPU memory-mapped I/O (MMIO) for problem 167 * the SPU memory-mapped I/O (MMIO) for problem
168 * state. TBD. 168 * state. TBD.
169 */ 169 */
170 } 170 }
171 171
172 static inline void set_switch_pending(struct spu_state *csa, struct spu *spu) 172 static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)
173 { 173 {
174 /* Save, Step 7: 174 /* Save, Step 7:
175 * Restore, Step 5: 175 * Restore, Step 5:
176 * Set a software context switch pending flag. 176 * Set a software context switch pending flag.
177 * Done above in Step 3 - disable_interrupts(). 177 * Done above in Step 3 - disable_interrupts().
178 */ 178 */
179 } 179 }
180 180
181 static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu) 181 static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
182 { 182 {
183 struct spu_priv2 __iomem *priv2 = spu->priv2; 183 struct spu_priv2 __iomem *priv2 = spu->priv2;
184 184
185 /* Save, Step 8: 185 /* Save, Step 8:
186 * Suspend DMA and save MFC_CNTL. 186 * Suspend DMA and save MFC_CNTL.
187 */ 187 */
188 switch (in_be64(&priv2->mfc_control_RW) & 188 switch (in_be64(&priv2->mfc_control_RW) &
189 MFC_CNTL_SUSPEND_DMA_STATUS_MASK) { 189 MFC_CNTL_SUSPEND_DMA_STATUS_MASK) {
190 case MFC_CNTL_SUSPEND_IN_PROGRESS: 190 case MFC_CNTL_SUSPEND_IN_PROGRESS:
191 POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) & 191 POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
192 MFC_CNTL_SUSPEND_DMA_STATUS_MASK) == 192 MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
193 MFC_CNTL_SUSPEND_COMPLETE); 193 MFC_CNTL_SUSPEND_COMPLETE);
194 /* fall through */ 194 /* fall through */
195 case MFC_CNTL_SUSPEND_COMPLETE: 195 case MFC_CNTL_SUSPEND_COMPLETE:
196 if (csa) 196 if (csa)
197 csa->priv2.mfc_control_RW = 197 csa->priv2.mfc_control_RW =
198 in_be64(&priv2->mfc_control_RW) | 198 in_be64(&priv2->mfc_control_RW) |
199 MFC_CNTL_SUSPEND_DMA_QUEUE; 199 MFC_CNTL_SUSPEND_DMA_QUEUE;
200 break; 200 break;
201 case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION: 201 case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION:
202 out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE); 202 out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
203 POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) & 203 POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
204 MFC_CNTL_SUSPEND_DMA_STATUS_MASK) == 204 MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
205 MFC_CNTL_SUSPEND_COMPLETE); 205 MFC_CNTL_SUSPEND_COMPLETE);
206 if (csa) 206 if (csa)
207 csa->priv2.mfc_control_RW = 207 csa->priv2.mfc_control_RW =
208 in_be64(&priv2->mfc_control_RW) & 208 in_be64(&priv2->mfc_control_RW) &
209 ~MFC_CNTL_SUSPEND_DMA_QUEUE & 209 ~MFC_CNTL_SUSPEND_DMA_QUEUE &
210 ~MFC_CNTL_SUSPEND_MASK; 210 ~MFC_CNTL_SUSPEND_MASK;
211 break; 211 break;
212 } 212 }
213 } 213 }
214 214
215 static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu) 215 static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu)
216 { 216 {
217 struct spu_problem __iomem *prob = spu->problem; 217 struct spu_problem __iomem *prob = spu->problem;
218 218
219 /* Save, Step 9: 219 /* Save, Step 9:
220 * Save SPU_Runcntl in the CSA. This value contains 220 * Save SPU_Runcntl in the CSA. This value contains
221 * the "Application Desired State". 221 * the "Application Desired State".
222 */ 222 */
223 csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW); 223 csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW);
224 } 224 }
225 225
226 static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu) 226 static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu)
227 { 227 {
228 /* Save, Step 10: 228 /* Save, Step 10:
229 * Save MFC_SR1 in the CSA. 229 * Save MFC_SR1 in the CSA.
230 */ 230 */
231 csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu); 231 csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu);
232 } 232 }
233 233
234 static inline void save_spu_status(struct spu_state *csa, struct spu *spu) 234 static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
235 { 235 {
236 struct spu_problem __iomem *prob = spu->problem; 236 struct spu_problem __iomem *prob = spu->problem;
237 237
238 /* Save, Step 11: 238 /* Save, Step 11:
239 * Read SPU_Status[R], and save to CSA. 239 * Read SPU_Status[R], and save to CSA.
240 */ 240 */
241 if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) { 241 if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) {
242 csa->prob.spu_status_R = in_be32(&prob->spu_status_R); 242 csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
243 } else { 243 } else {
244 u32 stopped; 244 u32 stopped;
245 245
246 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); 246 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
247 eieio(); 247 eieio();
248 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & 248 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
249 SPU_STATUS_RUNNING); 249 SPU_STATUS_RUNNING);
250 stopped = 250 stopped =
251 SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP | 251 SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
252 SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP; 252 SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
253 if ((in_be32(&prob->spu_status_R) & stopped) == 0) 253 if ((in_be32(&prob->spu_status_R) & stopped) == 0)
254 csa->prob.spu_status_R = SPU_STATUS_RUNNING; 254 csa->prob.spu_status_R = SPU_STATUS_RUNNING;
255 else 255 else
256 csa->prob.spu_status_R = in_be32(&prob->spu_status_R); 256 csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
257 } 257 }
258 } 258 }
259 259
260 static inline void save_mfc_stopped_status(struct spu_state *csa, 260 static inline void save_mfc_stopped_status(struct spu_state *csa,
261 struct spu *spu) 261 struct spu *spu)
262 { 262 {
263 struct spu_priv2 __iomem *priv2 = spu->priv2; 263 struct spu_priv2 __iomem *priv2 = spu->priv2;
264 const u64 mask = MFC_CNTL_DECREMENTER_RUNNING | 264 const u64 mask = MFC_CNTL_DECREMENTER_RUNNING |
265 MFC_CNTL_DMA_QUEUES_EMPTY; 265 MFC_CNTL_DMA_QUEUES_EMPTY;
266 266
267 /* Save, Step 12: 267 /* Save, Step 12:
268 * Read MFC_CNTL[Ds]. Update saved copy of 268 * Read MFC_CNTL[Ds]. Update saved copy of
269 * CSA.MFC_CNTL[Ds]. 269 * CSA.MFC_CNTL[Ds].
270 * 270 *
271 * update: do the same with MFC_CNTL[Q]. 271 * update: do the same with MFC_CNTL[Q].
272 */ 272 */
273 csa->priv2.mfc_control_RW &= ~mask; 273 csa->priv2.mfc_control_RW &= ~mask;
274 csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask; 274 csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask;
275 } 275 }
276 276
277 static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu) 277 static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
278 { 278 {
279 struct spu_priv2 __iomem *priv2 = spu->priv2; 279 struct spu_priv2 __iomem *priv2 = spu->priv2;
280 280
281 /* Save, Step 13: 281 /* Save, Step 13:
282 * Write MFC_CNTL[Dh] set to a '1' to halt 282 * Write MFC_CNTL[Dh] set to a '1' to halt
283 * the decrementer. 283 * the decrementer.
284 */ 284 */
285 out_be64(&priv2->mfc_control_RW, 285 out_be64(&priv2->mfc_control_RW,
286 MFC_CNTL_DECREMENTER_HALTED | MFC_CNTL_SUSPEND_MASK); 286 MFC_CNTL_DECREMENTER_HALTED | MFC_CNTL_SUSPEND_MASK);
287 eieio(); 287 eieio();
288 } 288 }
289 289
290 static inline void save_timebase(struct spu_state *csa, struct spu *spu) 290 static inline void save_timebase(struct spu_state *csa, struct spu *spu)
291 { 291 {
292 /* Save, Step 14: 292 /* Save, Step 14:
293 * Read PPE Timebase High and Timebase low registers 293 * Read PPE Timebase High and Timebase low registers
294 * and save in CSA. TBD. 294 * and save in CSA. TBD.
295 */ 295 */
296 csa->suspend_time = get_cycles(); 296 csa->suspend_time = get_cycles();
297 } 297 }
298 298
299 static inline void remove_other_spu_access(struct spu_state *csa, 299 static inline void remove_other_spu_access(struct spu_state *csa,
300 struct spu *spu) 300 struct spu *spu)
301 { 301 {
302 /* Save, Step 15: 302 /* Save, Step 15:
303 * Remove other SPU access to this SPU by unmapping 303 * Remove other SPU access to this SPU by unmapping
304 * this SPU's pages from their address space. TBD. 304 * this SPU's pages from their address space. TBD.
305 */ 305 */
306 } 306 }
307 307
308 static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu) 308 static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu)
309 { 309 {
310 struct spu_problem __iomem *prob = spu->problem; 310 struct spu_problem __iomem *prob = spu->problem;
311 311
312 /* Save, Step 16: 312 /* Save, Step 16:
313 * Restore, Step 11. 313 * Restore, Step 11.
314 * Write SPU_MSSync register. Poll SPU_MSSync[P] 314 * Write SPU_MSSync register. Poll SPU_MSSync[P]
315 * for a value of 0. 315 * for a value of 0.
316 */ 316 */
317 out_be64(&prob->spc_mssync_RW, 1UL); 317 out_be64(&prob->spc_mssync_RW, 1UL);
318 POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING); 318 POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING);
319 } 319 }
320 320
321 static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu) 321 static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu)
322 { 322 {
323 /* Save, Step 17: 323 /* Save, Step 17:
324 * Restore, Step 12. 324 * Restore, Step 12.
325 * Restore, Step 48. 325 * Restore, Step 48.
326 * Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register. 326 * Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register.
327 * Then issue a PPE sync instruction. 327 * Then issue a PPE sync instruction.
328 */ 328 */
329 spu_tlb_invalidate(spu); 329 spu_tlb_invalidate(spu);
330 mb(); 330 mb();
331 } 331 }
332 332
333 static inline void handle_pending_interrupts(struct spu_state *csa, 333 static inline void handle_pending_interrupts(struct spu_state *csa,
334 struct spu *spu) 334 struct spu *spu)
335 { 335 {
336 /* Save, Step 18: 336 /* Save, Step 18:
337 * Handle any pending interrupts from this SPU 337 * Handle any pending interrupts from this SPU
338 * here. This is OS or hypervisor specific. One 338 * here. This is OS or hypervisor specific. One
339 * option is to re-enable interrupts to handle any 339 * option is to re-enable interrupts to handle any
340 * pending interrupts, with the interrupt handlers 340 * pending interrupts, with the interrupt handlers
341 * recognizing the software Context Switch Pending 341 * recognizing the software Context Switch Pending
342 * flag, to ensure the SPU execution or MFC command 342 * flag, to ensure the SPU execution or MFC command
343 * queue is not restarted. TBD. 343 * queue is not restarted. TBD.
344 */ 344 */
345 } 345 }
346 346
347 static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu) 347 static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu)
348 { 348 {
349 struct spu_priv2 __iomem *priv2 = spu->priv2; 349 struct spu_priv2 __iomem *priv2 = spu->priv2;
350 int i; 350 int i;
351 351
352 /* Save, Step 19: 352 /* Save, Step 19:
353 * If MFC_Cntl[Se]=0 then save 353 * If MFC_Cntl[Se]=0 then save
354 * MFC command queues. 354 * MFC command queues.
355 */ 355 */
356 if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) { 356 if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) {
357 for (i = 0; i < 8; i++) { 357 for (i = 0; i < 8; i++) {
358 csa->priv2.puq[i].mfc_cq_data0_RW = 358 csa->priv2.puq[i].mfc_cq_data0_RW =
359 in_be64(&priv2->puq[i].mfc_cq_data0_RW); 359 in_be64(&priv2->puq[i].mfc_cq_data0_RW);
360 csa->priv2.puq[i].mfc_cq_data1_RW = 360 csa->priv2.puq[i].mfc_cq_data1_RW =
361 in_be64(&priv2->puq[i].mfc_cq_data1_RW); 361 in_be64(&priv2->puq[i].mfc_cq_data1_RW);
362 csa->priv2.puq[i].mfc_cq_data2_RW = 362 csa->priv2.puq[i].mfc_cq_data2_RW =
363 in_be64(&priv2->puq[i].mfc_cq_data2_RW); 363 in_be64(&priv2->puq[i].mfc_cq_data2_RW);
364 csa->priv2.puq[i].mfc_cq_data3_RW = 364 csa->priv2.puq[i].mfc_cq_data3_RW =
365 in_be64(&priv2->puq[i].mfc_cq_data3_RW); 365 in_be64(&priv2->puq[i].mfc_cq_data3_RW);
366 } 366 }
367 for (i = 0; i < 16; i++) { 367 for (i = 0; i < 16; i++) {
368 csa->priv2.spuq[i].mfc_cq_data0_RW = 368 csa->priv2.spuq[i].mfc_cq_data0_RW =
369 in_be64(&priv2->spuq[i].mfc_cq_data0_RW); 369 in_be64(&priv2->spuq[i].mfc_cq_data0_RW);
370 csa->priv2.spuq[i].mfc_cq_data1_RW = 370 csa->priv2.spuq[i].mfc_cq_data1_RW =
371 in_be64(&priv2->spuq[i].mfc_cq_data1_RW); 371 in_be64(&priv2->spuq[i].mfc_cq_data1_RW);
372 csa->priv2.spuq[i].mfc_cq_data2_RW = 372 csa->priv2.spuq[i].mfc_cq_data2_RW =
373 in_be64(&priv2->spuq[i].mfc_cq_data2_RW); 373 in_be64(&priv2->spuq[i].mfc_cq_data2_RW);
374 csa->priv2.spuq[i].mfc_cq_data3_RW = 374 csa->priv2.spuq[i].mfc_cq_data3_RW =
375 in_be64(&priv2->spuq[i].mfc_cq_data3_RW); 375 in_be64(&priv2->spuq[i].mfc_cq_data3_RW);
376 } 376 }
377 } 377 }
378 } 378 }
379 379
380 static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu) 380 static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu)
381 { 381 {
382 struct spu_problem __iomem *prob = spu->problem; 382 struct spu_problem __iomem *prob = spu->problem;
383 383
384 /* Save, Step 20: 384 /* Save, Step 20:
385 * Save the PPU_QueryMask register 385 * Save the PPU_QueryMask register
386 * in the CSA. 386 * in the CSA.
387 */ 387 */
388 csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW); 388 csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW);
389 } 389 }
390 390
391 static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu) 391 static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu)
392 { 392 {
393 struct spu_problem __iomem *prob = spu->problem; 393 struct spu_problem __iomem *prob = spu->problem;
394 394
395 /* Save, Step 21: 395 /* Save, Step 21:
396 * Save the PPU_QueryType register 396 * Save the PPU_QueryType register
397 * in the CSA. 397 * in the CSA.
398 */ 398 */
399 csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW); 399 csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW);
400 } 400 }
401 401
402 static inline void save_ppu_tagstatus(struct spu_state *csa, struct spu *spu) 402 static inline void save_ppu_tagstatus(struct spu_state *csa, struct spu *spu)
403 { 403 {
404 struct spu_problem __iomem *prob = spu->problem; 404 struct spu_problem __iomem *prob = spu->problem;
405 405
406 /* Save the Prxy_TagStatus register in the CSA. 406 /* Save the Prxy_TagStatus register in the CSA.
407 * 407 *
408 * It is unnecessary to restore dma_tagstatus_R, however, 408 * It is unnecessary to restore dma_tagstatus_R, however,
409 * dma_tagstatus_R in the CSA is accessed via backing_ops, so 409 * dma_tagstatus_R in the CSA is accessed via backing_ops, so
410 * we must save it. 410 * we must save it.
411 */ 411 */
412 csa->prob.dma_tagstatus_R = in_be32(&prob->dma_tagstatus_R); 412 csa->prob.dma_tagstatus_R = in_be32(&prob->dma_tagstatus_R);
413 } 413 }
414 414
415 static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu) 415 static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
416 { 416 {
417 struct spu_priv2 __iomem *priv2 = spu->priv2; 417 struct spu_priv2 __iomem *priv2 = spu->priv2;
418 418
419 /* Save, Step 22: 419 /* Save, Step 22:
420 * Save the MFC_CSR_TSQ register 420 * Save the MFC_CSR_TSQ register
421 * in the LSCSA. 421 * in the LSCSA.
422 */ 422 */
423 csa->priv2.spu_tag_status_query_RW = 423 csa->priv2.spu_tag_status_query_RW =
424 in_be64(&priv2->spu_tag_status_query_RW); 424 in_be64(&priv2->spu_tag_status_query_RW);
425 } 425 }
426 426
427 static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu) 427 static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
428 { 428 {
429 struct spu_priv2 __iomem *priv2 = spu->priv2; 429 struct spu_priv2 __iomem *priv2 = spu->priv2;
430 430
431 /* Save, Step 23: 431 /* Save, Step 23:
432 * Save the MFC_CSR_CMD1 and MFC_CSR_CMD2 432 * Save the MFC_CSR_CMD1 and MFC_CSR_CMD2
433 * registers in the CSA. 433 * registers in the CSA.
434 */ 434 */
435 csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW); 435 csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW);
436 csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW); 436 csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW);
437 } 437 }
438 438
439 static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu) 439 static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
440 { 440 {
441 struct spu_priv2 __iomem *priv2 = spu->priv2; 441 struct spu_priv2 __iomem *priv2 = spu->priv2;
442 442
443 /* Save, Step 24: 443 /* Save, Step 24:
444 * Save the MFC_CSR_ATO register in 444 * Save the MFC_CSR_ATO register in
445 * the CSA. 445 * the CSA.
446 */ 446 */
447 csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW); 447 csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW);
448 } 448 }
449 449
450 static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu) 450 static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
451 { 451 {
452 /* Save, Step 25: 452 /* Save, Step 25:
453 * Save the MFC_TCLASS_ID register in 453 * Save the MFC_TCLASS_ID register in
454 * the CSA. 454 * the CSA.
455 */ 455 */
456 csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu); 456 csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu);
457 } 457 }
458 458
459 static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu) 459 static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
460 { 460 {
461 /* Save, Step 26: 461 /* Save, Step 26:
462 * Restore, Step 23. 462 * Restore, Step 23.
463 * Write the MFC_TCLASS_ID register with 463 * Write the MFC_TCLASS_ID register with
464 * the value 0x10000000. 464 * the value 0x10000000.
465 */ 465 */
466 spu_mfc_tclass_id_set(spu, 0x10000000); 466 spu_mfc_tclass_id_set(spu, 0x10000000);
467 eieio(); 467 eieio();
468 } 468 }
469 469
470 static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu) 470 static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
471 { 471 {
472 struct spu_priv2 __iomem *priv2 = spu->priv2; 472 struct spu_priv2 __iomem *priv2 = spu->priv2;
473 473
474 /* Save, Step 27: 474 /* Save, Step 27:
475 * Restore, Step 14. 475 * Restore, Step 14.
476 * Write MFC_CNTL[Pc]=1 (purge queue). 476 * Write MFC_CNTL[Pc]=1 (purge queue).
477 */ 477 */
478 out_be64(&priv2->mfc_control_RW, 478 out_be64(&priv2->mfc_control_RW,
479 MFC_CNTL_PURGE_DMA_REQUEST | 479 MFC_CNTL_PURGE_DMA_REQUEST |
480 MFC_CNTL_SUSPEND_MASK); 480 MFC_CNTL_SUSPEND_MASK);
481 eieio(); 481 eieio();
482 } 482 }
483 483
484 static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu) 484 static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
485 { 485 {
486 struct spu_priv2 __iomem *priv2 = spu->priv2; 486 struct spu_priv2 __iomem *priv2 = spu->priv2;
487 487
488 /* Save, Step 28: 488 /* Save, Step 28:
489 * Poll MFC_CNTL[Ps] until value '11' is read 489 * Poll MFC_CNTL[Ps] until value '11' is read
490 * (purge complete). 490 * (purge complete).
491 */ 491 */
492 POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) & 492 POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
493 MFC_CNTL_PURGE_DMA_STATUS_MASK) == 493 MFC_CNTL_PURGE_DMA_STATUS_MASK) ==
494 MFC_CNTL_PURGE_DMA_COMPLETE); 494 MFC_CNTL_PURGE_DMA_COMPLETE);
495 } 495 }
496 496
497 static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu) 497 static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
498 { 498 {
499 /* Save, Step 30: 499 /* Save, Step 30:
500 * Restore, Step 18: 500 * Restore, Step 18:
501 * Write MFC_SR1 with MFC_SR1[D=0,S=1] and 501 * Write MFC_SR1 with MFC_SR1[D=0,S=1] and
502 * MFC_SR1[TL,R,Pr,T] set correctly for the 502 * MFC_SR1[TL,R,Pr,T] set correctly for the
503 * OS specific environment. 503 * OS specific environment.
504 * 504 *
505 * Implementation note: The SPU-side code 505 * Implementation note: The SPU-side code
506 * for save/restore is privileged, so the 506 * for save/restore is privileged, so the
507 * MFC_SR1[Pr] bit is not set. 507 * MFC_SR1[Pr] bit is not set.
508 * 508 *
509 */ 509 */
510 spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK | 510 spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
511 MFC_STATE1_RELOCATE_MASK | 511 MFC_STATE1_RELOCATE_MASK |
512 MFC_STATE1_BUS_TLBIE_MASK)); 512 MFC_STATE1_BUS_TLBIE_MASK));
513 } 513 }
514 514
515 static inline void save_spu_npc(struct spu_state *csa, struct spu *spu) 515 static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)
516 { 516 {
517 struct spu_problem __iomem *prob = spu->problem; 517 struct spu_problem __iomem *prob = spu->problem;
518 518
519 /* Save, Step 31: 519 /* Save, Step 31:
520 * Save SPU_NPC in the CSA. 520 * Save SPU_NPC in the CSA.
521 */ 521 */
522 csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW); 522 csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW);
523 } 523 }
524 524
525 static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu) 525 static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu)
526 { 526 {
527 struct spu_priv2 __iomem *priv2 = spu->priv2; 527 struct spu_priv2 __iomem *priv2 = spu->priv2;
528 528
529 /* Save, Step 32: 529 /* Save, Step 32:
530 * Save SPU_PrivCntl in the CSA. 530 * Save SPU_PrivCntl in the CSA.
531 */ 531 */
532 csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW); 532 csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW);
533 } 533 }
534 534
535 static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu) 535 static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu)
536 { 536 {
537 struct spu_priv2 __iomem *priv2 = spu->priv2; 537 struct spu_priv2 __iomem *priv2 = spu->priv2;
538 538
539 /* Save, Step 33: 539 /* Save, Step 33:
540 * Restore, Step 16: 540 * Restore, Step 16:
541 * Write SPU_PrivCntl[S,Le,A] fields reset to 0. 541 * Write SPU_PrivCntl[S,Le,A] fields reset to 0.
542 */ 542 */
543 out_be64(&priv2->spu_privcntl_RW, 0UL); 543 out_be64(&priv2->spu_privcntl_RW, 0UL);
544 eieio(); 544 eieio();
545 } 545 }
546 546
547 static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu) 547 static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu)
548 { 548 {
549 struct spu_priv2 __iomem *priv2 = spu->priv2; 549 struct spu_priv2 __iomem *priv2 = spu->priv2;
550 550
551 /* Save, Step 34: 551 /* Save, Step 34:
552 * Save SPU_LSLR in the CSA. 552 * Save SPU_LSLR in the CSA.
553 */ 553 */
554 csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW); 554 csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW);
555 } 555 }
556 556
557 static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu) 557 static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu)
558 { 558 {
559 struct spu_priv2 __iomem *priv2 = spu->priv2; 559 struct spu_priv2 __iomem *priv2 = spu->priv2;
560 560
561 /* Save, Step 35: 561 /* Save, Step 35:
562 * Restore, Step 17. 562 * Restore, Step 17.
563 * Reset SPU_LSLR. 563 * Reset SPU_LSLR.
564 */ 564 */
565 out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK); 565 out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK);
566 eieio(); 566 eieio();
567 } 567 }
568 568
569 static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu) 569 static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu)
570 { 570 {
571 struct spu_priv2 __iomem *priv2 = spu->priv2; 571 struct spu_priv2 __iomem *priv2 = spu->priv2;
572 572
573 /* Save, Step 36: 573 /* Save, Step 36:
574 * Save SPU_Cfg in the CSA. 574 * Save SPU_Cfg in the CSA.
575 */ 575 */
576 csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW); 576 csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW);
577 } 577 }
578 578
579 static inline void save_pm_trace(struct spu_state *csa, struct spu *spu) 579 static inline void save_pm_trace(struct spu_state *csa, struct spu *spu)
580 { 580 {
581 /* Save, Step 37: 581 /* Save, Step 37:
582 * Save PM_Trace_Tag_Wait_Mask in the CSA. 582 * Save PM_Trace_Tag_Wait_Mask in the CSA.
583 * Not performed by this implementation. 583 * Not performed by this implementation.
584 */ 584 */
585 } 585 }
586 586
587 static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu) 587 static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)
588 { 588 {
589 /* Save, Step 38: 589 /* Save, Step 38:
590 * Save RA_GROUP_ID register and the 590 * Save RA_GROUP_ID register and the
591 * RA_ENABLE register in the CSA. 591 * RA_ENABLE register in the CSA.
592 */ 592 */
593 csa->priv1.resource_allocation_groupID_RW = 593 csa->priv1.resource_allocation_groupID_RW =
594 spu_resource_allocation_groupID_get(spu); 594 spu_resource_allocation_groupID_get(spu);
595 csa->priv1.resource_allocation_enable_RW = 595 csa->priv1.resource_allocation_enable_RW =
596 spu_resource_allocation_enable_get(spu); 596 spu_resource_allocation_enable_get(spu);
597 } 597 }
598 598
599 static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu) 599 static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
600 { 600 {
601 struct spu_problem __iomem *prob = spu->problem; 601 struct spu_problem __iomem *prob = spu->problem;
602 602
603 /* Save, Step 39: 603 /* Save, Step 39:
604 * Save MB_Stat register in the CSA. 604 * Save MB_Stat register in the CSA.
605 */ 605 */
606 csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R); 606 csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R);
607 } 607 }
608 608
609 static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu) 609 static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu)
610 { 610 {
611 struct spu_problem __iomem *prob = spu->problem; 611 struct spu_problem __iomem *prob = spu->problem;
612 612
613 /* Save, Step 40: 613 /* Save, Step 40:
614 * Save the PPU_MB register in the CSA. 614 * Save the PPU_MB register in the CSA.
615 */ 615 */
616 csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R); 616 csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R);
617 } 617 }
618 618
619 static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu) 619 static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)
620 { 620 {
621 struct spu_priv2 __iomem *priv2 = spu->priv2; 621 struct spu_priv2 __iomem *priv2 = spu->priv2;
622 622
623 /* Save, Step 41: 623 /* Save, Step 41:
624 * Save the PPUINT_MB register in the CSA. 624 * Save the PPUINT_MB register in the CSA.
625 */ 625 */
626 csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R); 626 csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R);
627 } 627 }
628 628
629 static inline void save_ch_part1(struct spu_state *csa, struct spu *spu) 629 static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
630 { 630 {
631 struct spu_priv2 __iomem *priv2 = spu->priv2; 631 struct spu_priv2 __iomem *priv2 = spu->priv2;
632 u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL }; 632 u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
633 int i; 633 int i;
634 634
635 /* Save, Step 42: 635 /* Save, Step 42:
636 */ 636 */
637 637
638 /* Save CH 1, without channel count */ 638 /* Save CH 1, without channel count */
639 out_be64(&priv2->spu_chnlcntptr_RW, 1); 639 out_be64(&priv2->spu_chnlcntptr_RW, 1);
640 csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW); 640 csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW);
641 641
642 /* Save the following CH: [0,3,4,24,25,27] */ 642 /* Save the following CH: [0,3,4,24,25,27] */
643 for (i = 0; i < ARRAY_SIZE(ch_indices); i++) { 643 for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
644 idx = ch_indices[i]; 644 idx = ch_indices[i];
645 out_be64(&priv2->spu_chnlcntptr_RW, idx); 645 out_be64(&priv2->spu_chnlcntptr_RW, idx);
646 eieio(); 646 eieio();
647 csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW); 647 csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW);
648 csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW); 648 csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW);
649 out_be64(&priv2->spu_chnldata_RW, 0UL); 649 out_be64(&priv2->spu_chnldata_RW, 0UL);
650 out_be64(&priv2->spu_chnlcnt_RW, 0UL); 650 out_be64(&priv2->spu_chnlcnt_RW, 0UL);
651 eieio(); 651 eieio();
652 } 652 }
653 } 653 }
654 654
655 static inline void save_spu_mb(struct spu_state *csa, struct spu *spu) 655 static inline void save_spu_mb(struct spu_state *csa, struct spu *spu)
656 { 656 {
657 struct spu_priv2 __iomem *priv2 = spu->priv2; 657 struct spu_priv2 __iomem *priv2 = spu->priv2;
658 int i; 658 int i;
659 659
660 /* Save, Step 43: 660 /* Save, Step 43:
661 * Save SPU Read Mailbox Channel. 661 * Save SPU Read Mailbox Channel.
662 */ 662 */
663 out_be64(&priv2->spu_chnlcntptr_RW, 29UL); 663 out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
664 eieio(); 664 eieio();
665 csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW); 665 csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW);
666 for (i = 0; i < 4; i++) { 666 for (i = 0; i < 4; i++) {
667 csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW); 667 csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW);
668 } 668 }
669 out_be64(&priv2->spu_chnlcnt_RW, 0UL); 669 out_be64(&priv2->spu_chnlcnt_RW, 0UL);
670 eieio(); 670 eieio();
671 } 671 }
672 672
673 static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu) 673 static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu)
674 { 674 {
675 struct spu_priv2 __iomem *priv2 = spu->priv2; 675 struct spu_priv2 __iomem *priv2 = spu->priv2;
676 676
677 /* Save, Step 44: 677 /* Save, Step 44:
678 * Save MFC_CMD Channel. 678 * Save MFC_CMD Channel.
679 */ 679 */
680 out_be64(&priv2->spu_chnlcntptr_RW, 21UL); 680 out_be64(&priv2->spu_chnlcntptr_RW, 21UL);
681 eieio(); 681 eieio();
682 csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW); 682 csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW);
683 eieio(); 683 eieio();
684 } 684 }
685 685
686 static inline void reset_ch(struct spu_state *csa, struct spu *spu) 686 static inline void reset_ch(struct spu_state *csa, struct spu *spu)
687 { 687 {
688 struct spu_priv2 __iomem *priv2 = spu->priv2; 688 struct spu_priv2 __iomem *priv2 = spu->priv2;
689 u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL }; 689 u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL };
690 u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL }; 690 u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL };
691 u64 idx; 691 u64 idx;
692 int i; 692 int i;
693 693
694 /* Save, Step 45: 694 /* Save, Step 45:
695 * Reset the following CH: [21, 23, 28, 30] 695 * Reset the following CH: [21, 23, 28, 30]
696 */ 696 */
697 for (i = 0; i < 4; i++) { 697 for (i = 0; i < 4; i++) {
698 idx = ch_indices[i]; 698 idx = ch_indices[i];
699 out_be64(&priv2->spu_chnlcntptr_RW, idx); 699 out_be64(&priv2->spu_chnlcntptr_RW, idx);
700 eieio(); 700 eieio();
701 out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]); 701 out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
702 eieio(); 702 eieio();
703 } 703 }
704 } 704 }
705 705
706 static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu) 706 static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
707 { 707 {
708 struct spu_priv2 __iomem *priv2 = spu->priv2; 708 struct spu_priv2 __iomem *priv2 = spu->priv2;
709 709
710 /* Save, Step 46: 710 /* Save, Step 46:
711 * Restore, Step 25. 711 * Restore, Step 25.
712 * Write MFC_CNTL[Sc]=0 (resume queue processing). 712 * Write MFC_CNTL[Sc]=0 (resume queue processing).
713 */ 713 */
714 out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE); 714 out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
715 } 715 }
716 716
717 static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu, 717 static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu,
718 unsigned int *code, int code_size) 718 unsigned int *code, int code_size)
719 { 719 {
720 /* Save, Step 47: 720 /* Save, Step 47:
721 * Restore, Step 30. 721 * Restore, Step 30.
722 * If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All 722 * If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All
723 * register, then initialize SLB_VSID and SLB_ESID 723 * register, then initialize SLB_VSID and SLB_ESID
724 * to provide access to SPU context save code and 724 * to provide access to SPU context save code and
725 * LSCSA. 725 * LSCSA.
726 * 726 *
727 * This implementation places both the context 727 * This implementation places both the context
728 * switch code and LSCSA in kernel address space. 728 * switch code and LSCSA in kernel address space.
729 * 729 *
730 * Further this implementation assumes that the 730 * Further this implementation assumes that the
731 * MFC_SR1[R]=1 (in other words, assume that 731 * MFC_SR1[R]=1 (in other words, assume that
732 * translation is desired by OS environment). 732 * translation is desired by OS environment).
733 */ 733 */
734 spu_invalidate_slbs(spu); 734 spu_invalidate_slbs(spu);
735 spu_setup_kernel_slbs(spu, csa->lscsa, code, code_size); 735 spu_setup_kernel_slbs(spu, csa->lscsa, code, code_size);
736 } 736 }
737 737
738 static inline void set_switch_active(struct spu_state *csa, struct spu *spu) 738 static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
739 { 739 {
740 /* Save, Step 48: 740 /* Save, Step 48:
741 * Restore, Step 23. 741 * Restore, Step 23.
742 * Change the software context switch pending flag 742 * Change the software context switch pending flag
743 * to context switch active. This implementation does 743 * to context switch active. This implementation does
744 * not use a switch active flag. 744 * not use a switch active flag.
745 * 745 *
746 * Now that we have saved the mfc in the csa, we can add in the 746 * Now that we have saved the mfc in the csa, we can add in the
747 * restart command if an exception occurred. 747 * restart command if an exception occurred.
748 */ 748 */
749 if (test_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags)) 749 if (test_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags))
750 csa->priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND; 750 csa->priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
751 clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags); 751 clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
752 mb(); 752 mb();
753 } 753 }
754 754
755 static inline void enable_interrupts(struct spu_state *csa, struct spu *spu) 755 static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
756 { 756 {
757 unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR | 757 unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
758 CLASS1_ENABLE_STORAGE_FAULT_INTR; 758 CLASS1_ENABLE_STORAGE_FAULT_INTR;
759 759
760 /* Save, Step 49: 760 /* Save, Step 49:
761 * Restore, Step 22: 761 * Restore, Step 22:
762 * Reset and then enable interrupts, as 762 * Reset and then enable interrupts, as
763 * needed by OS. 763 * needed by OS.
764 * 764 *
765 * This implementation enables only class1 765 * This implementation enables only class1
766 * (translation) interrupts. 766 * (translation) interrupts.
767 */ 767 */
768 spin_lock_irq(&spu->register_lock); 768 spin_lock_irq(&spu->register_lock);
769 spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK); 769 spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
770 spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK); 770 spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
771 spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK); 771 spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
772 spu_int_mask_set(spu, 0, 0ul); 772 spu_int_mask_set(spu, 0, 0ul);
773 spu_int_mask_set(spu, 1, class1_mask); 773 spu_int_mask_set(spu, 1, class1_mask);
774 spu_int_mask_set(spu, 2, 0ul); 774 spu_int_mask_set(spu, 2, 0ul);
775 spin_unlock_irq(&spu->register_lock); 775 spin_unlock_irq(&spu->register_lock);
776 } 776 }
777 777
778 static inline int send_mfc_dma(struct spu *spu, unsigned long ea, 778 static inline int send_mfc_dma(struct spu *spu, unsigned long ea,
779 unsigned int ls_offset, unsigned int size, 779 unsigned int ls_offset, unsigned int size,
780 unsigned int tag, unsigned int rclass, 780 unsigned int tag, unsigned int rclass,
781 unsigned int cmd) 781 unsigned int cmd)
782 { 782 {
783 struct spu_problem __iomem *prob = spu->problem; 783 struct spu_problem __iomem *prob = spu->problem;
784 union mfc_tag_size_class_cmd command; 784 union mfc_tag_size_class_cmd command;
785 unsigned int transfer_size; 785 unsigned int transfer_size;
786 volatile unsigned int status = 0x0; 786 volatile unsigned int status = 0x0;
787 787
788 while (size > 0) { 788 while (size > 0) {
789 transfer_size = 789 transfer_size =
790 (size > MFC_MAX_DMA_SIZE) ? MFC_MAX_DMA_SIZE : size; 790 (size > MFC_MAX_DMA_SIZE) ? MFC_MAX_DMA_SIZE : size;
791 command.u.mfc_size = transfer_size; 791 command.u.mfc_size = transfer_size;
792 command.u.mfc_tag = tag; 792 command.u.mfc_tag = tag;
793 command.u.mfc_rclassid = rclass; 793 command.u.mfc_rclassid = rclass;
794 command.u.mfc_cmd = cmd; 794 command.u.mfc_cmd = cmd;
795 do { 795 do {
796 out_be32(&prob->mfc_lsa_W, ls_offset); 796 out_be32(&prob->mfc_lsa_W, ls_offset);
797 out_be64(&prob->mfc_ea_W, ea); 797 out_be64(&prob->mfc_ea_W, ea);
798 out_be64(&prob->mfc_union_W.all64, command.all64); 798 out_be64(&prob->mfc_union_W.all64, command.all64);
799 status = 799 status =
800 in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32); 800 in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
801 if (unlikely(status & 0x2)) { 801 if (unlikely(status & 0x2)) {
802 cpu_relax(); 802 cpu_relax();
803 } 803 }
804 } while (status & 0x3); 804 } while (status & 0x3);
805 size -= transfer_size; 805 size -= transfer_size;
806 ea += transfer_size; 806 ea += transfer_size;
807 ls_offset += transfer_size; 807 ls_offset += transfer_size;
808 } 808 }
809 return 0; 809 return 0;
810 } 810 }
811 811
812 static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu) 812 static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu)
813 { 813 {
814 unsigned long addr = (unsigned long)&csa->lscsa->ls[0]; 814 unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
815 unsigned int ls_offset = 0x0; 815 unsigned int ls_offset = 0x0;
816 unsigned int size = 16384; 816 unsigned int size = 16384;
817 unsigned int tag = 0; 817 unsigned int tag = 0;
818 unsigned int rclass = 0; 818 unsigned int rclass = 0;
819 unsigned int cmd = MFC_PUT_CMD; 819 unsigned int cmd = MFC_PUT_CMD;
820 820
821 /* Save, Step 50: 821 /* Save, Step 50:
822 * Issue a DMA command to copy the first 16K bytes 822 * Issue a DMA command to copy the first 16K bytes
823 * of local storage to the CSA. 823 * of local storage to the CSA.
824 */ 824 */
825 send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd); 825 send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
826 } 826 }
827 827
828 static inline void set_spu_npc(struct spu_state *csa, struct spu *spu) 828 static inline void set_spu_npc(struct spu_state *csa, struct spu *spu)
829 { 829 {
830 struct spu_problem __iomem *prob = spu->problem; 830 struct spu_problem __iomem *prob = spu->problem;
831 831
832 /* Save, Step 51: 832 /* Save, Step 51:
833 * Restore, Step 31. 833 * Restore, Step 31.
834 * Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to entry 834 * Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to entry
835 * point address of context save code in local 835 * point address of context save code in local
836 * storage. 836 * storage.
837 * 837 *
838 * This implementation uses SPU-side save/restore 838 * This implementation uses SPU-side save/restore
839 * programs with entry points at LSA of 0. 839 * programs with entry points at LSA of 0.
840 */ 840 */
841 out_be32(&prob->spu_npc_RW, 0); 841 out_be32(&prob->spu_npc_RW, 0);
842 eieio(); 842 eieio();
843 } 843 }
844 844
845 static inline void set_signot1(struct spu_state *csa, struct spu *spu) 845 static inline void set_signot1(struct spu_state *csa, struct spu *spu)
846 { 846 {
847 struct spu_problem __iomem *prob = spu->problem; 847 struct spu_problem __iomem *prob = spu->problem;
848 union { 848 union {
849 u64 ull; 849 u64 ull;
850 u32 ui[2]; 850 u32 ui[2];
851 } addr64; 851 } addr64;
852 852
853 /* Save, Step 52: 853 /* Save, Step 52:
854 * Restore, Step 32: 854 * Restore, Step 32:
855 * Write SPU_Sig_Notify_1 register with upper 32-bits 855 * Write SPU_Sig_Notify_1 register with upper 32-bits
856 * of the CSA.LSCSA effective address. 856 * of the CSA.LSCSA effective address.
857 */ 857 */
858 addr64.ull = (u64) csa->lscsa; 858 addr64.ull = (u64) csa->lscsa;
859 out_be32(&prob->signal_notify1, addr64.ui[0]); 859 out_be32(&prob->signal_notify1, addr64.ui[0]);
860 eieio(); 860 eieio();
861 } 861 }
862 862
863 static inline void set_signot2(struct spu_state *csa, struct spu *spu) 863 static inline void set_signot2(struct spu_state *csa, struct spu *spu)
864 { 864 {
865 struct spu_problem __iomem *prob = spu->problem; 865 struct spu_problem __iomem *prob = spu->problem;
866 union { 866 union {
867 u64 ull; 867 u64 ull;
868 u32 ui[2]; 868 u32 ui[2];
869 } addr64; 869 } addr64;
870 870
871 /* Save, Step 53: 871 /* Save, Step 53:
872 * Restore, Step 33: 872 * Restore, Step 33:
873 * Write SPU_Sig_Notify_2 register with lower 32-bits 873 * Write SPU_Sig_Notify_2 register with lower 32-bits
874 * of the CSA.LSCSA effective address. 874 * of the CSA.LSCSA effective address.
875 */ 875 */
876 addr64.ull = (u64) csa->lscsa; 876 addr64.ull = (u64) csa->lscsa;
877 out_be32(&prob->signal_notify2, addr64.ui[1]); 877 out_be32(&prob->signal_notify2, addr64.ui[1]);
878 eieio(); 878 eieio();
879 } 879 }
880 880
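set_signot1() and set_signot2() above together place the 64-bit CSA.LSCSA effective address into two 32-bit problem-state registers; with the big-endian layout used here, ui[0] of the union holds the upper word and ui[1] the lower. The same split written with shifts (a sketch for illustration only):

	u64 ea = (u64)csa->lscsa;
	u32 upper = ea >> 32;		/* value written by set_signot1() */
	u32 lower = ea & 0xffffffffUL;	/* value written by set_signot2() */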
881 static inline void send_save_code(struct spu_state *csa, struct spu *spu) 881 static inline void send_save_code(struct spu_state *csa, struct spu *spu)
882 { 882 {
883 unsigned long addr = (unsigned long)&spu_save_code[0]; 883 unsigned long addr = (unsigned long)&spu_save_code[0];
884 unsigned int ls_offset = 0x0; 884 unsigned int ls_offset = 0x0;
885 unsigned int size = sizeof(spu_save_code); 885 unsigned int size = sizeof(spu_save_code);
886 unsigned int tag = 0; 886 unsigned int tag = 0;
887 unsigned int rclass = 0; 887 unsigned int rclass = 0;
888 unsigned int cmd = MFC_GETFS_CMD; 888 unsigned int cmd = MFC_GETFS_CMD;
889 889
890 /* Save, Step 54: 890 /* Save, Step 54:
891 * Issue a DMA command to copy context save code 891 * Issue a DMA command to copy context save code
892 * to local storage and start SPU. 892 * to local storage and start SPU.
893 */ 893 */
894 send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd); 894 send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
895 } 895 }
896 896
897 static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu) 897 static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu)
898 { 898 {
899 struct spu_problem __iomem *prob = spu->problem; 899 struct spu_problem __iomem *prob = spu->problem;
900 900
901 /* Save, Step 55: 901 /* Save, Step 55:
902 * Restore, Step 38. 902 * Restore, Step 38.
903 * Write PPU_QueryMask=1 (enable Tag Group 0) 903 * Write PPU_QueryMask=1 (enable Tag Group 0)
904 * and issue eieio instruction. 904 * and issue eieio instruction.
905 */ 905 */
906 out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0)); 906 out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0));
907 eieio(); 907 eieio();
908 } 908 }
909 909
910 static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu) 910 static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
911 { 911 {
912 struct spu_problem __iomem *prob = spu->problem; 912 struct spu_problem __iomem *prob = spu->problem;
913 u32 mask = MFC_TAGID_TO_TAGMASK(0); 913 u32 mask = MFC_TAGID_TO_TAGMASK(0);
914 unsigned long flags; 914 unsigned long flags;
915 915
916 /* Save, Step 56: 916 /* Save, Step 56:
917 * Restore, Step 39. 917 * Restore, Step 39.
918 * Restore, Step 39. 918 * Restore, Step 39.
919 * Restore, Step 46. 919 * Restore, Step 46.
920 * Poll PPU_TagStatus[gn] until 01 (Tag group 0 complete) 920 * Poll PPU_TagStatus[gn] until 01 (Tag group 0 complete)
921 * or write PPU_QueryType[TS]=01 and wait for Tag Group 921 * or write PPU_QueryType[TS]=01 and wait for Tag Group
922 * Complete Interrupt. Write INT_Stat_Class0 or 922 * Complete Interrupt. Write INT_Stat_Class0 or
923 * INT_Stat_Class2 with value of 'handled'. 923 * INT_Stat_Class2 with value of 'handled'.
924 */ 924 */
925 POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask); 925 POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);
926 926
927 local_irq_save(flags); 927 local_irq_save(flags);
928 spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK); 928 spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
929 spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK); 929 spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
930 local_irq_restore(flags); 930 local_irq_restore(flags);
931 } 931 }
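/*
 * Sketch of the polling helpers used above (illustrative; the real
 * definitions live earlier in this file and may differ in detail):
 * both are plain busy-wait loops with no timeout, roughly
 *
 *	#define POLL_WHILE_TRUE(_c)	do { } while (_c)
 *	#define POLL_WHILE_FALSE(_c)	POLL_WHILE_TRUE(!(_c))
 *
 * so wait_tag_complete() spins until PPU_TagStatus reports tag group 0
 * complete, then clears any class 0/2 interrupt status the completion
 * may have raised.
 */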
932 932
933 static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu) 933 static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
934 { 934 {
935 struct spu_problem __iomem *prob = spu->problem; 935 struct spu_problem __iomem *prob = spu->problem;
936 unsigned long flags; 936 unsigned long flags;
937 937
938 /* Save, Step 57: 938 /* Save, Step 57:
939 * Restore, Step 40. 939 * Restore, Step 40.
940 * Poll until SPU_Status[R]=0 or wait for SPU Class 0 940 * Poll until SPU_Status[R]=0 or wait for SPU Class 0
941 * or SPU Class 2 interrupt. Write INT_Stat_class0 941 * or SPU Class 2 interrupt. Write INT_Stat_class0
942 * or INT_Stat_class2 with value of handled. 942 * or INT_Stat_class2 with value of handled.
943 */ 943 */
944 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); 944 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
945 945
946 local_irq_save(flags); 946 local_irq_save(flags);
947 spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK); 947 spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
948 spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK); 948 spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
949 local_irq_restore(flags); 949 local_irq_restore(flags);
950 } 950 }
951 951
952 static inline int check_save_status(struct spu_state *csa, struct spu *spu) 952 static inline int check_save_status(struct spu_state *csa, struct spu *spu)
953 { 953 {
954 struct spu_problem __iomem *prob = spu->problem; 954 struct spu_problem __iomem *prob = spu->problem;
955 u32 complete; 955 u32 complete;
956 956
957 /* Save, Step 54: 957 /* Save, Step 54:
958 * If SPU_Status[P]=1 and SPU_Status[SC] = "success", 958 * If SPU_Status[P]=1 and SPU_Status[SC] = "success",
959 * context save succeeded, otherwise context save 959 * context save succeeded, otherwise context save
960 * failed. 960 * failed.
961 */ 961 */
962 complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) | 962 complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
963 SPU_STATUS_STOPPED_BY_STOP); 963 SPU_STATUS_STOPPED_BY_STOP);
964 return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0; 964 return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
965 } 965 }
966 966
967 static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu) 967 static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu)
968 { 968 {
969 /* Restore, Step 4: 969 /* Restore, Step 4:
970 * If required, notify the "using application" that 970 * If required, notify the "using application" that
971 * the SPU task has been terminated. TBD. 971 * the SPU task has been terminated. TBD.
972 */ 972 */
973 } 973 }
974 974
975 static inline void suspend_mfc_and_halt_decr(struct spu_state *csa, 975 static inline void suspend_mfc_and_halt_decr(struct spu_state *csa,
976 struct spu *spu) 976 struct spu *spu)
977 { 977 {
978 struct spu_priv2 __iomem *priv2 = spu->priv2; 978 struct spu_priv2 __iomem *priv2 = spu->priv2;
979 979
980 /* Restore, Step 7: 980 /* Restore, Step 7:
981 * Write MFC_Cntl[Dh,Sc,Sm]='1','1','0' to suspend 981 * Write MFC_Cntl[Dh,Sc,Sm]='1','1','0' to suspend
982 * the queue and halt the decrementer. 982 * the queue and halt the decrementer.
983 */ 983 */
984 out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE | 984 out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE |
985 MFC_CNTL_DECREMENTER_HALTED); 985 MFC_CNTL_DECREMENTER_HALTED);
986 eieio(); 986 eieio();
987 } 987 }
988 988
989 static inline void wait_suspend_mfc_complete(struct spu_state *csa, 989 static inline void wait_suspend_mfc_complete(struct spu_state *csa,
990 struct spu *spu) 990 struct spu *spu)
991 { 991 {
992 struct spu_priv2 __iomem *priv2 = spu->priv2; 992 struct spu_priv2 __iomem *priv2 = spu->priv2;
993 993
994 /* Restore, Step 8: 994 /* Restore, Step 8:
995 * Restore, Step 47. 995 * Restore, Step 47.
996 * Poll MFC_CNTL[Ss] until 11 is returned. 996 * Poll MFC_CNTL[Ss] until 11 is returned.
997 */ 997 */
998 POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) & 998 POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
999 MFC_CNTL_SUSPEND_DMA_STATUS_MASK) == 999 MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
1000 MFC_CNTL_SUSPEND_COMPLETE); 1000 MFC_CNTL_SUSPEND_COMPLETE);
1001 } 1001 }
1002 1002
1003 static inline int suspend_spe(struct spu_state *csa, struct spu *spu) 1003 static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
1004 { 1004 {
1005 struct spu_problem __iomem *prob = spu->problem; 1005 struct spu_problem __iomem *prob = spu->problem;
1006 1006
1007 /* Restore, Step 9: 1007 /* Restore, Step 9:
1008 * If SPU_Status[R]=1, stop SPU execution 1008 * If SPU_Status[R]=1, stop SPU execution
1009 * and wait for stop to complete. 1009 * and wait for stop to complete.
1010 * 1010 *
1011 * Returns 1 if SPU_Status[R]=1 on entry. 1011 * Returns 1 if SPU_Status[R]=1 on entry.
1012 * 0 otherwise 1012 * 0 otherwise
1013 */ 1013 */
1014 if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) { 1014 if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
1015 if (in_be32(&prob->spu_status_R) & 1015 if (in_be32(&prob->spu_status_R) &
1016 SPU_STATUS_ISOLATED_EXIT_STATUS) { 1016 SPU_STATUS_ISOLATED_EXIT_STATUS) {
1017 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & 1017 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1018 SPU_STATUS_RUNNING); 1018 SPU_STATUS_RUNNING);
1019 } 1019 }
1020 if ((in_be32(&prob->spu_status_R) & 1020 if ((in_be32(&prob->spu_status_R) &
1021 SPU_STATUS_ISOLATED_LOAD_STATUS) 1021 SPU_STATUS_ISOLATED_LOAD_STATUS)
1022 || (in_be32(&prob->spu_status_R) & 1022 || (in_be32(&prob->spu_status_R) &
1023 SPU_STATUS_ISOLATED_STATE)) { 1023 SPU_STATUS_ISOLATED_STATE)) {
1024 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); 1024 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
1025 eieio(); 1025 eieio();
1026 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & 1026 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1027 SPU_STATUS_RUNNING); 1027 SPU_STATUS_RUNNING);
1028 out_be32(&prob->spu_runcntl_RW, 0x2); 1028 out_be32(&prob->spu_runcntl_RW, 0x2);
1029 eieio(); 1029 eieio();
1030 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & 1030 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1031 SPU_STATUS_RUNNING); 1031 SPU_STATUS_RUNNING);
1032 } 1032 }
1033 if (in_be32(&prob->spu_status_R) & 1033 if (in_be32(&prob->spu_status_R) &
1034 SPU_STATUS_WAITING_FOR_CHANNEL) { 1034 SPU_STATUS_WAITING_FOR_CHANNEL) {
1035 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); 1035 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
1036 eieio(); 1036 eieio();
1037 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & 1037 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1038 SPU_STATUS_RUNNING); 1038 SPU_STATUS_RUNNING);
1039 } 1039 }
1040 return 1; 1040 return 1;
1041 } 1041 }
1042 return 0; 1042 return 0;
1043 } 1043 }
1044 1044
1045 static inline void clear_spu_status(struct spu_state *csa, struct spu *spu) 1045 static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
1046 { 1046 {
1047 struct spu_problem __iomem *prob = spu->problem; 1047 struct spu_problem __iomem *prob = spu->problem;
1048 1048
1049 /* Restore, Step 10: 1049 /* Restore, Step 10:
1050 * If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1, 1050 * If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1,
1051 * release SPU from isolate state. 1051 * release SPU from isolate state.
1052 */ 1052 */
1053 if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) { 1053 if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
1054 if (in_be32(&prob->spu_status_R) & 1054 if (in_be32(&prob->spu_status_R) &
1055 SPU_STATUS_ISOLATED_EXIT_STATUS) { 1055 SPU_STATUS_ISOLATED_EXIT_STATUS) {
1056 spu_mfc_sr1_set(spu, 1056 spu_mfc_sr1_set(spu,
1057 MFC_STATE1_MASTER_RUN_CONTROL_MASK); 1057 MFC_STATE1_MASTER_RUN_CONTROL_MASK);
1058 eieio(); 1058 eieio();
1059 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); 1059 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
1060 eieio(); 1060 eieio();
1061 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & 1061 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1062 SPU_STATUS_RUNNING); 1062 SPU_STATUS_RUNNING);
1063 } 1063 }
1064 if ((in_be32(&prob->spu_status_R) & 1064 if ((in_be32(&prob->spu_status_R) &
1065 SPU_STATUS_ISOLATED_LOAD_STATUS) 1065 SPU_STATUS_ISOLATED_LOAD_STATUS)
1066 || (in_be32(&prob->spu_status_R) & 1066 || (in_be32(&prob->spu_status_R) &
1067 SPU_STATUS_ISOLATED_STATE)) { 1067 SPU_STATUS_ISOLATED_STATE)) {
1068 spu_mfc_sr1_set(spu, 1068 spu_mfc_sr1_set(spu,
1069 MFC_STATE1_MASTER_RUN_CONTROL_MASK); 1069 MFC_STATE1_MASTER_RUN_CONTROL_MASK);
1070 eieio(); 1070 eieio();
1071 out_be32(&prob->spu_runcntl_RW, 0x2); 1071 out_be32(&prob->spu_runcntl_RW, 0x2);
1072 eieio(); 1072 eieio();
1073 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & 1073 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1074 SPU_STATUS_RUNNING); 1074 SPU_STATUS_RUNNING);
1075 } 1075 }
1076 } 1076 }
1077 } 1077 }
1078 1078
1079 static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu) 1079 static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
1080 { 1080 {
1081 struct spu_priv2 __iomem *priv2 = spu->priv2; 1081 struct spu_priv2 __iomem *priv2 = spu->priv2;
1082 u64 ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL }; 1082 u64 ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
1083 u64 idx; 1083 u64 idx;
1084 int i; 1084 int i;
1085 1085
1086 /* Restore, Step 20: 1086 /* Restore, Step 20:
1087 */ 1087 */
1088 1088
1089 /* Reset CH 1 */ 1089 /* Reset CH 1 */
1090 out_be64(&priv2->spu_chnlcntptr_RW, 1); 1090 out_be64(&priv2->spu_chnlcntptr_RW, 1);
1091 out_be64(&priv2->spu_chnldata_RW, 0UL); 1091 out_be64(&priv2->spu_chnldata_RW, 0UL);
1092 1092
1093 /* Reset the following CH: [0,3,4,24,25,27] */ 1093 /* Reset the following CH: [0,3,4,24,25,27] */
1094 for (i = 0; i < ARRAY_SIZE(ch_indices); i++) { 1094 for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
1095 idx = ch_indices[i]; 1095 idx = ch_indices[i];
1096 out_be64(&priv2->spu_chnlcntptr_RW, idx); 1096 out_be64(&priv2->spu_chnlcntptr_RW, idx);
1097 eieio(); 1097 eieio();
1098 out_be64(&priv2->spu_chnldata_RW, 0UL); 1098 out_be64(&priv2->spu_chnldata_RW, 0UL);
1099 out_be64(&priv2->spu_chnlcnt_RW, 0UL); 1099 out_be64(&priv2->spu_chnlcnt_RW, 0UL);
1100 eieio(); 1100 eieio();
1101 } 1101 }
1102 } 1102 }
1103 1103
1104 static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu) 1104 static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu)
1105 { 1105 {
1106 struct spu_priv2 __iomem *priv2 = spu->priv2; 1106 struct spu_priv2 __iomem *priv2 = spu->priv2;
1107 u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL }; 1107 u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL };
1108 u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL }; 1108 u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL };
1109 u64 idx; 1109 u64 idx;
1110 int i; 1110 int i;
1111 1111
1112 /* Restore, Step 21: 1112 /* Restore, Step 21:
1113 * Reset the following CH: [21, 23, 28, 29, 30] 1113 * Reset the following CH: [21, 23, 28, 29, 30]
1114 */ 1114 */
1115 for (i = 0; i < 5; i++) { 1115 for (i = 0; i < 5; i++) {
1116 idx = ch_indices[i]; 1116 idx = ch_indices[i];
1117 out_be64(&priv2->spu_chnlcntptr_RW, idx); 1117 out_be64(&priv2->spu_chnlcntptr_RW, idx);
1118 eieio(); 1118 eieio();
1119 out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]); 1119 out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
1120 eieio(); 1120 eieio();
1121 } 1121 }
1122 } 1122 }
1123 1123
1124 static inline void setup_spu_status_part1(struct spu_state *csa, 1124 static inline void setup_spu_status_part1(struct spu_state *csa,
1125 struct spu *spu) 1125 struct spu *spu)
1126 { 1126 {
1127 u32 status_P = SPU_STATUS_STOPPED_BY_STOP; 1127 u32 status_P = SPU_STATUS_STOPPED_BY_STOP;
1128 u32 status_I = SPU_STATUS_INVALID_INSTR; 1128 u32 status_I = SPU_STATUS_INVALID_INSTR;
1129 u32 status_H = SPU_STATUS_STOPPED_BY_HALT; 1129 u32 status_H = SPU_STATUS_STOPPED_BY_HALT;
1130 u32 status_S = SPU_STATUS_SINGLE_STEP; 1130 u32 status_S = SPU_STATUS_SINGLE_STEP;
1131 u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR; 1131 u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR;
1132 u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP; 1132 u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP;
1133 u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP; 1133 u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
1134 u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_INVALID_INSTR; 1134 u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_INVALID_INSTR;
1135 u32 status_code; 1135 u32 status_code;
1136 1136
1137 /* Restore, Step 27: 1137 /* Restore, Step 27:
1138 * If the CSA.SPU_Status[I,S,H,P]=1 then add the correct 1138 * If the CSA.SPU_Status[I,S,H,P]=1 then add the correct
1139 * instruction sequence to the end of the SPU based restore 1139 * instruction sequence to the end of the SPU based restore
1140 * code (after the "context restored" stop and signal) to 1140 * code (after the "context restored" stop and signal) to
1141 * restore the correct SPU status. 1141 * restore the correct SPU status.
1142 * 1142 *
1143 * NOTE: Rather than modifying the SPU executable, we 1143 * NOTE: Rather than modifying the SPU executable, we
1144 * instead add a new 'stopped_status' field to the 1144 * instead add a new 'stopped_status' field to the
1145 * LSCSA. The SPU-side restore reads this field and 1145 * LSCSA. The SPU-side restore reads this field and
1146 * takes the appropriate action when exiting. 1146 * takes the appropriate action when exiting.
1147 */ 1147 */
1148 1148
1149 status_code = 1149 status_code =
1150 (csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF; 1150 (csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF;
1151 if ((csa->prob.spu_status_R & status_P_I) == status_P_I) { 1151 if ((csa->prob.spu_status_R & status_P_I) == status_P_I) {
1152 1152
1153 /* SPU_Status[P,I]=1 - Illegal Instruction followed 1153 /* SPU_Status[P,I]=1 - Illegal Instruction followed
1154 * by Stop and Signal instruction, followed by 'br -4'. 1154 * by Stop and Signal instruction, followed by 'br -4'.
1155 * 1155 *
1156 */ 1156 */
1157 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I; 1157 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I;
1158 csa->lscsa->stopped_status.slot[1] = status_code; 1158 csa->lscsa->stopped_status.slot[1] = status_code;
1159 1159
1160 } else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) { 1160 } else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) {
1161 1161
1162 /* SPU_Status[P,H]=1 - Halt Conditional, followed 1162 /* SPU_Status[P,H]=1 - Halt Conditional, followed
1163 * by Stop and Signal instruction, followed by 1163 * by Stop and Signal instruction, followed by
1164 * 'br -4'. 1164 * 'br -4'.
1165 */ 1165 */
1166 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H; 1166 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H;
1167 csa->lscsa->stopped_status.slot[1] = status_code; 1167 csa->lscsa->stopped_status.slot[1] = status_code;
1168 1168
1169 } else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) { 1169 } else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) {
1170 1170
1171 /* SPU_Status[S,P]=1 - Stop and Signal instruction 1171 /* SPU_Status[S,P]=1 - Stop and Signal instruction
1172 * followed by 'br -4'. 1172 * followed by 'br -4'.
1173 */ 1173 */
1174 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P; 1174 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P;
1175 csa->lscsa->stopped_status.slot[1] = status_code; 1175 csa->lscsa->stopped_status.slot[1] = status_code;
1176 1176
1177 } else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) { 1177 } else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) {
1178 1178
1179 /* SPU_Status[S,I]=1 - Illegal instruction followed 1179 /* SPU_Status[S,I]=1 - Illegal instruction followed
1180 * by 'br -4'. 1180 * by 'br -4'.
1181 */ 1181 */
1182 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I; 1182 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I;
1183 csa->lscsa->stopped_status.slot[1] = status_code; 1183 csa->lscsa->stopped_status.slot[1] = status_code;
1184 1184
1185 } else if ((csa->prob.spu_status_R & status_P) == status_P) { 1185 } else if ((csa->prob.spu_status_R & status_P) == status_P) {
1186 1186
1187 /* SPU_Status[P]=1 - Stop and Signal instruction 1187 /* SPU_Status[P]=1 - Stop and Signal instruction
1188 * followed by 'br -4'. 1188 * followed by 'br -4'.
1189 */ 1189 */
1190 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P; 1190 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P;
1191 csa->lscsa->stopped_status.slot[1] = status_code; 1191 csa->lscsa->stopped_status.slot[1] = status_code;
1192 1192
1193 } else if ((csa->prob.spu_status_R & status_H) == status_H) { 1193 } else if ((csa->prob.spu_status_R & status_H) == status_H) {
1194 1194
1195 /* SPU_Status[H]=1 - Halt Conditional, followed 1195 /* SPU_Status[H]=1 - Halt Conditional, followed
1196 * by 'br -4'. 1196 * by 'br -4'.
1197 */ 1197 */
1198 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H; 1198 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H;
1199 1199
1200 } else if ((csa->prob.spu_status_R & status_S) == status_S) { 1200 } else if ((csa->prob.spu_status_R & status_S) == status_S) {
1201 1201
1202 /* SPU_Status[S]=1 - Two nop instructions. 1202 /* SPU_Status[S]=1 - Two nop instructions.
1203 */ 1203 */
1204 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S; 1204 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S;
1205 1205
1206 } else if ((csa->prob.spu_status_R & status_I) == status_I) { 1206 } else if ((csa->prob.spu_status_R & status_I) == status_I) {
1207 1207
1208 /* SPU_Status[I]=1 - Illegal instruction followed 1208 /* SPU_Status[I]=1 - Illegal instruction followed
1209 * by 'br -4'. 1209 * by 'br -4'.
1210 */ 1210 */
1211 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I; 1211 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I;
1212 1212
1213 } 1213 }
1214 } 1214 }
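/*
 * Note on the branch ordering above (illustrative, not part of the
 * original comments): the combined-bit cases are tested before the
 * single-bit ones, so a context whose saved status has both P and I
 * set matches the first branch, e.g.
 *
 *	status = SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_INVALID_INSTR;
 *	(status & status_P_I) == status_P_I   ->  SPU_STOPPED_STATUS_P_I
 *
 * rather than falling through to the plain SPU_STOPPED_STATUS_P or
 * SPU_STOPPED_STATUS_I cases; the SPU-side restore then replays the
 * matching instruction sequence when it exits.
 */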
1215 1215
1216 static inline void setup_spu_status_part2(struct spu_state *csa, 1216 static inline void setup_spu_status_part2(struct spu_state *csa,
1217 struct spu *spu) 1217 struct spu *spu)
1218 { 1218 {
1219 u32 mask; 1219 u32 mask;
1220 1220
1221 /* Restore, Step 28: 1221 /* Restore, Step 28:
1222 * If the CSA.SPU_Status[I,S,H,P,R]=0 then 1222 * If the CSA.SPU_Status[I,S,H,P,R]=0 then
1223 * add a 'br *' instruction to the end of 1223 * add a 'br *' instruction to the end of
1224 * the SPU based restore code. 1224 * the SPU based restore code.
1225 * 1225 *
1226 * NOTE: Rather than modifying the SPU executable, we 1226 * NOTE: Rather than modifying the SPU executable, we
1227 * instead add a new 'stopped_status' field to the 1227 * instead add a new 'stopped_status' field to the
1228 * LSCSA. The SPU-side restore reads this field and 1228 * LSCSA. The SPU-side restore reads this field and
1229 * takes the appropriate action when exiting. 1229 * takes the appropriate action when exiting.
1230 */ 1230 */
1231 mask = SPU_STATUS_INVALID_INSTR | 1231 mask = SPU_STATUS_INVALID_INSTR |
1232 SPU_STATUS_SINGLE_STEP | 1232 SPU_STATUS_SINGLE_STEP |
1233 SPU_STATUS_STOPPED_BY_HALT | 1233 SPU_STATUS_STOPPED_BY_HALT |
1234 SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING; 1234 SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
1235 if (!(csa->prob.spu_status_R & mask)) { 1235 if (!(csa->prob.spu_status_R & mask)) {
1236 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R; 1236 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R;
1237 } 1237 }
1238 } 1238 }
1239 1239
1240 static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu) 1240 static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)
1241 { 1241 {
1242 /* Restore, Step 29: 1242 /* Restore, Step 29:
1243 * Restore RA_GROUP_ID register and the 1243 * Restore RA_GROUP_ID register and the
1244 * RA_ENABLE register from the CSA. 1244 * RA_ENABLE register from the CSA.
1245 */ 1245 */
1246 spu_resource_allocation_groupID_set(spu, 1246 spu_resource_allocation_groupID_set(spu,
1247 csa->priv1.resource_allocation_groupID_RW); 1247 csa->priv1.resource_allocation_groupID_RW);
1248 spu_resource_allocation_enable_set(spu, 1248 spu_resource_allocation_enable_set(spu,
1249 csa->priv1.resource_allocation_enable_RW); 1249 csa->priv1.resource_allocation_enable_RW);
1250 } 1250 }
1251 1251
1252 static inline void send_restore_code(struct spu_state *csa, struct spu *spu) 1252 static inline void send_restore_code(struct spu_state *csa, struct spu *spu)
1253 { 1253 {
1254 unsigned long addr = (unsigned long)&spu_restore_code[0]; 1254 unsigned long addr = (unsigned long)&spu_restore_code[0];
1255 unsigned int ls_offset = 0x0; 1255 unsigned int ls_offset = 0x0;
1256 unsigned int size = sizeof(spu_restore_code); 1256 unsigned int size = sizeof(spu_restore_code);
1257 unsigned int tag = 0; 1257 unsigned int tag = 0;
1258 unsigned int rclass = 0; 1258 unsigned int rclass = 0;
1259 unsigned int cmd = MFC_GETFS_CMD; 1259 unsigned int cmd = MFC_GETFS_CMD;
1260 1260
1261 /* Restore, Step 37: 1261 /* Restore, Step 37:
1262 * Issue MFC DMA command to copy context 1262 * Issue MFC DMA command to copy context
1263 * restore code to local storage. 1263 * restore code to local storage.
1264 */ 1264 */
1265 send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd); 1265 send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
1266 } 1266 }
1267 1267
1268 static inline void setup_decr(struct spu_state *csa, struct spu *spu) 1268 static inline void setup_decr(struct spu_state *csa, struct spu *spu)
1269 { 1269 {
1270 /* Restore, Step 34: 1270 /* Restore, Step 34:
1271 * If CSA.MFC_CNTL[Ds]=1 (decrementer was 1271 * If CSA.MFC_CNTL[Ds]=1 (decrementer was
1272 * running) then adjust decrementer, set 1272 * running) then adjust decrementer, set
1273 * decrementer running status in LSCSA, 1273 * decrementer running status in LSCSA,
1274 * and set decrementer "wrapped" status 1274 * and set decrementer "wrapped" status
1275 * in LSCSA. 1275 * in LSCSA.
1276 */ 1276 */
1277 if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) { 1277 if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) {
1278 cycles_t resume_time = get_cycles(); 1278 cycles_t resume_time = get_cycles();
1279 cycles_t delta_time = resume_time - csa->suspend_time; 1279 cycles_t delta_time = resume_time - csa->suspend_time;
1280 1280
1281 csa->lscsa->decr_status.slot[0] = SPU_DECR_STATUS_RUNNING; 1281 csa->lscsa->decr_status.slot[0] = SPU_DECR_STATUS_RUNNING;
1282 if (csa->lscsa->decr.slot[0] < delta_time) { 1282 if (csa->lscsa->decr.slot[0] < delta_time) {
1283 csa->lscsa->decr_status.slot[0] |= 1283 csa->lscsa->decr_status.slot[0] |=
1284 SPU_DECR_STATUS_WRAPPED; 1284 SPU_DECR_STATUS_WRAPPED;
1285 } 1285 }
1286 1286
1287 csa->lscsa->decr.slot[0] -= delta_time; 1287 csa->lscsa->decr.slot[0] -= delta_time;
1288 } else { 1288 } else {
1289 csa->lscsa->decr_status.slot[0] = 0; 1289 csa->lscsa->decr_status.slot[0] = 0;
1290 } 1290 }
1291 } 1291 }
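/*
 * Worked example with illustrative numbers (not from the original
 * source): suppose the context was saved with decr.slot[0] == 1000 and
 * get_cycles() has advanced by delta_time == 2500 since
 * csa->suspend_time.  Then
 *
 *	1000 < 2500  =>  decr_status.slot[0] |= SPU_DECR_STATUS_WRAPPED;
 *	decr.slot[0]  =  1000 - 2500;	(wraps modulo the slot width)
 *
 * so the restored decrementer looks as if the SPU had kept running and
 * wrapped while the context was switched out.
 */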
1292 1292
1293 static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu) 1293 static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu)
1294 { 1294 {
1295 /* Restore, Step 35: 1295 /* Restore, Step 35:
1296 * Copy the CSA.PU_MB data into the LSCSA. 1296 * Copy the CSA.PU_MB data into the LSCSA.
1297 */ 1297 */
1298 csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R; 1298 csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R;
1299 } 1299 }
1300 1300
1301 static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu) 1301 static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu)
1302 { 1302 {
1303 /* Restore, Step 36: 1303 /* Restore, Step 36:
1304 * Copy the CSA.PUINT_MB data into the LSCSA. 1304 * Copy the CSA.PUINT_MB data into the LSCSA.
1305 */ 1305 */
1306 csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R; 1306 csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R;
1307 } 1307 }
1308 1308
1309 static inline int check_restore_status(struct spu_state *csa, struct spu *spu) 1309 static inline int check_restore_status(struct spu_state *csa, struct spu *spu)
1310 { 1310 {
1311 struct spu_problem __iomem *prob = spu->problem; 1311 struct spu_problem __iomem *prob = spu->problem;
1312 u32 complete; 1312 u32 complete;
1313 1313
1314 /* Restore, Step 40: 1314 /* Restore, Step 40:
1315 * If SPU_Status[P]=1 and SPU_Status[SC] = "success", 1315 * If SPU_Status[P]=1 and SPU_Status[SC] = "success",
1316 * context restore succeeded, otherwise context restore 1316 * context restore succeeded, otherwise context restore
1317 * failed. 1317 * failed.
1318 */ 1318 */
1319 complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) | 1319 complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
1320 SPU_STATUS_STOPPED_BY_STOP); 1320 SPU_STATUS_STOPPED_BY_STOP);
1321 return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0; 1321 return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
1322 } 1322 }
1323 1323
1324 static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu) 1324 static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu)
1325 { 1325 {
1326 struct spu_priv2 __iomem *priv2 = spu->priv2; 1326 struct spu_priv2 __iomem *priv2 = spu->priv2;
1327 1327
1328 /* Restore, Step 41: 1328 /* Restore, Step 41:
1329 * Restore SPU_PrivCntl from the CSA. 1329 * Restore SPU_PrivCntl from the CSA.
1330 */ 1330 */
1331 out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW); 1331 out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW);
1332 eieio(); 1332 eieio();
1333 } 1333 }
1334 1334
1335 static inline void restore_status_part1(struct spu_state *csa, struct spu *spu) 1335 static inline void restore_status_part1(struct spu_state *csa, struct spu *spu)
1336 { 1336 {
1337 struct spu_problem __iomem *prob = spu->problem; 1337 struct spu_problem __iomem *prob = spu->problem;
1338 u32 mask; 1338 u32 mask;
1339 1339
1340 /* Restore, Step 42: 1340 /* Restore, Step 42:
1341 * If any CSA.SPU_Status[I,S,H,P]=1, then 1341 * If any CSA.SPU_Status[I,S,H,P]=1, then
1342 * restore the error or single step state. 1342 * restore the error or single step state.
1343 */ 1343 */
1344 mask = SPU_STATUS_INVALID_INSTR | 1344 mask = SPU_STATUS_INVALID_INSTR |
1345 SPU_STATUS_SINGLE_STEP | 1345 SPU_STATUS_SINGLE_STEP |
1346 SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP; 1346 SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
1347 if (csa->prob.spu_status_R & mask) { 1347 if (csa->prob.spu_status_R & mask) {
1348 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); 1348 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
1349 eieio(); 1349 eieio();
1350 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & 1350 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1351 SPU_STATUS_RUNNING); 1351 SPU_STATUS_RUNNING);
1352 } 1352 }
1353 } 1353 }
1354 1354
1355 static inline void restore_status_part2(struct spu_state *csa, struct spu *spu) 1355 static inline void restore_status_part2(struct spu_state *csa, struct spu *spu)
1356 { 1356 {
1357 struct spu_problem __iomem *prob = spu->problem; 1357 struct spu_problem __iomem *prob = spu->problem;
1358 u32 mask; 1358 u32 mask;
1359 1359
1360 /* Restore, Step 43: 1360 /* Restore, Step 43:
1361 * If all CSA.SPU_Status[I,S,H,P,R]=0 then write 1361 * If all CSA.SPU_Status[I,S,H,P,R]=0 then write
1362 * SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1, 1362 * SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1,
1363 * then write '00' to SPU_RunCntl[R0R1] and wait 1363 * then write '00' to SPU_RunCntl[R0R1] and wait
1364 * for SPU_Status[R]=0. 1364 * for SPU_Status[R]=0.
1365 */ 1365 */
1366 mask = SPU_STATUS_INVALID_INSTR | 1366 mask = SPU_STATUS_INVALID_INSTR |
1367 SPU_STATUS_SINGLE_STEP | 1367 SPU_STATUS_SINGLE_STEP |
1368 SPU_STATUS_STOPPED_BY_HALT | 1368 SPU_STATUS_STOPPED_BY_HALT |
1369 SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING; 1369 SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
1370 if (!(csa->prob.spu_status_R & mask)) { 1370 if (!(csa->prob.spu_status_R & mask)) {
1371 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); 1371 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
1372 eieio(); 1372 eieio();
1373 POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) & 1373 POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) &
1374 SPU_STATUS_RUNNING); 1374 SPU_STATUS_RUNNING);
1375 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); 1375 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
1376 eieio(); 1376 eieio();
1377 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & 1377 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1378 SPU_STATUS_RUNNING); 1378 SPU_STATUS_RUNNING);
1379 } 1379 }
1380 } 1380 }
1381 1381
1382 static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu) 1382 static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)
1383 { 1383 {
1384 unsigned long addr = (unsigned long)&csa->lscsa->ls[0]; 1384 unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
1385 unsigned int ls_offset = 0x0; 1385 unsigned int ls_offset = 0x0;
1386 unsigned int size = 16384; 1386 unsigned int size = 16384;
1387 unsigned int tag = 0; 1387 unsigned int tag = 0;
1388 unsigned int rclass = 0; 1388 unsigned int rclass = 0;
1389 unsigned int cmd = MFC_GET_CMD; 1389 unsigned int cmd = MFC_GET_CMD;
1390 1390
1391 /* Restore, Step 44: 1391 /* Restore, Step 44:
1392 * Issue a DMA command to restore the first 1392 * Issue a DMA command to restore the first
1393 * 16kb of local storage from CSA. 1393 * 16kb of local storage from CSA.
1394 */ 1394 */
1395 send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd); 1395 send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
1396 } 1396 }
1397 1397
1398 static inline void suspend_mfc(struct spu_state *csa, struct spu *spu) 1398 static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)
1399 { 1399 {
1400 struct spu_priv2 __iomem *priv2 = spu->priv2; 1400 struct spu_priv2 __iomem *priv2 = spu->priv2;
1401 1401
1402 /* Restore, Step 47. 1402 /* Restore, Step 47.
1403 * Write MFC_Cntl[Sc,Sm]='1','0' to suspend 1403 * Write MFC_Cntl[Sc,Sm]='1','0' to suspend
1404 * the queue. 1404 * the queue.
1405 */ 1405 */
1406 out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE); 1406 out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
1407 eieio(); 1407 eieio();
1408 } 1408 }
1409 1409
1410 static inline void clear_interrupts(struct spu_state *csa, struct spu *spu) 1410 static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
1411 { 1411 {
1412 /* Restore, Step 49: 1412 /* Restore, Step 49:
1413 * Write INT_MASK_class0 with value of 0. 1413 * Write INT_MASK_class0 with value of 0.
1414 * Write INT_MASK_class1 with value of 0. 1414 * Write INT_MASK_class1 with value of 0.
1415 * Write INT_MASK_class2 with value of 0. 1415 * Write INT_MASK_class2 with value of 0.
1416 * Write INT_STAT_class0 with value of -1. 1416 * Write INT_STAT_class0 with value of -1.
1417 * Write INT_STAT_class1 with value of -1. 1417 * Write INT_STAT_class1 with value of -1.
1418 * Write INT_STAT_class2 with value of -1. 1418 * Write INT_STAT_class2 with value of -1.
1419 */ 1419 */
1420 spin_lock_irq(&spu->register_lock); 1420 spin_lock_irq(&spu->register_lock);
1421 spu_int_mask_set(spu, 0, 0ul); 1421 spu_int_mask_set(spu, 0, 0ul);
1422 spu_int_mask_set(spu, 1, 0ul); 1422 spu_int_mask_set(spu, 1, 0ul);
1423 spu_int_mask_set(spu, 2, 0ul); 1423 spu_int_mask_set(spu, 2, 0ul);
1424 spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK); 1424 spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
1425 spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK); 1425 spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
1426 spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK); 1426 spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
1427 spin_unlock_irq(&spu->register_lock); 1427 spin_unlock_irq(&spu->register_lock);
1428 } 1428 }
1429 1429
1430 static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu) 1430 static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu)
1431 { 1431 {
1432 struct spu_priv2 __iomem *priv2 = spu->priv2; 1432 struct spu_priv2 __iomem *priv2 = spu->priv2;
1433 int i; 1433 int i;
1434 1434
1435 /* Restore, Step 50: 1435 /* Restore, Step 50:
1436 * If MFC_Cntl[Se]!=0 then restore 1436 * If MFC_Cntl[Se]!=0 then restore
1437 * MFC command queues. 1437 * MFC command queues.
1438 */ 1438 */
1439 if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) { 1439 if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) {
1440 for (i = 0; i < 8; i++) { 1440 for (i = 0; i < 8; i++) {
1441 out_be64(&priv2->puq[i].mfc_cq_data0_RW, 1441 out_be64(&priv2->puq[i].mfc_cq_data0_RW,
1442 csa->priv2.puq[i].mfc_cq_data0_RW); 1442 csa->priv2.puq[i].mfc_cq_data0_RW);
1443 out_be64(&priv2->puq[i].mfc_cq_data1_RW, 1443 out_be64(&priv2->puq[i].mfc_cq_data1_RW,
1444 csa->priv2.puq[i].mfc_cq_data1_RW); 1444 csa->priv2.puq[i].mfc_cq_data1_RW);
1445 out_be64(&priv2->puq[i].mfc_cq_data2_RW, 1445 out_be64(&priv2->puq[i].mfc_cq_data2_RW,
1446 csa->priv2.puq[i].mfc_cq_data2_RW); 1446 csa->priv2.puq[i].mfc_cq_data2_RW);
1447 out_be64(&priv2->puq[i].mfc_cq_data3_RW, 1447 out_be64(&priv2->puq[i].mfc_cq_data3_RW,
1448 csa->priv2.puq[i].mfc_cq_data3_RW); 1448 csa->priv2.puq[i].mfc_cq_data3_RW);
1449 } 1449 }
1450 for (i = 0; i < 16; i++) { 1450 for (i = 0; i < 16; i++) {
1451 out_be64(&priv2->spuq[i].mfc_cq_data0_RW, 1451 out_be64(&priv2->spuq[i].mfc_cq_data0_RW,
1452 csa->priv2.spuq[i].mfc_cq_data0_RW); 1452 csa->priv2.spuq[i].mfc_cq_data0_RW);
1453 out_be64(&priv2->spuq[i].mfc_cq_data1_RW, 1453 out_be64(&priv2->spuq[i].mfc_cq_data1_RW,
1454 csa->priv2.spuq[i].mfc_cq_data1_RW); 1454 csa->priv2.spuq[i].mfc_cq_data1_RW);
1455 out_be64(&priv2->spuq[i].mfc_cq_data2_RW, 1455 out_be64(&priv2->spuq[i].mfc_cq_data2_RW,
1456 csa->priv2.spuq[i].mfc_cq_data2_RW); 1456 csa->priv2.spuq[i].mfc_cq_data2_RW);
1457 out_be64(&priv2->spuq[i].mfc_cq_data3_RW, 1457 out_be64(&priv2->spuq[i].mfc_cq_data3_RW,
1458 csa->priv2.spuq[i].mfc_cq_data3_RW); 1458 csa->priv2.spuq[i].mfc_cq_data3_RW);
1459 } 1459 }
1460 } 1460 }
1461 eieio(); 1461 eieio();
1462 } 1462 }
1463 1463
1464 static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu) 1464 static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu)
1465 { 1465 {
1466 struct spu_problem __iomem *prob = spu->problem; 1466 struct spu_problem __iomem *prob = spu->problem;
1467 1467
1468 /* Restore, Step 51: 1468 /* Restore, Step 51:
1469 * Restore the PPU_QueryMask register from CSA. 1469 * Restore the PPU_QueryMask register from CSA.
1470 */ 1470 */
1471 out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW); 1471 out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW);
1472 eieio(); 1472 eieio();
1473 } 1473 }
1474 1474
1475 static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu) 1475 static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu)
1476 { 1476 {
1477 struct spu_problem __iomem *prob = spu->problem; 1477 struct spu_problem __iomem *prob = spu->problem;
1478 1478
1479 /* Restore, Step 52: 1479 /* Restore, Step 52:
1480 * Restore the PPU_QueryType register from CSA. 1480 * Restore the PPU_QueryType register from CSA.
1481 */ 1481 */
1482 out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW); 1482 out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW);
1483 eieio(); 1483 eieio();
1484 } 1484 }
1485 1485
1486 static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu) 1486 static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
1487 { 1487 {
1488 struct spu_priv2 __iomem *priv2 = spu->priv2; 1488 struct spu_priv2 __iomem *priv2 = spu->priv2;
1489 1489
1490 /* Restore, Step 53: 1490 /* Restore, Step 53:
1491 * Restore the MFC_CSR_TSQ register from CSA. 1491 * Restore the MFC_CSR_TSQ register from CSA.
1492 */ 1492 */
1493 out_be64(&priv2->spu_tag_status_query_RW, 1493 out_be64(&priv2->spu_tag_status_query_RW,
1494 csa->priv2.spu_tag_status_query_RW); 1494 csa->priv2.spu_tag_status_query_RW);
1495 eieio(); 1495 eieio();
1496 } 1496 }
1497 1497
1498 static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu) 1498 static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
1499 { 1499 {
1500 struct spu_priv2 __iomem *priv2 = spu->priv2; 1500 struct spu_priv2 __iomem *priv2 = spu->priv2;
1501 1501
1502 /* Restore, Step 54: 1502 /* Restore, Step 54:
1503 * Restore the MFC_CSR_CMD1 and MFC_CSR_CMD2 1503 * Restore the MFC_CSR_CMD1 and MFC_CSR_CMD2
1504 * registers from CSA. 1504 * registers from CSA.
1505 */ 1505 */
1506 out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW); 1506 out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW);
1507 out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW); 1507 out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW);
1508 eieio(); 1508 eieio();
1509 } 1509 }
1510 1510
1511 static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu) 1511 static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
1512 { 1512 {
1513 struct spu_priv2 __iomem *priv2 = spu->priv2; 1513 struct spu_priv2 __iomem *priv2 = spu->priv2;
1514 1514
1515 /* Restore, Step 55: 1515 /* Restore, Step 55:
1516 * Restore the MFC_CSR_ATO register from CSA. 1516 * Restore the MFC_CSR_ATO register from CSA.
1517 */ 1517 */
1518 out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW); 1518 out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW);
1519 } 1519 }
1520 1520
1521 static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu) 1521 static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
1522 { 1522 {
1523 /* Restore, Step 56: 1523 /* Restore, Step 56:
1524 * Restore the MFC_TCLASS_ID register from CSA. 1524 * Restore the MFC_TCLASS_ID register from CSA.
1525 */ 1525 */
1526 spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW); 1526 spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW);
1527 eieio(); 1527 eieio();
1528 } 1528 }
1529 1529
1530 static inline void set_llr_event(struct spu_state *csa, struct spu *spu) 1530 static inline void set_llr_event(struct spu_state *csa, struct spu *spu)
1531 { 1531 {
1532 u64 ch0_cnt, ch0_data; 1532 u64 ch0_cnt, ch0_data;
1533 u64 ch1_data; 1533 u64 ch1_data;
1534 1534
1535 /* Restore, Step 57: 1535 /* Restore, Step 57:
1536 * Set the Lock Line Reservation Lost Event by: 1536 * Set the Lock Line Reservation Lost Event by:
1537 * 1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1. 1537 * 1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1.
1538 * 2. If CSA.SPU_Channel_0_Count=0 and 1538 * 2. If CSA.SPU_Channel_0_Count=0 and
1539 * CSA.SPU_Wr_Event_Mask[Lr]=1 and 1539 * CSA.SPU_Wr_Event_Mask[Lr]=1 and
1540 * CSA.SPU_Event_Status[Lr]=0 then set 1540 * CSA.SPU_Event_Status[Lr]=0 then set
1541 * CSA.SPU_Event_Status_Count=1. 1541 * CSA.SPU_Event_Status_Count=1.
1542 */ 1542 */
1543 ch0_cnt = csa->spu_chnlcnt_RW[0]; 1543 ch0_cnt = csa->spu_chnlcnt_RW[0];
1544 ch0_data = csa->spu_chnldata_RW[0]; 1544 ch0_data = csa->spu_chnldata_RW[0];
1545 ch1_data = csa->spu_chnldata_RW[1]; 1545 ch1_data = csa->spu_chnldata_RW[1];
1546 csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT; 1546 csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT;
1547 if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) && 1547 if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) &&
1548 (ch1_data & MFC_LLR_LOST_EVENT)) { 1548 (ch1_data & MFC_LLR_LOST_EVENT)) {
1549 csa->spu_chnlcnt_RW[0] = 1; 1549 csa->spu_chnlcnt_RW[0] = 1;
1550 } 1550 }
1551 } 1551 }
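/*
 * Walk-through (illustrative, not part of the original comments):
 * suppose the saved context had SPU_RdEventStat (CH 0) count 0, the Lr
 * bit clear in its pending-event data, and Lr enabled in the saved
 * event mask (CH 1).  The code above then forces Lr into the saved
 * event data and bumps the CH 0 count to 1, so the restored SPU sees a
 * lock-line reservation lost event on its next read of CH 0; any
 * reservation held before the context switch is gone by now.
 */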
1552 1552
1553 static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu) 1553 static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
1554 { 1554 {
1555 /* Restore, Step 58: 1555 /* Restore, Step 58:
1556 * If the status of the CSA software decrementer 1556 * If the status of the CSA software decrementer
1557 * "wrapped" flag is set, OR in a '1' to 1557 * "wrapped" flag is set, OR in a '1' to
1558 * CSA.SPU_Event_Status[Tm]. 1558 * CSA.SPU_Event_Status[Tm].
1559 */ 1559 */
1560 if (!(csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED)) 1560 if (!(csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED))
1561 return; 1561 return;
1562 1562
1563 if ((csa->spu_chnlcnt_RW[0] == 0) && 1563 if ((csa->spu_chnlcnt_RW[0] == 0) &&
1564 (csa->spu_chnldata_RW[1] & 0x20) && 1564 (csa->spu_chnldata_RW[1] & 0x20) &&
1565 !(csa->spu_chnldata_RW[0] & 0x20)) 1565 !(csa->spu_chnldata_RW[0] & 0x20))
1566 csa->spu_chnlcnt_RW[0] = 1; 1566 csa->spu_chnlcnt_RW[0] = 1;
1567 1567
1568 csa->spu_chnldata_RW[0] |= 0x20; 1568 csa->spu_chnldata_RW[0] |= 0x20;
1569 } 1569 }
1570 1570
1571 static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu) 1571 static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)
1572 { 1572 {
1573 struct spu_priv2 __iomem *priv2 = spu->priv2; 1573 struct spu_priv2 __iomem *priv2 = spu->priv2;
1574 u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL }; 1574 u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
1575 int i; 1575 int i;
1576 1576
1577 /* Restore, Step 59: 1577 /* Restore, Step 59:
1578 * Restore the following CH: [0,3,4,24,25,27] 1578 * Restore the following CH: [0,3,4,24,25,27]
1579 */ 1579 */
1580 for (i = 0; i < ARRAY_SIZE(ch_indices); i++) { 1580 for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
1581 idx = ch_indices[i]; 1581 idx = ch_indices[i];
1582 out_be64(&priv2->spu_chnlcntptr_RW, idx); 1582 out_be64(&priv2->spu_chnlcntptr_RW, idx);
1583 eieio(); 1583 eieio();
1584 out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]); 1584 out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]);
1585 out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]); 1585 out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]);
1586 eieio(); 1586 eieio();
1587 } 1587 }
1588 } 1588 }
1589 1589
1590 static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu) 1590 static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu)
1591 { 1591 {
1592 struct spu_priv2 __iomem *priv2 = spu->priv2; 1592 struct spu_priv2 __iomem *priv2 = spu->priv2;
1593 u64 ch_indices[3] = { 9UL, 21UL, 23UL }; 1593 u64 ch_indices[3] = { 9UL, 21UL, 23UL };
1594 u64 ch_counts[3] = { 1UL, 16UL, 1UL }; 1594 u64 ch_counts[3] = { 1UL, 16UL, 1UL };
1595 u64 idx; 1595 u64 idx;
1596 int i; 1596 int i;
1597 1597
1598 /* Restore, Step 60: 1598 /* Restore, Step 60:
1599 * Restore the following CH: [9,21,23]. 1599 * Restore the following CH: [9,21,23].
1600 */ 1600 */
1601 ch_counts[0] = 1UL; 1601 ch_counts[0] = 1UL;
1602 ch_counts[1] = csa->spu_chnlcnt_RW[21]; 1602 ch_counts[1] = csa->spu_chnlcnt_RW[21];
1603 ch_counts[2] = 1UL; 1603 ch_counts[2] = 1UL;
1604 for (i = 0; i < 3; i++) { 1604 for (i = 0; i < 3; i++) {
1605 idx = ch_indices[i]; 1605 idx = ch_indices[i];
1606 out_be64(&priv2->spu_chnlcntptr_RW, idx); 1606 out_be64(&priv2->spu_chnlcntptr_RW, idx);
1607 eieio(); 1607 eieio();
1608 out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]); 1608 out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
1609 eieio(); 1609 eieio();
1610 } 1610 }
1611 } 1611 }
1612 1612
1613 static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu) 1613 static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu)
1614 { 1614 {
1615 struct spu_priv2 __iomem *priv2 = spu->priv2; 1615 struct spu_priv2 __iomem *priv2 = spu->priv2;
1616 1616
1617 /* Restore, Step 61: 1617 /* Restore, Step 61:
1618 * Restore the SPU_LSLR register from CSA. 1618 * Restore the SPU_LSLR register from CSA.
1619 */ 1619 */
1620 out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW); 1620 out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW);
1621 eieio(); 1621 eieio();
1622 } 1622 }
1623 1623
1624 static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu) 1624 static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu)
1625 { 1625 {
1626 struct spu_priv2 __iomem *priv2 = spu->priv2; 1626 struct spu_priv2 __iomem *priv2 = spu->priv2;
1627 1627
1628 /* Restore, Step 62: 1628 /* Restore, Step 62:
1629 * Restore the SPU_Cfg register from CSA. 1629 * Restore the SPU_Cfg register from CSA.
1630 */ 1630 */
1631 out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW); 1631 out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW);
1632 eieio(); 1632 eieio();
1633 } 1633 }
1634 1634
1635 static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu) 1635 static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu)
1636 { 1636 {
1637 /* Restore, Step 63: 1637 /* Restore, Step 63:
1638 * Restore PM_Trace_Tag_Wait_Mask from CSA. 1638 * Restore PM_Trace_Tag_Wait_Mask from CSA.
1639 * Not performed by this implementation. 1639 * Not performed by this implementation.
1640 */ 1640 */
1641 } 1641 }
1642 1642
1643 static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu) 1643 static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu)
1644 { 1644 {
1645 struct spu_problem __iomem *prob = spu->problem; 1645 struct spu_problem __iomem *prob = spu->problem;
1646 1646
1647 /* Restore, Step 64: 1647 /* Restore, Step 64:
1648 * Restore SPU_NPC from CSA. 1648 * Restore SPU_NPC from CSA.
1649 */ 1649 */
1650 out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW); 1650 out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW);
1651 eieio(); 1651 eieio();
1652 } 1652 }
1653 1653
1654 static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu) 1654 static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
1655 { 1655 {
1656 struct spu_priv2 __iomem *priv2 = spu->priv2; 1656 struct spu_priv2 __iomem *priv2 = spu->priv2;
1657 int i; 1657 int i;
1658 1658
1659 /* Restore, Step 65: 1659 /* Restore, Step 65:
1660 * Restore MFC_RdSPU_MB from CSA. 1660 * Restore MFC_RdSPU_MB from CSA.
1661 */ 1661 */
1662 out_be64(&priv2->spu_chnlcntptr_RW, 29UL); 1662 out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
1663 eieio(); 1663 eieio();
1664 out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]); 1664 out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]);
1665 for (i = 0; i < 4; i++) { 1665 for (i = 0; i < 4; i++) {
1666 out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]); 1666 out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]);
1667 } 1667 }
1668 eieio(); 1668 eieio();
1669 } 1669 }
1670 1670
1671 static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu) 1671 static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
1672 { 1672 {
1673 struct spu_problem __iomem *prob = spu->problem; 1673 struct spu_problem __iomem *prob = spu->problem;
1674 u32 dummy = 0; 1674 u32 dummy = 0;
1675 1675
1676 /* Restore, Step 66: 1676 /* Restore, Step 66:
1677 * If CSA.MB_Stat[P]=0 (mailbox empty) then 1677 * If CSA.MB_Stat[P]=0 (mailbox empty) then
1678 * read from the PPU_MB register. 1678 * read from the PPU_MB register.
1679 */ 1679 */
1680 if ((csa->prob.mb_stat_R & 0xFF) == 0) { 1680 if ((csa->prob.mb_stat_R & 0xFF) == 0) {
1681 dummy = in_be32(&prob->pu_mb_R); 1681 dummy = in_be32(&prob->pu_mb_R);
1682 eieio(); 1682 eieio();
1683 } 1683 }
1684 } 1684 }
1685 1685
1686 static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu) 1686 static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
1687 { 1687 {
1688 struct spu_priv2 __iomem *priv2 = spu->priv2; 1688 struct spu_priv2 __iomem *priv2 = spu->priv2;
1689 u64 dummy = 0UL; 1689 u64 dummy = 0UL;
1690 1690
1691 /* Restore, Step 66: 1691 /* Restore, Step 66:
1692 * If CSA.MB_Stat[I]=0 (mailbox empty) then 1692 * If CSA.MB_Stat[I]=0 (mailbox empty) then
1693 * read from the PPUINT_MB register. 1693 * read from the PPUINT_MB register.
1694 */ 1694 */
1695 if ((csa->prob.mb_stat_R & 0xFF0000) == 0) { 1695 if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
1696 dummy = in_be64(&priv2->puint_mb_R); 1696 dummy = in_be64(&priv2->puint_mb_R);
1697 eieio(); 1697 eieio();
1698 spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR); 1698 spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
1699 eieio(); 1699 eieio();
1700 } 1700 }
1701 } 1701 }
1702 1702
1703 static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu) 1703 static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
1704 { 1704 {
1705 /* Restore, Step 69: 1705 /* Restore, Step 69:
1706 * Restore the MFC_SR1 register from CSA. 1706 * Restore the MFC_SR1 register from CSA.
1707 */ 1707 */
1708 spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW); 1708 spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW);
1709 eieio(); 1709 eieio();
1710 } 1710 }
1711 1711
1712 static inline void set_int_route(struct spu_state *csa, struct spu *spu) 1712 static inline void set_int_route(struct spu_state *csa, struct spu *spu)
1713 { 1713 {
1714 struct spu_context *ctx = spu->ctx; 1714 struct spu_context *ctx = spu->ctx;
1715 1715
1716 spu_cpu_affinity_set(spu, ctx->last_ran); 1716 spu_cpu_affinity_set(spu, ctx->last_ran);
1717 } 1717 }
1718 1718
1719 static inline void restore_other_spu_access(struct spu_state *csa, 1719 static inline void restore_other_spu_access(struct spu_state *csa,
1720 struct spu *spu) 1720 struct spu *spu)
1721 { 1721 {
1722 /* Restore, Step 70: 1722 /* Restore, Step 70:
1723 * Restore other SPU mappings to this SPU. TBD. 1723 * Restore other SPU mappings to this SPU. TBD.
1724 */ 1724 */
1725 } 1725 }
1726 1726
1727 static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu) 1727 static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu)
1728 { 1728 {
1729 struct spu_problem __iomem *prob = spu->problem; 1729 struct spu_problem __iomem *prob = spu->problem;
1730 1730
1731 /* Restore, Step 71: 1731 /* Restore, Step 71:
1732 * If CSA.SPU_Status[R]=1 then write 1732 * If CSA.SPU_Status[R]=1 then write
1733 * SPU_RunCntl[R0R1]='01'. 1733 * SPU_RunCntl[R0R1]='01'.
1734 */ 1734 */
1735 if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) { 1735 if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) {
1736 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); 1736 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
1737 eieio(); 1737 eieio();
1738 } 1738 }
1739 } 1739 }
1740 1740
1741 static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu) 1741 static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
1742 { 1742 {
1743 struct spu_priv2 __iomem *priv2 = spu->priv2; 1743 struct spu_priv2 __iomem *priv2 = spu->priv2;
1744 1744
1745 /* Restore, Step 72: 1745 /* Restore, Step 72:
1746 * Restore the MFC_CNTL register for the CSA. 1746 * Restore the MFC_CNTL register for the CSA.
1747 */ 1747 */
1748 out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW); 1748 out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
1749 eieio(); 1749 eieio();
1750 1750
1751 /* 1751 /*
1752 * The queue is put back into the same state that was evident prior to 1752 * The queue is put back into the same state that was evident prior to
1753 * the context switch. The suspend flag is added to the saved state in 1753 * the context switch. The suspend flag is added to the saved state in
1754 * the csa, if the operational state was suspending or suspended. In 1754 * the csa, if the operational state was suspending or suspended. In
1755 * this case, the code that suspended the mfc is responsible for 1755 * this case, the code that suspended the mfc is responsible for
1756 * continuing it. Note that SPE faults do not change the operational 1756 * continuing it. Note that SPE faults do not change the operational
1757 * state of the spu. 1757 * state of the spu.
1758 */ 1758 */
1759 } 1759 }
1760 1760
1761 static inline void enable_user_access(struct spu_state *csa, struct spu *spu) 1761 static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
1762 { 1762 {
1763 /* Restore, Step 73: 1763 /* Restore, Step 73:
1764 * Enable user-space access (if provided) to this 1764 * Enable user-space access (if provided) to this
1765 * SPU by mapping the virtual pages assigned to 1765 * SPU by mapping the virtual pages assigned to
1766 * the SPU memory-mapped I/O (MMIO) for problem 1766 * the SPU memory-mapped I/O (MMIO) for problem
1767 * state. TBD. 1767 * state. TBD.
1768 */ 1768 */
1769 } 1769 }
1770 1770
1771 static inline void reset_switch_active(struct spu_state *csa, struct spu *spu) 1771 static inline void reset_switch_active(struct spu_state *csa, struct spu *spu)
1772 { 1772 {
1773 /* Restore, Step 74: 1773 /* Restore, Step 74:
1774 * Reset the "context switch active" flag. 1774 * Reset the "context switch active" flag.
1775 * Not performed by this implementation. 1775 * Not performed by this implementation.
1776 */ 1776 */
1777 } 1777 }
1778 1778
1779 static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu) 1779 static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)
1780 { 1780 {
1781 /* Restore, Step 75: 1781 /* Restore, Step 75:
1782 * Re-enable SPU interrupts. 1782 * Re-enable SPU interrupts.
1783 */ 1783 */
1784 spin_lock_irq(&spu->register_lock); 1784 spin_lock_irq(&spu->register_lock);
1785 spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW); 1785 spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW);
1786 spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW); 1786 spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW);
1787 spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW); 1787 spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW);
1788 spin_unlock_irq(&spu->register_lock); 1788 spin_unlock_irq(&spu->register_lock);
1789 } 1789 }
1790 1790
1791 static int quiece_spu(struct spu_state *prev, struct spu *spu) 1791 static int quiece_spu(struct spu_state *prev, struct spu *spu)
1792 { 1792 {
1793 /* 1793 /*
1794 * Combined steps 2-18 of SPU context save sequence, which 1794 * Combined steps 2-18 of SPU context save sequence, which
1795 * quiesce the SPU state (disable SPU execution, MFC command 1795 * quiesce the SPU state (disable SPU execution, MFC command
1796 * queues, decrementer, SPU interrupts, etc.). 1796 * queues, decrementer, SPU interrupts, etc.).
1797 * 1797 *
1798 * Returns 0 on success. 1798 * Returns 0 on success.
1799 * 2 if failed step 2. 1799 * 2 if failed step 2.
1800 * 6 if failed step 6. 1800 * 6 if failed step 6.
1801 */ 1801 */
1802 1802
1803 if (check_spu_isolate(prev, spu)) { /* Step 2. */ 1803 if (check_spu_isolate(prev, spu)) { /* Step 2. */
1804 return 2; 1804 return 2;
1805 } 1805 }
1806 disable_interrupts(prev, spu); /* Step 3. */ 1806 disable_interrupts(prev, spu); /* Step 3. */
1807 set_watchdog_timer(prev, spu); /* Step 4. */ 1807 set_watchdog_timer(prev, spu); /* Step 4. */
1808 inhibit_user_access(prev, spu); /* Step 5. */ 1808 inhibit_user_access(prev, spu); /* Step 5. */
1809 if (check_spu_isolate(prev, spu)) { /* Step 6. */ 1809 if (check_spu_isolate(prev, spu)) { /* Step 6. */
1810 return 6; 1810 return 6;
1811 } 1811 }
1812 set_switch_pending(prev, spu); /* Step 7. */ 1812 set_switch_pending(prev, spu); /* Step 7. */
1813 save_mfc_cntl(prev, spu); /* Step 8. */ 1813 save_mfc_cntl(prev, spu); /* Step 8. */
1814 save_spu_runcntl(prev, spu); /* Step 9. */ 1814 save_spu_runcntl(prev, spu); /* Step 9. */
1815 save_mfc_sr1(prev, spu); /* Step 10. */ 1815 save_mfc_sr1(prev, spu); /* Step 10. */
1816 save_spu_status(prev, spu); /* Step 11. */ 1816 save_spu_status(prev, spu); /* Step 11. */
1817 save_mfc_stopped_status(prev, spu); /* Step 12. */ 1817 save_mfc_stopped_status(prev, spu); /* Step 12. */
1818 halt_mfc_decr(prev, spu); /* Step 13. */ 1818 halt_mfc_decr(prev, spu); /* Step 13. */
1819 save_timebase(prev, spu); /* Step 14. */ 1819 save_timebase(prev, spu); /* Step 14. */
1820 remove_other_spu_access(prev, spu); /* Step 15. */ 1820 remove_other_spu_access(prev, spu); /* Step 15. */
1821 do_mfc_mssync(prev, spu); /* Step 16. */ 1821 do_mfc_mssync(prev, spu); /* Step 16. */
1822 issue_mfc_tlbie(prev, spu); /* Step 17. */ 1822 issue_mfc_tlbie(prev, spu); /* Step 17. */
1823 handle_pending_interrupts(prev, spu); /* Step 18. */ 1823 handle_pending_interrupts(prev, spu); /* Step 18. */
1824 1824
1825 return 0; 1825 return 0;
1826 } 1826 }
1827 1827
1828 static void save_csa(struct spu_state *prev, struct spu *spu) 1828 static void save_csa(struct spu_state *prev, struct spu *spu)
1829 { 1829 {
1830 /* 1830 /*
1831 * Combine steps 19-44 of SPU context save sequence, which 1831 * Combine steps 19-44 of SPU context save sequence, which
1832 * save regions of the privileged & problem state areas. 1832 * save regions of the privileged & problem state areas.
1833 */ 1833 */
1834 1834
1835 save_mfc_queues(prev, spu); /* Step 19. */ 1835 save_mfc_queues(prev, spu); /* Step 19. */
1836 save_ppu_querymask(prev, spu); /* Step 20. */ 1836 save_ppu_querymask(prev, spu); /* Step 20. */
1837 save_ppu_querytype(prev, spu); /* Step 21. */ 1837 save_ppu_querytype(prev, spu); /* Step 21. */
1838 save_ppu_tagstatus(prev, spu); /* NEW. */ 1838 save_ppu_tagstatus(prev, spu); /* NEW. */
1839 save_mfc_csr_tsq(prev, spu); /* Step 22. */ 1839 save_mfc_csr_tsq(prev, spu); /* Step 22. */
1840 save_mfc_csr_cmd(prev, spu); /* Step 23. */ 1840 save_mfc_csr_cmd(prev, spu); /* Step 23. */
1841 save_mfc_csr_ato(prev, spu); /* Step 24. */ 1841 save_mfc_csr_ato(prev, spu); /* Step 24. */
1842 save_mfc_tclass_id(prev, spu); /* Step 25. */ 1842 save_mfc_tclass_id(prev, spu); /* Step 25. */
1843 set_mfc_tclass_id(prev, spu); /* Step 26. */ 1843 set_mfc_tclass_id(prev, spu); /* Step 26. */
1844 save_mfc_cmd(prev, spu); /* Step 26a - moved from 44. */ 1844 save_mfc_cmd(prev, spu); /* Step 26a - moved from 44. */
1845 purge_mfc_queue(prev, spu); /* Step 27. */ 1845 purge_mfc_queue(prev, spu); /* Step 27. */
1846 wait_purge_complete(prev, spu); /* Step 28. */ 1846 wait_purge_complete(prev, spu); /* Step 28. */
1847 setup_mfc_sr1(prev, spu); /* Step 30. */ 1847 setup_mfc_sr1(prev, spu); /* Step 30. */
1848 save_spu_npc(prev, spu); /* Step 31. */ 1848 save_spu_npc(prev, spu); /* Step 31. */
1849 save_spu_privcntl(prev, spu); /* Step 32. */ 1849 save_spu_privcntl(prev, spu); /* Step 32. */
1850 reset_spu_privcntl(prev, spu); /* Step 33. */ 1850 reset_spu_privcntl(prev, spu); /* Step 33. */
1851 save_spu_lslr(prev, spu); /* Step 34. */ 1851 save_spu_lslr(prev, spu); /* Step 34. */
1852 reset_spu_lslr(prev, spu); /* Step 35. */ 1852 reset_spu_lslr(prev, spu); /* Step 35. */
1853 save_spu_cfg(prev, spu); /* Step 36. */ 1853 save_spu_cfg(prev, spu); /* Step 36. */
1854 save_pm_trace(prev, spu); /* Step 37. */ 1854 save_pm_trace(prev, spu); /* Step 37. */
1855 save_mfc_rag(prev, spu); /* Step 38. */ 1855 save_mfc_rag(prev, spu); /* Step 38. */
1856 save_ppu_mb_stat(prev, spu); /* Step 39. */ 1856 save_ppu_mb_stat(prev, spu); /* Step 39. */
1857 save_ppu_mb(prev, spu); /* Step 40. */ 1857 save_ppu_mb(prev, spu); /* Step 40. */
1858 save_ppuint_mb(prev, spu); /* Step 41. */ 1858 save_ppuint_mb(prev, spu); /* Step 41. */
1859 save_ch_part1(prev, spu); /* Step 42. */ 1859 save_ch_part1(prev, spu); /* Step 42. */
1860 save_spu_mb(prev, spu); /* Step 43. */ 1860 save_spu_mb(prev, spu); /* Step 43. */
1861 reset_ch(prev, spu); /* Step 45. */ 1861 reset_ch(prev, spu); /* Step 45. */
1862 } 1862 }
1863 1863
1864 static void save_lscsa(struct spu_state *prev, struct spu *spu) 1864 static void save_lscsa(struct spu_state *prev, struct spu *spu)
1865 { 1865 {
1866 /* 1866 /*
1867 * Perform steps 46-57 of SPU context save sequence, 1867 * Perform steps 46-57 of SPU context save sequence,
1868 * which save regions of the local store and register 1868 * which save regions of the local store and register
1869 * file. 1869 * file.
1870 */ 1870 */
1871 1871
1872 resume_mfc_queue(prev, spu); /* Step 46. */ 1872 resume_mfc_queue(prev, spu); /* Step 46. */
1873 /* Step 47. */ 1873 /* Step 47. */
1874 setup_mfc_slbs(prev, spu, spu_save_code, sizeof(spu_save_code)); 1874 setup_mfc_slbs(prev, spu, spu_save_code, sizeof(spu_save_code));
1875 set_switch_active(prev, spu); /* Step 48. */ 1875 set_switch_active(prev, spu); /* Step 48. */
1876 enable_interrupts(prev, spu); /* Step 49. */ 1876 enable_interrupts(prev, spu); /* Step 49. */
1877 save_ls_16kb(prev, spu); /* Step 50. */ 1877 save_ls_16kb(prev, spu); /* Step 50. */
1878 set_spu_npc(prev, spu); /* Step 51. */ 1878 set_spu_npc(prev, spu); /* Step 51. */
1879 set_signot1(prev, spu); /* Step 52. */ 1879 set_signot1(prev, spu); /* Step 52. */
1880 set_signot2(prev, spu); /* Step 53. */ 1880 set_signot2(prev, spu); /* Step 53. */
1881 send_save_code(prev, spu); /* Step 54. */ 1881 send_save_code(prev, spu); /* Step 54. */
1882 set_ppu_querymask(prev, spu); /* Step 55. */ 1882 set_ppu_querymask(prev, spu); /* Step 55. */
1883 wait_tag_complete(prev, spu); /* Step 56. */ 1883 wait_tag_complete(prev, spu); /* Step 56. */
1884 wait_spu_stopped(prev, spu); /* Step 57. */ 1884 wait_spu_stopped(prev, spu); /* Step 57. */
1885 } 1885 }
1886 1886
1887 static void force_spu_isolate_exit(struct spu *spu) 1887 static void force_spu_isolate_exit(struct spu *spu)
1888 { 1888 {
1889 struct spu_problem __iomem *prob = spu->problem; 1889 struct spu_problem __iomem *prob = spu->problem;
1890 struct spu_priv2 __iomem *priv2 = spu->priv2; 1890 struct spu_priv2 __iomem *priv2 = spu->priv2;
1891 1891
1892 /* Stop SPE execution and wait for completion. */ 1892 /* Stop SPE execution and wait for completion. */
1893 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); 1893 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
1894 iobarrier_rw(); 1894 iobarrier_rw();
1895 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); 1895 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
1896 1896
1897 /* Restart SPE master runcntl. */ 1897 /* Restart SPE master runcntl. */
1898 spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK); 1898 spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK);
1899 iobarrier_w(); 1899 iobarrier_w();
1900 1900
1901 /* Initiate isolate exit request and wait for completion. */ 1901 /* Initiate isolate exit request and wait for completion. */
1902 out_be64(&priv2->spu_privcntl_RW, 4LL); 1902 out_be64(&priv2->spu_privcntl_RW, 4LL);
1903 iobarrier_w(); 1903 iobarrier_w();
1904 out_be32(&prob->spu_runcntl_RW, 2); 1904 out_be32(&prob->spu_runcntl_RW, 2);
1905 iobarrier_rw(); 1905 iobarrier_rw();
1906 POLL_WHILE_FALSE((in_be32(&prob->spu_status_R) 1906 POLL_WHILE_FALSE((in_be32(&prob->spu_status_R)
1907 & SPU_STATUS_STOPPED_BY_STOP)); 1907 & SPU_STATUS_STOPPED_BY_STOP));
1908 1908
1909 /* Reset load request to normal. */ 1909 /* Reset load request to normal. */
1910 out_be64(&priv2->spu_privcntl_RW, SPU_PRIVCNT_LOAD_REQUEST_NORMAL); 1910 out_be64(&priv2->spu_privcntl_RW, SPU_PRIVCNT_LOAD_REQUEST_NORMAL);
1911 iobarrier_w(); 1911 iobarrier_w();
1912 } 1912 }
1913 1913
1914 /** 1914 /**
1915 * stop_spu_isolate 1915 * stop_spu_isolate
1916 * Check SPU run-control state and force isolated 1916 * Check SPU run-control state and force isolated
1917 * exit function as necessary. 1917 * exit function as necessary.
1918 */ 1918 */
1919 static void stop_spu_isolate(struct spu *spu) 1919 static void stop_spu_isolate(struct spu *spu)
1920 { 1920 {
1921 struct spu_problem __iomem *prob = spu->problem; 1921 struct spu_problem __iomem *prob = spu->problem;
1922 1922
1923 if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE) { 1923 if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE) {
1924 /* The SPU is in isolated state; the only way 1924 /* The SPU is in isolated state; the only way
1925 * to get it out is to perform an isolated 1925 * to get it out is to perform an isolated
1926 * exit (clean) operation. 1926 * exit (clean) operation.
1927 */ 1927 */
1928 force_spu_isolate_exit(spu); 1928 force_spu_isolate_exit(spu);
1929 } 1929 }
1930 } 1930 }
1931 1931
1932 static void harvest(struct spu_state *prev, struct spu *spu) 1932 static void harvest(struct spu_state *prev, struct spu *spu)
1933 { 1933 {
1934 /* 1934 /*
1935 * Perform steps 2-25 of SPU context restore sequence, 1935 * Perform steps 2-25 of SPU context restore sequence,
1936 * which resets an SPU either after a failed save, or 1936 * which resets an SPU either after a failed save, or
1937 * when using SPU for first time. 1937 * when using SPU for first time.
1938 */ 1938 */
1939 1939
1940 disable_interrupts(prev, spu); /* Step 2. */ 1940 disable_interrupts(prev, spu); /* Step 2. */
1941 inhibit_user_access(prev, spu); /* Step 3. */ 1941 inhibit_user_access(prev, spu); /* Step 3. */
1942 terminate_spu_app(prev, spu); /* Step 4. */ 1942 terminate_spu_app(prev, spu); /* Step 4. */
1943 set_switch_pending(prev, spu); /* Step 5. */ 1943 set_switch_pending(prev, spu); /* Step 5. */
1944 stop_spu_isolate(spu); /* NEW. */ 1944 stop_spu_isolate(spu); /* NEW. */
1945 remove_other_spu_access(prev, spu); /* Step 6. */ 1945 remove_other_spu_access(prev, spu); /* Step 6. */
1946 suspend_mfc_and_halt_decr(prev, spu); /* Step 7. */ 1946 suspend_mfc_and_halt_decr(prev, spu); /* Step 7. */
1947 wait_suspend_mfc_complete(prev, spu); /* Step 8. */ 1947 wait_suspend_mfc_complete(prev, spu); /* Step 8. */
1948 if (!suspend_spe(prev, spu)) /* Step 9. */ 1948 if (!suspend_spe(prev, spu)) /* Step 9. */
1949 clear_spu_status(prev, spu); /* Step 10. */ 1949 clear_spu_status(prev, spu); /* Step 10. */
1950 do_mfc_mssync(prev, spu); /* Step 11. */ 1950 do_mfc_mssync(prev, spu); /* Step 11. */
1951 issue_mfc_tlbie(prev, spu); /* Step 12. */ 1951 issue_mfc_tlbie(prev, spu); /* Step 12. */
1952 handle_pending_interrupts(prev, spu); /* Step 13. */ 1952 handle_pending_interrupts(prev, spu); /* Step 13. */
1953 purge_mfc_queue(prev, spu); /* Step 14. */ 1953 purge_mfc_queue(prev, spu); /* Step 14. */
1954 wait_purge_complete(prev, spu); /* Step 15. */ 1954 wait_purge_complete(prev, spu); /* Step 15. */
1955 reset_spu_privcntl(prev, spu); /* Step 16. */ 1955 reset_spu_privcntl(prev, spu); /* Step 16. */
1956 reset_spu_lslr(prev, spu); /* Step 17. */ 1956 reset_spu_lslr(prev, spu); /* Step 17. */
1957 setup_mfc_sr1(prev, spu); /* Step 18. */ 1957 setup_mfc_sr1(prev, spu); /* Step 18. */
1958 spu_invalidate_slbs(spu); /* Step 19. */ 1958 spu_invalidate_slbs(spu); /* Step 19. */
1959 reset_ch_part1(prev, spu); /* Step 20. */ 1959 reset_ch_part1(prev, spu); /* Step 20. */
1960 reset_ch_part2(prev, spu); /* Step 21. */ 1960 reset_ch_part2(prev, spu); /* Step 21. */
1961 enable_interrupts(prev, spu); /* Step 22. */ 1961 enable_interrupts(prev, spu); /* Step 22. */
1962 set_switch_active(prev, spu); /* Step 23. */ 1962 set_switch_active(prev, spu); /* Step 23. */
1963 set_mfc_tclass_id(prev, spu); /* Step 24. */ 1963 set_mfc_tclass_id(prev, spu); /* Step 24. */
1964 resume_mfc_queue(prev, spu); /* Step 25. */ 1964 resume_mfc_queue(prev, spu); /* Step 25. */
1965 } 1965 }
1966 1966
1967 static void restore_lscsa(struct spu_state *next, struct spu *spu) 1967 static void restore_lscsa(struct spu_state *next, struct spu *spu)
1968 { 1968 {
1969 /* 1969 /*
1970 * Perform steps 26-40 of SPU context restore sequence, 1970 * Perform steps 26-40 of SPU context restore sequence,
1971 * which restores regions of the local store and register 1971 * which restores regions of the local store and register
1972 * file. 1972 * file.
1973 */ 1973 */
1974 1974
1975 set_watchdog_timer(next, spu); /* Step 26. */ 1975 set_watchdog_timer(next, spu); /* Step 26. */
1976 setup_spu_status_part1(next, spu); /* Step 27. */ 1976 setup_spu_status_part1(next, spu); /* Step 27. */
1977 setup_spu_status_part2(next, spu); /* Step 28. */ 1977 setup_spu_status_part2(next, spu); /* Step 28. */
1978 restore_mfc_rag(next, spu); /* Step 29. */ 1978 restore_mfc_rag(next, spu); /* Step 29. */
1979 /* Step 30. */ 1979 /* Step 30. */
1980 setup_mfc_slbs(next, spu, spu_restore_code, sizeof(spu_restore_code)); 1980 setup_mfc_slbs(next, spu, spu_restore_code, sizeof(spu_restore_code));
1981 set_spu_npc(next, spu); /* Step 31. */ 1981 set_spu_npc(next, spu); /* Step 31. */
1982 set_signot1(next, spu); /* Step 32. */ 1982 set_signot1(next, spu); /* Step 32. */
1983 set_signot2(next, spu); /* Step 33. */ 1983 set_signot2(next, spu); /* Step 33. */
1984 setup_decr(next, spu); /* Step 34. */ 1984 setup_decr(next, spu); /* Step 34. */
1985 setup_ppu_mb(next, spu); /* Step 35. */ 1985 setup_ppu_mb(next, spu); /* Step 35. */
1986 setup_ppuint_mb(next, spu); /* Step 36. */ 1986 setup_ppuint_mb(next, spu); /* Step 36. */
1987 send_restore_code(next, spu); /* Step 37. */ 1987 send_restore_code(next, spu); /* Step 37. */
1988 set_ppu_querymask(next, spu); /* Step 38. */ 1988 set_ppu_querymask(next, spu); /* Step 38. */
1989 wait_tag_complete(next, spu); /* Step 39. */ 1989 wait_tag_complete(next, spu); /* Step 39. */
1990 wait_spu_stopped(next, spu); /* Step 40. */ 1990 wait_spu_stopped(next, spu); /* Step 40. */
1991 } 1991 }
1992 1992
1993 static void restore_csa(struct spu_state *next, struct spu *spu) 1993 static void restore_csa(struct spu_state *next, struct spu *spu)
1994 { 1994 {
1995 /* 1995 /*
1996 * Combine steps 41-76 of SPU context restore sequence, which 1996 * Combine steps 41-76 of SPU context restore sequence, which
1997 * restore regions of the privileged & problem state areas. 1997 * restore regions of the privileged & problem state areas.
1998 */ 1998 */
1999 1999
2000 restore_spu_privcntl(next, spu); /* Step 41. */ 2000 restore_spu_privcntl(next, spu); /* Step 41. */
2001 restore_status_part1(next, spu); /* Step 42. */ 2001 restore_status_part1(next, spu); /* Step 42. */
2002 restore_status_part2(next, spu); /* Step 43. */ 2002 restore_status_part2(next, spu); /* Step 43. */
2003 restore_ls_16kb(next, spu); /* Step 44. */ 2003 restore_ls_16kb(next, spu); /* Step 44. */
2004 wait_tag_complete(next, spu); /* Step 45. */ 2004 wait_tag_complete(next, spu); /* Step 45. */
2005 suspend_mfc(next, spu); /* Step 46. */ 2005 suspend_mfc(next, spu); /* Step 46. */
2006 wait_suspend_mfc_complete(next, spu); /* Step 47. */ 2006 wait_suspend_mfc_complete(next, spu); /* Step 47. */
2007 issue_mfc_tlbie(next, spu); /* Step 48. */ 2007 issue_mfc_tlbie(next, spu); /* Step 48. */
2008 clear_interrupts(next, spu); /* Step 49. */ 2008 clear_interrupts(next, spu); /* Step 49. */
2009 restore_mfc_queues(next, spu); /* Step 50. */ 2009 restore_mfc_queues(next, spu); /* Step 50. */
2010 restore_ppu_querymask(next, spu); /* Step 51. */ 2010 restore_ppu_querymask(next, spu); /* Step 51. */
2011 restore_ppu_querytype(next, spu); /* Step 52. */ 2011 restore_ppu_querytype(next, spu); /* Step 52. */
2012 restore_mfc_csr_tsq(next, spu); /* Step 53. */ 2012 restore_mfc_csr_tsq(next, spu); /* Step 53. */
2013 restore_mfc_csr_cmd(next, spu); /* Step 54. */ 2013 restore_mfc_csr_cmd(next, spu); /* Step 54. */
2014 restore_mfc_csr_ato(next, spu); /* Step 55. */ 2014 restore_mfc_csr_ato(next, spu); /* Step 55. */
2015 restore_mfc_tclass_id(next, spu); /* Step 56. */ 2015 restore_mfc_tclass_id(next, spu); /* Step 56. */
2016 set_llr_event(next, spu); /* Step 57. */ 2016 set_llr_event(next, spu); /* Step 57. */
2017 restore_decr_wrapped(next, spu); /* Step 58. */ 2017 restore_decr_wrapped(next, spu); /* Step 58. */
2018 restore_ch_part1(next, spu); /* Step 59. */ 2018 restore_ch_part1(next, spu); /* Step 59. */
2019 restore_ch_part2(next, spu); /* Step 60. */ 2019 restore_ch_part2(next, spu); /* Step 60. */
2020 restore_spu_lslr(next, spu); /* Step 61. */ 2020 restore_spu_lslr(next, spu); /* Step 61. */
2021 restore_spu_cfg(next, spu); /* Step 62. */ 2021 restore_spu_cfg(next, spu); /* Step 62. */
2022 restore_pm_trace(next, spu); /* Step 63. */ 2022 restore_pm_trace(next, spu); /* Step 63. */
2023 restore_spu_npc(next, spu); /* Step 64. */ 2023 restore_spu_npc(next, spu); /* Step 64. */
2024 restore_spu_mb(next, spu); /* Step 65. */ 2024 restore_spu_mb(next, spu); /* Step 65. */
2025 check_ppu_mb_stat(next, spu); /* Step 66. */ 2025 check_ppu_mb_stat(next, spu); /* Step 66. */
2026 check_ppuint_mb_stat(next, spu); /* Step 67. */ 2026 check_ppuint_mb_stat(next, spu); /* Step 67. */
2027 spu_invalidate_slbs(spu); /* Modified Step 68. */ 2027 spu_invalidate_slbs(spu); /* Modified Step 68. */
2028 restore_mfc_sr1(next, spu); /* Step 69. */ 2028 restore_mfc_sr1(next, spu); /* Step 69. */
2029 set_int_route(next, spu); /* NEW */ 2029 set_int_route(next, spu); /* NEW */
2030 restore_other_spu_access(next, spu); /* Step 70. */ 2030 restore_other_spu_access(next, spu); /* Step 70. */
2031 restore_spu_runcntl(next, spu); /* Step 71. */ 2031 restore_spu_runcntl(next, spu); /* Step 71. */
2032 restore_mfc_cntl(next, spu); /* Step 72. */ 2032 restore_mfc_cntl(next, spu); /* Step 72. */
2033 enable_user_access(next, spu); /* Step 73. */ 2033 enable_user_access(next, spu); /* Step 73. */
2034 reset_switch_active(next, spu); /* Step 74. */ 2034 reset_switch_active(next, spu); /* Step 74. */
2035 reenable_interrupts(next, spu); /* Step 75. */ 2035 reenable_interrupts(next, spu); /* Step 75. */
2036 } 2036 }
2037 2037
2038 static int __do_spu_save(struct spu_state *prev, struct spu *spu) 2038 static int __do_spu_save(struct spu_state *prev, struct spu *spu)
2039 { 2039 {
2040 int rc; 2040 int rc;
2041 2041
2042 /* 2042 /*
2043 * SPU context save can be broken into three phases: 2043 * SPU context save can be broken into three phases:
2044 * 2044 *
2045 * (a) quiesce [steps 2-16]. 2045 * (a) quiesce [steps 2-16].
2046 * (b) save of CSA, performed by PPE [steps 17-42] 2046 * (b) save of CSA, performed by PPE [steps 17-42]
2047 * (c) save of LSCSA, mostly performed by SPU [steps 43-52]. 2047 * (c) save of LSCSA, mostly performed by SPU [steps 43-52].
2048 * 2048 *
2049 * Returns 0 on success. 2049 * Returns 0 on success.
 2050 	 * 2,6 if failed to quiesce SPU 			 2050 	 * 2,6 if failed to quiesce SPU
2051 * 53 if SPU-side of save failed. 2051 * 53 if SPU-side of save failed.
2052 */ 2052 */
2053 2053
2054 rc = quiece_spu(prev, spu); /* Steps 2-16. */ 2054 rc = quiece_spu(prev, spu); /* Steps 2-16. */
2055 switch (rc) { 2055 switch (rc) {
2056 default: 2056 default:
2057 case 2: 2057 case 2:
2058 case 6: 2058 case 6:
2059 harvest(prev, spu); 2059 harvest(prev, spu);
2060 return rc; 2060 return rc;
2061 break; 2061 break;
2062 case 0: 2062 case 0:
2063 break; 2063 break;
2064 } 2064 }
2065 save_csa(prev, spu); /* Steps 17-43. */ 2065 save_csa(prev, spu); /* Steps 17-43. */
2066 save_lscsa(prev, spu); /* Steps 44-53. */ 2066 save_lscsa(prev, spu); /* Steps 44-53. */
2067 return check_save_status(prev, spu); /* Step 54. */ 2067 return check_save_status(prev, spu); /* Step 54. */
2068 } 2068 }
2069 2069
2070 static int __do_spu_restore(struct spu_state *next, struct spu *spu) 2070 static int __do_spu_restore(struct spu_state *next, struct spu *spu)
2071 { 2071 {
2072 int rc; 2072 int rc;
2073 2073
2074 /* 2074 /*
2075 * SPU context restore can be broken into three phases: 2075 * SPU context restore can be broken into three phases:
2076 * 2076 *
2077 * (a) harvest (or reset) SPU [steps 2-24]. 2077 * (a) harvest (or reset) SPU [steps 2-24].
2078 * (b) restore LSCSA [steps 25-40], mostly performed by SPU. 2078 * (b) restore LSCSA [steps 25-40], mostly performed by SPU.
2079 * (c) restore CSA [steps 41-76], performed by PPE. 2079 * (c) restore CSA [steps 41-76], performed by PPE.
2080 * 2080 *
2081 * The 'harvest' step is not performed here, but rather 2081 * The 'harvest' step is not performed here, but rather
2082 * as needed below. 2082 * as needed below.
2083 */ 2083 */
2084 2084
2085 restore_lscsa(next, spu); /* Steps 24-39. */ 2085 restore_lscsa(next, spu); /* Steps 24-39. */
2086 rc = check_restore_status(next, spu); /* Step 40. */ 2086 rc = check_restore_status(next, spu); /* Step 40. */
2087 switch (rc) { 2087 switch (rc) {
2088 default: 2088 default:
2089 /* Failed. Return now. */ 2089 /* Failed. Return now. */
2090 return rc; 2090 return rc;
2091 break; 2091 break;
2092 case 0: 2092 case 0:
2093 /* Fall through to next step. */ 2093 /* Fall through to next step. */
2094 break; 2094 break;
2095 } 2095 }
2096 restore_csa(next, spu); 2096 restore_csa(next, spu);
2097 2097
2098 return 0; 2098 return 0;
2099 } 2099 }
2100 2100
2101 /** 2101 /**
2102 * spu_save - SPU context save, with locking. 2102 * spu_save - SPU context save, with locking.
2103 * @prev: pointer to SPU context save area, to be saved. 2103 * @prev: pointer to SPU context save area, to be saved.
2104 * @spu: pointer to SPU iomem structure. 2104 * @spu: pointer to SPU iomem structure.
2105 * 2105 *
2106 * Acquire locks, perform the save operation then return. 2106 * Acquire locks, perform the save operation then return.
2107 */ 2107 */
2108 int spu_save(struct spu_state *prev, struct spu *spu) 2108 int spu_save(struct spu_state *prev, struct spu *spu)
2109 { 2109 {
2110 int rc; 2110 int rc;
2111 2111
2112 acquire_spu_lock(spu); /* Step 1. */ 2112 acquire_spu_lock(spu); /* Step 1. */
2113 rc = __do_spu_save(prev, spu); /* Steps 2-53. */ 2113 rc = __do_spu_save(prev, spu); /* Steps 2-53. */
2114 release_spu_lock(spu); 2114 release_spu_lock(spu);
2115 if (rc != 0 && rc != 2 && rc != 6) { 2115 if (rc != 0 && rc != 2 && rc != 6) {
2116 panic("%s failed on SPU[%d], rc=%d.\n", 2116 panic("%s failed on SPU[%d], rc=%d.\n",
2117 __func__, spu->number, rc); 2117 __func__, spu->number, rc);
2118 } 2118 }
2119 return 0; 2119 return 0;
2120 } 2120 }
2121 EXPORT_SYMBOL_GPL(spu_save); 2121 EXPORT_SYMBOL_GPL(spu_save);
2122 2122
2123 /** 2123 /**
2124 * spu_restore - SPU context restore, with harvest and locking. 2124 * spu_restore - SPU context restore, with harvest and locking.
2125 * @new: pointer to SPU context save area, to be restored. 2125 * @new: pointer to SPU context save area, to be restored.
2126 * @spu: pointer to SPU iomem structure. 2126 * @spu: pointer to SPU iomem structure.
2127 * 2127 *
2128 * Perform harvest + restore, as we may not be coming 2128 * Perform harvest + restore, as we may not be coming
2129 * from a previous successful save operation, and the 2129 * from a previous successful save operation, and the
2130 * hardware state is unknown. 2130 * hardware state is unknown.
2131 */ 2131 */
2132 int spu_restore(struct spu_state *new, struct spu *spu) 2132 int spu_restore(struct spu_state *new, struct spu *spu)
2133 { 2133 {
2134 int rc; 2134 int rc;
2135 2135
2136 acquire_spu_lock(spu); 2136 acquire_spu_lock(spu);
2137 harvest(NULL, spu); 2137 harvest(NULL, spu);
2138 spu->slb_replace = 0; 2138 spu->slb_replace = 0;
2139 rc = __do_spu_restore(new, spu); 2139 rc = __do_spu_restore(new, spu);
2140 release_spu_lock(spu); 2140 release_spu_lock(spu);
2141 if (rc) { 2141 if (rc) {
2142 panic("%s failed on SPU[%d] rc=%d.\n", 2142 panic("%s failed on SPU[%d] rc=%d.\n",
2143 __func__, spu->number, rc); 2143 __func__, spu->number, rc);
2144 } 2144 }
2145 return rc; 2145 return rc;
2146 } 2146 }
2147 EXPORT_SYMBOL_GPL(spu_restore); 2147 EXPORT_SYMBOL_GPL(spu_restore);
2148 2148
2149 static void init_prob(struct spu_state *csa) 2149 static void init_prob(struct spu_state *csa)
2150 { 2150 {
2151 csa->spu_chnlcnt_RW[9] = 1; 2151 csa->spu_chnlcnt_RW[9] = 1;
2152 csa->spu_chnlcnt_RW[21] = 16; 2152 csa->spu_chnlcnt_RW[21] = 16;
2153 csa->spu_chnlcnt_RW[23] = 1; 2153 csa->spu_chnlcnt_RW[23] = 1;
2154 csa->spu_chnlcnt_RW[28] = 1; 2154 csa->spu_chnlcnt_RW[28] = 1;
2155 csa->spu_chnlcnt_RW[30] = 1; 2155 csa->spu_chnlcnt_RW[30] = 1;
2156 csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP; 2156 csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP;
2157 csa->prob.mb_stat_R = 0x000400; 2157 csa->prob.mb_stat_R = 0x000400;
2158 } 2158 }
2159 2159
2160 static void init_priv1(struct spu_state *csa) 2160 static void init_priv1(struct spu_state *csa)
2161 { 2161 {
2162 /* Enable decode, relocate, tlbie response, master runcntl. */ 2162 /* Enable decode, relocate, tlbie response, master runcntl. */
2163 csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK | 2163 csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK |
2164 MFC_STATE1_MASTER_RUN_CONTROL_MASK | 2164 MFC_STATE1_MASTER_RUN_CONTROL_MASK |
2165 MFC_STATE1_PROBLEM_STATE_MASK | 2165 MFC_STATE1_PROBLEM_STATE_MASK |
2166 MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK; 2166 MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;
2167 2167
2168 /* Enable OS-specific set of interrupts. */ 2168 /* Enable OS-specific set of interrupts. */
2169 csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR | 2169 csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |
2170 CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR | 2170 CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |
2171 CLASS0_ENABLE_SPU_ERROR_INTR; 2171 CLASS0_ENABLE_SPU_ERROR_INTR;
2172 csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR | 2172 csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
2173 CLASS1_ENABLE_STORAGE_FAULT_INTR; 2173 CLASS1_ENABLE_STORAGE_FAULT_INTR;
2174 csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR | 2174 csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR |
2175 CLASS2_ENABLE_SPU_HALT_INTR | 2175 CLASS2_ENABLE_SPU_HALT_INTR |
2176 CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR; 2176 CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR;
2177 } 2177 }
2178 2178
2179 static void init_priv2(struct spu_state *csa) 2179 static void init_priv2(struct spu_state *csa)
2180 { 2180 {
2181 csa->priv2.spu_lslr_RW = LS_ADDR_MASK; 2181 csa->priv2.spu_lslr_RW = LS_ADDR_MASK;
2182 csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE | 2182 csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE |
2183 MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION | 2183 MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION |
2184 MFC_CNTL_DMA_QUEUES_EMPTY_MASK; 2184 MFC_CNTL_DMA_QUEUES_EMPTY_MASK;
2185 } 2185 }
2186 2186
2187 /** 2187 /**
2188 * spu_alloc_csa - allocate and initialize an SPU context save area. 2188 * spu_alloc_csa - allocate and initialize an SPU context save area.
2189 * 2189 *
2190 * Allocate and initialize the contents of an SPU context save area. 2190 * Allocate and initialize the contents of an SPU context save area.
2191 * This includes enabling address translation, interrupt masks, etc., 2191 * This includes enabling address translation, interrupt masks, etc.,
2192 * as appropriate for the given OS environment. 2192 * as appropriate for the given OS environment.
2193 * 2193 *
2194 * Note that storage for the 'lscsa' is allocated separately, 2194 * Note that storage for the 'lscsa' is allocated separately,
2195 * as it is by far the largest of the context save regions, 2195 * as it is by far the largest of the context save regions,
2196 * and may need to be pinned or otherwise specially aligned. 2196 * and may need to be pinned or otherwise specially aligned.
2197 */ 2197 */
2198 int spu_init_csa(struct spu_state *csa) 2198 int spu_init_csa(struct spu_state *csa)
2199 { 2199 {
2200 int rc; 2200 int rc;
2201 2201
2202 if (!csa) 2202 if (!csa)
2203 return -EINVAL; 2203 return -EINVAL;
2204 memset(csa, 0, sizeof(struct spu_state)); 2204 memset(csa, 0, sizeof(struct spu_state));
2205 2205
2206 rc = spu_alloc_lscsa(csa); 2206 rc = spu_alloc_lscsa(csa);
2207 if (rc) 2207 if (rc)
2208 return rc; 2208 return rc;
2209 2209
2210 spin_lock_init(&csa->register_lock); 2210 spin_lock_init(&csa->register_lock);
2211 2211
2212 init_prob(csa); 2212 init_prob(csa);
2213 init_priv1(csa); 2213 init_priv1(csa);
2214 init_priv2(csa); 2214 init_priv2(csa);
2215 2215
2216 return 0; 2216 return 0;
2217 } 2217 }
2218 2218
2219 void spu_fini_csa(struct spu_state *csa) 2219 void spu_fini_csa(struct spu_state *csa)
2220 { 2220 {
2221 spu_free_lscsa(csa); 2221 spu_free_lscsa(csa);
2222 } 2222 }
2223 2223
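The two GPL-exported entry points above, spu_save() and spu_restore(), form the interface for a full SPU context switch: save quiesces the SPU and writes out the CSA/LSCSA, restore harvests the SPU and rebuilds state from a save area. A minimal caller sketch, assuming illustrative names for the two context save areas (they are not identifiers from this file):

	/* Sketch only: "ctx_prev" and "ctx_next" are illustrative names. */
	static void example_spu_context_switch(struct spu_state *ctx_prev,
					       struct spu_state *ctx_next,
					       struct spu *spu)
	{
		/* Quiesce the SPU and save the outgoing context (save steps above). */
		spu_save(ctx_prev, spu);

		/* Harvest the SPU, then restore the incoming context. */
		spu_restore(ctx_next, spu);
	}
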
arch/powerpc/platforms/cell/spufs/syscalls.c
1 #include <linux/file.h> 1 #include <linux/file.h>
2 #include <linux/fs.h> 2 #include <linux/fs.h>
3 #include <linux/module.h> 3 #include <linux/export.h>
4 #include <linux/mount.h> 4 #include <linux/mount.h>
5 #include <linux/namei.h> 5 #include <linux/namei.h>
6 #include <linux/slab.h> 6 #include <linux/slab.h>
7 7
8 #include <asm/uaccess.h> 8 #include <asm/uaccess.h>
9 9
10 #include "spufs.h" 10 #include "spufs.h"
11 11
12 /** 12 /**
13 * sys_spu_run - run code loaded into an SPU 13 * sys_spu_run - run code loaded into an SPU
14 * 14 *
15 * @unpc: next program counter for the SPU 15 * @unpc: next program counter for the SPU
16 * @ustatus: status of the SPU 16 * @ustatus: status of the SPU
17 * 17 *
18 * This system call transfers the control of execution of a 18 * This system call transfers the control of execution of a
19 * user space thread to an SPU. It will return when the 19 * user space thread to an SPU. It will return when the
20 * SPU has finished executing or when it hits an error 20 * SPU has finished executing or when it hits an error
21 * condition and it will be interrupted if a signal needs 21 * condition and it will be interrupted if a signal needs
22 * to be delivered to a handler in user space. 22 * to be delivered to a handler in user space.
23 * 23 *
24 * The next program counter is set to the passed value 24 * The next program counter is set to the passed value
25 * before the SPU starts fetching code and the user space 25 * before the SPU starts fetching code and the user space
26 * pointer gets updated with the new value when returning 26 * pointer gets updated with the new value when returning
27 * from kernel space. 27 * from kernel space.
28 * 28 *
29 * The status value returned from spu_run reflects the 29 * The status value returned from spu_run reflects the
30 * value of the spu_status register after the SPU has stopped. 30 * value of the spu_status register after the SPU has stopped.
31 * 31 *
32 */ 32 */
33 static long do_spu_run(struct file *filp, 33 static long do_spu_run(struct file *filp,
34 __u32 __user *unpc, 34 __u32 __user *unpc,
35 __u32 __user *ustatus) 35 __u32 __user *ustatus)
36 { 36 {
37 long ret; 37 long ret;
38 struct spufs_inode_info *i; 38 struct spufs_inode_info *i;
39 u32 npc, status; 39 u32 npc, status;
40 40
41 ret = -EFAULT; 41 ret = -EFAULT;
42 if (get_user(npc, unpc)) 42 if (get_user(npc, unpc))
43 goto out; 43 goto out;
44 44
45 /* check if this file was created by spu_create */ 45 /* check if this file was created by spu_create */
46 ret = -EINVAL; 46 ret = -EINVAL;
47 if (filp->f_op != &spufs_context_fops) 47 if (filp->f_op != &spufs_context_fops)
48 goto out; 48 goto out;
49 49
50 i = SPUFS_I(filp->f_path.dentry->d_inode); 50 i = SPUFS_I(filp->f_path.dentry->d_inode);
51 ret = spufs_run_spu(i->i_ctx, &npc, &status); 51 ret = spufs_run_spu(i->i_ctx, &npc, &status);
52 52
53 if (put_user(npc, unpc)) 53 if (put_user(npc, unpc))
54 ret = -EFAULT; 54 ret = -EFAULT;
55 55
56 if (ustatus && put_user(status, ustatus)) 56 if (ustatus && put_user(status, ustatus))
57 ret = -EFAULT; 57 ret = -EFAULT;
58 out: 58 out:
59 return ret; 59 return ret;
60 } 60 }
61 61
62 static long do_spu_create(const char __user *pathname, unsigned int flags, 62 static long do_spu_create(const char __user *pathname, unsigned int flags,
63 mode_t mode, struct file *neighbor) 63 mode_t mode, struct file *neighbor)
64 { 64 {
65 struct path path; 65 struct path path;
66 struct dentry *dentry; 66 struct dentry *dentry;
67 int ret; 67 int ret;
68 68
69 dentry = user_path_create(AT_FDCWD, pathname, &path, 1); 69 dentry = user_path_create(AT_FDCWD, pathname, &path, 1);
70 ret = PTR_ERR(dentry); 70 ret = PTR_ERR(dentry);
71 if (!IS_ERR(dentry)) { 71 if (!IS_ERR(dentry)) {
72 ret = spufs_create(&path, dentry, flags, mode, neighbor); 72 ret = spufs_create(&path, dentry, flags, mode, neighbor);
73 mutex_unlock(&path.dentry->d_inode->i_mutex); 73 mutex_unlock(&path.dentry->d_inode->i_mutex);
74 dput(dentry); 74 dput(dentry);
75 path_put(&path); 75 path_put(&path);
76 } 76 }
77 77
78 return ret; 78 return ret;
79 } 79 }
80 80
81 struct spufs_calls spufs_calls = { 81 struct spufs_calls spufs_calls = {
82 .create_thread = do_spu_create, 82 .create_thread = do_spu_create,
83 .spu_run = do_spu_run, 83 .spu_run = do_spu_run,
84 .coredump_extra_notes_size = spufs_coredump_extra_notes_size, 84 .coredump_extra_notes_size = spufs_coredump_extra_notes_size,
85 .coredump_extra_notes_write = spufs_coredump_extra_notes_write, 85 .coredump_extra_notes_write = spufs_coredump_extra_notes_write,
86 .notify_spus_active = do_notify_spus_active, 86 .notify_spus_active = do_notify_spus_active,
87 .owner = THIS_MODULE, 87 .owner = THIS_MODULE,
88 }; 88 };
89 89
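The spufs_calls structure above still references THIS_MODULE, which in kernels of this era is provided by <linux/export.h> along with the EXPORT_SYMBOL family, so dropping the much larger <linux/module.h> include is safe here. The same one-line conversion is what this commit applies throughout; a minimal sketch with a hypothetical symbol:

	#include <linux/export.h>	/* was: #include <linux/module.h> */

	int example_exported_helper(void)	/* hypothetical, for illustration */
	{
		return 0;
	}
	EXPORT_SYMBOL_GPL(example_exported_helper);
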
arch/powerpc/platforms/iseries/hvlpconfig.c
1 /* 1 /*
2 * Copyright (C) 2001 Kyle A. Lucke, IBM Corporation 2 * Copyright (C) 2001 Kyle A. Lucke, IBM Corporation
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or 6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version. 7 * (at your option) any later version.
8 * 8 *
9 * This program is distributed in the hope that it will be useful, 9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */ 17 */
18 18
19 #include <linux/module.h> 19 #include <linux/export.h>
20 #include <asm/iseries/hv_lp_config.h> 20 #include <asm/iseries/hv_lp_config.h>
21 #include "it_lp_naca.h" 21 #include "it_lp_naca.h"
22 22
23 HvLpIndex HvLpConfig_getLpIndex_outline(void) 23 HvLpIndex HvLpConfig_getLpIndex_outline(void)
24 { 24 {
25 return HvLpConfig_getLpIndex(); 25 return HvLpConfig_getLpIndex();
26 } 26 }
27 EXPORT_SYMBOL(HvLpConfig_getLpIndex_outline); 27 EXPORT_SYMBOL(HvLpConfig_getLpIndex_outline);
28 28
29 HvLpIndex HvLpConfig_getLpIndex(void) 29 HvLpIndex HvLpConfig_getLpIndex(void)
30 { 30 {
31 return itLpNaca.xLpIndex; 31 return itLpNaca.xLpIndex;
32 } 32 }
33 EXPORT_SYMBOL(HvLpConfig_getLpIndex); 33 EXPORT_SYMBOL(HvLpConfig_getLpIndex);
34 34
35 HvLpIndex HvLpConfig_getPrimaryLpIndex(void) 35 HvLpIndex HvLpConfig_getPrimaryLpIndex(void)
36 { 36 {
37 return itLpNaca.xPrimaryLpIndex; 37 return itLpNaca.xPrimaryLpIndex;
38 } 38 }
39 EXPORT_SYMBOL_GPL(HvLpConfig_getPrimaryLpIndex); 39 EXPORT_SYMBOL_GPL(HvLpConfig_getPrimaryLpIndex);
40 40
arch/powerpc/platforms/iseries/iommu.c
1 /* 1 /*
2 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
3 * 3 *
4 * Rewrite, cleanup: 4 * Rewrite, cleanup:
5 * 5 *
6 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation 6 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
7 * Copyright (C) 2006 Olof Johansson <olof@lixom.net> 7 * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
8 * 8 *
9 * Dynamic DMA mapping support, iSeries-specific parts. 9 * Dynamic DMA mapping support, iSeries-specific parts.
10 * 10 *
11 * 11 *
12 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by 13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or 14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version. 15 * (at your option) any later version.
16 * 16 *
17 * This program is distributed in the hope that it will be useful, 17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details. 20 * GNU General Public License for more details.
21 * 21 *
22 * You should have received a copy of the GNU General Public License 22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software 23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */ 25 */
26 26
27 #include <linux/types.h> 27 #include <linux/types.h>
28 #include <linux/dma-mapping.h> 28 #include <linux/dma-mapping.h>
29 #include <linux/list.h> 29 #include <linux/list.h>
30 #include <linux/pci.h> 30 #include <linux/pci.h>
31 #include <linux/module.h> 31 #include <linux/export.h>
32 #include <linux/slab.h> 32 #include <linux/slab.h>
33 33
34 #include <asm/iommu.h> 34 #include <asm/iommu.h>
35 #include <asm/vio.h> 35 #include <asm/vio.h>
36 #include <asm/tce.h> 36 #include <asm/tce.h>
37 #include <asm/machdep.h> 37 #include <asm/machdep.h>
38 #include <asm/abs_addr.h> 38 #include <asm/abs_addr.h>
39 #include <asm/prom.h> 39 #include <asm/prom.h>
40 #include <asm/pci-bridge.h> 40 #include <asm/pci-bridge.h>
41 #include <asm/iseries/hv_call_xm.h> 41 #include <asm/iseries/hv_call_xm.h>
42 #include <asm/iseries/hv_call_event.h> 42 #include <asm/iseries/hv_call_event.h>
43 #include <asm/iseries/iommu.h> 43 #include <asm/iseries/iommu.h>
44 44
45 static int tce_build_iSeries(struct iommu_table *tbl, long index, long npages, 45 static int tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
46 unsigned long uaddr, enum dma_data_direction direction, 46 unsigned long uaddr, enum dma_data_direction direction,
47 struct dma_attrs *attrs) 47 struct dma_attrs *attrs)
48 { 48 {
49 u64 rc; 49 u64 rc;
50 u64 tce, rpn; 50 u64 tce, rpn;
51 51
52 while (npages--) { 52 while (npages--) {
53 rpn = virt_to_abs(uaddr) >> TCE_SHIFT; 53 rpn = virt_to_abs(uaddr) >> TCE_SHIFT;
54 tce = (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT; 54 tce = (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
55 55
56 if (tbl->it_type == TCE_VB) { 56 if (tbl->it_type == TCE_VB) {
57 /* Virtual Bus */ 57 /* Virtual Bus */
58 tce |= TCE_VALID|TCE_ALLIO; 58 tce |= TCE_VALID|TCE_ALLIO;
59 if (direction != DMA_TO_DEVICE) 59 if (direction != DMA_TO_DEVICE)
60 tce |= TCE_VB_WRITE; 60 tce |= TCE_VB_WRITE;
61 } else { 61 } else {
62 /* PCI Bus */ 62 /* PCI Bus */
63 tce |= TCE_PCI_READ; /* Read allowed */ 63 tce |= TCE_PCI_READ; /* Read allowed */
64 if (direction != DMA_TO_DEVICE) 64 if (direction != DMA_TO_DEVICE)
65 tce |= TCE_PCI_WRITE; 65 tce |= TCE_PCI_WRITE;
66 } 66 }
67 67
68 rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, tce); 68 rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, tce);
69 if (rc) 69 if (rc)
70 panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n", 70 panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n",
71 rc); 71 rc);
72 index++; 72 index++;
73 uaddr += TCE_PAGE_SIZE; 73 uaddr += TCE_PAGE_SIZE;
74 } 74 }
75 return 0; 75 return 0;
76 } 76 }
77 77
78 static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages) 78 static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
79 { 79 {
80 u64 rc; 80 u64 rc;
81 81
82 while (npages--) { 82 while (npages--) {
83 rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0); 83 rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0);
84 if (rc) 84 if (rc)
85 panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n", 85 panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n",
86 rc); 86 rc);
87 index++; 87 index++;
88 } 88 }
89 } 89 }
90 90
91 /* 91 /*
92 * Structure passed to HvCallXm_getTceTableParms 92 * Structure passed to HvCallXm_getTceTableParms
93 */ 93 */
94 struct iommu_table_cb { 94 struct iommu_table_cb {
95 unsigned long itc_busno; /* Bus number for this tce table */ 95 unsigned long itc_busno; /* Bus number for this tce table */
96 unsigned long itc_start; /* Will be NULL for secondary */ 96 unsigned long itc_start; /* Will be NULL for secondary */
97 unsigned long itc_totalsize; /* Size (in pages) of whole table */ 97 unsigned long itc_totalsize; /* Size (in pages) of whole table */
98 unsigned long itc_offset; /* Index into real tce table of the 98 unsigned long itc_offset; /* Index into real tce table of the
99 start of our section */ 99 start of our section */
100 unsigned long itc_size; /* Size (in pages) of our section */ 100 unsigned long itc_size; /* Size (in pages) of our section */
101 unsigned long itc_index; /* Index of this tce table */ 101 unsigned long itc_index; /* Index of this tce table */
102 unsigned short itc_maxtables; /* Max num of tables for partition */ 102 unsigned short itc_maxtables; /* Max num of tables for partition */
103 unsigned char itc_virtbus; /* Flag to indicate virtual bus */ 103 unsigned char itc_virtbus; /* Flag to indicate virtual bus */
104 unsigned char itc_slotno; /* IOA Tce Slot Index */ 104 unsigned char itc_slotno; /* IOA Tce Slot Index */
105 unsigned char itc_rsvd[4]; 105 unsigned char itc_rsvd[4];
106 }; 106 };
107 107
108 /* 108 /*
109 * Call Hv with the architected data structure to get TCE table info. 109 * Call Hv with the architected data structure to get TCE table info.
110 * info. Put the returned data into the Linux representation of the 110 * info. Put the returned data into the Linux representation of the
111 * TCE table data. 111 * TCE table data.
112 * The Hardware Tce table comes in three flavors. 112 * The Hardware Tce table comes in three flavors.
113 * 1. TCE table shared between Buses. 113 * 1. TCE table shared between Buses.
114 * 2. TCE table per Bus. 114 * 2. TCE table per Bus.
115 * 3. TCE Table per IOA. 115 * 3. TCE Table per IOA.
116 */ 116 */
117 void iommu_table_getparms_iSeries(unsigned long busno, 117 void iommu_table_getparms_iSeries(unsigned long busno,
118 unsigned char slotno, 118 unsigned char slotno,
119 unsigned char virtbus, 119 unsigned char virtbus,
120 struct iommu_table* tbl) 120 struct iommu_table* tbl)
121 { 121 {
122 struct iommu_table_cb *parms; 122 struct iommu_table_cb *parms;
123 123
124 parms = kzalloc(sizeof(*parms), GFP_KERNEL); 124 parms = kzalloc(sizeof(*parms), GFP_KERNEL);
125 if (parms == NULL) 125 if (parms == NULL)
126 panic("PCI_DMA: TCE Table Allocation failed."); 126 panic("PCI_DMA: TCE Table Allocation failed.");
127 127
128 parms->itc_busno = busno; 128 parms->itc_busno = busno;
129 parms->itc_slotno = slotno; 129 parms->itc_slotno = slotno;
130 parms->itc_virtbus = virtbus; 130 parms->itc_virtbus = virtbus;
131 131
132 HvCallXm_getTceTableParms(iseries_hv_addr(parms)); 132 HvCallXm_getTceTableParms(iseries_hv_addr(parms));
133 133
134 if (parms->itc_size == 0) 134 if (parms->itc_size == 0)
135 panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms); 135 panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms);
136 136
137 /* itc_size is in pages worth of table, it_size is in # of entries */ 137 /* itc_size is in pages worth of table, it_size is in # of entries */
138 tbl->it_size = (parms->itc_size * TCE_PAGE_SIZE) / TCE_ENTRY_SIZE; 138 tbl->it_size = (parms->itc_size * TCE_PAGE_SIZE) / TCE_ENTRY_SIZE;
139 tbl->it_busno = parms->itc_busno; 139 tbl->it_busno = parms->itc_busno;
140 tbl->it_offset = parms->itc_offset; 140 tbl->it_offset = parms->itc_offset;
141 tbl->it_index = parms->itc_index; 141 tbl->it_index = parms->itc_index;
142 tbl->it_blocksize = 1; 142 tbl->it_blocksize = 1;
143 tbl->it_type = virtbus ? TCE_VB : TCE_PCI; 143 tbl->it_type = virtbus ? TCE_VB : TCE_PCI;
144 144
145 kfree(parms); 145 kfree(parms);
146 } 146 }
147 147
148 148
149 #ifdef CONFIG_PCI 149 #ifdef CONFIG_PCI
150 /* 150 /*
151 * This function compares the known tables to find an iommu_table 151 * This function compares the known tables to find an iommu_table
152 * that has already been built for hardware TCEs. 152 * that has already been built for hardware TCEs.
153 */ 153 */
154 static struct iommu_table *iommu_table_find(struct iommu_table * tbl) 154 static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
155 { 155 {
156 struct device_node *node; 156 struct device_node *node;
157 157
158 for (node = NULL; (node = of_find_all_nodes(node)); ) { 158 for (node = NULL; (node = of_find_all_nodes(node)); ) {
159 struct pci_dn *pdn = PCI_DN(node); 159 struct pci_dn *pdn = PCI_DN(node);
160 struct iommu_table *it; 160 struct iommu_table *it;
161 161
162 if (pdn == NULL) 162 if (pdn == NULL)
163 continue; 163 continue;
164 it = pdn->iommu_table; 164 it = pdn->iommu_table;
165 if ((it != NULL) && 165 if ((it != NULL) &&
166 (it->it_type == TCE_PCI) && 166 (it->it_type == TCE_PCI) &&
167 (it->it_offset == tbl->it_offset) && 167 (it->it_offset == tbl->it_offset) &&
168 (it->it_index == tbl->it_index) && 168 (it->it_index == tbl->it_index) &&
169 (it->it_size == tbl->it_size)) { 169 (it->it_size == tbl->it_size)) {
170 of_node_put(node); 170 of_node_put(node);
171 return it; 171 return it;
172 } 172 }
173 } 173 }
174 return NULL; 174 return NULL;
175 } 175 }
176 176
177 177
178 static void pci_dma_dev_setup_iseries(struct pci_dev *pdev) 178 static void pci_dma_dev_setup_iseries(struct pci_dev *pdev)
179 { 179 {
180 struct iommu_table *tbl; 180 struct iommu_table *tbl;
181 struct device_node *dn = pci_device_to_OF_node(pdev); 181 struct device_node *dn = pci_device_to_OF_node(pdev);
182 struct pci_dn *pdn = PCI_DN(dn); 182 struct pci_dn *pdn = PCI_DN(dn);
183 const u32 *lsn = of_get_property(dn, "linux,logical-slot-number", NULL); 183 const u32 *lsn = of_get_property(dn, "linux,logical-slot-number", NULL);
184 184
185 BUG_ON(lsn == NULL); 185 BUG_ON(lsn == NULL);
186 186
187 tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL); 187 tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
188 188
189 iommu_table_getparms_iSeries(pdn->busno, *lsn, 0, tbl); 189 iommu_table_getparms_iSeries(pdn->busno, *lsn, 0, tbl);
190 190
191 /* Look for existing tce table */ 191 /* Look for existing tce table */
192 pdn->iommu_table = iommu_table_find(tbl); 192 pdn->iommu_table = iommu_table_find(tbl);
193 if (pdn->iommu_table == NULL) 193 if (pdn->iommu_table == NULL)
194 pdn->iommu_table = iommu_init_table(tbl, -1); 194 pdn->iommu_table = iommu_init_table(tbl, -1);
195 else 195 else
196 kfree(tbl); 196 kfree(tbl);
197 set_iommu_table_base(&pdev->dev, pdn->iommu_table); 197 set_iommu_table_base(&pdev->dev, pdn->iommu_table);
198 } 198 }
199 #else 199 #else
200 #define pci_dma_dev_setup_iseries NULL 200 #define pci_dma_dev_setup_iseries NULL
201 #endif 201 #endif
202 202
203 static struct iommu_table veth_iommu_table; 203 static struct iommu_table veth_iommu_table;
204 static struct iommu_table vio_iommu_table; 204 static struct iommu_table vio_iommu_table;
205 205
206 void *iseries_hv_alloc(size_t size, dma_addr_t *dma_handle, gfp_t flag) 206 void *iseries_hv_alloc(size_t size, dma_addr_t *dma_handle, gfp_t flag)
207 { 207 {
208 return iommu_alloc_coherent(NULL, &vio_iommu_table, size, dma_handle, 208 return iommu_alloc_coherent(NULL, &vio_iommu_table, size, dma_handle,
209 DMA_BIT_MASK(32), flag, -1); 209 DMA_BIT_MASK(32), flag, -1);
210 } 210 }
211 EXPORT_SYMBOL_GPL(iseries_hv_alloc); 211 EXPORT_SYMBOL_GPL(iseries_hv_alloc);
212 212
213 void iseries_hv_free(size_t size, void *vaddr, dma_addr_t dma_handle) 213 void iseries_hv_free(size_t size, void *vaddr, dma_addr_t dma_handle)
214 { 214 {
215 iommu_free_coherent(&vio_iommu_table, size, vaddr, dma_handle); 215 iommu_free_coherent(&vio_iommu_table, size, vaddr, dma_handle);
216 } 216 }
217 EXPORT_SYMBOL_GPL(iseries_hv_free); 217 EXPORT_SYMBOL_GPL(iseries_hv_free);
218 218
219 dma_addr_t iseries_hv_map(void *vaddr, size_t size, 219 dma_addr_t iseries_hv_map(void *vaddr, size_t size,
220 enum dma_data_direction direction) 220 enum dma_data_direction direction)
221 { 221 {
222 return iommu_map_page(NULL, &vio_iommu_table, virt_to_page(vaddr), 222 return iommu_map_page(NULL, &vio_iommu_table, virt_to_page(vaddr),
223 (unsigned long)vaddr % PAGE_SIZE, size, 223 (unsigned long)vaddr % PAGE_SIZE, size,
224 DMA_BIT_MASK(32), direction, NULL); 224 DMA_BIT_MASK(32), direction, NULL);
225 } 225 }
226 226
227 void iseries_hv_unmap(dma_addr_t dma_handle, size_t size, 227 void iseries_hv_unmap(dma_addr_t dma_handle, size_t size,
228 enum dma_data_direction direction) 228 enum dma_data_direction direction)
229 { 229 {
230 iommu_unmap_page(&vio_iommu_table, dma_handle, size, direction, NULL); 230 iommu_unmap_page(&vio_iommu_table, dma_handle, size, direction, NULL);
231 } 231 }
232 232
233 void __init iommu_vio_init(void) 233 void __init iommu_vio_init(void)
234 { 234 {
235 iommu_table_getparms_iSeries(255, 0, 0xff, &veth_iommu_table); 235 iommu_table_getparms_iSeries(255, 0, 0xff, &veth_iommu_table);
236 veth_iommu_table.it_size /= 2; 236 veth_iommu_table.it_size /= 2;
237 vio_iommu_table = veth_iommu_table; 237 vio_iommu_table = veth_iommu_table;
238 vio_iommu_table.it_offset += veth_iommu_table.it_size; 238 vio_iommu_table.it_offset += veth_iommu_table.it_size;
239 239
240 if (!iommu_init_table(&veth_iommu_table, -1)) 240 if (!iommu_init_table(&veth_iommu_table, -1))
241 printk("Virtual Bus VETH TCE table failed.\n"); 241 printk("Virtual Bus VETH TCE table failed.\n");
242 if (!iommu_init_table(&vio_iommu_table, -1)) 242 if (!iommu_init_table(&vio_iommu_table, -1))
243 printk("Virtual Bus VIO TCE table failed.\n"); 243 printk("Virtual Bus VIO TCE table failed.\n");
244 } 244 }
245 245
246 struct iommu_table *vio_build_iommu_table_iseries(struct vio_dev *dev) 246 struct iommu_table *vio_build_iommu_table_iseries(struct vio_dev *dev)
247 { 247 {
248 if (strcmp(dev->type, "network") == 0) 248 if (strcmp(dev->type, "network") == 0)
249 return &veth_iommu_table; 249 return &veth_iommu_table;
250 return &vio_iommu_table; 250 return &vio_iommu_table;
251 } 251 }
252 252
253 void iommu_init_early_iSeries(void) 253 void iommu_init_early_iSeries(void)
254 { 254 {
255 ppc_md.tce_build = tce_build_iSeries; 255 ppc_md.tce_build = tce_build_iSeries;
256 ppc_md.tce_free = tce_free_iSeries; 256 ppc_md.tce_free = tce_free_iSeries;
257 257
258 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_iseries; 258 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_iseries;
259 set_pci_dma_ops(&dma_iommu_ops); 259 set_pci_dma_ops(&dma_iommu_ops);
260 } 260 }
261 261
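iseries_hv_alloc() and iseries_hv_free(), exported above, allocate and release coherent DMA buffers through the vio_iommu_table, so callers get a hypervisor-visible mapping without touching the IOMMU tables directly. A usage sketch, assuming an illustrative caller name and a 4 KB buffer:

	/* Sketch only: the function name and buffer size are illustrative. */
	static int example_hv_buffer(void)
	{
		dma_addr_t dma;
		void *buf = iseries_hv_alloc(4096, &dma, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		/* ... fill buf; hand dma to the hypervisor / virtual bus ... */

		iseries_hv_free(4096, buf, dma);
		return 0;
	}
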
arch/powerpc/platforms/iseries/ksyms.c
1 /* 1 /*
2 * (C) 2001-2005 PPC 64 Team, IBM Corp 2 * (C) 2001-2005 PPC 64 Team, IBM Corp
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 */ 8 */
9 #include <linux/module.h> 9 #include <linux/export.h>
10 10
11 #include <asm/hw_irq.h> 11 #include <asm/hw_irq.h>
12 #include <asm/iseries/hv_call_sc.h> 12 #include <asm/iseries/hv_call_sc.h>
13 13
14 EXPORT_SYMBOL(HvCall0); 14 EXPORT_SYMBOL(HvCall0);
15 EXPORT_SYMBOL(HvCall1); 15 EXPORT_SYMBOL(HvCall1);
16 EXPORT_SYMBOL(HvCall2); 16 EXPORT_SYMBOL(HvCall2);
17 EXPORT_SYMBOL(HvCall3); 17 EXPORT_SYMBOL(HvCall3);
18 EXPORT_SYMBOL(HvCall4); 18 EXPORT_SYMBOL(HvCall4);
19 EXPORT_SYMBOL(HvCall5); 19 EXPORT_SYMBOL(HvCall5);
20 EXPORT_SYMBOL(HvCall6); 20 EXPORT_SYMBOL(HvCall6);
21 EXPORT_SYMBOL(HvCall7); 21 EXPORT_SYMBOL(HvCall7);
22 22
arch/powerpc/platforms/iseries/lpevents.c
1 /* 1 /*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or 6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version. 7 * (at your option) any later version.
8 */ 8 */
9 9
10 #include <linux/stddef.h> 10 #include <linux/stddef.h>
11 #include <linux/kernel.h> 11 #include <linux/kernel.h>
12 #include <linux/sched.h> 12 #include <linux/sched.h>
13 #include <linux/bootmem.h> 13 #include <linux/bootmem.h>
14 #include <linux/seq_file.h> 14 #include <linux/seq_file.h>
15 #include <linux/proc_fs.h> 15 #include <linux/proc_fs.h>
16 #include <linux/module.h> 16 #include <linux/export.h>
17 17
18 #include <asm/system.h> 18 #include <asm/system.h>
19 #include <asm/paca.h> 19 #include <asm/paca.h>
20 #include <asm/firmware.h> 20 #include <asm/firmware.h>
21 #include <asm/iseries/it_lp_queue.h> 21 #include <asm/iseries/it_lp_queue.h>
22 #include <asm/iseries/hv_lp_event.h> 22 #include <asm/iseries/hv_lp_event.h>
23 #include <asm/iseries/hv_call_event.h> 23 #include <asm/iseries/hv_call_event.h>
24 #include "it_lp_naca.h" 24 #include "it_lp_naca.h"
25 25
26 /* 26 /*
27 * The LpQueue is used to pass event data from the hypervisor to 27 * The LpQueue is used to pass event data from the hypervisor to
28 * the partition. This is where I/O interrupt events are communicated. 28 * the partition. This is where I/O interrupt events are communicated.
29 * 29 *
30 * It is written to by the hypervisor so cannot end up in the BSS. 30 * It is written to by the hypervisor so cannot end up in the BSS.
31 */ 31 */
32 struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data"))); 32 struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));
33 33
34 DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts); 34 DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);
35 35
36 static char *event_types[HvLpEvent_Type_NumTypes] = { 36 static char *event_types[HvLpEvent_Type_NumTypes] = {
37 "Hypervisor", 37 "Hypervisor",
38 "Machine Facilities", 38 "Machine Facilities",
39 "Session Manager", 39 "Session Manager",
40 "SPD I/O", 40 "SPD I/O",
41 "Virtual Bus", 41 "Virtual Bus",
42 "PCI I/O", 42 "PCI I/O",
43 "RIO I/O", 43 "RIO I/O",
44 "Virtual Lan", 44 "Virtual Lan",
45 "Virtual I/O" 45 "Virtual I/O"
46 }; 46 };
47 47
48 /* Array of LpEvent handler functions */ 48 /* Array of LpEvent handler functions */
49 static LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes]; 49 static LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
50 static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes]; 50 static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];
51 51
52 static struct HvLpEvent * get_next_hvlpevent(void) 52 static struct HvLpEvent * get_next_hvlpevent(void)
53 { 53 {
54 struct HvLpEvent * event; 54 struct HvLpEvent * event;
55 event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event; 55 event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;
56 56
57 if (hvlpevent_is_valid(event)) { 57 if (hvlpevent_is_valid(event)) {
58 /* rmb() needed only for weakly consistent machines (regatta) */ 58 /* rmb() needed only for weakly consistent machines (regatta) */
59 rmb(); 59 rmb();
60 /* Set pointer to next potential event */ 60 /* Set pointer to next potential event */
61 hvlpevent_queue.hq_current_event += ((event->xSizeMinus1 + 61 hvlpevent_queue.hq_current_event += ((event->xSizeMinus1 +
62 IT_LP_EVENT_ALIGN) / IT_LP_EVENT_ALIGN) * 62 IT_LP_EVENT_ALIGN) / IT_LP_EVENT_ALIGN) *
63 IT_LP_EVENT_ALIGN; 63 IT_LP_EVENT_ALIGN;
64 64
65 /* Wrap to beginning if no room at end */ 65 /* Wrap to beginning if no room at end */
66 if (hvlpevent_queue.hq_current_event > 66 if (hvlpevent_queue.hq_current_event >
67 hvlpevent_queue.hq_last_event) { 67 hvlpevent_queue.hq_last_event) {
68 hvlpevent_queue.hq_current_event = 68 hvlpevent_queue.hq_current_event =
69 hvlpevent_queue.hq_event_stack; 69 hvlpevent_queue.hq_event_stack;
70 } 70 }
71 } else { 71 } else {
72 event = NULL; 72 event = NULL;
73 } 73 }
74 74
75 return event; 75 return event;
76 } 76 }
77 77
78 static unsigned long spread_lpevents = NR_CPUS; 78 static unsigned long spread_lpevents = NR_CPUS;
79 79
80 int hvlpevent_is_pending(void) 80 int hvlpevent_is_pending(void)
81 { 81 {
82 struct HvLpEvent *next_event; 82 struct HvLpEvent *next_event;
83 83
84 if (smp_processor_id() >= spread_lpevents) 84 if (smp_processor_id() >= spread_lpevents)
85 return 0; 85 return 0;
86 86
87 next_event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event; 87 next_event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;
88 88
89 return hvlpevent_is_valid(next_event) || 89 return hvlpevent_is_valid(next_event) ||
90 hvlpevent_queue.hq_overflow_pending; 90 hvlpevent_queue.hq_overflow_pending;
91 } 91 }
92 92
93 static void hvlpevent_clear_valid(struct HvLpEvent * event) 93 static void hvlpevent_clear_valid(struct HvLpEvent * event)
94 { 94 {
95 /* Tell the Hypervisor that we're done with this event. 95 /* Tell the Hypervisor that we're done with this event.
96 * Also clear bits within this event that might look like valid bits. 96 * Also clear bits within this event that might look like valid bits.
97 * ie. on 64-byte boundaries. 97 * ie. on 64-byte boundaries.
98 */ 98 */
99 struct HvLpEvent *tmp; 99 struct HvLpEvent *tmp;
100 unsigned extra = ((event->xSizeMinus1 + IT_LP_EVENT_ALIGN) / 100 unsigned extra = ((event->xSizeMinus1 + IT_LP_EVENT_ALIGN) /
101 IT_LP_EVENT_ALIGN) - 1; 101 IT_LP_EVENT_ALIGN) - 1;
102 102
103 switch (extra) { 103 switch (extra) {
104 case 3: 104 case 3:
105 tmp = (struct HvLpEvent*)((char*)event + 3 * IT_LP_EVENT_ALIGN); 105 tmp = (struct HvLpEvent*)((char*)event + 3 * IT_LP_EVENT_ALIGN);
106 hvlpevent_invalidate(tmp); 106 hvlpevent_invalidate(tmp);
107 case 2: 107 case 2:
108 tmp = (struct HvLpEvent*)((char*)event + 2 * IT_LP_EVENT_ALIGN); 108 tmp = (struct HvLpEvent*)((char*)event + 2 * IT_LP_EVENT_ALIGN);
109 hvlpevent_invalidate(tmp); 109 hvlpevent_invalidate(tmp);
110 case 1: 110 case 1:
111 tmp = (struct HvLpEvent*)((char*)event + 1 * IT_LP_EVENT_ALIGN); 111 tmp = (struct HvLpEvent*)((char*)event + 1 * IT_LP_EVENT_ALIGN);
112 hvlpevent_invalidate(tmp); 112 hvlpevent_invalidate(tmp);
113 } 113 }
114 114
115 mb(); 115 mb();
116 116
117 hvlpevent_invalidate(event); 117 hvlpevent_invalidate(event);
118 } 118 }
119 119
120 void process_hvlpevents(void) 120 void process_hvlpevents(void)
121 { 121 {
122 struct HvLpEvent * event; 122 struct HvLpEvent * event;
123 123
124 restart: 124 restart:
125 /* If we have recursed, just return */ 125 /* If we have recursed, just return */
126 if (!spin_trylock(&hvlpevent_queue.hq_lock)) 126 if (!spin_trylock(&hvlpevent_queue.hq_lock))
127 return; 127 return;
128 128
129 for (;;) { 129 for (;;) {
130 event = get_next_hvlpevent(); 130 event = get_next_hvlpevent();
131 if (event) { 131 if (event) {
132 /* Call appropriate handler here, passing 132 /* Call appropriate handler here, passing
133 * a pointer to the LpEvent. The handler 133 * a pointer to the LpEvent. The handler
134 * must make a copy of the LpEvent if it 134 * must make a copy of the LpEvent if it
135 * needs it in a bottom half. (perhaps for 135 * needs it in a bottom half. (perhaps for
136 * an ACK) 136 * an ACK)
137 * 137 *
138 * Handlers are responsible for ACK processing 138 * Handlers are responsible for ACK processing
139 * 139 *
140 * The Hypervisor guarantees that LpEvents will 140 * The Hypervisor guarantees that LpEvents will
141 * only be delivered with types that we have 141 * only be delivered with types that we have
142 * registered for, so no type check is necessary 142 * registered for, so no type check is necessary
143 * here! 143 * here!
144 */ 144 */
145 if (event->xType < HvLpEvent_Type_NumTypes) 145 if (event->xType < HvLpEvent_Type_NumTypes)
146 __get_cpu_var(hvlpevent_counts)[event->xType]++; 146 __get_cpu_var(hvlpevent_counts)[event->xType]++;
147 if (event->xType < HvLpEvent_Type_NumTypes && 147 if (event->xType < HvLpEvent_Type_NumTypes &&
148 lpEventHandler[event->xType]) 148 lpEventHandler[event->xType])
149 lpEventHandler[event->xType](event); 149 lpEventHandler[event->xType](event);
150 else { 150 else {
151 u8 type = event->xType; 151 u8 type = event->xType;
152 152
153 /* 153 /*
154 * Don't printk in the spinlock as printk 154 * Don't printk in the spinlock as printk
155 * may require ack events from the HV to send 155 * may require ack events from the HV to send
156 * any characters there. 156 * any characters there.
157 */ 157 */
158 hvlpevent_clear_valid(event); 158 hvlpevent_clear_valid(event);
159 spin_unlock(&hvlpevent_queue.hq_lock); 159 spin_unlock(&hvlpevent_queue.hq_lock);
160 printk(KERN_INFO 160 printk(KERN_INFO
161 "Unexpected Lp Event type=%d\n", type); 161 "Unexpected Lp Event type=%d\n", type);
162 goto restart; 162 goto restart;
163 } 163 }
164 164
165 hvlpevent_clear_valid(event); 165 hvlpevent_clear_valid(event);
166 } else if (hvlpevent_queue.hq_overflow_pending) 166 } else if (hvlpevent_queue.hq_overflow_pending)
167 /* 167 /*
168 * No more valid events. If overflow events are 168 * No more valid events. If overflow events are
169 * pending process them 169 * pending process them
170 */ 170 */
171 HvCallEvent_getOverflowLpEvents(hvlpevent_queue.hq_index); 171 HvCallEvent_getOverflowLpEvents(hvlpevent_queue.hq_index);
172 else 172 else
173 break; 173 break;
174 } 174 }
175 175
176 spin_unlock(&hvlpevent_queue.hq_lock); 176 spin_unlock(&hvlpevent_queue.hq_lock);
177 } 177 }
178 178
179 static int set_spread_lpevents(char *str) 179 static int set_spread_lpevents(char *str)
180 { 180 {
181 unsigned long val = simple_strtoul(str, NULL, 0); 181 unsigned long val = simple_strtoul(str, NULL, 0);
182 182
183 /* 183 /*
184 * The parameter is the number of processors to share in processing 184 * The parameter is the number of processors to share in processing
185 * lp events. 185 * lp events.
186 */ 186 */
187 if (( val > 0) && (val <= NR_CPUS)) { 187 if (( val > 0) && (val <= NR_CPUS)) {
188 spread_lpevents = val; 188 spread_lpevents = val;
189 printk("lpevent processing spread over %ld processors\n", val); 189 printk("lpevent processing spread over %ld processors\n", val);
190 } else { 190 } else {
191 printk("invalid spread_lpevents %ld\n", val); 191 printk("invalid spread_lpevents %ld\n", val);
192 } 192 }
193 193
194 return 1; 194 return 1;
195 } 195 }
196 __setup("spread_lpevents=", set_spread_lpevents); 196 __setup("spread_lpevents=", set_spread_lpevents);
197 197
198 void __init setup_hvlpevent_queue(void) 198 void __init setup_hvlpevent_queue(void)
199 { 199 {
200 void *eventStack; 200 void *eventStack;
201 201
202 spin_lock_init(&hvlpevent_queue.hq_lock); 202 spin_lock_init(&hvlpevent_queue.hq_lock);
203 203
204 /* Allocate a page for the Event Stack. */ 204 /* Allocate a page for the Event Stack. */
205 eventStack = alloc_bootmem_pages(IT_LP_EVENT_STACK_SIZE); 205 eventStack = alloc_bootmem_pages(IT_LP_EVENT_STACK_SIZE);
206 memset(eventStack, 0, IT_LP_EVENT_STACK_SIZE); 206 memset(eventStack, 0, IT_LP_EVENT_STACK_SIZE);
207 207
208 /* Invoke the hypervisor to initialize the event stack */ 208 /* Invoke the hypervisor to initialize the event stack */
209 HvCallEvent_setLpEventStack(0, eventStack, IT_LP_EVENT_STACK_SIZE); 209 HvCallEvent_setLpEventStack(0, eventStack, IT_LP_EVENT_STACK_SIZE);
210 210
211 hvlpevent_queue.hq_event_stack = eventStack; 211 hvlpevent_queue.hq_event_stack = eventStack;
212 hvlpevent_queue.hq_current_event = eventStack; 212 hvlpevent_queue.hq_current_event = eventStack;
213 hvlpevent_queue.hq_last_event = (char *)eventStack + 213 hvlpevent_queue.hq_last_event = (char *)eventStack +
214 (IT_LP_EVENT_STACK_SIZE - IT_LP_EVENT_MAX_SIZE); 214 (IT_LP_EVENT_STACK_SIZE - IT_LP_EVENT_MAX_SIZE);
215 hvlpevent_queue.hq_index = 0; 215 hvlpevent_queue.hq_index = 0;
216 } 216 }
217 217
218 /* Register a handler for an LpEvent type */ 218 /* Register a handler for an LpEvent type */
219 int HvLpEvent_registerHandler(HvLpEvent_Type eventType, LpEventHandler handler) 219 int HvLpEvent_registerHandler(HvLpEvent_Type eventType, LpEventHandler handler)
220 { 220 {
221 if (eventType < HvLpEvent_Type_NumTypes) { 221 if (eventType < HvLpEvent_Type_NumTypes) {
222 lpEventHandler[eventType] = handler; 222 lpEventHandler[eventType] = handler;
223 return 0; 223 return 0;
224 } 224 }
225 return 1; 225 return 1;
226 } 226 }
227 EXPORT_SYMBOL(HvLpEvent_registerHandler); 227 EXPORT_SYMBOL(HvLpEvent_registerHandler);
228 228
229 int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType) 229 int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType)
230 { 230 {
231 might_sleep(); 231 might_sleep();
232 232
233 if (eventType < HvLpEvent_Type_NumTypes) { 233 if (eventType < HvLpEvent_Type_NumTypes) {
234 if (!lpEventHandlerPaths[eventType]) { 234 if (!lpEventHandlerPaths[eventType]) {
235 lpEventHandler[eventType] = NULL; 235 lpEventHandler[eventType] = NULL;
236 /* 236 /*
237 * We now sleep until all other CPUs have scheduled. 237 * We now sleep until all other CPUs have scheduled.
238 * This ensures that the deletion is seen by all 238 * This ensures that the deletion is seen by all
239 * other CPUs, and that the deleted handler isn't 239 * other CPUs, and that the deleted handler isn't
240 * still running on another CPU when we return. 240 * still running on another CPU when we return.
241 */ 241 */
242 synchronize_sched(); 242 synchronize_sched();
243 return 0; 243 return 0;
244 } 244 }
245 } 245 }
246 return 1; 246 return 1;
247 } 247 }
248 EXPORT_SYMBOL(HvLpEvent_unregisterHandler); 248 EXPORT_SYMBOL(HvLpEvent_unregisterHandler);
249 249
250 /* 250 /*
251 * lpIndex is the partition index of the target partition. 251 * lpIndex is the partition index of the target partition.
252 * needed only for VirtualIo, VirtualLan and SessionMgr. Zero 252 * needed only for VirtualIo, VirtualLan and SessionMgr. Zero
253 * indicates to use our partition index - for the other types. 253 * indicates to use our partition index - for the other types.
254 */ 254 */
255 int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex) 255 int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
256 { 256 {
257 if ((eventType < HvLpEvent_Type_NumTypes) && 257 if ((eventType < HvLpEvent_Type_NumTypes) &&
258 lpEventHandler[eventType]) { 258 lpEventHandler[eventType]) {
259 if (lpIndex == 0) 259 if (lpIndex == 0)
260 lpIndex = itLpNaca.xLpIndex; 260 lpIndex = itLpNaca.xLpIndex;
261 HvCallEvent_openLpEventPath(lpIndex, eventType); 261 HvCallEvent_openLpEventPath(lpIndex, eventType);
262 ++lpEventHandlerPaths[eventType]; 262 ++lpEventHandlerPaths[eventType];
263 return 0; 263 return 0;
264 } 264 }
265 return 1; 265 return 1;
266 } 266 }
267 267
268 int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex) 268 int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
269 { 269 {
270 if ((eventType < HvLpEvent_Type_NumTypes) && 270 if ((eventType < HvLpEvent_Type_NumTypes) &&
271 lpEventHandler[eventType] && 271 lpEventHandler[eventType] &&
272 lpEventHandlerPaths[eventType]) { 272 lpEventHandlerPaths[eventType]) {
273 if (lpIndex == 0) 273 if (lpIndex == 0)
274 lpIndex = itLpNaca.xLpIndex; 274 lpIndex = itLpNaca.xLpIndex;
275 HvCallEvent_closeLpEventPath(lpIndex, eventType); 275 HvCallEvent_closeLpEventPath(lpIndex, eventType);
276 --lpEventHandlerPaths[eventType]; 276 --lpEventHandlerPaths[eventType];
277 return 0; 277 return 0;
278 } 278 }
279 return 1; 279 return 1;
280 } 280 }
281 281
282 static int proc_lpevents_show(struct seq_file *m, void *v) 282 static int proc_lpevents_show(struct seq_file *m, void *v)
283 { 283 {
284 int cpu, i; 284 int cpu, i;
285 unsigned long sum; 285 unsigned long sum;
286 static unsigned long cpu_totals[NR_CPUS]; 286 static unsigned long cpu_totals[NR_CPUS];
287 287
288 /* FIXME: do we care that there's no locking here? */ 288 /* FIXME: do we care that there's no locking here? */
289 sum = 0; 289 sum = 0;
290 for_each_online_cpu(cpu) { 290 for_each_online_cpu(cpu) {
291 cpu_totals[cpu] = 0; 291 cpu_totals[cpu] = 0;
292 for (i = 0; i < HvLpEvent_Type_NumTypes; i++) { 292 for (i = 0; i < HvLpEvent_Type_NumTypes; i++) {
293 cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i]; 293 cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i];
294 } 294 }
295 sum += cpu_totals[cpu]; 295 sum += cpu_totals[cpu];
296 } 296 }
297 297
298 seq_printf(m, "LpEventQueue 0\n"); 298 seq_printf(m, "LpEventQueue 0\n");
299 seq_printf(m, " events processed:\t%lu\n", sum); 299 seq_printf(m, " events processed:\t%lu\n", sum);
300 300
301 for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) { 301 for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) {
302 sum = 0; 302 sum = 0;
303 for_each_online_cpu(cpu) { 303 for_each_online_cpu(cpu) {
304 sum += per_cpu(hvlpevent_counts, cpu)[i]; 304 sum += per_cpu(hvlpevent_counts, cpu)[i];
305 } 305 }
306 306
307 seq_printf(m, " %-20s %10lu\n", event_types[i], sum); 307 seq_printf(m, " %-20s %10lu\n", event_types[i], sum);
308 } 308 }
309 309
310 seq_printf(m, "\n events processed by processor:\n"); 310 seq_printf(m, "\n events processed by processor:\n");
311 311
312 for_each_online_cpu(cpu) { 312 for_each_online_cpu(cpu) {
313 seq_printf(m, " CPU%02d %10lu\n", cpu, cpu_totals[cpu]); 313 seq_printf(m, " CPU%02d %10lu\n", cpu, cpu_totals[cpu]);
314 } 314 }
315 315
316 return 0; 316 return 0;
317 } 317 }
318 318
319 static int proc_lpevents_open(struct inode *inode, struct file *file) 319 static int proc_lpevents_open(struct inode *inode, struct file *file)
320 { 320 {
321 return single_open(file, proc_lpevents_show, NULL); 321 return single_open(file, proc_lpevents_show, NULL);
322 } 322 }
323 323
324 static const struct file_operations proc_lpevents_operations = { 324 static const struct file_operations proc_lpevents_operations = {
325 .open = proc_lpevents_open, 325 .open = proc_lpevents_open,
326 .read = seq_read, 326 .read = seq_read,
327 .llseek = seq_lseek, 327 .llseek = seq_lseek,
328 .release = single_release, 328 .release = single_release,
329 }; 329 };
330 330
331 static int __init proc_lpevents_init(void) 331 static int __init proc_lpevents_init(void)
332 { 332 {
333 if (!firmware_has_feature(FW_FEATURE_ISERIES)) 333 if (!firmware_has_feature(FW_FEATURE_ISERIES))
334 return 0; 334 return 0;
335 335
336 proc_create("iSeries/lpevents", S_IFREG|S_IRUGO, NULL, 336 proc_create("iSeries/lpevents", S_IFREG|S_IRUGO, NULL,
337 &proc_lpevents_operations); 337 &proc_lpevents_operations);
338 return 0; 338 return 0;
339 } 339 }
340 __initcall(proc_lpevents_init); 340 __initcall(proc_lpevents_init);
341 341
342 342
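HvLpEvent_registerHandler(), HvLpEvent_openPath() and their unregister/close counterparts above are the exported entry points a virtual-device driver uses to receive LpEvents; each returns 0 on success and 1 on failure. A hedged sketch of a consumer follows; the handler body and error codes are illustrative, not taken from any in-tree driver, and passing lpIndex 0 relies on the "use our own partition index" convention documented above.

#include <linux/errno.h>
#include <asm/iseries/hv_lp_event.h>
#include <asm/iseries/hv_call_event.h>

/* Illustrative handler: copy out anything a bottom half needs, then ack. */
static void example_vio_handler(struct HvLpEvent *event)
{
	if (event == NULL)
		return;		/* notification that a partition went away */
	if (hvlpevent_need_ack(event)) {
		event->xRc = HvLpEvent_Rc_Good;
		HvCallEvent_ackLpEvent(event);
	}
}

static int example_vio_attach(void)
{
	if (HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
				      example_vio_handler))
		return -EINVAL;
	/* lpIndex 0 means "use our own partition index" */
	if (HvLpEvent_openPath(HvLpEvent_Type_VirtualIo, 0)) {
		HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualIo);
		return -EIO;
	}
	return 0;
}
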
arch/powerpc/platforms/iseries/vio.c
1 /* 1 /*
2 * Legacy iSeries specific vio initialisation 2 * Legacy iSeries specific vio initialisation
3 * that needs to be built in (not a module). 3 * that needs to be built in (not a module).
4 * 4 *
5 * © Copyright 2007 IBM Corporation 5 * © Copyright 2007 IBM Corporation
6 * Author: Stephen Rothwell 6 * Author: Stephen Rothwell
7 * Some parts collected from various other files 7 * Some parts collected from various other files
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as 10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the 11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version. 12 * License, or (at your option) any later version.
13 * 13 *
14 * This program is distributed in the hope that it will be useful, but 14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details. 17 * General Public License for more details.
18 * 18 *
19 * You should have received a copy of the GNU General Public License 19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software Foundation, 20 * along with this program; if not, write to the Free Software Foundation,
21 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */ 22 */
23 #include <linux/of.h> 23 #include <linux/of.h>
24 #include <linux/init.h> 24 #include <linux/init.h>
25 #include <linux/slab.h> 25 #include <linux/slab.h>
26 #include <linux/completion.h> 26 #include <linux/completion.h>
27 #include <linux/proc_fs.h> 27 #include <linux/proc_fs.h>
28 #include <linux/module.h> 28 #include <linux/export.h>
29 29
30 #include <asm/firmware.h> 30 #include <asm/firmware.h>
31 #include <asm/vio.h> 31 #include <asm/vio.h>
32 #include <asm/iseries/vio.h> 32 #include <asm/iseries/vio.h>
33 #include <asm/iseries/iommu.h> 33 #include <asm/iseries/iommu.h>
34 #include <asm/iseries/hv_types.h> 34 #include <asm/iseries/hv_types.h>
35 #include <asm/iseries/hv_lp_event.h> 35 #include <asm/iseries/hv_lp_event.h>
36 36
37 #define FIRST_VTY 0 37 #define FIRST_VTY 0
38 #define NUM_VTYS 1 38 #define NUM_VTYS 1
39 #define FIRST_VSCSI (FIRST_VTY + NUM_VTYS) 39 #define FIRST_VSCSI (FIRST_VTY + NUM_VTYS)
40 #define NUM_VSCSIS 1 40 #define NUM_VSCSIS 1
41 #define FIRST_VLAN (FIRST_VSCSI + NUM_VSCSIS) 41 #define FIRST_VLAN (FIRST_VSCSI + NUM_VSCSIS)
42 #define NUM_VLANS HVMAXARCHITECTEDVIRTUALLANS 42 #define NUM_VLANS HVMAXARCHITECTEDVIRTUALLANS
43 #define FIRST_VIODASD (FIRST_VLAN + NUM_VLANS) 43 #define FIRST_VIODASD (FIRST_VLAN + NUM_VLANS)
44 #define NUM_VIODASDS HVMAXARCHITECTEDVIRTUALDISKS 44 #define NUM_VIODASDS HVMAXARCHITECTEDVIRTUALDISKS
45 #define FIRST_VIOCD (FIRST_VIODASD + NUM_VIODASDS) 45 #define FIRST_VIOCD (FIRST_VIODASD + NUM_VIODASDS)
46 #define NUM_VIOCDS HVMAXARCHITECTEDVIRTUALCDROMS 46 #define NUM_VIOCDS HVMAXARCHITECTEDVIRTUALCDROMS
47 #define FIRST_VIOTAPE (FIRST_VIOCD + NUM_VIOCDS) 47 #define FIRST_VIOTAPE (FIRST_VIOCD + NUM_VIOCDS)
48 #define NUM_VIOTAPES HVMAXARCHITECTEDVIRTUALTAPES 48 #define NUM_VIOTAPES HVMAXARCHITECTEDVIRTUALTAPES
49 49
50 struct vio_waitevent { 50 struct vio_waitevent {
51 struct completion com; 51 struct completion com;
52 int rc; 52 int rc;
53 u16 sub_result; 53 u16 sub_result;
54 }; 54 };
55 55
56 struct vio_resource { 56 struct vio_resource {
57 char rsrcname[10]; 57 char rsrcname[10];
58 char type[4]; 58 char type[4];
59 char model[3]; 59 char model[3];
60 }; 60 };
61 61
62 static struct property *new_property(const char *name, int length, 62 static struct property *new_property(const char *name, int length,
63 const void *value) 63 const void *value)
64 { 64 {
65 struct property *np = kzalloc(sizeof(*np) + strlen(name) + 1 + length, 65 struct property *np = kzalloc(sizeof(*np) + strlen(name) + 1 + length,
66 GFP_KERNEL); 66 GFP_KERNEL);
67 67
68 if (!np) 68 if (!np)
69 return NULL; 69 return NULL;
70 np->name = (char *)(np + 1); 70 np->name = (char *)(np + 1);
71 np->value = np->name + strlen(name) + 1; 71 np->value = np->name + strlen(name) + 1;
72 strcpy(np->name, name); 72 strcpy(np->name, name);
73 memcpy(np->value, value, length); 73 memcpy(np->value, value, length);
74 np->length = length; 74 np->length = length;
75 return np; 75 return np;
76 } 76 }
77 77
78 static void free_property(struct property *np) 78 static void free_property(struct property *np)
79 { 79 {
80 kfree(np); 80 kfree(np);
81 } 81 }
82 82
83 static struct device_node *new_node(const char *path, 83 static struct device_node *new_node(const char *path,
84 struct device_node *parent) 84 struct device_node *parent)
85 { 85 {
86 struct device_node *np = kzalloc(sizeof(*np), GFP_KERNEL); 86 struct device_node *np = kzalloc(sizeof(*np), GFP_KERNEL);
87 87
88 if (!np) 88 if (!np)
89 return NULL; 89 return NULL;
90 np->full_name = kstrdup(path, GFP_KERNEL); 90 np->full_name = kstrdup(path, GFP_KERNEL);
91 if (!np->full_name) { 91 if (!np->full_name) {
92 kfree(np); 92 kfree(np);
93 return NULL; 93 return NULL;
94 } 94 }
95 of_node_set_flag(np, OF_DYNAMIC); 95 of_node_set_flag(np, OF_DYNAMIC);
96 kref_init(&np->kref); 96 kref_init(&np->kref);
97 np->parent = of_node_get(parent); 97 np->parent = of_node_get(parent);
98 return np; 98 return np;
99 } 99 }
100 100
101 static void free_node(struct device_node *np) 101 static void free_node(struct device_node *np)
102 { 102 {
103 struct property *next; 103 struct property *next;
104 struct property *prop; 104 struct property *prop;
105 105
106 next = np->properties; 106 next = np->properties;
107 while (next) { 107 while (next) {
108 prop = next; 108 prop = next;
109 next = prop->next; 109 next = prop->next;
110 free_property(prop); 110 free_property(prop);
111 } 111 }
112 of_node_put(np->parent); 112 of_node_put(np->parent);
113 kfree(np->full_name); 113 kfree(np->full_name);
114 kfree(np); 114 kfree(np);
115 } 115 }
116 116
117 static int add_string_property(struct device_node *np, const char *name, 117 static int add_string_property(struct device_node *np, const char *name,
118 const char *value) 118 const char *value)
119 { 119 {
120 struct property *nprop = new_property(name, strlen(value) + 1, value); 120 struct property *nprop = new_property(name, strlen(value) + 1, value);
121 121
122 if (!nprop) 122 if (!nprop)
123 return 0; 123 return 0;
124 prom_add_property(np, nprop); 124 prom_add_property(np, nprop);
125 return 1; 125 return 1;
126 } 126 }
127 127
128 static int add_raw_property(struct device_node *np, const char *name, 128 static int add_raw_property(struct device_node *np, const char *name,
129 int length, const void *value) 129 int length, const void *value)
130 { 130 {
131 struct property *nprop = new_property(name, length, value); 131 struct property *nprop = new_property(name, length, value);
132 132
133 if (!nprop) 133 if (!nprop)
134 return 0; 134 return 0;
135 prom_add_property(np, nprop); 135 prom_add_property(np, nprop);
136 return 1; 136 return 1;
137 } 137 }
138 138
139 static struct device_node *do_device_node(struct device_node *parent, 139 static struct device_node *do_device_node(struct device_node *parent,
140 const char *name, u32 reg, u32 unit, const char *type, 140 const char *name, u32 reg, u32 unit, const char *type,
141 const char *compat, struct vio_resource *res) 141 const char *compat, struct vio_resource *res)
142 { 142 {
143 struct device_node *np; 143 struct device_node *np;
144 char path[32]; 144 char path[32];
145 145
146 snprintf(path, sizeof(path), "/vdevice/%s@%08x", name, reg); 146 snprintf(path, sizeof(path), "/vdevice/%s@%08x", name, reg);
147 np = new_node(path, parent); 147 np = new_node(path, parent);
148 if (!np) 148 if (!np)
149 return NULL; 149 return NULL;
150 if (!add_string_property(np, "name", name) || 150 if (!add_string_property(np, "name", name) ||
151 !add_string_property(np, "device_type", type) || 151 !add_string_property(np, "device_type", type) ||
152 !add_string_property(np, "compatible", compat) || 152 !add_string_property(np, "compatible", compat) ||
153 !add_raw_property(np, "reg", sizeof(reg), &reg) || 153 !add_raw_property(np, "reg", sizeof(reg), &reg) ||
154 !add_raw_property(np, "linux,unit_address", 154 !add_raw_property(np, "linux,unit_address",
155 sizeof(unit), &unit)) { 155 sizeof(unit), &unit)) {
156 goto node_free; 156 goto node_free;
157 } 157 }
158 if (res) { 158 if (res) {
159 if (!add_raw_property(np, "linux,vio_rsrcname", 159 if (!add_raw_property(np, "linux,vio_rsrcname",
160 sizeof(res->rsrcname), res->rsrcname) || 160 sizeof(res->rsrcname), res->rsrcname) ||
161 !add_raw_property(np, "linux,vio_type", 161 !add_raw_property(np, "linux,vio_type",
162 sizeof(res->type), res->type) || 162 sizeof(res->type), res->type) ||
163 !add_raw_property(np, "linux,vio_model", 163 !add_raw_property(np, "linux,vio_model",
164 sizeof(res->model), res->model)) 164 sizeof(res->model), res->model))
165 goto node_free; 165 goto node_free;
166 } 166 }
167 np->name = of_get_property(np, "name", NULL); 167 np->name = of_get_property(np, "name", NULL);
168 np->type = of_get_property(np, "device_type", NULL); 168 np->type = of_get_property(np, "device_type", NULL);
169 of_attach_node(np); 169 of_attach_node(np);
170 #ifdef CONFIG_PROC_DEVICETREE 170 #ifdef CONFIG_PROC_DEVICETREE
171 if (parent->pde) { 171 if (parent->pde) {
172 struct proc_dir_entry *ent; 172 struct proc_dir_entry *ent;
173 173
174 ent = proc_mkdir(strrchr(np->full_name, '/') + 1, parent->pde); 174 ent = proc_mkdir(strrchr(np->full_name, '/') + 1, parent->pde);
175 if (ent) 175 if (ent)
176 proc_device_tree_add_node(np, ent); 176 proc_device_tree_add_node(np, ent);
177 } 177 }
178 #endif 178 #endif
179 return np; 179 return np;
180 180
181 node_free: 181 node_free:
182 free_node(np); 182 free_node(np);
183 return NULL; 183 return NULL;
184 } 184 }
185 185
186 /* 186 /*
187 * This is here so that we can dynamically add viodasd 187 * This is here so that we can dynamically add viodasd
188 * devices without exposing all the above infrastructure. 188 * devices without exposing all the above infrastructure.
189 */ 189 */
190 struct vio_dev *vio_create_viodasd(u32 unit) 190 struct vio_dev *vio_create_viodasd(u32 unit)
191 { 191 {
192 struct device_node *vio_root; 192 struct device_node *vio_root;
193 struct device_node *np; 193 struct device_node *np;
194 struct vio_dev *vdev = NULL; 194 struct vio_dev *vdev = NULL;
195 195
196 vio_root = of_find_node_by_path("/vdevice"); 196 vio_root = of_find_node_by_path("/vdevice");
197 if (!vio_root) 197 if (!vio_root)
198 return NULL; 198 return NULL;
199 np = do_device_node(vio_root, "viodasd", FIRST_VIODASD + unit, unit, 199 np = do_device_node(vio_root, "viodasd", FIRST_VIODASD + unit, unit,
200 "block", "IBM,iSeries-viodasd", NULL); 200 "block", "IBM,iSeries-viodasd", NULL);
201 of_node_put(vio_root); 201 of_node_put(vio_root);
202 if (np) { 202 if (np) {
203 vdev = vio_register_device_node(np); 203 vdev = vio_register_device_node(np);
204 if (!vdev) 204 if (!vdev)
205 free_node(np); 205 free_node(np);
206 } 206 }
207 return vdev; 207 return vdev;
208 } 208 }
209 EXPORT_SYMBOL_GPL(vio_create_viodasd); 209 EXPORT_SYMBOL_GPL(vio_create_viodasd);
210 210
211 static void __init handle_block_event(struct HvLpEvent *event) 211 static void __init handle_block_event(struct HvLpEvent *event)
212 { 212 {
213 struct vioblocklpevent *bevent = (struct vioblocklpevent *)event; 213 struct vioblocklpevent *bevent = (struct vioblocklpevent *)event;
214 struct vio_waitevent *pwe; 214 struct vio_waitevent *pwe;
215 215
216 if (event == NULL) 216 if (event == NULL)
217 /* Notification that a partition went away! */ 217 /* Notification that a partition went away! */
218 return; 218 return;
219 /* First, we should NEVER get an int here...only acks */ 219 /* First, we should NEVER get an int here...only acks */
220 if (hvlpevent_is_int(event)) { 220 if (hvlpevent_is_int(event)) {
221 printk(KERN_WARNING "handle_viod_request: " 221 printk(KERN_WARNING "handle_viod_request: "
222 "Yikes! got an int in viodasd event handler!\n"); 222 "Yikes! got an int in viodasd event handler!\n");
223 if (hvlpevent_need_ack(event)) { 223 if (hvlpevent_need_ack(event)) {
224 event->xRc = HvLpEvent_Rc_InvalidSubtype; 224 event->xRc = HvLpEvent_Rc_InvalidSubtype;
225 HvCallEvent_ackLpEvent(event); 225 HvCallEvent_ackLpEvent(event);
226 } 226 }
227 return; 227 return;
228 } 228 }
229 229
230 switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) { 230 switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
231 case vioblockopen: 231 case vioblockopen:
232 /* 232 /*
233 * Handle a response to an open request. We get all the 233 * Handle a response to an open request. We get all the
234 * disk information in the response, so update it. The 234 * disk information in the response, so update it. The
235 * correlation token contains a pointer to a waitevent 235 * correlation token contains a pointer to a waitevent
236 * structure that has a completion in it. update the 236 * structure that has a completion in it. update the
237 * return code in the waitevent structure and post the 237 * return code in the waitevent structure and post the
238 * completion to wake up the guy who sent the request 238 * completion to wake up the guy who sent the request
239 */ 239 */
240 pwe = (struct vio_waitevent *)event->xCorrelationToken; 240 pwe = (struct vio_waitevent *)event->xCorrelationToken;
241 pwe->rc = event->xRc; 241 pwe->rc = event->xRc;
242 pwe->sub_result = bevent->sub_result; 242 pwe->sub_result = bevent->sub_result;
243 complete(&pwe->com); 243 complete(&pwe->com);
244 break; 244 break;
245 case vioblockclose: 245 case vioblockclose:
246 break; 246 break;
247 default: 247 default:
248 printk(KERN_WARNING "handle_viod_request: unexpected subtype!"); 248 printk(KERN_WARNING "handle_viod_request: unexpected subtype!");
249 if (hvlpevent_need_ack(event)) { 249 if (hvlpevent_need_ack(event)) {
250 event->xRc = HvLpEvent_Rc_InvalidSubtype; 250 event->xRc = HvLpEvent_Rc_InvalidSubtype;
251 HvCallEvent_ackLpEvent(event); 251 HvCallEvent_ackLpEvent(event);
252 } 252 }
253 } 253 }
254 } 254 }
255 255
256 static void __init probe_disk(struct device_node *vio_root, u32 unit) 256 static void __init probe_disk(struct device_node *vio_root, u32 unit)
257 { 257 {
258 HvLpEvent_Rc hvrc; 258 HvLpEvent_Rc hvrc;
259 struct vio_waitevent we; 259 struct vio_waitevent we;
260 u16 flags = 0; 260 u16 flags = 0;
261 261
262 retry: 262 retry:
263 init_completion(&we.com); 263 init_completion(&we.com);
264 264
265 /* Send the open event to OS/400 */ 265 /* Send the open event to OS/400 */
266 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, 266 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
267 HvLpEvent_Type_VirtualIo, 267 HvLpEvent_Type_VirtualIo,
268 viomajorsubtype_blockio | vioblockopen, 268 viomajorsubtype_blockio | vioblockopen,
269 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck, 269 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
270 viopath_sourceinst(viopath_hostLp), 270 viopath_sourceinst(viopath_hostLp),
271 viopath_targetinst(viopath_hostLp), 271 viopath_targetinst(viopath_hostLp),
272 (u64)(unsigned long)&we, VIOVERSION << 16, 272 (u64)(unsigned long)&we, VIOVERSION << 16,
273 ((u64)unit << 48) | ((u64)flags<< 32), 273 ((u64)unit << 48) | ((u64)flags<< 32),
274 0, 0, 0); 274 0, 0, 0);
275 if (hvrc != 0) { 275 if (hvrc != 0) {
276 printk(KERN_WARNING "probe_disk: bad rc on HV open %d\n", 276 printk(KERN_WARNING "probe_disk: bad rc on HV open %d\n",
277 (int)hvrc); 277 (int)hvrc);
278 return; 278 return;
279 } 279 }
280 280
281 wait_for_completion(&we.com); 281 wait_for_completion(&we.com);
282 282
283 if (we.rc != 0) { 283 if (we.rc != 0) {
284 if (flags != 0) 284 if (flags != 0)
285 return; 285 return;
286 /* try again with read only flag set */ 286 /* try again with read only flag set */
287 flags = vioblockflags_ro; 287 flags = vioblockflags_ro;
288 goto retry; 288 goto retry;
289 } 289 }
290 290
291 /* Send the close event to OS/400. We DON'T expect a response */ 291 /* Send the close event to OS/400. We DON'T expect a response */
292 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, 292 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
293 HvLpEvent_Type_VirtualIo, 293 HvLpEvent_Type_VirtualIo,
294 viomajorsubtype_blockio | vioblockclose, 294 viomajorsubtype_blockio | vioblockclose,
295 HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck, 295 HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
296 viopath_sourceinst(viopath_hostLp), 296 viopath_sourceinst(viopath_hostLp),
297 viopath_targetinst(viopath_hostLp), 297 viopath_targetinst(viopath_hostLp),
298 0, VIOVERSION << 16, 298 0, VIOVERSION << 16,
299 ((u64)unit << 48) | ((u64)flags << 32), 299 ((u64)unit << 48) | ((u64)flags << 32),
300 0, 0, 0); 300 0, 0, 0);
301 if (hvrc != 0) { 301 if (hvrc != 0) {
302 printk(KERN_WARNING "probe_disk: " 302 printk(KERN_WARNING "probe_disk: "
303 "bad rc sending event to OS/400 %d\n", (int)hvrc); 303 "bad rc sending event to OS/400 %d\n", (int)hvrc);
304 return; 304 return;
305 } 305 }
306 306
307 do_device_node(vio_root, "viodasd", FIRST_VIODASD + unit, unit, 307 do_device_node(vio_root, "viodasd", FIRST_VIODASD + unit, unit,
308 "block", "IBM,iSeries-viodasd", NULL); 308 "block", "IBM,iSeries-viodasd", NULL);
309 } 309 }
310 310
311 static void __init get_viodasd_info(struct device_node *vio_root) 311 static void __init get_viodasd_info(struct device_node *vio_root)
312 { 312 {
313 int rc; 313 int rc;
314 u32 unit; 314 u32 unit;
315 315
316 rc = viopath_open(viopath_hostLp, viomajorsubtype_blockio, 2); 316 rc = viopath_open(viopath_hostLp, viomajorsubtype_blockio, 2);
317 if (rc) { 317 if (rc) {
318 printk(KERN_WARNING "get_viodasd_info: " 318 printk(KERN_WARNING "get_viodasd_info: "
319 "error opening path to host partition %d\n", 319 "error opening path to host partition %d\n",
320 viopath_hostLp); 320 viopath_hostLp);
321 return; 321 return;
322 } 322 }
323 323
324 /* Initialize our request handler */ 324 /* Initialize our request handler */
325 vio_setHandler(viomajorsubtype_blockio, handle_block_event); 325 vio_setHandler(viomajorsubtype_blockio, handle_block_event);
326 326
327 for (unit = 0; unit < HVMAXARCHITECTEDVIRTUALDISKS; unit++) 327 for (unit = 0; unit < HVMAXARCHITECTEDVIRTUALDISKS; unit++)
328 probe_disk(vio_root, unit); 328 probe_disk(vio_root, unit);
329 329
330 vio_clearHandler(viomajorsubtype_blockio); 330 vio_clearHandler(viomajorsubtype_blockio);
331 viopath_close(viopath_hostLp, viomajorsubtype_blockio, 2); 331 viopath_close(viopath_hostLp, viomajorsubtype_blockio, 2);
332 } 332 }
333 333
334 static void __init handle_cd_event(struct HvLpEvent *event) 334 static void __init handle_cd_event(struct HvLpEvent *event)
335 { 335 {
336 struct viocdlpevent *bevent; 336 struct viocdlpevent *bevent;
337 struct vio_waitevent *pwe; 337 struct vio_waitevent *pwe;
338 338
339 if (!event) 339 if (!event)
340 /* Notification that a partition went away! */ 340 /* Notification that a partition went away! */
341 return; 341 return;
342 342
343 /* First, we should NEVER get an int here...only acks */ 343 /* First, we should NEVER get an int here...only acks */
344 if (hvlpevent_is_int(event)) { 344 if (hvlpevent_is_int(event)) {
345 printk(KERN_WARNING "handle_cd_event: got an unexpected int\n"); 345 printk(KERN_WARNING "handle_cd_event: got an unexpected int\n");
346 if (hvlpevent_need_ack(event)) { 346 if (hvlpevent_need_ack(event)) {
347 event->xRc = HvLpEvent_Rc_InvalidSubtype; 347 event->xRc = HvLpEvent_Rc_InvalidSubtype;
348 HvCallEvent_ackLpEvent(event); 348 HvCallEvent_ackLpEvent(event);
349 } 349 }
350 return; 350 return;
351 } 351 }
352 352
353 bevent = (struct viocdlpevent *)event; 353 bevent = (struct viocdlpevent *)event;
354 354
355 switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) { 355 switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
356 case viocdgetinfo: 356 case viocdgetinfo:
357 pwe = (struct vio_waitevent *)event->xCorrelationToken; 357 pwe = (struct vio_waitevent *)event->xCorrelationToken;
358 pwe->rc = event->xRc; 358 pwe->rc = event->xRc;
359 pwe->sub_result = bevent->sub_result; 359 pwe->sub_result = bevent->sub_result;
360 complete(&pwe->com); 360 complete(&pwe->com);
361 break; 361 break;
362 362
363 default: 363 default:
364 printk(KERN_WARNING "handle_cd_event: " 364 printk(KERN_WARNING "handle_cd_event: "
365 "message with unexpected subtype %0x04X!\n", 365 "message with unexpected subtype %0x04X!\n",
366 event->xSubtype & VIOMINOR_SUBTYPE_MASK); 366 event->xSubtype & VIOMINOR_SUBTYPE_MASK);
367 if (hvlpevent_need_ack(event)) { 367 if (hvlpevent_need_ack(event)) {
368 event->xRc = HvLpEvent_Rc_InvalidSubtype; 368 event->xRc = HvLpEvent_Rc_InvalidSubtype;
369 HvCallEvent_ackLpEvent(event); 369 HvCallEvent_ackLpEvent(event);
370 } 370 }
371 } 371 }
372 } 372 }
373 373
374 static void __init get_viocd_info(struct device_node *vio_root) 374 static void __init get_viocd_info(struct device_node *vio_root)
375 { 375 {
376 HvLpEvent_Rc hvrc; 376 HvLpEvent_Rc hvrc;
377 u32 unit; 377 u32 unit;
378 struct vio_waitevent we; 378 struct vio_waitevent we;
379 struct vio_resource *unitinfo; 379 struct vio_resource *unitinfo;
380 dma_addr_t unitinfo_dmaaddr; 380 dma_addr_t unitinfo_dmaaddr;
381 int ret; 381 int ret;
382 382
383 ret = viopath_open(viopath_hostLp, viomajorsubtype_cdio, 2); 383 ret = viopath_open(viopath_hostLp, viomajorsubtype_cdio, 2);
384 if (ret) { 384 if (ret) {
385 printk(KERN_WARNING 385 printk(KERN_WARNING
386 "get_viocd_info: error opening path to host partition %d\n", 386 "get_viocd_info: error opening path to host partition %d\n",
387 viopath_hostLp); 387 viopath_hostLp);
388 return; 388 return;
389 } 389 }
390 390
391 /* Initialize our request handler */ 391 /* Initialize our request handler */
392 vio_setHandler(viomajorsubtype_cdio, handle_cd_event); 392 vio_setHandler(viomajorsubtype_cdio, handle_cd_event);
393 393
394 unitinfo = iseries_hv_alloc( 394 unitinfo = iseries_hv_alloc(
395 sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALCDROMS, 395 sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALCDROMS,
396 &unitinfo_dmaaddr, GFP_ATOMIC); 396 &unitinfo_dmaaddr, GFP_ATOMIC);
397 if (!unitinfo) { 397 if (!unitinfo) {
398 printk(KERN_WARNING 398 printk(KERN_WARNING
399 "get_viocd_info: error allocating unitinfo\n"); 399 "get_viocd_info: error allocating unitinfo\n");
400 goto clear_handler; 400 goto clear_handler;
401 } 401 }
402 402
403 memset(unitinfo, 0, sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALCDROMS); 403 memset(unitinfo, 0, sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALCDROMS);
404 404
405 init_completion(&we.com); 405 init_completion(&we.com);
406 406
407 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, 407 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
408 HvLpEvent_Type_VirtualIo, 408 HvLpEvent_Type_VirtualIo,
409 viomajorsubtype_cdio | viocdgetinfo, 409 viomajorsubtype_cdio | viocdgetinfo,
410 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck, 410 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
411 viopath_sourceinst(viopath_hostLp), 411 viopath_sourceinst(viopath_hostLp),
412 viopath_targetinst(viopath_hostLp), 412 viopath_targetinst(viopath_hostLp),
413 (u64)&we, VIOVERSION << 16, unitinfo_dmaaddr, 0, 413 (u64)&we, VIOVERSION << 16, unitinfo_dmaaddr, 0,
414 sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALCDROMS, 0); 414 sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALCDROMS, 0);
415 if (hvrc != HvLpEvent_Rc_Good) { 415 if (hvrc != HvLpEvent_Rc_Good) {
416 printk(KERN_WARNING 416 printk(KERN_WARNING
417 "get_viocd_info: cdrom error sending event. rc %d\n", 417 "get_viocd_info: cdrom error sending event. rc %d\n",
418 (int)hvrc); 418 (int)hvrc);
419 goto hv_free; 419 goto hv_free;
420 } 420 }
421 421
422 wait_for_completion(&we.com); 422 wait_for_completion(&we.com);
423 423
424 if (we.rc) { 424 if (we.rc) {
425 printk(KERN_WARNING "get_viocd_info: bad rc %d:0x%04X\n", 425 printk(KERN_WARNING "get_viocd_info: bad rc %d:0x%04X\n",
426 we.rc, we.sub_result); 426 we.rc, we.sub_result);
427 goto hv_free; 427 goto hv_free;
428 } 428 }
429 429
430 for (unit = 0; (unit < HVMAXARCHITECTEDVIRTUALCDROMS) && 430 for (unit = 0; (unit < HVMAXARCHITECTEDVIRTUALCDROMS) &&
431 unitinfo[unit].rsrcname[0]; unit++) { 431 unitinfo[unit].rsrcname[0]; unit++) {
432 if (!do_device_node(vio_root, "viocd", FIRST_VIOCD + unit, unit, 432 if (!do_device_node(vio_root, "viocd", FIRST_VIOCD + unit, unit,
433 "block", "IBM,iSeries-viocd", &unitinfo[unit])) 433 "block", "IBM,iSeries-viocd", &unitinfo[unit]))
434 break; 434 break;
435 } 435 }
436 436
437 hv_free: 437 hv_free:
438 iseries_hv_free(sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALCDROMS, 438 iseries_hv_free(sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALCDROMS,
439 unitinfo, unitinfo_dmaaddr); 439 unitinfo, unitinfo_dmaaddr);
440 clear_handler: 440 clear_handler:
441 vio_clearHandler(viomajorsubtype_cdio); 441 vio_clearHandler(viomajorsubtype_cdio);
442 viopath_close(viopath_hostLp, viomajorsubtype_cdio, 2); 442 viopath_close(viopath_hostLp, viomajorsubtype_cdio, 2);
443 } 443 }
444 444
445 /* Handle interrupt events for tape */ 445 /* Handle interrupt events for tape */
446 static void __init handle_tape_event(struct HvLpEvent *event) 446 static void __init handle_tape_event(struct HvLpEvent *event)
447 { 447 {
448 struct vio_waitevent *we; 448 struct vio_waitevent *we;
449 struct viotapelpevent *tevent = (struct viotapelpevent *)event; 449 struct viotapelpevent *tevent = (struct viotapelpevent *)event;
450 450
451 if (event == NULL) 451 if (event == NULL)
452 /* Notification that a partition went away! */ 452 /* Notification that a partition went away! */
453 return; 453 return;
454 454
455 we = (struct vio_waitevent *)event->xCorrelationToken; 455 we = (struct vio_waitevent *)event->xCorrelationToken;
456 switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) { 456 switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
457 case viotapegetinfo: 457 case viotapegetinfo:
458 we->rc = tevent->sub_type_result; 458 we->rc = tevent->sub_type_result;
459 complete(&we->com); 459 complete(&we->com);
460 break; 460 break;
461 default: 461 default:
462 printk(KERN_WARNING "handle_tape_event: weird ack\n"); 462 printk(KERN_WARNING "handle_tape_event: weird ack\n");
463 } 463 }
464 } 464 }
465 465
466 static void __init get_viotape_info(struct device_node *vio_root) 466 static void __init get_viotape_info(struct device_node *vio_root)
467 { 467 {
468 HvLpEvent_Rc hvrc; 468 HvLpEvent_Rc hvrc;
469 u32 unit; 469 u32 unit;
470 struct vio_resource *unitinfo; 470 struct vio_resource *unitinfo;
471 dma_addr_t unitinfo_dmaaddr; 471 dma_addr_t unitinfo_dmaaddr;
472 size_t len = sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALTAPES; 472 size_t len = sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALTAPES;
473 struct vio_waitevent we; 473 struct vio_waitevent we;
474 int ret; 474 int ret;
475 475
476 init_completion(&we.com); 476 init_completion(&we.com);
477 477
478 ret = viopath_open(viopath_hostLp, viomajorsubtype_tape, 2); 478 ret = viopath_open(viopath_hostLp, viomajorsubtype_tape, 2);
479 if (ret) { 479 if (ret) {
480 printk(KERN_WARNING "get_viotape_info: " 480 printk(KERN_WARNING "get_viotape_info: "
481 "error on viopath_open to hostlp %d\n", ret); 481 "error on viopath_open to hostlp %d\n", ret);
482 return; 482 return;
483 } 483 }
484 484
485 vio_setHandler(viomajorsubtype_tape, handle_tape_event); 485 vio_setHandler(viomajorsubtype_tape, handle_tape_event);
486 486
487 unitinfo = iseries_hv_alloc(len, &unitinfo_dmaaddr, GFP_ATOMIC); 487 unitinfo = iseries_hv_alloc(len, &unitinfo_dmaaddr, GFP_ATOMIC);
488 if (!unitinfo) 488 if (!unitinfo)
489 goto clear_handler; 489 goto clear_handler;
490 490
491 memset(unitinfo, 0, len); 491 memset(unitinfo, 0, len);
492 492
493 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, 493 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
494 HvLpEvent_Type_VirtualIo, 494 HvLpEvent_Type_VirtualIo,
495 viomajorsubtype_tape | viotapegetinfo, 495 viomajorsubtype_tape | viotapegetinfo,
496 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck, 496 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
497 viopath_sourceinst(viopath_hostLp), 497 viopath_sourceinst(viopath_hostLp),
498 viopath_targetinst(viopath_hostLp), 498 viopath_targetinst(viopath_hostLp),
499 (u64)(unsigned long)&we, VIOVERSION << 16, 499 (u64)(unsigned long)&we, VIOVERSION << 16,
500 unitinfo_dmaaddr, len, 0, 0); 500 unitinfo_dmaaddr, len, 0, 0);
501 if (hvrc != HvLpEvent_Rc_Good) { 501 if (hvrc != HvLpEvent_Rc_Good) {
502 printk(KERN_WARNING "get_viotape_info: hv error on op %d\n", 502 printk(KERN_WARNING "get_viotape_info: hv error on op %d\n",
503 (int)hvrc); 503 (int)hvrc);
504 goto hv_free; 504 goto hv_free;
505 } 505 }
506 506
507 wait_for_completion(&we.com); 507 wait_for_completion(&we.com);
508 508
509 for (unit = 0; (unit < HVMAXARCHITECTEDVIRTUALTAPES) && 509 for (unit = 0; (unit < HVMAXARCHITECTEDVIRTUALTAPES) &&
510 unitinfo[unit].rsrcname[0]; unit++) { 510 unitinfo[unit].rsrcname[0]; unit++) {
511 if (!do_device_node(vio_root, "viotape", FIRST_VIOTAPE + unit, 511 if (!do_device_node(vio_root, "viotape", FIRST_VIOTAPE + unit,
512 unit, "byte", "IBM,iSeries-viotape", 512 unit, "byte", "IBM,iSeries-viotape",
513 &unitinfo[unit])) 513 &unitinfo[unit]))
514 break; 514 break;
515 } 515 }
516 516
517 hv_free: 517 hv_free:
518 iseries_hv_free(len, unitinfo, unitinfo_dmaaddr); 518 iseries_hv_free(len, unitinfo, unitinfo_dmaaddr);
519 clear_handler: 519 clear_handler:
520 vio_clearHandler(viomajorsubtype_tape); 520 vio_clearHandler(viomajorsubtype_tape);
521 viopath_close(viopath_hostLp, viomajorsubtype_tape, 2); 521 viopath_close(viopath_hostLp, viomajorsubtype_tape, 2);
522 } 522 }
523 523
524 static int __init iseries_vio_init(void) 524 static int __init iseries_vio_init(void)
525 { 525 {
526 struct device_node *vio_root; 526 struct device_node *vio_root;
527 int ret = -ENODEV; 527 int ret = -ENODEV;
528 528
529 if (!firmware_has_feature(FW_FEATURE_ISERIES)) 529 if (!firmware_has_feature(FW_FEATURE_ISERIES))
530 goto out; 530 goto out;
531 531
532 iommu_vio_init(); 532 iommu_vio_init();
533 533
534 vio_root = of_find_node_by_path("/vdevice"); 534 vio_root = of_find_node_by_path("/vdevice");
535 if (!vio_root) 535 if (!vio_root)
536 goto out; 536 goto out;
537 537
538 if (viopath_hostLp == HvLpIndexInvalid) { 538 if (viopath_hostLp == HvLpIndexInvalid) {
539 vio_set_hostlp(); 539 vio_set_hostlp();
540 /* If we don't have a host, bail out */ 540 /* If we don't have a host, bail out */
541 if (viopath_hostLp == HvLpIndexInvalid) 541 if (viopath_hostLp == HvLpIndexInvalid)
542 goto put_node; 542 goto put_node;
543 } 543 }
544 544
545 get_viodasd_info(vio_root); 545 get_viodasd_info(vio_root);
546 get_viocd_info(vio_root); 546 get_viocd_info(vio_root);
547 get_viotape_info(vio_root); 547 get_viotape_info(vio_root);
548 548
549 ret = 0; 549 ret = 0;
550 550
551 put_node: 551 put_node:
552 of_node_put(vio_root); 552 of_node_put(vio_root);
553 out: 553 out:
554 return ret; 554 return ret;
555 } 555 }
556 arch_initcall(iseries_vio_init); 556 arch_initcall(iseries_vio_init);
557 557
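vio_create_viodasd() above is exported with EXPORT_SYMBOL_GPL so that a virtual disk's device-tree node can be created and registered on the vio bus on demand. A hedged usage sketch follows; probe_new_disk() is a hypothetical caller and the error code is illustrative.

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/vio.h>
#include <asm/iseries/vio.h>

/* Hypothetical caller: hot-add one viodasd unit. */
static int probe_new_disk(u32 unit)
{
	struct vio_dev *vdev = vio_create_viodasd(unit);

	if (!vdev)
		return -ENODEV;
	/* vdev is now registered on the vio bus; a matching driver's
	 * probe routine can bind to it from here on. */
	return 0;
}
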
arch/powerpc/platforms/iseries/viopath.c
1 /* -*- linux-c -*- 1 /* -*- linux-c -*-
2 * 2 *
3 * iSeries Virtual I/O Message Path code 3 * iSeries Virtual I/O Message Path code
4 * 4 *
5 * Authors: Dave Boutcher <boutcher@us.ibm.com> 5 * Authors: Dave Boutcher <boutcher@us.ibm.com>
6 * Ryan Arnold <ryanarn@us.ibm.com> 6 * Ryan Arnold <ryanarn@us.ibm.com>
7 * Colin Devilbiss <devilbis@us.ibm.com> 7 * Colin Devilbiss <devilbis@us.ibm.com>
8 * 8 *
9 * (C) Copyright 2000-2005 IBM Corporation 9 * (C) Copyright 2000-2005 IBM Corporation
10 * 10 *
11 * This code is used by the iSeries virtual disk, cd, 11 * This code is used by the iSeries virtual disk, cd,
12 * tape, and console to communicate with OS/400 in another 12 * tape, and console to communicate with OS/400 in another
13 * partition. 13 * partition.
14 * 14 *
15 * This program is free software; you can redistribute it and/or 15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as 16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of the 17 * published by the Free Software Foundation; either version 2 of the
18 * License, or (at your option) any later version. 18 * License, or (at your option) any later version.
19 * 19 *
20 * This program is distributed in the hope that it will be useful, but 20 * This program is distributed in the hope that it will be useful, but
21 * WITHOUT ANY WARRANTY; without even the implied warranty of 21 * WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 * General Public License for more details. 23 * General Public License for more details.
24 * 24 *
25 * You should have received a copy of the GNU General Public License 25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software Foundation, 26 * along with this program; if not, write to the Free Software Foundation,
27 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 27 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 * 28 *
29 */ 29 */
30 #include <linux/module.h> 30 #include <linux/export.h>
31 #include <linux/kernel.h> 31 #include <linux/kernel.h>
32 #include <linux/slab.h> 32 #include <linux/slab.h>
33 #include <linux/errno.h> 33 #include <linux/errno.h>
34 #include <linux/vmalloc.h> 34 #include <linux/vmalloc.h>
35 #include <linux/string.h> 35 #include <linux/string.h>
36 #include <linux/proc_fs.h> 36 #include <linux/proc_fs.h>
37 #include <linux/dma-mapping.h> 37 #include <linux/dma-mapping.h>
38 #include <linux/wait.h> 38 #include <linux/wait.h>
39 #include <linux/seq_file.h> 39 #include <linux/seq_file.h>
40 #include <linux/interrupt.h> 40 #include <linux/interrupt.h>
41 #include <linux/completion.h> 41 #include <linux/completion.h>
42 42
43 #include <asm/system.h> 43 #include <asm/system.h>
44 #include <asm/uaccess.h> 44 #include <asm/uaccess.h>
45 #include <asm/prom.h> 45 #include <asm/prom.h>
46 #include <asm/firmware.h> 46 #include <asm/firmware.h>
47 #include <asm/iseries/hv_types.h> 47 #include <asm/iseries/hv_types.h>
48 #include <asm/iseries/hv_lp_event.h> 48 #include <asm/iseries/hv_lp_event.h>
49 #include <asm/iseries/hv_lp_config.h> 49 #include <asm/iseries/hv_lp_config.h>
50 #include <asm/iseries/mf.h> 50 #include <asm/iseries/mf.h>
51 #include <asm/iseries/vio.h> 51 #include <asm/iseries/vio.h>
52 52
53 /* Status of the path to each other partition in the system. 53 /* Status of the path to each other partition in the system.
54 * This is overkill, since we will only ever establish connections 54 * This is overkill, since we will only ever establish connections
55 * to our hosting partition and the primary partition on the system. 55 * to our hosting partition and the primary partition on the system.
56 * But this allows for other support in the future. 56 * But this allows for other support in the future.
57 */ 57 */
58 static struct viopathStatus { 58 static struct viopathStatus {
59 int isOpen; /* Did we open the path? */ 59 int isOpen; /* Did we open the path? */
60 int isActive; /* Do we have a mon msg outstanding */ 60 int isActive; /* Do we have a mon msg outstanding */
61 int users[VIO_MAX_SUBTYPES]; 61 int users[VIO_MAX_SUBTYPES];
62 HvLpInstanceId mSourceInst; 62 HvLpInstanceId mSourceInst;
63 HvLpInstanceId mTargetInst; 63 HvLpInstanceId mTargetInst;
64 int numberAllocated; 64 int numberAllocated;
65 } viopathStatus[HVMAXARCHITECTEDLPS]; 65 } viopathStatus[HVMAXARCHITECTEDLPS];
66 66
67 static DEFINE_SPINLOCK(statuslock); 67 static DEFINE_SPINLOCK(statuslock);
68 68
69 /* 69 /*
70 * For each kind of event we allocate a buffer that is 70 * For each kind of event we allocate a buffer that is
71 * guaranteed not to cross a page boundary 71 * guaranteed not to cross a page boundary
72 */ 72 */
73 static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256] 73 static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256]
74 __attribute__((__aligned__(4096))); 74 __attribute__((__aligned__(4096)));
75 static atomic_t event_buffer_available[VIO_MAX_SUBTYPES]; 75 static atomic_t event_buffer_available[VIO_MAX_SUBTYPES];
76 static int event_buffer_initialised; 76 static int event_buffer_initialised;
77 77
78 static void handleMonitorEvent(struct HvLpEvent *event); 78 static void handleMonitorEvent(struct HvLpEvent *event);
79 79
80 /* 80 /*
81 * We use this structure to handle asynchronous responses. The caller 81 * We use this structure to handle asynchronous responses. The caller
82 * blocks on the semaphore and the handler posts the semaphore. However, 82 * blocks on the semaphore and the handler posts the semaphore. However,
83 * if system_state is not SYSTEM_RUNNING, then wait_atomic is used ... 83 * if system_state is not SYSTEM_RUNNING, then wait_atomic is used ...
84 */ 84 */
85 struct alloc_parms { 85 struct alloc_parms {
86 struct completion done; 86 struct completion done;
87 int number; 87 int number;
88 atomic_t wait_atomic; 88 atomic_t wait_atomic;
89 int used_wait_atomic; 89 int used_wait_atomic;
90 }; 90 };
91 91
92 /* Put a sequence number in each mon msg. The value is not 92 /* Put a sequence number in each mon msg. The value is not
93 * important. Start at something other than 0 just for 93 * important. Start at something other than 0 just for
94 * readability. wrapping this is ok. 94 * readability. wrapping this is ok.
95 */ 95 */
96 static u8 viomonseq = 22; 96 static u8 viomonseq = 22;
97 97
98 /* Our hosting logical partition. We get this at startup 98 /* Our hosting logical partition. We get this at startup
99 * time, and different modules access this variable directly. 99 * time, and different modules access this variable directly.
100 */ 100 */
101 HvLpIndex viopath_hostLp = HvLpIndexInvalid; 101 HvLpIndex viopath_hostLp = HvLpIndexInvalid;
102 EXPORT_SYMBOL(viopath_hostLp); 102 EXPORT_SYMBOL(viopath_hostLp);
103 HvLpIndex viopath_ourLp = HvLpIndexInvalid; 103 HvLpIndex viopath_ourLp = HvLpIndexInvalid;
104 EXPORT_SYMBOL(viopath_ourLp); 104 EXPORT_SYMBOL(viopath_ourLp);
105 105
106 /* For each kind of incoming event we set a pointer to a 106 /* For each kind of incoming event we set a pointer to a
107 * routine to call. 107 * routine to call.
108 */ 108 */
109 static vio_event_handler_t *vio_handler[VIO_MAX_SUBTYPES]; 109 static vio_event_handler_t *vio_handler[VIO_MAX_SUBTYPES];
110 110
111 #define VIOPATH_KERN_WARN KERN_WARNING "viopath: " 111 #define VIOPATH_KERN_WARN KERN_WARNING "viopath: "
112 #define VIOPATH_KERN_INFO KERN_INFO "viopath: " 112 #define VIOPATH_KERN_INFO KERN_INFO "viopath: "
113 113
114 static int proc_viopath_show(struct seq_file *m, void *v) 114 static int proc_viopath_show(struct seq_file *m, void *v)
115 { 115 {
116 char *buf; 116 char *buf;
117 u16 vlanMap; 117 u16 vlanMap;
118 dma_addr_t handle; 118 dma_addr_t handle;
119 HvLpEvent_Rc hvrc; 119 HvLpEvent_Rc hvrc;
120 DECLARE_COMPLETION_ONSTACK(done); 120 DECLARE_COMPLETION_ONSTACK(done);
121 struct device_node *node; 121 struct device_node *node;
122 const char *sysid; 122 const char *sysid;
123 123
124 buf = kzalloc(HW_PAGE_SIZE, GFP_KERNEL); 124 buf = kzalloc(HW_PAGE_SIZE, GFP_KERNEL);
125 if (!buf) 125 if (!buf)
126 return 0; 126 return 0;
127 127
128 handle = iseries_hv_map(buf, HW_PAGE_SIZE, DMA_FROM_DEVICE); 128 handle = iseries_hv_map(buf, HW_PAGE_SIZE, DMA_FROM_DEVICE);
129 129
130 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, 130 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
131 HvLpEvent_Type_VirtualIo, 131 HvLpEvent_Type_VirtualIo,
132 viomajorsubtype_config | vioconfigget, 132 viomajorsubtype_config | vioconfigget,
133 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck, 133 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
134 viopath_sourceinst(viopath_hostLp), 134 viopath_sourceinst(viopath_hostLp),
135 viopath_targetinst(viopath_hostLp), 135 viopath_targetinst(viopath_hostLp),
136 (u64)(unsigned long)&done, VIOVERSION << 16, 136 (u64)(unsigned long)&done, VIOVERSION << 16,
137 ((u64)handle) << 32, HW_PAGE_SIZE, 0, 0); 137 ((u64)handle) << 32, HW_PAGE_SIZE, 0, 0);
138 138
139 if (hvrc != HvLpEvent_Rc_Good) 139 if (hvrc != HvLpEvent_Rc_Good)
140 printk(VIOPATH_KERN_WARN "hv error on op %d\n", (int)hvrc); 140 printk(VIOPATH_KERN_WARN "hv error on op %d\n", (int)hvrc);
141 141
142 wait_for_completion(&done); 142 wait_for_completion(&done);
143 143
144 vlanMap = HvLpConfig_getVirtualLanIndexMap(); 144 vlanMap = HvLpConfig_getVirtualLanIndexMap();
145 145
146 buf[HW_PAGE_SIZE-1] = '\0'; 146 buf[HW_PAGE_SIZE-1] = '\0';
147 seq_printf(m, "%s", buf); 147 seq_printf(m, "%s", buf);
148 148
149 iseries_hv_unmap(handle, HW_PAGE_SIZE, DMA_FROM_DEVICE); 149 iseries_hv_unmap(handle, HW_PAGE_SIZE, DMA_FROM_DEVICE);
150 kfree(buf); 150 kfree(buf);
151 151
152 seq_printf(m, "AVAILABLE_VETH=%x\n", vlanMap); 152 seq_printf(m, "AVAILABLE_VETH=%x\n", vlanMap);
153 153
154 node = of_find_node_by_path("/"); 154 node = of_find_node_by_path("/");
155 sysid = NULL; 155 sysid = NULL;
156 if (node != NULL) 156 if (node != NULL)
157 sysid = of_get_property(node, "system-id", NULL); 157 sysid = of_get_property(node, "system-id", NULL);
158 158
159 if (sysid == NULL) 159 if (sysid == NULL)
160 seq_printf(m, "SRLNBR=<UNKNOWN>\n"); 160 seq_printf(m, "SRLNBR=<UNKNOWN>\n");
161 else 161 else
162 /* Skip "IBM," on front of serial number, see dt.c */ 162 /* Skip "IBM," on front of serial number, see dt.c */
163 seq_printf(m, "SRLNBR=%s\n", sysid + 4); 163 seq_printf(m, "SRLNBR=%s\n", sysid + 4);
164 164
165 of_node_put(node); 165 of_node_put(node);
166 166
167 return 0; 167 return 0;
168 } 168 }
169 169
170 static int proc_viopath_open(struct inode *inode, struct file *file) 170 static int proc_viopath_open(struct inode *inode, struct file *file)
171 { 171 {
172 return single_open(file, proc_viopath_show, NULL); 172 return single_open(file, proc_viopath_show, NULL);
173 } 173 }
174 174
175 static const struct file_operations proc_viopath_operations = { 175 static const struct file_operations proc_viopath_operations = {
176 .open = proc_viopath_open, 176 .open = proc_viopath_open,
177 .read = seq_read, 177 .read = seq_read,
178 .llseek = seq_lseek, 178 .llseek = seq_lseek,
179 .release = single_release, 179 .release = single_release,
180 }; 180 };
181 181
182 static int __init vio_proc_init(void) 182 static int __init vio_proc_init(void)
183 { 183 {
184 if (!firmware_has_feature(FW_FEATURE_ISERIES)) 184 if (!firmware_has_feature(FW_FEATURE_ISERIES))
185 return 0; 185 return 0;
186 186
187 proc_create("iSeries/config", 0, NULL, &proc_viopath_operations); 187 proc_create("iSeries/config", 0, NULL, &proc_viopath_operations);
188 return 0; 188 return 0;
189 } 189 }
190 __initcall(vio_proc_init); 190 __initcall(vio_proc_init);
191 191
192 /* See if a given LP is active. Allow for invalid lps to be passed in 192 /* See if a given LP is active. Allow for invalid lps to be passed in
193 * and just return invalid 193 * and just return invalid
194 */ 194 */
195 int viopath_isactive(HvLpIndex lp) 195 int viopath_isactive(HvLpIndex lp)
196 { 196 {
197 if (lp == HvLpIndexInvalid) 197 if (lp == HvLpIndexInvalid)
198 return 0; 198 return 0;
199 if (lp < HVMAXARCHITECTEDLPS) 199 if (lp < HVMAXARCHITECTEDLPS)
200 return viopathStatus[lp].isActive; 200 return viopathStatus[lp].isActive;
201 else 201 else
202 return 0; 202 return 0;
203 } 203 }
204 EXPORT_SYMBOL(viopath_isactive); 204 EXPORT_SYMBOL(viopath_isactive);
205 205
206 /* 206 /*
207 * We cache the source and target instance ids for each 207 * We cache the source and target instance ids for each
208 * partition. 208 * partition.
209 */ 209 */
210 HvLpInstanceId viopath_sourceinst(HvLpIndex lp) 210 HvLpInstanceId viopath_sourceinst(HvLpIndex lp)
211 { 211 {
212 return viopathStatus[lp].mSourceInst; 212 return viopathStatus[lp].mSourceInst;
213 } 213 }
214 EXPORT_SYMBOL(viopath_sourceinst); 214 EXPORT_SYMBOL(viopath_sourceinst);
215 215
216 HvLpInstanceId viopath_targetinst(HvLpIndex lp) 216 HvLpInstanceId viopath_targetinst(HvLpIndex lp)
217 { 217 {
218 return viopathStatus[lp].mTargetInst; 218 return viopathStatus[lp].mTargetInst;
219 } 219 }
220 EXPORT_SYMBOL(viopath_targetinst); 220 EXPORT_SYMBOL(viopath_targetinst);
221 221
222 /* 222 /*
223 * Send a monitor message. This is a message with the acknowledge 223 * Send a monitor message. This is a message with the acknowledge
224 * bit on that the other side will NOT explicitly acknowledge. When 224 * bit on that the other side will NOT explicitly acknowledge. When
225 * the other side goes down, the hypervisor will acknowledge any 225 * the other side goes down, the hypervisor will acknowledge any
226 * outstanding messages....so we will know when the other side dies. 226 * outstanding messages....so we will know when the other side dies.
227 */ 227 */
228 static void sendMonMsg(HvLpIndex remoteLp) 228 static void sendMonMsg(HvLpIndex remoteLp)
229 { 229 {
230 HvLpEvent_Rc hvrc; 230 HvLpEvent_Rc hvrc;
231 231
232 viopathStatus[remoteLp].mSourceInst = 232 viopathStatus[remoteLp].mSourceInst =
233 HvCallEvent_getSourceLpInstanceId(remoteLp, 233 HvCallEvent_getSourceLpInstanceId(remoteLp,
234 HvLpEvent_Type_VirtualIo); 234 HvLpEvent_Type_VirtualIo);
235 viopathStatus[remoteLp].mTargetInst = 235 viopathStatus[remoteLp].mTargetInst =
236 HvCallEvent_getTargetLpInstanceId(remoteLp, 236 HvCallEvent_getTargetLpInstanceId(remoteLp,
237 HvLpEvent_Type_VirtualIo); 237 HvLpEvent_Type_VirtualIo);
238 238
239 /* 239 /*
240 * Deliberately ignore the return code here. if we call this 240 * Deliberately ignore the return code here. if we call this
241 * more than once, we don't care. 241 * more than once, we don't care.
242 */ 242 */
243 vio_setHandler(viomajorsubtype_monitor, handleMonitorEvent); 243 vio_setHandler(viomajorsubtype_monitor, handleMonitorEvent);
244 244
245 hvrc = HvCallEvent_signalLpEventFast(remoteLp, HvLpEvent_Type_VirtualIo, 245 hvrc = HvCallEvent_signalLpEventFast(remoteLp, HvLpEvent_Type_VirtualIo,
246 viomajorsubtype_monitor, HvLpEvent_AckInd_DoAck, 246 viomajorsubtype_monitor, HvLpEvent_AckInd_DoAck,
247 HvLpEvent_AckType_DeferredAck, 247 HvLpEvent_AckType_DeferredAck,
248 viopathStatus[remoteLp].mSourceInst, 248 viopathStatus[remoteLp].mSourceInst,
249 viopathStatus[remoteLp].mTargetInst, 249 viopathStatus[remoteLp].mTargetInst,
250 viomonseq++, 0, 0, 0, 0, 0); 250 viomonseq++, 0, 0, 0, 0, 0);
251 251
252 if (hvrc == HvLpEvent_Rc_Good) 252 if (hvrc == HvLpEvent_Rc_Good)
253 viopathStatus[remoteLp].isActive = 1; 253 viopathStatus[remoteLp].isActive = 1;
254 else { 254 else {
255 printk(VIOPATH_KERN_WARN "could not connect to partition %d\n", 255 printk(VIOPATH_KERN_WARN "could not connect to partition %d\n",
256 remoteLp); 256 remoteLp);
257 viopathStatus[remoteLp].isActive = 0; 257 viopathStatus[remoteLp].isActive = 0;
258 } 258 }
259 } 259 }
260 260
261 static void handleMonitorEvent(struct HvLpEvent *event) 261 static void handleMonitorEvent(struct HvLpEvent *event)
262 { 262 {
263 HvLpIndex remoteLp; 263 HvLpIndex remoteLp;
264 int i; 264 int i;
265 265
266 /* 266 /*
267 * This handler is _also_ called as part of the loop 267 * This handler is _also_ called as part of the loop
268 * at the end of this routine, so it must be able to 268 * at the end of this routine, so it must be able to
269 * ignore NULL events... 269 * ignore NULL events...
270 */ 270 */
271 if (!event) 271 if (!event)
272 return; 272 return;
273 273
274 /* 274 /*
275 * First see if this is just a normal monitor message from the 275 * First see if this is just a normal monitor message from the
276 * other partition 276 * other partition
277 */ 277 */
278 if (hvlpevent_is_int(event)) { 278 if (hvlpevent_is_int(event)) {
279 remoteLp = event->xSourceLp; 279 remoteLp = event->xSourceLp;
280 if (!viopathStatus[remoteLp].isActive) 280 if (!viopathStatus[remoteLp].isActive)
281 sendMonMsg(remoteLp); 281 sendMonMsg(remoteLp);
282 return; 282 return;
283 } 283 }
284 284
285 /* 285 /*
286 * This path is for an acknowledgement; the other partition 286 * This path is for an acknowledgement; the other partition
287 * died 287 * died
288 */ 288 */
289 remoteLp = event->xTargetLp; 289 remoteLp = event->xTargetLp;
290 if ((event->xSourceInstanceId != viopathStatus[remoteLp].mSourceInst) || 290 if ((event->xSourceInstanceId != viopathStatus[remoteLp].mSourceInst) ||
291 (event->xTargetInstanceId != viopathStatus[remoteLp].mTargetInst)) { 291 (event->xTargetInstanceId != viopathStatus[remoteLp].mTargetInst)) {
292 printk(VIOPATH_KERN_WARN "ignoring ack....mismatched instances\n"); 292 printk(VIOPATH_KERN_WARN "ignoring ack....mismatched instances\n");
293 return; 293 return;
294 } 294 }
295 295
296 printk(VIOPATH_KERN_WARN "partition %d ended\n", remoteLp); 296 printk(VIOPATH_KERN_WARN "partition %d ended\n", remoteLp);
297 297
298 viopathStatus[remoteLp].isActive = 0; 298 viopathStatus[remoteLp].isActive = 0;
299 299
300 /* 300 /*
301 * For each active handler, pass them a NULL 301 * For each active handler, pass them a NULL
302 * message to indicate that the other partition 302 * message to indicate that the other partition
303 * died 303 * died
304 */ 304 */
305 for (i = 0; i < VIO_MAX_SUBTYPES; i++) { 305 for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
306 if (vio_handler[i] != NULL) 306 if (vio_handler[i] != NULL)
307 (*vio_handler[i])(NULL); 307 (*vio_handler[i])(NULL);
308 } 308 }
309 } 309 }
310 310
311 int vio_setHandler(int subtype, vio_event_handler_t *beh) 311 int vio_setHandler(int subtype, vio_event_handler_t *beh)
312 { 312 {
313 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT; 313 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
314 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) 314 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
315 return -EINVAL; 315 return -EINVAL;
316 if (vio_handler[subtype] != NULL) 316 if (vio_handler[subtype] != NULL)
317 return -EBUSY; 317 return -EBUSY;
318 vio_handler[subtype] = beh; 318 vio_handler[subtype] = beh;
319 return 0; 319 return 0;
320 } 320 }
321 EXPORT_SYMBOL(vio_setHandler); 321 EXPORT_SYMBOL(vio_setHandler);
322 322
323 int vio_clearHandler(int subtype) 323 int vio_clearHandler(int subtype)
324 { 324 {
325 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT; 325 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
326 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) 326 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
327 return -EINVAL; 327 return -EINVAL;
328 if (vio_handler[subtype] == NULL) 328 if (vio_handler[subtype] == NULL)
329 return -EAGAIN; 329 return -EAGAIN;
330 vio_handler[subtype] = NULL; 330 vio_handler[subtype] = NULL;
331 return 0; 331 return 0;
332 } 332 }
333 EXPORT_SYMBOL(vio_clearHandler); 333 EXPORT_SYMBOL(vio_clearHandler);
334 334
335 static void handleConfig(struct HvLpEvent *event) 335 static void handleConfig(struct HvLpEvent *event)
336 { 336 {
337 if (!event) 337 if (!event)
338 return; 338 return;
339 if (hvlpevent_is_int(event)) { 339 if (hvlpevent_is_int(event)) {
340 printk(VIOPATH_KERN_WARN 340 printk(VIOPATH_KERN_WARN
341 "unexpected config request from partition %d", 341 "unexpected config request from partition %d",
342 event->xSourceLp); 342 event->xSourceLp);
343 343
344 if (hvlpevent_need_ack(event)) { 344 if (hvlpevent_need_ack(event)) {
345 event->xRc = HvLpEvent_Rc_InvalidSubtype; 345 event->xRc = HvLpEvent_Rc_InvalidSubtype;
346 HvCallEvent_ackLpEvent(event); 346 HvCallEvent_ackLpEvent(event);
347 } 347 }
348 return; 348 return;
349 } 349 }
350 350
351 complete((struct completion *)event->xCorrelationToken); 351 complete((struct completion *)event->xCorrelationToken);
352 } 352 }
353 353
354 /* 354 /*
355 * Initialization of the hosting partition 355 * Initialization of the hosting partition
356 */ 356 */
357 void vio_set_hostlp(void) 357 void vio_set_hostlp(void)
358 { 358 {
359 /* 359 /*
360 * If this has already been set then we DON'T want to either change 360 * If this has already been set then we DON'T want to either change
361 * it or re-register the proc file system 361 * it or re-register the proc file system
362 */ 362 */
363 if (viopath_hostLp != HvLpIndexInvalid) 363 if (viopath_hostLp != HvLpIndexInvalid)
364 return; 364 return;
365 365
366 /* 366 /*
367 * Figure out our hosting partition. This isn't allowed to change 367 * Figure out our hosting partition. This isn't allowed to change
368 * while we're active 368 * while we're active
369 */ 369 */
370 viopath_ourLp = HvLpConfig_getLpIndex(); 370 viopath_ourLp = HvLpConfig_getLpIndex();
371 viopath_hostLp = HvLpConfig_getHostingLpIndex(viopath_ourLp); 371 viopath_hostLp = HvLpConfig_getHostingLpIndex(viopath_ourLp);
372 372
373 if (viopath_hostLp != HvLpIndexInvalid) 373 if (viopath_hostLp != HvLpIndexInvalid)
374 vio_setHandler(viomajorsubtype_config, handleConfig); 374 vio_setHandler(viomajorsubtype_config, handleConfig);
375 } 375 }
376 EXPORT_SYMBOL(vio_set_hostlp); 376 EXPORT_SYMBOL(vio_set_hostlp);
377 377
378 static void vio_handleEvent(struct HvLpEvent *event) 378 static void vio_handleEvent(struct HvLpEvent *event)
379 { 379 {
380 HvLpIndex remoteLp; 380 HvLpIndex remoteLp;
381 int subtype = (event->xSubtype & VIOMAJOR_SUBTYPE_MASK) 381 int subtype = (event->xSubtype & VIOMAJOR_SUBTYPE_MASK)
382 >> VIOMAJOR_SUBTYPE_SHIFT; 382 >> VIOMAJOR_SUBTYPE_SHIFT;
383 383
384 if (hvlpevent_is_int(event)) { 384 if (hvlpevent_is_int(event)) {
385 remoteLp = event->xSourceLp; 385 remoteLp = event->xSourceLp;
386 /* 386 /*
387 * The isActive is checked because if the hosting partition 387 * The isActive is checked because if the hosting partition
388 * went down and came back up it would not be active but it 388 * went down and came back up it would not be active but it
389 * would have different source and target instances, in which 389 * would have different source and target instances, in which
390 * case we'd want to reset them. This case really protects 390 * case we'd want to reset them. This case really protects
391 * against an unauthorized active partition sending interrupts 391 * against an unauthorized active partition sending interrupts
392 * or acks to this linux partition. 392 * or acks to this linux partition.
393 */ 393 */
394 if (viopathStatus[remoteLp].isActive 394 if (viopathStatus[remoteLp].isActive
395 && (event->xSourceInstanceId != 395 && (event->xSourceInstanceId !=
396 viopathStatus[remoteLp].mTargetInst)) { 396 viopathStatus[remoteLp].mTargetInst)) {
397 printk(VIOPATH_KERN_WARN 397 printk(VIOPATH_KERN_WARN
398 "message from invalid partition. " 398 "message from invalid partition. "
399 "int msg rcvd, source inst (%d) doesn't match (%d)\n", 399 "int msg rcvd, source inst (%d) doesn't match (%d)\n",
400 viopathStatus[remoteLp].mTargetInst, 400 viopathStatus[remoteLp].mTargetInst,
401 event->xSourceInstanceId); 401 event->xSourceInstanceId);
402 return; 402 return;
403 } 403 }
404 404
405 if (viopathStatus[remoteLp].isActive 405 if (viopathStatus[remoteLp].isActive
406 && (event->xTargetInstanceId != 406 && (event->xTargetInstanceId !=
407 viopathStatus[remoteLp].mSourceInst)) { 407 viopathStatus[remoteLp].mSourceInst)) {
408 printk(VIOPATH_KERN_WARN 408 printk(VIOPATH_KERN_WARN
409 "message from invalid partition. " 409 "message from invalid partition. "
410 "int msg rcvd, target inst (%d) doesn't match (%d)\n", 410 "int msg rcvd, target inst (%d) doesn't match (%d)\n",
411 viopathStatus[remoteLp].mSourceInst, 411 viopathStatus[remoteLp].mSourceInst,
412 event->xTargetInstanceId); 412 event->xTargetInstanceId);
413 return; 413 return;
414 } 414 }
415 } else { 415 } else {
416 remoteLp = event->xTargetLp; 416 remoteLp = event->xTargetLp;
417 if (event->xSourceInstanceId != 417 if (event->xSourceInstanceId !=
418 viopathStatus[remoteLp].mSourceInst) { 418 viopathStatus[remoteLp].mSourceInst) {
419 printk(VIOPATH_KERN_WARN 419 printk(VIOPATH_KERN_WARN
420 "message from invalid partition. " 420 "message from invalid partition. "
421 "ack msg rcvd, source inst (%d) doesn't match (%d)\n", 421 "ack msg rcvd, source inst (%d) doesn't match (%d)\n",
422 viopathStatus[remoteLp].mSourceInst, 422 viopathStatus[remoteLp].mSourceInst,
423 event->xSourceInstanceId); 423 event->xSourceInstanceId);
424 return; 424 return;
425 } 425 }
426 426
427 if (event->xTargetInstanceId != 427 if (event->xTargetInstanceId !=
428 viopathStatus[remoteLp].mTargetInst) { 428 viopathStatus[remoteLp].mTargetInst) {
429 printk(VIOPATH_KERN_WARN 429 printk(VIOPATH_KERN_WARN
430 "message from invalid partition. " 430 "message from invalid partition. "
431 "viopath: ack msg rcvd, target inst (%d) doesn't match (%d)\n", 431 "viopath: ack msg rcvd, target inst (%d) doesn't match (%d)\n",
432 viopathStatus[remoteLp].mTargetInst, 432 viopathStatus[remoteLp].mTargetInst,
433 event->xTargetInstanceId); 433 event->xTargetInstanceId);
434 return; 434 return;
435 } 435 }
436 } 436 }
437 437
438 if (vio_handler[subtype] == NULL) { 438 if (vio_handler[subtype] == NULL) {
439 printk(VIOPATH_KERN_WARN 439 printk(VIOPATH_KERN_WARN
440 "unexpected virtual io event subtype %d from partition %d\n", 440 "unexpected virtual io event subtype %d from partition %d\n",
441 event->xSubtype, remoteLp); 441 event->xSubtype, remoteLp);
442 /* No handler. Ack if necessary */ 442 /* No handler. Ack if necessary */
443 if (hvlpevent_is_int(event) && hvlpevent_need_ack(event)) { 443 if (hvlpevent_is_int(event) && hvlpevent_need_ack(event)) {
444 event->xRc = HvLpEvent_Rc_InvalidSubtype; 444 event->xRc = HvLpEvent_Rc_InvalidSubtype;
445 HvCallEvent_ackLpEvent(event); 445 HvCallEvent_ackLpEvent(event);
446 } 446 }
447 return; 447 return;
448 } 448 }
449 449
450 /* This innocuous little line is where all the real work happens */ 450 /* This innocuous little line is where all the real work happens */
451 (*vio_handler[subtype])(event); 451 (*vio_handler[subtype])(event);
452 } 452 }
453 453
454 static void viopath_donealloc(void *parm, int number) 454 static void viopath_donealloc(void *parm, int number)
455 { 455 {
456 struct alloc_parms *parmsp = parm; 456 struct alloc_parms *parmsp = parm;
457 457
458 parmsp->number = number; 458 parmsp->number = number;
459 if (parmsp->used_wait_atomic) 459 if (parmsp->used_wait_atomic)
460 atomic_set(&parmsp->wait_atomic, 0); 460 atomic_set(&parmsp->wait_atomic, 0);
461 else 461 else
462 complete(&parmsp->done); 462 complete(&parmsp->done);
463 } 463 }
464 464
465 static int allocateEvents(HvLpIndex remoteLp, int numEvents) 465 static int allocateEvents(HvLpIndex remoteLp, int numEvents)
466 { 466 {
467 struct alloc_parms parms; 467 struct alloc_parms parms;
468 468
469 if (system_state != SYSTEM_RUNNING) { 469 if (system_state != SYSTEM_RUNNING) {
470 parms.used_wait_atomic = 1; 470 parms.used_wait_atomic = 1;
471 atomic_set(&parms.wait_atomic, 1); 471 atomic_set(&parms.wait_atomic, 1);
472 } else { 472 } else {
473 parms.used_wait_atomic = 0; 473 parms.used_wait_atomic = 0;
474 init_completion(&parms.done); 474 init_completion(&parms.done);
475 } 475 }
476 mf_allocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo, 250, /* It would be nice to put a real number here! */ 476 mf_allocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo, 250, /* It would be nice to put a real number here! */
477 numEvents, &viopath_donealloc, &parms); 477 numEvents, &viopath_donealloc, &parms);
478 if (system_state != SYSTEM_RUNNING) { 478 if (system_state != SYSTEM_RUNNING) {
479 while (atomic_read(&parms.wait_atomic)) 479 while (atomic_read(&parms.wait_atomic))
480 mb(); 480 mb();
481 } else 481 } else
482 wait_for_completion(&parms.done); 482 wait_for_completion(&parms.done);
483 return parms.number; 483 return parms.number;
484 } 484 }
485 485
486 int viopath_open(HvLpIndex remoteLp, int subtype, int numReq) 486 int viopath_open(HvLpIndex remoteLp, int subtype, int numReq)
487 { 487 {
488 int i; 488 int i;
489 unsigned long flags; 489 unsigned long flags;
490 int tempNumAllocated; 490 int tempNumAllocated;
491 491
492 if ((remoteLp >= HVMAXARCHITECTEDLPS) || (remoteLp == HvLpIndexInvalid)) 492 if ((remoteLp >= HVMAXARCHITECTEDLPS) || (remoteLp == HvLpIndexInvalid))
493 return -EINVAL; 493 return -EINVAL;
494 494
495 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT; 495 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
496 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) 496 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
497 return -EINVAL; 497 return -EINVAL;
498 498
499 spin_lock_irqsave(&statuslock, flags); 499 spin_lock_irqsave(&statuslock, flags);
500 500
501 if (!event_buffer_initialised) { 501 if (!event_buffer_initialised) {
502 for (i = 0; i < VIO_MAX_SUBTYPES; i++) 502 for (i = 0; i < VIO_MAX_SUBTYPES; i++)
503 atomic_set(&event_buffer_available[i], 1); 503 atomic_set(&event_buffer_available[i], 1);
504 event_buffer_initialised = 1; 504 event_buffer_initialised = 1;
505 } 505 }
506 506
507 viopathStatus[remoteLp].users[subtype]++; 507 viopathStatus[remoteLp].users[subtype]++;
508 508
509 if (!viopathStatus[remoteLp].isOpen) { 509 if (!viopathStatus[remoteLp].isOpen) {
510 viopathStatus[remoteLp].isOpen = 1; 510 viopathStatus[remoteLp].isOpen = 1;
511 HvCallEvent_openLpEventPath(remoteLp, HvLpEvent_Type_VirtualIo); 511 HvCallEvent_openLpEventPath(remoteLp, HvLpEvent_Type_VirtualIo);
512 512
513 /* 513 /*
514 * Don't hold the spinlock during an operation that 514 * Don't hold the spinlock during an operation that
515 * can sleep. 515 * can sleep.
516 */ 516 */
517 spin_unlock_irqrestore(&statuslock, flags); 517 spin_unlock_irqrestore(&statuslock, flags);
518 tempNumAllocated = allocateEvents(remoteLp, 1); 518 tempNumAllocated = allocateEvents(remoteLp, 1);
519 spin_lock_irqsave(&statuslock, flags); 519 spin_lock_irqsave(&statuslock, flags);
520 520
521 viopathStatus[remoteLp].numberAllocated += tempNumAllocated; 521 viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
522 522
523 if (viopathStatus[remoteLp].numberAllocated == 0) { 523 if (viopathStatus[remoteLp].numberAllocated == 0) {
524 HvCallEvent_closeLpEventPath(remoteLp, 524 HvCallEvent_closeLpEventPath(remoteLp,
525 HvLpEvent_Type_VirtualIo); 525 HvLpEvent_Type_VirtualIo);
526 526
527 spin_unlock_irqrestore(&statuslock, flags); 527 spin_unlock_irqrestore(&statuslock, flags);
528 return -ENOMEM; 528 return -ENOMEM;
529 } 529 }
530 530
531 viopathStatus[remoteLp].mSourceInst = 531 viopathStatus[remoteLp].mSourceInst =
532 HvCallEvent_getSourceLpInstanceId(remoteLp, 532 HvCallEvent_getSourceLpInstanceId(remoteLp,
533 HvLpEvent_Type_VirtualIo); 533 HvLpEvent_Type_VirtualIo);
534 viopathStatus[remoteLp].mTargetInst = 534 viopathStatus[remoteLp].mTargetInst =
535 HvCallEvent_getTargetLpInstanceId(remoteLp, 535 HvCallEvent_getTargetLpInstanceId(remoteLp,
536 HvLpEvent_Type_VirtualIo); 536 HvLpEvent_Type_VirtualIo);
537 HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo, 537 HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
538 &vio_handleEvent); 538 &vio_handleEvent);
539 sendMonMsg(remoteLp); 539 sendMonMsg(remoteLp);
540 printk(VIOPATH_KERN_INFO "opening connection to partition %d, " 540 printk(VIOPATH_KERN_INFO "opening connection to partition %d, "
541 "setting sinst %d, tinst %d\n", 541 "setting sinst %d, tinst %d\n",
542 remoteLp, viopathStatus[remoteLp].mSourceInst, 542 remoteLp, viopathStatus[remoteLp].mSourceInst,
543 viopathStatus[remoteLp].mTargetInst); 543 viopathStatus[remoteLp].mTargetInst);
544 } 544 }
545 545
546 spin_unlock_irqrestore(&statuslock, flags); 546 spin_unlock_irqrestore(&statuslock, flags);
547 tempNumAllocated = allocateEvents(remoteLp, numReq); 547 tempNumAllocated = allocateEvents(remoteLp, numReq);
548 spin_lock_irqsave(&statuslock, flags); 548 spin_lock_irqsave(&statuslock, flags);
549 viopathStatus[remoteLp].numberAllocated += tempNumAllocated; 549 viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
550 spin_unlock_irqrestore(&statuslock, flags); 550 spin_unlock_irqrestore(&statuslock, flags);
551 551
552 return 0; 552 return 0;
553 } 553 }
554 EXPORT_SYMBOL(viopath_open); 554 EXPORT_SYMBOL(viopath_open);
555 555
556 int viopath_close(HvLpIndex remoteLp, int subtype, int numReq) 556 int viopath_close(HvLpIndex remoteLp, int subtype, int numReq)
557 { 557 {
558 unsigned long flags; 558 unsigned long flags;
559 int i; 559 int i;
560 int numOpen; 560 int numOpen;
561 struct alloc_parms parms; 561 struct alloc_parms parms;
562 562
563 if ((remoteLp >= HVMAXARCHITECTEDLPS) || (remoteLp == HvLpIndexInvalid)) 563 if ((remoteLp >= HVMAXARCHITECTEDLPS) || (remoteLp == HvLpIndexInvalid))
564 return -EINVAL; 564 return -EINVAL;
565 565
566 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT; 566 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
567 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) 567 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
568 return -EINVAL; 568 return -EINVAL;
569 569
570 spin_lock_irqsave(&statuslock, flags); 570 spin_lock_irqsave(&statuslock, flags);
571 /* 571 /*
572 * If the viopath_close somehow gets called before a 572 * If the viopath_close somehow gets called before a
573 * viopath_open it could decrement to -1 which is a non 573 * viopath_open it could decrement to -1 which is a non
574 * recoverable state so we'll prevent this from 574 * recoverable state so we'll prevent this from
575 * happening. 575 * happening.
576 */ 576 */
577 if (viopathStatus[remoteLp].users[subtype] > 0) 577 if (viopathStatus[remoteLp].users[subtype] > 0)
578 viopathStatus[remoteLp].users[subtype]--; 578 viopathStatus[remoteLp].users[subtype]--;
579 579
580 spin_unlock_irqrestore(&statuslock, flags); 580 spin_unlock_irqrestore(&statuslock, flags);
581 581
582 parms.used_wait_atomic = 0; 582 parms.used_wait_atomic = 0;
583 init_completion(&parms.done); 583 init_completion(&parms.done);
584 mf_deallocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo, 584 mf_deallocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo,
585 numReq, &viopath_donealloc, &parms); 585 numReq, &viopath_donealloc, &parms);
586 wait_for_completion(&parms.done); 586 wait_for_completion(&parms.done);
587 587
588 spin_lock_irqsave(&statuslock, flags); 588 spin_lock_irqsave(&statuslock, flags);
589 for (i = 0, numOpen = 0; i < VIO_MAX_SUBTYPES; i++) 589 for (i = 0, numOpen = 0; i < VIO_MAX_SUBTYPES; i++)
590 numOpen += viopathStatus[remoteLp].users[i]; 590 numOpen += viopathStatus[remoteLp].users[i];
591 591
592 if ((viopathStatus[remoteLp].isOpen) && (numOpen == 0)) { 592 if ((viopathStatus[remoteLp].isOpen) && (numOpen == 0)) {
593 printk(VIOPATH_KERN_INFO "closing connection to partition %d\n", 593 printk(VIOPATH_KERN_INFO "closing connection to partition %d\n",
594 remoteLp); 594 remoteLp);
595 595
596 HvCallEvent_closeLpEventPath(remoteLp, 596 HvCallEvent_closeLpEventPath(remoteLp,
597 HvLpEvent_Type_VirtualIo); 597 HvLpEvent_Type_VirtualIo);
598 viopathStatus[remoteLp].isOpen = 0; 598 viopathStatus[remoteLp].isOpen = 0;
599 viopathStatus[remoteLp].isActive = 0; 599 viopathStatus[remoteLp].isActive = 0;
600 600
601 for (i = 0; i < VIO_MAX_SUBTYPES; i++) 601 for (i = 0; i < VIO_MAX_SUBTYPES; i++)
602 atomic_set(&event_buffer_available[i], 0); 602 atomic_set(&event_buffer_available[i], 0);
603 event_buffer_initialised = 0; 603 event_buffer_initialised = 0;
604 } 604 }
605 spin_unlock_irqrestore(&statuslock, flags); 605 spin_unlock_irqrestore(&statuslock, flags);
606 return 0; 606 return 0;
607 } 607 }
608 EXPORT_SYMBOL(viopath_close); 608 EXPORT_SYMBOL(viopath_close);
609 609
610 void *vio_get_event_buffer(int subtype) 610 void *vio_get_event_buffer(int subtype)
611 { 611 {
612 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT; 612 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
613 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) 613 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
614 return NULL; 614 return NULL;
615 615
616 if (atomic_dec_if_positive(&event_buffer_available[subtype]) == 0) 616 if (atomic_dec_if_positive(&event_buffer_available[subtype]) == 0)
617 return &event_buffer[subtype * 256]; 617 return &event_buffer[subtype * 256];
618 else 618 else
619 return NULL; 619 return NULL;
620 } 620 }
621 EXPORT_SYMBOL(vio_get_event_buffer); 621 EXPORT_SYMBOL(vio_get_event_buffer);
622 622
623 void vio_free_event_buffer(int subtype, void *buffer) 623 void vio_free_event_buffer(int subtype, void *buffer)
624 { 624 {
625 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT; 625 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
626 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) { 626 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) {
627 printk(VIOPATH_KERN_WARN 627 printk(VIOPATH_KERN_WARN
628 "unexpected subtype %d freeing event buffer\n", subtype); 628 "unexpected subtype %d freeing event buffer\n", subtype);
629 return; 629 return;
630 } 630 }
631 631
632 if (atomic_read(&event_buffer_available[subtype]) != 0) { 632 if (atomic_read(&event_buffer_available[subtype]) != 0) {
633 printk(VIOPATH_KERN_WARN 633 printk(VIOPATH_KERN_WARN
634 "freeing unallocated event buffer, subtype %d\n", 634 "freeing unallocated event buffer, subtype %d\n",
635 subtype); 635 subtype);
636 return; 636 return;
637 } 637 }
638 638
639 if (buffer != &event_buffer[subtype * 256]) { 639 if (buffer != &event_buffer[subtype * 256]) {
640 printk(VIOPATH_KERN_WARN 640 printk(VIOPATH_KERN_WARN
641 "freeing invalid event buffer, subtype %d\n", subtype); 641 "freeing invalid event buffer, subtype %d\n", subtype);
642 } 642 }
643 643
644 atomic_set(&event_buffer_available[subtype], 1); 644 atomic_set(&event_buffer_available[subtype], 1);
645 } 645 }
646 EXPORT_SYMBOL(vio_free_event_buffer); 646 EXPORT_SYMBOL(vio_free_event_buffer);
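
For reference, the exports above (vio_setHandler/vio_clearHandler, viopath_open/viopath_close and the event-buffer helpers) form the interface the iSeries virtual I/O drivers sit on. A minimal sketch of a hypothetical client follows; MY_VIO_SUBTYPE, the request count of 4 and the handler body are placeholders for illustration, not code from this file.

	#include <asm/iseries/hv_lp_event.h>
	#include <asm/iseries/vio.h>

	/* Placeholder: substitute the real viomajorsubtype_* value for the client. */
	#define MY_VIO_SUBTYPE	viomajorsubtype_blockio

	static void my_vio_handler(struct HvLpEvent *event)
	{
		if (!event)		/* NULL means the remote partition died */
			return;
		/* handle the interrupt or acknowledgement event here */
	}

	static int my_vio_client_init(void)
	{
		int rc;

		vio_set_hostlp();
		if (viopath_hostLp == HvLpIndexInvalid)
			return -ENODEV;

		rc = vio_setHandler(MY_VIO_SUBTYPE, my_vio_handler);
		if (rc)
			return rc;

		rc = viopath_open(viopath_hostLp, MY_VIO_SUBTYPE, 4);
		if (rc)
			vio_clearHandler(MY_VIO_SUBTYPE);
		return rc;
	}

	static void my_vio_client_exit(void)
	{
		viopath_close(viopath_hostLp, MY_VIO_SUBTYPE, 4);
		vio_clearHandler(MY_VIO_SUBTYPE);
	}
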
647 647
648 static const struct vio_error_entry vio_no_error = 648 static const struct vio_error_entry vio_no_error =
649 { 0, 0, "Non-VIO Error" }; 649 { 0, 0, "Non-VIO Error" };
650 static const struct vio_error_entry vio_unknown_error = 650 static const struct vio_error_entry vio_unknown_error =
651 { 0, EIO, "Unknown Error" }; 651 { 0, EIO, "Unknown Error" };
652 652
653 static const struct vio_error_entry vio_default_errors[] = { 653 static const struct vio_error_entry vio_default_errors[] = {
654 {0x0001, EIO, "No Connection"}, 654 {0x0001, EIO, "No Connection"},
655 {0x0002, EIO, "No Receiver"}, 655 {0x0002, EIO, "No Receiver"},
656 {0x0003, EIO, "No Buffer Available"}, 656 {0x0003, EIO, "No Buffer Available"},
657 {0x0004, EBADRQC, "Invalid Message Type"}, 657 {0x0004, EBADRQC, "Invalid Message Type"},
658 {0x0000, 0, NULL}, 658 {0x0000, 0, NULL},
659 }; 659 };
660 660
661 const struct vio_error_entry *vio_lookup_rc( 661 const struct vio_error_entry *vio_lookup_rc(
662 const struct vio_error_entry *local_table, u16 rc) 662 const struct vio_error_entry *local_table, u16 rc)
663 { 663 {
664 const struct vio_error_entry *cur; 664 const struct vio_error_entry *cur;
665 665
666 if (!rc) 666 if (!rc)
667 return &vio_no_error; 667 return &vio_no_error;
668 if (local_table) 668 if (local_table)
669 for (cur = local_table; cur->rc; ++cur) 669 for (cur = local_table; cur->rc; ++cur)
670 if (cur->rc == rc) 670 if (cur->rc == rc)
671 return cur; 671 return cur;
672 for (cur = vio_default_errors; cur->rc; ++cur) 672 for (cur = vio_default_errors; cur->rc; ++cur)
673 if (cur->rc == rc) 673 if (cur->rc == rc)
674 return cur; 674 return cur;
675 return &vio_unknown_error; 675 return &vio_unknown_error;
676 } 676 }
677 EXPORT_SYMBOL(vio_lookup_rc); 677 EXPORT_SYMBOL(vio_lookup_rc);
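
vio_lookup_rc() gives clients a uniform way to turn a 16-bit VIO return code into an errno and a printable message, consulting an optional driver-local table before the defaults above. A small sketch, assuming the conventional errno/msg field names from the iSeries vio.h header and a made-up device-specific code:

	static const struct vio_error_entry my_errors[] = {
		{ 0x0101, EIO, "Device Not Ready" },	/* hypothetical device code */
		{ 0x0000, 0, NULL },
	};

	static int my_check_rc(u16 rc)
	{
		const struct vio_error_entry *err = vio_lookup_rc(my_errors, rc);

		if (err->errno)
			printk(KERN_WARNING "mydev: operation failed: %s\n", err->msg);
		return -err->errno;
	}
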
678 678
arch/powerpc/platforms/pasemi/dma_lib.c
1 /* 1 /*
2 * Copyright (C) 2006-2007 PA Semi, Inc 2 * Copyright (C) 2006-2007 PA Semi, Inc
3 * 3 *
4 * Common functions for DMA access on PA Semi PWRficient 4 * Common functions for DMA access on PA Semi PWRficient
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 * 9 *
10 * This program is distributed in the hope that it will be useful, 10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License 15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software 16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 18 */
19 19
20 #include <linux/kernel.h> 20 #include <linux/kernel.h>
21 #include <linux/init.h> 21 #include <linux/init.h>
22 #include <linux/module.h> 22 #include <linux/export.h>
23 #include <linux/pci.h> 23 #include <linux/pci.h>
24 #include <linux/slab.h> 24 #include <linux/slab.h>
25 #include <linux/of.h> 25 #include <linux/of.h>
26 #include <linux/sched.h> 26 #include <linux/sched.h>
27 27
28 #include <asm/pasemi_dma.h> 28 #include <asm/pasemi_dma.h>
29 29
30 #define MAX_TXCH 64 30 #define MAX_TXCH 64
31 #define MAX_RXCH 64 31 #define MAX_RXCH 64
32 #define MAX_FLAGS 64 32 #define MAX_FLAGS 64
33 #define MAX_FUN 8 33 #define MAX_FUN 8
34 34
35 static struct pasdma_status *dma_status; 35 static struct pasdma_status *dma_status;
36 36
37 static void __iomem *iob_regs; 37 static void __iomem *iob_regs;
38 static void __iomem *mac_regs[6]; 38 static void __iomem *mac_regs[6];
39 static void __iomem *dma_regs; 39 static void __iomem *dma_regs;
40 40
41 static int base_hw_irq; 41 static int base_hw_irq;
42 42
43 static int num_txch, num_rxch; 43 static int num_txch, num_rxch;
44 44
45 static struct pci_dev *dma_pdev; 45 static struct pci_dev *dma_pdev;
46 46
47 /* Bitmaps to handle allocation of channels */ 47 /* Bitmaps to handle allocation of channels */
48 48
49 static DECLARE_BITMAP(txch_free, MAX_TXCH); 49 static DECLARE_BITMAP(txch_free, MAX_TXCH);
50 static DECLARE_BITMAP(rxch_free, MAX_RXCH); 50 static DECLARE_BITMAP(rxch_free, MAX_RXCH);
51 static DECLARE_BITMAP(flags_free, MAX_FLAGS); 51 static DECLARE_BITMAP(flags_free, MAX_FLAGS);
52 static DECLARE_BITMAP(fun_free, MAX_FUN); 52 static DECLARE_BITMAP(fun_free, MAX_FUN);
53 53
54 /* pasemi_read_iob_reg - read IOB register 54 /* pasemi_read_iob_reg - read IOB register
55 * @reg: Register to read (offset into PCI CFG space) 55 * @reg: Register to read (offset into PCI CFG space)
56 */ 56 */
57 unsigned int pasemi_read_iob_reg(unsigned int reg) 57 unsigned int pasemi_read_iob_reg(unsigned int reg)
58 { 58 {
59 return in_le32(iob_regs+reg); 59 return in_le32(iob_regs+reg);
60 } 60 }
61 EXPORT_SYMBOL(pasemi_read_iob_reg); 61 EXPORT_SYMBOL(pasemi_read_iob_reg);
62 62
63 /* pasemi_write_iob_reg - write IOB register 63 /* pasemi_write_iob_reg - write IOB register
64 * @reg: Register to write to (offset into PCI CFG space) 64 * @reg: Register to write to (offset into PCI CFG space)
65 * @val: Value to write 65 * @val: Value to write
66 */ 66 */
67 void pasemi_write_iob_reg(unsigned int reg, unsigned int val) 67 void pasemi_write_iob_reg(unsigned int reg, unsigned int val)
68 { 68 {
69 out_le32(iob_regs+reg, val); 69 out_le32(iob_regs+reg, val);
70 } 70 }
71 EXPORT_SYMBOL(pasemi_write_iob_reg); 71 EXPORT_SYMBOL(pasemi_write_iob_reg);
72 72
73 /* pasemi_read_mac_reg - read MAC register 73 /* pasemi_read_mac_reg - read MAC register
74 * @intf: MAC interface 74 * @intf: MAC interface
75 * @reg: Register to read (offset into PCI CFG space) 75 * @reg: Register to read (offset into PCI CFG space)
76 */ 76 */
77 unsigned int pasemi_read_mac_reg(int intf, unsigned int reg) 77 unsigned int pasemi_read_mac_reg(int intf, unsigned int reg)
78 { 78 {
79 return in_le32(mac_regs[intf]+reg); 79 return in_le32(mac_regs[intf]+reg);
80 } 80 }
81 EXPORT_SYMBOL(pasemi_read_mac_reg); 81 EXPORT_SYMBOL(pasemi_read_mac_reg);
82 82
83 /* pasemi_write_mac_reg - write MAC register 83 /* pasemi_write_mac_reg - write MAC register
84 * @intf: MAC interface 84 * @intf: MAC interface
85 * @reg: Register to write to (offset into PCI CFG space) 85 * @reg: Register to write to (offset into PCI CFG space)
86 * @val: Value to write 86 * @val: Value to write
87 */ 87 */
88 void pasemi_write_mac_reg(int intf, unsigned int reg, unsigned int val) 88 void pasemi_write_mac_reg(int intf, unsigned int reg, unsigned int val)
89 { 89 {
90 out_le32(mac_regs[intf]+reg, val); 90 out_le32(mac_regs[intf]+reg, val);
91 } 91 }
92 EXPORT_SYMBOL(pasemi_write_mac_reg); 92 EXPORT_SYMBOL(pasemi_write_mac_reg);
93 93
94 /* pasemi_read_dma_reg - read DMA register 94 /* pasemi_read_dma_reg - read DMA register
95 * @reg: Register to read (offset into PCI CFG space) 95 * @reg: Register to read (offset into PCI CFG space)
96 */ 96 */
97 unsigned int pasemi_read_dma_reg(unsigned int reg) 97 unsigned int pasemi_read_dma_reg(unsigned int reg)
98 { 98 {
99 return in_le32(dma_regs+reg); 99 return in_le32(dma_regs+reg);
100 } 100 }
101 EXPORT_SYMBOL(pasemi_read_dma_reg); 101 EXPORT_SYMBOL(pasemi_read_dma_reg);
102 102
103 /* pasemi_write_dma_reg - write DMA register 103 /* pasemi_write_dma_reg - write DMA register
104 * @reg: Register to write to (offset into PCI CFG space) 104 * @reg: Register to write to (offset into PCI CFG space)
105 * @val: Value to write 105 * @val: Value to write
106 */ 106 */
107 void pasemi_write_dma_reg(unsigned int reg, unsigned int val) 107 void pasemi_write_dma_reg(unsigned int reg, unsigned int val)
108 { 108 {
109 out_le32(dma_regs+reg, val); 109 out_le32(dma_regs+reg, val);
110 } 110 }
111 EXPORT_SYMBOL(pasemi_write_dma_reg); 111 EXPORT_SYMBOL(pasemi_write_dma_reg);
112 112
113 static int pasemi_alloc_tx_chan(enum pasemi_dmachan_type type) 113 static int pasemi_alloc_tx_chan(enum pasemi_dmachan_type type)
114 { 114 {
115 int bit; 115 int bit;
116 int start, limit; 116 int start, limit;
117 117
118 switch (type & (TXCHAN_EVT0|TXCHAN_EVT1)) { 118 switch (type & (TXCHAN_EVT0|TXCHAN_EVT1)) {
119 case TXCHAN_EVT0: 119 case TXCHAN_EVT0:
120 start = 0; 120 start = 0;
121 limit = 10; 121 limit = 10;
122 break; 122 break;
123 case TXCHAN_EVT1: 123 case TXCHAN_EVT1:
124 start = 10; 124 start = 10;
125 limit = MAX_TXCH; 125 limit = MAX_TXCH;
126 break; 126 break;
127 default: 127 default:
128 start = 0; 128 start = 0;
129 limit = MAX_TXCH; 129 limit = MAX_TXCH;
130 break; 130 break;
131 } 131 }
132 retry: 132 retry:
133 bit = find_next_bit(txch_free, MAX_TXCH, start); 133 bit = find_next_bit(txch_free, MAX_TXCH, start);
134 if (bit >= limit) 134 if (bit >= limit)
135 return -ENOSPC; 135 return -ENOSPC;
136 if (!test_and_clear_bit(bit, txch_free)) 136 if (!test_and_clear_bit(bit, txch_free))
137 goto retry; 137 goto retry;
138 138
139 return bit; 139 return bit;
140 } 140 }
141 141
142 static void pasemi_free_tx_chan(int chan) 142 static void pasemi_free_tx_chan(int chan)
143 { 143 {
144 BUG_ON(test_bit(chan, txch_free)); 144 BUG_ON(test_bit(chan, txch_free));
145 set_bit(chan, txch_free); 145 set_bit(chan, txch_free);
146 } 146 }
147 147
148 static int pasemi_alloc_rx_chan(void) 148 static int pasemi_alloc_rx_chan(void)
149 { 149 {
150 int bit; 150 int bit;
151 retry: 151 retry:
152 bit = find_first_bit(rxch_free, MAX_RXCH); 152 bit = find_first_bit(rxch_free, MAX_RXCH);
153 if (bit >= MAX_TXCH) 153 if (bit >= MAX_TXCH)
154 return -ENOSPC; 154 return -ENOSPC;
155 if (!test_and_clear_bit(bit, rxch_free)) 155 if (!test_and_clear_bit(bit, rxch_free))
156 goto retry; 156 goto retry;
157 157
158 return bit; 158 return bit;
159 } 159 }
160 160
161 static void pasemi_free_rx_chan(int chan) 161 static void pasemi_free_rx_chan(int chan)
162 { 162 {
163 BUG_ON(test_bit(chan, rxch_free)); 163 BUG_ON(test_bit(chan, rxch_free));
164 set_bit(chan, rxch_free); 164 set_bit(chan, rxch_free);
165 } 165 }
166 166
167 /* pasemi_dma_alloc_chan - Allocate a DMA channel 167 /* pasemi_dma_alloc_chan - Allocate a DMA channel
168 * @type: Type of channel to allocate 168 * @type: Type of channel to allocate
169 * @total_size: Total size of structure to allocate (to allow for more 169 * @total_size: Total size of structure to allocate (to allow for more
170 * room behind the structure to be used by the client) 170 * room behind the structure to be used by the client)
171 * @offset: Offset in bytes from start of the total structure to the beginning 171 * @offset: Offset in bytes from start of the total structure to the beginning
172 * of struct pasemi_dmachan. Needed when struct pasemi_dmachan is 172 * of struct pasemi_dmachan. Needed when struct pasemi_dmachan is
173 * not the first member of the client structure. 173 * not the first member of the client structure.
174 * 174 *
175 * pasemi_dma_alloc_chan allocates a DMA channel for use by a client. The 175 * pasemi_dma_alloc_chan allocates a DMA channel for use by a client. The
176 * type argument specifies whether it's a RX or TX channel, and in the case 176 * type argument specifies whether it's a RX or TX channel, and in the case
177 * of TX channels which group it needs to belong to (if any). 177 * of TX channels which group it needs to belong to (if any).
178 * 178 *
179 * Returns a pointer to the total structure allocated on success, NULL 179 * Returns a pointer to the total structure allocated on success, NULL
180 * on failure. 180 * on failure.
181 */ 181 */
182 void *pasemi_dma_alloc_chan(enum pasemi_dmachan_type type, 182 void *pasemi_dma_alloc_chan(enum pasemi_dmachan_type type,
183 int total_size, int offset) 183 int total_size, int offset)
184 { 184 {
185 void *buf; 185 void *buf;
186 struct pasemi_dmachan *chan; 186 struct pasemi_dmachan *chan;
187 int chno; 187 int chno;
188 188
189 BUG_ON(total_size < sizeof(struct pasemi_dmachan)); 189 BUG_ON(total_size < sizeof(struct pasemi_dmachan));
190 190
191 buf = kzalloc(total_size, GFP_KERNEL); 191 buf = kzalloc(total_size, GFP_KERNEL);
192 192
193 if (!buf) 193 if (!buf)
194 return NULL; 194 return NULL;
195 chan = buf + offset; 195 chan = buf + offset;
196 196
197 chan->priv = buf; 197 chan->priv = buf;
198 198
199 switch (type & (TXCHAN|RXCHAN)) { 199 switch (type & (TXCHAN|RXCHAN)) {
200 case RXCHAN: 200 case RXCHAN:
201 chno = pasemi_alloc_rx_chan(); 201 chno = pasemi_alloc_rx_chan();
202 chan->chno = chno; 202 chan->chno = chno;
203 chan->irq = irq_create_mapping(NULL, 203 chan->irq = irq_create_mapping(NULL,
204 base_hw_irq + num_txch + chno); 204 base_hw_irq + num_txch + chno);
205 chan->status = &dma_status->rx_sta[chno]; 205 chan->status = &dma_status->rx_sta[chno];
206 break; 206 break;
207 case TXCHAN: 207 case TXCHAN:
208 chno = pasemi_alloc_tx_chan(type); 208 chno = pasemi_alloc_tx_chan(type);
209 chan->chno = chno; 209 chan->chno = chno;
210 chan->irq = irq_create_mapping(NULL, base_hw_irq + chno); 210 chan->irq = irq_create_mapping(NULL, base_hw_irq + chno);
211 chan->status = &dma_status->tx_sta[chno]; 211 chan->status = &dma_status->tx_sta[chno];
212 break; 212 break;
213 } 213 }
214 214
215 chan->chan_type = type; 215 chan->chan_type = type;
216 216
217 return chan; 217 return chan;
218 } 218 }
219 EXPORT_SYMBOL(pasemi_dma_alloc_chan); 219 EXPORT_SYMBOL(pasemi_dma_alloc_chan);
220 220
221 /* pasemi_dma_free_chan - Free a previously allocated channel 221 /* pasemi_dma_free_chan - Free a previously allocated channel
222 * @chan: Channel to free 222 * @chan: Channel to free
223 * 223 *
224 * Frees a previously allocated channel. It will also deallocate any 224 * Frees a previously allocated channel. It will also deallocate any
225 * descriptor ring associated with the channel, if allocated. 225 * descriptor ring associated with the channel, if allocated.
226 */ 226 */
227 void pasemi_dma_free_chan(struct pasemi_dmachan *chan) 227 void pasemi_dma_free_chan(struct pasemi_dmachan *chan)
228 { 228 {
229 if (chan->ring_virt) 229 if (chan->ring_virt)
230 pasemi_dma_free_ring(chan); 230 pasemi_dma_free_ring(chan);
231 231
232 switch (chan->chan_type & (RXCHAN|TXCHAN)) { 232 switch (chan->chan_type & (RXCHAN|TXCHAN)) {
233 case RXCHAN: 233 case RXCHAN:
234 pasemi_free_rx_chan(chan->chno); 234 pasemi_free_rx_chan(chan->chno);
235 break; 235 break;
236 case TXCHAN: 236 case TXCHAN:
237 pasemi_free_tx_chan(chan->chno); 237 pasemi_free_tx_chan(chan->chno);
238 break; 238 break;
239 } 239 }
240 240
241 kfree(chan->priv); 241 kfree(chan->priv);
242 } 242 }
243 EXPORT_SYMBOL(pasemi_dma_free_chan); 243 EXPORT_SYMBOL(pasemi_dma_free_chan);
244 244
245 /* pasemi_dma_alloc_ring - Allocate descriptor ring for a channel 245 /* pasemi_dma_alloc_ring - Allocate descriptor ring for a channel
246 * @chan: Channel for which to allocate 246 * @chan: Channel for which to allocate
247 * @ring_size: Ring size in 64-bit (8-byte) words 247 * @ring_size: Ring size in 64-bit (8-byte) words
248 * 248 *
249 * Allocate a descriptor ring for a channel. Returns 0 on success, errno 249 * Allocate a descriptor ring for a channel. Returns 0 on success, errno
250 * on failure. The passed in struct pasemi_dmachan is updated with the 250 * on failure. The passed in struct pasemi_dmachan is updated with the
251 * virtual and DMA addresses of the ring. 251 * virtual and DMA addresses of the ring.
252 */ 252 */
253 int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size) 253 int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size)
254 { 254 {
255 BUG_ON(chan->ring_virt); 255 BUG_ON(chan->ring_virt);
256 256
257 chan->ring_size = ring_size; 257 chan->ring_size = ring_size;
258 258
259 chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev, 259 chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev,
260 ring_size * sizeof(u64), 260 ring_size * sizeof(u64),
261 &chan->ring_dma, GFP_KERNEL); 261 &chan->ring_dma, GFP_KERNEL);
262 262
263 if (!chan->ring_virt) 263 if (!chan->ring_virt)
264 return -ENOMEM; 264 return -ENOMEM;
265 265
266 memset(chan->ring_virt, 0, ring_size * sizeof(u64)); 266 memset(chan->ring_virt, 0, ring_size * sizeof(u64));
267 267
268 return 0; 268 return 0;
269 } 269 }
270 EXPORT_SYMBOL(pasemi_dma_alloc_ring); 270 EXPORT_SYMBOL(pasemi_dma_alloc_ring);
271 271
272 /* pasemi_dma_free_ring - Free an allocated descriptor ring for a channel 272 /* pasemi_dma_free_ring - Free an allocated descriptor ring for a channel
273 * @chan: Channel for which to free the descriptor ring 273 * @chan: Channel for which to free the descriptor ring
274 * 274 *
275 * Frees a previously allocated descriptor ring for a channel. 275 * Frees a previously allocated descriptor ring for a channel.
276 */ 276 */
277 void pasemi_dma_free_ring(struct pasemi_dmachan *chan) 277 void pasemi_dma_free_ring(struct pasemi_dmachan *chan)
278 { 278 {
279 BUG_ON(!chan->ring_virt); 279 BUG_ON(!chan->ring_virt);
280 280
281 dma_free_coherent(&dma_pdev->dev, chan->ring_size * sizeof(u64), 281 dma_free_coherent(&dma_pdev->dev, chan->ring_size * sizeof(u64),
282 chan->ring_virt, chan->ring_dma); 282 chan->ring_virt, chan->ring_dma);
283 chan->ring_virt = NULL; 283 chan->ring_virt = NULL;
284 chan->ring_size = 0; 284 chan->ring_size = 0;
285 chan->ring_dma = 0; 285 chan->ring_dma = 0;
286 } 286 }
287 EXPORT_SYMBOL(pasemi_dma_free_ring); 287 EXPORT_SYMBOL(pasemi_dma_free_ring);
288 288
289 /* pasemi_dma_start_chan - Start a DMA channel 289 /* pasemi_dma_start_chan - Start a DMA channel
290 * @chan: Channel to start 290 * @chan: Channel to start
291 * @cmdsta: Additional CCMDSTA/TCMDSTA bits to write 291 * @cmdsta: Additional CCMDSTA/TCMDSTA bits to write
292 * 292 *
293 * Enables (starts) a DMA channel with optional additional arguments. 293 * Enables (starts) a DMA channel with optional additional arguments.
294 */ 294 */
295 void pasemi_dma_start_chan(const struct pasemi_dmachan *chan, const u32 cmdsta) 295 void pasemi_dma_start_chan(const struct pasemi_dmachan *chan, const u32 cmdsta)
296 { 296 {
297 if (chan->chan_type == RXCHAN) 297 if (chan->chan_type == RXCHAN)
298 pasemi_write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno), 298 pasemi_write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno),
299 cmdsta | PAS_DMA_RXCHAN_CCMDSTA_EN); 299 cmdsta | PAS_DMA_RXCHAN_CCMDSTA_EN);
300 else 300 else
301 pasemi_write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno), 301 pasemi_write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno),
302 cmdsta | PAS_DMA_TXCHAN_TCMDSTA_EN); 302 cmdsta | PAS_DMA_TXCHAN_TCMDSTA_EN);
303 } 303 }
304 EXPORT_SYMBOL(pasemi_dma_start_chan); 304 EXPORT_SYMBOL(pasemi_dma_start_chan);
305 305
306 /* pasemi_dma_stop_chan - Stop a DMA channel 306 /* pasemi_dma_stop_chan - Stop a DMA channel
307 * @chan: Channel to stop 307 * @chan: Channel to stop
308 * 308 *
309 * Stops (disables) a DMA channel. This is done by setting the ST bit in the 309 * Stops (disables) a DMA channel. This is done by setting the ST bit in the
310 * CMDSTA register and waiting on the ACT (active) bit to clear, then 310 * CMDSTA register and waiting on the ACT (active) bit to clear, then
311 * finally disabling the whole channel. 311 * finally disabling the whole channel.
312 * 312 *
313 * This function will only try for a short while for the channel to stop, if 313 * This function will only try for a short while for the channel to stop, if
314 * it doesn't it will return failure. 314 * it doesn't it will return failure.
315 * 315 *
316 * Returns 1 on success, 0 on failure. 316 * Returns 1 on success, 0 on failure.
317 */ 317 */
318 #define MAX_RETRIES 5000 318 #define MAX_RETRIES 5000
319 int pasemi_dma_stop_chan(const struct pasemi_dmachan *chan) 319 int pasemi_dma_stop_chan(const struct pasemi_dmachan *chan)
320 { 320 {
321 int reg, retries; 321 int reg, retries;
322 u32 sta; 322 u32 sta;
323 323
324 if (chan->chan_type == RXCHAN) { 324 if (chan->chan_type == RXCHAN) {
325 reg = PAS_DMA_RXCHAN_CCMDSTA(chan->chno); 325 reg = PAS_DMA_RXCHAN_CCMDSTA(chan->chno);
326 pasemi_write_dma_reg(reg, PAS_DMA_RXCHAN_CCMDSTA_ST); 326 pasemi_write_dma_reg(reg, PAS_DMA_RXCHAN_CCMDSTA_ST);
327 for (retries = 0; retries < MAX_RETRIES; retries++) { 327 for (retries = 0; retries < MAX_RETRIES; retries++) {
328 sta = pasemi_read_dma_reg(reg); 328 sta = pasemi_read_dma_reg(reg);
329 if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) { 329 if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) {
330 pasemi_write_dma_reg(reg, 0); 330 pasemi_write_dma_reg(reg, 0);
331 return 1; 331 return 1;
332 } 332 }
333 cond_resched(); 333 cond_resched();
334 } 334 }
335 } else { 335 } else {
336 reg = PAS_DMA_TXCHAN_TCMDSTA(chan->chno); 336 reg = PAS_DMA_TXCHAN_TCMDSTA(chan->chno);
337 pasemi_write_dma_reg(reg, PAS_DMA_TXCHAN_TCMDSTA_ST); 337 pasemi_write_dma_reg(reg, PAS_DMA_TXCHAN_TCMDSTA_ST);
338 for (retries = 0; retries < MAX_RETRIES; retries++) { 338 for (retries = 0; retries < MAX_RETRIES; retries++) {
339 sta = pasemi_read_dma_reg(reg); 339 sta = pasemi_read_dma_reg(reg);
340 if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) { 340 if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) {
341 pasemi_write_dma_reg(reg, 0); 341 pasemi_write_dma_reg(reg, 0);
342 return 1; 342 return 1;
343 } 343 }
344 cond_resched(); 344 cond_resched();
345 } 345 }
346 } 346 }
347 347
348 return 0; 348 return 0;
349 } 349 }
350 EXPORT_SYMBOL(pasemi_dma_stop_chan); 350 EXPORT_SYMBOL(pasemi_dma_stop_chan);
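
Taken together, the calls above give the usual channel lifecycle for a client driver: allocate a channel with room for private state behind it, attach a descriptor ring, enable the channel, and later stop it and free everything. A sketch under assumed sizes; struct my_chan and the 256-entry ring are illustrative only, and a real client would also program the ring base/size registers via pasemi_write_dma_reg() before starting the channel.

	#include <linux/kernel.h>
	#include <asm/pasemi_dma.h>

	struct my_chan {
		struct pasemi_dmachan chan;	/* first, so the returned pointer is the client struct */
		void *client_data;		/* extra room requested from pasemi_dma_alloc_chan() */
	};

	static struct my_chan *my_chan_setup(void)
	{
		struct my_chan *mc;

		mc = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct my_chan),
					   offsetof(struct my_chan, chan));
		if (!mc)
			return NULL;

		/* 256 descriptors of 8 bytes each */
		if (pasemi_dma_alloc_ring(&mc->chan, 256)) {
			pasemi_dma_free_chan(&mc->chan);
			return NULL;
		}

		/* ... program ring base/size registers for this channel here ... */
		pasemi_dma_start_chan(&mc->chan, 0);
		return mc;
	}

	static void my_chan_teardown(struct my_chan *mc)
	{
		if (!pasemi_dma_stop_chan(&mc->chan))
			pr_warn("channel %d did not stop cleanly\n", mc->chan.chno);
		pasemi_dma_free_chan(&mc->chan);	/* frees the ring too, if allocated */
	}
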
351 351
352 /* pasemi_dma_alloc_buf - Allocate a buffer to use for DMA 352 /* pasemi_dma_alloc_buf - Allocate a buffer to use for DMA
353 * @chan: Channel to allocate for 353 * @chan: Channel to allocate for
354 * @size: Size of buffer in bytes 354 * @size: Size of buffer in bytes
355 * @handle: DMA handle 355 * @handle: DMA handle
356 * 356 *
357 * Allocate a buffer to be used by the DMA engine for read/write, 357 * Allocate a buffer to be used by the DMA engine for read/write,
358 * similar to dma_alloc_coherent(). 358 * similar to dma_alloc_coherent().
359 * 359 *
360 * Returns the virtual address of the buffer, or NULL in case of failure. 360 * Returns the virtual address of the buffer, or NULL in case of failure.
361 */ 361 */
362 void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size, 362 void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size,
363 dma_addr_t *handle) 363 dma_addr_t *handle)
364 { 364 {
365 return dma_alloc_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL); 365 return dma_alloc_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL);
366 } 366 }
367 EXPORT_SYMBOL(pasemi_dma_alloc_buf); 367 EXPORT_SYMBOL(pasemi_dma_alloc_buf);
368 368
369 /* pasemi_dma_free_buf - Free a buffer used for DMA 369 /* pasemi_dma_free_buf - Free a buffer used for DMA
370 * @chan: Channel the buffer was allocated for 370 * @chan: Channel the buffer was allocated for
371 * @size: Size of buffer in bytes 371 * @size: Size of buffer in bytes
372 * @handle: DMA handle 372 * @handle: DMA handle
373 * 373 *
374 * Frees a previously allocated buffer. 374 * Frees a previously allocated buffer.
375 */ 375 */
376 void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size, 376 void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size,
377 dma_addr_t *handle) 377 dma_addr_t *handle)
378 { 378 {
379 dma_free_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL); 379 dma_free_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL);
380 } 380 }
381 EXPORT_SYMBOL(pasemi_dma_free_buf); 381 EXPORT_SYMBOL(pasemi_dma_free_buf);
382 382
383 /* pasemi_dma_alloc_flag - Allocate a flag (event) for channel synchronization 383 /* pasemi_dma_alloc_flag - Allocate a flag (event) for channel synchronization
384 * 384 *
385 * Allocates a flag for use with channel synchronization (event descriptors). 385 * Allocates a flag for use with channel synchronization (event descriptors).
386 * Returns allocated flag (0-63), < 0 on error. 386 * Returns allocated flag (0-63), < 0 on error.
387 */ 387 */
388 int pasemi_dma_alloc_flag(void) 388 int pasemi_dma_alloc_flag(void)
389 { 389 {
390 int bit; 390 int bit;
391 391
392 retry: 392 retry:
393 bit = find_next_bit(flags_free, MAX_FLAGS, 0); 393 bit = find_next_bit(flags_free, MAX_FLAGS, 0);
394 if (bit >= MAX_FLAGS) 394 if (bit >= MAX_FLAGS)
395 return -ENOSPC; 395 return -ENOSPC;
396 if (!test_and_clear_bit(bit, flags_free)) 396 if (!test_and_clear_bit(bit, flags_free))
397 goto retry; 397 goto retry;
398 398
399 return bit; 399 return bit;
400 } 400 }
401 EXPORT_SYMBOL(pasemi_dma_alloc_flag); 401 EXPORT_SYMBOL(pasemi_dma_alloc_flag);
402 402
403 403
404 /* pasemi_dma_free_flag - Deallocates a flag (event) 404 /* pasemi_dma_free_flag - Deallocates a flag (event)
405 * @flag: Flag number to deallocate 405 * @flag: Flag number to deallocate
406 * 406 *
407 * Frees up a flag so it can be reused for other purposes. 407 * Frees up a flag so it can be reused for other purposes.
408 */ 408 */
409 void pasemi_dma_free_flag(int flag) 409 void pasemi_dma_free_flag(int flag)
410 { 410 {
411 BUG_ON(test_bit(flag, flags_free)); 411 BUG_ON(test_bit(flag, flags_free));
412 BUG_ON(flag >= MAX_FLAGS); 412 BUG_ON(flag >= MAX_FLAGS);
413 set_bit(flag, flags_free); 413 set_bit(flag, flags_free);
414 } 414 }
415 EXPORT_SYMBOL(pasemi_dma_free_flag); 415 EXPORT_SYMBOL(pasemi_dma_free_flag);
416 416
417 417
418 /* pasemi_dma_set_flag - Sets a flag (event) to 1 418 /* pasemi_dma_set_flag - Sets a flag (event) to 1
419 * @flag: Flag number to set active 419 * @flag: Flag number to set active
420 * 420 *
421 * Sets the flag provided to 1. 421 * Sets the flag provided to 1.
422 */ 422 */
423 void pasemi_dma_set_flag(int flag) 423 void pasemi_dma_set_flag(int flag)
424 { 424 {
425 BUG_ON(flag >= MAX_FLAGS); 425 BUG_ON(flag >= MAX_FLAGS);
426 if (flag < 32) 426 if (flag < 32)
427 pasemi_write_dma_reg(PAS_DMA_TXF_SFLG0, 1 << flag); 427 pasemi_write_dma_reg(PAS_DMA_TXF_SFLG0, 1 << flag);
428 else 428 else
429 pasemi_write_dma_reg(PAS_DMA_TXF_SFLG1, 1 << flag); 429 pasemi_write_dma_reg(PAS_DMA_TXF_SFLG1, 1 << flag);
430 } 430 }
431 EXPORT_SYMBOL(pasemi_dma_set_flag); 431 EXPORT_SYMBOL(pasemi_dma_set_flag);
432 432
433 /* pasemi_dma_clear_flag - Sets a flag (event) to 0 433 /* pasemi_dma_clear_flag - Sets a flag (event) to 0
434 * @flag: Flag number to set inactive 434 * @flag: Flag number to set inactive
435 * 435 *
436 * Sets the flag provided to 0. 436 * Sets the flag provided to 0.
437 */ 437 */
438 void pasemi_dma_clear_flag(int flag) 438 void pasemi_dma_clear_flag(int flag)
439 { 439 {
440 BUG_ON(flag >= MAX_FLAGS); 440 BUG_ON(flag >= MAX_FLAGS);
441 if (flag < 32) 441 if (flag < 32)
442 pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 1 << flag); 442 pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 1 << flag);
443 else 443 else
444 pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 1 << flag); 444 pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 1 << flag);
445 } 445 }
446 EXPORT_SYMBOL(pasemi_dma_clear_flag); 446 EXPORT_SYMBOL(pasemi_dma_clear_flag);
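A sketch of the flag (event) lifecycle built from the four helpers above; my_use_event_flag() is a hypothetical caller, and the descriptor programming that actually consumes the flag is driver-specific and only hinted at in the comments.

static int my_use_event_flag(void)
{
	int flag;

	flag = pasemi_dma_alloc_flag();
	if (flag < 0)
		return flag;		/* -ENOSPC once all 64 flags are taken */

	pasemi_dma_clear_flag(flag);	/* start with the event deasserted */

	/* ... queue descriptors that raise or wait on this flag ... */
	pasemi_dma_set_flag(flag);	/* or raise the event by hand */

	pasemi_dma_free_flag(flag);
	return 0;
}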
447 447
448 /* pasemi_dma_alloc_fun - Allocate a function engine 448 /* pasemi_dma_alloc_fun - Allocate a function engine
449 * 449 *
450 * Allocates a function engine to use for crypto/checksum offload 450 * Allocates a function engine to use for crypto/checksum offload
451 * Returns allocated engine (0-8), < 0 on error. 451 * Returns allocated engine (0-8), < 0 on error.
452 */ 452 */
453 int pasemi_dma_alloc_fun(void) 453 int pasemi_dma_alloc_fun(void)
454 { 454 {
455 int bit; 455 int bit;
456 456
457 retry: 457 retry:
458 bit = find_next_bit(fun_free, MAX_FLAGS, 0); 458 bit = find_next_bit(fun_free, MAX_FLAGS, 0);
459 if (bit >= MAX_FLAGS) 459 if (bit >= MAX_FLAGS)
460 return -ENOSPC; 460 return -ENOSPC;
461 if (!test_and_clear_bit(bit, fun_free)) 461 if (!test_and_clear_bit(bit, fun_free))
462 goto retry; 462 goto retry;
463 463
464 return bit; 464 return bit;
465 } 465 }
466 EXPORT_SYMBOL(pasemi_dma_alloc_fun); 466 EXPORT_SYMBOL(pasemi_dma_alloc_fun);
467 467
468 468
469 /* pasemi_dma_free_fun - Deallocates a function engine 469 /* pasemi_dma_free_fun - Deallocates a function engine
470 * @fun: Engine number to deallocate 470 * @fun: Engine number to deallocate
471 * 471 *
472 * Frees up a function engine so it can be used for other purposes. 472 * Frees up a function engine so it can be used for other purposes.
473 */ 473 */
474 void pasemi_dma_free_fun(int fun) 474 void pasemi_dma_free_fun(int fun)
475 { 475 {
476 BUG_ON(test_bit(fun, fun_free)); 476 BUG_ON(test_bit(fun, fun_free));
477 BUG_ON(fun >= MAX_FLAGS); 477 BUG_ON(fun >= MAX_FLAGS);
478 set_bit(fun, fun_free); 478 set_bit(fun, fun_free);
479 } 479 }
480 EXPORT_SYMBOL(pasemi_dma_free_fun); 480 EXPORT_SYMBOL(pasemi_dma_free_fun);
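Correspondingly, a brief sketch for the function-engine pair above; my_use_fun_engine() is a hypothetical caller and the descriptor setup is left out.

static int my_use_fun_engine(void)
{
	int fun;

	fun = pasemi_dma_alloc_fun();
	if (fun < 0)
		return fun;
	/* ... point crypto/checksum descriptors at engine 'fun' ... */
	pasemi_dma_free_fun(fun);
	return 0;
}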
481 481
482 482
483 static void *map_onedev(struct pci_dev *p, int index) 483 static void *map_onedev(struct pci_dev *p, int index)
484 { 484 {
485 struct device_node *dn; 485 struct device_node *dn;
486 void __iomem *ret; 486 void __iomem *ret;
487 487
488 dn = pci_device_to_OF_node(p); 488 dn = pci_device_to_OF_node(p);
489 if (!dn) 489 if (!dn)
490 goto fallback; 490 goto fallback;
491 491
492 ret = of_iomap(dn, index); 492 ret = of_iomap(dn, index);
493 if (!ret) 493 if (!ret)
494 goto fallback; 494 goto fallback;
495 495
496 return ret; 496 return ret;
497 fallback: 497 fallback:
498 /* This is hardcoded and ugly, but we have some firmware versions 498 /* This is hardcoded and ugly, but we have some firmware versions
499 * that don't provide the register space in the device tree. Luckily 499 * that don't provide the register space in the device tree. Luckily
500 * they are at well-known locations so we can just do the math here. 500 * they are at well-known locations so we can just do the math here.
501 */ 501 */
502 return ioremap(0xe0000000 + (p->devfn << 12), 0x2000); 502 return ioremap(0xe0000000 + (p->devfn << 12), 0x2000);
503 } 503 }
504 504
505 /* pasemi_dma_init - Initialize the PA Semi DMA library 505 /* pasemi_dma_init - Initialize the PA Semi DMA library
506 * 506 *
507 * This function initializes the DMA library. It must be called before 507 * This function initializes the DMA library. It must be called before
508 * any other function in the library. 508 * any other function in the library.
509 * 509 *
510 * Returns 0 on success, errno on failure. 510 * Returns 0 on success, errno on failure.
511 */ 511 */
512 int pasemi_dma_init(void) 512 int pasemi_dma_init(void)
513 { 513 {
514 static DEFINE_SPINLOCK(init_lock); 514 static DEFINE_SPINLOCK(init_lock);
515 struct pci_dev *iob_pdev; 515 struct pci_dev *iob_pdev;
516 struct pci_dev *pdev; 516 struct pci_dev *pdev;
517 struct resource res; 517 struct resource res;
518 struct device_node *dn; 518 struct device_node *dn;
519 int i, intf, err = 0; 519 int i, intf, err = 0;
520 unsigned long timeout; 520 unsigned long timeout;
521 u32 tmp; 521 u32 tmp;
522 522
523 if (!machine_is(pasemi)) 523 if (!machine_is(pasemi))
524 return -ENODEV; 524 return -ENODEV;
525 525
526 spin_lock(&init_lock); 526 spin_lock(&init_lock);
527 527
528 /* Make sure we haven't already initialized */ 528 /* Make sure we haven't already initialized */
529 if (dma_pdev) 529 if (dma_pdev)
530 goto out; 530 goto out;
531 531
532 iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL); 532 iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
533 if (!iob_pdev) { 533 if (!iob_pdev) {
534 BUG(); 534 BUG();
535 printk(KERN_WARNING "Can't find I/O Bridge\n"); 535 printk(KERN_WARNING "Can't find I/O Bridge\n");
536 err = -ENODEV; 536 err = -ENODEV;
537 goto out; 537 goto out;
538 } 538 }
539 iob_regs = map_onedev(iob_pdev, 0); 539 iob_regs = map_onedev(iob_pdev, 0);
540 540
541 dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL); 541 dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
542 if (!dma_pdev) { 542 if (!dma_pdev) {
543 BUG(); 543 BUG();
544 printk(KERN_WARNING "Can't find DMA controller\n"); 544 printk(KERN_WARNING "Can't find DMA controller\n");
545 err = -ENODEV; 545 err = -ENODEV;
546 goto out; 546 goto out;
547 } 547 }
548 dma_regs = map_onedev(dma_pdev, 0); 548 dma_regs = map_onedev(dma_pdev, 0);
549 base_hw_irq = virq_to_hw(dma_pdev->irq); 549 base_hw_irq = virq_to_hw(dma_pdev->irq);
550 550
551 pci_read_config_dword(dma_pdev, PAS_DMA_CAP_TXCH, &tmp); 551 pci_read_config_dword(dma_pdev, PAS_DMA_CAP_TXCH, &tmp);
552 num_txch = (tmp & PAS_DMA_CAP_TXCH_TCHN_M) >> PAS_DMA_CAP_TXCH_TCHN_S; 552 num_txch = (tmp & PAS_DMA_CAP_TXCH_TCHN_M) >> PAS_DMA_CAP_TXCH_TCHN_S;
553 553
554 pci_read_config_dword(dma_pdev, PAS_DMA_CAP_RXCH, &tmp); 554 pci_read_config_dword(dma_pdev, PAS_DMA_CAP_RXCH, &tmp);
555 num_rxch = (tmp & PAS_DMA_CAP_RXCH_RCHN_M) >> PAS_DMA_CAP_RXCH_RCHN_S; 555 num_rxch = (tmp & PAS_DMA_CAP_RXCH_RCHN_M) >> PAS_DMA_CAP_RXCH_RCHN_S;
556 556
557 intf = 0; 557 intf = 0;
558 for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, NULL); 558 for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, NULL);
559 pdev; 559 pdev;
560 pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, pdev)) 560 pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, pdev))
561 mac_regs[intf++] = map_onedev(pdev, 0); 561 mac_regs[intf++] = map_onedev(pdev, 0);
562 562
563 pci_dev_put(pdev); 563 pci_dev_put(pdev);
564 564
565 for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, NULL); 565 for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, NULL);
566 pdev; 566 pdev;
567 pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, pdev)) 567 pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, pdev))
568 mac_regs[intf++] = map_onedev(pdev, 0); 568 mac_regs[intf++] = map_onedev(pdev, 0);
569 569
570 pci_dev_put(pdev); 570 pci_dev_put(pdev);
571 571
572 dn = pci_device_to_OF_node(iob_pdev); 572 dn = pci_device_to_OF_node(iob_pdev);
573 if (dn) 573 if (dn)
574 err = of_address_to_resource(dn, 1, &res); 574 err = of_address_to_resource(dn, 1, &res);
575 if (!dn || err) { 575 if (!dn || err) {
576 /* Fallback for old firmware */ 576 /* Fallback for old firmware */
577 res.start = 0xfd800000; 577 res.start = 0xfd800000;
578 res.end = res.start + 0x1000; 578 res.end = res.start + 0x1000;
579 } 579 }
580 dma_status = __ioremap(res.start, resource_size(&res), 0); 580 dma_status = __ioremap(res.start, resource_size(&res), 0);
581 pci_dev_put(iob_pdev); 581 pci_dev_put(iob_pdev);
582 582
583 for (i = 0; i < MAX_TXCH; i++) 583 for (i = 0; i < MAX_TXCH; i++)
584 __set_bit(i, txch_free); 584 __set_bit(i, txch_free);
585 585
586 for (i = 0; i < MAX_RXCH; i++) 586 for (i = 0; i < MAX_RXCH; i++)
587 __set_bit(i, rxch_free); 587 __set_bit(i, rxch_free);
588 588
589 timeout = jiffies + HZ; 589 timeout = jiffies + HZ;
590 pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, 0); 590 pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, 0);
591 while (pasemi_read_dma_reg(PAS_DMA_COM_RXSTA) & 1) { 591 while (pasemi_read_dma_reg(PAS_DMA_COM_RXSTA) & 1) {
592 if (time_after(jiffies, timeout)) { 592 if (time_after(jiffies, timeout)) {
593 pr_warning("Warning: Could not disable RX section\n"); 593 pr_warning("Warning: Could not disable RX section\n");
594 break; 594 break;
595 } 595 }
596 } 596 }
597 597
598 timeout = jiffies + HZ; 598 timeout = jiffies + HZ;
599 pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, 0); 599 pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, 0);
600 while (pasemi_read_dma_reg(PAS_DMA_COM_TXSTA) & 1) { 600 while (pasemi_read_dma_reg(PAS_DMA_COM_TXSTA) & 1) {
601 if (time_after(jiffies, timeout)) { 601 if (time_after(jiffies, timeout)) {
602 pr_warning("Warning: Could not disable TX section\n"); 602 pr_warning("Warning: Could not disable TX section\n");
603 break; 603 break;
604 } 604 }
605 } 605 }
606 606
607 /* setup resource allocations for the different DMA sections */ 607 /* setup resource allocations for the different DMA sections */
608 tmp = pasemi_read_dma_reg(PAS_DMA_COM_CFG); 608 tmp = pasemi_read_dma_reg(PAS_DMA_COM_CFG);
609 pasemi_write_dma_reg(PAS_DMA_COM_CFG, tmp | 0x18000000); 609 pasemi_write_dma_reg(PAS_DMA_COM_CFG, tmp | 0x18000000);
610 610
611 /* enable tx section */ 611 /* enable tx section */
612 pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN); 612 pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);
613 613
614 /* enable rx section */ 614 /* enable rx section */
615 pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN); 615 pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN);
616 616
617 for (i = 0; i < MAX_FLAGS; i++) 617 for (i = 0; i < MAX_FLAGS; i++)
618 __set_bit(i, flags_free); 618 __set_bit(i, flags_free);
619 619
620 for (i = 0; i < MAX_FUN; i++) 620 for (i = 0; i < MAX_FUN; i++)
621 __set_bit(i, fun_free); 621 __set_bit(i, fun_free);
622 622
623 /* clear all status flags */ 623 /* clear all status flags */
624 pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 0xffffffff); 624 pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 0xffffffff);
625 pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 0xffffffff); 625 pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 0xffffffff);
626 626
627 printk(KERN_INFO "PA Semi PWRficient DMA library initialized " 627 printk(KERN_INFO "PA Semi PWRficient DMA library initialized "
628 "(%d tx, %d rx channels)\n", num_txch, num_rxch); 628 "(%d tx, %d rx channels)\n", num_txch, num_rxch);
629 629
630 out: 630 out:
631 spin_unlock(&init_lock); 631 spin_unlock(&init_lock);
632 return err; 632 return err;
633 } 633 }
634 EXPORT_SYMBOL(pasemi_dma_init); 634 EXPORT_SYMBOL(pasemi_dma_init);
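A sketch of how a client driver might call this from its own init path; my_driver_init() is hypothetical, and the note about repeated calls follows from the dma_pdev check taken under init_lock above.

static int __init my_driver_init(void)
{
	int err;

	err = pasemi_dma_init();	/* safe to call more than once */
	if (err)
		return err;		/* -ENODEV off PA Semi hardware */

	/* ... allocate channels, buffers and flags as sketched above ... */
	return 0;
}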
635 635
arch/powerpc/platforms/powermac/low_i2c.c
1 /* 1 /*
2 * arch/powerpc/platforms/powermac/low_i2c.c 2 * arch/powerpc/platforms/powermac/low_i2c.c
3 * 3 *
4 * Copyright (C) 2003-2005 Ben. Herrenschmidt (benh@kernel.crashing.org) 4 * Copyright (C) 2003-2005 Ben. Herrenschmidt (benh@kernel.crashing.org)
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 * 10 *
11 * The linux i2c layer isn't completely suitable for our needs for various 11 * The linux i2c layer isn't completely suitable for our needs for various
12 * reasons ranging from too late initialisation to semantics not perfectly 12 * reasons ranging from too late initialisation to semantics not perfectly
13 * matching some requirements of the apple platform functions etc... 13 * matching some requirements of the apple platform functions etc...
14 * 14 *
15 * This file thus provides a simple low level unified i2c interface for 15 * This file thus provides a simple low level unified i2c interface for
16 * powermac that covers the various types of i2c busses used in Apple machines. 16 * powermac that covers the various types of i2c busses used in Apple machines.
17 * For now, keywest, PMU and SMU, though we could add Cuda, or other bit 17 * For now, keywest, PMU and SMU, though we could add Cuda, or other bit
18 * banging busses found on older chipsets in earlier machines if we ever need 18 * banging busses found on older chipsets in earlier machines if we ever need
19 * one of them. 19 * one of them.
20 * 20 *
21 * The drivers in this file are synchronous/blocking. In addition, the 21 * The drivers in this file are synchronous/blocking. In addition, the
22 * keywest one is fairly slow due to the use of msleep instead of interrupts 22 * keywest one is fairly slow due to the use of msleep instead of interrupts
23 * as the interrupt is currently used by i2c-keywest. In the long run, we 23 * as the interrupt is currently used by i2c-keywest. In the long run, we
24 * might want to get rid of those high-level interfaces to linux i2c layer 24 * might want to get rid of those high-level interfaces to linux i2c layer
25 * either completely (converting all drivers) or replacing them all with a 25 * either completely (converting all drivers) or replacing them all with a
26 * single stub driver on top of this one. Once done, the interrupt will be 26 * single stub driver on top of this one. Once done, the interrupt will be
27 * available for our use. 27 * available for our use.
28 */ 28 */
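As a rough sketch of what a consumer of this interface looks like (assuming the usual declarations from <asm/pmac_low_i2c.h>; my_read_reg() and its parameters are illustrative, not taken from this file): read one byte from a sub-addressed register of a device whose device-tree node is already known. The address/direction byte packs the 7-bit address shifted left by one, with bit 0 set for a read, matching how addrdir is consumed later in this file.

#include <linux/errno.h>
#include <asm/prom.h>
#include <asm/pmac_low_i2c.h>

static int my_read_reg(struct device_node *dev, u8 addr, u8 reg, u8 *val)
{
	struct pmac_i2c_bus *bus;
	int rc;

	bus = pmac_i2c_find_bus(dev);
	if (!bus)
		return -ENODEV;

	rc = pmac_i2c_open(bus, 0);	/* 0 = interrupt-driven, not polled */
	if (rc)
		return rc;

	/* default mode is std; combined mode is needed for a sub-addressed read */
	pmac_i2c_setmode(bus, pmac_i2c_mode_combined);
	rc = pmac_i2c_xfer(bus, (addr << 1) | 1, 1, reg, val, 1);
	pmac_i2c_close(bus);
	return rc;
}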
29 29
30 #undef DEBUG 30 #undef DEBUG
31 #undef DEBUG_LOW 31 #undef DEBUG_LOW
32 32
33 #include <linux/types.h> 33 #include <linux/types.h>
34 #include <linux/sched.h> 34 #include <linux/sched.h>
35 #include <linux/init.h> 35 #include <linux/init.h>
36 #include <linux/module.h> 36 #include <linux/export.h>
37 #include <linux/adb.h> 37 #include <linux/adb.h>
38 #include <linux/pmu.h> 38 #include <linux/pmu.h>
39 #include <linux/delay.h> 39 #include <linux/delay.h>
40 #include <linux/completion.h> 40 #include <linux/completion.h>
41 #include <linux/platform_device.h> 41 #include <linux/platform_device.h>
42 #include <linux/interrupt.h> 42 #include <linux/interrupt.h>
43 #include <linux/timer.h> 43 #include <linux/timer.h>
44 #include <linux/mutex.h> 44 #include <linux/mutex.h>
45 #include <linux/i2c.h> 45 #include <linux/i2c.h>
46 #include <linux/slab.h> 46 #include <linux/slab.h>
47 #include <asm/keylargo.h> 47 #include <asm/keylargo.h>
48 #include <asm/uninorth.h> 48 #include <asm/uninorth.h>
49 #include <asm/io.h> 49 #include <asm/io.h>
50 #include <asm/prom.h> 50 #include <asm/prom.h>
51 #include <asm/machdep.h> 51 #include <asm/machdep.h>
52 #include <asm/smu.h> 52 #include <asm/smu.h>
53 #include <asm/pmac_pfunc.h> 53 #include <asm/pmac_pfunc.h>
54 #include <asm/pmac_low_i2c.h> 54 #include <asm/pmac_low_i2c.h>
55 55
56 #ifdef DEBUG 56 #ifdef DEBUG
57 #define DBG(x...) do {\ 57 #define DBG(x...) do {\
58 printk(KERN_DEBUG "low_i2c:" x); \ 58 printk(KERN_DEBUG "low_i2c:" x); \
59 } while(0) 59 } while(0)
60 #else 60 #else
61 #define DBG(x...) 61 #define DBG(x...)
62 #endif 62 #endif
63 63
64 #ifdef DEBUG_LOW 64 #ifdef DEBUG_LOW
65 #define DBG_LOW(x...) do {\ 65 #define DBG_LOW(x...) do {\
66 printk(KERN_DEBUG "low_i2c:" x); \ 66 printk(KERN_DEBUG "low_i2c:" x); \
67 } while(0) 67 } while(0)
68 #else 68 #else
69 #define DBG_LOW(x...) 69 #define DBG_LOW(x...)
70 #endif 70 #endif
71 71
72 72
73 static int pmac_i2c_force_poll = 1; 73 static int pmac_i2c_force_poll = 1;
74 74
75 /* 75 /*
76 * A bus structure. Each bus in the system has such a structure associated. 76 * A bus structure. Each bus in the system has such a structure associated.
77 */ 77 */
78 struct pmac_i2c_bus 78 struct pmac_i2c_bus
79 { 79 {
80 struct list_head link; 80 struct list_head link;
81 struct device_node *controller; 81 struct device_node *controller;
82 struct device_node *busnode; 82 struct device_node *busnode;
83 int type; 83 int type;
84 int flags; 84 int flags;
85 struct i2c_adapter adapter; 85 struct i2c_adapter adapter;
86 void *hostdata; 86 void *hostdata;
87 int channel; /* some hosts have multiple */ 87 int channel; /* some hosts have multiple */
88 int mode; /* current mode */ 88 int mode; /* current mode */
89 struct mutex mutex; 89 struct mutex mutex;
90 int opened; 90 int opened;
91 int polled; /* open mode */ 91 int polled; /* open mode */
92 struct platform_device *platform_dev; 92 struct platform_device *platform_dev;
93 93
94 /* ops */ 94 /* ops */
95 int (*open)(struct pmac_i2c_bus *bus); 95 int (*open)(struct pmac_i2c_bus *bus);
96 void (*close)(struct pmac_i2c_bus *bus); 96 void (*close)(struct pmac_i2c_bus *bus);
97 int (*xfer)(struct pmac_i2c_bus *bus, u8 addrdir, int subsize, 97 int (*xfer)(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
98 u32 subaddr, u8 *data, int len); 98 u32 subaddr, u8 *data, int len);
99 }; 99 };
100 100
101 static LIST_HEAD(pmac_i2c_busses); 101 static LIST_HEAD(pmac_i2c_busses);
102 102
103 /* 103 /*
104 * Keywest implementation 104 * Keywest implementation
105 */ 105 */
106 106
107 struct pmac_i2c_host_kw 107 struct pmac_i2c_host_kw
108 { 108 {
109 struct mutex mutex; /* Access mutex for use by 109 struct mutex mutex; /* Access mutex for use by
110 * i2c-keywest */ 110 * i2c-keywest */
111 void __iomem *base; /* register base address */ 111 void __iomem *base; /* register base address */
112 int bsteps; /* register stepping */ 112 int bsteps; /* register stepping */
113 int speed; /* speed */ 113 int speed; /* speed */
114 int irq; 114 int irq;
115 u8 *data; 115 u8 *data;
116 unsigned len; 116 unsigned len;
117 int state; 117 int state;
118 int rw; 118 int rw;
119 int polled; 119 int polled;
120 int result; 120 int result;
121 struct completion complete; 121 struct completion complete;
122 spinlock_t lock; 122 spinlock_t lock;
123 struct timer_list timeout_timer; 123 struct timer_list timeout_timer;
124 }; 124 };
125 125
126 /* Register indices */ 126 /* Register indices */
127 typedef enum { 127 typedef enum {
128 reg_mode = 0, 128 reg_mode = 0,
129 reg_control, 129 reg_control,
130 reg_status, 130 reg_status,
131 reg_isr, 131 reg_isr,
132 reg_ier, 132 reg_ier,
133 reg_addr, 133 reg_addr,
134 reg_subaddr, 134 reg_subaddr,
135 reg_data 135 reg_data
136 } reg_t; 136 } reg_t;
137 137
138 /* The Tumbler audio equalizer can be really slow sometimes */ 138 /* The Tumbler audio equalizer can be really slow sometimes */
139 #define KW_POLL_TIMEOUT (2*HZ) 139 #define KW_POLL_TIMEOUT (2*HZ)
140 140
141 /* Mode register */ 141 /* Mode register */
142 #define KW_I2C_MODE_100KHZ 0x00 142 #define KW_I2C_MODE_100KHZ 0x00
143 #define KW_I2C_MODE_50KHZ 0x01 143 #define KW_I2C_MODE_50KHZ 0x01
144 #define KW_I2C_MODE_25KHZ 0x02 144 #define KW_I2C_MODE_25KHZ 0x02
145 #define KW_I2C_MODE_DUMB 0x00 145 #define KW_I2C_MODE_DUMB 0x00
146 #define KW_I2C_MODE_STANDARD 0x04 146 #define KW_I2C_MODE_STANDARD 0x04
147 #define KW_I2C_MODE_STANDARDSUB 0x08 147 #define KW_I2C_MODE_STANDARDSUB 0x08
148 #define KW_I2C_MODE_COMBINED 0x0C 148 #define KW_I2C_MODE_COMBINED 0x0C
149 #define KW_I2C_MODE_MODE_MASK 0x0C 149 #define KW_I2C_MODE_MODE_MASK 0x0C
150 #define KW_I2C_MODE_CHAN_MASK 0xF0 150 #define KW_I2C_MODE_CHAN_MASK 0xF0
151 151
152 /* Control register */ 152 /* Control register */
153 #define KW_I2C_CTL_AAK 0x01 153 #define KW_I2C_CTL_AAK 0x01
154 #define KW_I2C_CTL_XADDR 0x02 154 #define KW_I2C_CTL_XADDR 0x02
155 #define KW_I2C_CTL_STOP 0x04 155 #define KW_I2C_CTL_STOP 0x04
156 #define KW_I2C_CTL_START 0x08 156 #define KW_I2C_CTL_START 0x08
157 157
158 /* Status register */ 158 /* Status register */
159 #define KW_I2C_STAT_BUSY 0x01 159 #define KW_I2C_STAT_BUSY 0x01
160 #define KW_I2C_STAT_LAST_AAK 0x02 160 #define KW_I2C_STAT_LAST_AAK 0x02
161 #define KW_I2C_STAT_LAST_RW 0x04 161 #define KW_I2C_STAT_LAST_RW 0x04
162 #define KW_I2C_STAT_SDA 0x08 162 #define KW_I2C_STAT_SDA 0x08
163 #define KW_I2C_STAT_SCL 0x10 163 #define KW_I2C_STAT_SCL 0x10
164 164
165 /* IER & ISR registers */ 165 /* IER & ISR registers */
166 #define KW_I2C_IRQ_DATA 0x01 166 #define KW_I2C_IRQ_DATA 0x01
167 #define KW_I2C_IRQ_ADDR 0x02 167 #define KW_I2C_IRQ_ADDR 0x02
168 #define KW_I2C_IRQ_STOP 0x04 168 #define KW_I2C_IRQ_STOP 0x04
169 #define KW_I2C_IRQ_START 0x08 169 #define KW_I2C_IRQ_START 0x08
170 #define KW_I2C_IRQ_MASK 0x0F 170 #define KW_I2C_IRQ_MASK 0x0F
171 171
172 /* State machine states */ 172 /* State machine states */
173 enum { 173 enum {
174 state_idle, 174 state_idle,
175 state_addr, 175 state_addr,
176 state_read, 176 state_read,
177 state_write, 177 state_write,
178 state_stop, 178 state_stop,
179 state_dead 179 state_dead
180 }; 180 };
181 181
182 #define WRONG_STATE(name) do {\ 182 #define WRONG_STATE(name) do {\
183 printk(KERN_DEBUG "KW: wrong state. Got %s, state: %s " \ 183 printk(KERN_DEBUG "KW: wrong state. Got %s, state: %s " \
184 "(isr: %02x)\n", \ 184 "(isr: %02x)\n", \
185 name, __kw_state_names[host->state], isr); \ 185 name, __kw_state_names[host->state], isr); \
186 } while(0) 186 } while(0)
187 187
188 static const char *__kw_state_names[] = { 188 static const char *__kw_state_names[] = {
189 "state_idle", 189 "state_idle",
190 "state_addr", 190 "state_addr",
191 "state_read", 191 "state_read",
192 "state_write", 192 "state_write",
193 "state_stop", 193 "state_stop",
194 "state_dead" 194 "state_dead"
195 }; 195 };
196 196
197 static inline u8 __kw_read_reg(struct pmac_i2c_host_kw *host, reg_t reg) 197 static inline u8 __kw_read_reg(struct pmac_i2c_host_kw *host, reg_t reg)
198 { 198 {
199 return readb(host->base + (((unsigned int)reg) << host->bsteps)); 199 return readb(host->base + (((unsigned int)reg) << host->bsteps));
200 } 200 }
201 201
202 static inline void __kw_write_reg(struct pmac_i2c_host_kw *host, 202 static inline void __kw_write_reg(struct pmac_i2c_host_kw *host,
203 reg_t reg, u8 val) 203 reg_t reg, u8 val)
204 { 204 {
205 writeb(val, host->base + (((unsigned)reg) << host->bsteps)); 205 writeb(val, host->base + (((unsigned)reg) << host->bsteps));
206 (void)__kw_read_reg(host, reg_subaddr); 206 (void)__kw_read_reg(host, reg_subaddr);
207 } 207 }
208 208
209 #define kw_write_reg(reg, val) __kw_write_reg(host, reg, val) 209 #define kw_write_reg(reg, val) __kw_write_reg(host, reg, val)
210 #define kw_read_reg(reg) __kw_read_reg(host, reg) 210 #define kw_read_reg(reg) __kw_read_reg(host, reg)
211 211
212 static u8 kw_i2c_wait_interrupt(struct pmac_i2c_host_kw *host) 212 static u8 kw_i2c_wait_interrupt(struct pmac_i2c_host_kw *host)
213 { 213 {
214 int i, j; 214 int i, j;
215 u8 isr; 215 u8 isr;
216 216
217 for (i = 0; i < 1000; i++) { 217 for (i = 0; i < 1000; i++) {
218 isr = kw_read_reg(reg_isr) & KW_I2C_IRQ_MASK; 218 isr = kw_read_reg(reg_isr) & KW_I2C_IRQ_MASK;
219 if (isr != 0) 219 if (isr != 0)
220 return isr; 220 return isr;
221 221
222 /* This code is used with the timebase frozen, we cannot rely 222 /* This code is used with the timebase frozen, we cannot rely
223 * on udelay nor schedule when in polled mode ! 223 * on udelay nor schedule when in polled mode !
224 * For now, just use a bogus loop.... 224 * For now, just use a bogus loop....
225 */ 225 */
226 if (host->polled) { 226 if (host->polled) {
227 for (j = 1; j < 100000; j++) 227 for (j = 1; j < 100000; j++)
228 mb(); 228 mb();
229 } else 229 } else
230 msleep(1); 230 msleep(1);
231 } 231 }
232 return isr; 232 return isr;
233 } 233 }
234 234
235 static void kw_i2c_do_stop(struct pmac_i2c_host_kw *host, int result) 235 static void kw_i2c_do_stop(struct pmac_i2c_host_kw *host, int result)
236 { 236 {
237 kw_write_reg(reg_control, KW_I2C_CTL_STOP); 237 kw_write_reg(reg_control, KW_I2C_CTL_STOP);
238 host->state = state_stop; 238 host->state = state_stop;
239 host->result = result; 239 host->result = result;
240 } 240 }
241 241
242 242
243 static void kw_i2c_handle_interrupt(struct pmac_i2c_host_kw *host, u8 isr) 243 static void kw_i2c_handle_interrupt(struct pmac_i2c_host_kw *host, u8 isr)
244 { 244 {
245 u8 ack; 245 u8 ack;
246 246
247 DBG_LOW("kw_handle_interrupt(%s, isr: %x)\n", 247 DBG_LOW("kw_handle_interrupt(%s, isr: %x)\n",
248 __kw_state_names[host->state], isr); 248 __kw_state_names[host->state], isr);
249 249
250 if (host->state == state_idle) { 250 if (host->state == state_idle) {
251 printk(KERN_WARNING "low_i2c: Keywest got an out of state" 251 printk(KERN_WARNING "low_i2c: Keywest got an out of state"
252 " interrupt, ignoring\n"); 252 " interrupt, ignoring\n");
253 kw_write_reg(reg_isr, isr); 253 kw_write_reg(reg_isr, isr);
254 return; 254 return;
255 } 255 }
256 256
257 if (isr == 0) { 257 if (isr == 0) {
258 printk(KERN_WARNING "low_i2c: Timeout in i2c transfer" 258 printk(KERN_WARNING "low_i2c: Timeout in i2c transfer"
259 " on keywest !\n"); 259 " on keywest !\n");
260 if (host->state != state_stop) { 260 if (host->state != state_stop) {
261 kw_i2c_do_stop(host, -EIO); 261 kw_i2c_do_stop(host, -EIO);
262 return; 262 return;
263 } 263 }
264 ack = kw_read_reg(reg_status); 264 ack = kw_read_reg(reg_status);
265 if (ack & KW_I2C_STAT_BUSY) 265 if (ack & KW_I2C_STAT_BUSY)
266 kw_write_reg(reg_status, 0); 266 kw_write_reg(reg_status, 0);
267 host->state = state_idle; 267 host->state = state_idle;
268 kw_write_reg(reg_ier, 0x00); 268 kw_write_reg(reg_ier, 0x00);
269 if (!host->polled) 269 if (!host->polled)
270 complete(&host->complete); 270 complete(&host->complete);
271 return; 271 return;
272 } 272 }
273 273
274 if (isr & KW_I2C_IRQ_ADDR) { 274 if (isr & KW_I2C_IRQ_ADDR) {
275 ack = kw_read_reg(reg_status); 275 ack = kw_read_reg(reg_status);
276 if (host->state != state_addr) { 276 if (host->state != state_addr) {
277 WRONG_STATE("KW_I2C_IRQ_ADDR"); 277 WRONG_STATE("KW_I2C_IRQ_ADDR");
278 kw_i2c_do_stop(host, -EIO); 278 kw_i2c_do_stop(host, -EIO);
279 } 279 }
280 if ((ack & KW_I2C_STAT_LAST_AAK) == 0) { 280 if ((ack & KW_I2C_STAT_LAST_AAK) == 0) {
281 host->result = -ENXIO; 281 host->result = -ENXIO;
282 host->state = state_stop; 282 host->state = state_stop;
283 DBG_LOW("KW: NAK on address\n"); 283 DBG_LOW("KW: NAK on address\n");
284 } else { 284 } else {
285 if (host->len == 0) 285 if (host->len == 0)
286 kw_i2c_do_stop(host, 0); 286 kw_i2c_do_stop(host, 0);
287 else if (host->rw) { 287 else if (host->rw) {
288 host->state = state_read; 288 host->state = state_read;
289 if (host->len > 1) 289 if (host->len > 1)
290 kw_write_reg(reg_control, 290 kw_write_reg(reg_control,
291 KW_I2C_CTL_AAK); 291 KW_I2C_CTL_AAK);
292 } else { 292 } else {
293 host->state = state_write; 293 host->state = state_write;
294 kw_write_reg(reg_data, *(host->data++)); 294 kw_write_reg(reg_data, *(host->data++));
295 host->len--; 295 host->len--;
296 } 296 }
297 } 297 }
298 kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR); 298 kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR);
299 } 299 }
300 300
301 if (isr & KW_I2C_IRQ_DATA) { 301 if (isr & KW_I2C_IRQ_DATA) {
302 if (host->state == state_read) { 302 if (host->state == state_read) {
303 *(host->data++) = kw_read_reg(reg_data); 303 *(host->data++) = kw_read_reg(reg_data);
304 host->len--; 304 host->len--;
305 kw_write_reg(reg_isr, KW_I2C_IRQ_DATA); 305 kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
306 if (host->len == 0) 306 if (host->len == 0)
307 host->state = state_stop; 307 host->state = state_stop;
308 else if (host->len == 1) 308 else if (host->len == 1)
309 kw_write_reg(reg_control, 0); 309 kw_write_reg(reg_control, 0);
310 } else if (host->state == state_write) { 310 } else if (host->state == state_write) {
311 ack = kw_read_reg(reg_status); 311 ack = kw_read_reg(reg_status);
312 if ((ack & KW_I2C_STAT_LAST_AAK) == 0) { 312 if ((ack & KW_I2C_STAT_LAST_AAK) == 0) {
313 DBG_LOW("KW: nack on data write\n"); 313 DBG_LOW("KW: nack on data write\n");
314 host->result = -EFBIG; 314 host->result = -EFBIG;
315 host->state = state_stop; 315 host->state = state_stop;
316 } else if (host->len) { 316 } else if (host->len) {
317 kw_write_reg(reg_data, *(host->data++)); 317 kw_write_reg(reg_data, *(host->data++));
318 host->len--; 318 host->len--;
319 } else 319 } else
320 kw_i2c_do_stop(host, 0); 320 kw_i2c_do_stop(host, 0);
321 } else { 321 } else {
322 WRONG_STATE("KW_I2C_IRQ_DATA"); 322 WRONG_STATE("KW_I2C_IRQ_DATA");
323 if (host->state != state_stop) 323 if (host->state != state_stop)
324 kw_i2c_do_stop(host, -EIO); 324 kw_i2c_do_stop(host, -EIO);
325 } 325 }
326 kw_write_reg(reg_isr, KW_I2C_IRQ_DATA); 326 kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
327 } 327 }
328 328
329 if (isr & KW_I2C_IRQ_STOP) { 329 if (isr & KW_I2C_IRQ_STOP) {
330 kw_write_reg(reg_isr, KW_I2C_IRQ_STOP); 330 kw_write_reg(reg_isr, KW_I2C_IRQ_STOP);
331 if (host->state != state_stop) { 331 if (host->state != state_stop) {
332 WRONG_STATE("KW_I2C_IRQ_STOP"); 332 WRONG_STATE("KW_I2C_IRQ_STOP");
333 host->result = -EIO; 333 host->result = -EIO;
334 } 334 }
335 host->state = state_idle; 335 host->state = state_idle;
336 if (!host->polled) 336 if (!host->polled)
337 complete(&host->complete); 337 complete(&host->complete);
338 } 338 }
339 339
340 /* Below should only happen in manual mode which we don't use ... */ 340 /* Below should only happen in manual mode which we don't use ... */
341 if (isr & KW_I2C_IRQ_START) 341 if (isr & KW_I2C_IRQ_START)
342 kw_write_reg(reg_isr, KW_I2C_IRQ_START); 342 kw_write_reg(reg_isr, KW_I2C_IRQ_START);
343 343
344 } 344 }
345 345
346 /* Interrupt handler */ 346 /* Interrupt handler */
347 static irqreturn_t kw_i2c_irq(int irq, void *dev_id) 347 static irqreturn_t kw_i2c_irq(int irq, void *dev_id)
348 { 348 {
349 struct pmac_i2c_host_kw *host = dev_id; 349 struct pmac_i2c_host_kw *host = dev_id;
350 unsigned long flags; 350 unsigned long flags;
351 351
352 spin_lock_irqsave(&host->lock, flags); 352 spin_lock_irqsave(&host->lock, flags);
353 del_timer(&host->timeout_timer); 353 del_timer(&host->timeout_timer);
354 kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr)); 354 kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr));
355 if (host->state != state_idle) { 355 if (host->state != state_idle) {
356 host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT; 356 host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
357 add_timer(&host->timeout_timer); 357 add_timer(&host->timeout_timer);
358 } 358 }
359 spin_unlock_irqrestore(&host->lock, flags); 359 spin_unlock_irqrestore(&host->lock, flags);
360 return IRQ_HANDLED; 360 return IRQ_HANDLED;
361 } 361 }
362 362
363 static void kw_i2c_timeout(unsigned long data) 363 static void kw_i2c_timeout(unsigned long data)
364 { 364 {
365 struct pmac_i2c_host_kw *host = (struct pmac_i2c_host_kw *)data; 365 struct pmac_i2c_host_kw *host = (struct pmac_i2c_host_kw *)data;
366 unsigned long flags; 366 unsigned long flags;
367 367
368 spin_lock_irqsave(&host->lock, flags); 368 spin_lock_irqsave(&host->lock, flags);
369 kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr)); 369 kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr));
370 if (host->state != state_idle) { 370 if (host->state != state_idle) {
371 host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT; 371 host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
372 add_timer(&host->timeout_timer); 372 add_timer(&host->timeout_timer);
373 } 373 }
374 spin_unlock_irqrestore(&host->lock, flags); 374 spin_unlock_irqrestore(&host->lock, flags);
375 } 375 }
376 376
377 static int kw_i2c_open(struct pmac_i2c_bus *bus) 377 static int kw_i2c_open(struct pmac_i2c_bus *bus)
378 { 378 {
379 struct pmac_i2c_host_kw *host = bus->hostdata; 379 struct pmac_i2c_host_kw *host = bus->hostdata;
380 mutex_lock(&host->mutex); 380 mutex_lock(&host->mutex);
381 return 0; 381 return 0;
382 } 382 }
383 383
384 static void kw_i2c_close(struct pmac_i2c_bus *bus) 384 static void kw_i2c_close(struct pmac_i2c_bus *bus)
385 { 385 {
386 struct pmac_i2c_host_kw *host = bus->hostdata; 386 struct pmac_i2c_host_kw *host = bus->hostdata;
387 mutex_unlock(&host->mutex); 387 mutex_unlock(&host->mutex);
388 } 388 }
389 389
390 static int kw_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize, 390 static int kw_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
391 u32 subaddr, u8 *data, int len) 391 u32 subaddr, u8 *data, int len)
392 { 392 {
393 struct pmac_i2c_host_kw *host = bus->hostdata; 393 struct pmac_i2c_host_kw *host = bus->hostdata;
394 u8 mode_reg = host->speed; 394 u8 mode_reg = host->speed;
395 int use_irq = host->irq != NO_IRQ && !bus->polled; 395 int use_irq = host->irq != NO_IRQ && !bus->polled;
396 396
397 /* Setup mode & subaddress if any */ 397 /* Setup mode & subaddress if any */
398 switch(bus->mode) { 398 switch(bus->mode) {
399 case pmac_i2c_mode_dumb: 399 case pmac_i2c_mode_dumb:
400 return -EINVAL; 400 return -EINVAL;
401 case pmac_i2c_mode_std: 401 case pmac_i2c_mode_std:
402 mode_reg |= KW_I2C_MODE_STANDARD; 402 mode_reg |= KW_I2C_MODE_STANDARD;
403 if (subsize != 0) 403 if (subsize != 0)
404 return -EINVAL; 404 return -EINVAL;
405 break; 405 break;
406 case pmac_i2c_mode_stdsub: 406 case pmac_i2c_mode_stdsub:
407 mode_reg |= KW_I2C_MODE_STANDARDSUB; 407 mode_reg |= KW_I2C_MODE_STANDARDSUB;
408 if (subsize != 1) 408 if (subsize != 1)
409 return -EINVAL; 409 return -EINVAL;
410 break; 410 break;
411 case pmac_i2c_mode_combined: 411 case pmac_i2c_mode_combined:
412 mode_reg |= KW_I2C_MODE_COMBINED; 412 mode_reg |= KW_I2C_MODE_COMBINED;
413 if (subsize != 1) 413 if (subsize != 1)
414 return -EINVAL; 414 return -EINVAL;
415 break; 415 break;
416 } 416 }
417 417
418 /* Setup channel & clear pending irqs */ 418 /* Setup channel & clear pending irqs */
419 kw_write_reg(reg_isr, kw_read_reg(reg_isr)); 419 kw_write_reg(reg_isr, kw_read_reg(reg_isr));
420 kw_write_reg(reg_mode, mode_reg | (bus->channel << 4)); 420 kw_write_reg(reg_mode, mode_reg | (bus->channel << 4));
421 kw_write_reg(reg_status, 0); 421 kw_write_reg(reg_status, 0);
422 422
423 /* Set up address and r/w bit, strip possible stale bus number from 423 /* Set up address and r/w bit, strip possible stale bus number from
424 * address top bits 424 * address top bits
425 */ 425 */
426 kw_write_reg(reg_addr, addrdir & 0xff); 426 kw_write_reg(reg_addr, addrdir & 0xff);
427 427
428 /* Set up the sub address */ 428 /* Set up the sub address */
429 if ((mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_STANDARDSUB 429 if ((mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_STANDARDSUB
430 || (mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_COMBINED) 430 || (mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_COMBINED)
431 kw_write_reg(reg_subaddr, subaddr); 431 kw_write_reg(reg_subaddr, subaddr);
432 432
433 /* Prepare for async operations */ 433 /* Prepare for async operations */
434 host->data = data; 434 host->data = data;
435 host->len = len; 435 host->len = len;
436 host->state = state_addr; 436 host->state = state_addr;
437 host->result = 0; 437 host->result = 0;
438 host->rw = (addrdir & 1); 438 host->rw = (addrdir & 1);
439 host->polled = bus->polled; 439 host->polled = bus->polled;
440 440
441 /* Enable interrupt if not using polled mode and interrupt is 441 /* Enable interrupt if not using polled mode and interrupt is
442 * available 442 * available
443 */ 443 */
444 if (use_irq) { 444 if (use_irq) {
445 /* Clear completion */ 445 /* Clear completion */
446 INIT_COMPLETION(host->complete); 446 INIT_COMPLETION(host->complete);
447 /* Ack stale interrupts */ 447 /* Ack stale interrupts */
448 kw_write_reg(reg_isr, kw_read_reg(reg_isr)); 448 kw_write_reg(reg_isr, kw_read_reg(reg_isr));
449 /* Arm timeout */ 449 /* Arm timeout */
450 host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT; 450 host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
451 add_timer(&host->timeout_timer); 451 add_timer(&host->timeout_timer);
452 /* Enable emission */ 452 /* Enable emission */
453 kw_write_reg(reg_ier, KW_I2C_IRQ_MASK); 453 kw_write_reg(reg_ier, KW_I2C_IRQ_MASK);
454 } 454 }
455 455
456 /* Start sending address */ 456 /* Start sending address */
457 kw_write_reg(reg_control, KW_I2C_CTL_XADDR); 457 kw_write_reg(reg_control, KW_I2C_CTL_XADDR);
458 458
459 /* Wait for completion */ 459 /* Wait for completion */
460 if (use_irq) 460 if (use_irq)
461 wait_for_completion(&host->complete); 461 wait_for_completion(&host->complete);
462 else { 462 else {
463 while(host->state != state_idle) { 463 while(host->state != state_idle) {
464 unsigned long flags; 464 unsigned long flags;
465 465
466 u8 isr = kw_i2c_wait_interrupt(host); 466 u8 isr = kw_i2c_wait_interrupt(host);
467 spin_lock_irqsave(&host->lock, flags); 467 spin_lock_irqsave(&host->lock, flags);
468 kw_i2c_handle_interrupt(host, isr); 468 kw_i2c_handle_interrupt(host, isr);
469 spin_unlock_irqrestore(&host->lock, flags); 469 spin_unlock_irqrestore(&host->lock, flags);
470 } 470 }
471 } 471 }
472 472
473 /* Disable emission */ 473 /* Disable emission */
474 kw_write_reg(reg_ier, 0); 474 kw_write_reg(reg_ier, 0);
475 475
476 return host->result; 476 return host->result;
477 } 477 }
478 478
479 static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np) 479 static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np)
480 { 480 {
481 struct pmac_i2c_host_kw *host; 481 struct pmac_i2c_host_kw *host;
482 const u32 *psteps, *prate, *addrp; 482 const u32 *psteps, *prate, *addrp;
483 u32 steps; 483 u32 steps;
484 484
485 host = kzalloc(sizeof(struct pmac_i2c_host_kw), GFP_KERNEL); 485 host = kzalloc(sizeof(struct pmac_i2c_host_kw), GFP_KERNEL);
486 if (host == NULL) { 486 if (host == NULL) {
487 printk(KERN_ERR "low_i2c: Can't allocate host for %s\n", 487 printk(KERN_ERR "low_i2c: Can't allocate host for %s\n",
488 np->full_name); 488 np->full_name);
489 return NULL; 489 return NULL;
490 } 490 }
491 491
492 /* Apple is kind enough to provide a valid AAPL,address property 492 /* Apple is kind enough to provide a valid AAPL,address property
493 * on all i2c keywest nodes so far ... we would have to fallback 493 * on all i2c keywest nodes so far ... we would have to fallback
494 * to macio parsing if that wasn't the case 494 * to macio parsing if that wasn't the case
495 */ 495 */
496 addrp = of_get_property(np, "AAPL,address", NULL); 496 addrp = of_get_property(np, "AAPL,address", NULL);
497 if (addrp == NULL) { 497 if (addrp == NULL) {
498 printk(KERN_ERR "low_i2c: Can't find address for %s\n", 498 printk(KERN_ERR "low_i2c: Can't find address for %s\n",
499 np->full_name); 499 np->full_name);
500 kfree(host); 500 kfree(host);
501 return NULL; 501 return NULL;
502 } 502 }
503 mutex_init(&host->mutex); 503 mutex_init(&host->mutex);
504 init_completion(&host->complete); 504 init_completion(&host->complete);
505 spin_lock_init(&host->lock); 505 spin_lock_init(&host->lock);
506 init_timer(&host->timeout_timer); 506 init_timer(&host->timeout_timer);
507 host->timeout_timer.function = kw_i2c_timeout; 507 host->timeout_timer.function = kw_i2c_timeout;
508 host->timeout_timer.data = (unsigned long)host; 508 host->timeout_timer.data = (unsigned long)host;
509 509
510 psteps = of_get_property(np, "AAPL,address-step", NULL); 510 psteps = of_get_property(np, "AAPL,address-step", NULL);
511 steps = psteps ? (*psteps) : 0x10; 511 steps = psteps ? (*psteps) : 0x10;
512 for (host->bsteps = 0; (steps & 0x01) == 0; host->bsteps++) 512 for (host->bsteps = 0; (steps & 0x01) == 0; host->bsteps++)
513 steps >>= 1; 513 steps >>= 1;
514 /* Select interface rate */ 514 /* Select interface rate */
515 host->speed = KW_I2C_MODE_25KHZ; 515 host->speed = KW_I2C_MODE_25KHZ;
516 prate = of_get_property(np, "AAPL,i2c-rate", NULL); 516 prate = of_get_property(np, "AAPL,i2c-rate", NULL);
517 if (prate) switch(*prate) { 517 if (prate) switch(*prate) {
518 case 100: 518 case 100:
519 host->speed = KW_I2C_MODE_100KHZ; 519 host->speed = KW_I2C_MODE_100KHZ;
520 break; 520 break;
521 case 50: 521 case 50:
522 host->speed = KW_I2C_MODE_50KHZ; 522 host->speed = KW_I2C_MODE_50KHZ;
523 break; 523 break;
524 case 25: 524 case 25:
525 host->speed = KW_I2C_MODE_25KHZ; 525 host->speed = KW_I2C_MODE_25KHZ;
526 break; 526 break;
527 } 527 }
528 host->irq = irq_of_parse_and_map(np, 0); 528 host->irq = irq_of_parse_and_map(np, 0);
529 if (host->irq == NO_IRQ) 529 if (host->irq == NO_IRQ)
530 printk(KERN_WARNING 530 printk(KERN_WARNING
531 "low_i2c: Failed to map interrupt for %s\n", 531 "low_i2c: Failed to map interrupt for %s\n",
532 np->full_name); 532 np->full_name);
533 533
534 host->base = ioremap((*addrp), 0x1000); 534 host->base = ioremap((*addrp), 0x1000);
535 if (host->base == NULL) { 535 if (host->base == NULL) {
536 printk(KERN_ERR "low_i2c: Can't map registers for %s\n", 536 printk(KERN_ERR "low_i2c: Can't map registers for %s\n",
537 np->full_name); 537 np->full_name);
538 kfree(host); 538 kfree(host);
539 return NULL; 539 return NULL;
540 } 540 }
541 541
542 /* Make sure IRQ is disabled */ 542 /* Make sure IRQ is disabled */
543 kw_write_reg(reg_ier, 0); 543 kw_write_reg(reg_ier, 0);
544 544
545 /* Request chip interrupt. We set IRQF_NO_SUSPEND because we don't 545 /* Request chip interrupt. We set IRQF_NO_SUSPEND because we don't
546 * want that interrupt disabled between the 2 passes of driver 546 * want that interrupt disabled between the 2 passes of driver
547 * suspend or we'll have issues running the pfuncs 547 * suspend or we'll have issues running the pfuncs
548 */ 548 */
549 if (request_irq(host->irq, kw_i2c_irq, IRQF_NO_SUSPEND, 549 if (request_irq(host->irq, kw_i2c_irq, IRQF_NO_SUSPEND,
550 "keywest i2c", host)) 550 "keywest i2c", host))
551 host->irq = NO_IRQ; 551 host->irq = NO_IRQ;
552 552
553 printk(KERN_INFO "KeyWest i2c @0x%08x irq %d %s\n", 553 printk(KERN_INFO "KeyWest i2c @0x%08x irq %d %s\n",
554 *addrp, host->irq, np->full_name); 554 *addrp, host->irq, np->full_name);
555 555
556 return host; 556 return host;
557 } 557 }
558 558
559 559
560 static void __init kw_i2c_add(struct pmac_i2c_host_kw *host, 560 static void __init kw_i2c_add(struct pmac_i2c_host_kw *host,
561 struct device_node *controller, 561 struct device_node *controller,
562 struct device_node *busnode, 562 struct device_node *busnode,
563 int channel) 563 int channel)
564 { 564 {
565 struct pmac_i2c_bus *bus; 565 struct pmac_i2c_bus *bus;
566 566
567 bus = kzalloc(sizeof(struct pmac_i2c_bus), GFP_KERNEL); 567 bus = kzalloc(sizeof(struct pmac_i2c_bus), GFP_KERNEL);
568 if (bus == NULL) 568 if (bus == NULL)
569 return; 569 return;
570 570
571 bus->controller = of_node_get(controller); 571 bus->controller = of_node_get(controller);
572 bus->busnode = of_node_get(busnode); 572 bus->busnode = of_node_get(busnode);
573 bus->type = pmac_i2c_bus_keywest; 573 bus->type = pmac_i2c_bus_keywest;
574 bus->hostdata = host; 574 bus->hostdata = host;
575 bus->channel = channel; 575 bus->channel = channel;
576 bus->mode = pmac_i2c_mode_std; 576 bus->mode = pmac_i2c_mode_std;
577 bus->open = kw_i2c_open; 577 bus->open = kw_i2c_open;
578 bus->close = kw_i2c_close; 578 bus->close = kw_i2c_close;
579 bus->xfer = kw_i2c_xfer; 579 bus->xfer = kw_i2c_xfer;
580 mutex_init(&bus->mutex); 580 mutex_init(&bus->mutex);
581 if (controller == busnode) 581 if (controller == busnode)
582 bus->flags = pmac_i2c_multibus; 582 bus->flags = pmac_i2c_multibus;
583 list_add(&bus->link, &pmac_i2c_busses); 583 list_add(&bus->link, &pmac_i2c_busses);
584 584
585 printk(KERN_INFO " channel %d bus %s\n", channel, 585 printk(KERN_INFO " channel %d bus %s\n", channel,
586 (controller == busnode) ? "<multibus>" : busnode->full_name); 586 (controller == busnode) ? "<multibus>" : busnode->full_name);
587 } 587 }
588 588
589 static void __init kw_i2c_probe(void) 589 static void __init kw_i2c_probe(void)
590 { 590 {
591 struct device_node *np, *child, *parent; 591 struct device_node *np, *child, *parent;
592 592
593 /* Probe keywest-i2c busses */ 593 /* Probe keywest-i2c busses */
594 for_each_compatible_node(np, "i2c","keywest-i2c") { 594 for_each_compatible_node(np, "i2c","keywest-i2c") {
595 struct pmac_i2c_host_kw *host; 595 struct pmac_i2c_host_kw *host;
596 int multibus; 596 int multibus;
597 597
598 /* Found one, init a host structure */ 598 /* Found one, init a host structure */
599 host = kw_i2c_host_init(np); 599 host = kw_i2c_host_init(np);
600 if (host == NULL) 600 if (host == NULL)
601 continue; 601 continue;
602 602
603 /* Now check if we have a multibus setup (old style) or if we 603 /* Now check if we have a multibus setup (old style) or if we
604 * have proper bus nodes. Note that the "new" way (proper bus 604 * have proper bus nodes. Note that the "new" way (proper bus
605 * nodes) might cause us to not create some busses that are 605 * nodes) might cause us to not create some busses that are
606 * kept hidden in the device-tree. In the future, we might 606 * kept hidden in the device-tree. In the future, we might
607 * want to work around that by creating busses without a node 607 * want to work around that by creating busses without a node
608 * but not for now 608 * but not for now
609 */ 609 */
610 child = of_get_next_child(np, NULL); 610 child = of_get_next_child(np, NULL);
611 multibus = !child || strcmp(child->name, "i2c-bus"); 611 multibus = !child || strcmp(child->name, "i2c-bus");
612 of_node_put(child); 612 of_node_put(child);
613 613
614 /* For a multibus setup, we get the bus count based on the 614 /* For a multibus setup, we get the bus count based on the
615 * parent type 615 * parent type
616 */ 616 */
617 if (multibus) { 617 if (multibus) {
618 int chans, i; 618 int chans, i;
619 619
620 parent = of_get_parent(np); 620 parent = of_get_parent(np);
621 if (parent == NULL) 621 if (parent == NULL)
622 continue; 622 continue;
623 chans = parent->name[0] == 'u' ? 2 : 1; 623 chans = parent->name[0] == 'u' ? 2 : 1;
624 for (i = 0; i < chans; i++) 624 for (i = 0; i < chans; i++)
625 kw_i2c_add(host, np, np, i); 625 kw_i2c_add(host, np, np, i);
626 } else { 626 } else {
627 for (child = NULL; 627 for (child = NULL;
628 (child = of_get_next_child(np, child)) != NULL;) { 628 (child = of_get_next_child(np, child)) != NULL;) {
629 const u32 *reg = of_get_property(child, 629 const u32 *reg = of_get_property(child,
630 "reg", NULL); 630 "reg", NULL);
631 if (reg == NULL) 631 if (reg == NULL)
632 continue; 632 continue;
633 kw_i2c_add(host, np, child, *reg); 633 kw_i2c_add(host, np, child, *reg);
634 } 634 }
635 } 635 }
636 } 636 }
637 } 637 }
638 638
639 639
640 /* 640 /*
641 * 641 *
642 * PMU implementation 642 * PMU implementation
643 * 643 *
644 */ 644 */
645 645
646 #ifdef CONFIG_ADB_PMU 646 #ifdef CONFIG_ADB_PMU
647 647
648 /* 648 /*
649 * i2c command block to the PMU 649 * i2c command block to the PMU
650 */ 650 */
651 struct pmu_i2c_hdr { 651 struct pmu_i2c_hdr {
652 u8 bus; 652 u8 bus;
653 u8 mode; 653 u8 mode;
654 u8 bus2; 654 u8 bus2;
655 u8 address; 655 u8 address;
656 u8 sub_addr; 656 u8 sub_addr;
657 u8 comb_addr; 657 u8 comb_addr;
658 u8 count; 658 u8 count;
659 u8 data[]; 659 u8 data[];
660 }; 660 };
661 661
662 static void pmu_i2c_complete(struct adb_request *req) 662 static void pmu_i2c_complete(struct adb_request *req)
663 { 663 {
664 complete(req->arg); 664 complete(req->arg);
665 } 665 }
666 666
667 static int pmu_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize, 667 static int pmu_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
668 u32 subaddr, u8 *data, int len) 668 u32 subaddr, u8 *data, int len)
669 { 669 {
670 struct adb_request *req = bus->hostdata; 670 struct adb_request *req = bus->hostdata;
671 struct pmu_i2c_hdr *hdr = (struct pmu_i2c_hdr *)&req->data[1]; 671 struct pmu_i2c_hdr *hdr = (struct pmu_i2c_hdr *)&req->data[1];
672 struct completion comp; 672 struct completion comp;
673 int read = addrdir & 1; 673 int read = addrdir & 1;
674 int retry; 674 int retry;
675 int rc = 0; 675 int rc = 0;
676 676
677 /* For now, limit ourselves to 16 bytes transfers */ 677 /* For now, limit ourselves to 16 bytes transfers */
678 if (len > 16) 678 if (len > 16)
679 return -EINVAL; 679 return -EINVAL;
680 680
681 init_completion(&comp); 681 init_completion(&comp);
682 682
683 for (retry = 0; retry < 16; retry++) { 683 for (retry = 0; retry < 16; retry++) {
684 memset(req, 0, sizeof(struct adb_request)); 684 memset(req, 0, sizeof(struct adb_request));
685 hdr->bus = bus->channel; 685 hdr->bus = bus->channel;
686 hdr->count = len; 686 hdr->count = len;
687 687
688 switch(bus->mode) { 688 switch(bus->mode) {
689 case pmac_i2c_mode_std: 689 case pmac_i2c_mode_std:
690 if (subsize != 0) 690 if (subsize != 0)
691 return -EINVAL; 691 return -EINVAL;
692 hdr->address = addrdir; 692 hdr->address = addrdir;
693 hdr->mode = PMU_I2C_MODE_SIMPLE; 693 hdr->mode = PMU_I2C_MODE_SIMPLE;
694 break; 694 break;
695 case pmac_i2c_mode_stdsub: 695 case pmac_i2c_mode_stdsub:
696 case pmac_i2c_mode_combined: 696 case pmac_i2c_mode_combined:
697 if (subsize != 1) 697 if (subsize != 1)
698 return -EINVAL; 698 return -EINVAL;
699 hdr->address = addrdir & 0xfe; 699 hdr->address = addrdir & 0xfe;
700 hdr->comb_addr = addrdir; 700 hdr->comb_addr = addrdir;
701 hdr->sub_addr = subaddr; 701 hdr->sub_addr = subaddr;
702 if (bus->mode == pmac_i2c_mode_stdsub) 702 if (bus->mode == pmac_i2c_mode_stdsub)
703 hdr->mode = PMU_I2C_MODE_STDSUB; 703 hdr->mode = PMU_I2C_MODE_STDSUB;
704 else 704 else
705 hdr->mode = PMU_I2C_MODE_COMBINED; 705 hdr->mode = PMU_I2C_MODE_COMBINED;
706 break; 706 break;
707 default: 707 default:
708 return -EINVAL; 708 return -EINVAL;
709 } 709 }
710 710
711 INIT_COMPLETION(comp); 711 INIT_COMPLETION(comp);
712 req->data[0] = PMU_I2C_CMD; 712 req->data[0] = PMU_I2C_CMD;
713 req->reply[0] = 0xff; 713 req->reply[0] = 0xff;
714 req->nbytes = sizeof(struct pmu_i2c_hdr) + 1; 714 req->nbytes = sizeof(struct pmu_i2c_hdr) + 1;
715 req->done = pmu_i2c_complete; 715 req->done = pmu_i2c_complete;
716 req->arg = &comp; 716 req->arg = &comp;
717 if (!read && len) { 717 if (!read && len) {
718 memcpy(hdr->data, data, len); 718 memcpy(hdr->data, data, len);
719 req->nbytes += len; 719 req->nbytes += len;
720 } 720 }
721 rc = pmu_queue_request(req); 721 rc = pmu_queue_request(req);
722 if (rc) 722 if (rc)
723 return rc; 723 return rc;
724 wait_for_completion(&comp); 724 wait_for_completion(&comp);
725 if (req->reply[0] == PMU_I2C_STATUS_OK) 725 if (req->reply[0] == PMU_I2C_STATUS_OK)
726 break; 726 break;
727 msleep(15); 727 msleep(15);
728 } 728 }
729 if (req->reply[0] != PMU_I2C_STATUS_OK) 729 if (req->reply[0] != PMU_I2C_STATUS_OK)
730 return -EIO; 730 return -EIO;
731 731
732 for (retry = 0; retry < 16; retry++) { 732 for (retry = 0; retry < 16; retry++) {
733 memset(req, 0, sizeof(struct adb_request)); 733 memset(req, 0, sizeof(struct adb_request));
734 734
735 /* I know that looks like a lot, slow as hell, but darwin 735 /* I know that looks like a lot, slow as hell, but darwin
736 * does it so let's be on the safe side for now 736 * does it so let's be on the safe side for now
737 */ 737 */
738 msleep(15); 738 msleep(15);
739 739
740 hdr->bus = PMU_I2C_BUS_STATUS; 740 hdr->bus = PMU_I2C_BUS_STATUS;
741 741
742 INIT_COMPLETION(comp); 742 INIT_COMPLETION(comp);
743 req->data[0] = PMU_I2C_CMD; 743 req->data[0] = PMU_I2C_CMD;
744 req->reply[0] = 0xff; 744 req->reply[0] = 0xff;
745 req->nbytes = 2; 745 req->nbytes = 2;
746 req->done = pmu_i2c_complete; 746 req->done = pmu_i2c_complete;
747 req->arg = &comp; 747 req->arg = &comp;
748 rc = pmu_queue_request(req); 748 rc = pmu_queue_request(req);
749 if (rc) 749 if (rc)
750 return rc; 750 return rc;
751 wait_for_completion(&comp); 751 wait_for_completion(&comp);
752 752
753 if (req->reply[0] == PMU_I2C_STATUS_OK && !read) 753 if (req->reply[0] == PMU_I2C_STATUS_OK && !read)
754 return 0; 754 return 0;
755 if (req->reply[0] == PMU_I2C_STATUS_DATAREAD && read) { 755 if (req->reply[0] == PMU_I2C_STATUS_DATAREAD && read) {
756 int rlen = req->reply_len - 1; 756 int rlen = req->reply_len - 1;
757 757
758 if (rlen != len) { 758 if (rlen != len) {
759 printk(KERN_WARNING "low_i2c: PMU returned %d" 759 printk(KERN_WARNING "low_i2c: PMU returned %d"
760 " bytes, expected %d !\n", rlen, len); 760 " bytes, expected %d !\n", rlen, len);
761 return -EIO; 761 return -EIO;
762 } 762 }
763 if (len) 763 if (len)
764 memcpy(data, &req->reply[1], len); 764 memcpy(data, &req->reply[1], len);
765 return 0; 765 return 0;
766 } 766 }
767 } 767 }
768 return -EIO; 768 return -EIO;
769 } 769 }
770 770
771 static void __init pmu_i2c_probe(void) 771 static void __init pmu_i2c_probe(void)
772 { 772 {
773 struct pmac_i2c_bus *bus; 773 struct pmac_i2c_bus *bus;
774 struct device_node *busnode; 774 struct device_node *busnode;
775 int channel, sz; 775 int channel, sz;
776 776
777 if (!pmu_present()) 777 if (!pmu_present())
778 return; 778 return;
779 779
780 /* There might or might not be a "pmu-i2c" node, we use that 780 /* There might or might not be a "pmu-i2c" node, we use that
781 * or via-pmu itself, whatever we find. I haven't seen a machine 781 * or via-pmu itself, whatever we find. I haven't seen a machine
782 * with separate bus nodes, so we assume a multibus setup 782 * with separate bus nodes, so we assume a multibus setup
783 */ 783 */
784 busnode = of_find_node_by_name(NULL, "pmu-i2c"); 784 busnode = of_find_node_by_name(NULL, "pmu-i2c");
785 if (busnode == NULL) 785 if (busnode == NULL)
786 busnode = of_find_node_by_name(NULL, "via-pmu"); 786 busnode = of_find_node_by_name(NULL, "via-pmu");
787 if (busnode == NULL) 787 if (busnode == NULL)
788 return; 788 return;
789 789
790 printk(KERN_INFO "PMU i2c %s\n", busnode->full_name); 790 printk(KERN_INFO "PMU i2c %s\n", busnode->full_name);
791 791
792 /* 792 /*
793 * We add bus 1 and 2 only for now, bus 0 is "special" 793 * We add bus 1 and 2 only for now, bus 0 is "special"
794 */ 794 */
795 for (channel = 1; channel <= 2; channel++) { 795 for (channel = 1; channel <= 2; channel++) {
796 sz = sizeof(struct pmac_i2c_bus) + sizeof(struct adb_request); 796 sz = sizeof(struct pmac_i2c_bus) + sizeof(struct adb_request);
797 bus = kzalloc(sz, GFP_KERNEL); 797 bus = kzalloc(sz, GFP_KERNEL);
798 if (bus == NULL) 798 if (bus == NULL)
799 return; 799 return;
800 800
801 bus->controller = busnode; 801 bus->controller = busnode;
802 bus->busnode = busnode; 802 bus->busnode = busnode;
803 bus->type = pmac_i2c_bus_pmu; 803 bus->type = pmac_i2c_bus_pmu;
804 bus->channel = channel; 804 bus->channel = channel;
805 bus->mode = pmac_i2c_mode_std; 805 bus->mode = pmac_i2c_mode_std;
806 bus->hostdata = bus + 1; 806 bus->hostdata = bus + 1;
807 bus->xfer = pmu_i2c_xfer; 807 bus->xfer = pmu_i2c_xfer;
808 mutex_init(&bus->mutex); 808 mutex_init(&bus->mutex);
809 bus->flags = pmac_i2c_multibus; 809 bus->flags = pmac_i2c_multibus;
810 list_add(&bus->link, &pmac_i2c_busses); 810 list_add(&bus->link, &pmac_i2c_busses);
811 811
812 printk(KERN_INFO " channel %d bus <multibus>\n", channel); 812 printk(KERN_INFO " channel %d bus <multibus>\n", channel);
813 } 813 }
814 } 814 }
815 815
816 #endif /* CONFIG_ADB_PMU */ 816 #endif /* CONFIG_ADB_PMU */
817 817
818 818
819 /* 819 /*
820 * 820 *
821 * SMU implementation 821 * SMU implementation
822 * 822 *
823 */ 823 */
824 824
825 #ifdef CONFIG_PMAC_SMU 825 #ifdef CONFIG_PMAC_SMU
826 826
827 static void smu_i2c_complete(struct smu_i2c_cmd *cmd, void *misc) 827 static void smu_i2c_complete(struct smu_i2c_cmd *cmd, void *misc)
828 { 828 {
829 complete(misc); 829 complete(misc);
830 } 830 }
831 831
832 static int smu_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize, 832 static int smu_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
833 u32 subaddr, u8 *data, int len) 833 u32 subaddr, u8 *data, int len)
834 { 834 {
835 struct smu_i2c_cmd *cmd = bus->hostdata; 835 struct smu_i2c_cmd *cmd = bus->hostdata;
836 struct completion comp; 836 struct completion comp;
837 int read = addrdir & 1; 837 int read = addrdir & 1;
838 int rc = 0; 838 int rc = 0;
839 839
840 if ((read && len > SMU_I2C_READ_MAX) || 840 if ((read && len > SMU_I2C_READ_MAX) ||
841 ((!read) && len > SMU_I2C_WRITE_MAX)) 841 ((!read) && len > SMU_I2C_WRITE_MAX))
842 return -EINVAL; 842 return -EINVAL;
843 843
844 memset(cmd, 0, sizeof(struct smu_i2c_cmd)); 844 memset(cmd, 0, sizeof(struct smu_i2c_cmd));
845 cmd->info.bus = bus->channel; 845 cmd->info.bus = bus->channel;
846 cmd->info.devaddr = addrdir; 846 cmd->info.devaddr = addrdir;
847 cmd->info.datalen = len; 847 cmd->info.datalen = len;
848 848
849 switch(bus->mode) { 849 switch(bus->mode) {
850 case pmac_i2c_mode_std: 850 case pmac_i2c_mode_std:
851 if (subsize != 0) 851 if (subsize != 0)
852 return -EINVAL; 852 return -EINVAL;
853 cmd->info.type = SMU_I2C_TRANSFER_SIMPLE; 853 cmd->info.type = SMU_I2C_TRANSFER_SIMPLE;
854 break; 854 break;
855 case pmac_i2c_mode_stdsub: 855 case pmac_i2c_mode_stdsub:
856 case pmac_i2c_mode_combined: 856 case pmac_i2c_mode_combined:
857 if (subsize > 3 || subsize < 1) 857 if (subsize > 3 || subsize < 1)
858 return -EINVAL; 858 return -EINVAL;
859 cmd->info.sublen = subsize; 859 cmd->info.sublen = subsize;
860 /* that's big-endian only but heh ! */ 860 /* that's big-endian only but heh ! */
861 memcpy(&cmd->info.subaddr, ((char *)&subaddr) + (4 - subsize), 861 memcpy(&cmd->info.subaddr, ((char *)&subaddr) + (4 - subsize),
862 subsize); 862 subsize);
863 if (bus->mode == pmac_i2c_mode_stdsub) 863 if (bus->mode == pmac_i2c_mode_stdsub)
864 cmd->info.type = SMU_I2C_TRANSFER_STDSUB; 864 cmd->info.type = SMU_I2C_TRANSFER_STDSUB;
865 else 865 else
866 cmd->info.type = SMU_I2C_TRANSFER_COMBINED; 866 cmd->info.type = SMU_I2C_TRANSFER_COMBINED;
867 break; 867 break;
868 default: 868 default:
869 return -EINVAL; 869 return -EINVAL;
870 } 870 }
871 if (!read && len) 871 if (!read && len)
872 memcpy(cmd->info.data, data, len); 872 memcpy(cmd->info.data, data, len);
873 873
874 init_completion(&comp); 874 init_completion(&comp);
875 cmd->done = smu_i2c_complete; 875 cmd->done = smu_i2c_complete;
876 cmd->misc = &comp; 876 cmd->misc = &comp;
877 rc = smu_queue_i2c(cmd); 877 rc = smu_queue_i2c(cmd);
878 if (rc < 0) 878 if (rc < 0)
879 return rc; 879 return rc;
880 wait_for_completion(&comp); 880 wait_for_completion(&comp);
881 rc = cmd->status; 881 rc = cmd->status;
882 882
883 if (read && len) 883 if (read && len)
884 memcpy(data, cmd->info.data, len); 884 memcpy(data, cmd->info.data, len);
885 return rc < 0 ? rc : 0; 885 return rc < 0 ? rc : 0;
886 } 886 }
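
The subaddress copy in smu_i2c_xfer() takes the low "subsize" bytes of the 32-bit subaddress by starting the memcpy (4 - subsize) bytes into the value, which is why the comment flags it as big-endian only. A small stand-alone illustration of that byte selection (hypothetical variable names, not kernel code):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t subaddr = 0x00112233;   /* only the low bytes are meaningful */
        unsigned char sub[4] = { 0 };
        int subsize = 2;                 /* 1..3 in the SMU case */

        /* On a big-endian CPU the value is stored as 00 11 22 33, so skipping
         * (4 - subsize) bytes yields the low-order bytes 22 33. On a
         * little-endian host this prints 11 00 instead, which is exactly the
         * limitation the kernel comment points out. */
        memcpy(sub, ((char *)&subaddr) + (4 - subsize), subsize);
        printf("%02x %02x\n", sub[0], sub[1]);
        return 0;
    }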
887 887
888 static void __init smu_i2c_probe(void) 888 static void __init smu_i2c_probe(void)
889 { 889 {
890 struct device_node *controller, *busnode; 890 struct device_node *controller, *busnode;
891 struct pmac_i2c_bus *bus; 891 struct pmac_i2c_bus *bus;
892 const u32 *reg; 892 const u32 *reg;
893 int sz; 893 int sz;
894 894
895 if (!smu_present()) 895 if (!smu_present())
896 return; 896 return;
897 897
898 controller = of_find_node_by_name(NULL, "smu-i2c-control"); 898 controller = of_find_node_by_name(NULL, "smu-i2c-control");
899 if (controller == NULL) 899 if (controller == NULL)
900 controller = of_find_node_by_name(NULL, "smu"); 900 controller = of_find_node_by_name(NULL, "smu");
901 if (controller == NULL) 901 if (controller == NULL)
902 return; 902 return;
903 903
904 printk(KERN_INFO "SMU i2c %s\n", controller->full_name); 904 printk(KERN_INFO "SMU i2c %s\n", controller->full_name);
905 905
906 /* Look for children; note that they might not be of the right 906 /* Look for children; note that they might not be of the right
907 * type as older device trees mix i2c busses and other things 907 * type as older device trees mix i2c busses and other things
908 * at the same level 908 * at the same level
909 */ 909 */
910 for (busnode = NULL; 910 for (busnode = NULL;
911 (busnode = of_get_next_child(controller, busnode)) != NULL;) { 911 (busnode = of_get_next_child(controller, busnode)) != NULL;) {
912 if (strcmp(busnode->type, "i2c") && 912 if (strcmp(busnode->type, "i2c") &&
913 strcmp(busnode->type, "i2c-bus")) 913 strcmp(busnode->type, "i2c-bus"))
914 continue; 914 continue;
915 reg = of_get_property(busnode, "reg", NULL); 915 reg = of_get_property(busnode, "reg", NULL);
916 if (reg == NULL) 916 if (reg == NULL)
917 continue; 917 continue;
918 918
919 sz = sizeof(struct pmac_i2c_bus) + sizeof(struct smu_i2c_cmd); 919 sz = sizeof(struct pmac_i2c_bus) + sizeof(struct smu_i2c_cmd);
920 bus = kzalloc(sz, GFP_KERNEL); 920 bus = kzalloc(sz, GFP_KERNEL);
921 if (bus == NULL) 921 if (bus == NULL)
922 return; 922 return;
923 923
924 bus->controller = controller; 924 bus->controller = controller;
925 bus->busnode = of_node_get(busnode); 925 bus->busnode = of_node_get(busnode);
926 bus->type = pmac_i2c_bus_smu; 926 bus->type = pmac_i2c_bus_smu;
927 bus->channel = *reg; 927 bus->channel = *reg;
928 bus->mode = pmac_i2c_mode_std; 928 bus->mode = pmac_i2c_mode_std;
929 bus->hostdata = bus + 1; 929 bus->hostdata = bus + 1;
930 bus->xfer = smu_i2c_xfer; 930 bus->xfer = smu_i2c_xfer;
931 mutex_init(&bus->mutex); 931 mutex_init(&bus->mutex);
932 bus->flags = 0; 932 bus->flags = 0;
933 list_add(&bus->link, &pmac_i2c_busses); 933 list_add(&bus->link, &pmac_i2c_busses);
934 934
935 printk(KERN_INFO " channel %x bus %s\n", 935 printk(KERN_INFO " channel %x bus %s\n",
936 bus->channel, busnode->full_name); 936 bus->channel, busnode->full_name);
937 } 937 }
938 } 938 }
939 939
940 #endif /* CONFIG_PMAC_SMU */ 940 #endif /* CONFIG_PMAC_SMU */
941 941
942 /* 942 /*
943 * 943 *
944 * Core code 944 * Core code
945 * 945 *
946 */ 946 */
947 947
948 948
949 struct pmac_i2c_bus *pmac_i2c_find_bus(struct device_node *node) 949 struct pmac_i2c_bus *pmac_i2c_find_bus(struct device_node *node)
950 { 950 {
951 struct device_node *p = of_node_get(node); 951 struct device_node *p = of_node_get(node);
952 struct device_node *prev = NULL; 952 struct device_node *prev = NULL;
953 struct pmac_i2c_bus *bus; 953 struct pmac_i2c_bus *bus;
954 954
955 while(p) { 955 while(p) {
956 list_for_each_entry(bus, &pmac_i2c_busses, link) { 956 list_for_each_entry(bus, &pmac_i2c_busses, link) {
957 if (p == bus->busnode) { 957 if (p == bus->busnode) {
958 if (prev && bus->flags & pmac_i2c_multibus) { 958 if (prev && bus->flags & pmac_i2c_multibus) {
959 const u32 *reg; 959 const u32 *reg;
960 reg = of_get_property(prev, "reg", 960 reg = of_get_property(prev, "reg",
961 NULL); 961 NULL);
962 if (!reg) 962 if (!reg)
963 continue; 963 continue;
964 if (((*reg) >> 8) != bus->channel) 964 if (((*reg) >> 8) != bus->channel)
965 continue; 965 continue;
966 } 966 }
967 of_node_put(p); 967 of_node_put(p);
968 of_node_put(prev); 968 of_node_put(prev);
969 return bus; 969 return bus;
970 } 970 }
971 } 971 }
972 of_node_put(prev); 972 of_node_put(prev);
973 prev = p; 973 prev = p;
974 p = of_get_parent(p); 974 p = of_get_parent(p);
975 } 975 }
976 return NULL; 976 return NULL;
977 } 977 }
978 EXPORT_SYMBOL_GPL(pmac_i2c_find_bus); 978 EXPORT_SYMBOL_GPL(pmac_i2c_find_bus);
979 979
980 u8 pmac_i2c_get_dev_addr(struct device_node *device) 980 u8 pmac_i2c_get_dev_addr(struct device_node *device)
981 { 981 {
982 const u32 *reg = of_get_property(device, "reg", NULL); 982 const u32 *reg = of_get_property(device, "reg", NULL);
983 983
984 if (reg == NULL) 984 if (reg == NULL)
985 return 0; 985 return 0;
986 986
987 return (*reg) & 0xff; 987 return (*reg) & 0xff;
988 } 988 }
989 EXPORT_SYMBOL_GPL(pmac_i2c_get_dev_addr); 989 EXPORT_SYMBOL_GPL(pmac_i2c_get_dev_addr);
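
pmac_i2c_find_bus() and pmac_i2c_get_dev_addr() read the same "reg" property two different ways: on a multibus controller the channel sits in bits 8 and up, while the 8-bit device address is the low byte. A quick decoding example with a made-up reg value:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Hypothetical "reg" value taken from an i2c child node. */
        uint32_t reg = 0x000001d2;

        uint32_t channel = reg >> 8;    /* matched against bus->channel */
        uint8_t  addr    = reg & 0xff;  /* what pmac_i2c_get_dev_addr() returns */

        printf("channel %u, device address 0x%02x\n",
               (unsigned)channel, (unsigned)addr);
        return 0;
    }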
990 990
991 struct device_node *pmac_i2c_get_controller(struct pmac_i2c_bus *bus) 991 struct device_node *pmac_i2c_get_controller(struct pmac_i2c_bus *bus)
992 { 992 {
993 return bus->controller; 993 return bus->controller;
994 } 994 }
995 EXPORT_SYMBOL_GPL(pmac_i2c_get_controller); 995 EXPORT_SYMBOL_GPL(pmac_i2c_get_controller);
996 996
997 struct device_node *pmac_i2c_get_bus_node(struct pmac_i2c_bus *bus) 997 struct device_node *pmac_i2c_get_bus_node(struct pmac_i2c_bus *bus)
998 { 998 {
999 return bus->busnode; 999 return bus->busnode;
1000 } 1000 }
1001 EXPORT_SYMBOL_GPL(pmac_i2c_get_bus_node); 1001 EXPORT_SYMBOL_GPL(pmac_i2c_get_bus_node);
1002 1002
1003 int pmac_i2c_get_type(struct pmac_i2c_bus *bus) 1003 int pmac_i2c_get_type(struct pmac_i2c_bus *bus)
1004 { 1004 {
1005 return bus->type; 1005 return bus->type;
1006 } 1006 }
1007 EXPORT_SYMBOL_GPL(pmac_i2c_get_type); 1007 EXPORT_SYMBOL_GPL(pmac_i2c_get_type);
1008 1008
1009 int pmac_i2c_get_flags(struct pmac_i2c_bus *bus) 1009 int pmac_i2c_get_flags(struct pmac_i2c_bus *bus)
1010 { 1010 {
1011 return bus->flags; 1011 return bus->flags;
1012 } 1012 }
1013 EXPORT_SYMBOL_GPL(pmac_i2c_get_flags); 1013 EXPORT_SYMBOL_GPL(pmac_i2c_get_flags);
1014 1014
1015 int pmac_i2c_get_channel(struct pmac_i2c_bus *bus) 1015 int pmac_i2c_get_channel(struct pmac_i2c_bus *bus)
1016 { 1016 {
1017 return bus->channel; 1017 return bus->channel;
1018 } 1018 }
1019 EXPORT_SYMBOL_GPL(pmac_i2c_get_channel); 1019 EXPORT_SYMBOL_GPL(pmac_i2c_get_channel);
1020 1020
1021 1021
1022 struct i2c_adapter *pmac_i2c_get_adapter(struct pmac_i2c_bus *bus) 1022 struct i2c_adapter *pmac_i2c_get_adapter(struct pmac_i2c_bus *bus)
1023 { 1023 {
1024 return &bus->adapter; 1024 return &bus->adapter;
1025 } 1025 }
1026 EXPORT_SYMBOL_GPL(pmac_i2c_get_adapter); 1026 EXPORT_SYMBOL_GPL(pmac_i2c_get_adapter);
1027 1027
1028 struct pmac_i2c_bus *pmac_i2c_adapter_to_bus(struct i2c_adapter *adapter) 1028 struct pmac_i2c_bus *pmac_i2c_adapter_to_bus(struct i2c_adapter *adapter)
1029 { 1029 {
1030 struct pmac_i2c_bus *bus; 1030 struct pmac_i2c_bus *bus;
1031 1031
1032 list_for_each_entry(bus, &pmac_i2c_busses, link) 1032 list_for_each_entry(bus, &pmac_i2c_busses, link)
1033 if (&bus->adapter == adapter) 1033 if (&bus->adapter == adapter)
1034 return bus; 1034 return bus;
1035 return NULL; 1035 return NULL;
1036 } 1036 }
1037 EXPORT_SYMBOL_GPL(pmac_i2c_adapter_to_bus); 1037 EXPORT_SYMBOL_GPL(pmac_i2c_adapter_to_bus);
1038 1038
1039 int pmac_i2c_match_adapter(struct device_node *dev, struct i2c_adapter *adapter) 1039 int pmac_i2c_match_adapter(struct device_node *dev, struct i2c_adapter *adapter)
1040 { 1040 {
1041 struct pmac_i2c_bus *bus = pmac_i2c_find_bus(dev); 1041 struct pmac_i2c_bus *bus = pmac_i2c_find_bus(dev);
1042 1042
1043 if (bus == NULL) 1043 if (bus == NULL)
1044 return 0; 1044 return 0;
1045 return (&bus->adapter == adapter); 1045 return (&bus->adapter == adapter);
1046 } 1046 }
1047 EXPORT_SYMBOL_GPL(pmac_i2c_match_adapter); 1047 EXPORT_SYMBOL_GPL(pmac_i2c_match_adapter);
1048 1048
1049 int pmac_low_i2c_lock(struct device_node *np) 1049 int pmac_low_i2c_lock(struct device_node *np)
1050 { 1050 {
1051 struct pmac_i2c_bus *bus, *found = NULL; 1051 struct pmac_i2c_bus *bus, *found = NULL;
1052 1052
1053 list_for_each_entry(bus, &pmac_i2c_busses, link) { 1053 list_for_each_entry(bus, &pmac_i2c_busses, link) {
1054 if (np == bus->controller) { 1054 if (np == bus->controller) {
1055 found = bus; 1055 found = bus;
1056 break; 1056 break;
1057 } 1057 }
1058 } 1058 }
1059 if (!found) 1059 if (!found)
1060 return -ENODEV; 1060 return -ENODEV;
1061 return pmac_i2c_open(bus, 0); 1061 return pmac_i2c_open(bus, 0);
1062 } 1062 }
1063 EXPORT_SYMBOL_GPL(pmac_low_i2c_lock); 1063 EXPORT_SYMBOL_GPL(pmac_low_i2c_lock);
1064 1064
1065 int pmac_low_i2c_unlock(struct device_node *np) 1065 int pmac_low_i2c_unlock(struct device_node *np)
1066 { 1066 {
1067 struct pmac_i2c_bus *bus, *found = NULL; 1067 struct pmac_i2c_bus *bus, *found = NULL;
1068 1068
1069 list_for_each_entry(bus, &pmac_i2c_busses, link) { 1069 list_for_each_entry(bus, &pmac_i2c_busses, link) {
1070 if (np == bus->controller) { 1070 if (np == bus->controller) {
1071 found = bus; 1071 found = bus;
1072 break; 1072 break;
1073 } 1073 }
1074 } 1074 }
1075 if (!found) 1075 if (!found)
1076 return -ENODEV; 1076 return -ENODEV;
1077 pmac_i2c_close(bus); 1077 pmac_i2c_close(bus);
1078 return 0; 1078 return 0;
1079 } 1079 }
1080 EXPORT_SYMBOL_GPL(pmac_low_i2c_unlock); 1080 EXPORT_SYMBOL_GPL(pmac_low_i2c_unlock);
1081 1081
1082 1082
1083 int pmac_i2c_open(struct pmac_i2c_bus *bus, int polled) 1083 int pmac_i2c_open(struct pmac_i2c_bus *bus, int polled)
1084 { 1084 {
1085 int rc; 1085 int rc;
1086 1086
1087 mutex_lock(&bus->mutex); 1087 mutex_lock(&bus->mutex);
1088 bus->polled = polled || pmac_i2c_force_poll; 1088 bus->polled = polled || pmac_i2c_force_poll;
1089 bus->opened = 1; 1089 bus->opened = 1;
1090 bus->mode = pmac_i2c_mode_std; 1090 bus->mode = pmac_i2c_mode_std;
1091 if (bus->open && (rc = bus->open(bus)) != 0) { 1091 if (bus->open && (rc = bus->open(bus)) != 0) {
1092 bus->opened = 0; 1092 bus->opened = 0;
1093 mutex_unlock(&bus->mutex); 1093 mutex_unlock(&bus->mutex);
1094 return rc; 1094 return rc;
1095 } 1095 }
1096 return 0; 1096 return 0;
1097 } 1097 }
1098 EXPORT_SYMBOL_GPL(pmac_i2c_open); 1098 EXPORT_SYMBOL_GPL(pmac_i2c_open);
1099 1099
1100 void pmac_i2c_close(struct pmac_i2c_bus *bus) 1100 void pmac_i2c_close(struct pmac_i2c_bus *bus)
1101 { 1101 {
1102 WARN_ON(!bus->opened); 1102 WARN_ON(!bus->opened);
1103 if (bus->close) 1103 if (bus->close)
1104 bus->close(bus); 1104 bus->close(bus);
1105 bus->opened = 0; 1105 bus->opened = 0;
1106 mutex_unlock(&bus->mutex); 1106 mutex_unlock(&bus->mutex);
1107 } 1107 }
1108 EXPORT_SYMBOL_GPL(pmac_i2c_close); 1108 EXPORT_SYMBOL_GPL(pmac_i2c_close);
1109 1109
1110 int pmac_i2c_setmode(struct pmac_i2c_bus *bus, int mode) 1110 int pmac_i2c_setmode(struct pmac_i2c_bus *bus, int mode)
1111 { 1111 {
1112 WARN_ON(!bus->opened); 1112 WARN_ON(!bus->opened);
1113 1113
1114 /* Report to me if you see the error below, as there might be a new 1114 /* Report to me if you see the error below, as there might be a new
1115 * "combined4" mode that I need to implement for the SMU bus 1115 * "combined4" mode that I need to implement for the SMU bus
1116 */ 1116 */
1117 if (mode < pmac_i2c_mode_dumb || mode > pmac_i2c_mode_combined) { 1117 if (mode < pmac_i2c_mode_dumb || mode > pmac_i2c_mode_combined) {
1118 printk(KERN_ERR "low_i2c: Invalid mode %d requested on" 1118 printk(KERN_ERR "low_i2c: Invalid mode %d requested on"
1119 " bus %s !\n", mode, bus->busnode->full_name); 1119 " bus %s !\n", mode, bus->busnode->full_name);
1120 return -EINVAL; 1120 return -EINVAL;
1121 } 1121 }
1122 bus->mode = mode; 1122 bus->mode = mode;
1123 1123
1124 return 0; 1124 return 0;
1125 } 1125 }
1126 EXPORT_SYMBOL_GPL(pmac_i2c_setmode); 1126 EXPORT_SYMBOL_GPL(pmac_i2c_setmode);
1127 1127
1128 int pmac_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize, 1128 int pmac_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
1129 u32 subaddr, u8 *data, int len) 1129 u32 subaddr, u8 *data, int len)
1130 { 1130 {
1131 int rc; 1131 int rc;
1132 1132
1133 WARN_ON(!bus->opened); 1133 WARN_ON(!bus->opened);
1134 1134
1135 DBG("xfer() chan=%d, addrdir=0x%x, mode=%d, subsize=%d, subaddr=0x%x," 1135 DBG("xfer() chan=%d, addrdir=0x%x, mode=%d, subsize=%d, subaddr=0x%x,"
1136 " %d bytes, bus %s\n", bus->channel, addrdir, bus->mode, subsize, 1136 " %d bytes, bus %s\n", bus->channel, addrdir, bus->mode, subsize,
1137 subaddr, len, bus->busnode->full_name); 1137 subaddr, len, bus->busnode->full_name);
1138 1138
1139 rc = bus->xfer(bus, addrdir, subsize, subaddr, data, len); 1139 rc = bus->xfer(bus, addrdir, subsize, subaddr, data, len);
1140 1140
1141 #ifdef DEBUG 1141 #ifdef DEBUG
1142 if (rc) 1142 if (rc)
1143 DBG("xfer error %d\n", rc); 1143 DBG("xfer error %d\n", rc);
1144 #endif 1144 #endif
1145 return rc; 1145 return rc;
1146 } 1146 }
1147 EXPORT_SYMBOL_GPL(pmac_i2c_xfer); 1147 EXPORT_SYMBOL_GPL(pmac_i2c_xfer);
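
Taken together, the exported functions above form the whole client-side API: find the bus for a device node, open it (which takes the bus mutex), optionally change the mode, perform the transfer, and close. A hedged sketch of a caller, assuming the usual declarations for these functions are available (the pmac_low_i2c.h header) and that "np" is a valid i2c client node found earlier:

    /* Sketch only: error handling trimmed, not a function from this file. */
    static int example_read_reg(struct device_node *np, u8 subaddr,
                                u8 *buf, int len)
    {
        struct pmac_i2c_bus *bus = pmac_i2c_find_bus(np);
        u8 addr = pmac_i2c_get_dev_addr(np);
        int rc;

        if (!bus)
            return -ENODEV;
        rc = pmac_i2c_open(bus, 0);        /* 0 = interrupt driven when possible */
        if (rc)
            return rc;
        pmac_i2c_setmode(bus, pmac_i2c_mode_combined);
        rc = pmac_i2c_xfer(bus, addr | pmac_i2c_read, 1, subaddr, buf, len);
        pmac_i2c_close(bus);
        return rc;
    }

This is the same open/setmode/xfer/close sequence the platform-function handlers below go through via pmac_i2c_do_begin() and pmac_i2c_do_end().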
1148 1148
1149 /* some quirks for platform function decoding */ 1149 /* some quirks for platform function decoding */
1150 enum { 1150 enum {
1151 pmac_i2c_quirk_invmask = 0x00000001u, 1151 pmac_i2c_quirk_invmask = 0x00000001u,
1152 pmac_i2c_quirk_skip = 0x00000002u, 1152 pmac_i2c_quirk_skip = 0x00000002u,
1153 }; 1153 };
1154 1154
1155 static void pmac_i2c_devscan(void (*callback)(struct device_node *dev, 1155 static void pmac_i2c_devscan(void (*callback)(struct device_node *dev,
1156 int quirks)) 1156 int quirks))
1157 { 1157 {
1158 struct pmac_i2c_bus *bus; 1158 struct pmac_i2c_bus *bus;
1159 struct device_node *np; 1159 struct device_node *np;
1160 static struct whitelist_ent { 1160 static struct whitelist_ent {
1161 char *name; 1161 char *name;
1162 char *compatible; 1162 char *compatible;
1163 int quirks; 1163 int quirks;
1164 } whitelist[] = { 1164 } whitelist[] = {
1165 /* XXX Study device-trees & Apple drivers and get the quirks 1165 /* XXX Study device-trees & Apple drivers and get the quirks
1166 * right ! 1166 * right !
1167 */ 1167 */
1168 /* Workaround: It seems that running the clockspreading 1168 /* Workaround: It seems that running the clockspreading
1169 * properties on the eMac will cause lockups during boot. 1169 * properties on the eMac will cause lockups during boot.
1170 * The machine seems to work fine without that. So for now, 1170 * The machine seems to work fine without that. So for now,
1171 * let's make sure i2c-hwclock doesn't match against "imic" 1171 * let's make sure i2c-hwclock doesn't match against "imic"
1172 * clocks and we'll figure out if we really need to do 1172 * clocks and we'll figure out if we really need to do
1173 * something special about those later. 1173 * something special about those later.
1174 */ 1174 */
1175 { "i2c-hwclock", "imic5002", pmac_i2c_quirk_skip }, 1175 { "i2c-hwclock", "imic5002", pmac_i2c_quirk_skip },
1176 { "i2c-hwclock", "imic5003", pmac_i2c_quirk_skip }, 1176 { "i2c-hwclock", "imic5003", pmac_i2c_quirk_skip },
1177 { "i2c-hwclock", NULL, pmac_i2c_quirk_invmask }, 1177 { "i2c-hwclock", NULL, pmac_i2c_quirk_invmask },
1178 { "i2c-cpu-voltage", NULL, 0}, 1178 { "i2c-cpu-voltage", NULL, 0},
1179 { "temp-monitor", NULL, 0 }, 1179 { "temp-monitor", NULL, 0 },
1180 { "supply-monitor", NULL, 0 }, 1180 { "supply-monitor", NULL, 0 },
1181 { NULL, NULL, 0 }, 1181 { NULL, NULL, 0 },
1182 }; 1182 };
1183 1183
1184 /* Only some devices need to have platform functions instantiated 1184 /* Only some devices need to have platform functions instantiated
1185 * here. For now, we have a table. Others, like 9554 i2c GPIOs used 1185 * here. For now, we have a table. Others, like 9554 i2c GPIOs used
1186 * on Xserve, if we ever do a driver for them, will use their own 1186 * on Xserve, if we ever do a driver for them, will use their own
1187 * platform function instance 1187 * platform function instance
1188 */ 1188 */
1189 list_for_each_entry(bus, &pmac_i2c_busses, link) { 1189 list_for_each_entry(bus, &pmac_i2c_busses, link) {
1190 for (np = NULL; 1190 for (np = NULL;
1191 (np = of_get_next_child(bus->busnode, np)) != NULL;) { 1191 (np = of_get_next_child(bus->busnode, np)) != NULL;) {
1192 struct whitelist_ent *p; 1192 struct whitelist_ent *p;
1193 /* If multibus, check if device is on that bus */ 1193 /* If multibus, check if device is on that bus */
1194 if (bus->flags & pmac_i2c_multibus) 1194 if (bus->flags & pmac_i2c_multibus)
1195 if (bus != pmac_i2c_find_bus(np)) 1195 if (bus != pmac_i2c_find_bus(np))
1196 continue; 1196 continue;
1197 for (p = whitelist; p->name != NULL; p++) { 1197 for (p = whitelist; p->name != NULL; p++) {
1198 if (strcmp(np->name, p->name)) 1198 if (strcmp(np->name, p->name))
1199 continue; 1199 continue;
1200 if (p->compatible && 1200 if (p->compatible &&
1201 !of_device_is_compatible(np, p->compatible)) 1201 !of_device_is_compatible(np, p->compatible))
1202 continue; 1202 continue;
1203 if (p->quirks & pmac_i2c_quirk_skip) 1203 if (p->quirks & pmac_i2c_quirk_skip)
1204 break; 1204 break;
1205 callback(np, p->quirks); 1205 callback(np, p->quirks);
1206 break; 1206 break;
1207 } 1207 }
1208 } 1208 }
1209 } 1209 }
1210 } 1210 }
1211 1211
1212 #define MAX_I2C_DATA 64 1212 #define MAX_I2C_DATA 64
1213 1213
1214 struct pmac_i2c_pf_inst 1214 struct pmac_i2c_pf_inst
1215 { 1215 {
1216 struct pmac_i2c_bus *bus; 1216 struct pmac_i2c_bus *bus;
1217 u8 addr; 1217 u8 addr;
1218 u8 buffer[MAX_I2C_DATA]; 1218 u8 buffer[MAX_I2C_DATA];
1219 u8 scratch[MAX_I2C_DATA]; 1219 u8 scratch[MAX_I2C_DATA];
1220 int bytes; 1220 int bytes;
1221 int quirks; 1221 int quirks;
1222 }; 1222 };
1223 1223
1224 static void* pmac_i2c_do_begin(struct pmf_function *func, struct pmf_args *args) 1224 static void* pmac_i2c_do_begin(struct pmf_function *func, struct pmf_args *args)
1225 { 1225 {
1226 struct pmac_i2c_pf_inst *inst; 1226 struct pmac_i2c_pf_inst *inst;
1227 struct pmac_i2c_bus *bus; 1227 struct pmac_i2c_bus *bus;
1228 1228
1229 bus = pmac_i2c_find_bus(func->node); 1229 bus = pmac_i2c_find_bus(func->node);
1230 if (bus == NULL) { 1230 if (bus == NULL) {
1231 printk(KERN_ERR "low_i2c: Can't find bus for %s (pfunc)\n", 1231 printk(KERN_ERR "low_i2c: Can't find bus for %s (pfunc)\n",
1232 func->node->full_name); 1232 func->node->full_name);
1233 return NULL; 1233 return NULL;
1234 } 1234 }
1235 if (pmac_i2c_open(bus, 0)) { 1235 if (pmac_i2c_open(bus, 0)) {
1236 printk(KERN_ERR "low_i2c: Can't open i2c bus for %s (pfunc)\n", 1236 printk(KERN_ERR "low_i2c: Can't open i2c bus for %s (pfunc)\n",
1237 func->node->full_name); 1237 func->node->full_name);
1238 return NULL; 1238 return NULL;
1239 } 1239 }
1240 1240
1241 /* XXX might need GFP_ATOMIC when called during the suspend process, 1241 /* XXX might need GFP_ATOMIC when called during the suspend process,
1242 * but then, there are already lots of issues with suspending when 1242 * but then, there are already lots of issues with suspending when
1243 * near OOM that need to be resolved, the allocator itself should 1243 * near OOM that need to be resolved, the allocator itself should
1244 * probably make GFP_NOIO implicit during suspend 1244 * probably make GFP_NOIO implicit during suspend
1245 */ 1245 */
1246 inst = kzalloc(sizeof(struct pmac_i2c_pf_inst), GFP_KERNEL); 1246 inst = kzalloc(sizeof(struct pmac_i2c_pf_inst), GFP_KERNEL);
1247 if (inst == NULL) { 1247 if (inst == NULL) {
1248 pmac_i2c_close(bus); 1248 pmac_i2c_close(bus);
1249 return NULL; 1249 return NULL;
1250 } 1250 }
1251 inst->bus = bus; 1251 inst->bus = bus;
1252 inst->addr = pmac_i2c_get_dev_addr(func->node); 1252 inst->addr = pmac_i2c_get_dev_addr(func->node);
1253 inst->quirks = (int)(long)func->driver_data; 1253 inst->quirks = (int)(long)func->driver_data;
1254 return inst; 1254 return inst;
1255 } 1255 }
1256 1256
1257 static void pmac_i2c_do_end(struct pmf_function *func, void *instdata) 1257 static void pmac_i2c_do_end(struct pmf_function *func, void *instdata)
1258 { 1258 {
1259 struct pmac_i2c_pf_inst *inst = instdata; 1259 struct pmac_i2c_pf_inst *inst = instdata;
1260 1260
1261 if (inst == NULL) 1261 if (inst == NULL)
1262 return; 1262 return;
1263 pmac_i2c_close(inst->bus); 1263 pmac_i2c_close(inst->bus);
1264 kfree(inst); 1264 kfree(inst);
1265 } 1265 }
1266 1266
1267 static int pmac_i2c_do_read(PMF_STD_ARGS, u32 len) 1267 static int pmac_i2c_do_read(PMF_STD_ARGS, u32 len)
1268 { 1268 {
1269 struct pmac_i2c_pf_inst *inst = instdata; 1269 struct pmac_i2c_pf_inst *inst = instdata;
1270 1270
1271 inst->bytes = len; 1271 inst->bytes = len;
1272 return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_read, 0, 0, 1272 return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_read, 0, 0,
1273 inst->buffer, len); 1273 inst->buffer, len);
1274 } 1274 }
1275 1275
1276 static int pmac_i2c_do_write(PMF_STD_ARGS, u32 len, const u8 *data) 1276 static int pmac_i2c_do_write(PMF_STD_ARGS, u32 len, const u8 *data)
1277 { 1277 {
1278 struct pmac_i2c_pf_inst *inst = instdata; 1278 struct pmac_i2c_pf_inst *inst = instdata;
1279 1279
1280 return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 0, 0, 1280 return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 0, 0,
1281 (u8 *)data, len); 1281 (u8 *)data, len);
1282 } 1282 }
1283 1283
1284 /* This function is used to do the masking & OR'ing for the "rmw" type 1284 /* This function is used to do the masking & OR'ing for the "rmw" type
1285 * callbacks. We should apply the mask and OR in the values in the 1285 * callbacks. We should apply the mask and OR in the values in the
1286 * buffer before writing back. The problem is that it seems that 1286 * buffer before writing back. The problem is that it seems that
1287 * various darwin drivers implement the mask/or differently, thus 1287 * various darwin drivers implement the mask/or differently, thus
1288 * we need to check the quirks first 1288 * we need to check the quirks first
1289 */ 1289 */
1290 static void pmac_i2c_do_apply_rmw(struct pmac_i2c_pf_inst *inst, 1290 static void pmac_i2c_do_apply_rmw(struct pmac_i2c_pf_inst *inst,
1291 u32 len, const u8 *mask, const u8 *val) 1291 u32 len, const u8 *mask, const u8 *val)
1292 { 1292 {
1293 int i; 1293 int i;
1294 1294
1295 if (inst->quirks & pmac_i2c_quirk_invmask) { 1295 if (inst->quirks & pmac_i2c_quirk_invmask) {
1296 for (i = 0; i < len; i ++) 1296 for (i = 0; i < len; i ++)
1297 inst->scratch[i] = (inst->buffer[i] & mask[i]) | val[i]; 1297 inst->scratch[i] = (inst->buffer[i] & mask[i]) | val[i];
1298 } else { 1298 } else {
1299 for (i = 0; i < len; i ++) 1299 for (i = 0; i < len; i ++)
1300 inst->scratch[i] = (inst->buffer[i] & ~mask[i]) 1300 inst->scratch[i] = (inst->buffer[i] & ~mask[i])
1301 | (val[i] & mask[i]); 1301 | (val[i] & mask[i]);
1302 } 1302 }
1303 } 1303 }
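
The two branches of pmac_i2c_do_apply_rmw() differ only in how the mask is interpreted: in the standard case a set mask bit selects bits taken from the new value, while with the invmask quirk a set mask bit preserves the old buffer contents and the value is OR'ed on top. A tiny user-space check of both variants:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t old = 0xa5, mask = 0x0f, val = 0x03;

        /* Standard read-modify-write: keep bits outside the mask,
         * take the masked bits from the new value. */
        uint8_t rmw     = (uint8_t)((old & ~mask) | (val & mask));
        /* "invmask" quirk: keep the bits selected by the mask and
         * OR the value in unmodified. */
        uint8_t invmask = (uint8_t)((old & mask) | val);

        printf("standard: 0x%02x, invmask quirk: 0x%02x\n", rmw, invmask);
        return 0;
    }

For the values above this prints 0xa3 for the standard form and 0x07 for the quirked one, which is why getting the quirk flag wrong corrupts the register instead of updating it.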
1304 1304
1305 static int pmac_i2c_do_rmw(PMF_STD_ARGS, u32 masklen, u32 valuelen, 1305 static int pmac_i2c_do_rmw(PMF_STD_ARGS, u32 masklen, u32 valuelen,
1306 u32 totallen, const u8 *maskdata, 1306 u32 totallen, const u8 *maskdata,
1307 const u8 *valuedata) 1307 const u8 *valuedata)
1308 { 1308 {
1309 struct pmac_i2c_pf_inst *inst = instdata; 1309 struct pmac_i2c_pf_inst *inst = instdata;
1310 1310
1311 if (masklen > inst->bytes || valuelen > inst->bytes || 1311 if (masklen > inst->bytes || valuelen > inst->bytes ||
1312 totallen > inst->bytes || valuelen > masklen) 1312 totallen > inst->bytes || valuelen > masklen)
1313 return -EINVAL; 1313 return -EINVAL;
1314 1314
1315 pmac_i2c_do_apply_rmw(inst, masklen, maskdata, valuedata); 1315 pmac_i2c_do_apply_rmw(inst, masklen, maskdata, valuedata);
1316 1316
1317 return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 0, 0, 1317 return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 0, 0,
1318 inst->scratch, totallen); 1318 inst->scratch, totallen);
1319 } 1319 }
1320 1320
1321 static int pmac_i2c_do_read_sub(PMF_STD_ARGS, u8 subaddr, u32 len) 1321 static int pmac_i2c_do_read_sub(PMF_STD_ARGS, u8 subaddr, u32 len)
1322 { 1322 {
1323 struct pmac_i2c_pf_inst *inst = instdata; 1323 struct pmac_i2c_pf_inst *inst = instdata;
1324 1324
1325 inst->bytes = len; 1325 inst->bytes = len;
1326 return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_read, 1, subaddr, 1326 return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_read, 1, subaddr,
1327 inst->buffer, len); 1327 inst->buffer, len);
1328 } 1328 }
1329 1329
1330 static int pmac_i2c_do_write_sub(PMF_STD_ARGS, u8 subaddr, u32 len, 1330 static int pmac_i2c_do_write_sub(PMF_STD_ARGS, u8 subaddr, u32 len,
1331 const u8 *data) 1331 const u8 *data)
1332 { 1332 {
1333 struct pmac_i2c_pf_inst *inst = instdata; 1333 struct pmac_i2c_pf_inst *inst = instdata;
1334 1334
1335 return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 1, 1335 return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 1,
1336 subaddr, (u8 *)data, len); 1336 subaddr, (u8 *)data, len);
1337 } 1337 }
1338 1338
1339 static int pmac_i2c_do_set_mode(PMF_STD_ARGS, int mode) 1339 static int pmac_i2c_do_set_mode(PMF_STD_ARGS, int mode)
1340 { 1340 {
1341 struct pmac_i2c_pf_inst *inst = instdata; 1341 struct pmac_i2c_pf_inst *inst = instdata;
1342 1342
1343 return pmac_i2c_setmode(inst->bus, mode); 1343 return pmac_i2c_setmode(inst->bus, mode);
1344 } 1344 }
1345 1345
1346 static int pmac_i2c_do_rmw_sub(PMF_STD_ARGS, u8 subaddr, u32 masklen, 1346 static int pmac_i2c_do_rmw_sub(PMF_STD_ARGS, u8 subaddr, u32 masklen,
1347 u32 valuelen, u32 totallen, const u8 *maskdata, 1347 u32 valuelen, u32 totallen, const u8 *maskdata,
1348 const u8 *valuedata) 1348 const u8 *valuedata)
1349 { 1349 {
1350 struct pmac_i2c_pf_inst *inst = instdata; 1350 struct pmac_i2c_pf_inst *inst = instdata;
1351 1351
1352 if (masklen > inst->bytes || valuelen > inst->bytes || 1352 if (masklen > inst->bytes || valuelen > inst->bytes ||
1353 totallen > inst->bytes || valuelen > masklen) 1353 totallen > inst->bytes || valuelen > masklen)
1354 return -EINVAL; 1354 return -EINVAL;
1355 1355
1356 pmac_i2c_do_apply_rmw(inst, masklen, maskdata, valuedata); 1356 pmac_i2c_do_apply_rmw(inst, masklen, maskdata, valuedata);
1357 1357
1358 return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 1, 1358 return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 1,
1359 subaddr, inst->scratch, totallen); 1359 subaddr, inst->scratch, totallen);
1360 } 1360 }
1361 1361
1362 static int pmac_i2c_do_mask_and_comp(PMF_STD_ARGS, u32 len, 1362 static int pmac_i2c_do_mask_and_comp(PMF_STD_ARGS, u32 len,
1363 const u8 *maskdata, 1363 const u8 *maskdata,
1364 const u8 *valuedata) 1364 const u8 *valuedata)
1365 { 1365 {
1366 struct pmac_i2c_pf_inst *inst = instdata; 1366 struct pmac_i2c_pf_inst *inst = instdata;
1367 int i, match; 1367 int i, match;
1368 1368
1369 /* Get return value pointer, it's assumed to be a u32 */ 1369 /* Get return value pointer, it's assumed to be a u32 */
1370 if (!args || !args->count || !args->u[0].p) 1370 if (!args || !args->count || !args->u[0].p)
1371 return -EINVAL; 1371 return -EINVAL;
1372 1372
1373 /* Check buffer */ 1373 /* Check buffer */
1374 if (len > inst->bytes) 1374 if (len > inst->bytes)
1375 return -EINVAL; 1375 return -EINVAL;
1376 1376
1377 for (i = 0, match = 1; match && i < len; i ++) 1377 for (i = 0, match = 1; match && i < len; i ++)
1378 if ((inst->buffer[i] & maskdata[i]) != valuedata[i]) 1378 if ((inst->buffer[i] & maskdata[i]) != valuedata[i])
1379 match = 0; 1379 match = 0;
1380 *args->u[0].p = match; 1380 *args->u[0].p = match;
1381 return 0; 1381 return 0;
1382 } 1382 }
1383 1383
1384 static int pmac_i2c_do_delay(PMF_STD_ARGS, u32 duration) 1384 static int pmac_i2c_do_delay(PMF_STD_ARGS, u32 duration)
1385 { 1385 {
1386 msleep((duration + 999) / 1000); 1386 msleep((duration + 999) / 1000);
1387 return 0; 1387 return 0;
1388 } 1388 }
1389 1389
1390 1390
1391 static struct pmf_handlers pmac_i2c_pfunc_handlers = { 1391 static struct pmf_handlers pmac_i2c_pfunc_handlers = {
1392 .begin = pmac_i2c_do_begin, 1392 .begin = pmac_i2c_do_begin,
1393 .end = pmac_i2c_do_end, 1393 .end = pmac_i2c_do_end,
1394 .read_i2c = pmac_i2c_do_read, 1394 .read_i2c = pmac_i2c_do_read,
1395 .write_i2c = pmac_i2c_do_write, 1395 .write_i2c = pmac_i2c_do_write,
1396 .rmw_i2c = pmac_i2c_do_rmw, 1396 .rmw_i2c = pmac_i2c_do_rmw,
1397 .read_i2c_sub = pmac_i2c_do_read_sub, 1397 .read_i2c_sub = pmac_i2c_do_read_sub,
1398 .write_i2c_sub = pmac_i2c_do_write_sub, 1398 .write_i2c_sub = pmac_i2c_do_write_sub,
1399 .rmw_i2c_sub = pmac_i2c_do_rmw_sub, 1399 .rmw_i2c_sub = pmac_i2c_do_rmw_sub,
1400 .set_i2c_mode = pmac_i2c_do_set_mode, 1400 .set_i2c_mode = pmac_i2c_do_set_mode,
1401 .mask_and_compare = pmac_i2c_do_mask_and_comp, 1401 .mask_and_compare = pmac_i2c_do_mask_and_comp,
1402 .delay = pmac_i2c_do_delay, 1402 .delay = pmac_i2c_do_delay,
1403 }; 1403 };
1404 1404
1405 static void __init pmac_i2c_dev_create(struct device_node *np, int quirks) 1405 static void __init pmac_i2c_dev_create(struct device_node *np, int quirks)
1406 { 1406 {
1407 DBG("dev_create(%s)\n", np->full_name); 1407 DBG("dev_create(%s)\n", np->full_name);
1408 1408
1409 pmf_register_driver(np, &pmac_i2c_pfunc_handlers, 1409 pmf_register_driver(np, &pmac_i2c_pfunc_handlers,
1410 (void *)(long)quirks); 1410 (void *)(long)quirks);
1411 } 1411 }
1412 1412
1413 static void __init pmac_i2c_dev_init(struct device_node *np, int quirks) 1413 static void __init pmac_i2c_dev_init(struct device_node *np, int quirks)
1414 { 1414 {
1415 DBG("dev_create(%s)\n", np->full_name); 1415 DBG("dev_create(%s)\n", np->full_name);
1416 1416
1417 pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_INIT, NULL); 1417 pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_INIT, NULL);
1418 } 1418 }
1419 1419
1420 static void pmac_i2c_dev_suspend(struct device_node *np, int quirks) 1420 static void pmac_i2c_dev_suspend(struct device_node *np, int quirks)
1421 { 1421 {
1422 DBG("dev_suspend(%s)\n", np->full_name); 1422 DBG("dev_suspend(%s)\n", np->full_name);
1423 pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_SLEEP, NULL); 1423 pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_SLEEP, NULL);
1424 } 1424 }
1425 1425
1426 static void pmac_i2c_dev_resume(struct device_node *np, int quirks) 1426 static void pmac_i2c_dev_resume(struct device_node *np, int quirks)
1427 { 1427 {
1428 DBG("dev_resume(%s)\n", np->full_name); 1428 DBG("dev_resume(%s)\n", np->full_name);
1429 pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_WAKE, NULL); 1429 pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_WAKE, NULL);
1430 } 1430 }
1431 1431
1432 void pmac_pfunc_i2c_suspend(void) 1432 void pmac_pfunc_i2c_suspend(void)
1433 { 1433 {
1434 pmac_i2c_devscan(pmac_i2c_dev_suspend); 1434 pmac_i2c_devscan(pmac_i2c_dev_suspend);
1435 } 1435 }
1436 1436
1437 void pmac_pfunc_i2c_resume(void) 1437 void pmac_pfunc_i2c_resume(void)
1438 { 1438 {
1439 pmac_i2c_devscan(pmac_i2c_dev_resume); 1439 pmac_i2c_devscan(pmac_i2c_dev_resume);
1440 } 1440 }
1441 1441
1442 /* 1442 /*
1443 * Initialize us: probe all i2c busses on the machine, instantiate 1443 * Initialize us: probe all i2c busses on the machine, instantiate
1444 * busses and platform functions as needed. 1444 * busses and platform functions as needed.
1445 */ 1445 */
1446 /* This is non-static as it might be called early by smp code */ 1446 /* This is non-static as it might be called early by smp code */
1447 int __init pmac_i2c_init(void) 1447 int __init pmac_i2c_init(void)
1448 { 1448 {
1449 static int i2c_inited; 1449 static int i2c_inited;
1450 1450
1451 if (i2c_inited) 1451 if (i2c_inited)
1452 return 0; 1452 return 0;
1453 i2c_inited = 1; 1453 i2c_inited = 1;
1454 1454
1455 /* Probe keywest-i2c busses */ 1455 /* Probe keywest-i2c busses */
1456 kw_i2c_probe(); 1456 kw_i2c_probe();
1457 1457
1458 #ifdef CONFIG_ADB_PMU 1458 #ifdef CONFIG_ADB_PMU
1459 /* Probe PMU i2c busses */ 1459 /* Probe PMU i2c busses */
1460 pmu_i2c_probe(); 1460 pmu_i2c_probe();
1461 #endif 1461 #endif
1462 1462
1463 #ifdef CONFIG_PMAC_SMU 1463 #ifdef CONFIG_PMAC_SMU
1464 /* Probe SMU i2c busses */ 1464 /* Probe SMU i2c busses */
1465 smu_i2c_probe(); 1465 smu_i2c_probe();
1466 #endif 1466 #endif
1467 1467
1468 /* Now add platform functions for some known devices */ 1468 /* Now add platform functions for some known devices */
1469 pmac_i2c_devscan(pmac_i2c_dev_create); 1469 pmac_i2c_devscan(pmac_i2c_dev_create);
1470 1470
1471 return 0; 1471 return 0;
1472 } 1472 }
1473 machine_arch_initcall(powermac, pmac_i2c_init); 1473 machine_arch_initcall(powermac, pmac_i2c_init);
1474 1474
1475 /* Since pmac_i2c_init can be called too early for the platform device 1475 /* Since pmac_i2c_init can be called too early for the platform device
1476 * registration, we need to do it at a later time. In our case, subsys 1476 * registration, we need to do it at a later time. In our case, subsys
1477 * happens to fit well, though I agree it's a bit of a hack... 1477 * happens to fit well, though I agree it's a bit of a hack...
1478 */ 1478 */
1479 static int __init pmac_i2c_create_platform_devices(void) 1479 static int __init pmac_i2c_create_platform_devices(void)
1480 { 1480 {
1481 struct pmac_i2c_bus *bus; 1481 struct pmac_i2c_bus *bus;
1482 int i = 0; 1482 int i = 0;
1483 1483
1484 /* In the case where we are initialized from smp_init(), we must 1484 /* In the case where we are initialized from smp_init(), we must
1485 * not use the timer (and thus the irq). It's safe from now on 1485 * not use the timer (and thus the irq). It's safe from now on
1486 * though 1486 * though
1487 */ 1487 */
1488 pmac_i2c_force_poll = 0; 1488 pmac_i2c_force_poll = 0;
1489 1489
1490 /* Create platform devices */ 1490 /* Create platform devices */
1491 list_for_each_entry(bus, &pmac_i2c_busses, link) { 1491 list_for_each_entry(bus, &pmac_i2c_busses, link) {
1492 bus->platform_dev = 1492 bus->platform_dev =
1493 platform_device_alloc("i2c-powermac", i++); 1493 platform_device_alloc("i2c-powermac", i++);
1494 if (bus->platform_dev == NULL) 1494 if (bus->platform_dev == NULL)
1495 return -ENOMEM; 1495 return -ENOMEM;
1496 bus->platform_dev->dev.platform_data = bus; 1496 bus->platform_dev->dev.platform_data = bus;
1497 platform_device_add(bus->platform_dev); 1497 platform_device_add(bus->platform_dev);
1498 } 1498 }
1499 1499
1500 /* Now call platform "init" functions */ 1500 /* Now call platform "init" functions */
1501 pmac_i2c_devscan(pmac_i2c_dev_init); 1501 pmac_i2c_devscan(pmac_i2c_dev_init);
1502 1502
1503 return 0; 1503 return 0;
1504 } 1504 }
1505 machine_subsys_initcall(powermac, pmac_i2c_create_platform_devices); 1505 machine_subsys_initcall(powermac, pmac_i2c_create_platform_devices);
1506 1506
arch/powerpc/platforms/powermac/nvram.c
1 /* 1 /*
2 * Copyright (C) 2002 Benjamin Herrenschmidt (benh@kernel.crashing.org) 2 * Copyright (C) 2002 Benjamin Herrenschmidt (benh@kernel.crashing.org)
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 * 8 *
9 * Todo: - add support for the OF persistent properties 9 * Todo: - add support for the OF persistent properties
10 */ 10 */
11 #include <linux/module.h> 11 #include <linux/export.h>
12 #include <linux/kernel.h> 12 #include <linux/kernel.h>
13 #include <linux/stddef.h> 13 #include <linux/stddef.h>
14 #include <linux/string.h> 14 #include <linux/string.h>
15 #include <linux/nvram.h> 15 #include <linux/nvram.h>
16 #include <linux/init.h> 16 #include <linux/init.h>
17 #include <linux/delay.h> 17 #include <linux/delay.h>
18 #include <linux/errno.h> 18 #include <linux/errno.h>
19 #include <linux/adb.h> 19 #include <linux/adb.h>
20 #include <linux/pmu.h> 20 #include <linux/pmu.h>
21 #include <linux/bootmem.h> 21 #include <linux/bootmem.h>
22 #include <linux/completion.h> 22 #include <linux/completion.h>
23 #include <linux/spinlock.h> 23 #include <linux/spinlock.h>
24 #include <asm/sections.h> 24 #include <asm/sections.h>
25 #include <asm/io.h> 25 #include <asm/io.h>
26 #include <asm/system.h> 26 #include <asm/system.h>
27 #include <asm/prom.h> 27 #include <asm/prom.h>
28 #include <asm/machdep.h> 28 #include <asm/machdep.h>
29 #include <asm/nvram.h> 29 #include <asm/nvram.h>
30 30
31 #include "pmac.h" 31 #include "pmac.h"
32 32
33 #define DEBUG 33 #define DEBUG
34 34
35 #ifdef DEBUG 35 #ifdef DEBUG
36 #define DBG(x...) printk(x) 36 #define DBG(x...) printk(x)
37 #else 37 #else
38 #define DBG(x...) 38 #define DBG(x...)
39 #endif 39 #endif
40 40
41 #define NVRAM_SIZE 0x2000 /* 8kB of non-volatile RAM */ 41 #define NVRAM_SIZE 0x2000 /* 8kB of non-volatile RAM */
42 42
43 #define CORE99_SIGNATURE 0x5a 43 #define CORE99_SIGNATURE 0x5a
44 #define CORE99_ADLER_START 0x14 44 #define CORE99_ADLER_START 0x14
45 45
46 /* On Core99, nvram is either a sharp, a micron or an AMD flash */ 46 /* On Core99, nvram is either a sharp, a micron or an AMD flash */
47 #define SM_FLASH_STATUS_DONE 0x80 47 #define SM_FLASH_STATUS_DONE 0x80
48 #define SM_FLASH_STATUS_ERR 0x38 48 #define SM_FLASH_STATUS_ERR 0x38
49 49
50 #define SM_FLASH_CMD_ERASE_CONFIRM 0xd0 50 #define SM_FLASH_CMD_ERASE_CONFIRM 0xd0
51 #define SM_FLASH_CMD_ERASE_SETUP 0x20 51 #define SM_FLASH_CMD_ERASE_SETUP 0x20
52 #define SM_FLASH_CMD_RESET 0xff 52 #define SM_FLASH_CMD_RESET 0xff
53 #define SM_FLASH_CMD_WRITE_SETUP 0x40 53 #define SM_FLASH_CMD_WRITE_SETUP 0x40
54 #define SM_FLASH_CMD_CLEAR_STATUS 0x50 54 #define SM_FLASH_CMD_CLEAR_STATUS 0x50
55 #define SM_FLASH_CMD_READ_STATUS 0x70 55 #define SM_FLASH_CMD_READ_STATUS 0x70
56 56
57 /* CHRP NVRAM header */ 57 /* CHRP NVRAM header */
58 struct chrp_header { 58 struct chrp_header {
59 u8 signature; 59 u8 signature;
60 u8 cksum; 60 u8 cksum;
61 u16 len; 61 u16 len;
62 char name[12]; 62 char name[12];
63 u8 data[0]; 63 u8 data[0];
64 }; 64 };
65 65
66 struct core99_header { 66 struct core99_header {
67 struct chrp_header hdr; 67 struct chrp_header hdr;
68 u32 adler; 68 u32 adler;
69 u32 generation; 69 u32 generation;
70 u32 reserved[2]; 70 u32 reserved[2];
71 }; 71 };
72 72
73 /* 73 /*
74 * Read and write the non-volatile RAM on PowerMacs and CHRP machines. 74 * Read and write the non-volatile RAM on PowerMacs and CHRP machines.
75 */ 75 */
76 static int nvram_naddrs; 76 static int nvram_naddrs;
77 static volatile unsigned char __iomem *nvram_data; 77 static volatile unsigned char __iomem *nvram_data;
78 static int is_core_99; 78 static int is_core_99;
79 static int core99_bank = 0; 79 static int core99_bank = 0;
80 static int nvram_partitions[3]; 80 static int nvram_partitions[3];
81 // XXX Turn that into a sem 81 // XXX Turn that into a sem
82 static DEFINE_RAW_SPINLOCK(nv_lock); 82 static DEFINE_RAW_SPINLOCK(nv_lock);
83 83
84 static int (*core99_write_bank)(int bank, u8* datas); 84 static int (*core99_write_bank)(int bank, u8* datas);
85 static int (*core99_erase_bank)(int bank); 85 static int (*core99_erase_bank)(int bank);
86 86
87 static char *nvram_image; 87 static char *nvram_image;
88 88
89 89
90 static unsigned char core99_nvram_read_byte(int addr) 90 static unsigned char core99_nvram_read_byte(int addr)
91 { 91 {
92 if (nvram_image == NULL) 92 if (nvram_image == NULL)
93 return 0xff; 93 return 0xff;
94 return nvram_image[addr]; 94 return nvram_image[addr];
95 } 95 }
96 96
97 static void core99_nvram_write_byte(int addr, unsigned char val) 97 static void core99_nvram_write_byte(int addr, unsigned char val)
98 { 98 {
99 if (nvram_image == NULL) 99 if (nvram_image == NULL)
100 return; 100 return;
101 nvram_image[addr] = val; 101 nvram_image[addr] = val;
102 } 102 }
103 103
104 static ssize_t core99_nvram_read(char *buf, size_t count, loff_t *index) 104 static ssize_t core99_nvram_read(char *buf, size_t count, loff_t *index)
105 { 105 {
106 int i; 106 int i;
107 107
108 if (nvram_image == NULL) 108 if (nvram_image == NULL)
109 return -ENODEV; 109 return -ENODEV;
110 if (*index > NVRAM_SIZE) 110 if (*index > NVRAM_SIZE)
111 return 0; 111 return 0;
112 112
113 i = *index; 113 i = *index;
114 if (i + count > NVRAM_SIZE) 114 if (i + count > NVRAM_SIZE)
115 count = NVRAM_SIZE - i; 115 count = NVRAM_SIZE - i;
116 116
117 memcpy(buf, &nvram_image[i], count); 117 memcpy(buf, &nvram_image[i], count);
118 *index = i + count; 118 *index = i + count;
119 return count; 119 return count;
120 } 120 }
121 121
122 static ssize_t core99_nvram_write(char *buf, size_t count, loff_t *index) 122 static ssize_t core99_nvram_write(char *buf, size_t count, loff_t *index)
123 { 123 {
124 int i; 124 int i;
125 125
126 if (nvram_image == NULL) 126 if (nvram_image == NULL)
127 return -ENODEV; 127 return -ENODEV;
128 if (*index > NVRAM_SIZE) 128 if (*index > NVRAM_SIZE)
129 return 0; 129 return 0;
130 130
131 i = *index; 131 i = *index;
132 if (i + count > NVRAM_SIZE) 132 if (i + count > NVRAM_SIZE)
133 count = NVRAM_SIZE - i; 133 count = NVRAM_SIZE - i;
134 134
135 memcpy(&nvram_image[i], buf, count); 135 memcpy(&nvram_image[i], buf, count);
136 *index = i + count; 136 *index = i + count;
137 return count; 137 return count;
138 } 138 }
139 139
140 static ssize_t core99_nvram_size(void) 140 static ssize_t core99_nvram_size(void)
141 { 141 {
142 if (nvram_image == NULL) 142 if (nvram_image == NULL)
143 return -ENODEV; 143 return -ENODEV;
144 return NVRAM_SIZE; 144 return NVRAM_SIZE;
145 } 145 }
146 146
147 #ifdef CONFIG_PPC32 147 #ifdef CONFIG_PPC32
148 static volatile unsigned char __iomem *nvram_addr; 148 static volatile unsigned char __iomem *nvram_addr;
149 static int nvram_mult; 149 static int nvram_mult;
150 150
151 static unsigned char direct_nvram_read_byte(int addr) 151 static unsigned char direct_nvram_read_byte(int addr)
152 { 152 {
153 return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]); 153 return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]);
154 } 154 }
155 155
156 static void direct_nvram_write_byte(int addr, unsigned char val) 156 static void direct_nvram_write_byte(int addr, unsigned char val)
157 { 157 {
158 out_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult], val); 158 out_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult], val);
159 } 159 }
160 160
161 161
162 static unsigned char indirect_nvram_read_byte(int addr) 162 static unsigned char indirect_nvram_read_byte(int addr)
163 { 163 {
164 unsigned char val; 164 unsigned char val;
165 unsigned long flags; 165 unsigned long flags;
166 166
167 raw_spin_lock_irqsave(&nv_lock, flags); 167 raw_spin_lock_irqsave(&nv_lock, flags);
168 out_8(nvram_addr, addr >> 5); 168 out_8(nvram_addr, addr >> 5);
169 val = in_8(&nvram_data[(addr & 0x1f) << 4]); 169 val = in_8(&nvram_data[(addr & 0x1f) << 4]);
170 raw_spin_unlock_irqrestore(&nv_lock, flags); 170 raw_spin_unlock_irqrestore(&nv_lock, flags);
171 171
172 return val; 172 return val;
173 } 173 }
174 174
175 static void indirect_nvram_write_byte(int addr, unsigned char val) 175 static void indirect_nvram_write_byte(int addr, unsigned char val)
176 { 176 {
177 unsigned long flags; 177 unsigned long flags;
178 178
179 raw_spin_lock_irqsave(&nv_lock, flags); 179 raw_spin_lock_irqsave(&nv_lock, flags);
180 out_8(nvram_addr, addr >> 5); 180 out_8(nvram_addr, addr >> 5);
181 out_8(&nvram_data[(addr & 0x1f) << 4], val); 181 out_8(&nvram_data[(addr & 0x1f) << 4], val);
182 raw_spin_unlock_irqrestore(&nv_lock, flags); 182 raw_spin_unlock_irqrestore(&nv_lock, flags);
183 } 183 }
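
The indirect scheme above writes addr >> 5 into a page-select register (nvram_addr) and then uses (addr & 0x1f) << 4 as the offset into the data window, i.e. 32 bytes per page, spread 16 bytes apart in the window. A short arithmetic check of that decoding, using an arbitrary example offset:

    #include <stdio.h>

    int main(void)
    {
        int addr = 0x1234 & 0x1fff;      /* NVRAM offsets fit in 13 bits (8 kB) */

        int page   = addr >> 5;          /* value written to the select register */
        int offset = (addr & 0x1f) << 4; /* index into the data window */

        printf("addr 0x%04x -> page 0x%02x, window offset 0x%03x\n",
               addr, page, offset);
        return 0;
    }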
184 184
185 185
186 #ifdef CONFIG_ADB_PMU 186 #ifdef CONFIG_ADB_PMU
187 187
188 static void pmu_nvram_complete(struct adb_request *req) 188 static void pmu_nvram_complete(struct adb_request *req)
189 { 189 {
190 if (req->arg) 190 if (req->arg)
191 complete((struct completion *)req->arg); 191 complete((struct completion *)req->arg);
192 } 192 }
193 193
194 static unsigned char pmu_nvram_read_byte(int addr) 194 static unsigned char pmu_nvram_read_byte(int addr)
195 { 195 {
196 struct adb_request req; 196 struct adb_request req;
197 DECLARE_COMPLETION_ONSTACK(req_complete); 197 DECLARE_COMPLETION_ONSTACK(req_complete);
198 198
199 req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL; 199 req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL;
200 if (pmu_request(&req, pmu_nvram_complete, 3, PMU_READ_NVRAM, 200 if (pmu_request(&req, pmu_nvram_complete, 3, PMU_READ_NVRAM,
201 (addr >> 8) & 0xff, addr & 0xff)) 201 (addr >> 8) & 0xff, addr & 0xff))
202 return 0xff; 202 return 0xff;
203 if (system_state == SYSTEM_RUNNING) 203 if (system_state == SYSTEM_RUNNING)
204 wait_for_completion(&req_complete); 204 wait_for_completion(&req_complete);
205 while (!req.complete) 205 while (!req.complete)
206 pmu_poll(); 206 pmu_poll();
207 return req.reply[0]; 207 return req.reply[0];
208 } 208 }
209 209
210 static void pmu_nvram_write_byte(int addr, unsigned char val) 210 static void pmu_nvram_write_byte(int addr, unsigned char val)
211 { 211 {
212 struct adb_request req; 212 struct adb_request req;
213 DECLARE_COMPLETION_ONSTACK(req_complete); 213 DECLARE_COMPLETION_ONSTACK(req_complete);
214 214
215 req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL; 215 req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL;
216 if (pmu_request(&req, pmu_nvram_complete, 4, PMU_WRITE_NVRAM, 216 if (pmu_request(&req, pmu_nvram_complete, 4, PMU_WRITE_NVRAM,
217 (addr >> 8) & 0xff, addr & 0xff, val)) 217 (addr >> 8) & 0xff, addr & 0xff, val))
218 return; 218 return;
219 if (system_state == SYSTEM_RUNNING) 219 if (system_state == SYSTEM_RUNNING)
220 wait_for_completion(&req_complete); 220 wait_for_completion(&req_complete);
221 while (!req.complete) 221 while (!req.complete)
222 pmu_poll(); 222 pmu_poll();
223 } 223 }
224 224
225 #endif /* CONFIG_ADB_PMU */ 225 #endif /* CONFIG_ADB_PMU */
226 #endif /* CONFIG_PPC32 */ 226 #endif /* CONFIG_PPC32 */
227 227
228 static u8 chrp_checksum(struct chrp_header* hdr) 228 static u8 chrp_checksum(struct chrp_header* hdr)
229 { 229 {
230 u8 *ptr; 230 u8 *ptr;
231 u16 sum = hdr->signature; 231 u16 sum = hdr->signature;
232 for (ptr = (u8 *)&hdr->len; ptr < hdr->data; ptr++) 232 for (ptr = (u8 *)&hdr->len; ptr < hdr->data; ptr++)
233 sum += *ptr; 233 sum += *ptr;
234 while (sum > 0xFF) 234 while (sum > 0xFF)
235 sum = (sum & 0xFF) + (sum>>8); 235 sum = (sum & 0xFF) + (sum>>8);
236 return sum; 236 return sum;
237 } 237 }
238 238
239 static u32 core99_calc_adler(u8 *buffer) 239 static u32 core99_calc_adler(u8 *buffer)
240 { 240 {
241 int cnt; 241 int cnt;
242 u32 low, high; 242 u32 low, high;
243 243
244 buffer += CORE99_ADLER_START; 244 buffer += CORE99_ADLER_START;
245 low = 1; 245 low = 1;
246 high = 0; 246 high = 0;
247 for (cnt=0; cnt<(NVRAM_SIZE-CORE99_ADLER_START); cnt++) { 247 for (cnt=0; cnt<(NVRAM_SIZE-CORE99_ADLER_START); cnt++) {
248 if ((cnt % 5000) == 0) { 248 if ((cnt % 5000) == 0) {
249 low %= 65521UL; 249 low %= 65521UL;
250 high %= 65521UL; 250 high %= 65521UL;
251 } 251 }
252 low += buffer[cnt]; 252 low += buffer[cnt];
253 high += low; 253 high += low;
254 } 254 }
255 low %= 65521UL; 255 low %= 65521UL;
256 high %= 65521UL; 256 high %= 65521UL;
257 257
258 return (high << 16) | low; 258 return (high << 16) | low;
259 } 259 }
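
core99_calc_adler() is essentially Adler-32 computed over the NVRAM image starting at CORE99_ADLER_START, with the running sums reduced modulo 65521 every 5000 bytes to keep them inside 32 bits. For reference, a plain standalone Adler-32 over a small buffer looks like this (a sketch for comparison, not the kernel routine):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    static uint32_t adler32(const uint8_t *buf, size_t len)
    {
        uint32_t low = 1, high = 0;
        size_t i;

        for (i = 0; i < len; i++) {
            low  = (low + buf[i]) % 65521u;
            high = (high + low)   % 65521u;
        }
        return (high << 16) | low;
    }

    int main(void)
    {
        const char *msg = "Wikipedia";   /* commonly quoted test vector */

        /* The expected value for "Wikipedia" is 0x11E60398. */
        printf("adler32 = 0x%08X\n",
               adler32((const uint8_t *)msg, strlen(msg)));
        return 0;
    }

core99_check() relies on this checksum (plus the CHRP header checksum and signature) to pick the newest valid NVRAM bank by generation number.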
260 260
261 static u32 core99_check(u8* datas) 261 static u32 core99_check(u8* datas)
262 { 262 {
263 struct core99_header* hdr99 = (struct core99_header*)datas; 263 struct core99_header* hdr99 = (struct core99_header*)datas;
264 264
265 if (hdr99->hdr.signature != CORE99_SIGNATURE) { 265 if (hdr99->hdr.signature != CORE99_SIGNATURE) {
266 DBG("Invalid signature\n"); 266 DBG("Invalid signature\n");
267 return 0; 267 return 0;
268 } 268 }
269 if (hdr99->hdr.cksum != chrp_checksum(&hdr99->hdr)) { 269 if (hdr99->hdr.cksum != chrp_checksum(&hdr99->hdr)) {
270 DBG("Invalid checksum\n"); 270 DBG("Invalid checksum\n");
271 return 0; 271 return 0;
272 } 272 }
273 if (hdr99->adler != core99_calc_adler(datas)) { 273 if (hdr99->adler != core99_calc_adler(datas)) {
274 DBG("Invalid adler\n"); 274 DBG("Invalid adler\n");
275 return 0; 275 return 0;
276 } 276 }
277 return hdr99->generation; 277 return hdr99->generation;
278 } 278 }
279 279
280 static int sm_erase_bank(int bank) 280 static int sm_erase_bank(int bank)
281 { 281 {
282 int stat, i; 282 int stat, i;
283 unsigned long timeout; 283 unsigned long timeout;
284 284
285 u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE; 285 u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE;
286 286
287 DBG("nvram: Sharp/Micron Erasing bank %d...\n", bank); 287 DBG("nvram: Sharp/Micron Erasing bank %d...\n", bank);
288 288
289 out_8(base, SM_FLASH_CMD_ERASE_SETUP); 289 out_8(base, SM_FLASH_CMD_ERASE_SETUP);
290 out_8(base, SM_FLASH_CMD_ERASE_CONFIRM); 290 out_8(base, SM_FLASH_CMD_ERASE_CONFIRM);
291 timeout = 0; 291 timeout = 0;
292 do { 292 do {
293 if (++timeout > 1000000) { 293 if (++timeout > 1000000) {
294 printk(KERN_ERR "nvram: Sharp/Micron flash erase timeout !\n"); 294 printk(KERN_ERR "nvram: Sharp/Micron flash erase timeout !\n");
295 break; 295 break;
296 } 296 }
297 out_8(base, SM_FLASH_CMD_READ_STATUS); 297 out_8(base, SM_FLASH_CMD_READ_STATUS);
298 stat = in_8(base); 298 stat = in_8(base);
299 } while (!(stat & SM_FLASH_STATUS_DONE)); 299 } while (!(stat & SM_FLASH_STATUS_DONE));
300 300
301 out_8(base, SM_FLASH_CMD_CLEAR_STATUS); 301 out_8(base, SM_FLASH_CMD_CLEAR_STATUS);
302 out_8(base, SM_FLASH_CMD_RESET); 302 out_8(base, SM_FLASH_CMD_RESET);
303 303
304 for (i=0; i<NVRAM_SIZE; i++) 304 for (i=0; i<NVRAM_SIZE; i++)
305 if (base[i] != 0xff) { 305 if (base[i] != 0xff) {
306 printk(KERN_ERR "nvram: Sharp/Micron flash erase failed !\n"); 306 printk(KERN_ERR "nvram: Sharp/Micron flash erase failed !\n");
307 return -ENXIO; 307 return -ENXIO;
308 } 308 }
309 return 0; 309 return 0;
310 } 310 }
311 311
312 static int sm_write_bank(int bank, u8* datas) 312 static int sm_write_bank(int bank, u8* datas)
313 { 313 {
314 int i, stat = 0; 314 int i, stat = 0;
315 unsigned long timeout; 315 unsigned long timeout;
316 316
317 u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE; 317 u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE;
318 318
319 DBG("nvram: Sharp/Micron Writing bank %d...\n", bank); 319 DBG("nvram: Sharp/Micron Writing bank %d...\n", bank);
320 320
321 for (i=0; i<NVRAM_SIZE; i++) { 321 for (i=0; i<NVRAM_SIZE; i++) {
322 out_8(base+i, SM_FLASH_CMD_WRITE_SETUP); 322 out_8(base+i, SM_FLASH_CMD_WRITE_SETUP);
323 udelay(1); 323 udelay(1);
324 out_8(base+i, datas[i]); 324 out_8(base+i, datas[i]);
325 timeout = 0; 325 timeout = 0;
326 do { 326 do {
327 if (++timeout > 1000000) { 327 if (++timeout > 1000000) {
328 printk(KERN_ERR "nvram: Sharp/Micron flash write timeout !\n"); 328 printk(KERN_ERR "nvram: Sharp/Micron flash write timeout !\n");
329 break; 329 break;
330 } 330 }
331 out_8(base, SM_FLASH_CMD_READ_STATUS); 331 out_8(base, SM_FLASH_CMD_READ_STATUS);
332 stat = in_8(base); 332 stat = in_8(base);
333 } while (!(stat & SM_FLASH_STATUS_DONE)); 333 } while (!(stat & SM_FLASH_STATUS_DONE));
334 if (!(stat & SM_FLASH_STATUS_DONE)) 334 if (!(stat & SM_FLASH_STATUS_DONE))
335 break; 335 break;
336 } 336 }
337 out_8(base, SM_FLASH_CMD_CLEAR_STATUS); 337 out_8(base, SM_FLASH_CMD_CLEAR_STATUS);
338 out_8(base, SM_FLASH_CMD_RESET); 338 out_8(base, SM_FLASH_CMD_RESET);
339 for (i=0; i<NVRAM_SIZE; i++) 339 for (i=0; i<NVRAM_SIZE; i++)
340 if (base[i] != datas[i]) { 340 if (base[i] != datas[i]) {
341 printk(KERN_ERR "nvram: Sharp/Micron flash write failed !\n"); 341 printk(KERN_ERR "nvram: Sharp/Micron flash write failed !\n");
342 return -ENXIO; 342 return -ENXIO;
343 } 343 }
344 return 0; 344 return 0;
345 } 345 }
346 346
347 static int amd_erase_bank(int bank) 347 static int amd_erase_bank(int bank)
348 { 348 {
349 int i, stat = 0; 349 int i, stat = 0;
350 unsigned long timeout; 350 unsigned long timeout;
351 351
352 u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE; 352 u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE;
353 353
354 DBG("nvram: AMD Erasing bank %d...\n", bank); 354 DBG("nvram: AMD Erasing bank %d...\n", bank);
355 355
356 /* Unlock 1 */ 356 /* Unlock 1 */
357 out_8(base+0x555, 0xaa); 357 out_8(base+0x555, 0xaa);
358 udelay(1); 358 udelay(1);
359 /* Unlock 2 */ 359 /* Unlock 2 */
360 out_8(base+0x2aa, 0x55); 360 out_8(base+0x2aa, 0x55);
361 udelay(1); 361 udelay(1);
362 362
363 /* Sector-Erase */ 363 /* Sector-Erase */
364 out_8(base+0x555, 0x80); 364 out_8(base+0x555, 0x80);
365 udelay(1); 365 udelay(1);
366 out_8(base+0x555, 0xaa); 366 out_8(base+0x555, 0xaa);
367 udelay(1); 367 udelay(1);
368 out_8(base+0x2aa, 0x55); 368 out_8(base+0x2aa, 0x55);
369 udelay(1); 369 udelay(1);
370 out_8(base, 0x30); 370 out_8(base, 0x30);
371 udelay(1); 371 udelay(1);
372 372
373 timeout = 0; 373 timeout = 0;
374 do { 374 do {
375 if (++timeout > 1000000) { 375 if (++timeout > 1000000) {
376 printk(KERN_ERR "nvram: AMD flash erase timeout !\n"); 376 printk(KERN_ERR "nvram: AMD flash erase timeout !\n");
377 break; 377 break;
378 } 378 }
379 stat = in_8(base) ^ in_8(base); 379 stat = in_8(base) ^ in_8(base);
380 } while (stat != 0); 380 } while (stat != 0);
381 381
382 /* Reset */ 382 /* Reset */
383 out_8(base, 0xf0); 383 out_8(base, 0xf0);
384 udelay(1); 384 udelay(1);
385 385
386 for (i=0; i<NVRAM_SIZE; i++) 386 for (i=0; i<NVRAM_SIZE; i++)
387 if (base[i] != 0xff) { 387 if (base[i] != 0xff) {
388 printk(KERN_ERR "nvram: AMD flash erase failed !\n"); 388 printk(KERN_ERR "nvram: AMD flash erase failed !\n");
389 return -ENXIO; 389 return -ENXIO;
390 } 390 }
391 return 0; 391 return 0;
392 } 392 }
393 393
394 static int amd_write_bank(int bank, u8* datas) 394 static int amd_write_bank(int bank, u8* datas)
395 { 395 {
396 int i, stat = 0; 396 int i, stat = 0;
397 unsigned long timeout; 397 unsigned long timeout;
398 398
399 u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE; 399 u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE;
400 400
401 DBG("nvram: AMD Writing bank %d...\n", bank); 401 DBG("nvram: AMD Writing bank %d...\n", bank);
402 402
403 for (i=0; i<NVRAM_SIZE; i++) { 403 for (i=0; i<NVRAM_SIZE; i++) {
404 /* Unlock 1 */ 404 /* Unlock 1 */
405 out_8(base+0x555, 0xaa); 405 out_8(base+0x555, 0xaa);
406 udelay(1); 406 udelay(1);
407 /* Unlock 2 */ 407 /* Unlock 2 */
408 out_8(base+0x2aa, 0x55); 408 out_8(base+0x2aa, 0x55);
409 udelay(1); 409 udelay(1);
410 410
411 /* Write single word */ 411 /* Write single word */
412 out_8(base+0x555, 0xa0); 412 out_8(base+0x555, 0xa0);
413 udelay(1); 413 udelay(1);
414 out_8(base+i, datas[i]); 414 out_8(base+i, datas[i]);
415 415
416 timeout = 0; 416 timeout = 0;
417 do { 417 do {
418 if (++timeout > 1000000) { 418 if (++timeout > 1000000) {
419 printk(KERN_ERR "nvram: AMD flash write timeout !\n"); 419 printk(KERN_ERR "nvram: AMD flash write timeout !\n");
420 break; 420 break;
421 } 421 }
422 stat = in_8(base) ^ in_8(base); 422 stat = in_8(base) ^ in_8(base);
423 } while (stat != 0); 423 } while (stat != 0);
424 if (stat != 0) 424 if (stat != 0)
425 break; 425 break;
426 } 426 }
427 427
428 /* Reset */ 428 /* Reset */
429 out_8(base, 0xf0); 429 out_8(base, 0xf0);
430 udelay(1); 430 udelay(1);
431 431
432 for (i=0; i<NVRAM_SIZE; i++) 432 for (i=0; i<NVRAM_SIZE; i++)
433 if (base[i] != datas[i]) { 433 if (base[i] != datas[i]) {
434 printk(KERN_ERR "nvram: AMD flash write failed !\n"); 434 printk(KERN_ERR "nvram: AMD flash write failed !\n");
435 return -ENXIO; 435 return -ENXIO;
436 } 436 }
437 return 0; 437 return 0;
438 } 438 }
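
/*
 * Note (added for clarity, not in the original source): both AMD routines
 * above begin with the standard two-cycle unlock handshake -- 0xaa to offset
 * 0x555 followed by 0x55 to offset 0x2aa -- before issuing the erase
 * (0x80, 0xaa, 0x55, 0x30) or program (0xa0) command.  Completion is
 * detected by reading the array twice and waiting until both reads match,
 * i.e. until the status "toggle bit" stops toggling.
 */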
439 439
440 static void __init lookup_partitions(void) 440 static void __init lookup_partitions(void)
441 { 441 {
442 u8 buffer[17]; 442 u8 buffer[17];
443 int i, offset; 443 int i, offset;
444 struct chrp_header* hdr; 444 struct chrp_header* hdr;
445 445
446 if (pmac_newworld) { 446 if (pmac_newworld) {
447 nvram_partitions[pmac_nvram_OF] = -1; 447 nvram_partitions[pmac_nvram_OF] = -1;
448 nvram_partitions[pmac_nvram_XPRAM] = -1; 448 nvram_partitions[pmac_nvram_XPRAM] = -1;
449 nvram_partitions[pmac_nvram_NR] = -1; 449 nvram_partitions[pmac_nvram_NR] = -1;
450 hdr = (struct chrp_header *)buffer; 450 hdr = (struct chrp_header *)buffer;
451 451
452 offset = 0; 452 offset = 0;
453 buffer[16] = 0; 453 buffer[16] = 0;
454 do { 454 do {
455 for (i=0;i<16;i++) 455 for (i=0;i<16;i++)
456 buffer[i] = ppc_md.nvram_read_val(offset+i); 456 buffer[i] = ppc_md.nvram_read_val(offset+i);
457 if (!strcmp(hdr->name, "common")) 457 if (!strcmp(hdr->name, "common"))
458 nvram_partitions[pmac_nvram_OF] = offset + 0x10; 458 nvram_partitions[pmac_nvram_OF] = offset + 0x10;
459 if (!strcmp(hdr->name, "APL,MacOS75")) { 459 if (!strcmp(hdr->name, "APL,MacOS75")) {
460 nvram_partitions[pmac_nvram_XPRAM] = offset + 0x10; 460 nvram_partitions[pmac_nvram_XPRAM] = offset + 0x10;
461 nvram_partitions[pmac_nvram_NR] = offset + 0x110; 461 nvram_partitions[pmac_nvram_NR] = offset + 0x110;
462 } 462 }
463 offset += (hdr->len * 0x10); 463 offset += (hdr->len * 0x10);
464 } while(offset < NVRAM_SIZE); 464 } while(offset < NVRAM_SIZE);
465 } else { 465 } else {
466 nvram_partitions[pmac_nvram_OF] = 0x1800; 466 nvram_partitions[pmac_nvram_OF] = 0x1800;
467 nvram_partitions[pmac_nvram_XPRAM] = 0x1300; 467 nvram_partitions[pmac_nvram_XPRAM] = 0x1300;
468 nvram_partitions[pmac_nvram_NR] = 0x1400; 468 nvram_partitions[pmac_nvram_NR] = 0x1400;
469 } 469 }
470 DBG("nvram: OF partition at 0x%x\n", nvram_partitions[pmac_nvram_OF]); 470 DBG("nvram: OF partition at 0x%x\n", nvram_partitions[pmac_nvram_OF]);
471 DBG("nvram: XP partition at 0x%x\n", nvram_partitions[pmac_nvram_XPRAM]); 471 DBG("nvram: XP partition at 0x%x\n", nvram_partitions[pmac_nvram_XPRAM]);
472 DBG("nvram: NR partition at 0x%x\n", nvram_partitions[pmac_nvram_NR]); 472 DBG("nvram: NR partition at 0x%x\n", nvram_partitions[pmac_nvram_NR]);
473 } 473 }
474 474
475 static void core99_nvram_sync(void) 475 static void core99_nvram_sync(void)
476 { 476 {
477 struct core99_header* hdr99; 477 struct core99_header* hdr99;
478 unsigned long flags; 478 unsigned long flags;
479 479
480 if (!is_core_99 || !nvram_data || !nvram_image) 480 if (!is_core_99 || !nvram_data || !nvram_image)
481 return; 481 return;
482 482
483 raw_spin_lock_irqsave(&nv_lock, flags); 483 raw_spin_lock_irqsave(&nv_lock, flags);
484 if (!memcmp(nvram_image, (u8*)nvram_data + core99_bank*NVRAM_SIZE, 484 if (!memcmp(nvram_image, (u8*)nvram_data + core99_bank*NVRAM_SIZE,
485 NVRAM_SIZE)) 485 NVRAM_SIZE))
486 goto bail; 486 goto bail;
487 487
488 DBG("Updating nvram...\n"); 488 DBG("Updating nvram...\n");
489 489
490 hdr99 = (struct core99_header*)nvram_image; 490 hdr99 = (struct core99_header*)nvram_image;
491 hdr99->generation++; 491 hdr99->generation++;
492 hdr99->hdr.signature = CORE99_SIGNATURE; 492 hdr99->hdr.signature = CORE99_SIGNATURE;
493 hdr99->hdr.cksum = chrp_checksum(&hdr99->hdr); 493 hdr99->hdr.cksum = chrp_checksum(&hdr99->hdr);
494 hdr99->adler = core99_calc_adler(nvram_image); 494 hdr99->adler = core99_calc_adler(nvram_image);
495 core99_bank = core99_bank ? 0 : 1; 495 core99_bank = core99_bank ? 0 : 1;
496 if (core99_erase_bank) 496 if (core99_erase_bank)
497 if (core99_erase_bank(core99_bank)) { 497 if (core99_erase_bank(core99_bank)) {
498 printk("nvram: Error erasing bank %d\n", core99_bank); 498 printk("nvram: Error erasing bank %d\n", core99_bank);
499 goto bail; 499 goto bail;
500 } 500 }
501 if (core99_write_bank) 501 if (core99_write_bank)
502 if (core99_write_bank(core99_bank, nvram_image)) 502 if (core99_write_bank(core99_bank, nvram_image))
503 printk("nvram: Error writing bank %d\n", core99_bank); 503 printk("nvram: Error writing bank %d\n", core99_bank);
504 bail: 504 bail:
505 raw_spin_unlock_irqrestore(&nv_lock, flags); 505 raw_spin_unlock_irqrestore(&nv_lock, flags);
506 506
507 #ifdef DEBUG 507 #ifdef DEBUG
508 mdelay(2000); 508 mdelay(2000);
509 #endif 509 #endif
510 } 510 }
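
/*
 * Note (added for clarity, not in the original source): the sync path above
 * implements a simple dual-bank scheme.  The in-memory image gets a bumped
 * generation count, a fresh header checksum and Adler value, and is then
 * written to the *other* flash bank.  If the erase or write fails, the
 * previously active bank is left untouched, so a valid NVRAM image survives.
 */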
511 511
512 static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr) 512 static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr)
513 { 513 {
514 int i; 514 int i;
515 u32 gen_bank0, gen_bank1; 515 u32 gen_bank0, gen_bank1;
516 516
517 if (nvram_naddrs < 1) { 517 if (nvram_naddrs < 1) {
518 printk(KERN_ERR "nvram: no address\n"); 518 printk(KERN_ERR "nvram: no address\n");
519 return -EINVAL; 519 return -EINVAL;
520 } 520 }
521 nvram_image = alloc_bootmem(NVRAM_SIZE); 521 nvram_image = alloc_bootmem(NVRAM_SIZE);
522 if (nvram_image == NULL) { 522 if (nvram_image == NULL) {
523 printk(KERN_ERR "nvram: can't allocate ram image\n"); 523 printk(KERN_ERR "nvram: can't allocate ram image\n");
524 return -ENOMEM; 524 return -ENOMEM;
525 } 525 }
526 nvram_data = ioremap(addr, NVRAM_SIZE*2); 526 nvram_data = ioremap(addr, NVRAM_SIZE*2);
527 nvram_naddrs = 1; /* Make sure we get the correct case */ 527 nvram_naddrs = 1; /* Make sure we get the correct case */
528 528
529 DBG("nvram: Checking bank 0...\n"); 529 DBG("nvram: Checking bank 0...\n");
530 530
531 gen_bank0 = core99_check((u8 *)nvram_data); 531 gen_bank0 = core99_check((u8 *)nvram_data);
532 gen_bank1 = core99_check((u8 *)nvram_data + NVRAM_SIZE); 532 gen_bank1 = core99_check((u8 *)nvram_data + NVRAM_SIZE);
533 core99_bank = (gen_bank0 < gen_bank1) ? 1 : 0; 533 core99_bank = (gen_bank0 < gen_bank1) ? 1 : 0;
534 534
535 DBG("nvram: gen0=%d, gen1=%d\n", gen_bank0, gen_bank1); 535 DBG("nvram: gen0=%d, gen1=%d\n", gen_bank0, gen_bank1);
536 DBG("nvram: Active bank is: %d\n", core99_bank); 536 DBG("nvram: Active bank is: %d\n", core99_bank);
537 537
538 for (i=0; i<NVRAM_SIZE; i++) 538 for (i=0; i<NVRAM_SIZE; i++)
539 nvram_image[i] = nvram_data[i + core99_bank*NVRAM_SIZE]; 539 nvram_image[i] = nvram_data[i + core99_bank*NVRAM_SIZE];
540 540
541 ppc_md.nvram_read_val = core99_nvram_read_byte; 541 ppc_md.nvram_read_val = core99_nvram_read_byte;
542 ppc_md.nvram_write_val = core99_nvram_write_byte; 542 ppc_md.nvram_write_val = core99_nvram_write_byte;
543 ppc_md.nvram_read = core99_nvram_read; 543 ppc_md.nvram_read = core99_nvram_read;
544 ppc_md.nvram_write = core99_nvram_write; 544 ppc_md.nvram_write = core99_nvram_write;
545 ppc_md.nvram_size = core99_nvram_size; 545 ppc_md.nvram_size = core99_nvram_size;
546 ppc_md.nvram_sync = core99_nvram_sync; 546 ppc_md.nvram_sync = core99_nvram_sync;
547 ppc_md.machine_shutdown = core99_nvram_sync; 547 ppc_md.machine_shutdown = core99_nvram_sync;
548 /* 548 /*
549 * Maybe we could be smarter here, though making an exclusive list 549 * Maybe we could be smarter here, though making an exclusive list
550 * of known flash chips is a bit nasty as older OF didn't provide us 550 * of known flash chips is a bit nasty as older OF didn't provide us
551 * with a useful "compatible" entry. A solution would be to really 551 * with a useful "compatible" entry. A solution would be to really
552 * identify the chip using flash id commands and base ourselves on 552 * identify the chip using flash id commands and base ourselves on
553 * a list of known chip IDs 553 * a list of known chip IDs
554 */ 554 */
555 if (of_device_is_compatible(dp, "amd-0137")) { 555 if (of_device_is_compatible(dp, "amd-0137")) {
556 core99_erase_bank = amd_erase_bank; 556 core99_erase_bank = amd_erase_bank;
557 core99_write_bank = amd_write_bank; 557 core99_write_bank = amd_write_bank;
558 } else { 558 } else {
559 core99_erase_bank = sm_erase_bank; 559 core99_erase_bank = sm_erase_bank;
560 core99_write_bank = sm_write_bank; 560 core99_write_bank = sm_write_bank;
561 } 561 }
562 return 0; 562 return 0;
563 } 563 }
564 564
565 int __init pmac_nvram_init(void) 565 int __init pmac_nvram_init(void)
566 { 566 {
567 struct device_node *dp; 567 struct device_node *dp;
568 struct resource r1, r2; 568 struct resource r1, r2;
569 unsigned int s1 = 0, s2 = 0; 569 unsigned int s1 = 0, s2 = 0;
570 int err = 0; 570 int err = 0;
571 571
572 nvram_naddrs = 0; 572 nvram_naddrs = 0;
573 573
574 dp = of_find_node_by_name(NULL, "nvram"); 574 dp = of_find_node_by_name(NULL, "nvram");
575 if (dp == NULL) { 575 if (dp == NULL) {
576 printk(KERN_ERR "Can't find NVRAM device\n"); 576 printk(KERN_ERR "Can't find NVRAM device\n");
577 return -ENODEV; 577 return -ENODEV;
578 } 578 }
579 579
580 /* Try to obtain an address */ 580 /* Try to obtain an address */
581 if (of_address_to_resource(dp, 0, &r1) == 0) { 581 if (of_address_to_resource(dp, 0, &r1) == 0) {
582 nvram_naddrs = 1; 582 nvram_naddrs = 1;
583 s1 = resource_size(&r1); 583 s1 = resource_size(&r1);
584 if (of_address_to_resource(dp, 1, &r2) == 0) { 584 if (of_address_to_resource(dp, 1, &r2) == 0) {
585 nvram_naddrs = 2; 585 nvram_naddrs = 2;
586 s2 = resource_size(&r2); 586 s2 = resource_size(&r2);
587 } 587 }
588 } 588 }
589 589
590 is_core_99 = of_device_is_compatible(dp, "nvram,flash"); 590 is_core_99 = of_device_is_compatible(dp, "nvram,flash");
591 if (is_core_99) { 591 if (is_core_99) {
592 err = core99_nvram_setup(dp, r1.start); 592 err = core99_nvram_setup(dp, r1.start);
593 goto bail; 593 goto bail;
594 } 594 }
595 595
596 #ifdef CONFIG_PPC32 596 #ifdef CONFIG_PPC32
597 if (machine_is(chrp) && nvram_naddrs == 1) { 597 if (machine_is(chrp) && nvram_naddrs == 1) {
598 nvram_data = ioremap(r1.start, s1); 598 nvram_data = ioremap(r1.start, s1);
599 nvram_mult = 1; 599 nvram_mult = 1;
600 ppc_md.nvram_read_val = direct_nvram_read_byte; 600 ppc_md.nvram_read_val = direct_nvram_read_byte;
601 ppc_md.nvram_write_val = direct_nvram_write_byte; 601 ppc_md.nvram_write_val = direct_nvram_write_byte;
602 } else if (nvram_naddrs == 1) { 602 } else if (nvram_naddrs == 1) {
603 nvram_data = ioremap(r1.start, s1); 603 nvram_data = ioremap(r1.start, s1);
604 nvram_mult = (s1 + NVRAM_SIZE - 1) / NVRAM_SIZE; 604 nvram_mult = (s1 + NVRAM_SIZE - 1) / NVRAM_SIZE;
605 ppc_md.nvram_read_val = direct_nvram_read_byte; 605 ppc_md.nvram_read_val = direct_nvram_read_byte;
606 ppc_md.nvram_write_val = direct_nvram_write_byte; 606 ppc_md.nvram_write_val = direct_nvram_write_byte;
607 } else if (nvram_naddrs == 2) { 607 } else if (nvram_naddrs == 2) {
608 nvram_addr = ioremap(r1.start, s1); 608 nvram_addr = ioremap(r1.start, s1);
609 nvram_data = ioremap(r2.start, s2); 609 nvram_data = ioremap(r2.start, s2);
610 ppc_md.nvram_read_val = indirect_nvram_read_byte; 610 ppc_md.nvram_read_val = indirect_nvram_read_byte;
611 ppc_md.nvram_write_val = indirect_nvram_write_byte; 611 ppc_md.nvram_write_val = indirect_nvram_write_byte;
612 } else if (nvram_naddrs == 0 && sys_ctrler == SYS_CTRLER_PMU) { 612 } else if (nvram_naddrs == 0 && sys_ctrler == SYS_CTRLER_PMU) {
613 #ifdef CONFIG_ADB_PMU 613 #ifdef CONFIG_ADB_PMU
614 nvram_naddrs = -1; 614 nvram_naddrs = -1;
615 ppc_md.nvram_read_val = pmu_nvram_read_byte; 615 ppc_md.nvram_read_val = pmu_nvram_read_byte;
616 ppc_md.nvram_write_val = pmu_nvram_write_byte; 616 ppc_md.nvram_write_val = pmu_nvram_write_byte;
617 #endif /* CONFIG_ADB_PMU */ 617 #endif /* CONFIG_ADB_PMU */
618 } else { 618 } else {
619 printk(KERN_ERR "Incompatible type of NVRAM\n"); 619 printk(KERN_ERR "Incompatible type of NVRAM\n");
620 err = -ENXIO; 620 err = -ENXIO;
621 } 621 }
622 #endif /* CONFIG_PPC32 */ 622 #endif /* CONFIG_PPC32 */
623 bail: 623 bail:
624 of_node_put(dp); 624 of_node_put(dp);
625 if (err == 0) 625 if (err == 0)
626 lookup_partitions(); 626 lookup_partitions();
627 return err; 627 return err;
628 } 628 }
629 629
630 int pmac_get_partition(int partition) 630 int pmac_get_partition(int partition)
631 { 631 {
632 return nvram_partitions[partition]; 632 return nvram_partitions[partition];
633 } 633 }
634 634
635 u8 pmac_xpram_read(int xpaddr) 635 u8 pmac_xpram_read(int xpaddr)
636 { 636 {
637 int offset = pmac_get_partition(pmac_nvram_XPRAM); 637 int offset = pmac_get_partition(pmac_nvram_XPRAM);
638 638
639 if (offset < 0 || xpaddr < 0 || xpaddr > 0x100) 639 if (offset < 0 || xpaddr < 0 || xpaddr > 0x100)
640 return 0xff; 640 return 0xff;
641 641
642 return ppc_md.nvram_read_val(xpaddr + offset); 642 return ppc_md.nvram_read_val(xpaddr + offset);
643 } 643 }
644 644
645 void pmac_xpram_write(int xpaddr, u8 data) 645 void pmac_xpram_write(int xpaddr, u8 data)
646 { 646 {
647 int offset = pmac_get_partition(pmac_nvram_XPRAM); 647 int offset = pmac_get_partition(pmac_nvram_XPRAM);
648 648
649 if (offset < 0 || xpaddr < 0 || xpaddr > 0x100) 649 if (offset < 0 || xpaddr < 0 || xpaddr > 0x100)
650 return; 650 return;
651 651
652 ppc_md.nvram_write_val(xpaddr + offset, data); 652 ppc_md.nvram_write_val(xpaddr + offset, data);
653 } 653 }
654 654
655 EXPORT_SYMBOL(pmac_get_partition); 655 EXPORT_SYMBOL(pmac_get_partition);
656 EXPORT_SYMBOL(pmac_xpram_read); 656 EXPORT_SYMBOL(pmac_xpram_read);
657 EXPORT_SYMBOL(pmac_xpram_write); 657 EXPORT_SYMBOL(pmac_xpram_write);
658 658
arch/powerpc/platforms/ps3/interrupt.c
1 /* 1 /*
2 * PS3 interrupt routines. 2 * PS3 interrupt routines.
3 * 3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc. 4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp. 5 * Copyright 2006 Sony Corp.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License. 9 * the Free Software Foundation; version 2 of the License.
10 * 10 *
11 * This program is distributed in the hope that it will be useful, 11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 * 15 *
16 * You should have received a copy of the GNU General Public License 16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software 17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */ 19 */
20 20
21 #include <linux/kernel.h> 21 #include <linux/kernel.h>
22 #include <linux/module.h> 22 #include <linux/export.h>
23 #include <linux/irq.h> 23 #include <linux/irq.h>
24 24
25 #include <asm/machdep.h> 25 #include <asm/machdep.h>
26 #include <asm/udbg.h> 26 #include <asm/udbg.h>
27 #include <asm/lv1call.h> 27 #include <asm/lv1call.h>
28 #include <asm/smp.h> 28 #include <asm/smp.h>
29 29
30 #include "platform.h" 30 #include "platform.h"
31 31
32 #if defined(DEBUG) 32 #if defined(DEBUG)
33 #define DBG udbg_printf 33 #define DBG udbg_printf
34 #else 34 #else
35 #define DBG pr_debug 35 #define DBG pr_debug
36 #endif 36 #endif
37 37
38 /** 38 /**
39 * struct ps3_bmp - a per cpu irq status and mask bitmap structure 39 * struct ps3_bmp - a per cpu irq status and mask bitmap structure
40 * @status: 256 bit status bitmap indexed by plug 40 * @status: 256 bit status bitmap indexed by plug
41 * @unused_1: 41 * @unused_1:
42 * @mask: 256 bit mask bitmap indexed by plug 42 * @mask: 256 bit mask bitmap indexed by plug
43 * @unused_2: 43 * @unused_2:
44 * @lock: 44 * @lock:
45 * @ipi_debug_brk_mask: 45 * @ipi_debug_brk_mask:
46 * 46 *
47 * The HV maintains per SMT thread mappings of HV outlet to HV plug on 47 * The HV maintains per SMT thread mappings of HV outlet to HV plug on
48 * behalf of the guest. These mappings are implemented as 256 bit guest 48 * behalf of the guest. These mappings are implemented as 256 bit guest
49 * supplied bitmaps indexed by plug number. The addresses of the bitmaps 49 * supplied bitmaps indexed by plug number. The addresses of the bitmaps
50 * are registered with the HV through lv1_configure_irq_state_bitmap(). 50 * are registered with the HV through lv1_configure_irq_state_bitmap().
51 * The HV requires that the 512 bits of status + mask not cross a page 51 * The HV requires that the 512 bits of status + mask not cross a page
52 * boundary. PS3_BMP_MINALIGN is used to define this minimal 64 byte 52 * boundary. PS3_BMP_MINALIGN is used to define this minimal 64 byte
53 * alignment. 53 * alignment.
54 * 54 *
55 * The HV supports 256 plugs per thread, assigned as {0..255}, for a total 55 * The HV supports 256 plugs per thread, assigned as {0..255}, for a total
56 * of 512 plugs supported on a processor. To simplify the logic this 56 * of 512 plugs supported on a processor. To simplify the logic this
57 * implementation equates HV plug value to Linux virq value, constrains each 57 * implementation equates HV plug value to Linux virq value, constrains each
58 * interrupt to have a system wide unique plug number, and limits the range 58 * interrupt to have a system wide unique plug number, and limits the range
59 * of the plug values to map into the first dword of the bitmaps. This 59 * of the plug values to map into the first dword of the bitmaps. This
60 * gives a usable range of plug values of {NUM_ISA_INTERRUPTS..63}. Note 60 * gives a usable range of plug values of {NUM_ISA_INTERRUPTS..63}. Note
61 * that there is no constraint on how many in this set an individual thread 61 * that there is no constraint on how many in this set an individual thread
62 * can acquire. 62 * can acquire.
63 * 63 *
64 * The mask is declared as unsigned long so we can use set/clear_bit on it. 64 * The mask is declared as unsigned long so we can use set/clear_bit on it.
65 */ 65 */
66 66
67 #define PS3_BMP_MINALIGN 64 67 #define PS3_BMP_MINALIGN 64
68 68
69 struct ps3_bmp { 69 struct ps3_bmp {
70 struct { 70 struct {
71 u64 status; 71 u64 status;
72 u64 unused_1[3]; 72 u64 unused_1[3];
73 unsigned long mask; 73 unsigned long mask;
74 u64 unused_2[3]; 74 u64 unused_2[3];
75 }; 75 };
76 u64 ipi_debug_brk_mask; 76 u64 ipi_debug_brk_mask;
77 spinlock_t lock; 77 spinlock_t lock;
78 }; 78 };
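
/*
 * Illustrative sketch, not part of the original file: because plug values
 * are confined to the first 64-bit word of the 256-bit maps, plug (and virq)
 * number p is tracked in bit (63 - p) of ps3_bmp.mask -- exactly the bit
 * manipulated by ps3_chip_mask()/ps3_chip_unmask() below.  A hypothetical
 * helper making that explicit:
 */
#if 0	/* example only */
static inline void ps3_bmp_unmask_plug(struct ps3_bmp *bmp, unsigned int plug)
{
	set_bit(63 - plug, &bmp->mask);	/* allow delivery of this plug */
}
#endif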
79 79
80 /** 80 /**
81 * struct ps3_private - a per cpu data structure 81 * struct ps3_private - a per cpu data structure
82 * @bmp: ps3_bmp structure 82 * @bmp: ps3_bmp structure
83 * @ppe_id: HV logical_ppe_id 83 * @ppe_id: HV logical_ppe_id
84 * @thread_id: HV thread_id 84 * @thread_id: HV thread_id
85 */ 85 */
86 86
87 struct ps3_private { 87 struct ps3_private {
88 struct ps3_bmp bmp __attribute__ ((aligned (PS3_BMP_MINALIGN))); 88 struct ps3_bmp bmp __attribute__ ((aligned (PS3_BMP_MINALIGN)));
89 u64 ppe_id; 89 u64 ppe_id;
90 u64 thread_id; 90 u64 thread_id;
91 }; 91 };
92 92
93 static DEFINE_PER_CPU(struct ps3_private, ps3_private); 93 static DEFINE_PER_CPU(struct ps3_private, ps3_private);
94 94
95 /** 95 /**
96 * ps3_chip_mask - Set an interrupt mask bit in ps3_bmp. 96 * ps3_chip_mask - Set an interrupt mask bit in ps3_bmp.
97 * @virq: The assigned Linux virq. 97 * @virq: The assigned Linux virq.
98 * 98 *
99 * Sets ps3_bmp.mask and calls lv1_did_update_interrupt_mask(). 99 * Sets ps3_bmp.mask and calls lv1_did_update_interrupt_mask().
100 */ 100 */
101 101
102 static void ps3_chip_mask(struct irq_data *d) 102 static void ps3_chip_mask(struct irq_data *d)
103 { 103 {
104 struct ps3_private *pd = irq_data_get_irq_chip_data(d); 104 struct ps3_private *pd = irq_data_get_irq_chip_data(d);
105 unsigned long flags; 105 unsigned long flags;
106 106
107 pr_debug("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__, 107 pr_debug("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__,
108 pd->thread_id, d->irq); 108 pd->thread_id, d->irq);
109 109
110 local_irq_save(flags); 110 local_irq_save(flags);
111 clear_bit(63 - d->irq, &pd->bmp.mask); 111 clear_bit(63 - d->irq, &pd->bmp.mask);
112 lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id); 112 lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id);
113 local_irq_restore(flags); 113 local_irq_restore(flags);
114 } 114 }
115 115
116 /** 116 /**
117 * ps3_chip_unmask - Clear an interrupt mask bit in ps3_bmp. 117 * ps3_chip_unmask - Clear an interrupt mask bit in ps3_bmp.
118 * @virq: The assigned Linux virq. 118 * @virq: The assigned Linux virq.
119 * 119 *
120 * Clears ps3_bmp.mask and calls lv1_did_update_interrupt_mask(). 120 * Clears ps3_bmp.mask and calls lv1_did_update_interrupt_mask().
121 */ 121 */
122 122
123 static void ps3_chip_unmask(struct irq_data *d) 123 static void ps3_chip_unmask(struct irq_data *d)
124 { 124 {
125 struct ps3_private *pd = irq_data_get_irq_chip_data(d); 125 struct ps3_private *pd = irq_data_get_irq_chip_data(d);
126 unsigned long flags; 126 unsigned long flags;
127 127
128 pr_debug("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__, 128 pr_debug("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__,
129 pd->thread_id, d->irq); 129 pd->thread_id, d->irq);
130 130
131 local_irq_save(flags); 131 local_irq_save(flags);
132 set_bit(63 - d->irq, &pd->bmp.mask); 132 set_bit(63 - d->irq, &pd->bmp.mask);
133 lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id); 133 lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id);
134 local_irq_restore(flags); 134 local_irq_restore(flags);
135 } 135 }
136 136
137 /** 137 /**
138 * ps3_chip_eoi - HV end-of-interrupt. 138 * ps3_chip_eoi - HV end-of-interrupt.
139 * @virq: The assigned Linux virq. 139 * @virq: The assigned Linux virq.
140 * 140 *
141 * Calls lv1_end_of_interrupt_ext(). 141 * Calls lv1_end_of_interrupt_ext().
142 */ 142 */
143 143
144 static void ps3_chip_eoi(struct irq_data *d) 144 static void ps3_chip_eoi(struct irq_data *d)
145 { 145 {
146 const struct ps3_private *pd = irq_data_get_irq_chip_data(d); 146 const struct ps3_private *pd = irq_data_get_irq_chip_data(d);
147 lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq); 147 lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq);
148 } 148 }
149 149
150 /** 150 /**
151 * ps3_irq_chip - Represents the ps3_bmp as a Linux struct irq_chip. 151 * ps3_irq_chip - Represents the ps3_bmp as a Linux struct irq_chip.
152 */ 152 */
153 153
154 static struct irq_chip ps3_irq_chip = { 154 static struct irq_chip ps3_irq_chip = {
155 .name = "ps3", 155 .name = "ps3",
156 .irq_mask = ps3_chip_mask, 156 .irq_mask = ps3_chip_mask,
157 .irq_unmask = ps3_chip_unmask, 157 .irq_unmask = ps3_chip_unmask,
158 .irq_eoi = ps3_chip_eoi, 158 .irq_eoi = ps3_chip_eoi,
159 }; 159 };
160 160
161 /** 161 /**
162 * ps3_virq_setup - virq related setup. 162 * ps3_virq_setup - virq related setup.
163 * @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be 163 * @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
164 * serviced on. 164 * serviced on.
165 * @outlet: The HV outlet from the various create outlet routines. 165 * @outlet: The HV outlet from the various create outlet routines.
166 * @virq: The assigned Linux virq. 166 * @virq: The assigned Linux virq.
167 * 167 *
168 * Calls irq_create_mapping() to get a virq and sets the chip data to 168 * Calls irq_create_mapping() to get a virq and sets the chip data to
169 * ps3_private data. 169 * ps3_private data.
170 */ 170 */
171 171
172 static int ps3_virq_setup(enum ps3_cpu_binding cpu, unsigned long outlet, 172 static int ps3_virq_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
173 unsigned int *virq) 173 unsigned int *virq)
174 { 174 {
175 int result; 175 int result;
176 struct ps3_private *pd; 176 struct ps3_private *pd;
177 177
178 /* This defines the default interrupt distribution policy. */ 178 /* This defines the default interrupt distribution policy. */
179 179
180 if (cpu == PS3_BINDING_CPU_ANY) 180 if (cpu == PS3_BINDING_CPU_ANY)
181 cpu = 0; 181 cpu = 0;
182 182
183 pd = &per_cpu(ps3_private, cpu); 183 pd = &per_cpu(ps3_private, cpu);
184 184
185 *virq = irq_create_mapping(NULL, outlet); 185 *virq = irq_create_mapping(NULL, outlet);
186 186
187 if (*virq == NO_IRQ) { 187 if (*virq == NO_IRQ) {
188 pr_debug("%s:%d: irq_create_mapping failed: outlet %lu\n", 188 pr_debug("%s:%d: irq_create_mapping failed: outlet %lu\n",
189 __func__, __LINE__, outlet); 189 __func__, __LINE__, outlet);
190 result = -ENOMEM; 190 result = -ENOMEM;
191 goto fail_create; 191 goto fail_create;
192 } 192 }
193 193
194 pr_debug("%s:%d: outlet %lu => cpu %u, virq %u\n", __func__, __LINE__, 194 pr_debug("%s:%d: outlet %lu => cpu %u, virq %u\n", __func__, __LINE__,
195 outlet, cpu, *virq); 195 outlet, cpu, *virq);
196 196
197 result = irq_set_chip_data(*virq, pd); 197 result = irq_set_chip_data(*virq, pd);
198 198
199 if (result) { 199 if (result) {
200 pr_debug("%s:%d: irq_set_chip_data failed\n", 200 pr_debug("%s:%d: irq_set_chip_data failed\n",
201 __func__, __LINE__); 201 __func__, __LINE__);
202 goto fail_set; 202 goto fail_set;
203 } 203 }
204 204
205 ps3_chip_mask(irq_get_irq_data(*virq)); 205 ps3_chip_mask(irq_get_irq_data(*virq));
206 206
207 return result; 207 return result;
208 208
209 fail_set: 209 fail_set:
210 irq_dispose_mapping(*virq); 210 irq_dispose_mapping(*virq);
211 fail_create: 211 fail_create:
212 return result; 212 return result;
213 } 213 }
214 214
215 /** 215 /**
216 * ps3_virq_destroy - virq related teardown. 216 * ps3_virq_destroy - virq related teardown.
217 * @virq: The assigned Linux virq. 217 * @virq: The assigned Linux virq.
218 * 218 *
219 * Clears chip data and calls irq_dispose_mapping() for the virq. 219 * Clears chip data and calls irq_dispose_mapping() for the virq.
220 */ 220 */
221 221
222 static int ps3_virq_destroy(unsigned int virq) 222 static int ps3_virq_destroy(unsigned int virq)
223 { 223 {
224 const struct ps3_private *pd = irq_get_chip_data(virq); 224 const struct ps3_private *pd = irq_get_chip_data(virq);
225 225
226 pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__, 226 pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
227 __LINE__, pd->ppe_id, pd->thread_id, virq); 227 __LINE__, pd->ppe_id, pd->thread_id, virq);
228 228
229 irq_set_chip_data(virq, NULL); 229 irq_set_chip_data(virq, NULL);
230 irq_dispose_mapping(virq); 230 irq_dispose_mapping(virq);
231 231
232 pr_debug("%s:%d <-\n", __func__, __LINE__); 232 pr_debug("%s:%d <-\n", __func__, __LINE__);
233 return 0; 233 return 0;
234 } 234 }
235 235
236 /** 236 /**
237 * ps3_irq_plug_setup - Generic outlet and virq related setup. 237 * ps3_irq_plug_setup - Generic outlet and virq related setup.
238 * @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be 238 * @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
239 * serviced on. 239 * serviced on.
240 * @outlet: The HV outlet from the various create outlet routines. 240 * @outlet: The HV outlet from the various create outlet routines.
241 * @virq: The assigned Linux virq. 241 * @virq: The assigned Linux virq.
242 * 242 *
243 * Sets up virq and connects the irq plug. 243 * Sets up virq and connects the irq plug.
244 */ 244 */
245 245
246 int ps3_irq_plug_setup(enum ps3_cpu_binding cpu, unsigned long outlet, 246 int ps3_irq_plug_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
247 unsigned int *virq) 247 unsigned int *virq)
248 { 248 {
249 int result; 249 int result;
250 struct ps3_private *pd; 250 struct ps3_private *pd;
251 251
252 result = ps3_virq_setup(cpu, outlet, virq); 252 result = ps3_virq_setup(cpu, outlet, virq);
253 253
254 if (result) { 254 if (result) {
255 pr_debug("%s:%d: ps3_virq_setup failed\n", __func__, __LINE__); 255 pr_debug("%s:%d: ps3_virq_setup failed\n", __func__, __LINE__);
256 goto fail_setup; 256 goto fail_setup;
257 } 257 }
258 258
259 pd = irq_get_chip_data(*virq); 259 pd = irq_get_chip_data(*virq);
260 260
261 /* Binds outlet to cpu + virq. */ 261 /* Binds outlet to cpu + virq. */
262 262
263 result = lv1_connect_irq_plug_ext(pd->ppe_id, pd->thread_id, *virq, 263 result = lv1_connect_irq_plug_ext(pd->ppe_id, pd->thread_id, *virq,
264 outlet, 0); 264 outlet, 0);
265 265
266 if (result) { 266 if (result) {
267 pr_info("%s:%d: lv1_connect_irq_plug_ext failed: %s\n", 267 pr_info("%s:%d: lv1_connect_irq_plug_ext failed: %s\n",
268 __func__, __LINE__, ps3_result(result)); 268 __func__, __LINE__, ps3_result(result));
269 result = -EPERM; 269 result = -EPERM;
270 goto fail_connect; 270 goto fail_connect;
271 } 271 }
272 272
273 return result; 273 return result;
274 274
275 fail_connect: 275 fail_connect:
276 ps3_virq_destroy(*virq); 276 ps3_virq_destroy(*virq);
277 fail_setup: 277 fail_setup:
278 return result; 278 return result;
279 } 279 }
280 EXPORT_SYMBOL_GPL(ps3_irq_plug_setup); 280 EXPORT_SYMBOL_GPL(ps3_irq_plug_setup);
281 281
282 /** 282 /**
283 * ps3_irq_plug_destroy - Generic outlet and virq related teardown. 283 * ps3_irq_plug_destroy - Generic outlet and virq related teardown.
284 * @virq: The assigned Linux virq. 284 * @virq: The assigned Linux virq.
285 * 285 *
286 * Disconnects the irq plug and tears down virq. 286 * Disconnects the irq plug and tears down virq.
287 * Do not call for system bus event interrupts set up with 287 * Do not call for system bus event interrupts set up with
288 * ps3_sb_event_receive_port_setup(). 288 * ps3_sb_event_receive_port_setup().
289 */ 289 */
290 290
291 int ps3_irq_plug_destroy(unsigned int virq) 291 int ps3_irq_plug_destroy(unsigned int virq)
292 { 292 {
293 int result; 293 int result;
294 const struct ps3_private *pd = irq_get_chip_data(virq); 294 const struct ps3_private *pd = irq_get_chip_data(virq);
295 295
296 pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__, 296 pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
297 __LINE__, pd->ppe_id, pd->thread_id, virq); 297 __LINE__, pd->ppe_id, pd->thread_id, virq);
298 298
299 ps3_chip_mask(irq_get_irq_data(virq)); 299 ps3_chip_mask(irq_get_irq_data(virq));
300 300
301 result = lv1_disconnect_irq_plug_ext(pd->ppe_id, pd->thread_id, virq); 301 result = lv1_disconnect_irq_plug_ext(pd->ppe_id, pd->thread_id, virq);
302 302
303 if (result) 303 if (result)
304 pr_info("%s:%d: lv1_disconnect_irq_plug_ext failed: %s\n", 304 pr_info("%s:%d: lv1_disconnect_irq_plug_ext failed: %s\n",
305 __func__, __LINE__, ps3_result(result)); 305 __func__, __LINE__, ps3_result(result));
306 306
307 ps3_virq_destroy(virq); 307 ps3_virq_destroy(virq);
308 308
309 return result; 309 return result;
310 } 310 }
311 EXPORT_SYMBOL_GPL(ps3_irq_plug_destroy); 311 EXPORT_SYMBOL_GPL(ps3_irq_plug_destroy);
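
/*
 * Usage sketch (added for clarity, not part of the original file): a typical
 * caller pairs the plug setup/teardown above with the generic IRQ API:
 *
 *	unsigned int virq;
 *
 *	if (!ps3_irq_plug_setup(PS3_BINDING_CPU_ANY, outlet, &virq))
 *		request_irq(virq, my_handler, 0, "my-dev", my_dev);
 *	...
 *	free_irq(virq, my_dev);
 *	ps3_irq_plug_destroy(virq);
 *
 * "outlet", "my_handler" and "my_dev" are hypothetical driver-side names
 * used only for illustration.
 */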
312 312
313 /** 313 /**
314 * ps3_event_receive_port_setup - Setup an event receive port. 314 * ps3_event_receive_port_setup - Setup an event receive port.
315 * @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be 315 * @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
316 * serviced on. 316 * serviced on.
317 * @virq: The assigned Linux virq. 317 * @virq: The assigned Linux virq.
318 * 318 *
319 * The virq can be used with lv1_connect_interrupt_event_receive_port() to 319 * The virq can be used with lv1_connect_interrupt_event_receive_port() to
320 * arrange to receive interrupts from system-bus devices, or with 320 * arrange to receive interrupts from system-bus devices, or with
321 * ps3_send_event_locally() to signal events. 321 * ps3_send_event_locally() to signal events.
322 */ 322 */
323 323
324 int ps3_event_receive_port_setup(enum ps3_cpu_binding cpu, unsigned int *virq) 324 int ps3_event_receive_port_setup(enum ps3_cpu_binding cpu, unsigned int *virq)
325 { 325 {
326 int result; 326 int result;
327 u64 outlet; 327 u64 outlet;
328 328
329 result = lv1_construct_event_receive_port(&outlet); 329 result = lv1_construct_event_receive_port(&outlet);
330 330
331 if (result) { 331 if (result) {
332 pr_debug("%s:%d: lv1_construct_event_receive_port failed: %s\n", 332 pr_debug("%s:%d: lv1_construct_event_receive_port failed: %s\n",
333 __func__, __LINE__, ps3_result(result)); 333 __func__, __LINE__, ps3_result(result));
334 *virq = NO_IRQ; 334 *virq = NO_IRQ;
335 return result; 335 return result;
336 } 336 }
337 337
338 result = ps3_irq_plug_setup(cpu, outlet, virq); 338 result = ps3_irq_plug_setup(cpu, outlet, virq);
339 BUG_ON(result); 339 BUG_ON(result);
340 340
341 return result; 341 return result;
342 } 342 }
343 EXPORT_SYMBOL_GPL(ps3_event_receive_port_setup); 343 EXPORT_SYMBOL_GPL(ps3_event_receive_port_setup);
344 344
345 /** 345 /**
346 * ps3_event_receive_port_destroy - Destroy an event receive port. 346 * ps3_event_receive_port_destroy - Destroy an event receive port.
347 * @virq: The assigned Linux virq. 347 * @virq: The assigned Linux virq.
348 * 348 *
349 * Since ps3_event_receive_port_destroy destroys the receive port outlet, 349 * Since ps3_event_receive_port_destroy destroys the receive port outlet,
350 * SB devices need to call disconnect_interrupt_event_receive_port() before 350 * SB devices need to call disconnect_interrupt_event_receive_port() before
351 * this. 351 * this.
352 */ 352 */
353 353
354 int ps3_event_receive_port_destroy(unsigned int virq) 354 int ps3_event_receive_port_destroy(unsigned int virq)
355 { 355 {
356 int result; 356 int result;
357 357
358 pr_debug(" -> %s:%d virq %u\n", __func__, __LINE__, virq); 358 pr_debug(" -> %s:%d virq %u\n", __func__, __LINE__, virq);
359 359
360 ps3_chip_mask(irq_get_irq_data(virq)); 360 ps3_chip_mask(irq_get_irq_data(virq));
361 361
362 result = lv1_destruct_event_receive_port(virq_to_hw(virq)); 362 result = lv1_destruct_event_receive_port(virq_to_hw(virq));
363 363
364 if (result) 364 if (result)
365 pr_debug("%s:%d: lv1_destruct_event_receive_port failed: %s\n", 365 pr_debug("%s:%d: lv1_destruct_event_receive_port failed: %s\n",
366 __func__, __LINE__, ps3_result(result)); 366 __func__, __LINE__, ps3_result(result));
367 367
368 /* 368 /*
369 * Don't call ps3_virq_destroy() here since ps3_smp_cleanup_cpu() 369 * Don't call ps3_virq_destroy() here since ps3_smp_cleanup_cpu()
370 * calls from interrupt context (smp_call_function) when kexecing. 370 * calls from interrupt context (smp_call_function) when kexecing.
371 */ 371 */
372 372
373 pr_debug(" <- %s:%d\n", __func__, __LINE__); 373 pr_debug(" <- %s:%d\n", __func__, __LINE__);
374 return result; 374 return result;
375 } 375 }
376 376
377 int ps3_send_event_locally(unsigned int virq) 377 int ps3_send_event_locally(unsigned int virq)
378 { 378 {
379 return lv1_send_event_locally(virq_to_hw(virq)); 379 return lv1_send_event_locally(virq_to_hw(virq));
380 } 380 }
381 381
382 /** 382 /**
383 * ps3_sb_event_receive_port_setup - Setup a system bus event receive port. 383 * ps3_sb_event_receive_port_setup - Setup a system bus event receive port.
384 * @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be 384 * @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
385 * serviced on. 385 * serviced on.
386 * @dev: The system bus device instance. 386 * @dev: The system bus device instance.
387 * @virq: The assigned Linux virq. 387 * @virq: The assigned Linux virq.
388 * 388 *
389 * An event irq represents a virtual device interrupt. The interrupt_id 389 * An event irq represents a virtual device interrupt. The interrupt_id
390 * corresponds to the software interrupt number. 390 * corresponds to the software interrupt number.
391 */ 391 */
392 392
393 int ps3_sb_event_receive_port_setup(struct ps3_system_bus_device *dev, 393 int ps3_sb_event_receive_port_setup(struct ps3_system_bus_device *dev,
394 enum ps3_cpu_binding cpu, unsigned int *virq) 394 enum ps3_cpu_binding cpu, unsigned int *virq)
395 { 395 {
396 /* this should go in system-bus.c */ 396 /* this should go in system-bus.c */
397 397
398 int result; 398 int result;
399 399
400 result = ps3_event_receive_port_setup(cpu, virq); 400 result = ps3_event_receive_port_setup(cpu, virq);
401 401
402 if (result) 402 if (result)
403 return result; 403 return result;
404 404
405 result = lv1_connect_interrupt_event_receive_port(dev->bus_id, 405 result = lv1_connect_interrupt_event_receive_port(dev->bus_id,
406 dev->dev_id, virq_to_hw(*virq), dev->interrupt_id); 406 dev->dev_id, virq_to_hw(*virq), dev->interrupt_id);
407 407
408 if (result) { 408 if (result) {
409 pr_debug("%s:%d: lv1_connect_interrupt_event_receive_port" 409 pr_debug("%s:%d: lv1_connect_interrupt_event_receive_port"
410 " failed: %s\n", __func__, __LINE__, 410 " failed: %s\n", __func__, __LINE__,
411 ps3_result(result)); 411 ps3_result(result));
412 ps3_event_receive_port_destroy(*virq); 412 ps3_event_receive_port_destroy(*virq);
413 *virq = NO_IRQ; 413 *virq = NO_IRQ;
414 return result; 414 return result;
415 } 415 }
416 416
417 pr_debug("%s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__, 417 pr_debug("%s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
418 dev->interrupt_id, *virq); 418 dev->interrupt_id, *virq);
419 419
420 return 0; 420 return 0;
421 } 421 }
422 EXPORT_SYMBOL(ps3_sb_event_receive_port_setup); 422 EXPORT_SYMBOL(ps3_sb_event_receive_port_setup);
423 423
424 int ps3_sb_event_receive_port_destroy(struct ps3_system_bus_device *dev, 424 int ps3_sb_event_receive_port_destroy(struct ps3_system_bus_device *dev,
425 unsigned int virq) 425 unsigned int virq)
426 { 426 {
427 /* this should go in system-bus.c */ 427 /* this should go in system-bus.c */
428 428
429 int result; 429 int result;
430 430
431 pr_debug(" -> %s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__, 431 pr_debug(" -> %s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
432 dev->interrupt_id, virq); 432 dev->interrupt_id, virq);
433 433
434 result = lv1_disconnect_interrupt_event_receive_port(dev->bus_id, 434 result = lv1_disconnect_interrupt_event_receive_port(dev->bus_id,
435 dev->dev_id, virq_to_hw(virq), dev->interrupt_id); 435 dev->dev_id, virq_to_hw(virq), dev->interrupt_id);
436 436
437 if (result) 437 if (result)
438 pr_debug("%s:%d: lv1_disconnect_interrupt_event_receive_port" 438 pr_debug("%s:%d: lv1_disconnect_interrupt_event_receive_port"
439 " failed: %s\n", __func__, __LINE__, 439 " failed: %s\n", __func__, __LINE__,
440 ps3_result(result)); 440 ps3_result(result));
441 441
442 result = ps3_event_receive_port_destroy(virq); 442 result = ps3_event_receive_port_destroy(virq);
443 BUG_ON(result); 443 BUG_ON(result);
444 444
445 /* 445 /*
446 * ps3_event_receive_port_destroy() destroys the IRQ plug, 446 * ps3_event_receive_port_destroy() destroys the IRQ plug,
447 * so don't call ps3_irq_plug_destroy() here. 447 * so don't call ps3_irq_plug_destroy() here.
448 */ 448 */
449 449
450 result = ps3_virq_destroy(virq); 450 result = ps3_virq_destroy(virq);
451 BUG_ON(result); 451 BUG_ON(result);
452 452
453 pr_debug(" <- %s:%d\n", __func__, __LINE__); 453 pr_debug(" <- %s:%d\n", __func__, __LINE__);
454 return result; 454 return result;
455 } 455 }
456 EXPORT_SYMBOL(ps3_sb_event_receive_port_destroy); 456 EXPORT_SYMBOL(ps3_sb_event_receive_port_destroy);
457 457
458 /** 458 /**
459 * ps3_io_irq_setup - Setup a system bus io irq. 459 * ps3_io_irq_setup - Setup a system bus io irq.
460 * @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be 460 * @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
461 * serviced on. 461 * serviced on.
462 * @interrupt_id: The device interrupt id read from the system repository. 462 * @interrupt_id: The device interrupt id read from the system repository.
463 * @virq: The assigned Linux virq. 463 * @virq: The assigned Linux virq.
464 * 464 *
465 * An io irq represents a non-virtualized device interrupt. interrupt_id 465 * An io irq represents a non-virtualized device interrupt. interrupt_id
466 * corresponds to the interrupt number of the interrupt controller. 466 * corresponds to the interrupt number of the interrupt controller.
467 */ 467 */
468 468
469 int ps3_io_irq_setup(enum ps3_cpu_binding cpu, unsigned int interrupt_id, 469 int ps3_io_irq_setup(enum ps3_cpu_binding cpu, unsigned int interrupt_id,
470 unsigned int *virq) 470 unsigned int *virq)
471 { 471 {
472 int result; 472 int result;
473 u64 outlet; 473 u64 outlet;
474 474
475 result = lv1_construct_io_irq_outlet(interrupt_id, &outlet); 475 result = lv1_construct_io_irq_outlet(interrupt_id, &outlet);
476 476
477 if (result) { 477 if (result) {
478 pr_debug("%s:%d: lv1_construct_io_irq_outlet failed: %s\n", 478 pr_debug("%s:%d: lv1_construct_io_irq_outlet failed: %s\n",
479 __func__, __LINE__, ps3_result(result)); 479 __func__, __LINE__, ps3_result(result));
480 return result; 480 return result;
481 } 481 }
482 482
483 result = ps3_irq_plug_setup(cpu, outlet, virq); 483 result = ps3_irq_plug_setup(cpu, outlet, virq);
484 BUG_ON(result); 484 BUG_ON(result);
485 485
486 return result; 486 return result;
487 } 487 }
488 EXPORT_SYMBOL_GPL(ps3_io_irq_setup); 488 EXPORT_SYMBOL_GPL(ps3_io_irq_setup);
489 489
490 int ps3_io_irq_destroy(unsigned int virq) 490 int ps3_io_irq_destroy(unsigned int virq)
491 { 491 {
492 int result; 492 int result;
493 unsigned long outlet = virq_to_hw(virq); 493 unsigned long outlet = virq_to_hw(virq);
494 494
495 ps3_chip_mask(irq_get_irq_data(virq)); 495 ps3_chip_mask(irq_get_irq_data(virq));
496 496
497 /* 497 /*
498 * lv1_destruct_io_irq_outlet() will destroy the IRQ plug, 498 * lv1_destruct_io_irq_outlet() will destroy the IRQ plug,
499 * so call ps3_irq_plug_destroy() first. 499 * so call ps3_irq_plug_destroy() first.
500 */ 500 */
501 501
502 result = ps3_irq_plug_destroy(virq); 502 result = ps3_irq_plug_destroy(virq);
503 BUG_ON(result); 503 BUG_ON(result);
504 504
505 result = lv1_destruct_io_irq_outlet(outlet); 505 result = lv1_destruct_io_irq_outlet(outlet);
506 506
507 if (result) 507 if (result)
508 pr_debug("%s:%d: lv1_destruct_io_irq_outlet failed: %s\n", 508 pr_debug("%s:%d: lv1_destruct_io_irq_outlet failed: %s\n",
509 __func__, __LINE__, ps3_result(result)); 509 __func__, __LINE__, ps3_result(result));
510 510
511 return result; 511 return result;
512 } 512 }
513 EXPORT_SYMBOL_GPL(ps3_io_irq_destroy); 513 EXPORT_SYMBOL_GPL(ps3_io_irq_destroy);
514 514
515 /** 515 /**
516 * ps3_vuart_irq_setup - Setup the system virtual uart virq. 516 * ps3_vuart_irq_setup - Setup the system virtual uart virq.
517 * @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be 517 * @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
518 * serviced on. 518 * serviced on.
519 * @virt_addr_bmp: The caller supplied virtual uart interrupt bitmap. 519 * @virt_addr_bmp: The caller supplied virtual uart interrupt bitmap.
520 * @virq: The assigned Linux virq. 520 * @virq: The assigned Linux virq.
521 * 521 *
522 * The system supports only a single virtual uart, so multiple calls without 522 * The system supports only a single virtual uart, so multiple calls without
523 * freeing the interrupt will return a wrong state error. 523 * freeing the interrupt will return a wrong state error.
524 */ 524 */
525 525
526 int ps3_vuart_irq_setup(enum ps3_cpu_binding cpu, void* virt_addr_bmp, 526 int ps3_vuart_irq_setup(enum ps3_cpu_binding cpu, void* virt_addr_bmp,
527 unsigned int *virq) 527 unsigned int *virq)
528 { 528 {
529 int result; 529 int result;
530 u64 outlet; 530 u64 outlet;
531 u64 lpar_addr; 531 u64 lpar_addr;
532 532
533 BUG_ON(!is_kernel_addr((u64)virt_addr_bmp)); 533 BUG_ON(!is_kernel_addr((u64)virt_addr_bmp));
534 534
535 lpar_addr = ps3_mm_phys_to_lpar(__pa(virt_addr_bmp)); 535 lpar_addr = ps3_mm_phys_to_lpar(__pa(virt_addr_bmp));
536 536
537 result = lv1_configure_virtual_uart_irq(lpar_addr, &outlet); 537 result = lv1_configure_virtual_uart_irq(lpar_addr, &outlet);
538 538
539 if (result) { 539 if (result) {
540 pr_debug("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n", 540 pr_debug("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n",
541 __func__, __LINE__, ps3_result(result)); 541 __func__, __LINE__, ps3_result(result));
542 return result; 542 return result;
543 } 543 }
544 544
545 result = ps3_irq_plug_setup(cpu, outlet, virq); 545 result = ps3_irq_plug_setup(cpu, outlet, virq);
546 BUG_ON(result); 546 BUG_ON(result);
547 547
548 return result; 548 return result;
549 } 549 }
550 EXPORT_SYMBOL_GPL(ps3_vuart_irq_setup); 550 EXPORT_SYMBOL_GPL(ps3_vuart_irq_setup);
551 551
552 int ps3_vuart_irq_destroy(unsigned int virq) 552 int ps3_vuart_irq_destroy(unsigned int virq)
553 { 553 {
554 int result; 554 int result;
555 555
556 ps3_chip_mask(irq_get_irq_data(virq)); 556 ps3_chip_mask(irq_get_irq_data(virq));
557 result = lv1_deconfigure_virtual_uart_irq(); 557 result = lv1_deconfigure_virtual_uart_irq();
558 558
559 if (result) { 559 if (result) {
560 pr_debug("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n", 560 pr_debug("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n",
561 __func__, __LINE__, ps3_result(result)); 561 __func__, __LINE__, ps3_result(result));
562 return result; 562 return result;
563 } 563 }
564 564
565 result = ps3_irq_plug_destroy(virq); 565 result = ps3_irq_plug_destroy(virq);
566 BUG_ON(result); 566 BUG_ON(result);
567 567
568 return result; 568 return result;
569 } 569 }
570 EXPORT_SYMBOL_GPL(ps3_vuart_irq_destroy); 570 EXPORT_SYMBOL_GPL(ps3_vuart_irq_destroy);
571 571
572 /** 572 /**
573 * ps3_spe_irq_setup - Setup an spe virq. 573 * ps3_spe_irq_setup - Setup an spe virq.
574 * @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be 574 * @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
575 * serviced on. 575 * serviced on.
576 * @spe_id: The spe_id returned from lv1_construct_logical_spe(). 576 * @spe_id: The spe_id returned from lv1_construct_logical_spe().
577 * @class: The spe interrupt class {0,1,2}. 577 * @class: The spe interrupt class {0,1,2}.
578 * @virq: The assigned Linux virq. 578 * @virq: The assigned Linux virq.
579 * 579 *
580 */ 580 */
581 581
582 int ps3_spe_irq_setup(enum ps3_cpu_binding cpu, unsigned long spe_id, 582 int ps3_spe_irq_setup(enum ps3_cpu_binding cpu, unsigned long spe_id,
583 unsigned int class, unsigned int *virq) 583 unsigned int class, unsigned int *virq)
584 { 584 {
585 int result; 585 int result;
586 u64 outlet; 586 u64 outlet;
587 587
588 BUG_ON(class > 2); 588 BUG_ON(class > 2);
589 589
590 result = lv1_get_spe_irq_outlet(spe_id, class, &outlet); 590 result = lv1_get_spe_irq_outlet(spe_id, class, &outlet);
591 591
592 if (result) { 592 if (result) {
593 pr_debug("%s:%d: lv1_get_spe_irq_outlet failed: %s\n", 593 pr_debug("%s:%d: lv1_get_spe_irq_outlet failed: %s\n",
594 __func__, __LINE__, ps3_result(result)); 594 __func__, __LINE__, ps3_result(result));
595 return result; 595 return result;
596 } 596 }
597 597
598 result = ps3_irq_plug_setup(cpu, outlet, virq); 598 result = ps3_irq_plug_setup(cpu, outlet, virq);
599 BUG_ON(result); 599 BUG_ON(result);
600 600
601 return result; 601 return result;
602 } 602 }
603 603
604 int ps3_spe_irq_destroy(unsigned int virq) 604 int ps3_spe_irq_destroy(unsigned int virq)
605 { 605 {
606 int result; 606 int result;
607 607
608 ps3_chip_mask(irq_get_irq_data(virq)); 608 ps3_chip_mask(irq_get_irq_data(virq));
609 609
610 result = ps3_irq_plug_destroy(virq); 610 result = ps3_irq_plug_destroy(virq);
611 BUG_ON(result); 611 BUG_ON(result);
612 612
613 return result; 613 return result;
614 } 614 }
615 615
616 616
617 #define PS3_INVALID_OUTLET ((irq_hw_number_t)-1) 617 #define PS3_INVALID_OUTLET ((irq_hw_number_t)-1)
618 #define PS3_PLUG_MAX 63 618 #define PS3_PLUG_MAX 63
619 619
620 #if defined(DEBUG) 620 #if defined(DEBUG)
621 static void _dump_64_bmp(const char *header, const u64 *p, unsigned cpu, 621 static void _dump_64_bmp(const char *header, const u64 *p, unsigned cpu,
622 const char* func, int line) 622 const char* func, int line)
623 { 623 {
624 pr_debug("%s:%d: %s %u {%04lx_%04lx_%04lx_%04lx}\n", 624 pr_debug("%s:%d: %s %u {%04lx_%04lx_%04lx_%04lx}\n",
625 func, line, header, cpu, 625 func, line, header, cpu,
626 *p >> 48, (*p >> 32) & 0xffff, (*p >> 16) & 0xffff, 626 *p >> 48, (*p >> 32) & 0xffff, (*p >> 16) & 0xffff,
627 *p & 0xffff); 627 *p & 0xffff);
628 } 628 }
629 629
630 static void __maybe_unused _dump_256_bmp(const char *header, 630 static void __maybe_unused _dump_256_bmp(const char *header,
631 const u64 *p, unsigned cpu, const char* func, int line) 631 const u64 *p, unsigned cpu, const char* func, int line)
632 { 632 {
633 pr_debug("%s:%d: %s %u {%016lx:%016lx:%016lx:%016lx}\n", 633 pr_debug("%s:%d: %s %u {%016lx:%016lx:%016lx:%016lx}\n",
634 func, line, header, cpu, p[0], p[1], p[2], p[3]); 634 func, line, header, cpu, p[0], p[1], p[2], p[3]);
635 } 635 }
636 636
637 #define dump_bmp(_x) _dump_bmp(_x, __func__, __LINE__) 637 #define dump_bmp(_x) _dump_bmp(_x, __func__, __LINE__)
638 static void _dump_bmp(struct ps3_private* pd, const char* func, int line) 638 static void _dump_bmp(struct ps3_private* pd, const char* func, int line)
639 { 639 {
640 unsigned long flags; 640 unsigned long flags;
641 641
642 spin_lock_irqsave(&pd->bmp.lock, flags); 642 spin_lock_irqsave(&pd->bmp.lock, flags);
643 _dump_64_bmp("stat", &pd->bmp.status, pd->thread_id, func, line); 643 _dump_64_bmp("stat", &pd->bmp.status, pd->thread_id, func, line);
644 _dump_64_bmp("mask", &pd->bmp.mask, pd->thread_id, func, line); 644 _dump_64_bmp("mask", &pd->bmp.mask, pd->thread_id, func, line);
645 spin_unlock_irqrestore(&pd->bmp.lock, flags); 645 spin_unlock_irqrestore(&pd->bmp.lock, flags);
646 } 646 }
647 647
648 #define dump_mask(_x) _dump_mask(_x, __func__, __LINE__) 648 #define dump_mask(_x) _dump_mask(_x, __func__, __LINE__)
649 static void __maybe_unused _dump_mask(struct ps3_private *pd, 649 static void __maybe_unused _dump_mask(struct ps3_private *pd,
650 const char* func, int line) 650 const char* func, int line)
651 { 651 {
652 unsigned long flags; 652 unsigned long flags;
653 653
654 spin_lock_irqsave(&pd->bmp.lock, flags); 654 spin_lock_irqsave(&pd->bmp.lock, flags);
655 _dump_64_bmp("mask", &pd->bmp.mask, pd->thread_id, func, line); 655 _dump_64_bmp("mask", &pd->bmp.mask, pd->thread_id, func, line);
656 spin_unlock_irqrestore(&pd->bmp.lock, flags); 656 spin_unlock_irqrestore(&pd->bmp.lock, flags);
657 } 657 }
658 #else 658 #else
659 static void dump_bmp(struct ps3_private* pd) {}; 659 static void dump_bmp(struct ps3_private* pd) {};
660 #endif /* defined(DEBUG) */ 660 #endif /* defined(DEBUG) */
661 661
662 static int ps3_host_map(struct irq_host *h, unsigned int virq, 662 static int ps3_host_map(struct irq_host *h, unsigned int virq,
663 irq_hw_number_t hwirq) 663 irq_hw_number_t hwirq)
664 { 664 {
665 pr_debug("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq, 665 pr_debug("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq,
666 virq); 666 virq);
667 667
668 irq_set_chip_and_handler(virq, &ps3_irq_chip, handle_fasteoi_irq); 668 irq_set_chip_and_handler(virq, &ps3_irq_chip, handle_fasteoi_irq);
669 669
670 return 0; 670 return 0;
671 } 671 }
672 672
673 static int ps3_host_match(struct irq_host *h, struct device_node *np) 673 static int ps3_host_match(struct irq_host *h, struct device_node *np)
674 { 674 {
675 /* Match all */ 675 /* Match all */
676 return 1; 676 return 1;
677 } 677 }
678 678
679 static struct irq_host_ops ps3_host_ops = { 679 static struct irq_host_ops ps3_host_ops = {
680 .map = ps3_host_map, 680 .map = ps3_host_map,
681 .match = ps3_host_match, 681 .match = ps3_host_match,
682 }; 682 };
683 683
684 void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq) 684 void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq)
685 { 685 {
686 struct ps3_private *pd = &per_cpu(ps3_private, cpu); 686 struct ps3_private *pd = &per_cpu(ps3_private, cpu);
687 687
688 pd->bmp.ipi_debug_brk_mask = 0x8000000000000000UL >> virq; 688 pd->bmp.ipi_debug_brk_mask = 0x8000000000000000UL >> virq;
689 689
690 pr_debug("%s:%d: cpu %u, virq %u, mask %llxh\n", __func__, __LINE__, 690 pr_debug("%s:%d: cpu %u, virq %u, mask %llxh\n", __func__, __LINE__,
691 cpu, virq, pd->bmp.ipi_debug_brk_mask); 691 cpu, virq, pd->bmp.ipi_debug_brk_mask);
692 } 692 }
693 693
694 static unsigned int ps3_get_irq(void) 694 static unsigned int ps3_get_irq(void)
695 { 695 {
696 struct ps3_private *pd = &__get_cpu_var(ps3_private); 696 struct ps3_private *pd = &__get_cpu_var(ps3_private);
697 u64 x = (pd->bmp.status & pd->bmp.mask); 697 u64 x = (pd->bmp.status & pd->bmp.mask);
698 unsigned int plug; 698 unsigned int plug;
699 699
700 /* check for ipi break first to stop this cpu ASAP */ 700 /* check for ipi break first to stop this cpu ASAP */
701 701
702 if (x & pd->bmp.ipi_debug_brk_mask) 702 if (x & pd->bmp.ipi_debug_brk_mask)
703 x &= pd->bmp.ipi_debug_brk_mask; 703 x &= pd->bmp.ipi_debug_brk_mask;
704 704
705 asm volatile("cntlzd %0,%1" : "=r" (plug) : "r" (x)); 705 asm volatile("cntlzd %0,%1" : "=r" (plug) : "r" (x));
706 plug &= 0x3f; 706 plug &= 0x3f;
707 707
708 if (unlikely(plug == NO_IRQ)) { 708 if (unlikely(plug == NO_IRQ)) {
709 pr_debug("%s:%d: no plug found: thread_id %llu\n", __func__, 709 pr_debug("%s:%d: no plug found: thread_id %llu\n", __func__,
710 __LINE__, pd->thread_id); 710 __LINE__, pd->thread_id);
711 dump_bmp(&per_cpu(ps3_private, 0)); 711 dump_bmp(&per_cpu(ps3_private, 0));
712 dump_bmp(&per_cpu(ps3_private, 1)); 712 dump_bmp(&per_cpu(ps3_private, 1));
713 return NO_IRQ; 713 return NO_IRQ;
714 } 714 }
715 715
716 #if defined(DEBUG) 716 #if defined(DEBUG)
717 if (unlikely(plug < NUM_ISA_INTERRUPTS || plug > PS3_PLUG_MAX)) { 717 if (unlikely(plug < NUM_ISA_INTERRUPTS || plug > PS3_PLUG_MAX)) {
718 dump_bmp(&per_cpu(ps3_private, 0)); 718 dump_bmp(&per_cpu(ps3_private, 0));
719 dump_bmp(&per_cpu(ps3_private, 1)); 719 dump_bmp(&per_cpu(ps3_private, 1));
720 BUG(); 720 BUG();
721 } 721 }
722 #endif 722 #endif
723 return plug; 723 return plug;
724 } 724 }
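
/*
 * Note (added for clarity, not in the original source): cntlzd counts the
 * leading zero bits of the pending-and-unmasked word.  Since plug p is kept
 * in bit (63 - p), that count is simply the lowest pending plug number; a
 * zero word yields 64, which the "& 0x3f" masks down to 0 (NO_IRQ).  In
 * plain C the equivalent would be roughly:
 *
 *	plug = x ? __builtin_clzll(x) : NO_IRQ;
 */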
725 725
726 void __init ps3_init_IRQ(void) 726 void __init ps3_init_IRQ(void)
727 { 727 {
728 int result; 728 int result;
729 unsigned cpu; 729 unsigned cpu;
730 struct irq_host *host; 730 struct irq_host *host;
731 731
732 host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, &ps3_host_ops, 732 host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, &ps3_host_ops,
733 PS3_INVALID_OUTLET); 733 PS3_INVALID_OUTLET);
734 irq_set_default_host(host); 734 irq_set_default_host(host);
735 irq_set_virq_count(PS3_PLUG_MAX + 1); 735 irq_set_virq_count(PS3_PLUG_MAX + 1);
736 736
737 for_each_possible_cpu(cpu) { 737 for_each_possible_cpu(cpu) {
738 struct ps3_private *pd = &per_cpu(ps3_private, cpu); 738 struct ps3_private *pd = &per_cpu(ps3_private, cpu);
739 739
740 lv1_get_logical_ppe_id(&pd->ppe_id); 740 lv1_get_logical_ppe_id(&pd->ppe_id);
741 pd->thread_id = get_hard_smp_processor_id(cpu); 741 pd->thread_id = get_hard_smp_processor_id(cpu);
742 spin_lock_init(&pd->bmp.lock); 742 spin_lock_init(&pd->bmp.lock);
743 743
744 pr_debug("%s:%d: ppe_id %llu, thread_id %llu, bmp %lxh\n", 744 pr_debug("%s:%d: ppe_id %llu, thread_id %llu, bmp %lxh\n",
745 __func__, __LINE__, pd->ppe_id, pd->thread_id, 745 __func__, __LINE__, pd->ppe_id, pd->thread_id,
746 ps3_mm_phys_to_lpar(__pa(&pd->bmp))); 746 ps3_mm_phys_to_lpar(__pa(&pd->bmp)));
747 747
748 result = lv1_configure_irq_state_bitmap(pd->ppe_id, 748 result = lv1_configure_irq_state_bitmap(pd->ppe_id,
749 pd->thread_id, ps3_mm_phys_to_lpar(__pa(&pd->bmp))); 749 pd->thread_id, ps3_mm_phys_to_lpar(__pa(&pd->bmp)));
750 750
751 if (result) 751 if (result)
752 pr_debug("%s:%d: lv1_configure_irq_state_bitmap failed:" 752 pr_debug("%s:%d: lv1_configure_irq_state_bitmap failed:"
753 " %s\n", __func__, __LINE__, 753 " %s\n", __func__, __LINE__,
754 ps3_result(result)); 754 ps3_result(result));
755 } 755 }
756 756
757 ppc_md.get_irq = ps3_get_irq; 757 ppc_md.get_irq = ps3_get_irq;
758 } 758 }
759 759
760 void ps3_shutdown_IRQ(int cpu) 760 void ps3_shutdown_IRQ(int cpu)
761 { 761 {
762 int result; 762 int result;
763 u64 ppe_id; 763 u64 ppe_id;
764 u64 thread_id = get_hard_smp_processor_id(cpu); 764 u64 thread_id = get_hard_smp_processor_id(cpu);
765 765
766 lv1_get_logical_ppe_id(&ppe_id); 766 lv1_get_logical_ppe_id(&ppe_id);
767 result = lv1_configure_irq_state_bitmap(ppe_id, thread_id, 0); 767 result = lv1_configure_irq_state_bitmap(ppe_id, thread_id, 0);
768 768
769 DBG("%s:%d: lv1_configure_irq_state_bitmap (%llu:%llu/%d) %s\n", __func__, 769 DBG("%s:%d: lv1_configure_irq_state_bitmap (%llu:%llu/%d) %s\n", __func__,
770 __LINE__, ppe_id, thread_id, cpu, ps3_result(result)); 770 __LINE__, ppe_id, thread_id, cpu, ps3_result(result));
771 } 771 }
772 772
arch/powerpc/platforms/ps3/mm.c
1 /* 1 /*
2 * PS3 address space management. 2 * PS3 address space management.
3 * 3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc. 4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp. 5 * Copyright 2006 Sony Corp.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License. 9 * the Free Software Foundation; version 2 of the License.
10 * 10 *
11 * This program is distributed in the hope that it will be useful, 11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 * 15 *
16 * You should have received a copy of the GNU General Public License 16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software 17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */ 19 */
20 20
21 #include <linux/kernel.h> 21 #include <linux/kernel.h>
22 #include <linux/module.h> 22 #include <linux/export.h>
23 #include <linux/memory_hotplug.h> 23 #include <linux/memory_hotplug.h>
24 #include <linux/memblock.h> 24 #include <linux/memblock.h>
25 #include <linux/slab.h> 25 #include <linux/slab.h>
26 26
27 #include <asm/cell-regs.h> 27 #include <asm/cell-regs.h>
28 #include <asm/firmware.h> 28 #include <asm/firmware.h>
29 #include <asm/prom.h> 29 #include <asm/prom.h>
30 #include <asm/udbg.h> 30 #include <asm/udbg.h>
31 #include <asm/lv1call.h> 31 #include <asm/lv1call.h>
32 32
33 #include "platform.h" 33 #include "platform.h"
34 34
35 #if defined(DEBUG) 35 #if defined(DEBUG)
36 #define DBG udbg_printf 36 #define DBG udbg_printf
37 #else 37 #else
38 #define DBG pr_devel 38 #define DBG pr_devel
39 #endif 39 #endif
40 40
41 enum { 41 enum {
42 #if defined(CONFIG_PS3_DYNAMIC_DMA) 42 #if defined(CONFIG_PS3_DYNAMIC_DMA)
43 USE_DYNAMIC_DMA = 1, 43 USE_DYNAMIC_DMA = 1,
44 #else 44 #else
45 USE_DYNAMIC_DMA = 0, 45 USE_DYNAMIC_DMA = 0,
46 #endif 46 #endif
47 }; 47 };
48 48
49 enum { 49 enum {
50 PAGE_SHIFT_4K = 12U, 50 PAGE_SHIFT_4K = 12U,
51 PAGE_SHIFT_64K = 16U, 51 PAGE_SHIFT_64K = 16U,
52 PAGE_SHIFT_16M = 24U, 52 PAGE_SHIFT_16M = 24U,
53 }; 53 };
54 54
55 static unsigned long make_page_sizes(unsigned long a, unsigned long b) 55 static unsigned long make_page_sizes(unsigned long a, unsigned long b)
56 { 56 {
57 return (a << 56) | (b << 48); 57 return (a << 56) | (b << 48);
58 } 58 }
59 59
60 enum { 60 enum {
61 ALLOCATE_MEMORY_TRY_ALT_UNIT = 0X04, 61 ALLOCATE_MEMORY_TRY_ALT_UNIT = 0X04,
62 ALLOCATE_MEMORY_ADDR_ZERO = 0X08, 62 ALLOCATE_MEMORY_ADDR_ZERO = 0X08,
63 }; 63 };
64 64
65 /* valid htab sizes are {18,19,20} = 256K, 512K, 1M */ 65 /* valid htab sizes are {18,19,20} = 256K, 512K, 1M */
66 66
67 enum { 67 enum {
68 HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */ 68 HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */
69 HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */ 69 HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */
70 }; 70 };
71 71
72 /*============================================================================*/ 72 /*============================================================================*/
73 /* virtual address space routines */ 73 /* virtual address space routines */
74 /*============================================================================*/ 74 /*============================================================================*/
75 75
76 /** 76 /**
77 * struct mem_region - memory region structure 77 * struct mem_region - memory region structure
78 * @base: base address 78 * @base: base address
79 * @size: size in bytes 79 * @size: size in bytes
80 * @offset: difference between base and rm.size 80 * @offset: difference between base and rm.size
81 */ 81 */
82 82
83 struct mem_region { 83 struct mem_region {
84 u64 base; 84 u64 base;
85 u64 size; 85 u64 size;
86 unsigned long offset; 86 unsigned long offset;
87 }; 87 };
88 88
89 /** 89 /**
90 * struct map - address space state variables holder 90 * struct map - address space state variables holder
91 * @total: total memory available as reported by HV 91 * @total: total memory available as reported by HV
92 * @vas_id: HV virtual address space id 92 * @vas_id: HV virtual address space id
93 * @htab_size: htab size in bytes 93 * @htab_size: htab size in bytes
94 * 94 *
95 * The HV virtual address space (vas) allows for hotplug memory regions. 95 * The HV virtual address space (vas) allows for hotplug memory regions.
96 * Memory regions can be created and destroyed in the vas at runtime. 96 * Memory regions can be created and destroyed in the vas at runtime.
97 * @rm: real mode (bootmem) region 97 * @rm: real mode (bootmem) region
98 * @r1: hotplug memory region(s) 98 * @r1: hotplug memory region(s)
99 * 99 *
100 * ps3 addresses 100 * ps3 addresses
101 * virt_addr: a cpu 'translated' effective address 101 * virt_addr: a cpu 'translated' effective address
102 * phys_addr: an address in what Linux thinks is the physical address space 102 * phys_addr: an address in what Linux thinks is the physical address space
103 * lpar_addr: an address in the HV virtual address space 103 * lpar_addr: an address in the HV virtual address space
104 * bus_addr: an io controller 'translated' address on a device bus 104 * bus_addr: an io controller 'translated' address on a device bus
105 */ 105 */
106 106
107 struct map { 107 struct map {
108 u64 total; 108 u64 total;
109 u64 vas_id; 109 u64 vas_id;
110 u64 htab_size; 110 u64 htab_size;
111 struct mem_region rm; 111 struct mem_region rm;
112 struct mem_region r1; 112 struct mem_region r1;
113 }; 113 };
114 114
115 #define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__) 115 #define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
116 static void __maybe_unused _debug_dump_map(const struct map *m, 116 static void __maybe_unused _debug_dump_map(const struct map *m,
117 const char *func, int line) 117 const char *func, int line)
118 { 118 {
119 DBG("%s:%d: map.total = %llxh\n", func, line, m->total); 119 DBG("%s:%d: map.total = %llxh\n", func, line, m->total);
120 DBG("%s:%d: map.rm.size = %llxh\n", func, line, m->rm.size); 120 DBG("%s:%d: map.rm.size = %llxh\n", func, line, m->rm.size);
121 DBG("%s:%d: map.vas_id = %llu\n", func, line, m->vas_id); 121 DBG("%s:%d: map.vas_id = %llu\n", func, line, m->vas_id);
122 DBG("%s:%d: map.htab_size = %llxh\n", func, line, m->htab_size); 122 DBG("%s:%d: map.htab_size = %llxh\n", func, line, m->htab_size);
123 DBG("%s:%d: map.r1.base = %llxh\n", func, line, m->r1.base); 123 DBG("%s:%d: map.r1.base = %llxh\n", func, line, m->r1.base);
124 DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset); 124 DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
125 DBG("%s:%d: map.r1.size = %llxh\n", func, line, m->r1.size); 125 DBG("%s:%d: map.r1.size = %llxh\n", func, line, m->r1.size);
126 } 126 }
127 127
128 static struct map map; 128 static struct map map;
129 129
130 /** 130 /**
131 * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address 131 * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address
132 * @phys_addr: linux physical address 132 * @phys_addr: linux physical address
133 */ 133 */
134 134
135 unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr) 135 unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
136 { 136 {
137 BUG_ON(is_kernel_addr(phys_addr)); 137 BUG_ON(is_kernel_addr(phys_addr));
138 return (phys_addr < map.rm.size || phys_addr >= map.total) 138 return (phys_addr < map.rm.size || phys_addr >= map.total)
139 ? phys_addr : phys_addr + map.r1.offset; 139 ? phys_addr : phys_addr + map.r1.offset;
140 } 140 }
141 141
142 EXPORT_SYMBOL(ps3_mm_phys_to_lpar); 142 EXPORT_SYMBOL(ps3_mm_phys_to_lpar);
143 143
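As a hedged illustration (not part of the commit), the translation above can be modeled by a small standalone C program: addresses inside the boot-time region (below rm.size) or beyond the reported total pass through unchanged, and everything in between is shifted up by r1.offset (which ps3_mm_region_create() later sets to r1.base - rm.size). The region sizes below are invented example values.

#include <stdio.h>

#define RM_SIZE_EX   0x08000000UL  /* hypothetical real-mode region size */
#define TOTAL_EX     0x10000000UL  /* hypothetical total memory */
#define R1_OFFSET_EX 0x70000000UL  /* hypothetical hotplug-region offset */

static unsigned long phys_to_lpar(unsigned long phys_addr)
{
	/* mirrors ps3_mm_phys_to_lpar(): pass-through for the boot region
	 * and out-of-range addresses, offset for the hotplug region */
	return (phys_addr < RM_SIZE_EX || phys_addr >= TOTAL_EX)
		? phys_addr : phys_addr + R1_OFFSET_EX;
}

int main(void)
{
	printf("%#lx -> %#lx\n", 0x01000000UL, phys_to_lpar(0x01000000UL));
	printf("%#lx -> %#lx\n", 0x09000000UL, phys_to_lpar(0x09000000UL));
	return 0;
}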
144 /** 144 /**
145 * ps3_mm_vas_create - create the virtual address space 145 * ps3_mm_vas_create - create the virtual address space
146 */ 146 */
147 147
148 void __init ps3_mm_vas_create(unsigned long* htab_size) 148 void __init ps3_mm_vas_create(unsigned long* htab_size)
149 { 149 {
150 int result; 150 int result;
151 u64 start_address; 151 u64 start_address;
152 u64 size; 152 u64 size;
153 u64 access_right; 153 u64 access_right;
154 u64 max_page_size; 154 u64 max_page_size;
155 u64 flags; 155 u64 flags;
156 156
157 result = lv1_query_logical_partition_address_region_info(0, 157 result = lv1_query_logical_partition_address_region_info(0,
158 &start_address, &size, &access_right, &max_page_size, 158 &start_address, &size, &access_right, &max_page_size,
159 &flags); 159 &flags);
160 160
161 if (result) { 161 if (result) {
162 DBG("%s:%d: lv1_query_logical_partition_address_region_info " 162 DBG("%s:%d: lv1_query_logical_partition_address_region_info "
163 "failed: %s\n", __func__, __LINE__, 163 "failed: %s\n", __func__, __LINE__,
164 ps3_result(result)); 164 ps3_result(result));
165 goto fail; 165 goto fail;
166 } 166 }
167 167
168 if (max_page_size < PAGE_SHIFT_16M) { 168 if (max_page_size < PAGE_SHIFT_16M) {
169 DBG("%s:%d: bad max_page_size %llxh\n", __func__, __LINE__, 169 DBG("%s:%d: bad max_page_size %llxh\n", __func__, __LINE__,
170 max_page_size); 170 max_page_size);
171 goto fail; 171 goto fail;
172 } 172 }
173 173
174 BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX); 174 BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX);
175 BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN); 175 BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN);
176 176
177 result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE, 177 result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
178 2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K), 178 2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
179 &map.vas_id, &map.htab_size); 179 &map.vas_id, &map.htab_size);
180 180
181 if (result) { 181 if (result) {
182 DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n", 182 DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
183 __func__, __LINE__, ps3_result(result)); 183 __func__, __LINE__, ps3_result(result));
184 goto fail; 184 goto fail;
185 } 185 }
186 186
187 result = lv1_select_virtual_address_space(map.vas_id); 187 result = lv1_select_virtual_address_space(map.vas_id);
188 188
189 if (result) { 189 if (result) {
190 DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n", 190 DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
191 __func__, __LINE__, ps3_result(result)); 191 __func__, __LINE__, ps3_result(result));
192 goto fail; 192 goto fail;
193 } 193 }
194 194
195 *htab_size = map.htab_size; 195 *htab_size = map.htab_size;
196 196
197 debug_dump_map(&map); 197 debug_dump_map(&map);
198 198
199 return; 199 return;
200 200
201 fail: 201 fail:
202 panic("ps3_mm_vas_create failed"); 202 panic("ps3_mm_vas_create failed");
203 } 203 }
204 204
205 /** 205 /**
206 * ps3_mm_vas_destroy - 206 * ps3_mm_vas_destroy -
207 */ 207 */
208 208
209 void ps3_mm_vas_destroy(void) 209 void ps3_mm_vas_destroy(void)
210 { 210 {
211 int result; 211 int result;
212 212
213 DBG("%s:%d: map.vas_id = %llu\n", __func__, __LINE__, map.vas_id); 213 DBG("%s:%d: map.vas_id = %llu\n", __func__, __LINE__, map.vas_id);
214 214
215 if (map.vas_id) { 215 if (map.vas_id) {
216 result = lv1_select_virtual_address_space(0); 216 result = lv1_select_virtual_address_space(0);
217 BUG_ON(result); 217 BUG_ON(result);
218 result = lv1_destruct_virtual_address_space(map.vas_id); 218 result = lv1_destruct_virtual_address_space(map.vas_id);
219 BUG_ON(result); 219 BUG_ON(result);
220 map.vas_id = 0; 220 map.vas_id = 0;
221 } 221 }
222 } 222 }
223 223
224 /*============================================================================*/ 224 /*============================================================================*/
225 /* memory hotplug routines */ 225 /* memory hotplug routines */
226 /*============================================================================*/ 226 /*============================================================================*/
227 227
228 /** 228 /**
229 * ps3_mm_region_create - create a memory region in the vas 229 * ps3_mm_region_create - create a memory region in the vas
230 * @r: pointer to a struct mem_region to accept initialized values 230 * @r: pointer to a struct mem_region to accept initialized values
231 * @size: requested region size 231 * @size: requested region size
232 * 232 *
233 * This implementation creates the region with the vas large page size. 233 * This implementation creates the region with the vas large page size.
234 * @size is rounded down to a multiple of the vas large page size. 234 * @size is rounded down to a multiple of the vas large page size.
235 */ 235 */
236 236
237 static int ps3_mm_region_create(struct mem_region *r, unsigned long size) 237 static int ps3_mm_region_create(struct mem_region *r, unsigned long size)
238 { 238 {
239 int result; 239 int result;
240 u64 muid; 240 u64 muid;
241 241
242 r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M); 242 r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);
243 243
244 DBG("%s:%d requested %lxh\n", __func__, __LINE__, size); 244 DBG("%s:%d requested %lxh\n", __func__, __LINE__, size);
245 DBG("%s:%d actual %llxh\n", __func__, __LINE__, r->size); 245 DBG("%s:%d actual %llxh\n", __func__, __LINE__, r->size);
246 DBG("%s:%d difference %llxh (%lluMB)\n", __func__, __LINE__, 246 DBG("%s:%d difference %llxh (%lluMB)\n", __func__, __LINE__,
247 size - r->size, (size - r->size) / 1024 / 1024); 247 size - r->size, (size - r->size) / 1024 / 1024);
248 248
249 if (r->size == 0) { 249 if (r->size == 0) {
250 DBG("%s:%d: size == 0\n", __func__, __LINE__); 250 DBG("%s:%d: size == 0\n", __func__, __LINE__);
251 result = -1; 251 result = -1;
252 goto zero_region; 252 goto zero_region;
253 } 253 }
254 254
255 result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0, 255 result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
256 ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid); 256 ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);
257 257
258 if (result || r->base < map.rm.size) { 258 if (result || r->base < map.rm.size) {
259 DBG("%s:%d: lv1_allocate_memory failed: %s\n", 259 DBG("%s:%d: lv1_allocate_memory failed: %s\n",
260 __func__, __LINE__, ps3_result(result)); 260 __func__, __LINE__, ps3_result(result));
261 goto zero_region; 261 goto zero_region;
262 } 262 }
263 263
264 r->offset = r->base - map.rm.size; 264 r->offset = r->base - map.rm.size;
265 return result; 265 return result;
266 266
267 zero_region: 267 zero_region:
268 r->size = r->base = r->offset = 0; 268 r->size = r->base = r->offset = 0;
269 return result; 269 return result;
270 } 270 }
271 271
272 /** 272 /**
273 * ps3_mm_region_destroy - destroy a memory region 273 * ps3_mm_region_destroy - destroy a memory region
274 * @r: pointer to struct mem_region 274 * @r: pointer to struct mem_region
275 */ 275 */
276 276
277 static void ps3_mm_region_destroy(struct mem_region *r) 277 static void ps3_mm_region_destroy(struct mem_region *r)
278 { 278 {
279 int result; 279 int result;
280 280
281 DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base); 281 DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base);
282 if (r->base) { 282 if (r->base) {
283 result = lv1_release_memory(r->base); 283 result = lv1_release_memory(r->base);
284 BUG_ON(result); 284 BUG_ON(result);
285 r->size = r->base = r->offset = 0; 285 r->size = r->base = r->offset = 0;
286 map.total = map.rm.size; 286 map.total = map.rm.size;
287 } 287 }
288 } 288 }
289 289
290 /** 290 /**
291 * ps3_mm_add_memory - hot add memory 291 * ps3_mm_add_memory - hot add memory
292 */ 292 */
293 293
294 static int __init ps3_mm_add_memory(void) 294 static int __init ps3_mm_add_memory(void)
295 { 295 {
296 int result; 296 int result;
297 unsigned long start_addr; 297 unsigned long start_addr;
298 unsigned long start_pfn; 298 unsigned long start_pfn;
299 unsigned long nr_pages; 299 unsigned long nr_pages;
300 300
301 if (!firmware_has_feature(FW_FEATURE_PS3_LV1)) 301 if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
302 return -ENODEV; 302 return -ENODEV;
303 303
304 BUG_ON(!mem_init_done); 304 BUG_ON(!mem_init_done);
305 305
306 start_addr = map.rm.size; 306 start_addr = map.rm.size;
307 start_pfn = start_addr >> PAGE_SHIFT; 307 start_pfn = start_addr >> PAGE_SHIFT;
308 nr_pages = (map.r1.size + PAGE_SIZE - 1) >> PAGE_SHIFT; 308 nr_pages = (map.r1.size + PAGE_SIZE - 1) >> PAGE_SHIFT;
309 309
310 DBG("%s:%d: start_addr %lxh, start_pfn %lxh, nr_pages %lxh\n", 310 DBG("%s:%d: start_addr %lxh, start_pfn %lxh, nr_pages %lxh\n",
311 __func__, __LINE__, start_addr, start_pfn, nr_pages); 311 __func__, __LINE__, start_addr, start_pfn, nr_pages);
312 312
313 result = add_memory(0, start_addr, map.r1.size); 313 result = add_memory(0, start_addr, map.r1.size);
314 314
315 if (result) { 315 if (result) {
316 pr_err("%s:%d: add_memory failed: (%d)\n", 316 pr_err("%s:%d: add_memory failed: (%d)\n",
317 __func__, __LINE__, result); 317 __func__, __LINE__, result);
318 return result; 318 return result;
319 } 319 }
320 320
321 memblock_add(start_addr, map.r1.size); 321 memblock_add(start_addr, map.r1.size);
322 memblock_analyze(); 322 memblock_analyze();
323 323
324 result = online_pages(start_pfn, nr_pages); 324 result = online_pages(start_pfn, nr_pages);
325 325
326 if (result) 326 if (result)
327 pr_err("%s:%d: online_pages failed: (%d)\n", 327 pr_err("%s:%d: online_pages failed: (%d)\n",
328 __func__, __LINE__, result); 328 __func__, __LINE__, result);
329 329
330 return result; 330 return result;
331 } 331 }
332 332
333 device_initcall(ps3_mm_add_memory); 333 device_initcall(ps3_mm_add_memory);
334 334
335 /*============================================================================*/ 335 /*============================================================================*/
336 /* dma routines */ 336 /* dma routines */
337 /*============================================================================*/ 337 /*============================================================================*/
338 338
339 /** 339 /**
340 * dma_sb_lpar_to_bus - Translate an lpar address to ioc mapped bus address. 340 * dma_sb_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
341 * @r: pointer to dma region structure 341 * @r: pointer to dma region structure
342 * @lpar_addr: HV lpar address 342 * @lpar_addr: HV lpar address
343 */ 343 */
344 344
345 static unsigned long dma_sb_lpar_to_bus(struct ps3_dma_region *r, 345 static unsigned long dma_sb_lpar_to_bus(struct ps3_dma_region *r,
346 unsigned long lpar_addr) 346 unsigned long lpar_addr)
347 { 347 {
348 if (lpar_addr >= map.rm.size) 348 if (lpar_addr >= map.rm.size)
349 lpar_addr -= map.r1.offset; 349 lpar_addr -= map.r1.offset;
350 BUG_ON(lpar_addr < r->offset); 350 BUG_ON(lpar_addr < r->offset);
351 BUG_ON(lpar_addr >= r->offset + r->len); 351 BUG_ON(lpar_addr >= r->offset + r->len);
352 return r->bus_addr + lpar_addr - r->offset; 352 return r->bus_addr + lpar_addr - r->offset;
353 } 353 }
354 354
355 #define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__) 355 #define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
356 static void __maybe_unused _dma_dump_region(const struct ps3_dma_region *r, 356 static void __maybe_unused _dma_dump_region(const struct ps3_dma_region *r,
357 const char *func, int line) 357 const char *func, int line)
358 { 358 {
359 DBG("%s:%d: dev %llu:%llu\n", func, line, r->dev->bus_id, 359 DBG("%s:%d: dev %llu:%llu\n", func, line, r->dev->bus_id,
360 r->dev->dev_id); 360 r->dev->dev_id);
361 DBG("%s:%d: page_size %u\n", func, line, r->page_size); 361 DBG("%s:%d: page_size %u\n", func, line, r->page_size);
362 DBG("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr); 362 DBG("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr);
363 DBG("%s:%d: len %lxh\n", func, line, r->len); 363 DBG("%s:%d: len %lxh\n", func, line, r->len);
364 DBG("%s:%d: offset %lxh\n", func, line, r->offset); 364 DBG("%s:%d: offset %lxh\n", func, line, r->offset);
365 } 365 }
366 366
367 /** 367 /**
368 * dma_chunk - A chunk of dma pages mapped by the io controller. 368 * dma_chunk - A chunk of dma pages mapped by the io controller.
369 * @region: The dma region that owns this chunk. 369 * @region: The dma region that owns this chunk.
370 * @lpar_addr: Starting lpar address of the area to map. 370 * @lpar_addr: Starting lpar address of the area to map.
371 * @bus_addr: Starting ioc bus address of the area to map. 371 * @bus_addr: Starting ioc bus address of the area to map.
372 * @len: Length in bytes of the area to map. 372 * @len: Length in bytes of the area to map.
373 * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the 373 * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
374 * list of all chunks owned by the region. 374 * list of all chunks owned by the region.
375 * 375 *
376 * This implementation uses a very simple dma page manager 376 * This implementation uses a very simple dma page manager
377 * based on the dma_chunk structure. This scheme assumes 377 * based on the dma_chunk structure. This scheme assumes
378 * that all drivers use very well behaved dma ops. 378 * that all drivers use very well behaved dma ops.
379 */ 379 */
380 380
381 struct dma_chunk { 381 struct dma_chunk {
382 struct ps3_dma_region *region; 382 struct ps3_dma_region *region;
383 unsigned long lpar_addr; 383 unsigned long lpar_addr;
384 unsigned long bus_addr; 384 unsigned long bus_addr;
385 unsigned long len; 385 unsigned long len;
386 struct list_head link; 386 struct list_head link;
387 unsigned int usage_count; 387 unsigned int usage_count;
388 }; 388 };
389 389
390 #define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__) 390 #define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)
391 static void _dma_dump_chunk (const struct dma_chunk* c, const char* func, 391 static void _dma_dump_chunk (const struct dma_chunk* c, const char* func,
392 int line) 392 int line)
393 { 393 {
394 DBG("%s:%d: r.dev %llu:%llu\n", func, line, 394 DBG("%s:%d: r.dev %llu:%llu\n", func, line,
395 c->region->dev->bus_id, c->region->dev->dev_id); 395 c->region->dev->bus_id, c->region->dev->dev_id);
396 DBG("%s:%d: r.bus_addr %lxh\n", func, line, c->region->bus_addr); 396 DBG("%s:%d: r.bus_addr %lxh\n", func, line, c->region->bus_addr);
397 DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size); 397 DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size);
398 DBG("%s:%d: r.len %lxh\n", func, line, c->region->len); 398 DBG("%s:%d: r.len %lxh\n", func, line, c->region->len);
399 DBG("%s:%d: r.offset %lxh\n", func, line, c->region->offset); 399 DBG("%s:%d: r.offset %lxh\n", func, line, c->region->offset);
400 DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr); 400 DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr);
401 DBG("%s:%d: c.bus_addr %lxh\n", func, line, c->bus_addr); 401 DBG("%s:%d: c.bus_addr %lxh\n", func, line, c->bus_addr);
402 DBG("%s:%d: c.len %lxh\n", func, line, c->len); 402 DBG("%s:%d: c.len %lxh\n", func, line, c->len);
403 } 403 }
404 404
405 static struct dma_chunk * dma_find_chunk(struct ps3_dma_region *r, 405 static struct dma_chunk * dma_find_chunk(struct ps3_dma_region *r,
406 unsigned long bus_addr, unsigned long len) 406 unsigned long bus_addr, unsigned long len)
407 { 407 {
408 struct dma_chunk *c; 408 struct dma_chunk *c;
409 unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size); 409 unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size);
410 unsigned long aligned_len = _ALIGN_UP(len+bus_addr-aligned_bus, 410 unsigned long aligned_len = _ALIGN_UP(len+bus_addr-aligned_bus,
411 1 << r->page_size); 411 1 << r->page_size);
412 412
413 list_for_each_entry(c, &r->chunk_list.head, link) { 413 list_for_each_entry(c, &r->chunk_list.head, link) {
414 /* intersection */ 414 /* intersection */
415 if (aligned_bus >= c->bus_addr && 415 if (aligned_bus >= c->bus_addr &&
416 aligned_bus + aligned_len <= c->bus_addr + c->len) 416 aligned_bus + aligned_len <= c->bus_addr + c->len)
417 return c; 417 return c;
418 418
419 /* below */ 419 /* below */
420 if (aligned_bus + aligned_len <= c->bus_addr) 420 if (aligned_bus + aligned_len <= c->bus_addr)
421 continue; 421 continue;
422 422
423 /* above */ 423 /* above */
424 if (aligned_bus >= c->bus_addr + c->len) 424 if (aligned_bus >= c->bus_addr + c->len)
425 continue; 425 continue;
426 426
427 /* we don't handle the multi-chunk case for now */ 427 /* we don't handle the multi-chunk case for now */
428 dma_dump_chunk(c); 428 dma_dump_chunk(c);
429 BUG(); 429 BUG();
430 } 430 }
431 return NULL; 431 return NULL;
432 } 432 }
433 433
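As a hedged aside (not part of the commit): dma_find_chunk() above, like its lpar-based twin just below, first widens the request to DMA-page boundaries and then tests whether an existing chunk fully contains it. A minimal standalone sketch of that align-then-contain test follows; the ALIGN_* macros are local re-implementations of the kernel's _ALIGN_DOWN/_ALIGN_UP, and the 4KB page size and sample addresses are invented.

#include <stdio.h>

#define PAGE_SHIFT_EX 12UL	/* hypothetical 4KB DMA page */
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
#define ALIGN_UP(x, a)   ALIGN_DOWN((x) + (a) - 1, (a))

struct chunk { unsigned long bus_addr, len; };

static int chunk_contains(const struct chunk *c,
			  unsigned long bus_addr, unsigned long len)
{
	unsigned long page = 1UL << PAGE_SHIFT_EX;
	unsigned long aligned_bus = ALIGN_DOWN(bus_addr, page);
	unsigned long aligned_len = ALIGN_UP(len + bus_addr - aligned_bus, page);

	/* the request, rounded out to page bounds, lies inside the chunk */
	return aligned_bus >= c->bus_addr &&
	       aligned_bus + aligned_len <= c->bus_addr + c->len;
}

int main(void)
{
	struct chunk c = { 0x100000, 0x10000 };		/* 64KB chunk at 1MB */

	printf("%d\n", chunk_contains(&c, 0x100800, 0x200));	/* 1: inside */
	printf("%d\n", chunk_contains(&c, 0x10f800, 0x1000));	/* 0: spills past */
	return 0;
}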
434 static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r, 434 static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r,
435 unsigned long lpar_addr, unsigned long len) 435 unsigned long lpar_addr, unsigned long len)
436 { 436 {
437 struct dma_chunk *c; 437 struct dma_chunk *c;
438 unsigned long aligned_lpar = _ALIGN_DOWN(lpar_addr, 1 << r->page_size); 438 unsigned long aligned_lpar = _ALIGN_DOWN(lpar_addr, 1 << r->page_size);
439 unsigned long aligned_len = _ALIGN_UP(len + lpar_addr - aligned_lpar, 439 unsigned long aligned_len = _ALIGN_UP(len + lpar_addr - aligned_lpar,
440 1 << r->page_size); 440 1 << r->page_size);
441 441
442 list_for_each_entry(c, &r->chunk_list.head, link) { 442 list_for_each_entry(c, &r->chunk_list.head, link) {
443 /* intersection */ 443 /* intersection */
444 if (c->lpar_addr <= aligned_lpar && 444 if (c->lpar_addr <= aligned_lpar &&
445 aligned_lpar < c->lpar_addr + c->len) { 445 aligned_lpar < c->lpar_addr + c->len) {
446 if (aligned_lpar + aligned_len <= c->lpar_addr + c->len) 446 if (aligned_lpar + aligned_len <= c->lpar_addr + c->len)
447 return c; 447 return c;
448 else { 448 else {
449 dma_dump_chunk(c); 449 dma_dump_chunk(c);
450 BUG(); 450 BUG();
451 } 451 }
452 } 452 }
453 /* below */ 453 /* below */
454 if (aligned_lpar + aligned_len <= c->lpar_addr) { 454 if (aligned_lpar + aligned_len <= c->lpar_addr) {
455 continue; 455 continue;
456 } 456 }
457 /* above */ 457 /* above */
458 if (c->lpar_addr + c->len <= aligned_lpar) { 458 if (c->lpar_addr + c->len <= aligned_lpar) {
459 continue; 459 continue;
460 } 460 }
461 } 461 }
462 return NULL; 462 return NULL;
463 } 463 }
464 464
465 static int dma_sb_free_chunk(struct dma_chunk *c) 465 static int dma_sb_free_chunk(struct dma_chunk *c)
466 { 466 {
467 int result = 0; 467 int result = 0;
468 468
469 if (c->bus_addr) { 469 if (c->bus_addr) {
470 result = lv1_unmap_device_dma_region(c->region->dev->bus_id, 470 result = lv1_unmap_device_dma_region(c->region->dev->bus_id,
471 c->region->dev->dev_id, c->bus_addr, c->len); 471 c->region->dev->dev_id, c->bus_addr, c->len);
472 BUG_ON(result); 472 BUG_ON(result);
473 } 473 }
474 474
475 kfree(c); 475 kfree(c);
476 return result; 476 return result;
477 } 477 }
478 478
479 static int dma_ioc0_free_chunk(struct dma_chunk *c) 479 static int dma_ioc0_free_chunk(struct dma_chunk *c)
480 { 480 {
481 int result = 0; 481 int result = 0;
482 int iopage; 482 int iopage;
483 unsigned long offset; 483 unsigned long offset;
484 struct ps3_dma_region *r = c->region; 484 struct ps3_dma_region *r = c->region;
485 485
486 DBG("%s:start\n", __func__); 486 DBG("%s:start\n", __func__);
487 for (iopage = 0; iopage < (c->len >> r->page_size); iopage++) { 487 for (iopage = 0; iopage < (c->len >> r->page_size); iopage++) {
488 offset = (1 << r->page_size) * iopage; 488 offset = (1 << r->page_size) * iopage;
489 /* put INVALID entry */ 489 /* put INVALID entry */
490 result = lv1_put_iopte(0, 490 result = lv1_put_iopte(0,
491 c->bus_addr + offset, 491 c->bus_addr + offset,
492 c->lpar_addr + offset, 492 c->lpar_addr + offset,
493 r->ioid, 493 r->ioid,
494 0); 494 0);
495 DBG("%s: bus=%#lx, lpar=%#lx, ioid=%d\n", __func__, 495 DBG("%s: bus=%#lx, lpar=%#lx, ioid=%d\n", __func__,
496 c->bus_addr + offset, 496 c->bus_addr + offset,
497 c->lpar_addr + offset, 497 c->lpar_addr + offset,
498 r->ioid); 498 r->ioid);
499 499
500 if (result) { 500 if (result) {
501 DBG("%s:%d: lv1_put_iopte failed: %s\n", __func__, 501 DBG("%s:%d: lv1_put_iopte failed: %s\n", __func__,
502 __LINE__, ps3_result(result)); 502 __LINE__, ps3_result(result));
503 } 503 }
504 } 504 }
505 kfree(c); 505 kfree(c);
506 DBG("%s:end\n", __func__); 506 DBG("%s:end\n", __func__);
507 return result; 507 return result;
508 } 508 }
509 509
510 /** 510 /**
511 * dma_sb_map_pages - Maps dma pages into the io controller bus address space. 511 * dma_sb_map_pages - Maps dma pages into the io controller bus address space.
512 * @r: Pointer to a struct ps3_dma_region. 512 * @r: Pointer to a struct ps3_dma_region.
513 * @phys_addr: Starting physical address of the area to map. 513 * @phys_addr: Starting physical address of the area to map.
514 * @len: Length in bytes of the area to map. 514 * @len: Length in bytes of the area to map.
515 * @c_out: A pointer to receive an allocated struct dma_chunk for this area. 515 * @c_out: A pointer to receive an allocated struct dma_chunk for this area.
516 * 516 *
517 * This is the lowest level dma mapping routine, and is the one that will 517 * This is the lowest level dma mapping routine, and is the one that will
518 * make the HV call to add the pages into the io controller address space. 518 * make the HV call to add the pages into the io controller address space.
519 */ 519 */
520 520
521 static int dma_sb_map_pages(struct ps3_dma_region *r, unsigned long phys_addr, 521 static int dma_sb_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
522 unsigned long len, struct dma_chunk **c_out, u64 iopte_flag) 522 unsigned long len, struct dma_chunk **c_out, u64 iopte_flag)
523 { 523 {
524 int result; 524 int result;
525 struct dma_chunk *c; 525 struct dma_chunk *c;
526 526
527 c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC); 527 c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);
528 528
529 if (!c) { 529 if (!c) {
530 result = -ENOMEM; 530 result = -ENOMEM;
531 goto fail_alloc; 531 goto fail_alloc;
532 } 532 }
533 533
534 c->region = r; 534 c->region = r;
535 c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr); 535 c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
536 c->bus_addr = dma_sb_lpar_to_bus(r, c->lpar_addr); 536 c->bus_addr = dma_sb_lpar_to_bus(r, c->lpar_addr);
537 c->len = len; 537 c->len = len;
538 538
539 BUG_ON(iopte_flag != 0xf800000000000000UL); 539 BUG_ON(iopte_flag != 0xf800000000000000UL);
540 result = lv1_map_device_dma_region(c->region->dev->bus_id, 540 result = lv1_map_device_dma_region(c->region->dev->bus_id,
541 c->region->dev->dev_id, c->lpar_addr, 541 c->region->dev->dev_id, c->lpar_addr,
542 c->bus_addr, c->len, iopte_flag); 542 c->bus_addr, c->len, iopte_flag);
543 if (result) { 543 if (result) {
544 DBG("%s:%d: lv1_map_device_dma_region failed: %s\n", 544 DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
545 __func__, __LINE__, ps3_result(result)); 545 __func__, __LINE__, ps3_result(result));
546 goto fail_map; 546 goto fail_map;
547 } 547 }
548 548
549 list_add(&c->link, &r->chunk_list.head); 549 list_add(&c->link, &r->chunk_list.head);
550 550
551 *c_out = c; 551 *c_out = c;
552 return 0; 552 return 0;
553 553
554 fail_map: 554 fail_map:
555 kfree(c); 555 kfree(c);
556 fail_alloc: 556 fail_alloc:
557 *c_out = NULL; 557 *c_out = NULL;
558 DBG(" <- %s:%d\n", __func__, __LINE__); 558 DBG(" <- %s:%d\n", __func__, __LINE__);
559 return result; 559 return result;
560 } 560 }
561 561
562 static int dma_ioc0_map_pages(struct ps3_dma_region *r, unsigned long phys_addr, 562 static int dma_ioc0_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
563 unsigned long len, struct dma_chunk **c_out, 563 unsigned long len, struct dma_chunk **c_out,
564 u64 iopte_flag) 564 u64 iopte_flag)
565 { 565 {
566 int result; 566 int result;
567 struct dma_chunk *c, *last; 567 struct dma_chunk *c, *last;
568 int iopage, pages; 568 int iopage, pages;
569 unsigned long offset; 569 unsigned long offset;
570 570
571 DBG(KERN_ERR "%s: phy=%#lx, lpar%#lx, len=%#lx\n", __func__, 571 DBG(KERN_ERR "%s: phy=%#lx, lpar%#lx, len=%#lx\n", __func__,
572 phys_addr, ps3_mm_phys_to_lpar(phys_addr), len); 572 phys_addr, ps3_mm_phys_to_lpar(phys_addr), len);
573 c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC); 573 c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);
574 574
575 if (!c) { 575 if (!c) {
576 result = -ENOMEM; 576 result = -ENOMEM;
577 goto fail_alloc; 577 goto fail_alloc;
578 } 578 }
579 579
580 c->region = r; 580 c->region = r;
581 c->len = len; 581 c->len = len;
582 c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr); 582 c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
583 /* allocate IO address */ 583 /* allocate IO address */
584 if (list_empty(&r->chunk_list.head)) { 584 if (list_empty(&r->chunk_list.head)) {
585 /* first one */ 585 /* first one */
586 c->bus_addr = r->bus_addr; 586 c->bus_addr = r->bus_addr;
587 } else { 587 } else {
588 /* derive from last bus addr*/ 588 /* derive from last bus addr*/
589 last = list_entry(r->chunk_list.head.next, 589 last = list_entry(r->chunk_list.head.next,
590 struct dma_chunk, link); 590 struct dma_chunk, link);
591 c->bus_addr = last->bus_addr + last->len; 591 c->bus_addr = last->bus_addr + last->len;
592 DBG("%s: last bus=%#lx, len=%#lx\n", __func__, 592 DBG("%s: last bus=%#lx, len=%#lx\n", __func__,
593 last->bus_addr, last->len); 593 last->bus_addr, last->len);
594 } 594 }
595 595
596 /* FIXME: check whether length exceeds region size */ 596 /* FIXME: check whether length exceeds region size */
597 597
598 /* build ioptes for the area */ 598 /* build ioptes for the area */
599 pages = len >> r->page_size; 599 pages = len >> r->page_size;
600 DBG("%s: pgsize=%#x len=%#lx pages=%#x iopteflag=%#llx\n", __func__, 600 DBG("%s: pgsize=%#x len=%#lx pages=%#x iopteflag=%#llx\n", __func__,
601 r->page_size, r->len, pages, iopte_flag); 601 r->page_size, r->len, pages, iopte_flag);
602 for (iopage = 0; iopage < pages; iopage++) { 602 for (iopage = 0; iopage < pages; iopage++) {
603 offset = (1 << r->page_size) * iopage; 603 offset = (1 << r->page_size) * iopage;
604 result = lv1_put_iopte(0, 604 result = lv1_put_iopte(0,
605 c->bus_addr + offset, 605 c->bus_addr + offset,
606 c->lpar_addr + offset, 606 c->lpar_addr + offset,
607 r->ioid, 607 r->ioid,
608 iopte_flag); 608 iopte_flag);
609 if (result) { 609 if (result) {
610 pr_warning("%s:%d: lv1_put_iopte failed: %s\n", 610 pr_warning("%s:%d: lv1_put_iopte failed: %s\n",
611 __func__, __LINE__, ps3_result(result)); 611 __func__, __LINE__, ps3_result(result));
612 goto fail_map; 612 goto fail_map;
613 } 613 }
614 DBG("%s: pg=%d bus=%#lx, lpar=%#lx, ioid=%#x\n", __func__, 614 DBG("%s: pg=%d bus=%#lx, lpar=%#lx, ioid=%#x\n", __func__,
615 iopage, c->bus_addr + offset, c->lpar_addr + offset, 615 iopage, c->bus_addr + offset, c->lpar_addr + offset,
616 r->ioid); 616 r->ioid);
617 } 617 }
618 618
619 /* be sure that last allocated one is inserted at head */ 619 /* be sure that last allocated one is inserted at head */
620 list_add(&c->link, &r->chunk_list.head); 620 list_add(&c->link, &r->chunk_list.head);
621 621
622 *c_out = c; 622 *c_out = c;
623 DBG("%s: end\n", __func__); 623 DBG("%s: end\n", __func__);
624 return 0; 624 return 0;
625 625
626 fail_map: 626 fail_map:
627 for (iopage--; 0 <= iopage; iopage--) { 627 for (iopage--; 0 <= iopage; iopage--) {
628 lv1_put_iopte(0, 628 lv1_put_iopte(0,
629 c->bus_addr + offset, 629 c->bus_addr + offset,
630 c->lpar_addr + offset, 630 c->lpar_addr + offset,
631 r->ioid, 631 r->ioid,
632 0); 632 0);
633 } 633 }
634 kfree(c); 634 kfree(c);
635 fail_alloc: 635 fail_alloc:
636 *c_out = NULL; 636 *c_out = NULL;
637 return result; 637 return result;
638 } 638 }
639 639
640 /** 640 /**
641 * dma_sb_region_create - Create a device dma region. 641 * dma_sb_region_create - Create a device dma region.
642 * @r: Pointer to a struct ps3_dma_region. 642 * @r: Pointer to a struct ps3_dma_region.
643 * 643 *
644 * This is the lowest level dma region create routine, and is the one that 644 * This is the lowest level dma region create routine, and is the one that
645 * will make the HV call to create the region. 645 * will make the HV call to create the region.
646 */ 646 */
647 647
648 static int dma_sb_region_create(struct ps3_dma_region *r) 648 static int dma_sb_region_create(struct ps3_dma_region *r)
649 { 649 {
650 int result; 650 int result;
651 u64 bus_addr; 651 u64 bus_addr;
652 652
653 DBG(" -> %s:%d:\n", __func__, __LINE__); 653 DBG(" -> %s:%d:\n", __func__, __LINE__);
654 654
655 BUG_ON(!r); 655 BUG_ON(!r);
656 656
657 if (!r->dev->bus_id) { 657 if (!r->dev->bus_id) {
658 pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__, 658 pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
659 r->dev->bus_id, r->dev->dev_id); 659 r->dev->bus_id, r->dev->dev_id);
660 return 0; 660 return 0;
661 } 661 }
662 662
663 DBG("%s:%u: len = 0x%lx, page_size = %u, offset = 0x%lx\n", __func__, 663 DBG("%s:%u: len = 0x%lx, page_size = %u, offset = 0x%lx\n", __func__,
664 __LINE__, r->len, r->page_size, r->offset); 664 __LINE__, r->len, r->page_size, r->offset);
665 665
666 BUG_ON(!r->len); 666 BUG_ON(!r->len);
667 BUG_ON(!r->page_size); 667 BUG_ON(!r->page_size);
668 BUG_ON(!r->region_ops); 668 BUG_ON(!r->region_ops);
669 669
670 INIT_LIST_HEAD(&r->chunk_list.head); 670 INIT_LIST_HEAD(&r->chunk_list.head);
671 spin_lock_init(&r->chunk_list.lock); 671 spin_lock_init(&r->chunk_list.lock);
672 672
673 result = lv1_allocate_device_dma_region(r->dev->bus_id, r->dev->dev_id, 673 result = lv1_allocate_device_dma_region(r->dev->bus_id, r->dev->dev_id,
674 roundup_pow_of_two(r->len), r->page_size, r->region_type, 674 roundup_pow_of_two(r->len), r->page_size, r->region_type,
675 &bus_addr); 675 &bus_addr);
676 r->bus_addr = bus_addr; 676 r->bus_addr = bus_addr;
677 677
678 if (result) { 678 if (result) {
679 DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n", 679 DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
680 __func__, __LINE__, ps3_result(result)); 680 __func__, __LINE__, ps3_result(result));
681 r->len = r->bus_addr = 0; 681 r->len = r->bus_addr = 0;
682 } 682 }
683 683
684 return result; 684 return result;
685 } 685 }
686 686
687 static int dma_ioc0_region_create(struct ps3_dma_region *r) 687 static int dma_ioc0_region_create(struct ps3_dma_region *r)
688 { 688 {
689 int result; 689 int result;
690 u64 bus_addr; 690 u64 bus_addr;
691 691
692 INIT_LIST_HEAD(&r->chunk_list.head); 692 INIT_LIST_HEAD(&r->chunk_list.head);
693 spin_lock_init(&r->chunk_list.lock); 693 spin_lock_init(&r->chunk_list.lock);
694 694
695 result = lv1_allocate_io_segment(0, 695 result = lv1_allocate_io_segment(0,
696 r->len, 696 r->len,
697 r->page_size, 697 r->page_size,
698 &bus_addr); 698 &bus_addr);
699 r->bus_addr = bus_addr; 699 r->bus_addr = bus_addr;
700 if (result) { 700 if (result) {
701 DBG("%s:%d: lv1_allocate_io_segment failed: %s\n", 701 DBG("%s:%d: lv1_allocate_io_segment failed: %s\n",
702 __func__, __LINE__, ps3_result(result)); 702 __func__, __LINE__, ps3_result(result));
703 r->len = r->bus_addr = 0; 703 r->len = r->bus_addr = 0;
704 } 704 }
705 DBG("%s: len=%#lx, pg=%d, bus=%#lx\n", __func__, 705 DBG("%s: len=%#lx, pg=%d, bus=%#lx\n", __func__,
706 r->len, r->page_size, r->bus_addr); 706 r->len, r->page_size, r->bus_addr);
707 return result; 707 return result;
708 } 708 }
709 709
710 /** 710 /**
711 * dma_region_free - Free a device dma region. 711 * dma_region_free - Free a device dma region.
712 * @r: Pointer to a struct ps3_dma_region. 712 * @r: Pointer to a struct ps3_dma_region.
713 * 713 *
714 * This is the lowest level dma region free routine, and is the one that 714 * This is the lowest level dma region free routine, and is the one that
715 * will make the HV call to free the region. 715 * will make the HV call to free the region.
716 */ 716 */
717 717
718 static int dma_sb_region_free(struct ps3_dma_region *r) 718 static int dma_sb_region_free(struct ps3_dma_region *r)
719 { 719 {
720 int result; 720 int result;
721 struct dma_chunk *c; 721 struct dma_chunk *c;
722 struct dma_chunk *tmp; 722 struct dma_chunk *tmp;
723 723
724 BUG_ON(!r); 724 BUG_ON(!r);
725 725
726 if (!r->dev->bus_id) { 726 if (!r->dev->bus_id) {
727 pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__, 727 pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
728 r->dev->bus_id, r->dev->dev_id); 728 r->dev->bus_id, r->dev->dev_id);
729 return 0; 729 return 0;
730 } 730 }
731 731
732 list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) { 732 list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
733 list_del(&c->link); 733 list_del(&c->link);
734 dma_sb_free_chunk(c); 734 dma_sb_free_chunk(c);
735 } 735 }
736 736
737 result = lv1_free_device_dma_region(r->dev->bus_id, r->dev->dev_id, 737 result = lv1_free_device_dma_region(r->dev->bus_id, r->dev->dev_id,
738 r->bus_addr); 738 r->bus_addr);
739 739
740 if (result) 740 if (result)
741 DBG("%s:%d: lv1_free_device_dma_region failed: %s\n", 741 DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
742 __func__, __LINE__, ps3_result(result)); 742 __func__, __LINE__, ps3_result(result));
743 743
744 r->bus_addr = 0; 744 r->bus_addr = 0;
745 745
746 return result; 746 return result;
747 } 747 }
748 748
749 static int dma_ioc0_region_free(struct ps3_dma_region *r) 749 static int dma_ioc0_region_free(struct ps3_dma_region *r)
750 { 750 {
751 int result; 751 int result;
752 struct dma_chunk *c, *n; 752 struct dma_chunk *c, *n;
753 753
754 DBG("%s: start\n", __func__); 754 DBG("%s: start\n", __func__);
755 list_for_each_entry_safe(c, n, &r->chunk_list.head, link) { 755 list_for_each_entry_safe(c, n, &r->chunk_list.head, link) {
756 list_del(&c->link); 756 list_del(&c->link);
757 dma_ioc0_free_chunk(c); 757 dma_ioc0_free_chunk(c);
758 } 758 }
759 759
760 result = lv1_release_io_segment(0, r->bus_addr); 760 result = lv1_release_io_segment(0, r->bus_addr);
761 761
762 if (result) 762 if (result)
763 DBG("%s:%d: lv1_free_device_dma_region failed: %s\n", 763 DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
764 __func__, __LINE__, ps3_result(result)); 764 __func__, __LINE__, ps3_result(result));
765 765
766 r->bus_addr = 0; 766 r->bus_addr = 0;
767 DBG("%s: end\n", __func__); 767 DBG("%s: end\n", __func__);
768 768
769 return result; 769 return result;
770 } 770 }
771 771
772 /** 772 /**
773 * dma_sb_map_area - Map an area of memory into a device dma region. 773 * dma_sb_map_area - Map an area of memory into a device dma region.
774 * @r: Pointer to a struct ps3_dma_region. 774 * @r: Pointer to a struct ps3_dma_region.
775 * @virt_addr: Starting virtual address of the area to map. 775 * @virt_addr: Starting virtual address of the area to map.
776 * @len: Length in bytes of the area to map. 776 * @len: Length in bytes of the area to map.
777 * @bus_addr: A pointer to return the starting ioc bus address of the area to 777 * @bus_addr: A pointer to return the starting ioc bus address of the area to
778 * map. 778 * map.
779 * 779 *
780 * This is the common dma mapping routine. 780 * This is the common dma mapping routine.
781 */ 781 */
782 782
783 static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr, 783 static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
784 unsigned long len, dma_addr_t *bus_addr, 784 unsigned long len, dma_addr_t *bus_addr,
785 u64 iopte_flag) 785 u64 iopte_flag)
786 { 786 {
787 int result; 787 int result;
788 unsigned long flags; 788 unsigned long flags;
789 struct dma_chunk *c; 789 struct dma_chunk *c;
790 unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr) 790 unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
791 : virt_addr; 791 : virt_addr;
792 unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size); 792 unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
793 unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys, 793 unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
794 1 << r->page_size); 794 1 << r->page_size);
795 *bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr)); 795 *bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
796 796
797 if (!USE_DYNAMIC_DMA) { 797 if (!USE_DYNAMIC_DMA) {
798 unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr); 798 unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
799 DBG(" -> %s:%d\n", __func__, __LINE__); 799 DBG(" -> %s:%d\n", __func__, __LINE__);
800 DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__, 800 DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__,
801 virt_addr); 801 virt_addr);
802 DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__, 802 DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__,
803 phys_addr); 803 phys_addr);
804 DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__, 804 DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
805 lpar_addr); 805 lpar_addr);
806 DBG("%s:%d len %lxh\n", __func__, __LINE__, len); 806 DBG("%s:%d len %lxh\n", __func__, __LINE__, len);
807 DBG("%s:%d bus_addr %llxh (%lxh)\n", __func__, __LINE__, 807 DBG("%s:%d bus_addr %llxh (%lxh)\n", __func__, __LINE__,
808 *bus_addr, len); 808 *bus_addr, len);
809 } 809 }
810 810
811 spin_lock_irqsave(&r->chunk_list.lock, flags); 811 spin_lock_irqsave(&r->chunk_list.lock, flags);
812 c = dma_find_chunk(r, *bus_addr, len); 812 c = dma_find_chunk(r, *bus_addr, len);
813 813
814 if (c) { 814 if (c) {
815 DBG("%s:%d: reusing mapped chunk", __func__, __LINE__); 815 DBG("%s:%d: reusing mapped chunk", __func__, __LINE__);
816 dma_dump_chunk(c); 816 dma_dump_chunk(c);
817 c->usage_count++; 817 c->usage_count++;
818 spin_unlock_irqrestore(&r->chunk_list.lock, flags); 818 spin_unlock_irqrestore(&r->chunk_list.lock, flags);
819 return 0; 819 return 0;
820 } 820 }
821 821
822 result = dma_sb_map_pages(r, aligned_phys, aligned_len, &c, iopte_flag); 822 result = dma_sb_map_pages(r, aligned_phys, aligned_len, &c, iopte_flag);
823 823
824 if (result) { 824 if (result) {
825 *bus_addr = 0; 825 *bus_addr = 0;
826 DBG("%s:%d: dma_sb_map_pages failed (%d)\n", 826 DBG("%s:%d: dma_sb_map_pages failed (%d)\n",
827 __func__, __LINE__, result); 827 __func__, __LINE__, result);
828 spin_unlock_irqrestore(&r->chunk_list.lock, flags); 828 spin_unlock_irqrestore(&r->chunk_list.lock, flags);
829 return result; 829 return result;
830 } 830 }
831 831
832 c->usage_count = 1; 832 c->usage_count = 1;
833 833
834 spin_unlock_irqrestore(&r->chunk_list.lock, flags); 834 spin_unlock_irqrestore(&r->chunk_list.lock, flags);
835 return result; 835 return result;
836 } 836 }
837 837
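As a hedged aside (not part of the commit): dma_sb_map_area() above and dma_ioc0_map_area() below share a lookup-or-map pattern with per-chunk reference counts, and the unmap routines release the HV mapping only when the count drops to zero. The standalone sketch below reduces that pattern to a single cached chunk with invented names; it keeps none of the locking or HV calls.

#include <stdio.h>
#include <stdlib.h>

struct chunk {
	unsigned long bus_addr, len;
	unsigned int usage_count;
};

static struct chunk *cached;	/* stands in for the region's chunk list */

static struct chunk *map_area(unsigned long bus_addr, unsigned long len)
{
	if (cached && bus_addr >= cached->bus_addr &&
	    bus_addr + len <= cached->bus_addr + cached->len) {
		cached->usage_count++;	/* already mapped: just take a reference */
		return cached;
	}
	cached = calloc(1, sizeof(*cached));	/* stands in for the HV map call */
	if (!cached)
		return NULL;
	cached->bus_addr = bus_addr;
	cached->len = len;
	cached->usage_count = 1;
	return cached;
}

static void unmap_area(struct chunk *c)
{
	if (c && --c->usage_count == 0) {	/* last user: really unmap */
		free(c);
		cached = NULL;
	}
}

int main(void)
{
	struct chunk *a = map_area(0x1000, 0x100);
	struct chunk *b = map_area(0x1000, 0x80);	/* reuses a's mapping */

	if (!a || !b)
		return 1;
	printf("shared=%d refs=%u\n", a == b, a->usage_count);
	unmap_area(b);
	unmap_area(a);
	return 0;
}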
838 static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr, 838 static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
839 unsigned long len, dma_addr_t *bus_addr, 839 unsigned long len, dma_addr_t *bus_addr,
840 u64 iopte_flag) 840 u64 iopte_flag)
841 { 841 {
842 int result; 842 int result;
843 unsigned long flags; 843 unsigned long flags;
844 struct dma_chunk *c; 844 struct dma_chunk *c;
845 unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr) 845 unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
846 : virt_addr; 846 : virt_addr;
847 unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size); 847 unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
848 unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys, 848 unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
849 1 << r->page_size); 849 1 << r->page_size);
850 850
851 DBG(KERN_ERR "%s: vaddr=%#lx, len=%#lx\n", __func__, 851 DBG(KERN_ERR "%s: vaddr=%#lx, len=%#lx\n", __func__,
852 virt_addr, len); 852 virt_addr, len);
853 DBG(KERN_ERR "%s: ph=%#lx a_ph=%#lx a_l=%#lx\n", __func__, 853 DBG(KERN_ERR "%s: ph=%#lx a_ph=%#lx a_l=%#lx\n", __func__,
854 phys_addr, aligned_phys, aligned_len); 854 phys_addr, aligned_phys, aligned_len);
855 855
856 spin_lock_irqsave(&r->chunk_list.lock, flags); 856 spin_lock_irqsave(&r->chunk_list.lock, flags);
857 c = dma_find_chunk_lpar(r, ps3_mm_phys_to_lpar(phys_addr), len); 857 c = dma_find_chunk_lpar(r, ps3_mm_phys_to_lpar(phys_addr), len);
858 858
859 if (c) { 859 if (c) {
860 /* FIXME */ 860 /* FIXME */
861 BUG(); 861 BUG();
862 *bus_addr = c->bus_addr + phys_addr - aligned_phys; 862 *bus_addr = c->bus_addr + phys_addr - aligned_phys;
863 c->usage_count++; 863 c->usage_count++;
864 spin_unlock_irqrestore(&r->chunk_list.lock, flags); 864 spin_unlock_irqrestore(&r->chunk_list.lock, flags);
865 return 0; 865 return 0;
866 } 866 }
867 867
868 result = dma_ioc0_map_pages(r, aligned_phys, aligned_len, &c, 868 result = dma_ioc0_map_pages(r, aligned_phys, aligned_len, &c,
869 iopte_flag); 869 iopte_flag);
870 870
871 if (result) { 871 if (result) {
872 *bus_addr = 0; 872 *bus_addr = 0;
873 DBG("%s:%d: dma_ioc0_map_pages failed (%d)\n", 873 DBG("%s:%d: dma_ioc0_map_pages failed (%d)\n",
874 __func__, __LINE__, result); 874 __func__, __LINE__, result);
875 spin_unlock_irqrestore(&r->chunk_list.lock, flags); 875 spin_unlock_irqrestore(&r->chunk_list.lock, flags);
876 return result; 876 return result;
877 } 877 }
878 *bus_addr = c->bus_addr + phys_addr - aligned_phys; 878 *bus_addr = c->bus_addr + phys_addr - aligned_phys;
879 DBG("%s: va=%#lx pa=%#lx a_pa=%#lx bus=%#llx\n", __func__, 879 DBG("%s: va=%#lx pa=%#lx a_pa=%#lx bus=%#llx\n", __func__,
880 virt_addr, phys_addr, aligned_phys, *bus_addr); 880 virt_addr, phys_addr, aligned_phys, *bus_addr);
881 c->usage_count = 1; 881 c->usage_count = 1;
882 882
883 spin_unlock_irqrestore(&r->chunk_list.lock, flags); 883 spin_unlock_irqrestore(&r->chunk_list.lock, flags);
884 return result; 884 return result;
885 } 885 }
886 886
887 /** 887 /**
888 * dma_sb_unmap_area - Unmap an area of memory from a device dma region. 888 * dma_sb_unmap_area - Unmap an area of memory from a device dma region.
889 * @r: Pointer to a struct ps3_dma_region. 889 * @r: Pointer to a struct ps3_dma_region.
890 * @bus_addr: The starting ioc bus address of the area to unmap. 890 * @bus_addr: The starting ioc bus address of the area to unmap.
891 * @len: Length in bytes of the area to unmap. 891 * @len: Length in bytes of the area to unmap.
892 * 892 *
893 * This is the common dma unmap routine. 893 * This is the common dma unmap routine.
894 */ 894 */
895 895
896 static int dma_sb_unmap_area(struct ps3_dma_region *r, dma_addr_t bus_addr, 896 static int dma_sb_unmap_area(struct ps3_dma_region *r, dma_addr_t bus_addr,
897 unsigned long len) 897 unsigned long len)
898 { 898 {
899 unsigned long flags; 899 unsigned long flags;
900 struct dma_chunk *c; 900 struct dma_chunk *c;
901 901
902 spin_lock_irqsave(&r->chunk_list.lock, flags); 902 spin_lock_irqsave(&r->chunk_list.lock, flags);
903 c = dma_find_chunk(r, bus_addr, len); 903 c = dma_find_chunk(r, bus_addr, len);
904 904
905 if (!c) { 905 if (!c) {
906 unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 906 unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
907 1 << r->page_size); 907 1 << r->page_size);
908 unsigned long aligned_len = _ALIGN_UP(len + bus_addr 908 unsigned long aligned_len = _ALIGN_UP(len + bus_addr
909 - aligned_bus, 1 << r->page_size); 909 - aligned_bus, 1 << r->page_size);
910 DBG("%s:%d: not found: bus_addr %llxh\n", 910 DBG("%s:%d: not found: bus_addr %llxh\n",
911 __func__, __LINE__, bus_addr); 911 __func__, __LINE__, bus_addr);
912 DBG("%s:%d: not found: len %lxh\n", 912 DBG("%s:%d: not found: len %lxh\n",
913 __func__, __LINE__, len); 913 __func__, __LINE__, len);
914 DBG("%s:%d: not found: aligned_bus %lxh\n", 914 DBG("%s:%d: not found: aligned_bus %lxh\n",
915 __func__, __LINE__, aligned_bus); 915 __func__, __LINE__, aligned_bus);
916 DBG("%s:%d: not found: aligned_len %lxh\n", 916 DBG("%s:%d: not found: aligned_len %lxh\n",
917 __func__, __LINE__, aligned_len); 917 __func__, __LINE__, aligned_len);
918 BUG(); 918 BUG();
919 } 919 }
920 920
921 c->usage_count--; 921 c->usage_count--;
922 922
923 if (!c->usage_count) { 923 if (!c->usage_count) {
924 list_del(&c->link); 924 list_del(&c->link);
925 dma_sb_free_chunk(c); 925 dma_sb_free_chunk(c);
926 } 926 }
927 927
928 spin_unlock_irqrestore(&r->chunk_list.lock, flags); 928 spin_unlock_irqrestore(&r->chunk_list.lock, flags);
929 return 0; 929 return 0;
930 } 930 }
931 931
932 static int dma_ioc0_unmap_area(struct ps3_dma_region *r, 932 static int dma_ioc0_unmap_area(struct ps3_dma_region *r,
933 dma_addr_t bus_addr, unsigned long len) 933 dma_addr_t bus_addr, unsigned long len)
934 { 934 {
935 unsigned long flags; 935 unsigned long flags;
936 struct dma_chunk *c; 936 struct dma_chunk *c;
937 937
938 DBG("%s: start a=%#llx l=%#lx\n", __func__, bus_addr, len); 938 DBG("%s: start a=%#llx l=%#lx\n", __func__, bus_addr, len);
939 spin_lock_irqsave(&r->chunk_list.lock, flags); 939 spin_lock_irqsave(&r->chunk_list.lock, flags);
940 c = dma_find_chunk(r, bus_addr, len); 940 c = dma_find_chunk(r, bus_addr, len);
941 941
942 if (!c) { 942 if (!c) {
943 unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 943 unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
944 1 << r->page_size); 944 1 << r->page_size);
945 unsigned long aligned_len = _ALIGN_UP(len + bus_addr 945 unsigned long aligned_len = _ALIGN_UP(len + bus_addr
946 - aligned_bus, 946 - aligned_bus,
947 1 << r->page_size); 947 1 << r->page_size);
948 DBG("%s:%d: not found: bus_addr %llxh\n", 948 DBG("%s:%d: not found: bus_addr %llxh\n",
949 __func__, __LINE__, bus_addr); 949 __func__, __LINE__, bus_addr);
950 DBG("%s:%d: not found: len %lxh\n", 950 DBG("%s:%d: not found: len %lxh\n",
951 __func__, __LINE__, len); 951 __func__, __LINE__, len);
952 DBG("%s:%d: not found: aligned_bus %lxh\n", 952 DBG("%s:%d: not found: aligned_bus %lxh\n",
953 __func__, __LINE__, aligned_bus); 953 __func__, __LINE__, aligned_bus);
954 DBG("%s:%d: not found: aligned_len %lxh\n", 954 DBG("%s:%d: not found: aligned_len %lxh\n",
955 __func__, __LINE__, aligned_len); 955 __func__, __LINE__, aligned_len);
956 BUG(); 956 BUG();
957 } 957 }
958 958
959 c->usage_count--; 959 c->usage_count--;
960 960
961 if (!c->usage_count) { 961 if (!c->usage_count) {
962 list_del(&c->link); 962 list_del(&c->link);
963 dma_ioc0_free_chunk(c); 963 dma_ioc0_free_chunk(c);
964 } 964 }
965 965
966 spin_unlock_irqrestore(&r->chunk_list.lock, flags); 966 spin_unlock_irqrestore(&r->chunk_list.lock, flags);
967 DBG("%s: end\n", __func__); 967 DBG("%s: end\n", __func__);
968 return 0; 968 return 0;
969 } 969 }
970 970
971 /** 971 /**
972 * dma_sb_region_create_linear - Setup a linear dma mapping for a device. 972 * dma_sb_region_create_linear - Setup a linear dma mapping for a device.
973 * @r: Pointer to a struct ps3_dma_region. 973 * @r: Pointer to a struct ps3_dma_region.
974 * 974 *
975 * This routine creates an HV dma region for the device and maps all available 975 * This routine creates an HV dma region for the device and maps all available
976 * ram into the io controller bus address space. 976 * ram into the io controller bus address space.
977 */ 977 */
978 978
979 static int dma_sb_region_create_linear(struct ps3_dma_region *r) 979 static int dma_sb_region_create_linear(struct ps3_dma_region *r)
980 { 980 {
981 int result; 981 int result;
982 unsigned long virt_addr, len; 982 unsigned long virt_addr, len;
983 dma_addr_t tmp; 983 dma_addr_t tmp;
984 984
985 if (r->len > 16*1024*1024) { /* FIXME: need proper fix */ 985 if (r->len > 16*1024*1024) { /* FIXME: need proper fix */
986 /* force 16M dma pages for linear mapping */ 986 /* force 16M dma pages for linear mapping */
987 if (r->page_size != PS3_DMA_16M) { 987 if (r->page_size != PS3_DMA_16M) {
988 pr_info("%s:%d: forcing 16M pages for linear map\n", 988 pr_info("%s:%d: forcing 16M pages for linear map\n",
989 __func__, __LINE__); 989 __func__, __LINE__);
990 r->page_size = PS3_DMA_16M; 990 r->page_size = PS3_DMA_16M;
991 r->len = _ALIGN_UP(r->len, 1 << r->page_size); 991 r->len = _ALIGN_UP(r->len, 1 << r->page_size);
992 } 992 }
993 } 993 }
994 994
995 result = dma_sb_region_create(r); 995 result = dma_sb_region_create(r);
996 BUG_ON(result); 996 BUG_ON(result);
997 997
998 if (r->offset < map.rm.size) { 998 if (r->offset < map.rm.size) {
999 /* Map (part of) 1st RAM chunk */ 999 /* Map (part of) 1st RAM chunk */
1000 virt_addr = map.rm.base + r->offset; 1000 virt_addr = map.rm.base + r->offset;
1001 len = map.rm.size - r->offset; 1001 len = map.rm.size - r->offset;
1002 if (len > r->len) 1002 if (len > r->len)
1003 len = r->len; 1003 len = r->len;
1004 result = dma_sb_map_area(r, virt_addr, len, &tmp, 1004 result = dma_sb_map_area(r, virt_addr, len, &tmp,
1005 CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW | 1005 CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
1006 CBE_IOPTE_M); 1006 CBE_IOPTE_M);
1007 BUG_ON(result); 1007 BUG_ON(result);
1008 } 1008 }
1009 1009
1010 if (r->offset + r->len > map.rm.size) { 1010 if (r->offset + r->len > map.rm.size) {
1011 /* Map (part of) 2nd RAM chunk */ 1011 /* Map (part of) 2nd RAM chunk */
1012 virt_addr = map.rm.size; 1012 virt_addr = map.rm.size;
1013 len = r->len; 1013 len = r->len;
1014 if (r->offset >= map.rm.size) 1014 if (r->offset >= map.rm.size)
1015 virt_addr += r->offset - map.rm.size; 1015 virt_addr += r->offset - map.rm.size;
1016 else 1016 else
1017 len -= map.rm.size - r->offset; 1017 len -= map.rm.size - r->offset;
1018 result = dma_sb_map_area(r, virt_addr, len, &tmp, 1018 result = dma_sb_map_area(r, virt_addr, len, &tmp,
1019 CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW | 1019 CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
1020 CBE_IOPTE_M); 1020 CBE_IOPTE_M);
1021 BUG_ON(result); 1021 BUG_ON(result);
1022 } 1022 }
1023 1023
1024 return result; 1024 return result;
1025 } 1025 }
1026 1026
1027 /** 1027 /**
1028 * dma_sb_region_free_linear - Free a linear dma mapping for a device. 1028 * dma_sb_region_free_linear - Free a linear dma mapping for a device.
1029 * @r: Pointer to a struct ps3_dma_region. 1029 * @r: Pointer to a struct ps3_dma_region.
1030 * 1030 *
1031 * This routine will unmap all mapped areas and free the HV dma region. 1031 * This routine will unmap all mapped areas and free the HV dma region.
1032 */ 1032 */
1033 1033
1034 static int dma_sb_region_free_linear(struct ps3_dma_region *r) 1034 static int dma_sb_region_free_linear(struct ps3_dma_region *r)
1035 { 1035 {
1036 int result; 1036 int result;
1037 dma_addr_t bus_addr; 1037 dma_addr_t bus_addr;
1038 unsigned long len, lpar_addr; 1038 unsigned long len, lpar_addr;
1039 1039
1040 if (r->offset < map.rm.size) { 1040 if (r->offset < map.rm.size) {
1041 /* Unmap (part of) 1st RAM chunk */ 1041 /* Unmap (part of) 1st RAM chunk */
1042 lpar_addr = map.rm.base + r->offset; 1042 lpar_addr = map.rm.base + r->offset;
1043 len = map.rm.size - r->offset; 1043 len = map.rm.size - r->offset;
1044 if (len > r->len) 1044 if (len > r->len)
1045 len = r->len; 1045 len = r->len;
1046 bus_addr = dma_sb_lpar_to_bus(r, lpar_addr); 1046 bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
1047 result = dma_sb_unmap_area(r, bus_addr, len); 1047 result = dma_sb_unmap_area(r, bus_addr, len);
1048 BUG_ON(result); 1048 BUG_ON(result);
1049 } 1049 }
1050 1050
1051 if (r->offset + r->len > map.rm.size) { 1051 if (r->offset + r->len > map.rm.size) {
1052 /* Unmap (part of) 2nd RAM chunk */ 1052 /* Unmap (part of) 2nd RAM chunk */
1053 lpar_addr = map.r1.base; 1053 lpar_addr = map.r1.base;
1054 len = r->len; 1054 len = r->len;
1055 if (r->offset >= map.rm.size) 1055 if (r->offset >= map.rm.size)
1056 lpar_addr += r->offset - map.rm.size; 1056 lpar_addr += r->offset - map.rm.size;
1057 else 1057 else
1058 len -= map.rm.size - r->offset; 1058 len -= map.rm.size - r->offset;
1059 bus_addr = dma_sb_lpar_to_bus(r, lpar_addr); 1059 bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
1060 result = dma_sb_unmap_area(r, bus_addr, len); 1060 result = dma_sb_unmap_area(r, bus_addr, len);
1061 BUG_ON(result); 1061 BUG_ON(result);
1062 } 1062 }
1063 1063
1064 result = dma_sb_region_free(r); 1064 result = dma_sb_region_free(r);
1065 BUG_ON(result); 1065 BUG_ON(result);
1066 1066
1067 return result; 1067 return result;
1068 } 1068 }
1069 1069
1070 /** 1070 /**
1071 * dma_sb_map_area_linear - Map an area of memory into a device dma region. 1071 * dma_sb_map_area_linear - Map an area of memory into a device dma region.
1072 * @r: Pointer to a struct ps3_dma_region. 1072 * @r: Pointer to a struct ps3_dma_region.
1073 * @virt_addr: Starting virtual address of the area to map. 1073 * @virt_addr: Starting virtual address of the area to map.
1074 * @len: Length in bytes of the area to map. 1074 * @len: Length in bytes of the area to map.
1075 * @bus_addr: A pointer to return the starting ioc bus address of the area to 1075 * @bus_addr: A pointer to return the starting ioc bus address of the area to
1076 * map. 1076 * map.
1077 * 1077 *
1078 * This routine just returns the corresponding bus address. Actual mapping 1078 * This routine just returns the corresponding bus address. Actual mapping
1079 * occurs in dma_sb_region_create_linear(). 1079 * occurs in dma_sb_region_create_linear().
1080 */ 1080 */
1081 1081
1082 static int dma_sb_map_area_linear(struct ps3_dma_region *r, 1082 static int dma_sb_map_area_linear(struct ps3_dma_region *r,
1083 unsigned long virt_addr, unsigned long len, dma_addr_t *bus_addr, 1083 unsigned long virt_addr, unsigned long len, dma_addr_t *bus_addr,
1084 u64 iopte_flag) 1084 u64 iopte_flag)
1085 { 1085 {
1086 unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr) 1086 unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
1087 : virt_addr; 1087 : virt_addr;
1088 *bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr)); 1088 *bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
1089 return 0; 1089 return 0;
1090 } 1090 }
1091 1091
1092 /** 1092 /**
1093 * dma_sb_unmap_area_linear - Unmap an area of memory from a device dma region. 1093 * dma_sb_unmap_area_linear - Unmap an area of memory from a device dma region.
1094 * @r: Pointer to a struct ps3_dma_region. 1094 * @r: Pointer to a struct ps3_dma_region.
1095 * @bus_addr: The starting ioc bus address of the area to unmap. 1095 * @bus_addr: The starting ioc bus address of the area to unmap.
1096 * @len: Length in bytes of the area to unmap. 1096 * @len: Length in bytes of the area to unmap.
1097 * 1097 *
1098 * This routine does nothing. Unmapping occurs in dma_sb_region_free_linear(). 1098 * This routine does nothing. Unmapping occurs in dma_sb_region_free_linear().
1099 */ 1099 */
1100 1100
1101 static int dma_sb_unmap_area_linear(struct ps3_dma_region *r, 1101 static int dma_sb_unmap_area_linear(struct ps3_dma_region *r,
1102 dma_addr_t bus_addr, unsigned long len) 1102 dma_addr_t bus_addr, unsigned long len)
1103 { 1103 {
1104 return 0; 1104 return 0;
1105 }; 1105 };
1106 1106
1107 static const struct ps3_dma_region_ops ps3_dma_sb_region_ops = { 1107 static const struct ps3_dma_region_ops ps3_dma_sb_region_ops = {
1108 .create = dma_sb_region_create, 1108 .create = dma_sb_region_create,
1109 .free = dma_sb_region_free, 1109 .free = dma_sb_region_free,
1110 .map = dma_sb_map_area, 1110 .map = dma_sb_map_area,
1111 .unmap = dma_sb_unmap_area 1111 .unmap = dma_sb_unmap_area
1112 }; 1112 };
1113 1113
1114 static const struct ps3_dma_region_ops ps3_dma_sb_region_linear_ops = { 1114 static const struct ps3_dma_region_ops ps3_dma_sb_region_linear_ops = {
1115 .create = dma_sb_region_create_linear, 1115 .create = dma_sb_region_create_linear,
1116 .free = dma_sb_region_free_linear, 1116 .free = dma_sb_region_free_linear,
1117 .map = dma_sb_map_area_linear, 1117 .map = dma_sb_map_area_linear,
1118 .unmap = dma_sb_unmap_area_linear 1118 .unmap = dma_sb_unmap_area_linear
1119 }; 1119 };
1120 1120
1121 static const struct ps3_dma_region_ops ps3_dma_ioc0_region_ops = { 1121 static const struct ps3_dma_region_ops ps3_dma_ioc0_region_ops = {
1122 .create = dma_ioc0_region_create, 1122 .create = dma_ioc0_region_create,
1123 .free = dma_ioc0_region_free, 1123 .free = dma_ioc0_region_free,
1124 .map = dma_ioc0_map_area, 1124 .map = dma_ioc0_map_area,
1125 .unmap = dma_ioc0_unmap_area 1125 .unmap = dma_ioc0_unmap_area
1126 }; 1126 };
1127 1127
1128 int ps3_dma_region_init(struct ps3_system_bus_device *dev, 1128 int ps3_dma_region_init(struct ps3_system_bus_device *dev,
1129 struct ps3_dma_region *r, enum ps3_dma_page_size page_size, 1129 struct ps3_dma_region *r, enum ps3_dma_page_size page_size,
1130 enum ps3_dma_region_type region_type, void *addr, unsigned long len) 1130 enum ps3_dma_region_type region_type, void *addr, unsigned long len)
1131 { 1131 {
1132 unsigned long lpar_addr; 1132 unsigned long lpar_addr;
1133 1133
1134 lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0; 1134 lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0;
1135 1135
1136 r->dev = dev; 1136 r->dev = dev;
1137 r->page_size = page_size; 1137 r->page_size = page_size;
1138 r->region_type = region_type; 1138 r->region_type = region_type;
1139 r->offset = lpar_addr; 1139 r->offset = lpar_addr;
1140 if (r->offset >= map.rm.size) 1140 if (r->offset >= map.rm.size)
1141 r->offset -= map.r1.offset; 1141 r->offset -= map.r1.offset;
1142 r->len = len ? len : _ALIGN_UP(map.total, 1 << r->page_size); 1142 r->len = len ? len : _ALIGN_UP(map.total, 1 << r->page_size);
1143 1143
1144 switch (dev->dev_type) { 1144 switch (dev->dev_type) {
1145 case PS3_DEVICE_TYPE_SB: 1145 case PS3_DEVICE_TYPE_SB:
1146 r->region_ops = (USE_DYNAMIC_DMA) 1146 r->region_ops = (USE_DYNAMIC_DMA)
1147 ? &ps3_dma_sb_region_ops 1147 ? &ps3_dma_sb_region_ops
1148 : &ps3_dma_sb_region_linear_ops; 1148 : &ps3_dma_sb_region_linear_ops;
1149 break; 1149 break;
1150 case PS3_DEVICE_TYPE_IOC0: 1150 case PS3_DEVICE_TYPE_IOC0:
1151 r->region_ops = &ps3_dma_ioc0_region_ops; 1151 r->region_ops = &ps3_dma_ioc0_region_ops;
1152 break; 1152 break;
1153 default: 1153 default:
1154 BUG(); 1154 BUG();
1155 return -EINVAL; 1155 return -EINVAL;
1156 } 1156 }
1157 return 0; 1157 return 0;
1158 } 1158 }
1159 EXPORT_SYMBOL(ps3_dma_region_init); 1159 EXPORT_SYMBOL(ps3_dma_region_init);
1160 1160
1161 int ps3_dma_region_create(struct ps3_dma_region *r) 1161 int ps3_dma_region_create(struct ps3_dma_region *r)
1162 { 1162 {
1163 BUG_ON(!r); 1163 BUG_ON(!r);
1164 BUG_ON(!r->region_ops); 1164 BUG_ON(!r->region_ops);
1165 BUG_ON(!r->region_ops->create); 1165 BUG_ON(!r->region_ops->create);
1166 return r->region_ops->create(r); 1166 return r->region_ops->create(r);
1167 } 1167 }
1168 EXPORT_SYMBOL(ps3_dma_region_create); 1168 EXPORT_SYMBOL(ps3_dma_region_create);
1169 1169
1170 int ps3_dma_region_free(struct ps3_dma_region *r) 1170 int ps3_dma_region_free(struct ps3_dma_region *r)
1171 { 1171 {
1172 BUG_ON(!r); 1172 BUG_ON(!r);
1173 BUG_ON(!r->region_ops); 1173 BUG_ON(!r->region_ops);
1174 BUG_ON(!r->region_ops->free); 1174 BUG_ON(!r->region_ops->free);
1175 return r->region_ops->free(r); 1175 return r->region_ops->free(r);
1176 } 1176 }
1177 EXPORT_SYMBOL(ps3_dma_region_free); 1177 EXPORT_SYMBOL(ps3_dma_region_free);
1178 1178
1179 int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr, 1179 int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
1180 unsigned long len, dma_addr_t *bus_addr, 1180 unsigned long len, dma_addr_t *bus_addr,
1181 u64 iopte_flag) 1181 u64 iopte_flag)
1182 { 1182 {
1183 return r->region_ops->map(r, virt_addr, len, bus_addr, iopte_flag); 1183 return r->region_ops->map(r, virt_addr, len, bus_addr, iopte_flag);
1184 } 1184 }
1185 1185
1186 int ps3_dma_unmap(struct ps3_dma_region *r, dma_addr_t bus_addr, 1186 int ps3_dma_unmap(struct ps3_dma_region *r, dma_addr_t bus_addr,
1187 unsigned long len) 1187 unsigned long len)
1188 { 1188 {
1189 return r->region_ops->unmap(r, bus_addr, len); 1189 return r->region_ops->unmap(r, bus_addr, len);
1190 } 1190 }
1191 1191
1192 /*============================================================================*/ 1192 /*============================================================================*/
1193 /* system startup routines */ 1193 /* system startup routines */
1194 /*============================================================================*/ 1194 /*============================================================================*/
1195 1195
1196 /** 1196 /**
1197 * ps3_mm_init - initialize the address space state variables 1197 * ps3_mm_init - initialize the address space state variables
1198 */ 1198 */
1199 1199
1200 void __init ps3_mm_init(void) 1200 void __init ps3_mm_init(void)
1201 { 1201 {
1202 int result; 1202 int result;
1203 1203
1204 DBG(" -> %s:%d\n", __func__, __LINE__); 1204 DBG(" -> %s:%d\n", __func__, __LINE__);
1205 1205
1206 result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size, 1206 result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,
1207 &map.total); 1207 &map.total);
1208 1208
1209 if (result) 1209 if (result)
1210 panic("ps3_repository_read_mm_info() failed"); 1210 panic("ps3_repository_read_mm_info() failed");
1211 1211
1212 map.rm.offset = map.rm.base; 1212 map.rm.offset = map.rm.base;
1213 map.vas_id = map.htab_size = 0; 1213 map.vas_id = map.htab_size = 0;
1214 1214
1215 /* this implementation assumes map.rm.base is zero */ 1215 /* this implementation assumes map.rm.base is zero */
1216 1216
1217 BUG_ON(map.rm.base); 1217 BUG_ON(map.rm.base);
1218 BUG_ON(!map.rm.size); 1218 BUG_ON(!map.rm.size);
1219 1219
1220 1220
1221 /* arrange to do this in ps3_mm_add_memory */ 1221 /* arrange to do this in ps3_mm_add_memory */
1222 ps3_mm_region_create(&map.r1, map.total - map.rm.size); 1222 ps3_mm_region_create(&map.r1, map.total - map.rm.size);
1223 1223
1224 /* correct map.total for the real total amount of memory we use */ 1224 /* correct map.total for the real total amount of memory we use */
1225 map.total = map.rm.size + map.r1.size; 1225 map.total = map.rm.size + map.r1.size;
1226 1226
1227 DBG(" <- %s:%d\n", __func__, __LINE__); 1227 DBG(" <- %s:%d\n", __func__, __LINE__);
1228 } 1228 }
1229 1229
1230 /** 1230 /**
1231 * ps3_mm_shutdown - final cleanup of address space 1231 * ps3_mm_shutdown - final cleanup of address space
1232 */ 1232 */
1233 1233
1234 void ps3_mm_shutdown(void) 1234 void ps3_mm_shutdown(void)
1235 { 1235 {
1236 ps3_mm_region_destroy(&map.r1); 1236 ps3_mm_region_destroy(&map.r1);
1237 } 1237 }
1238 1238
arch/powerpc/platforms/ps3/system-bus.c
1 /* 1 /*
2 * PS3 system bus driver. 2 * PS3 system bus driver.
3 * 3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc. 4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp. 5 * Copyright 2006 Sony Corp.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License. 9 * the Free Software Foundation; version 2 of the License.
10 * 10 *
11 * This program is distributed in the hope that it will be useful, 11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 * 15 *
16 * You should have received a copy of the GNU General Public License 16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software 17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */ 19 */
20 20
21 #include <linux/kernel.h> 21 #include <linux/kernel.h>
22 #include <linux/init.h> 22 #include <linux/init.h>
23 #include <linux/module.h> 23 #include <linux/export.h>
24 #include <linux/dma-mapping.h> 24 #include <linux/dma-mapping.h>
25 #include <linux/err.h> 25 #include <linux/err.h>
26 #include <linux/slab.h> 26 #include <linux/slab.h>
27 27
28 #include <asm/udbg.h> 28 #include <asm/udbg.h>
29 #include <asm/lv1call.h> 29 #include <asm/lv1call.h>
30 #include <asm/firmware.h> 30 #include <asm/firmware.h>
31 #include <asm/cell-regs.h> 31 #include <asm/cell-regs.h>
32 32
33 #include "platform.h" 33 #include "platform.h"
34 34
35 static struct device ps3_system_bus = { 35 static struct device ps3_system_bus = {
36 .init_name = "ps3_system", 36 .init_name = "ps3_system",
37 }; 37 };
38 38
39 /* FIXME: need device usage counters! */ 39 /* FIXME: need device usage counters! */
40 struct { 40 struct {
41 struct mutex mutex; 41 struct mutex mutex;
42 int sb_11; /* usb 0 */ 42 int sb_11; /* usb 0 */
43 int sb_12; /* usb 0 */ 43 int sb_12; /* usb 0 */
44 int gpu; 44 int gpu;
45 } static usage_hack; 45 } static usage_hack;
46 46
47 static int ps3_is_device(struct ps3_system_bus_device *dev, u64 bus_id, 47 static int ps3_is_device(struct ps3_system_bus_device *dev, u64 bus_id,
48 u64 dev_id) 48 u64 dev_id)
49 { 49 {
50 return dev->bus_id == bus_id && dev->dev_id == dev_id; 50 return dev->bus_id == bus_id && dev->dev_id == dev_id;
51 } 51 }
52 52
53 static int ps3_open_hv_device_sb(struct ps3_system_bus_device *dev) 53 static int ps3_open_hv_device_sb(struct ps3_system_bus_device *dev)
54 { 54 {
55 int result; 55 int result;
56 56
57 BUG_ON(!dev->bus_id); 57 BUG_ON(!dev->bus_id);
58 mutex_lock(&usage_hack.mutex); 58 mutex_lock(&usage_hack.mutex);
59 59
60 if (ps3_is_device(dev, 1, 1)) { 60 if (ps3_is_device(dev, 1, 1)) {
61 usage_hack.sb_11++; 61 usage_hack.sb_11++;
62 if (usage_hack.sb_11 > 1) { 62 if (usage_hack.sb_11 > 1) {
63 result = 0; 63 result = 0;
64 goto done; 64 goto done;
65 } 65 }
66 } 66 }
67 67
68 if (ps3_is_device(dev, 1, 2)) { 68 if (ps3_is_device(dev, 1, 2)) {
69 usage_hack.sb_12++; 69 usage_hack.sb_12++;
70 if (usage_hack.sb_12 > 1) { 70 if (usage_hack.sb_12 > 1) {
71 result = 0; 71 result = 0;
72 goto done; 72 goto done;
73 } 73 }
74 } 74 }
75 75
76 result = lv1_open_device(dev->bus_id, dev->dev_id, 0); 76 result = lv1_open_device(dev->bus_id, dev->dev_id, 0);
77 77
78 if (result) { 78 if (result) {
79 pr_debug("%s:%d: lv1_open_device failed: %s\n", __func__, 79 pr_debug("%s:%d: lv1_open_device failed: %s\n", __func__,
80 __LINE__, ps3_result(result)); 80 __LINE__, ps3_result(result));
81 result = -EPERM; 81 result = -EPERM;
82 } 82 }
83 83
84 done: 84 done:
85 mutex_unlock(&usage_hack.mutex); 85 mutex_unlock(&usage_hack.mutex);
86 return result; 86 return result;
87 } 87 }
88 88
89 static int ps3_close_hv_device_sb(struct ps3_system_bus_device *dev) 89 static int ps3_close_hv_device_sb(struct ps3_system_bus_device *dev)
90 { 90 {
91 int result; 91 int result;
92 92
93 BUG_ON(!dev->bus_id); 93 BUG_ON(!dev->bus_id);
94 mutex_lock(&usage_hack.mutex); 94 mutex_lock(&usage_hack.mutex);
95 95
96 if (ps3_is_device(dev, 1, 1)) { 96 if (ps3_is_device(dev, 1, 1)) {
97 usage_hack.sb_11--; 97 usage_hack.sb_11--;
98 if (usage_hack.sb_11) { 98 if (usage_hack.sb_11) {
99 result = 0; 99 result = 0;
100 goto done; 100 goto done;
101 } 101 }
102 } 102 }
103 103
104 if (ps3_is_device(dev, 1, 2)) { 104 if (ps3_is_device(dev, 1, 2)) {
105 usage_hack.sb_12--; 105 usage_hack.sb_12--;
106 if (usage_hack.sb_12) { 106 if (usage_hack.sb_12) {
107 result = 0; 107 result = 0;
108 goto done; 108 goto done;
109 } 109 }
110 } 110 }
111 111
112 result = lv1_close_device(dev->bus_id, dev->dev_id); 112 result = lv1_close_device(dev->bus_id, dev->dev_id);
113 BUG_ON(result); 113 BUG_ON(result);
114 114
115 done: 115 done:
116 mutex_unlock(&usage_hack.mutex); 116 mutex_unlock(&usage_hack.mutex);
117 return result; 117 return result;
118 } 118 }
119 119
120 static int ps3_open_hv_device_gpu(struct ps3_system_bus_device *dev) 120 static int ps3_open_hv_device_gpu(struct ps3_system_bus_device *dev)
121 { 121 {
122 int result; 122 int result;
123 123
124 mutex_lock(&usage_hack.mutex); 124 mutex_lock(&usage_hack.mutex);
125 125
126 usage_hack.gpu++; 126 usage_hack.gpu++;
127 if (usage_hack.gpu > 1) { 127 if (usage_hack.gpu > 1) {
128 result = 0; 128 result = 0;
129 goto done; 129 goto done;
130 } 130 }
131 131
132 result = lv1_gpu_open(0); 132 result = lv1_gpu_open(0);
133 133
134 if (result) { 134 if (result) {
135 pr_debug("%s:%d: lv1_gpu_open failed: %s\n", __func__, 135 pr_debug("%s:%d: lv1_gpu_open failed: %s\n", __func__,
136 __LINE__, ps3_result(result)); 136 __LINE__, ps3_result(result));
137 result = -EPERM; 137 result = -EPERM;
138 } 138 }
139 139
140 done: 140 done:
141 mutex_unlock(&usage_hack.mutex); 141 mutex_unlock(&usage_hack.mutex);
142 return result; 142 return result;
143 } 143 }
144 144
145 static int ps3_close_hv_device_gpu(struct ps3_system_bus_device *dev) 145 static int ps3_close_hv_device_gpu(struct ps3_system_bus_device *dev)
146 { 146 {
147 int result; 147 int result;
148 148
149 mutex_lock(&usage_hack.mutex); 149 mutex_lock(&usage_hack.mutex);
150 150
151 usage_hack.gpu--; 151 usage_hack.gpu--;
152 if (usage_hack.gpu) { 152 if (usage_hack.gpu) {
153 result = 0; 153 result = 0;
154 goto done; 154 goto done;
155 } 155 }
156 156
157 result = lv1_gpu_close(); 157 result = lv1_gpu_close();
158 BUG_ON(result); 158 BUG_ON(result);
159 159
160 done: 160 done:
161 mutex_unlock(&usage_hack.mutex); 161 mutex_unlock(&usage_hack.mutex);
162 return result; 162 return result;
163 } 163 }
164 164
165 int ps3_open_hv_device(struct ps3_system_bus_device *dev) 165 int ps3_open_hv_device(struct ps3_system_bus_device *dev)
166 { 166 {
167 BUG_ON(!dev); 167 BUG_ON(!dev);
168 pr_debug("%s:%d: match_id: %u\n", __func__, __LINE__, dev->match_id); 168 pr_debug("%s:%d: match_id: %u\n", __func__, __LINE__, dev->match_id);
169 169
170 switch (dev->match_id) { 170 switch (dev->match_id) {
171 case PS3_MATCH_ID_EHCI: 171 case PS3_MATCH_ID_EHCI:
172 case PS3_MATCH_ID_OHCI: 172 case PS3_MATCH_ID_OHCI:
173 case PS3_MATCH_ID_GELIC: 173 case PS3_MATCH_ID_GELIC:
174 case PS3_MATCH_ID_STOR_DISK: 174 case PS3_MATCH_ID_STOR_DISK:
175 case PS3_MATCH_ID_STOR_ROM: 175 case PS3_MATCH_ID_STOR_ROM:
176 case PS3_MATCH_ID_STOR_FLASH: 176 case PS3_MATCH_ID_STOR_FLASH:
177 return ps3_open_hv_device_sb(dev); 177 return ps3_open_hv_device_sb(dev);
178 178
179 case PS3_MATCH_ID_SOUND: 179 case PS3_MATCH_ID_SOUND:
180 case PS3_MATCH_ID_GPU: 180 case PS3_MATCH_ID_GPU:
181 return ps3_open_hv_device_gpu(dev); 181 return ps3_open_hv_device_gpu(dev);
182 182
183 case PS3_MATCH_ID_AV_SETTINGS: 183 case PS3_MATCH_ID_AV_SETTINGS:
184 case PS3_MATCH_ID_SYSTEM_MANAGER: 184 case PS3_MATCH_ID_SYSTEM_MANAGER:
185 pr_debug("%s:%d: unsupported match_id: %u\n", __func__, 185 pr_debug("%s:%d: unsupported match_id: %u\n", __func__,
186 __LINE__, dev->match_id); 186 __LINE__, dev->match_id);
187 pr_debug("%s:%d: bus_id: %llu\n", __func__, __LINE__, 187 pr_debug("%s:%d: bus_id: %llu\n", __func__, __LINE__,
188 dev->bus_id); 188 dev->bus_id);
189 BUG(); 189 BUG();
190 return -EINVAL; 190 return -EINVAL;
191 191
192 default: 192 default:
193 break; 193 break;
194 } 194 }
195 195
196 pr_debug("%s:%d: unknown match_id: %u\n", __func__, __LINE__, 196 pr_debug("%s:%d: unknown match_id: %u\n", __func__, __LINE__,
197 dev->match_id); 197 dev->match_id);
198 BUG(); 198 BUG();
199 return -ENODEV; 199 return -ENODEV;
200 } 200 }
201 EXPORT_SYMBOL_GPL(ps3_open_hv_device); 201 EXPORT_SYMBOL_GPL(ps3_open_hv_device);
202 202
203 int ps3_close_hv_device(struct ps3_system_bus_device *dev) 203 int ps3_close_hv_device(struct ps3_system_bus_device *dev)
204 { 204 {
205 BUG_ON(!dev); 205 BUG_ON(!dev);
206 pr_debug("%s:%d: match_id: %u\n", __func__, __LINE__, dev->match_id); 206 pr_debug("%s:%d: match_id: %u\n", __func__, __LINE__, dev->match_id);
207 207
208 switch (dev->match_id) { 208 switch (dev->match_id) {
209 case PS3_MATCH_ID_EHCI: 209 case PS3_MATCH_ID_EHCI:
210 case PS3_MATCH_ID_OHCI: 210 case PS3_MATCH_ID_OHCI:
211 case PS3_MATCH_ID_GELIC: 211 case PS3_MATCH_ID_GELIC:
212 case PS3_MATCH_ID_STOR_DISK: 212 case PS3_MATCH_ID_STOR_DISK:
213 case PS3_MATCH_ID_STOR_ROM: 213 case PS3_MATCH_ID_STOR_ROM:
214 case PS3_MATCH_ID_STOR_FLASH: 214 case PS3_MATCH_ID_STOR_FLASH:
215 return ps3_close_hv_device_sb(dev); 215 return ps3_close_hv_device_sb(dev);
216 216
217 case PS3_MATCH_ID_SOUND: 217 case PS3_MATCH_ID_SOUND:
218 case PS3_MATCH_ID_GPU: 218 case PS3_MATCH_ID_GPU:
219 return ps3_close_hv_device_gpu(dev); 219 return ps3_close_hv_device_gpu(dev);
220 220
221 case PS3_MATCH_ID_AV_SETTINGS: 221 case PS3_MATCH_ID_AV_SETTINGS:
222 case PS3_MATCH_ID_SYSTEM_MANAGER: 222 case PS3_MATCH_ID_SYSTEM_MANAGER:
223 pr_debug("%s:%d: unsupported match_id: %u\n", __func__, 223 pr_debug("%s:%d: unsupported match_id: %u\n", __func__,
224 __LINE__, dev->match_id); 224 __LINE__, dev->match_id);
225 pr_debug("%s:%d: bus_id: %llu\n", __func__, __LINE__, 225 pr_debug("%s:%d: bus_id: %llu\n", __func__, __LINE__,
226 dev->bus_id); 226 dev->bus_id);
227 BUG(); 227 BUG();
228 return -EINVAL; 228 return -EINVAL;
229 229
230 default: 230 default:
231 break; 231 break;
232 } 232 }
233 233
234 pr_debug("%s:%d: unknown match_id: %u\n", __func__, __LINE__, 234 pr_debug("%s:%d: unknown match_id: %u\n", __func__, __LINE__,
235 dev->match_id); 235 dev->match_id);
236 BUG(); 236 BUG();
237 return -ENODEV; 237 return -ENODEV;
238 } 238 }
239 EXPORT_SYMBOL_GPL(ps3_close_hv_device); 239 EXPORT_SYMBOL_GPL(ps3_close_hv_device);
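ps3_open_hv_device() and ps3_close_hv_device(), exported above, are normally bracketed around a driver's probe and remove paths. A hedged sketch with invented names (my_probe/my_remove are not part of any real driver):

/* Hypothetical probe/remove pair using the hv device open/close helpers. */
static int my_probe(struct ps3_system_bus_device *dev)
{
	int result;

	result = ps3_open_hv_device(dev);	/* lv1_open_device under the hood */
	if (result)
		return result;

	/* ... set up interrupts, dma and mmio regions here ... */
	return 0;
}

static int my_remove(struct ps3_system_bus_device *dev)
{
	/* ... tear down resources in reverse order ... */
	return ps3_close_hv_device(dev);
}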
240 240
241 #define dump_mmio_region(_a) _dump_mmio_region(_a, __func__, __LINE__) 241 #define dump_mmio_region(_a) _dump_mmio_region(_a, __func__, __LINE__)
242 static void _dump_mmio_region(const struct ps3_mmio_region* r, 242 static void _dump_mmio_region(const struct ps3_mmio_region* r,
243 const char* func, int line) 243 const char* func, int line)
244 { 244 {
245 pr_debug("%s:%d: dev %llu:%llu\n", func, line, r->dev->bus_id, 245 pr_debug("%s:%d: dev %llu:%llu\n", func, line, r->dev->bus_id,
246 r->dev->dev_id); 246 r->dev->dev_id);
247 pr_debug("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr); 247 pr_debug("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr);
248 pr_debug("%s:%d: len %lxh\n", func, line, r->len); 248 pr_debug("%s:%d: len %lxh\n", func, line, r->len);
249 pr_debug("%s:%d: lpar_addr %lxh\n", func, line, r->lpar_addr); 249 pr_debug("%s:%d: lpar_addr %lxh\n", func, line, r->lpar_addr);
250 } 250 }
251 251
252 static int ps3_sb_mmio_region_create(struct ps3_mmio_region *r) 252 static int ps3_sb_mmio_region_create(struct ps3_mmio_region *r)
253 { 253 {
254 int result; 254 int result;
255 u64 lpar_addr; 255 u64 lpar_addr;
256 256
257 result = lv1_map_device_mmio_region(r->dev->bus_id, r->dev->dev_id, 257 result = lv1_map_device_mmio_region(r->dev->bus_id, r->dev->dev_id,
258 r->bus_addr, r->len, r->page_size, &lpar_addr); 258 r->bus_addr, r->len, r->page_size, &lpar_addr);
259 r->lpar_addr = lpar_addr; 259 r->lpar_addr = lpar_addr;
260 260
261 if (result) { 261 if (result) {
262 pr_debug("%s:%d: lv1_map_device_mmio_region failed: %s\n", 262 pr_debug("%s:%d: lv1_map_device_mmio_region failed: %s\n",
263 __func__, __LINE__, ps3_result(result)); 263 __func__, __LINE__, ps3_result(result));
264 r->lpar_addr = 0; 264 r->lpar_addr = 0;
265 } 265 }
266 266
267 dump_mmio_region(r); 267 dump_mmio_region(r);
268 return result; 268 return result;
269 } 269 }
270 270
271 static int ps3_ioc0_mmio_region_create(struct ps3_mmio_region *r) 271 static int ps3_ioc0_mmio_region_create(struct ps3_mmio_region *r)
272 { 272 {
273 /* device specific; do nothing currently */ 273 /* device specific; do nothing currently */
274 return 0; 274 return 0;
275 } 275 }
276 276
277 int ps3_mmio_region_create(struct ps3_mmio_region *r) 277 int ps3_mmio_region_create(struct ps3_mmio_region *r)
278 { 278 {
279 return r->mmio_ops->create(r); 279 return r->mmio_ops->create(r);
280 } 280 }
281 EXPORT_SYMBOL_GPL(ps3_mmio_region_create); 281 EXPORT_SYMBOL_GPL(ps3_mmio_region_create);
282 282
283 static int ps3_sb_free_mmio_region(struct ps3_mmio_region *r) 283 static int ps3_sb_free_mmio_region(struct ps3_mmio_region *r)
284 { 284 {
285 int result; 285 int result;
286 286
287 dump_mmio_region(r); 287 dump_mmio_region(r);
288 result = lv1_unmap_device_mmio_region(r->dev->bus_id, r->dev->dev_id, 288 result = lv1_unmap_device_mmio_region(r->dev->bus_id, r->dev->dev_id,
289 r->lpar_addr); 289 r->lpar_addr);
290 290
291 if (result) 291 if (result)
292 pr_debug("%s:%d: lv1_unmap_device_mmio_region failed: %s\n", 292 pr_debug("%s:%d: lv1_unmap_device_mmio_region failed: %s\n",
293 __func__, __LINE__, ps3_result(result)); 293 __func__, __LINE__, ps3_result(result));
294 294
295 r->lpar_addr = 0; 295 r->lpar_addr = 0;
296 return result; 296 return result;
297 } 297 }
298 298
299 static int ps3_ioc0_free_mmio_region(struct ps3_mmio_region *r) 299 static int ps3_ioc0_free_mmio_region(struct ps3_mmio_region *r)
300 { 300 {
301 /* device specific; do nothing currently */ 301 /* device specific; do nothing currently */
302 return 0; 302 return 0;
303 } 303 }
304 304
305 305
306 int ps3_free_mmio_region(struct ps3_mmio_region *r) 306 int ps3_free_mmio_region(struct ps3_mmio_region *r)
307 { 307 {
308 return r->mmio_ops->free(r); 308 return r->mmio_ops->free(r);
309 } 309 }
310 310
311 EXPORT_SYMBOL_GPL(ps3_free_mmio_region); 311 EXPORT_SYMBOL_GPL(ps3_free_mmio_region);
312 312
313 static const struct ps3_mmio_region_ops ps3_mmio_sb_region_ops = { 313 static const struct ps3_mmio_region_ops ps3_mmio_sb_region_ops = {
314 .create = ps3_sb_mmio_region_create, 314 .create = ps3_sb_mmio_region_create,
315 .free = ps3_sb_free_mmio_region 315 .free = ps3_sb_free_mmio_region
316 }; 316 };
317 317
318 static const struct ps3_mmio_region_ops ps3_mmio_ioc0_region_ops = { 318 static const struct ps3_mmio_region_ops ps3_mmio_ioc0_region_ops = {
319 .create = ps3_ioc0_mmio_region_create, 319 .create = ps3_ioc0_mmio_region_create,
320 .free = ps3_ioc0_free_mmio_region 320 .free = ps3_ioc0_free_mmio_region
321 }; 321 };
322 322
323 int ps3_mmio_region_init(struct ps3_system_bus_device *dev, 323 int ps3_mmio_region_init(struct ps3_system_bus_device *dev,
324 struct ps3_mmio_region *r, unsigned long bus_addr, unsigned long len, 324 struct ps3_mmio_region *r, unsigned long bus_addr, unsigned long len,
325 enum ps3_mmio_page_size page_size) 325 enum ps3_mmio_page_size page_size)
326 { 326 {
327 r->dev = dev; 327 r->dev = dev;
328 r->bus_addr = bus_addr; 328 r->bus_addr = bus_addr;
329 r->len = len; 329 r->len = len;
330 r->page_size = page_size; 330 r->page_size = page_size;
331 switch (dev->dev_type) { 331 switch (dev->dev_type) {
332 case PS3_DEVICE_TYPE_SB: 332 case PS3_DEVICE_TYPE_SB:
333 r->mmio_ops = &ps3_mmio_sb_region_ops; 333 r->mmio_ops = &ps3_mmio_sb_region_ops;
334 break; 334 break;
335 case PS3_DEVICE_TYPE_IOC0: 335 case PS3_DEVICE_TYPE_IOC0:
336 r->mmio_ops = &ps3_mmio_ioc0_region_ops; 336 r->mmio_ops = &ps3_mmio_ioc0_region_ops;
337 break; 337 break;
338 default: 338 default:
339 BUG(); 339 BUG();
340 return -EINVAL; 340 return -EINVAL;
341 } 341 }
342 return 0; 342 return 0;
343 } 343 }
344 EXPORT_SYMBOL_GPL(ps3_mmio_region_init); 344 EXPORT_SYMBOL_GPL(ps3_mmio_region_init);
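The mmio trio above (ps3_mmio_region_init, ps3_mmio_region_create, ps3_free_mmio_region) is used by drivers to map device registers. The sketch below is illustrative only: the bus address, length, PS3_MMIO_4K page size and my_mmio name are invented values/assumptions.

/* Hypothetical mapping of a device register window via the mmio helpers. */
static struct ps3_mmio_region my_mmio;		/* invented name */

static int my_map_registers(struct ps3_system_bus_device *dev)
{
	int result;

	result = ps3_mmio_region_init(dev, &my_mmio, 0x3010000, 0x1000,
		PS3_MMIO_4K);
	if (result)
		return result;

	result = ps3_mmio_region_create(&my_mmio);	/* lv1 maps the region */
	if (result)
		return result;

	/* my_mmio.lpar_addr can now be ioremap'd by the driver. */
	return 0;
}

static void my_unmap_registers(void)
{
	ps3_free_mmio_region(&my_mmio);
}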
345 345
346 static int ps3_system_bus_match(struct device *_dev, 346 static int ps3_system_bus_match(struct device *_dev,
347 struct device_driver *_drv) 347 struct device_driver *_drv)
348 { 348 {
349 int result; 349 int result;
350 struct ps3_system_bus_driver *drv = ps3_drv_to_system_bus_drv(_drv); 350 struct ps3_system_bus_driver *drv = ps3_drv_to_system_bus_drv(_drv);
351 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); 351 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
352 352
353 if (!dev->match_sub_id) 353 if (!dev->match_sub_id)
354 result = dev->match_id == drv->match_id; 354 result = dev->match_id == drv->match_id;
355 else 355 else
356 result = dev->match_sub_id == drv->match_sub_id && 356 result = dev->match_sub_id == drv->match_sub_id &&
357 dev->match_id == drv->match_id; 357 dev->match_id == drv->match_id;
358 358
359 if (result) 359 if (result)
360 pr_info("%s:%d: dev=%u.%u(%s), drv=%u.%u(%s): match\n", 360 pr_info("%s:%d: dev=%u.%u(%s), drv=%u.%u(%s): match\n",
361 __func__, __LINE__, 361 __func__, __LINE__,
362 dev->match_id, dev->match_sub_id, dev_name(&dev->core), 362 dev->match_id, dev->match_sub_id, dev_name(&dev->core),
363 drv->match_id, drv->match_sub_id, drv->core.name); 363 drv->match_id, drv->match_sub_id, drv->core.name);
364 else 364 else
365 pr_debug("%s:%d: dev=%u.%u(%s), drv=%u.%u(%s): miss\n", 365 pr_debug("%s:%d: dev=%u.%u(%s), drv=%u.%u(%s): miss\n",
366 __func__, __LINE__, 366 __func__, __LINE__,
367 dev->match_id, dev->match_sub_id, dev_name(&dev->core), 367 dev->match_id, dev->match_sub_id, dev_name(&dev->core),
368 drv->match_id, drv->match_sub_id, drv->core.name); 368 drv->match_id, drv->match_sub_id, drv->core.name);
369 369
370 return result; 370 return result;
371 } 371 }
372 372
373 static int ps3_system_bus_probe(struct device *_dev) 373 static int ps3_system_bus_probe(struct device *_dev)
374 { 374 {
375 int result = 0; 375 int result = 0;
376 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); 376 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
377 struct ps3_system_bus_driver *drv; 377 struct ps3_system_bus_driver *drv;
378 378
379 BUG_ON(!dev); 379 BUG_ON(!dev);
380 dev_dbg(_dev, "%s:%d\n", __func__, __LINE__); 380 dev_dbg(_dev, "%s:%d\n", __func__, __LINE__);
381 381
382 drv = ps3_system_bus_dev_to_system_bus_drv(dev); 382 drv = ps3_system_bus_dev_to_system_bus_drv(dev);
383 BUG_ON(!drv); 383 BUG_ON(!drv);
384 384
385 if (drv->probe) 385 if (drv->probe)
386 result = drv->probe(dev); 386 result = drv->probe(dev);
387 else 387 else
388 pr_debug("%s:%d: %s no probe method\n", __func__, __LINE__, 388 pr_debug("%s:%d: %s no probe method\n", __func__, __LINE__,
389 dev_name(&dev->core)); 389 dev_name(&dev->core));
390 390
391 pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev_name(&dev->core)); 391 pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev_name(&dev->core));
392 return result; 392 return result;
393 } 393 }
394 394
395 static int ps3_system_bus_remove(struct device *_dev) 395 static int ps3_system_bus_remove(struct device *_dev)
396 { 396 {
397 int result = 0; 397 int result = 0;
398 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); 398 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
399 struct ps3_system_bus_driver *drv; 399 struct ps3_system_bus_driver *drv;
400 400
401 BUG_ON(!dev); 401 BUG_ON(!dev);
402 dev_dbg(_dev, "%s:%d\n", __func__, __LINE__); 402 dev_dbg(_dev, "%s:%d\n", __func__, __LINE__);
403 403
404 drv = ps3_system_bus_dev_to_system_bus_drv(dev); 404 drv = ps3_system_bus_dev_to_system_bus_drv(dev);
405 BUG_ON(!drv); 405 BUG_ON(!drv);
406 406
407 if (drv->remove) 407 if (drv->remove)
408 result = drv->remove(dev); 408 result = drv->remove(dev);
409 else 409 else
410 dev_dbg(&dev->core, "%s:%d %s: no remove method\n", 410 dev_dbg(&dev->core, "%s:%d %s: no remove method\n",
411 __func__, __LINE__, drv->core.name); 411 __func__, __LINE__, drv->core.name);
412 412
413 pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev_name(&dev->core)); 413 pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev_name(&dev->core));
414 return result; 414 return result;
415 } 415 }
416 416
417 static void ps3_system_bus_shutdown(struct device *_dev) 417 static void ps3_system_bus_shutdown(struct device *_dev)
418 { 418 {
419 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); 419 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
420 struct ps3_system_bus_driver *drv; 420 struct ps3_system_bus_driver *drv;
421 421
422 BUG_ON(!dev); 422 BUG_ON(!dev);
423 423
424 dev_dbg(&dev->core, " -> %s:%d: match_id %d\n", __func__, __LINE__, 424 dev_dbg(&dev->core, " -> %s:%d: match_id %d\n", __func__, __LINE__,
425 dev->match_id); 425 dev->match_id);
426 426
427 if (!dev->core.driver) { 427 if (!dev->core.driver) {
428 dev_dbg(&dev->core, "%s:%d: no driver bound\n", __func__, 428 dev_dbg(&dev->core, "%s:%d: no driver bound\n", __func__,
429 __LINE__); 429 __LINE__);
430 return; 430 return;
431 } 431 }
432 432
433 drv = ps3_system_bus_dev_to_system_bus_drv(dev); 433 drv = ps3_system_bus_dev_to_system_bus_drv(dev);
434 434
435 BUG_ON(!drv); 435 BUG_ON(!drv);
436 436
437 dev_dbg(&dev->core, "%s:%d: %s -> %s\n", __func__, __LINE__, 437 dev_dbg(&dev->core, "%s:%d: %s -> %s\n", __func__, __LINE__,
438 dev_name(&dev->core), drv->core.name); 438 dev_name(&dev->core), drv->core.name);
439 439
440 if (drv->shutdown) 440 if (drv->shutdown)
441 drv->shutdown(dev); 441 drv->shutdown(dev);
442 else if (drv->remove) { 442 else if (drv->remove) {
443 dev_dbg(&dev->core, "%s:%d %s: no shutdown, calling remove\n", 443 dev_dbg(&dev->core, "%s:%d %s: no shutdown, calling remove\n",
444 __func__, __LINE__, drv->core.name); 444 __func__, __LINE__, drv->core.name);
445 drv->remove(dev); 445 drv->remove(dev);
446 } else { 446 } else {
447 dev_dbg(&dev->core, "%s:%d %s: no shutdown method\n", 447 dev_dbg(&dev->core, "%s:%d %s: no shutdown method\n",
448 __func__, __LINE__, drv->core.name); 448 __func__, __LINE__, drv->core.name);
449 BUG(); 449 BUG();
450 } 450 }
451 451
452 dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__); 452 dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
453 } 453 }
454 454
455 static int ps3_system_bus_uevent(struct device *_dev, struct kobj_uevent_env *env) 455 static int ps3_system_bus_uevent(struct device *_dev, struct kobj_uevent_env *env)
456 { 456 {
457 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); 457 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
458 458
459 if (add_uevent_var(env, "MODALIAS=ps3:%d:%d", dev->match_id, 459 if (add_uevent_var(env, "MODALIAS=ps3:%d:%d", dev->match_id,
460 dev->match_sub_id)) 460 dev->match_sub_id))
461 return -ENOMEM; 461 return -ENOMEM;
462 return 0; 462 return 0;
463 } 463 }
464 464
465 static ssize_t modalias_show(struct device *_dev, struct device_attribute *a, 465 static ssize_t modalias_show(struct device *_dev, struct device_attribute *a,
466 char *buf) 466 char *buf)
467 { 467 {
468 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); 468 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
469 int len = snprintf(buf, PAGE_SIZE, "ps3:%d:%d\n", dev->match_id, 469 int len = snprintf(buf, PAGE_SIZE, "ps3:%d:%d\n", dev->match_id,
470 dev->match_sub_id); 470 dev->match_sub_id);
471 471
472 return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len; 472 return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
473 } 473 }
474 474
475 static struct device_attribute ps3_system_bus_dev_attrs[] = { 475 static struct device_attribute ps3_system_bus_dev_attrs[] = {
476 __ATTR_RO(modalias), 476 __ATTR_RO(modalias),
477 __ATTR_NULL, 477 __ATTR_NULL,
478 }; 478 };
479 479
480 struct bus_type ps3_system_bus_type = { 480 struct bus_type ps3_system_bus_type = {
481 .name = "ps3_system_bus", 481 .name = "ps3_system_bus",
482 .match = ps3_system_bus_match, 482 .match = ps3_system_bus_match,
483 .uevent = ps3_system_bus_uevent, 483 .uevent = ps3_system_bus_uevent,
484 .probe = ps3_system_bus_probe, 484 .probe = ps3_system_bus_probe,
485 .remove = ps3_system_bus_remove, 485 .remove = ps3_system_bus_remove,
486 .shutdown = ps3_system_bus_shutdown, 486 .shutdown = ps3_system_bus_shutdown,
487 .dev_attrs = ps3_system_bus_dev_attrs, 487 .dev_attrs = ps3_system_bus_dev_attrs,
488 }; 488 };
489 489
490 static int __init ps3_system_bus_init(void) 490 static int __init ps3_system_bus_init(void)
491 { 491 {
492 int result; 492 int result;
493 493
494 if (!firmware_has_feature(FW_FEATURE_PS3_LV1)) 494 if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
495 return -ENODEV; 495 return -ENODEV;
496 496
497 pr_debug(" -> %s:%d\n", __func__, __LINE__); 497 pr_debug(" -> %s:%d\n", __func__, __LINE__);
498 498
499 mutex_init(&usage_hack.mutex); 499 mutex_init(&usage_hack.mutex);
500 500
501 result = device_register(&ps3_system_bus); 501 result = device_register(&ps3_system_bus);
502 BUG_ON(result); 502 BUG_ON(result);
503 503
504 result = bus_register(&ps3_system_bus_type); 504 result = bus_register(&ps3_system_bus_type);
505 BUG_ON(result); 505 BUG_ON(result);
506 506
507 pr_debug(" <- %s:%d\n", __func__, __LINE__); 507 pr_debug(" <- %s:%d\n", __func__, __LINE__);
508 return result; 508 return result;
509 } 509 }
510 510
511 core_initcall(ps3_system_bus_init); 511 core_initcall(ps3_system_bus_init);
512 512
513 /* Allocates a contiguous real buffer and creates mappings over it. 513 /* Allocates a contiguous real buffer and creates mappings over it.
514 * Returns the virtual address of the buffer and sets dma_handle 514 * Returns the virtual address of the buffer and sets dma_handle
515 * to the dma address (mapping) of the first page. 515 * to the dma address (mapping) of the first page.
516 */ 516 */
517 static void * ps3_alloc_coherent(struct device *_dev, size_t size, 517 static void * ps3_alloc_coherent(struct device *_dev, size_t size,
518 dma_addr_t *dma_handle, gfp_t flag) 518 dma_addr_t *dma_handle, gfp_t flag)
519 { 519 {
520 int result; 520 int result;
521 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); 521 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
522 unsigned long virt_addr; 522 unsigned long virt_addr;
523 523
524 flag &= ~(__GFP_DMA | __GFP_HIGHMEM); 524 flag &= ~(__GFP_DMA | __GFP_HIGHMEM);
525 flag |= __GFP_ZERO; 525 flag |= __GFP_ZERO;
526 526
527 virt_addr = __get_free_pages(flag, get_order(size)); 527 virt_addr = __get_free_pages(flag, get_order(size));
528 528
529 if (!virt_addr) { 529 if (!virt_addr) {
530 pr_debug("%s:%d: get_free_pages failed\n", __func__, __LINE__); 530 pr_debug("%s:%d: get_free_pages failed\n", __func__, __LINE__);
531 goto clean_none; 531 goto clean_none;
532 } 532 }
533 533
534 result = ps3_dma_map(dev->d_region, virt_addr, size, dma_handle, 534 result = ps3_dma_map(dev->d_region, virt_addr, size, dma_handle,
535 CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | 535 CBE_IOPTE_PP_W | CBE_IOPTE_PP_R |
536 CBE_IOPTE_SO_RW | CBE_IOPTE_M); 536 CBE_IOPTE_SO_RW | CBE_IOPTE_M);
537 537
538 if (result) { 538 if (result) {
539 pr_debug("%s:%d: ps3_dma_map failed (%d)\n", 539 pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
540 __func__, __LINE__, result); 540 __func__, __LINE__, result);
541 BUG_ON("check region type"); 541 BUG_ON("check region type");
542 goto clean_alloc; 542 goto clean_alloc;
543 } 543 }
544 544
545 return (void*)virt_addr; 545 return (void*)virt_addr;
546 546
547 clean_alloc: 547 clean_alloc:
548 free_pages(virt_addr, get_order(size)); 548 free_pages(virt_addr, get_order(size));
549 clean_none: 549 clean_none:
550 dma_handle = NULL; 550 dma_handle = NULL;
551 return NULL; 551 return NULL;
552 } 552 }
553 553
554 static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr, 554 static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr,
555 dma_addr_t dma_handle) 555 dma_addr_t dma_handle)
556 { 556 {
557 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); 557 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
558 558
559 ps3_dma_unmap(dev->d_region, dma_handle, size); 559 ps3_dma_unmap(dev->d_region, dma_handle, size);
560 free_pages((unsigned long)vaddr, get_order(size)); 560 free_pages((unsigned long)vaddr, get_order(size));
561 } 561 }
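Drivers do not call ps3_alloc_coherent()/ps3_free_coherent() directly; they use the generic DMA API on &dev->core, which dispatches through the dma_map_ops installed at registration time. A minimal, hypothetical sketch (buffer size and names are invented):

/* Hypothetical snippet: dma_alloc_coherent() on &dev->core ends up in
 * ps3_alloc_coherent() via the archdata dma_ops set up below. */
#include <linux/dma-mapping.h>

static int my_alloc_ring(struct ps3_system_bus_device *dev)
{
	dma_addr_t ring_bus;
	void *ring;

	ring = dma_alloc_coherent(&dev->core, 4096, &ring_bus, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... program the device with ring_bus, access ring from the cpu ... */

	dma_free_coherent(&dev->core, 4096, ring, ring_bus);
	return 0;
}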
562 562
563 /* Creates TCEs for a user provided buffer. The user buffer must be 563 /* Creates TCEs for a user provided buffer. The user buffer must be
564 * contiguous real kernel storage (not vmalloc). The address passed here 564 * contiguous real kernel storage (not vmalloc). The address passed here
565 * comprises a page address and offset into that page. The dma_addr_t 565 * comprises a page address and offset into that page. The dma_addr_t
566 * returned will point to the same byte within the page as was passed in. 566 * returned will point to the same byte within the page as was passed in.
567 */ 567 */
568 568
569 static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page, 569 static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page,
570 unsigned long offset, size_t size, enum dma_data_direction direction, 570 unsigned long offset, size_t size, enum dma_data_direction direction,
571 struct dma_attrs *attrs) 571 struct dma_attrs *attrs)
572 { 572 {
573 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); 573 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
574 int result; 574 int result;
575 dma_addr_t bus_addr; 575 dma_addr_t bus_addr;
576 void *ptr = page_address(page) + offset; 576 void *ptr = page_address(page) + offset;
577 577
578 result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size, 578 result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size,
579 &bus_addr, 579 &bus_addr,
580 CBE_IOPTE_PP_R | CBE_IOPTE_PP_W | 580 CBE_IOPTE_PP_R | CBE_IOPTE_PP_W |
581 CBE_IOPTE_SO_RW | CBE_IOPTE_M); 581 CBE_IOPTE_SO_RW | CBE_IOPTE_M);
582 582
583 if (result) { 583 if (result) {
584 pr_debug("%s:%d: ps3_dma_map failed (%d)\n", 584 pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
585 __func__, __LINE__, result); 585 __func__, __LINE__, result);
586 } 586 }
587 587
588 return bus_addr; 588 return bus_addr;
589 } 589 }
590 590
591 static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page, 591 static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page,
592 unsigned long offset, size_t size, 592 unsigned long offset, size_t size,
593 enum dma_data_direction direction, 593 enum dma_data_direction direction,
594 struct dma_attrs *attrs) 594 struct dma_attrs *attrs)
595 { 595 {
596 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); 596 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
597 int result; 597 int result;
598 dma_addr_t bus_addr; 598 dma_addr_t bus_addr;
599 u64 iopte_flag; 599 u64 iopte_flag;
600 void *ptr = page_address(page) + offset; 600 void *ptr = page_address(page) + offset;
601 601
602 iopte_flag = CBE_IOPTE_M; 602 iopte_flag = CBE_IOPTE_M;
603 switch (direction) { 603 switch (direction) {
604 case DMA_BIDIRECTIONAL: 604 case DMA_BIDIRECTIONAL:
605 iopte_flag |= CBE_IOPTE_PP_R | CBE_IOPTE_PP_W | CBE_IOPTE_SO_RW; 605 iopte_flag |= CBE_IOPTE_PP_R | CBE_IOPTE_PP_W | CBE_IOPTE_SO_RW;
606 break; 606 break;
607 case DMA_TO_DEVICE: 607 case DMA_TO_DEVICE:
608 iopte_flag |= CBE_IOPTE_PP_R | CBE_IOPTE_SO_R; 608 iopte_flag |= CBE_IOPTE_PP_R | CBE_IOPTE_SO_R;
609 break; 609 break;
610 case DMA_FROM_DEVICE: 610 case DMA_FROM_DEVICE:
611 iopte_flag |= CBE_IOPTE_PP_W | CBE_IOPTE_SO_RW; 611 iopte_flag |= CBE_IOPTE_PP_W | CBE_IOPTE_SO_RW;
612 break; 612 break;
613 default: 613 default:
614 /* not happened */ 614 /* not happened */
615 BUG(); 615 BUG();
616 }; 616 };
617 result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size, 617 result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size,
618 &bus_addr, iopte_flag); 618 &bus_addr, iopte_flag);
619 619
620 if (result) { 620 if (result) {
621 pr_debug("%s:%d: ps3_dma_map failed (%d)\n", 621 pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
622 __func__, __LINE__, result); 622 __func__, __LINE__, result);
623 } 623 }
624 return bus_addr; 624 return bus_addr;
625 } 625 }
626 626
627 static void ps3_unmap_page(struct device *_dev, dma_addr_t dma_addr, 627 static void ps3_unmap_page(struct device *_dev, dma_addr_t dma_addr,
628 size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) 628 size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
629 { 629 {
630 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); 630 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
631 int result; 631 int result;
632 632
633 result = ps3_dma_unmap(dev->d_region, dma_addr, size); 633 result = ps3_dma_unmap(dev->d_region, dma_addr, size);
634 634
635 if (result) { 635 if (result) {
636 pr_debug("%s:%d: ps3_dma_unmap failed (%d)\n", 636 pr_debug("%s:%d: ps3_dma_unmap failed (%d)\n",
637 __func__, __LINE__, result); 637 __func__, __LINE__, result);
638 } 638 }
639 } 639 }
640 640
641 static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl, 641 static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl,
642 int nents, enum dma_data_direction direction, struct dma_attrs *attrs) 642 int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
643 { 643 {
644 #if defined(CONFIG_PS3_DYNAMIC_DMA) 644 #if defined(CONFIG_PS3_DYNAMIC_DMA)
645 BUG_ON("do"); 645 BUG_ON("do");
646 return -EPERM; 646 return -EPERM;
647 #else 647 #else
648 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); 648 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
649 struct scatterlist *sg; 649 struct scatterlist *sg;
650 int i; 650 int i;
651 651
652 for_each_sg(sgl, sg, nents, i) { 652 for_each_sg(sgl, sg, nents, i) {
653 int result = ps3_dma_map(dev->d_region, sg_phys(sg), 653 int result = ps3_dma_map(dev->d_region, sg_phys(sg),
654 sg->length, &sg->dma_address, 0); 654 sg->length, &sg->dma_address, 0);
655 655
656 if (result) { 656 if (result) {
657 pr_debug("%s:%d: ps3_dma_map failed (%d)\n", 657 pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
658 __func__, __LINE__, result); 658 __func__, __LINE__, result);
659 return -EINVAL; 659 return -EINVAL;
660 } 660 }
661 661
662 sg->dma_length = sg->length; 662 sg->dma_length = sg->length;
663 } 663 }
664 664
665 return nents; 665 return nents;
666 #endif 666 #endif
667 } 667 }
668 668
669 static int ps3_ioc0_map_sg(struct device *_dev, struct scatterlist *sg, 669 static int ps3_ioc0_map_sg(struct device *_dev, struct scatterlist *sg,
670 int nents, 670 int nents,
671 enum dma_data_direction direction, 671 enum dma_data_direction direction,
672 struct dma_attrs *attrs) 672 struct dma_attrs *attrs)
673 { 673 {
674 BUG(); 674 BUG();
675 return 0; 675 return 0;
676 } 676 }
677 677
678 static void ps3_sb_unmap_sg(struct device *_dev, struct scatterlist *sg, 678 static void ps3_sb_unmap_sg(struct device *_dev, struct scatterlist *sg,
679 int nents, enum dma_data_direction direction, struct dma_attrs *attrs) 679 int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
680 { 680 {
681 #if defined(CONFIG_PS3_DYNAMIC_DMA) 681 #if defined(CONFIG_PS3_DYNAMIC_DMA)
682 BUG_ON("do"); 682 BUG_ON("do");
683 #endif 683 #endif
684 } 684 }
685 685
686 static void ps3_ioc0_unmap_sg(struct device *_dev, struct scatterlist *sg, 686 static void ps3_ioc0_unmap_sg(struct device *_dev, struct scatterlist *sg,
687 int nents, enum dma_data_direction direction, 687 int nents, enum dma_data_direction direction,
688 struct dma_attrs *attrs) 688 struct dma_attrs *attrs)
689 { 689 {
690 BUG(); 690 BUG();
691 } 691 }
692 692
693 static int ps3_dma_supported(struct device *_dev, u64 mask) 693 static int ps3_dma_supported(struct device *_dev, u64 mask)
694 { 694 {
695 return mask >= DMA_BIT_MASK(32); 695 return mask >= DMA_BIT_MASK(32);
696 } 696 }
697 697
698 static struct dma_map_ops ps3_sb_dma_ops = { 698 static struct dma_map_ops ps3_sb_dma_ops = {
699 .alloc_coherent = ps3_alloc_coherent, 699 .alloc_coherent = ps3_alloc_coherent,
700 .free_coherent = ps3_free_coherent, 700 .free_coherent = ps3_free_coherent,
701 .map_sg = ps3_sb_map_sg, 701 .map_sg = ps3_sb_map_sg,
702 .unmap_sg = ps3_sb_unmap_sg, 702 .unmap_sg = ps3_sb_unmap_sg,
703 .dma_supported = ps3_dma_supported, 703 .dma_supported = ps3_dma_supported,
704 .map_page = ps3_sb_map_page, 704 .map_page = ps3_sb_map_page,
705 .unmap_page = ps3_unmap_page, 705 .unmap_page = ps3_unmap_page,
706 }; 706 };
707 707
708 static struct dma_map_ops ps3_ioc0_dma_ops = { 708 static struct dma_map_ops ps3_ioc0_dma_ops = {
709 .alloc_coherent = ps3_alloc_coherent, 709 .alloc_coherent = ps3_alloc_coherent,
710 .free_coherent = ps3_free_coherent, 710 .free_coherent = ps3_free_coherent,
711 .map_sg = ps3_ioc0_map_sg, 711 .map_sg = ps3_ioc0_map_sg,
712 .unmap_sg = ps3_ioc0_unmap_sg, 712 .unmap_sg = ps3_ioc0_unmap_sg,
713 .dma_supported = ps3_dma_supported, 713 .dma_supported = ps3_dma_supported,
714 .map_page = ps3_ioc0_map_page, 714 .map_page = ps3_ioc0_map_page,
715 .unmap_page = ps3_unmap_page, 715 .unmap_page = ps3_unmap_page,
716 }; 716 };
717 717
718 /** 718 /**
719 * ps3_system_bus_release_device - remove a device from the system bus 719 * ps3_system_bus_release_device - remove a device from the system bus
720 */ 720 */
721 721
722 static void ps3_system_bus_release_device(struct device *_dev) 722 static void ps3_system_bus_release_device(struct device *_dev)
723 { 723 {
724 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); 724 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
725 kfree(dev); 725 kfree(dev);
726 } 726 }
727 727
728 /** 728 /**
729 * ps3_system_bus_device_register - add a device to the system bus 729 * ps3_system_bus_device_register - add a device to the system bus
730 * 730 *
731 * ps3_system_bus_device_register() expects the dev object to be allocated 731 * ps3_system_bus_device_register() expects the dev object to be allocated
732 * dynamically by the caller. The system bus takes ownership of the dev 732 * dynamically by the caller. The system bus takes ownership of the dev
733 * object and frees the object in ps3_system_bus_release_device(). 733 * object and frees the object in ps3_system_bus_release_device().
734 */ 734 */
735 735
736 int ps3_system_bus_device_register(struct ps3_system_bus_device *dev) 736 int ps3_system_bus_device_register(struct ps3_system_bus_device *dev)
737 { 737 {
738 int result; 738 int result;
739 static unsigned int dev_ioc0_count; 739 static unsigned int dev_ioc0_count;
740 static unsigned int dev_sb_count; 740 static unsigned int dev_sb_count;
741 static unsigned int dev_vuart_count; 741 static unsigned int dev_vuart_count;
742 static unsigned int dev_lpm_count; 742 static unsigned int dev_lpm_count;
743 743
744 if (!dev->core.parent) 744 if (!dev->core.parent)
745 dev->core.parent = &ps3_system_bus; 745 dev->core.parent = &ps3_system_bus;
746 dev->core.bus = &ps3_system_bus_type; 746 dev->core.bus = &ps3_system_bus_type;
747 dev->core.release = ps3_system_bus_release_device; 747 dev->core.release = ps3_system_bus_release_device;
748 748
749 switch (dev->dev_type) { 749 switch (dev->dev_type) {
750 case PS3_DEVICE_TYPE_IOC0: 750 case PS3_DEVICE_TYPE_IOC0:
751 dev->core.archdata.dma_ops = &ps3_ioc0_dma_ops; 751 dev->core.archdata.dma_ops = &ps3_ioc0_dma_ops;
752 dev_set_name(&dev->core, "ioc0_%02x", ++dev_ioc0_count); 752 dev_set_name(&dev->core, "ioc0_%02x", ++dev_ioc0_count);
753 break; 753 break;
754 case PS3_DEVICE_TYPE_SB: 754 case PS3_DEVICE_TYPE_SB:
755 dev->core.archdata.dma_ops = &ps3_sb_dma_ops; 755 dev->core.archdata.dma_ops = &ps3_sb_dma_ops;
756 dev_set_name(&dev->core, "sb_%02x", ++dev_sb_count); 756 dev_set_name(&dev->core, "sb_%02x", ++dev_sb_count);
757 757
758 break; 758 break;
759 case PS3_DEVICE_TYPE_VUART: 759 case PS3_DEVICE_TYPE_VUART:
760 dev_set_name(&dev->core, "vuart_%02x", ++dev_vuart_count); 760 dev_set_name(&dev->core, "vuart_%02x", ++dev_vuart_count);
761 break; 761 break;
762 case PS3_DEVICE_TYPE_LPM: 762 case PS3_DEVICE_TYPE_LPM:
763 dev_set_name(&dev->core, "lpm_%02x", ++dev_lpm_count); 763 dev_set_name(&dev->core, "lpm_%02x", ++dev_lpm_count);
764 break; 764 break;
765 default: 765 default:
766 BUG(); 766 BUG();
767 }; 767 };
768 768
769 dev->core.of_node = NULL; 769 dev->core.of_node = NULL;
770 set_dev_node(&dev->core, 0); 770 set_dev_node(&dev->core, 0);
771 771
772 pr_debug("%s:%d add %s\n", __func__, __LINE__, dev_name(&dev->core)); 772 pr_debug("%s:%d add %s\n", __func__, __LINE__, dev_name(&dev->core));
773 773
774 result = device_register(&dev->core); 774 result = device_register(&dev->core);
775 return result; 775 return result;
776 } 776 }
777 777
778 EXPORT_SYMBOL_GPL(ps3_system_bus_device_register); 778 EXPORT_SYMBOL_GPL(ps3_system_bus_device_register);
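For context, a minimal sketch of how a caller would honour the ownership contract documented for ps3_system_bus_device_register() above: the device object is allocated dynamically and, once registered, is freed by the bus in ps3_system_bus_release_device(). The ps3_my_vuart_setup() name and the error-path handling are illustrative, not taken from the tree.

static int ps3_my_vuart_setup(void)
{
	struct ps3_system_bus_device *dev;
	int result;

	/* Allocate dynamically: on success the bus owns and later kfree()s it. */
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->dev_type = PS3_DEVICE_TYPE_VUART;	/* selects the "vuart_%02x" name */

	result = ps3_system_bus_device_register(dev);
	if (result)
		kfree(dev);	/* never registered, so the caller still owns it */
	return result;
}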
779 779
780 int ps3_system_bus_driver_register(struct ps3_system_bus_driver *drv) 780 int ps3_system_bus_driver_register(struct ps3_system_bus_driver *drv)
781 { 781 {
782 int result; 782 int result;
783 783
784 pr_debug(" -> %s:%d: %s\n", __func__, __LINE__, drv->core.name); 784 pr_debug(" -> %s:%d: %s\n", __func__, __LINE__, drv->core.name);
785 785
786 if (!firmware_has_feature(FW_FEATURE_PS3_LV1)) 786 if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
787 return -ENODEV; 787 return -ENODEV;
788 788
789 drv->core.bus = &ps3_system_bus_type; 789 drv->core.bus = &ps3_system_bus_type;
790 790
791 result = driver_register(&drv->core); 791 result = driver_register(&drv->core);
792 pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, drv->core.name); 792 pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, drv->core.name);
793 return result; 793 return result;
794 } 794 }
795 795
796 EXPORT_SYMBOL_GPL(ps3_system_bus_driver_register); 796 EXPORT_SYMBOL_GPL(ps3_system_bus_driver_register);
797 797
798 void ps3_system_bus_driver_unregister(struct ps3_system_bus_driver *drv) 798 void ps3_system_bus_driver_unregister(struct ps3_system_bus_driver *drv)
799 { 799 {
800 pr_debug(" -> %s:%d: %s\n", __func__, __LINE__, drv->core.name); 800 pr_debug(" -> %s:%d: %s\n", __func__, __LINE__, drv->core.name);
801 driver_unregister(&drv->core); 801 driver_unregister(&drv->core);
802 pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, drv->core.name); 802 pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, drv->core.name);
803 } 803 }
804 804
805 EXPORT_SYMBOL_GPL(ps3_system_bus_driver_unregister); 805 EXPORT_SYMBOL_GPL(ps3_system_bus_driver_unregister);
806 806
arch/powerpc/platforms/pseries/io_event_irq.c
1 /* 1 /*
2 * Copyright 2010 2011 Mark Nelson and Tseng-Hui (Frank) Lin, IBM Corporation 2 * Copyright 2010 2011 Mark Nelson and Tseng-Hui (Frank) Lin, IBM Corporation
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 */ 8 */
9 9
10 #include <linux/errno.h> 10 #include <linux/errno.h>
11 #include <linux/slab.h> 11 #include <linux/slab.h>
12 #include <linux/module.h> 12 #include <linux/export.h>
13 #include <linux/irq.h> 13 #include <linux/irq.h>
14 #include <linux/interrupt.h> 14 #include <linux/interrupt.h>
15 #include <linux/of.h> 15 #include <linux/of.h>
16 #include <linux/list.h> 16 #include <linux/list.h>
17 #include <linux/notifier.h> 17 #include <linux/notifier.h>
18 18
19 #include <asm/machdep.h> 19 #include <asm/machdep.h>
20 #include <asm/rtas.h> 20 #include <asm/rtas.h>
21 #include <asm/irq.h> 21 #include <asm/irq.h>
22 #include <asm/io_event_irq.h> 22 #include <asm/io_event_irq.h>
23 23
24 #include "pseries.h" 24 #include "pseries.h"
25 25
26 /* 26 /*
27 * IO event interrupt is a mechanism provided by RTAS to return 27 * IO event interrupt is a mechanism provided by RTAS to return
28 * information about hardware error and non-error events. Device 28 * information about hardware error and non-error events. Device
29 * drivers can register their event handlers to receive events. 29 * drivers can register their event handlers to receive events.
30 * Device drivers are expected to use atomic_notifier_chain_register() 30 * Device drivers are expected to use atomic_notifier_chain_register()
31 * and atomic_notifier_chain_unregister() to register and unregister 31 * and atomic_notifier_chain_unregister() to register and unregister
32 * their event handlers. Since multiple IO event types and scopes 32 * their event handlers. Since multiple IO event types and scopes
33 * share an IO event interrupt, the event handlers are called one 33 * share an IO event interrupt, the event handlers are called one
34 * by one until the IO event is claimed by one of the handlers. 34 * by one until the IO event is claimed by one of the handlers.
35 * The event handlers are expected to return NOTIFY_OK if the 35 * The event handlers are expected to return NOTIFY_OK if the
36 * event is handled by the event handler or NOTIFY_DONE if the 36 * event is handled by the event handler or NOTIFY_DONE if the
37 * event does not belong to the handler. 37 * event does not belong to the handler.
38 * 38 *
39 * Usage: 39 * Usage:
40 * 40 *
41 * Notifier function: 41 * Notifier function:
42 * #include <asm/io_event_irq.h> 42 * #include <asm/io_event_irq.h>
43 * int event_handler(struct notifier_block *nb, unsigned long val, void *data) { 43 * int event_handler(struct notifier_block *nb, unsigned long val, void *data) {
 44 * p = (struct pseries_io_event *) data; 44 * p = (struct pseries_io_event *) data;
45 * if (! is_my_event(p->scope, p->event_type)) return NOTIFY_DONE; 45 * if (! is_my_event(p->scope, p->event_type)) return NOTIFY_DONE;
46 * : 46 * :
47 * : 47 * :
48 * return NOTIFY_OK; 48 * return NOTIFY_OK;
49 * } 49 * }
50 * struct notifier_block event_nb = { 50 * struct notifier_block event_nb = {
51 * .notifier_call = event_handler, 51 * .notifier_call = event_handler,
52 * } 52 * }
53 * 53 *
54 * Registration: 54 * Registration:
55 * atomic_notifier_chain_register(&pseries_ioei_notifier_list, &event_nb); 55 * atomic_notifier_chain_register(&pseries_ioei_notifier_list, &event_nb);
56 * 56 *
57 * Unregistration: 57 * Unregistration:
58 * atomic_notifier_chain_unregister(&pseries_ioei_notifier_list, &event_nb); 58 * atomic_notifier_chain_unregister(&pseries_ioei_notifier_list, &event_nb);
59 */ 59 */
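For context, the registration flow described in the comment above, drawn together as one compile-oriented sketch. The my_io_event_handler, my_io_event_nb and my_io_event_client_init names and the is_my_event() predicate are illustrative placeholders, not symbols from the tree.

static int my_io_event_handler(struct notifier_block *nb,
			       unsigned long val, void *data)
{
	struct pseries_io_event *p = data;

	if (!is_my_event(p->scope, p->event_type))
		return NOTIFY_DONE;	/* not ours; other handlers still get a look */

	/* ... act on the event ... */
	return NOTIFY_OK;		/* event handled by this client */
}

static struct notifier_block my_io_event_nb = {
	.notifier_call = my_io_event_handler,
};

static int __init my_io_event_client_init(void)
{
	return atomic_notifier_chain_register(&pseries_ioei_notifier_list,
					      &my_io_event_nb);
}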
60 60
61 ATOMIC_NOTIFIER_HEAD(pseries_ioei_notifier_list); 61 ATOMIC_NOTIFIER_HEAD(pseries_ioei_notifier_list);
62 EXPORT_SYMBOL_GPL(pseries_ioei_notifier_list); 62 EXPORT_SYMBOL_GPL(pseries_ioei_notifier_list);
63 63
64 static int ioei_check_exception_token; 64 static int ioei_check_exception_token;
65 65
66 /* pSeries event log format */ 66 /* pSeries event log format */
67 67
68 /* Two bytes ASCII section IDs */ 68 /* Two bytes ASCII section IDs */
69 #define PSERIES_ELOG_SECT_ID_PRIV_HDR (('P' << 8) | 'H') 69 #define PSERIES_ELOG_SECT_ID_PRIV_HDR (('P' << 8) | 'H')
70 #define PSERIES_ELOG_SECT_ID_USER_HDR (('U' << 8) | 'H') 70 #define PSERIES_ELOG_SECT_ID_USER_HDR (('U' << 8) | 'H')
71 #define PSERIES_ELOG_SECT_ID_PRIMARY_SRC (('P' << 8) | 'S') 71 #define PSERIES_ELOG_SECT_ID_PRIMARY_SRC (('P' << 8) | 'S')
72 #define PSERIES_ELOG_SECT_ID_EXTENDED_UH (('E' << 8) | 'H') 72 #define PSERIES_ELOG_SECT_ID_EXTENDED_UH (('E' << 8) | 'H')
73 #define PSERIES_ELOG_SECT_ID_FAILING_MTMS (('M' << 8) | 'T') 73 #define PSERIES_ELOG_SECT_ID_FAILING_MTMS (('M' << 8) | 'T')
74 #define PSERIES_ELOG_SECT_ID_SECONDARY_SRC (('S' << 8) | 'S') 74 #define PSERIES_ELOG_SECT_ID_SECONDARY_SRC (('S' << 8) | 'S')
75 #define PSERIES_ELOG_SECT_ID_DUMP_LOCATOR (('D' << 8) | 'H') 75 #define PSERIES_ELOG_SECT_ID_DUMP_LOCATOR (('D' << 8) | 'H')
76 #define PSERIES_ELOG_SECT_ID_FW_ERROR (('S' << 8) | 'W') 76 #define PSERIES_ELOG_SECT_ID_FW_ERROR (('S' << 8) | 'W')
77 #define PSERIES_ELOG_SECT_ID_IMPACT_PART_ID (('L' << 8) | 'P') 77 #define PSERIES_ELOG_SECT_ID_IMPACT_PART_ID (('L' << 8) | 'P')
78 #define PSERIES_ELOG_SECT_ID_LOGIC_RESOURCE_ID (('L' << 8) | 'R') 78 #define PSERIES_ELOG_SECT_ID_LOGIC_RESOURCE_ID (('L' << 8) | 'R')
79 #define PSERIES_ELOG_SECT_ID_HMC_ID (('H' << 8) | 'M') 79 #define PSERIES_ELOG_SECT_ID_HMC_ID (('H' << 8) | 'M')
80 #define PSERIES_ELOG_SECT_ID_EPOW (('E' << 8) | 'P') 80 #define PSERIES_ELOG_SECT_ID_EPOW (('E' << 8) | 'P')
81 #define PSERIES_ELOG_SECT_ID_IO_EVENT (('I' << 8) | 'E') 81 #define PSERIES_ELOG_SECT_ID_IO_EVENT (('I' << 8) | 'E')
82 #define PSERIES_ELOG_SECT_ID_MANUFACT_INFO (('M' << 8) | 'I') 82 #define PSERIES_ELOG_SECT_ID_MANUFACT_INFO (('M' << 8) | 'I')
83 #define PSERIES_ELOG_SECT_ID_CALL_HOME (('C' << 8) | 'H') 83 #define PSERIES_ELOG_SECT_ID_CALL_HOME (('C' << 8) | 'H')
84 #define PSERIES_ELOG_SECT_ID_USER_DEF (('U' << 8) | 'D') 84 #define PSERIES_ELOG_SECT_ID_USER_DEF (('U' << 8) | 'D')
85 85
86 /* Vendor specific Platform Event Log Format, Version 6, section header */ 86 /* Vendor specific Platform Event Log Format, Version 6, section header */
87 struct pseries_elog_section { 87 struct pseries_elog_section {
88 uint16_t id; /* 0x00 2-byte ASCII section ID */ 88 uint16_t id; /* 0x00 2-byte ASCII section ID */
89 uint16_t length; /* 0x02 Section length in bytes */ 89 uint16_t length; /* 0x02 Section length in bytes */
90 uint8_t version; /* 0x04 Section version */ 90 uint8_t version; /* 0x04 Section version */
91 uint8_t subtype; /* 0x05 Section subtype */ 91 uint8_t subtype; /* 0x05 Section subtype */
92 uint16_t creator_component; /* 0x06 Creator component ID */ 92 uint16_t creator_component; /* 0x06 Creator component ID */
93 uint8_t data[]; /* 0x08 Start of section data */ 93 uint8_t data[]; /* 0x08 Start of section data */
94 }; 94 };
95 95
96 static char ioei_rtas_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned; 96 static char ioei_rtas_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned;
97 97
98 /** 98 /**
99 * Find data portion of a specific section in RTAS extended event log. 99 * Find data portion of a specific section in RTAS extended event log.
100 * @elog: RTAS error/event log. 100 * @elog: RTAS error/event log.
 101 * @sect_id: section ID. 101 * @sect_id: section ID.
102 * 102 *
103 * Return: 103 * Return:
104 * pointer to the section data of the specified section 104 * pointer to the section data of the specified section
105 * NULL if not found 105 * NULL if not found
106 */ 106 */
107 static struct pseries_elog_section *find_xelog_section(struct rtas_error_log *elog, 107 static struct pseries_elog_section *find_xelog_section(struct rtas_error_log *elog,
108 uint16_t sect_id) 108 uint16_t sect_id)
109 { 109 {
110 struct rtas_ext_event_log_v6 *xelog = 110 struct rtas_ext_event_log_v6 *xelog =
111 (struct rtas_ext_event_log_v6 *) elog->buffer; 111 (struct rtas_ext_event_log_v6 *) elog->buffer;
112 struct pseries_elog_section *sect; 112 struct pseries_elog_section *sect;
113 unsigned char *p, *log_end; 113 unsigned char *p, *log_end;
114 114
115 /* Check that we understand the format */ 115 /* Check that we understand the format */
116 if (elog->extended_log_length < sizeof(struct rtas_ext_event_log_v6) || 116 if (elog->extended_log_length < sizeof(struct rtas_ext_event_log_v6) ||
117 xelog->log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG || 117 xelog->log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
118 xelog->company_id != RTAS_V6EXT_COMPANY_ID_IBM) 118 xelog->company_id != RTAS_V6EXT_COMPANY_ID_IBM)
119 return NULL; 119 return NULL;
120 120
121 log_end = elog->buffer + elog->extended_log_length; 121 log_end = elog->buffer + elog->extended_log_length;
122 p = xelog->vendor_log; 122 p = xelog->vendor_log;
123 while (p < log_end) { 123 while (p < log_end) {
124 sect = (struct pseries_elog_section *)p; 124 sect = (struct pseries_elog_section *)p;
125 if (sect->id == sect_id) 125 if (sect->id == sect_id)
126 return sect; 126 return sect;
127 p += sect->length; 127 p += sect->length;
128 } 128 }
129 return NULL; 129 return NULL;
130 } 130 }
131 131
132 /** 132 /**
133 * Find the data portion of an IO Event section from event log. 133 * Find the data portion of an IO Event section from event log.
134 * @elog: RTAS error/event log. 134 * @elog: RTAS error/event log.
135 * 135 *
136 * Return: 136 * Return:
137 * pointer to a valid IO event section data. NULL if not found. 137 * pointer to a valid IO event section data. NULL if not found.
138 */ 138 */
139 static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog) 139 static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog)
140 { 140 {
141 struct pseries_elog_section *sect; 141 struct pseries_elog_section *sect;
142 142
143 /* We should only ever get called for io-event interrupts, but if 143 /* We should only ever get called for io-event interrupts, but if
144 * we do get called for another type then something went wrong so 144 * we do get called for another type then something went wrong so
145 * make some noise about it. 145 * make some noise about it.
146 * RTAS_TYPE_IO only exists in extended event log version 6 or later. 146 * RTAS_TYPE_IO only exists in extended event log version 6 or later.
147 * No need to check event log version. 147 * No need to check event log version.
148 */ 148 */
149 if (unlikely(elog->type != RTAS_TYPE_IO)) { 149 if (unlikely(elog->type != RTAS_TYPE_IO)) {
150 printk_once(KERN_WARNING "io_event_irq: Unexpected event type %d", 150 printk_once(KERN_WARNING "io_event_irq: Unexpected event type %d",
151 elog->type); 151 elog->type);
152 return NULL; 152 return NULL;
153 } 153 }
154 154
155 sect = find_xelog_section(elog, PSERIES_ELOG_SECT_ID_IO_EVENT); 155 sect = find_xelog_section(elog, PSERIES_ELOG_SECT_ID_IO_EVENT);
156 if (unlikely(!sect)) { 156 if (unlikely(!sect)) {
157 printk_once(KERN_WARNING "io_event_irq: RTAS extended event " 157 printk_once(KERN_WARNING "io_event_irq: RTAS extended event "
158 "log does not contain an IO Event section. " 158 "log does not contain an IO Event section. "
159 "Could be a bug in system firmware!\n"); 159 "Could be a bug in system firmware!\n");
160 return NULL; 160 return NULL;
161 } 161 }
162 return (struct pseries_io_event *) &sect->data; 162 return (struct pseries_io_event *) &sect->data;
163 } 163 }
164 164
165 /* 165 /*
166 * PAPR: 166 * PAPR:
167 * - check-exception returns the first found error or event and clear that 167 * - check-exception returns the first found error or event and clear that
168 * error or event so it is reported once. 168 * error or event so it is reported once.
 169 * - Each interrupt returns one event. If a platform chooses to report 169 * - Each interrupt returns one event. If a platform chooses to report
170 * multiple events through a single interrupt, it must ensure that the 170 * multiple events through a single interrupt, it must ensure that the
171 * interrupt remains asserted until check-exception has been used to 171 * interrupt remains asserted until check-exception has been used to
 172 * process all outstanding events for that interrupt. 172 * process all outstanding events for that interrupt.
173 * 173 *
174 * Implementation notes: 174 * Implementation notes:
175 * - Events must be processed in the order they are returned. Hence, 175 * - Events must be processed in the order they are returned. Hence,
176 * sequential in nature. 176 * sequential in nature.
177 * - The owner of an event is determined by combinations of scope, 177 * - The owner of an event is determined by combinations of scope,
178 * event type, and sub-type. There is no easy way to pre-sort clients 178 * event type, and sub-type. There is no easy way to pre-sort clients
179 * by scope or event type alone. For example, Torrent ISR route change 179 * by scope or event type alone. For example, Torrent ISR route change
 180 * event is reported with scope 0x00 (Not Applicable) rather than 180 * event is reported with scope 0x00 (Not Applicable) rather than
 181 * 0x3B (Torrent-hub). It is better to let the clients identify 181 * 0x3B (Torrent-hub). It is better to let the clients identify
 182 * who owns the event. 182 * who owns the event.
183 */ 183 */
184 184
185 static irqreturn_t ioei_interrupt(int irq, void *dev_id) 185 static irqreturn_t ioei_interrupt(int irq, void *dev_id)
186 { 186 {
187 struct pseries_io_event *event; 187 struct pseries_io_event *event;
188 int rtas_rc; 188 int rtas_rc;
189 189
190 for (;;) { 190 for (;;) {
191 rtas_rc = rtas_call(ioei_check_exception_token, 6, 1, NULL, 191 rtas_rc = rtas_call(ioei_check_exception_token, 6, 1, NULL,
192 RTAS_VECTOR_EXTERNAL_INTERRUPT, 192 RTAS_VECTOR_EXTERNAL_INTERRUPT,
193 virq_to_hw(irq), 193 virq_to_hw(irq),
194 RTAS_IO_EVENTS, 1 /* Time Critical */, 194 RTAS_IO_EVENTS, 1 /* Time Critical */,
195 __pa(ioei_rtas_buf), 195 __pa(ioei_rtas_buf),
196 RTAS_DATA_BUF_SIZE); 196 RTAS_DATA_BUF_SIZE);
197 if (rtas_rc != 0) 197 if (rtas_rc != 0)
198 break; 198 break;
199 199
200 event = ioei_find_event((struct rtas_error_log *)ioei_rtas_buf); 200 event = ioei_find_event((struct rtas_error_log *)ioei_rtas_buf);
201 if (!event) 201 if (!event)
202 continue; 202 continue;
203 203
204 atomic_notifier_call_chain(&pseries_ioei_notifier_list, 204 atomic_notifier_call_chain(&pseries_ioei_notifier_list,
205 0, event); 205 0, event);
206 } 206 }
207 return IRQ_HANDLED; 207 return IRQ_HANDLED;
208 } 208 }
209 209
210 static int __init ioei_init(void) 210 static int __init ioei_init(void)
211 { 211 {
212 struct device_node *np; 212 struct device_node *np;
213 213
214 ioei_check_exception_token = rtas_token("check-exception"); 214 ioei_check_exception_token = rtas_token("check-exception");
215 if (ioei_check_exception_token == RTAS_UNKNOWN_SERVICE) 215 if (ioei_check_exception_token == RTAS_UNKNOWN_SERVICE)
216 return -ENODEV; 216 return -ENODEV;
217 217
218 np = of_find_node_by_path("/event-sources/ibm,io-events"); 218 np = of_find_node_by_path("/event-sources/ibm,io-events");
219 if (np) { 219 if (np) {
220 request_event_sources_irqs(np, ioei_interrupt, "IO_EVENT"); 220 request_event_sources_irqs(np, ioei_interrupt, "IO_EVENT");
221 pr_info("IBM I/O event interrupts enabled\n"); 221 pr_info("IBM I/O event interrupts enabled\n");
222 of_node_put(np); 222 of_node_put(np);
223 } else { 223 } else {
224 return -ENODEV; 224 return -ENODEV;
225 } 225 }
226 return 0; 226 return 0;
227 } 227 }
228 machine_subsys_initcall(pseries, ioei_init); 228 machine_subsys_initcall(pseries, ioei_init);
229 229
230 230
arch/powerpc/platforms/pseries/setup.c
1 /* 1 /*
2 * 64-bit pSeries and RS/6000 setup code. 2 * 64-bit pSeries and RS/6000 setup code.
3 * 3 *
4 * Copyright (C) 1995 Linus Torvalds 4 * Copyright (C) 1995 Linus Torvalds
5 * Adapted from 'alpha' version by Gary Thomas 5 * Adapted from 'alpha' version by Gary Thomas
6 * Modified by Cort Dougan (cort@cs.nmt.edu) 6 * Modified by Cort Dougan (cort@cs.nmt.edu)
7 * Modified by PPC64 Team, IBM Corp 7 * Modified by PPC64 Team, IBM Corp
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version. 12 * 2 of the License, or (at your option) any later version.
13 */ 13 */
14 14
15 /* 15 /*
16 * bootup setup stuff.. 16 * bootup setup stuff..
17 */ 17 */
18 18
19 #include <linux/cpu.h> 19 #include <linux/cpu.h>
20 #include <linux/errno.h> 20 #include <linux/errno.h>
21 #include <linux/sched.h> 21 #include <linux/sched.h>
22 #include <linux/kernel.h> 22 #include <linux/kernel.h>
23 #include <linux/mm.h> 23 #include <linux/mm.h>
24 #include <linux/stddef.h> 24 #include <linux/stddef.h>
25 #include <linux/unistd.h> 25 #include <linux/unistd.h>
26 #include <linux/user.h> 26 #include <linux/user.h>
27 #include <linux/tty.h> 27 #include <linux/tty.h>
28 #include <linux/major.h> 28 #include <linux/major.h>
29 #include <linux/interrupt.h> 29 #include <linux/interrupt.h>
30 #include <linux/reboot.h> 30 #include <linux/reboot.h>
31 #include <linux/init.h> 31 #include <linux/init.h>
32 #include <linux/ioport.h> 32 #include <linux/ioport.h>
33 #include <linux/console.h> 33 #include <linux/console.h>
34 #include <linux/pci.h> 34 #include <linux/pci.h>
35 #include <linux/utsname.h> 35 #include <linux/utsname.h>
36 #include <linux/adb.h> 36 #include <linux/adb.h>
37 #include <linux/module.h> 37 #include <linux/export.h>
38 #include <linux/delay.h> 38 #include <linux/delay.h>
39 #include <linux/irq.h> 39 #include <linux/irq.h>
40 #include <linux/seq_file.h> 40 #include <linux/seq_file.h>
41 #include <linux/root_dev.h> 41 #include <linux/root_dev.h>
42 42
43 #include <asm/mmu.h> 43 #include <asm/mmu.h>
44 #include <asm/processor.h> 44 #include <asm/processor.h>
45 #include <asm/io.h> 45 #include <asm/io.h>
46 #include <asm/pgtable.h> 46 #include <asm/pgtable.h>
47 #include <asm/prom.h> 47 #include <asm/prom.h>
48 #include <asm/rtas.h> 48 #include <asm/rtas.h>
49 #include <asm/pci-bridge.h> 49 #include <asm/pci-bridge.h>
50 #include <asm/iommu.h> 50 #include <asm/iommu.h>
51 #include <asm/dma.h> 51 #include <asm/dma.h>
52 #include <asm/machdep.h> 52 #include <asm/machdep.h>
53 #include <asm/irq.h> 53 #include <asm/irq.h>
54 #include <asm/time.h> 54 #include <asm/time.h>
55 #include <asm/nvram.h> 55 #include <asm/nvram.h>
56 #include <asm/pmc.h> 56 #include <asm/pmc.h>
57 #include <asm/mpic.h> 57 #include <asm/mpic.h>
58 #include <asm/xics.h> 58 #include <asm/xics.h>
59 #include <asm/ppc-pci.h> 59 #include <asm/ppc-pci.h>
60 #include <asm/i8259.h> 60 #include <asm/i8259.h>
61 #include <asm/udbg.h> 61 #include <asm/udbg.h>
62 #include <asm/smp.h> 62 #include <asm/smp.h>
63 #include <asm/firmware.h> 63 #include <asm/firmware.h>
64 #include <asm/eeh.h> 64 #include <asm/eeh.h>
65 #include <asm/pSeries_reconfig.h> 65 #include <asm/pSeries_reconfig.h>
66 66
67 #include "plpar_wrappers.h" 67 #include "plpar_wrappers.h"
68 #include "pseries.h" 68 #include "pseries.h"
69 69
70 int CMO_PrPSP = -1; 70 int CMO_PrPSP = -1;
71 int CMO_SecPSP = -1; 71 int CMO_SecPSP = -1;
72 unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT); 72 unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT);
73 EXPORT_SYMBOL(CMO_PageSize); 73 EXPORT_SYMBOL(CMO_PageSize);
74 74
75 int fwnmi_active; /* TRUE if an FWNMI handler is present */ 75 int fwnmi_active; /* TRUE if an FWNMI handler is present */
76 76
77 static void pseries_shared_idle_sleep(void); 77 static void pseries_shared_idle_sleep(void);
78 static void pseries_dedicated_idle_sleep(void); 78 static void pseries_dedicated_idle_sleep(void);
79 79
80 static struct device_node *pSeries_mpic_node; 80 static struct device_node *pSeries_mpic_node;
81 81
82 static void pSeries_show_cpuinfo(struct seq_file *m) 82 static void pSeries_show_cpuinfo(struct seq_file *m)
83 { 83 {
84 struct device_node *root; 84 struct device_node *root;
85 const char *model = ""; 85 const char *model = "";
86 86
87 root = of_find_node_by_path("/"); 87 root = of_find_node_by_path("/");
88 if (root) 88 if (root)
89 model = of_get_property(root, "model", NULL); 89 model = of_get_property(root, "model", NULL);
90 seq_printf(m, "machine\t\t: CHRP %s\n", model); 90 seq_printf(m, "machine\t\t: CHRP %s\n", model);
91 of_node_put(root); 91 of_node_put(root);
92 } 92 }
93 93
94 /* Initialize firmware assisted non-maskable interrupts if 94 /* Initialize firmware assisted non-maskable interrupts if
95 * the firmware supports this feature. 95 * the firmware supports this feature.
96 */ 96 */
97 static void __init fwnmi_init(void) 97 static void __init fwnmi_init(void)
98 { 98 {
99 unsigned long system_reset_addr, machine_check_addr; 99 unsigned long system_reset_addr, machine_check_addr;
100 100
101 int ibm_nmi_register = rtas_token("ibm,nmi-register"); 101 int ibm_nmi_register = rtas_token("ibm,nmi-register");
102 if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE) 102 if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE)
103 return; 103 return;
104 104
105 /* If the kernel's not linked at zero we point the firmware at low 105 /* If the kernel's not linked at zero we point the firmware at low
106 * addresses anyway, and use a trampoline to get to the real code. */ 106 * addresses anyway, and use a trampoline to get to the real code. */
107 system_reset_addr = __pa(system_reset_fwnmi) - PHYSICAL_START; 107 system_reset_addr = __pa(system_reset_fwnmi) - PHYSICAL_START;
108 machine_check_addr = __pa(machine_check_fwnmi) - PHYSICAL_START; 108 machine_check_addr = __pa(machine_check_fwnmi) - PHYSICAL_START;
109 109
110 if (0 == rtas_call(ibm_nmi_register, 2, 1, NULL, system_reset_addr, 110 if (0 == rtas_call(ibm_nmi_register, 2, 1, NULL, system_reset_addr,
111 machine_check_addr)) 111 machine_check_addr))
112 fwnmi_active = 1; 112 fwnmi_active = 1;
113 } 113 }
114 114
115 static void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc) 115 static void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc)
116 { 116 {
117 struct irq_chip *chip = irq_desc_get_chip(desc); 117 struct irq_chip *chip = irq_desc_get_chip(desc);
118 unsigned int cascade_irq = i8259_irq(); 118 unsigned int cascade_irq = i8259_irq();
119 119
120 if (cascade_irq != NO_IRQ) 120 if (cascade_irq != NO_IRQ)
121 generic_handle_irq(cascade_irq); 121 generic_handle_irq(cascade_irq);
122 122
123 chip->irq_eoi(&desc->irq_data); 123 chip->irq_eoi(&desc->irq_data);
124 } 124 }
125 125
126 static void __init pseries_setup_i8259_cascade(void) 126 static void __init pseries_setup_i8259_cascade(void)
127 { 127 {
128 struct device_node *np, *old, *found = NULL; 128 struct device_node *np, *old, *found = NULL;
129 unsigned int cascade; 129 unsigned int cascade;
130 const u32 *addrp; 130 const u32 *addrp;
131 unsigned long intack = 0; 131 unsigned long intack = 0;
132 int naddr; 132 int naddr;
133 133
134 for_each_node_by_type(np, "interrupt-controller") { 134 for_each_node_by_type(np, "interrupt-controller") {
135 if (of_device_is_compatible(np, "chrp,iic")) { 135 if (of_device_is_compatible(np, "chrp,iic")) {
136 found = np; 136 found = np;
137 break; 137 break;
138 } 138 }
139 } 139 }
140 140
141 if (found == NULL) { 141 if (found == NULL) {
142 printk(KERN_DEBUG "pic: no ISA interrupt controller\n"); 142 printk(KERN_DEBUG "pic: no ISA interrupt controller\n");
143 return; 143 return;
144 } 144 }
145 145
146 cascade = irq_of_parse_and_map(found, 0); 146 cascade = irq_of_parse_and_map(found, 0);
147 if (cascade == NO_IRQ) { 147 if (cascade == NO_IRQ) {
148 printk(KERN_ERR "pic: failed to map cascade interrupt"); 148 printk(KERN_ERR "pic: failed to map cascade interrupt");
149 return; 149 return;
150 } 150 }
151 pr_debug("pic: cascade mapped to irq %d\n", cascade); 151 pr_debug("pic: cascade mapped to irq %d\n", cascade);
152 152
153 for (old = of_node_get(found); old != NULL ; old = np) { 153 for (old = of_node_get(found); old != NULL ; old = np) {
154 np = of_get_parent(old); 154 np = of_get_parent(old);
155 of_node_put(old); 155 of_node_put(old);
156 if (np == NULL) 156 if (np == NULL)
157 break; 157 break;
158 if (strcmp(np->name, "pci") != 0) 158 if (strcmp(np->name, "pci") != 0)
159 continue; 159 continue;
160 addrp = of_get_property(np, "8259-interrupt-acknowledge", NULL); 160 addrp = of_get_property(np, "8259-interrupt-acknowledge", NULL);
161 if (addrp == NULL) 161 if (addrp == NULL)
162 continue; 162 continue;
163 naddr = of_n_addr_cells(np); 163 naddr = of_n_addr_cells(np);
164 intack = addrp[naddr-1]; 164 intack = addrp[naddr-1];
165 if (naddr > 1) 165 if (naddr > 1)
166 intack |= ((unsigned long)addrp[naddr-2]) << 32; 166 intack |= ((unsigned long)addrp[naddr-2]) << 32;
167 } 167 }
168 if (intack) 168 if (intack)
169 printk(KERN_DEBUG "pic: PCI 8259 intack at 0x%016lx\n", intack); 169 printk(KERN_DEBUG "pic: PCI 8259 intack at 0x%016lx\n", intack);
170 i8259_init(found, intack); 170 i8259_init(found, intack);
171 of_node_put(found); 171 of_node_put(found);
172 irq_set_chained_handler(cascade, pseries_8259_cascade); 172 irq_set_chained_handler(cascade, pseries_8259_cascade);
173 } 173 }
174 174
175 static void __init pseries_mpic_init_IRQ(void) 175 static void __init pseries_mpic_init_IRQ(void)
176 { 176 {
177 struct device_node *np; 177 struct device_node *np;
178 const unsigned int *opprop; 178 const unsigned int *opprop;
179 unsigned long openpic_addr = 0; 179 unsigned long openpic_addr = 0;
180 int naddr, n, i, opplen; 180 int naddr, n, i, opplen;
181 struct mpic *mpic; 181 struct mpic *mpic;
182 182
183 np = of_find_node_by_path("/"); 183 np = of_find_node_by_path("/");
184 naddr = of_n_addr_cells(np); 184 naddr = of_n_addr_cells(np);
185 opprop = of_get_property(np, "platform-open-pic", &opplen); 185 opprop = of_get_property(np, "platform-open-pic", &opplen);
186 if (opprop != 0) { 186 if (opprop != 0) {
187 openpic_addr = of_read_number(opprop, naddr); 187 openpic_addr = of_read_number(opprop, naddr);
188 printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr); 188 printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
189 } 189 }
190 of_node_put(np); 190 of_node_put(np);
191 191
192 BUG_ON(openpic_addr == 0); 192 BUG_ON(openpic_addr == 0);
193 193
194 /* Setup the openpic driver */ 194 /* Setup the openpic driver */
195 mpic = mpic_alloc(pSeries_mpic_node, openpic_addr, 195 mpic = mpic_alloc(pSeries_mpic_node, openpic_addr,
196 MPIC_PRIMARY, 196 MPIC_PRIMARY,
197 16, 250, /* isu size, irq count */ 197 16, 250, /* isu size, irq count */
198 " MPIC "); 198 " MPIC ");
199 BUG_ON(mpic == NULL); 199 BUG_ON(mpic == NULL);
200 200
201 /* Add ISUs */ 201 /* Add ISUs */
202 opplen /= sizeof(u32); 202 opplen /= sizeof(u32);
203 for (n = 0, i = naddr; i < opplen; i += naddr, n++) { 203 for (n = 0, i = naddr; i < opplen; i += naddr, n++) {
204 unsigned long isuaddr = of_read_number(opprop + i, naddr); 204 unsigned long isuaddr = of_read_number(opprop + i, naddr);
205 mpic_assign_isu(mpic, n, isuaddr); 205 mpic_assign_isu(mpic, n, isuaddr);
206 } 206 }
207 207
208 /* Setup top-level get_irq */ 208 /* Setup top-level get_irq */
209 ppc_md.get_irq = mpic_get_irq; 209 ppc_md.get_irq = mpic_get_irq;
210 210
211 /* All ISUs are setup, complete initialization */ 211 /* All ISUs are setup, complete initialization */
212 mpic_init(mpic); 212 mpic_init(mpic);
213 213
214 /* Look for cascade */ 214 /* Look for cascade */
215 pseries_setup_i8259_cascade(); 215 pseries_setup_i8259_cascade();
216 } 216 }
217 217
218 static void __init pseries_xics_init_IRQ(void) 218 static void __init pseries_xics_init_IRQ(void)
219 { 219 {
220 xics_init(); 220 xics_init();
221 pseries_setup_i8259_cascade(); 221 pseries_setup_i8259_cascade();
222 } 222 }
223 223
224 static void pseries_lpar_enable_pmcs(void) 224 static void pseries_lpar_enable_pmcs(void)
225 { 225 {
226 unsigned long set, reset; 226 unsigned long set, reset;
227 227
228 set = 1UL << 63; 228 set = 1UL << 63;
229 reset = 0; 229 reset = 0;
230 plpar_hcall_norets(H_PERFMON, set, reset); 230 plpar_hcall_norets(H_PERFMON, set, reset);
231 } 231 }
232 232
233 static void __init pseries_discover_pic(void) 233 static void __init pseries_discover_pic(void)
234 { 234 {
235 struct device_node *np; 235 struct device_node *np;
236 const char *typep; 236 const char *typep;
237 237
238 for (np = NULL; (np = of_find_node_by_name(np, 238 for (np = NULL; (np = of_find_node_by_name(np,
239 "interrupt-controller"));) { 239 "interrupt-controller"));) {
240 typep = of_get_property(np, "compatible", NULL); 240 typep = of_get_property(np, "compatible", NULL);
241 if (strstr(typep, "open-pic")) { 241 if (strstr(typep, "open-pic")) {
242 pSeries_mpic_node = of_node_get(np); 242 pSeries_mpic_node = of_node_get(np);
243 ppc_md.init_IRQ = pseries_mpic_init_IRQ; 243 ppc_md.init_IRQ = pseries_mpic_init_IRQ;
244 setup_kexec_cpu_down_mpic(); 244 setup_kexec_cpu_down_mpic();
245 smp_init_pseries_mpic(); 245 smp_init_pseries_mpic();
246 return; 246 return;
247 } else if (strstr(typep, "ppc-xicp")) { 247 } else if (strstr(typep, "ppc-xicp")) {
248 ppc_md.init_IRQ = pseries_xics_init_IRQ; 248 ppc_md.init_IRQ = pseries_xics_init_IRQ;
249 setup_kexec_cpu_down_xics(); 249 setup_kexec_cpu_down_xics();
250 smp_init_pseries_xics(); 250 smp_init_pseries_xics();
251 return; 251 return;
252 } 252 }
253 } 253 }
254 printk(KERN_ERR "pSeries_discover_pic: failed to recognize" 254 printk(KERN_ERR "pSeries_discover_pic: failed to recognize"
255 " interrupt-controller\n"); 255 " interrupt-controller\n");
256 } 256 }
257 257
258 static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node) 258 static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
259 { 259 {
260 struct device_node *np = node; 260 struct device_node *np = node;
261 struct pci_dn *pci = NULL; 261 struct pci_dn *pci = NULL;
262 int err = NOTIFY_OK; 262 int err = NOTIFY_OK;
263 263
264 switch (action) { 264 switch (action) {
265 case PSERIES_RECONFIG_ADD: 265 case PSERIES_RECONFIG_ADD:
266 pci = np->parent->data; 266 pci = np->parent->data;
267 if (pci) 267 if (pci)
268 update_dn_pci_info(np, pci->phb); 268 update_dn_pci_info(np, pci->phb);
269 break; 269 break;
270 default: 270 default:
271 err = NOTIFY_DONE; 271 err = NOTIFY_DONE;
272 break; 272 break;
273 } 273 }
274 return err; 274 return err;
275 } 275 }
276 276
277 static struct notifier_block pci_dn_reconfig_nb = { 277 static struct notifier_block pci_dn_reconfig_nb = {
278 .notifier_call = pci_dn_reconfig_notifier, 278 .notifier_call = pci_dn_reconfig_notifier,
279 }; 279 };
280 280
281 struct kmem_cache *dtl_cache; 281 struct kmem_cache *dtl_cache;
282 282
283 #ifdef CONFIG_VIRT_CPU_ACCOUNTING 283 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
284 /* 284 /*
285 * Allocate space for the dispatch trace log for all possible cpus 285 * Allocate space for the dispatch trace log for all possible cpus
286 * and register the buffers with the hypervisor. This is used for 286 * and register the buffers with the hypervisor. This is used for
287 * computing time stolen by the hypervisor. 287 * computing time stolen by the hypervisor.
288 */ 288 */
289 static int alloc_dispatch_logs(void) 289 static int alloc_dispatch_logs(void)
290 { 290 {
291 int cpu, ret; 291 int cpu, ret;
292 struct paca_struct *pp; 292 struct paca_struct *pp;
293 struct dtl_entry *dtl; 293 struct dtl_entry *dtl;
294 294
295 if (!firmware_has_feature(FW_FEATURE_SPLPAR)) 295 if (!firmware_has_feature(FW_FEATURE_SPLPAR))
296 return 0; 296 return 0;
297 297
298 if (!dtl_cache) 298 if (!dtl_cache)
299 return 0; 299 return 0;
300 300
301 for_each_possible_cpu(cpu) { 301 for_each_possible_cpu(cpu) {
302 pp = &paca[cpu]; 302 pp = &paca[cpu];
303 dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL); 303 dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
304 if (!dtl) { 304 if (!dtl) {
305 pr_warn("Failed to allocate dispatch trace log for cpu %d\n", 305 pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
306 cpu); 306 cpu);
307 pr_warn("Stolen time statistics will be unreliable\n"); 307 pr_warn("Stolen time statistics will be unreliable\n");
308 break; 308 break;
309 } 309 }
310 310
311 pp->dtl_ridx = 0; 311 pp->dtl_ridx = 0;
312 pp->dispatch_log = dtl; 312 pp->dispatch_log = dtl;
313 pp->dispatch_log_end = dtl + N_DISPATCH_LOG; 313 pp->dispatch_log_end = dtl + N_DISPATCH_LOG;
314 pp->dtl_curr = dtl; 314 pp->dtl_curr = dtl;
315 } 315 }
316 316
317 /* Register the DTL for the current (boot) cpu */ 317 /* Register the DTL for the current (boot) cpu */
318 dtl = get_paca()->dispatch_log; 318 dtl = get_paca()->dispatch_log;
319 get_paca()->dtl_ridx = 0; 319 get_paca()->dtl_ridx = 0;
320 get_paca()->dtl_curr = dtl; 320 get_paca()->dtl_curr = dtl;
321 get_paca()->lppaca_ptr->dtl_idx = 0; 321 get_paca()->lppaca_ptr->dtl_idx = 0;
322 322
323 /* hypervisor reads buffer length from this field */ 323 /* hypervisor reads buffer length from this field */
324 dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES; 324 dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES;
325 ret = register_dtl(hard_smp_processor_id(), __pa(dtl)); 325 ret = register_dtl(hard_smp_processor_id(), __pa(dtl));
326 if (ret) 326 if (ret)
327 pr_err("WARNING: DTL registration of cpu %d (hw %d) failed " 327 pr_err("WARNING: DTL registration of cpu %d (hw %d) failed "
328 "with %d\n", smp_processor_id(), 328 "with %d\n", smp_processor_id(),
329 hard_smp_processor_id(), ret); 329 hard_smp_processor_id(), ret);
330 get_paca()->lppaca_ptr->dtl_enable_mask = 2; 330 get_paca()->lppaca_ptr->dtl_enable_mask = 2;
331 331
332 return 0; 332 return 0;
333 } 333 }
334 #else /* !CONFIG_VIRT_CPU_ACCOUNTING */ 334 #else /* !CONFIG_VIRT_CPU_ACCOUNTING */
335 static inline int alloc_dispatch_logs(void) 335 static inline int alloc_dispatch_logs(void)
336 { 336 {
337 return 0; 337 return 0;
338 } 338 }
339 #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ 339 #endif /* CONFIG_VIRT_CPU_ACCOUNTING */
340 340
341 static int alloc_dispatch_log_kmem_cache(void) 341 static int alloc_dispatch_log_kmem_cache(void)
342 { 342 {
343 dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES, 343 dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
344 DISPATCH_LOG_BYTES, 0, NULL); 344 DISPATCH_LOG_BYTES, 0, NULL);
345 if (!dtl_cache) { 345 if (!dtl_cache) {
346 pr_warn("Failed to create dispatch trace log buffer cache\n"); 346 pr_warn("Failed to create dispatch trace log buffer cache\n");
347 pr_warn("Stolen time statistics will be unreliable\n"); 347 pr_warn("Stolen time statistics will be unreliable\n");
348 return 0; 348 return 0;
349 } 349 }
350 350
351 return alloc_dispatch_logs(); 351 return alloc_dispatch_logs();
352 } 352 }
353 early_initcall(alloc_dispatch_log_kmem_cache); 353 early_initcall(alloc_dispatch_log_kmem_cache);
354 354
355 static void __init pSeries_setup_arch(void) 355 static void __init pSeries_setup_arch(void)
356 { 356 {
357 /* Discover PIC type and setup ppc_md accordingly */ 357 /* Discover PIC type and setup ppc_md accordingly */
358 pseries_discover_pic(); 358 pseries_discover_pic();
359 359
360 /* openpic global configuration register (64-bit format). */ 360 /* openpic global configuration register (64-bit format). */
361 /* openpic Interrupt Source Unit pointer (64-bit format). */ 361 /* openpic Interrupt Source Unit pointer (64-bit format). */
362 /* python0 facility area (mmio) (64-bit format) REAL address. */ 362 /* python0 facility area (mmio) (64-bit format) REAL address. */
363 363
364 /* init to some ~sane value until calibrate_delay() runs */ 364 /* init to some ~sane value until calibrate_delay() runs */
365 loops_per_jiffy = 50000000; 365 loops_per_jiffy = 50000000;
366 366
367 fwnmi_init(); 367 fwnmi_init();
368 368
369 /* Find and initialize PCI host bridges */ 369 /* Find and initialize PCI host bridges */
370 init_pci_config_tokens(); 370 init_pci_config_tokens();
371 find_and_init_phbs(); 371 find_and_init_phbs();
372 pSeries_reconfig_notifier_register(&pci_dn_reconfig_nb); 372 pSeries_reconfig_notifier_register(&pci_dn_reconfig_nb);
373 eeh_init(); 373 eeh_init();
374 374
375 pSeries_nvram_init(); 375 pSeries_nvram_init();
376 376
377 /* Choose an idle loop */ 377 /* Choose an idle loop */
378 if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 378 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
379 vpa_init(boot_cpuid); 379 vpa_init(boot_cpuid);
380 if (get_lppaca()->shared_proc) { 380 if (get_lppaca()->shared_proc) {
381 printk(KERN_DEBUG "Using shared processor idle loop\n"); 381 printk(KERN_DEBUG "Using shared processor idle loop\n");
382 ppc_md.power_save = pseries_shared_idle_sleep; 382 ppc_md.power_save = pseries_shared_idle_sleep;
383 } else { 383 } else {
384 printk(KERN_DEBUG "Using dedicated idle loop\n"); 384 printk(KERN_DEBUG "Using dedicated idle loop\n");
385 ppc_md.power_save = pseries_dedicated_idle_sleep; 385 ppc_md.power_save = pseries_dedicated_idle_sleep;
386 } 386 }
387 } else { 387 } else {
388 printk(KERN_DEBUG "Using default idle loop\n"); 388 printk(KERN_DEBUG "Using default idle loop\n");
389 } 389 }
390 390
391 if (firmware_has_feature(FW_FEATURE_LPAR)) 391 if (firmware_has_feature(FW_FEATURE_LPAR))
392 ppc_md.enable_pmcs = pseries_lpar_enable_pmcs; 392 ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
393 else 393 else
394 ppc_md.enable_pmcs = power4_enable_pmcs; 394 ppc_md.enable_pmcs = power4_enable_pmcs;
395 } 395 }
396 396
397 static int __init pSeries_init_panel(void) 397 static int __init pSeries_init_panel(void)
398 { 398 {
399 /* Manually leave the kernel version on the panel. */ 399 /* Manually leave the kernel version on the panel. */
400 ppc_md.progress("Linux ppc64\n", 0); 400 ppc_md.progress("Linux ppc64\n", 0);
401 ppc_md.progress(init_utsname()->version, 0); 401 ppc_md.progress(init_utsname()->version, 0);
402 402
403 return 0; 403 return 0;
404 } 404 }
405 machine_arch_initcall(pseries, pSeries_init_panel); 405 machine_arch_initcall(pseries, pSeries_init_panel);
406 406
407 static int pseries_set_dabr(unsigned long dabr) 407 static int pseries_set_dabr(unsigned long dabr)
408 { 408 {
409 return plpar_hcall_norets(H_SET_DABR, dabr); 409 return plpar_hcall_norets(H_SET_DABR, dabr);
410 } 410 }
411 411
412 static int pseries_set_xdabr(unsigned long dabr) 412 static int pseries_set_xdabr(unsigned long dabr)
413 { 413 {
414 /* We want to catch accesses from kernel and userspace */ 414 /* We want to catch accesses from kernel and userspace */
415 return plpar_hcall_norets(H_SET_XDABR, dabr, 415 return plpar_hcall_norets(H_SET_XDABR, dabr,
416 H_DABRX_KERNEL | H_DABRX_USER); 416 H_DABRX_KERNEL | H_DABRX_USER);
417 } 417 }
418 418
419 #define CMO_CHARACTERISTICS_TOKEN 44 419 #define CMO_CHARACTERISTICS_TOKEN 44
420 #define CMO_MAXLENGTH 1026 420 #define CMO_MAXLENGTH 1026
421 421
422 void pSeries_coalesce_init(void) 422 void pSeries_coalesce_init(void)
423 { 423 {
424 struct hvcall_mpp_x_data mpp_x_data; 424 struct hvcall_mpp_x_data mpp_x_data;
425 425
426 if (firmware_has_feature(FW_FEATURE_CMO) && !h_get_mpp_x(&mpp_x_data)) 426 if (firmware_has_feature(FW_FEATURE_CMO) && !h_get_mpp_x(&mpp_x_data))
427 powerpc_firmware_features |= FW_FEATURE_XCMO; 427 powerpc_firmware_features |= FW_FEATURE_XCMO;
428 else 428 else
429 powerpc_firmware_features &= ~FW_FEATURE_XCMO; 429 powerpc_firmware_features &= ~FW_FEATURE_XCMO;
430 } 430 }
431 431
432 /** 432 /**
433 * fw_cmo_feature_init - FW_FEATURE_CMO is not stored in ibm,hypertas-functions, 433 * fw_cmo_feature_init - FW_FEATURE_CMO is not stored in ibm,hypertas-functions,
434 * handle that here. (Stolen from parse_system_parameter_string) 434 * handle that here. (Stolen from parse_system_parameter_string)
435 */ 435 */
436 void pSeries_cmo_feature_init(void) 436 void pSeries_cmo_feature_init(void)
437 { 437 {
438 char *ptr, *key, *value, *end; 438 char *ptr, *key, *value, *end;
439 int call_status; 439 int call_status;
440 int page_order = IOMMU_PAGE_SHIFT; 440 int page_order = IOMMU_PAGE_SHIFT;
441 441
442 pr_debug(" -> fw_cmo_feature_init()\n"); 442 pr_debug(" -> fw_cmo_feature_init()\n");
443 spin_lock(&rtas_data_buf_lock); 443 spin_lock(&rtas_data_buf_lock);
444 memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE); 444 memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE);
445 call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1, 445 call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
446 NULL, 446 NULL,
447 CMO_CHARACTERISTICS_TOKEN, 447 CMO_CHARACTERISTICS_TOKEN,
448 __pa(rtas_data_buf), 448 __pa(rtas_data_buf),
449 RTAS_DATA_BUF_SIZE); 449 RTAS_DATA_BUF_SIZE);
450 450
451 if (call_status != 0) { 451 if (call_status != 0) {
452 spin_unlock(&rtas_data_buf_lock); 452 spin_unlock(&rtas_data_buf_lock);
453 pr_debug("CMO not available\n"); 453 pr_debug("CMO not available\n");
454 pr_debug(" <- fw_cmo_feature_init()\n"); 454 pr_debug(" <- fw_cmo_feature_init()\n");
455 return; 455 return;
456 } 456 }
457 457
458 end = rtas_data_buf + CMO_MAXLENGTH - 2; 458 end = rtas_data_buf + CMO_MAXLENGTH - 2;
459 ptr = rtas_data_buf + 2; /* step over strlen value */ 459 ptr = rtas_data_buf + 2; /* step over strlen value */
460 key = value = ptr; 460 key = value = ptr;
461 461
462 while (*ptr && (ptr <= end)) { 462 while (*ptr && (ptr <= end)) {
463 /* Separate the key and value by replacing '=' with '\0' and 463 /* Separate the key and value by replacing '=' with '\0' and
464 * point the value at the string after the '=' 464 * point the value at the string after the '='
465 */ 465 */
466 if (ptr[0] == '=') { 466 if (ptr[0] == '=') {
467 ptr[0] = '\0'; 467 ptr[0] = '\0';
468 value = ptr + 1; 468 value = ptr + 1;
469 } else if (ptr[0] == '\0' || ptr[0] == ',') { 469 } else if (ptr[0] == '\0' || ptr[0] == ',') {
470 /* Terminate the string containing the key/value pair */ 470 /* Terminate the string containing the key/value pair */
471 ptr[0] = '\0'; 471 ptr[0] = '\0';
472 472
473 if (key == value) { 473 if (key == value) {
474 pr_debug("Malformed key/value pair\n"); 474 pr_debug("Malformed key/value pair\n");
475 /* Never found a '=', end processing */ 475 /* Never found a '=', end processing */
476 break; 476 break;
477 } 477 }
478 478
479 if (0 == strcmp(key, "CMOPageSize")) 479 if (0 == strcmp(key, "CMOPageSize"))
480 page_order = simple_strtol(value, NULL, 10); 480 page_order = simple_strtol(value, NULL, 10);
481 else if (0 == strcmp(key, "PrPSP")) 481 else if (0 == strcmp(key, "PrPSP"))
482 CMO_PrPSP = simple_strtol(value, NULL, 10); 482 CMO_PrPSP = simple_strtol(value, NULL, 10);
483 else if (0 == strcmp(key, "SecPSP")) 483 else if (0 == strcmp(key, "SecPSP"))
484 CMO_SecPSP = simple_strtol(value, NULL, 10); 484 CMO_SecPSP = simple_strtol(value, NULL, 10);
485 value = key = ptr + 1; 485 value = key = ptr + 1;
486 } 486 }
487 ptr++; 487 ptr++;
488 } 488 }
489 489
490 /* Page size is returned as the power of 2 of the page size, 490 /* Page size is returned as the power of 2 of the page size,
491 * convert to the page size in bytes before returning 491 * convert to the page size in bytes before returning
492 */ 492 */
493 CMO_PageSize = 1 << page_order; 493 CMO_PageSize = 1 << page_order;
494 pr_debug("CMO_PageSize = %lu\n", CMO_PageSize); 494 pr_debug("CMO_PageSize = %lu\n", CMO_PageSize);
495 495
496 if (CMO_PrPSP != -1 || CMO_SecPSP != -1) { 496 if (CMO_PrPSP != -1 || CMO_SecPSP != -1) {
497 pr_info("CMO enabled\n"); 497 pr_info("CMO enabled\n");
498 pr_debug("CMO enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP, 498 pr_debug("CMO enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP,
499 CMO_SecPSP); 499 CMO_SecPSP);
500 powerpc_firmware_features |= FW_FEATURE_CMO; 500 powerpc_firmware_features |= FW_FEATURE_CMO;
501 pSeries_coalesce_init(); 501 pSeries_coalesce_init();
502 } else 502 } else
503 pr_debug("CMO not enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP, 503 pr_debug("CMO not enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP,
504 CMO_SecPSP); 504 CMO_SecPSP);
505 spin_unlock(&rtas_data_buf_lock); 505 spin_unlock(&rtas_data_buf_lock);
506 pr_debug(" <- fw_cmo_feature_init()\n"); 506 pr_debug(" <- fw_cmo_feature_init()\n");
507 } 507 }
508 508
509 /* 509 /*
510 * Early initialization. Relocation is on but do not reference unbolted pages 510 * Early initialization. Relocation is on but do not reference unbolted pages
511 */ 511 */
512 static void __init pSeries_init_early(void) 512 static void __init pSeries_init_early(void)
513 { 513 {
514 pr_debug(" -> pSeries_init_early()\n"); 514 pr_debug(" -> pSeries_init_early()\n");
515 515
516 #ifdef CONFIG_HVC_CONSOLE 516 #ifdef CONFIG_HVC_CONSOLE
517 if (firmware_has_feature(FW_FEATURE_LPAR)) 517 if (firmware_has_feature(FW_FEATURE_LPAR))
518 hvc_vio_init_early(); 518 hvc_vio_init_early();
519 #endif 519 #endif
520 if (firmware_has_feature(FW_FEATURE_DABR)) 520 if (firmware_has_feature(FW_FEATURE_DABR))
521 ppc_md.set_dabr = pseries_set_dabr; 521 ppc_md.set_dabr = pseries_set_dabr;
522 else if (firmware_has_feature(FW_FEATURE_XDABR)) 522 else if (firmware_has_feature(FW_FEATURE_XDABR))
523 ppc_md.set_dabr = pseries_set_xdabr; 523 ppc_md.set_dabr = pseries_set_xdabr;
524 524
525 pSeries_cmo_feature_init(); 525 pSeries_cmo_feature_init();
526 iommu_init_early_pSeries(); 526 iommu_init_early_pSeries();
527 527
528 pr_debug(" <- pSeries_init_early()\n"); 528 pr_debug(" <- pSeries_init_early()\n");
529 } 529 }
530 530
531 /* 531 /*
532 * Called very early, MMU is off, device-tree isn't unflattened 532 * Called very early, MMU is off, device-tree isn't unflattened
533 */ 533 */
534 534
535 static int __init pSeries_probe_hypertas(unsigned long node, 535 static int __init pSeries_probe_hypertas(unsigned long node,
536 const char *uname, int depth, 536 const char *uname, int depth,
537 void *data) 537 void *data)
538 { 538 {
539 const char *hypertas; 539 const char *hypertas;
540 unsigned long len; 540 unsigned long len;
541 541
542 if (depth != 1 || 542 if (depth != 1 ||
543 (strcmp(uname, "rtas") != 0 && strcmp(uname, "rtas@0") != 0)) 543 (strcmp(uname, "rtas") != 0 && strcmp(uname, "rtas@0") != 0))
544 return 0; 544 return 0;
545 545
546 hypertas = of_get_flat_dt_prop(node, "ibm,hypertas-functions", &len); 546 hypertas = of_get_flat_dt_prop(node, "ibm,hypertas-functions", &len);
547 if (!hypertas) 547 if (!hypertas)
548 return 1; 548 return 1;
549 549
550 powerpc_firmware_features |= FW_FEATURE_LPAR; 550 powerpc_firmware_features |= FW_FEATURE_LPAR;
551 fw_feature_init(hypertas, len); 551 fw_feature_init(hypertas, len);
552 552
553 return 1; 553 return 1;
554 } 554 }
555 555
556 static int __init pSeries_probe(void) 556 static int __init pSeries_probe(void)
557 { 557 {
558 unsigned long root = of_get_flat_dt_root(); 558 unsigned long root = of_get_flat_dt_root();
559 char *dtype = of_get_flat_dt_prop(root, "device_type", NULL); 559 char *dtype = of_get_flat_dt_prop(root, "device_type", NULL);
560 560
561 if (dtype == NULL) 561 if (dtype == NULL)
562 return 0; 562 return 0;
563 if (strcmp(dtype, "chrp")) 563 if (strcmp(dtype, "chrp"))
564 return 0; 564 return 0;
565 565
566 /* Cell blades firmware claims to be chrp while it's not. Until this 566 /* Cell blades firmware claims to be chrp while it's not. Until this
567 * is fixed, we need to avoid those here. 567 * is fixed, we need to avoid those here.
568 */ 568 */
569 if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0") || 569 if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0") ||
570 of_flat_dt_is_compatible(root, "IBM,CBEA")) 570 of_flat_dt_is_compatible(root, "IBM,CBEA"))
571 return 0; 571 return 0;
572 572
573 pr_debug("pSeries detected, looking for LPAR capability...\n"); 573 pr_debug("pSeries detected, looking for LPAR capability...\n");
574 574
575 /* Now try to figure out if we are running on LPAR */ 575 /* Now try to figure out if we are running on LPAR */
576 of_scan_flat_dt(pSeries_probe_hypertas, NULL); 576 of_scan_flat_dt(pSeries_probe_hypertas, NULL);
577 577
578 if (firmware_has_feature(FW_FEATURE_LPAR)) 578 if (firmware_has_feature(FW_FEATURE_LPAR))
579 hpte_init_lpar(); 579 hpte_init_lpar();
580 else 580 else
581 hpte_init_native(); 581 hpte_init_native();
582 582
583 pr_debug("Machine is%s LPAR !\n", 583 pr_debug("Machine is%s LPAR !\n",
584 (powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not"); 584 (powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not");
585 585
586 return 1; 586 return 1;
587 } 587 }
588 588
589 589
590 DECLARE_PER_CPU(long, smt_snooze_delay); 590 DECLARE_PER_CPU(long, smt_snooze_delay);
591 591
592 static void pseries_dedicated_idle_sleep(void) 592 static void pseries_dedicated_idle_sleep(void)
593 { 593 {
594 unsigned int cpu = smp_processor_id(); 594 unsigned int cpu = smp_processor_id();
595 unsigned long start_snooze; 595 unsigned long start_snooze;
596 unsigned long in_purr, out_purr; 596 unsigned long in_purr, out_purr;
597 long snooze = __get_cpu_var(smt_snooze_delay); 597 long snooze = __get_cpu_var(smt_snooze_delay);
598 598
599 /* 599 /*
600 * Indicate to the HV that we are idle. Now would be 600 * Indicate to the HV that we are idle. Now would be
601 * a good time to find other work to dispatch. 601 * a good time to find other work to dispatch.
602 */ 602 */
603 get_lppaca()->idle = 1; 603 get_lppaca()->idle = 1;
604 get_lppaca()->donate_dedicated_cpu = 1; 604 get_lppaca()->donate_dedicated_cpu = 1;
605 in_purr = mfspr(SPRN_PURR); 605 in_purr = mfspr(SPRN_PURR);
606 606
607 /* 607 /*
608 * We come in with interrupts disabled, and need_resched() 608 * We come in with interrupts disabled, and need_resched()
609 * has been checked recently. If we should poll for a little 609 * has been checked recently. If we should poll for a little
610 * while, do so. 610 * while, do so.
611 */ 611 */
612 if (snooze) { 612 if (snooze) {
613 start_snooze = get_tb() + snooze * tb_ticks_per_usec; 613 start_snooze = get_tb() + snooze * tb_ticks_per_usec;
614 local_irq_enable(); 614 local_irq_enable();
615 set_thread_flag(TIF_POLLING_NRFLAG); 615 set_thread_flag(TIF_POLLING_NRFLAG);
616 616
617 while ((snooze < 0) || (get_tb() < start_snooze)) { 617 while ((snooze < 0) || (get_tb() < start_snooze)) {
618 if (need_resched() || cpu_is_offline(cpu)) 618 if (need_resched() || cpu_is_offline(cpu))
619 goto out; 619 goto out;
620 ppc64_runlatch_off(); 620 ppc64_runlatch_off();
621 HMT_low(); 621 HMT_low();
622 HMT_very_low(); 622 HMT_very_low();
623 } 623 }
624 624
625 HMT_medium(); 625 HMT_medium();
626 clear_thread_flag(TIF_POLLING_NRFLAG); 626 clear_thread_flag(TIF_POLLING_NRFLAG);
627 smp_mb(); 627 smp_mb();
628 local_irq_disable(); 628 local_irq_disable();
629 if (need_resched() || cpu_is_offline(cpu)) 629 if (need_resched() || cpu_is_offline(cpu))
630 goto out; 630 goto out;
631 } 631 }
632 632
633 cede_processor(); 633 cede_processor();
634 634
635 out: 635 out:
636 HMT_medium(); 636 HMT_medium();
637 out_purr = mfspr(SPRN_PURR); 637 out_purr = mfspr(SPRN_PURR);
638 get_lppaca()->wait_state_cycles += out_purr - in_purr; 638 get_lppaca()->wait_state_cycles += out_purr - in_purr;
639 get_lppaca()->donate_dedicated_cpu = 0; 639 get_lppaca()->donate_dedicated_cpu = 0;
640 get_lppaca()->idle = 0; 640 get_lppaca()->idle = 0;
641 } 641 }
642 642
643 static void pseries_shared_idle_sleep(void) 643 static void pseries_shared_idle_sleep(void)
644 { 644 {
645 /* 645 /*
646 * Indicate to the HV that we are idle. Now would be 646 * Indicate to the HV that we are idle. Now would be
647 * a good time to find other work to dispatch. 647 * a good time to find other work to dispatch.
648 */ 648 */
649 get_lppaca()->idle = 1; 649 get_lppaca()->idle = 1;
650 650
651 /* 651 /*
652 * Yield the processor to the hypervisor. We return if 652 * Yield the processor to the hypervisor. We return if
653 * an external interrupt occurs (which are driven prior 653 * an external interrupt occurs (which are driven prior
654 * to returning here) or if a prod occurs from another 654 * to returning here) or if a prod occurs from another
655 * processor. When returning here, external interrupts 655 * processor. When returning here, external interrupts
656 * are enabled. 656 * are enabled.
657 */ 657 */
658 cede_processor(); 658 cede_processor();
659 659
660 get_lppaca()->idle = 0; 660 get_lppaca()->idle = 0;
661 } 661 }
662 662
663 static int pSeries_pci_probe_mode(struct pci_bus *bus) 663 static int pSeries_pci_probe_mode(struct pci_bus *bus)
664 { 664 {
665 if (firmware_has_feature(FW_FEATURE_LPAR)) 665 if (firmware_has_feature(FW_FEATURE_LPAR))
666 return PCI_PROBE_DEVTREE; 666 return PCI_PROBE_DEVTREE;
667 return PCI_PROBE_NORMAL; 667 return PCI_PROBE_NORMAL;
668 } 668 }
669 669
670 /** 670 /**
671 * pSeries_power_off - tell firmware how to power off the system. 671 * pSeries_power_off - tell firmware how to power off the system.
672 * 672 *
673 * This function calls either the power-off rtas token in normal cases 673 * This function calls either the power-off rtas token in normal cases
674 * or the ibm,power-off-ups token (if present & requested) in case of 674 * or the ibm,power-off-ups token (if present & requested) in case of
675 * a power failure. If power-off token is used, power on will only be 675 * a power failure. If power-off token is used, power on will only be
676 * possible with power button press. If ibm,power-off-ups token is used 676 * possible with power button press. If ibm,power-off-ups token is used
677 * it will allow auto poweron after power is restored. 677 * it will allow auto poweron after power is restored.
678 */ 678 */
679 static void pSeries_power_off(void) 679 static void pSeries_power_off(void)
680 { 680 {
681 int rc; 681 int rc;
682 int rtas_poweroff_ups_token = rtas_token("ibm,power-off-ups"); 682 int rtas_poweroff_ups_token = rtas_token("ibm,power-off-ups");
683 683
684 if (rtas_flash_term_hook) 684 if (rtas_flash_term_hook)
685 rtas_flash_term_hook(SYS_POWER_OFF); 685 rtas_flash_term_hook(SYS_POWER_OFF);
686 686
687 if (rtas_poweron_auto == 0 || 687 if (rtas_poweron_auto == 0 ||
688 rtas_poweroff_ups_token == RTAS_UNKNOWN_SERVICE) { 688 rtas_poweroff_ups_token == RTAS_UNKNOWN_SERVICE) {
689 rc = rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1); 689 rc = rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1);
690 printk(KERN_INFO "RTAS power-off returned %d\n", rc); 690 printk(KERN_INFO "RTAS power-off returned %d\n", rc);
691 } else { 691 } else {
692 rc = rtas_call(rtas_poweroff_ups_token, 0, 1, NULL); 692 rc = rtas_call(rtas_poweroff_ups_token, 0, 1, NULL);
693 printk(KERN_INFO "RTAS ibm,power-off-ups returned %d\n", rc); 693 printk(KERN_INFO "RTAS ibm,power-off-ups returned %d\n", rc);
694 } 694 }
695 for (;;); 695 for (;;);
696 } 696 }
697 697
698 #ifndef CONFIG_PCI 698 #ifndef CONFIG_PCI
699 void pSeries_final_fixup(void) { } 699 void pSeries_final_fixup(void) { }
700 #endif 700 #endif
701 701
702 define_machine(pseries) { 702 define_machine(pseries) {
703 .name = "pSeries", 703 .name = "pSeries",
704 .probe = pSeries_probe, 704 .probe = pSeries_probe,
705 .setup_arch = pSeries_setup_arch, 705 .setup_arch = pSeries_setup_arch,
706 .init_early = pSeries_init_early, 706 .init_early = pSeries_init_early,
707 .show_cpuinfo = pSeries_show_cpuinfo, 707 .show_cpuinfo = pSeries_show_cpuinfo,
708 .log_error = pSeries_log_error, 708 .log_error = pSeries_log_error,
709 .pcibios_fixup = pSeries_final_fixup, 709 .pcibios_fixup = pSeries_final_fixup,
710 .pci_probe_mode = pSeries_pci_probe_mode, 710 .pci_probe_mode = pSeries_pci_probe_mode,
711 .restart = rtas_restart, 711 .restart = rtas_restart,
712 .power_off = pSeries_power_off, 712 .power_off = pSeries_power_off,
713 .halt = rtas_halt, 713 .halt = rtas_halt,
714 .panic = rtas_os_term, 714 .panic = rtas_os_term,
715 .get_boot_time = rtas_get_boot_time, 715 .get_boot_time = rtas_get_boot_time,
716 .get_rtc_time = rtas_get_rtc_time, 716 .get_rtc_time = rtas_get_rtc_time,
717 .set_rtc_time = rtas_set_rtc_time, 717 .set_rtc_time = rtas_set_rtc_time,
718 .calibrate_decr = generic_calibrate_decr, 718 .calibrate_decr = generic_calibrate_decr,
719 .progress = rtas_progress, 719 .progress = rtas_progress,
720 .system_reset_exception = pSeries_system_reset_exception, 720 .system_reset_exception = pSeries_system_reset_exception,
721 .machine_check_exception = pSeries_machine_check_exception, 721 .machine_check_exception = pSeries_machine_check_exception,
722 }; 722 };
723 723
arch/powerpc/sysdev/bestcomm/sram.c
1 /* 1 /*
2 * Simple memory allocator for on-board SRAM 2 * Simple memory allocator for on-board SRAM
3 * 3 *
4 * 4 *
5 * Maintainer : Sylvain Munaut <tnt@246tNt.com> 5 * Maintainer : Sylvain Munaut <tnt@246tNt.com>
6 * 6 *
7 * Copyright (C) 2005 Sylvain Munaut <tnt@246tNt.com> 7 * Copyright (C) 2005 Sylvain Munaut <tnt@246tNt.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public License 9 * This file is licensed under the terms of the GNU General Public License
10 * version 2. This program is licensed "as is" without any warranty of any 10 * version 2. This program is licensed "as is" without any warranty of any
11 * kind, whether express or implied. 11 * kind, whether express or implied.
12 */ 12 */
13 13
14 #include <linux/err.h> 14 #include <linux/err.h>
15 #include <linux/kernel.h> 15 #include <linux/kernel.h>
16 #include <linux/module.h> 16 #include <linux/export.h>
17 #include <linux/slab.h> 17 #include <linux/slab.h>
18 #include <linux/spinlock.h> 18 #include <linux/spinlock.h>
19 #include <linux/string.h> 19 #include <linux/string.h>
20 #include <linux/ioport.h> 20 #include <linux/ioport.h>
21 #include <linux/of.h> 21 #include <linux/of.h>
22 22
23 #include <asm/io.h> 23 #include <asm/io.h>
24 #include <asm/mmu.h> 24 #include <asm/mmu.h>
25 25
26 #include "sram.h" 26 #include "sram.h"
27 27
28 28
29 /* Struct keeping our 'state' */ 29 /* Struct keeping our 'state' */
30 struct bcom_sram *bcom_sram = NULL; 30 struct bcom_sram *bcom_sram = NULL;
31 EXPORT_SYMBOL_GPL(bcom_sram); /* needed for inline functions */ 31 EXPORT_SYMBOL_GPL(bcom_sram); /* needed for inline functions */
32 32
33 33
34 /* ======================================================================== */ 34 /* ======================================================================== */
35 /* Public API */ 35 /* Public API */
36 /* ======================================================================== */ 36 /* ======================================================================== */
37 /* DO NOT USE in interrupts, if needed in irq handler, we should use the 37 /* DO NOT USE in interrupts, if needed in irq handler, we should use the
38 _irqsave version of the spin_locks */ 38 _irqsave version of the spin_locks */
39 39
40 int bcom_sram_init(struct device_node *sram_node, char *owner) 40 int bcom_sram_init(struct device_node *sram_node, char *owner)
41 { 41 {
42 int rv; 42 int rv;
43 const u32 *regaddr_p; 43 const u32 *regaddr_p;
44 u64 regaddr64, size64; 44 u64 regaddr64, size64;
45 unsigned int psize; 45 unsigned int psize;
46 46
47 /* Create our state struct */ 47 /* Create our state struct */
48 if (bcom_sram) { 48 if (bcom_sram) {
49 printk(KERN_ERR "%s: bcom_sram_init: " 49 printk(KERN_ERR "%s: bcom_sram_init: "
50 "Already initialized !\n", owner); 50 "Already initialized !\n", owner);
51 return -EBUSY; 51 return -EBUSY;
52 } 52 }
53 53
54 bcom_sram = kmalloc(sizeof(struct bcom_sram), GFP_KERNEL); 54 bcom_sram = kmalloc(sizeof(struct bcom_sram), GFP_KERNEL);
55 if (!bcom_sram) { 55 if (!bcom_sram) {
56 printk(KERN_ERR "%s: bcom_sram_init: " 56 printk(KERN_ERR "%s: bcom_sram_init: "
57 "Couldn't allocate internal state !\n", owner); 57 "Couldn't allocate internal state !\n", owner);
58 return -ENOMEM; 58 return -ENOMEM;
59 } 59 }
60 60
61 /* Get address and size of the sram */ 61 /* Get address and size of the sram */
62 regaddr_p = of_get_address(sram_node, 0, &size64, NULL); 62 regaddr_p = of_get_address(sram_node, 0, &size64, NULL);
63 if (!regaddr_p) { 63 if (!regaddr_p) {
64 printk(KERN_ERR "%s: bcom_sram_init: " 64 printk(KERN_ERR "%s: bcom_sram_init: "
65 "Invalid device node !\n", owner); 65 "Invalid device node !\n", owner);
66 rv = -EINVAL; 66 rv = -EINVAL;
67 goto error_free; 67 goto error_free;
68 } 68 }
69 69
70 regaddr64 = of_translate_address(sram_node, regaddr_p); 70 regaddr64 = of_translate_address(sram_node, regaddr_p);
71 71
72 bcom_sram->base_phys = (phys_addr_t) regaddr64; 72 bcom_sram->base_phys = (phys_addr_t) regaddr64;
73 bcom_sram->size = (unsigned int) size64; 73 bcom_sram->size = (unsigned int) size64;
74 74
75 /* Request region */ 75 /* Request region */
76 if (!request_mem_region(bcom_sram->base_phys, bcom_sram->size, owner)) { 76 if (!request_mem_region(bcom_sram->base_phys, bcom_sram->size, owner)) {
77 printk(KERN_ERR "%s: bcom_sram_init: " 77 printk(KERN_ERR "%s: bcom_sram_init: "
78 "Couldn't request region !\n", owner); 78 "Couldn't request region !\n", owner);
79 rv = -EBUSY; 79 rv = -EBUSY;
80 goto error_free; 80 goto error_free;
81 } 81 }
82 82
83 /* Map SRAM */ 83 /* Map SRAM */
84 /* sram is not really __iomem */ 84 /* sram is not really __iomem */
85 bcom_sram->base_virt = (void*) ioremap(bcom_sram->base_phys, bcom_sram->size); 85 bcom_sram->base_virt = (void*) ioremap(bcom_sram->base_phys, bcom_sram->size);
86 86
87 if (!bcom_sram->base_virt) { 87 if (!bcom_sram->base_virt) {
88 printk(KERN_ERR "%s: bcom_sram_init: " 88 printk(KERN_ERR "%s: bcom_sram_init: "
89 "Map error SRAM zone 0x%08lx (0x%0x)!\n", 89 "Map error SRAM zone 0x%08lx (0x%0x)!\n",
90 owner, (long)bcom_sram->base_phys, bcom_sram->size ); 90 owner, (long)bcom_sram->base_phys, bcom_sram->size );
91 rv = -ENOMEM; 91 rv = -ENOMEM;
92 goto error_release; 92 goto error_release;
93 } 93 }
94 94
95 /* Create an rheap (defaults to 32 bits word alignment) */ 95 /* Create an rheap (defaults to 32 bits word alignment) */
96 bcom_sram->rh = rh_create(4); 96 bcom_sram->rh = rh_create(4);
97 97
98 /* Attach the free zones */ 98 /* Attach the free zones */
99 #if 0 99 #if 0
100 /* Currently disabled ... for future use only */ 100 /* Currently disabled ... for future use only */
101 reg_addr_p = of_get_property(sram_node, "available", &psize); 101 reg_addr_p = of_get_property(sram_node, "available", &psize);
102 #else 102 #else
103 regaddr_p = NULL; 103 regaddr_p = NULL;
104 psize = 0; 104 psize = 0;
105 #endif 105 #endif
106 106
107 if (!regaddr_p || !psize) { 107 if (!regaddr_p || !psize) {
108 /* Attach the whole zone */ 108 /* Attach the whole zone */
109 rh_attach_region(bcom_sram->rh, 0, bcom_sram->size); 109 rh_attach_region(bcom_sram->rh, 0, bcom_sram->size);
110 } else { 110 } else {
111 /* Attach each zone independently */ 111 /* Attach each zone independently */
112 while (psize >= 2 * sizeof(u32)) { 112 while (psize >= 2 * sizeof(u32)) {
113 phys_addr_t zbase = of_translate_address(sram_node, regaddr_p); 113 phys_addr_t zbase = of_translate_address(sram_node, regaddr_p);
114 rh_attach_region(bcom_sram->rh, zbase - bcom_sram->base_phys, regaddr_p[1]); 114 rh_attach_region(bcom_sram->rh, zbase - bcom_sram->base_phys, regaddr_p[1]);
115 regaddr_p += 2; 115 regaddr_p += 2;
116 psize -= 2 * sizeof(u32); 116 psize -= 2 * sizeof(u32);
117 } 117 }
118 } 118 }
119 119
120 /* Init our spinlock */ 120 /* Init our spinlock */
121 spin_lock_init(&bcom_sram->lock); 121 spin_lock_init(&bcom_sram->lock);
122 122
123 return 0; 123 return 0;
124 124
125 error_release: 125 error_release:
126 release_mem_region(bcom_sram->base_phys, bcom_sram->size); 126 release_mem_region(bcom_sram->base_phys, bcom_sram->size);
127 error_free: 127 error_free:
128 kfree(bcom_sram); 128 kfree(bcom_sram);
129 bcom_sram = NULL; 129 bcom_sram = NULL;
130 130
131 return rv; 131 return rv;
132 } 132 }
133 EXPORT_SYMBOL_GPL(bcom_sram_init); 133 EXPORT_SYMBOL_GPL(bcom_sram_init);
134 134
135 void bcom_sram_cleanup(void) 135 void bcom_sram_cleanup(void)
136 { 136 {
137 /* Free resources */ 137 /* Free resources */
138 if (bcom_sram) { 138 if (bcom_sram) {
139 rh_destroy(bcom_sram->rh); 139 rh_destroy(bcom_sram->rh);
140 iounmap((void __iomem *)bcom_sram->base_virt); 140 iounmap((void __iomem *)bcom_sram->base_virt);
141 release_mem_region(bcom_sram->base_phys, bcom_sram->size); 141 release_mem_region(bcom_sram->base_phys, bcom_sram->size);
142 kfree(bcom_sram); 142 kfree(bcom_sram);
143 bcom_sram = NULL; 143 bcom_sram = NULL;
144 } 144 }
145 } 145 }
146 EXPORT_SYMBOL_GPL(bcom_sram_cleanup); 146 EXPORT_SYMBOL_GPL(bcom_sram_cleanup);
147 147
148 void* bcom_sram_alloc(int size, int align, phys_addr_t *phys) 148 void* bcom_sram_alloc(int size, int align, phys_addr_t *phys)
149 { 149 {
150 unsigned long offset; 150 unsigned long offset;
151 151
152 spin_lock(&bcom_sram->lock); 152 spin_lock(&bcom_sram->lock);
153 offset = rh_alloc_align(bcom_sram->rh, size, align, NULL); 153 offset = rh_alloc_align(bcom_sram->rh, size, align, NULL);
154 spin_unlock(&bcom_sram->lock); 154 spin_unlock(&bcom_sram->lock);
155 155
156 if (IS_ERR_VALUE(offset)) 156 if (IS_ERR_VALUE(offset))
157 return NULL; 157 return NULL;
158 158
159 *phys = bcom_sram->base_phys + offset; 159 *phys = bcom_sram->base_phys + offset;
160 return bcom_sram->base_virt + offset; 160 return bcom_sram->base_virt + offset;
161 } 161 }
162 EXPORT_SYMBOL_GPL(bcom_sram_alloc); 162 EXPORT_SYMBOL_GPL(bcom_sram_alloc);
163 163
164 void bcom_sram_free(void *ptr) 164 void bcom_sram_free(void *ptr)
165 { 165 {
166 unsigned long offset; 166 unsigned long offset;
167 167
168 if (!ptr) 168 if (!ptr)
169 return; 169 return;
170 170
171 offset = ptr - bcom_sram->base_virt; 171 offset = ptr - bcom_sram->base_virt;
172 172
173 spin_lock(&bcom_sram->lock); 173 spin_lock(&bcom_sram->lock);
174 rh_free(bcom_sram->rh, offset); 174 rh_free(bcom_sram->rh, offset);
175 spin_unlock(&bcom_sram->lock); 175 spin_unlock(&bcom_sram->lock);
176 } 176 }
177 EXPORT_SYMBOL_GPL(bcom_sram_free); 177 EXPORT_SYMBOL_GPL(bcom_sram_free);
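
A minimal caller sketch for the four exported functions above. It assumes the driver's local "sram.h" header, an mpc5200-style SRAM node, and illustrative names ("fsl,mpc5200-sram", bcom_sram_demo); the allocator itself is exactly the code shown in this file.

	#include <linux/of.h>
	#include "sram.h"	/* local header, as used by this driver */

	static int bcom_sram_demo(void)
	{
		struct device_node *np;
		phys_addr_t phys;
		void *desc;
		int ret;

		/* Compatible string is illustrative only. */
		np = of_find_compatible_node(NULL, NULL, "fsl,mpc5200-sram");
		if (!np)
			return -ENODEV;

		ret = bcom_sram_init(np, "bcom_sram_demo");
		of_node_put(np);
		if (ret)
			return ret;

		/* 256 bytes, 32-byte aligned; 'phys' is what a DMA engine would use. */
		desc = bcom_sram_alloc(256, 32, &phys);
		if (!desc) {
			bcom_sram_cleanup();
			return -ENOMEM;
		}

		/* ... hand 'phys' to the BestComm task, access 'desc' from the CPU ... */

		bcom_sram_free(desc);
		bcom_sram_cleanup();
		return 0;
	}
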
178 178
179 179
arch/powerpc/sysdev/fsl_lbc.c
1 /* 1 /*
2 * Freescale LBC and UPM routines. 2 * Freescale LBC and UPM routines.
3 * 3 *
4 * Copyright © 2007-2008 MontaVista Software, Inc. 4 * Copyright © 2007-2008 MontaVista Software, Inc.
5 * Copyright © 2010 Freescale Semiconductor 5 * Copyright © 2010 Freescale Semiconductor
6 * 6 *
7 * Author: Anton Vorontsov <avorontsov@ru.mvista.com> 7 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
8 * Author: Jack Lan <Jack.Lan@freescale.com> 8 * Author: Jack Lan <Jack.Lan@freescale.com>
9 * Author: Roy Zang <tie-fei.zang@freescale.com> 9 * Author: Roy Zang <tie-fei.zang@freescale.com>
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or 13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version. 14 * (at your option) any later version.
15 */ 15 */
16 16
17 #include <linux/init.h> 17 #include <linux/init.h>
18 #include <linux/module.h> 18 #include <linux/export.h>
19 #include <linux/kernel.h> 19 #include <linux/kernel.h>
20 #include <linux/compiler.h> 20 #include <linux/compiler.h>
21 #include <linux/spinlock.h> 21 #include <linux/spinlock.h>
22 #include <linux/types.h> 22 #include <linux/types.h>
23 #include <linux/io.h> 23 #include <linux/io.h>
24 #include <linux/of.h> 24 #include <linux/of.h>
25 #include <linux/slab.h> 25 #include <linux/slab.h>
26 #include <linux/sched.h> 26 #include <linux/sched.h>
27 #include <linux/platform_device.h> 27 #include <linux/platform_device.h>
28 #include <linux/interrupt.h> 28 #include <linux/interrupt.h>
29 #include <linux/mod_devicetable.h> 29 #include <linux/mod_devicetable.h>
30 #include <asm/prom.h> 30 #include <asm/prom.h>
31 #include <asm/fsl_lbc.h> 31 #include <asm/fsl_lbc.h>
32 32
33 static spinlock_t fsl_lbc_lock = __SPIN_LOCK_UNLOCKED(fsl_lbc_lock); 33 static spinlock_t fsl_lbc_lock = __SPIN_LOCK_UNLOCKED(fsl_lbc_lock);
34 struct fsl_lbc_ctrl *fsl_lbc_ctrl_dev; 34 struct fsl_lbc_ctrl *fsl_lbc_ctrl_dev;
35 EXPORT_SYMBOL(fsl_lbc_ctrl_dev); 35 EXPORT_SYMBOL(fsl_lbc_ctrl_dev);
36 36
37 /** 37 /**
38 * fsl_lbc_addr - convert the base address 38 * fsl_lbc_addr - convert the base address
39 * @addr_base: base address of the memory bank 39 * @addr_base: base address of the memory bank
40 * 40 *
41 * This function converts a base address of lbc into the right format for the 41 * This function converts a base address of lbc into the right format for the
42 * BR register. If the SOC has eLBC then it returns 32bit physical address 42 * BR register. If the SOC has eLBC then it returns 32bit physical address
43 * else it converts a 34bit local bus physical address to the correct 43 * else it converts a 34bit local bus physical address to the correct
44 * format of a 32bit address for the BR register (Example: MPC8641). 44 * format of a 32bit address for the BR register (Example: MPC8641).
45 */ 45 */
46 u32 fsl_lbc_addr(phys_addr_t addr_base) 46 u32 fsl_lbc_addr(phys_addr_t addr_base)
47 { 47 {
48 struct device_node *np = fsl_lbc_ctrl_dev->dev->of_node; 48 struct device_node *np = fsl_lbc_ctrl_dev->dev->of_node;
49 u32 addr = addr_base & 0xffff8000; 49 u32 addr = addr_base & 0xffff8000;
50 50
51 if (of_device_is_compatible(np, "fsl,elbc")) 51 if (of_device_is_compatible(np, "fsl,elbc"))
52 return addr; 52 return addr;
53 53
54 return addr | ((addr_base & 0x300000000ull) >> 19); 54 return addr | ((addr_base & 0x300000000ull) >> 19);
55 } 55 }
56 EXPORT_SYMBOL(fsl_lbc_addr); 56 EXPORT_SYMBOL(fsl_lbc_addr);
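
A worked example of the conversion above may help. On a controller that is not "fsl,elbc"-compatible (e.g. MPC8641), and assuming the LBC has already been probed so fsl_lbc_ctrl_dev is valid, the two top bits of a 34-bit local bus address are folded into BR[14:13]; the input constant below is illustrative.

	/* 0x3_f800_0000: the low 32 bits masked with 0xffff8000 give 0xf8000000,
	 * physical address bits 33:32 (0b11) shift right by 19 to 0x6000,
	 * so the BR base field becomes 0xf8006000. */
	u32 br_base = fsl_lbc_addr(0x3f8000000ULL);	/* == 0xf8006000 */
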
57 57
58 /** 58 /**
59 * fsl_lbc_find - find Localbus bank 59 * fsl_lbc_find - find Localbus bank
60 * @addr_base: base address of the memory bank 60 * @addr_base: base address of the memory bank
61 * 61 *
62 * This function walks LBC banks comparing "Base address" field of the BR 62 * This function walks LBC banks comparing "Base address" field of the BR
63 * registers with the supplied addr_base argument. When bases match this 63 * registers with the supplied addr_base argument. When bases match this
64 * function returns bank number (starting with 0), otherwise it returns 64 * function returns bank number (starting with 0), otherwise it returns
65 * appropriate errno value. 65 * appropriate errno value.
66 */ 66 */
67 int fsl_lbc_find(phys_addr_t addr_base) 67 int fsl_lbc_find(phys_addr_t addr_base)
68 { 68 {
69 int i; 69 int i;
70 struct fsl_lbc_regs __iomem *lbc; 70 struct fsl_lbc_regs __iomem *lbc;
71 71
72 if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs) 72 if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
73 return -ENODEV; 73 return -ENODEV;
74 74
75 lbc = fsl_lbc_ctrl_dev->regs; 75 lbc = fsl_lbc_ctrl_dev->regs;
76 for (i = 0; i < ARRAY_SIZE(lbc->bank); i++) { 76 for (i = 0; i < ARRAY_SIZE(lbc->bank); i++) {
77 __be32 br = in_be32(&lbc->bank[i].br); 77 __be32 br = in_be32(&lbc->bank[i].br);
78 __be32 or = in_be32(&lbc->bank[i].or); 78 __be32 or = in_be32(&lbc->bank[i].or);
79 79
80 if (br & BR_V && (br & or & BR_BA) == fsl_lbc_addr(addr_base)) 80 if (br & BR_V && (br & or & BR_BA) == fsl_lbc_addr(addr_base))
81 return i; 81 return i;
82 } 82 }
83 83
84 return -ENOENT; 84 return -ENOENT;
85 } 85 }
86 EXPORT_SYMBOL(fsl_lbc_find); 86 EXPORT_SYMBOL(fsl_lbc_find);
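
For instance, a caller can use this to confirm that firmware actually programmed a bank for a device before touching it (a sketch; the address is illustrative):

	int bank = fsl_lbc_find(0xff800000);	/* e.g. a boot flash base */
	if (bank < 0)
		pr_warn("no localbus bank covers 0xff800000 (%d)\n", bank);
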
87 87
88 /** 88 /**
89 * fsl_upm_find - find pre-programmed UPM via base address 89 * fsl_upm_find - find pre-programmed UPM via base address
90 * @addr_base: base address of the memory bank controlled by the UPM 90 * @addr_base: base address of the memory bank controlled by the UPM
91 * @upm: pointer to the allocated fsl_upm structure 91 * @upm: pointer to the allocated fsl_upm structure
92 * 92 *
93 * This function fills fsl_upm structure so you can use it with the rest of 93 * This function fills fsl_upm structure so you can use it with the rest of
94 * UPM API. On success this function returns 0, otherwise it returns 94 * UPM API. On success this function returns 0, otherwise it returns
95 * appropriate errno value. 95 * appropriate errno value.
96 */ 96 */
97 int fsl_upm_find(phys_addr_t addr_base, struct fsl_upm *upm) 97 int fsl_upm_find(phys_addr_t addr_base, struct fsl_upm *upm)
98 { 98 {
99 int bank; 99 int bank;
100 __be32 br; 100 __be32 br;
101 struct fsl_lbc_regs __iomem *lbc; 101 struct fsl_lbc_regs __iomem *lbc;
102 102
103 bank = fsl_lbc_find(addr_base); 103 bank = fsl_lbc_find(addr_base);
104 if (bank < 0) 104 if (bank < 0)
105 return bank; 105 return bank;
106 106
107 if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs) 107 if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
108 return -ENODEV; 108 return -ENODEV;
109 109
110 lbc = fsl_lbc_ctrl_dev->regs; 110 lbc = fsl_lbc_ctrl_dev->regs;
111 br = in_be32(&lbc->bank[bank].br); 111 br = in_be32(&lbc->bank[bank].br);
112 112
113 switch (br & BR_MSEL) { 113 switch (br & BR_MSEL) {
114 case BR_MS_UPMA: 114 case BR_MS_UPMA:
115 upm->mxmr = &lbc->mamr; 115 upm->mxmr = &lbc->mamr;
116 break; 116 break;
117 case BR_MS_UPMB: 117 case BR_MS_UPMB:
118 upm->mxmr = &lbc->mbmr; 118 upm->mxmr = &lbc->mbmr;
119 break; 119 break;
120 case BR_MS_UPMC: 120 case BR_MS_UPMC:
121 upm->mxmr = &lbc->mcmr; 121 upm->mxmr = &lbc->mcmr;
122 break; 122 break;
123 default: 123 default:
124 return -EINVAL; 124 return -EINVAL;
125 } 125 }
126 126
127 switch (br & BR_PS) { 127 switch (br & BR_PS) {
128 case BR_PS_8: 128 case BR_PS_8:
129 upm->width = 8; 129 upm->width = 8;
130 break; 130 break;
131 case BR_PS_16: 131 case BR_PS_16:
132 upm->width = 16; 132 upm->width = 16;
133 break; 133 break;
134 case BR_PS_32: 134 case BR_PS_32:
135 upm->width = 32; 135 upm->width = 32;
136 break; 136 break;
137 default: 137 default:
138 return -EINVAL; 138 return -EINVAL;
139 } 139 }
140 140
141 return 0; 141 return 0;
142 } 142 }
143 EXPORT_SYMBOL(fsl_upm_find); 143 EXPORT_SYMBOL(fsl_upm_find);
144 144
145 /** 145 /**
146 * fsl_upm_run_pattern - actually run an UPM pattern 146 * fsl_upm_run_pattern - actually run an UPM pattern
147 * @upm: pointer to the fsl_upm structure obtained via fsl_upm_find 147 * @upm: pointer to the fsl_upm structure obtained via fsl_upm_find
148 * @io_base: remapped pointer to where memory access should happen 148 * @io_base: remapped pointer to where memory access should happen
149 * @mar: MAR register content during pattern execution 149 * @mar: MAR register content during pattern execution
150 * 150 *
151 * This function triggers a dummy write to the memory specified by io_base, 151 * This function triggers a dummy write to the memory specified by io_base,
152 * which causes the UPM pattern to actually execute. Note that mar usage depends on the 152 * which causes the UPM pattern to actually execute. Note that mar usage depends on the
153 * pre-programmed AMX bits in the UPM RAM. 153 * pre-programmed AMX bits in the UPM RAM.
154 */ 154 */
155 int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base, u32 mar) 155 int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base, u32 mar)
156 { 156 {
157 int ret = 0; 157 int ret = 0;
158 unsigned long flags; 158 unsigned long flags;
159 159
160 if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs) 160 if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
161 return -ENODEV; 161 return -ENODEV;
162 162
163 spin_lock_irqsave(&fsl_lbc_lock, flags); 163 spin_lock_irqsave(&fsl_lbc_lock, flags);
164 164
165 out_be32(&fsl_lbc_ctrl_dev->regs->mar, mar); 165 out_be32(&fsl_lbc_ctrl_dev->regs->mar, mar);
166 166
167 switch (upm->width) { 167 switch (upm->width) {
168 case 8: 168 case 8:
169 out_8(io_base, 0x0); 169 out_8(io_base, 0x0);
170 break; 170 break;
171 case 16: 171 case 16:
172 out_be16(io_base, 0x0); 172 out_be16(io_base, 0x0);
173 break; 173 break;
174 case 32: 174 case 32:
175 out_be32(io_base, 0x0); 175 out_be32(io_base, 0x0);
176 break; 176 break;
177 default: 177 default:
178 ret = -EINVAL; 178 ret = -EINVAL;
179 break; 179 break;
180 } 180 }
181 181
182 spin_unlock_irqrestore(&fsl_lbc_lock, flags); 182 spin_unlock_irqrestore(&fsl_lbc_lock, flags);
183 183
184 return ret; 184 return ret;
185 } 185 }
186 EXPORT_SYMBOL(fsl_upm_run_pattern); 186 EXPORT_SYMBOL(fsl_upm_run_pattern);
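
Putting the two UPM helpers together, a hedged usage sketch of how a chip driver would typically drive a pre-programmed UPM. It assumes fsl_upm_start_pattern()/fsl_upm_end_pattern() are the inline helpers from <asm/fsl_lbc.h> that switch the MxMR op field; the function name and parameters are illustrative.

	#include <linux/io.h>
	#include <asm/fsl_lbc.h>

	static int example_run_upm(phys_addr_t io_phys, void __iomem *io_base)
	{
		struct fsl_upm upm;
		int ret;

		/* Find which UPM (A/B/C) and port width the bank at io_phys uses. */
		ret = fsl_upm_find(io_phys, &upm);
		if (ret)
			return ret;

		/* Select pattern offset 0 in the UPM RAM, trigger it with MAR = 0,
		 * then put the machine back into normal read/write mode. */
		fsl_upm_start_pattern(&upm, 0);
		ret = fsl_upm_run_pattern(&upm, io_base, 0);
		fsl_upm_end_pattern(&upm);

		return ret;
	}
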
187 187
188 static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl, 188 static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl,
189 struct device_node *node) 189 struct device_node *node)
190 { 190 {
191 struct fsl_lbc_regs __iomem *lbc = ctrl->regs; 191 struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
192 192
193 /* clear event registers */ 193 /* clear event registers */
194 setbits32(&lbc->ltesr, LTESR_CLEAR); 194 setbits32(&lbc->ltesr, LTESR_CLEAR);
195 out_be32(&lbc->lteatr, 0); 195 out_be32(&lbc->lteatr, 0);
196 out_be32(&lbc->ltear, 0); 196 out_be32(&lbc->ltear, 0);
197 out_be32(&lbc->lteccr, LTECCR_CLEAR); 197 out_be32(&lbc->lteccr, LTECCR_CLEAR);
198 out_be32(&lbc->ltedr, LTEDR_ENABLE); 198 out_be32(&lbc->ltedr, LTEDR_ENABLE);
199 199
200 /* Set the monitor timeout value to the maximum for erratum A001 */ 200 /* Set the monitor timeout value to the maximum for erratum A001 */
201 if (of_device_is_compatible(node, "fsl,elbc")) 201 if (of_device_is_compatible(node, "fsl,elbc"))
202 clrsetbits_be32(&lbc->lbcr, LBCR_BMT, LBCR_BMTPS); 202 clrsetbits_be32(&lbc->lbcr, LBCR_BMT, LBCR_BMTPS);
203 203
204 return 0; 204 return 0;
205 } 205 }
206 206
207 /* 207 /*
208 * NOTE: This interrupt is used to report localbus events of various kinds, 208 * NOTE: This interrupt is used to report localbus events of various kinds,
209 * such as transaction errors on the chipselects. 209 * such as transaction errors on the chipselects.
210 */ 210 */
211 211
212 static irqreturn_t fsl_lbc_ctrl_irq(int irqno, void *data) 212 static irqreturn_t fsl_lbc_ctrl_irq(int irqno, void *data)
213 { 213 {
214 struct fsl_lbc_ctrl *ctrl = data; 214 struct fsl_lbc_ctrl *ctrl = data;
215 struct fsl_lbc_regs __iomem *lbc = ctrl->regs; 215 struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
216 u32 status; 216 u32 status;
217 217
218 status = in_be32(&lbc->ltesr); 218 status = in_be32(&lbc->ltesr);
219 if (!status) 219 if (!status)
220 return IRQ_NONE; 220 return IRQ_NONE;
221 221
222 out_be32(&lbc->ltesr, LTESR_CLEAR); 222 out_be32(&lbc->ltesr, LTESR_CLEAR);
223 out_be32(&lbc->lteatr, 0); 223 out_be32(&lbc->lteatr, 0);
224 out_be32(&lbc->ltear, 0); 224 out_be32(&lbc->ltear, 0);
225 ctrl->irq_status = status; 225 ctrl->irq_status = status;
226 226
227 if (status & LTESR_BM) 227 if (status & LTESR_BM)
228 dev_err(ctrl->dev, "Local bus monitor time-out: " 228 dev_err(ctrl->dev, "Local bus monitor time-out: "
229 "LTESR 0x%08X\n", status); 229 "LTESR 0x%08X\n", status);
230 if (status & LTESR_WP) 230 if (status & LTESR_WP)
231 dev_err(ctrl->dev, "Write protect error: " 231 dev_err(ctrl->dev, "Write protect error: "
232 "LTESR 0x%08X\n", status); 232 "LTESR 0x%08X\n", status);
233 if (status & LTESR_ATMW) 233 if (status & LTESR_ATMW)
234 dev_err(ctrl->dev, "Atomic write error: " 234 dev_err(ctrl->dev, "Atomic write error: "
235 "LTESR 0x%08X\n", status); 235 "LTESR 0x%08X\n", status);
236 if (status & LTESR_ATMR) 236 if (status & LTESR_ATMR)
237 dev_err(ctrl->dev, "Atomic read error: " 237 dev_err(ctrl->dev, "Atomic read error: "
238 "LTESR 0x%08X\n", status); 238 "LTESR 0x%08X\n", status);
239 if (status & LTESR_CS) 239 if (status & LTESR_CS)
240 dev_err(ctrl->dev, "Chip select error: " 240 dev_err(ctrl->dev, "Chip select error: "
241 "LTESR 0x%08X\n", status); 241 "LTESR 0x%08X\n", status);
242 if (status & LTESR_UPM) 242 if (status & LTESR_UPM)
243 ; 243 ;
244 if (status & LTESR_FCT) { 244 if (status & LTESR_FCT) {
245 dev_err(ctrl->dev, "FCM command time-out: " 245 dev_err(ctrl->dev, "FCM command time-out: "
246 "LTESR 0x%08X\n", status); 246 "LTESR 0x%08X\n", status);
247 smp_wmb(); 247 smp_wmb();
248 wake_up(&ctrl->irq_wait); 248 wake_up(&ctrl->irq_wait);
249 } 249 }
250 if (status & LTESR_PAR) { 250 if (status & LTESR_PAR) {
251 dev_err(ctrl->dev, "Parity or Uncorrectable ECC error: " 251 dev_err(ctrl->dev, "Parity or Uncorrectable ECC error: "
252 "LTESR 0x%08X\n", status); 252 "LTESR 0x%08X\n", status);
253 smp_wmb(); 253 smp_wmb();
254 wake_up(&ctrl->irq_wait); 254 wake_up(&ctrl->irq_wait);
255 } 255 }
256 if (status & LTESR_CC) { 256 if (status & LTESR_CC) {
257 smp_wmb(); 257 smp_wmb();
258 wake_up(&ctrl->irq_wait); 258 wake_up(&ctrl->irq_wait);
259 } 259 }
260 if (status & ~LTESR_MASK) 260 if (status & ~LTESR_MASK)
261 dev_err(ctrl->dev, "Unknown error: " 261 dev_err(ctrl->dev, "Unknown error: "
262 "LTESR 0x%08X\n", status); 262 "LTESR 0x%08X\n", status);
263 return IRQ_HANDLED; 263 return IRQ_HANDLED;
264 } 264 }
265 265
266 /* 266 /*
267 * fsl_lbc_ctrl_probe 267 * fsl_lbc_ctrl_probe
268 * 268 *
269 * called by the device layer when it finds a device matching 269 * called by the device layer when it finds a device matching
270 * one our driver can handle. This code allocates all of 270 * one our driver can handle. This code allocates all of
271 * the resources needed for the controller only. The 271 * the resources needed for the controller only. The
272 * resources for the NAND banks themselves are allocated 272 * resources for the NAND banks themselves are allocated
273 * in the chip probe function. 273 * in the chip probe function.
274 */ 274 */
275 275
276 static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev) 276 static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev)
277 { 277 {
278 int ret; 278 int ret;
279 279
280 if (!dev->dev.of_node) { 280 if (!dev->dev.of_node) {
281 dev_err(&dev->dev, "Device OF-Node is NULL"); 281 dev_err(&dev->dev, "Device OF-Node is NULL");
282 return -EFAULT; 282 return -EFAULT;
283 } 283 }
284 284
285 fsl_lbc_ctrl_dev = kzalloc(sizeof(*fsl_lbc_ctrl_dev), GFP_KERNEL); 285 fsl_lbc_ctrl_dev = kzalloc(sizeof(*fsl_lbc_ctrl_dev), GFP_KERNEL);
286 if (!fsl_lbc_ctrl_dev) 286 if (!fsl_lbc_ctrl_dev)
287 return -ENOMEM; 287 return -ENOMEM;
288 288
289 dev_set_drvdata(&dev->dev, fsl_lbc_ctrl_dev); 289 dev_set_drvdata(&dev->dev, fsl_lbc_ctrl_dev);
290 290
291 spin_lock_init(&fsl_lbc_ctrl_dev->lock); 291 spin_lock_init(&fsl_lbc_ctrl_dev->lock);
292 init_waitqueue_head(&fsl_lbc_ctrl_dev->irq_wait); 292 init_waitqueue_head(&fsl_lbc_ctrl_dev->irq_wait);
293 293
294 fsl_lbc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0); 294 fsl_lbc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0);
295 if (!fsl_lbc_ctrl_dev->regs) { 295 if (!fsl_lbc_ctrl_dev->regs) {
296 dev_err(&dev->dev, "failed to get memory region\n"); 296 dev_err(&dev->dev, "failed to get memory region\n");
297 ret = -ENODEV; 297 ret = -ENODEV;
298 goto err; 298 goto err;
299 } 299 }
300 300
301 fsl_lbc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0); 301 fsl_lbc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
302 if (fsl_lbc_ctrl_dev->irq == NO_IRQ) { 302 if (fsl_lbc_ctrl_dev->irq == NO_IRQ) {
303 dev_err(&dev->dev, "failed to get irq resource\n"); 303 dev_err(&dev->dev, "failed to get irq resource\n");
304 ret = -ENODEV; 304 ret = -ENODEV;
305 goto err; 305 goto err;
306 } 306 }
307 307
308 fsl_lbc_ctrl_dev->dev = &dev->dev; 308 fsl_lbc_ctrl_dev->dev = &dev->dev;
309 309
310 ret = fsl_lbc_ctrl_init(fsl_lbc_ctrl_dev, dev->dev.of_node); 310 ret = fsl_lbc_ctrl_init(fsl_lbc_ctrl_dev, dev->dev.of_node);
311 if (ret < 0) 311 if (ret < 0)
312 goto err; 312 goto err;
313 313
314 ret = request_irq(fsl_lbc_ctrl_dev->irq, fsl_lbc_ctrl_irq, 0, 314 ret = request_irq(fsl_lbc_ctrl_dev->irq, fsl_lbc_ctrl_irq, 0,
315 "fsl-lbc", fsl_lbc_ctrl_dev); 315 "fsl-lbc", fsl_lbc_ctrl_dev);
316 if (ret != 0) { 316 if (ret != 0) {
317 dev_err(&dev->dev, "failed to install irq (%d)\n", 317 dev_err(&dev->dev, "failed to install irq (%d)\n",
318 fsl_lbc_ctrl_dev->irq); 318 fsl_lbc_ctrl_dev->irq);
319 ret = fsl_lbc_ctrl_dev->irq; 319 ret = fsl_lbc_ctrl_dev->irq;
320 goto err; 320 goto err;
321 } 321 }
322 322
323 /* Enable interrupts for any detected events */ 323 /* Enable interrupts for any detected events */
324 out_be32(&fsl_lbc_ctrl_dev->regs->lteir, LTEIR_ENABLE); 324 out_be32(&fsl_lbc_ctrl_dev->regs->lteir, LTEIR_ENABLE);
325 325
326 return 0; 326 return 0;
327 327
328 err: 328 err:
329 iounmap(fsl_lbc_ctrl_dev->regs); 329 iounmap(fsl_lbc_ctrl_dev->regs);
330 kfree(fsl_lbc_ctrl_dev); 330 kfree(fsl_lbc_ctrl_dev);
331 return ret; 331 return ret;
332 } 332 }
333 333
334 static const struct of_device_id fsl_lbc_match[] = { 334 static const struct of_device_id fsl_lbc_match[] = {
335 { .compatible = "fsl,elbc", }, 335 { .compatible = "fsl,elbc", },
336 { .compatible = "fsl,pq3-localbus", }, 336 { .compatible = "fsl,pq3-localbus", },
337 { .compatible = "fsl,pq2-localbus", }, 337 { .compatible = "fsl,pq2-localbus", },
338 { .compatible = "fsl,pq2pro-localbus", }, 338 { .compatible = "fsl,pq2pro-localbus", },
339 {}, 339 {},
340 }; 340 };
341 341
342 static struct platform_driver fsl_lbc_ctrl_driver = { 342 static struct platform_driver fsl_lbc_ctrl_driver = {
343 .driver = { 343 .driver = {
344 .name = "fsl-lbc", 344 .name = "fsl-lbc",
345 .of_match_table = fsl_lbc_match, 345 .of_match_table = fsl_lbc_match,
346 }, 346 },
347 .probe = fsl_lbc_ctrl_probe, 347 .probe = fsl_lbc_ctrl_probe,
348 }; 348 };
349 349
350 static int __init fsl_lbc_init(void) 350 static int __init fsl_lbc_init(void)
351 { 351 {
352 return platform_driver_register(&fsl_lbc_ctrl_driver); 352 return platform_driver_register(&fsl_lbc_ctrl_driver);
353 } 353 }
354 module_init(fsl_lbc_init); 354 module_init(fsl_lbc_init);
355 355
arch/powerpc/sysdev/fsl_rio.c
1 /* 1 /*
2 * Freescale MPC85xx/MPC86xx RapidIO support 2 * Freescale MPC85xx/MPC86xx RapidIO support
3 * 3 *
4 * Copyright 2009 Sysgo AG 4 * Copyright 2009 Sysgo AG
5 * Thomas Moll <thomas.moll@sysgo.com> 5 * Thomas Moll <thomas.moll@sysgo.com>
6 * - fixed maintenance access routines, check for aligned access 6 * - fixed maintenance access routines, check for aligned access
7 * 7 *
8 * Copyright 2009 Integrated Device Technology, Inc. 8 * Copyright 2009 Integrated Device Technology, Inc.
9 * Alex Bounine <alexandre.bounine@idt.com> 9 * Alex Bounine <alexandre.bounine@idt.com>
10 * - Added Port-Write message handling 10 * - Added Port-Write message handling
11 * - Added Machine Check exception handling 11 * - Added Machine Check exception handling
12 * 12 *
13 * Copyright (C) 2007, 2008, 2010 Freescale Semiconductor, Inc. 13 * Copyright (C) 2007, 2008, 2010 Freescale Semiconductor, Inc.
14 * Zhang Wei <wei.zhang@freescale.com> 14 * Zhang Wei <wei.zhang@freescale.com>
15 * 15 *
16 * Copyright 2005 MontaVista Software, Inc. 16 * Copyright 2005 MontaVista Software, Inc.
17 * Matt Porter <mporter@kernel.crashing.org> 17 * Matt Porter <mporter@kernel.crashing.org>
18 * 18 *
19 * This program is free software; you can redistribute it and/or modify it 19 * This program is free software; you can redistribute it and/or modify it
20 * under the terms of the GNU General Public License as published by the 20 * under the terms of the GNU General Public License as published by the
21 * Free Software Foundation; either version 2 of the License, or (at your 21 * Free Software Foundation; either version 2 of the License, or (at your
22 * option) any later version. 22 * option) any later version.
23 */ 23 */
24 24
25 #include <linux/init.h> 25 #include <linux/init.h>
26 #include <linux/module.h> 26 #include <linux/export.h>
27 #include <linux/types.h> 27 #include <linux/types.h>
28 #include <linux/dma-mapping.h> 28 #include <linux/dma-mapping.h>
29 #include <linux/interrupt.h> 29 #include <linux/interrupt.h>
30 #include <linux/device.h> 30 #include <linux/device.h>
31 #include <linux/rio.h> 31 #include <linux/rio.h>
32 #include <linux/rio_drv.h> 32 #include <linux/rio_drv.h>
33 #include <linux/of_platform.h> 33 #include <linux/of_platform.h>
34 #include <linux/delay.h> 34 #include <linux/delay.h>
35 #include <linux/slab.h> 35 #include <linux/slab.h>
36 #include <linux/kfifo.h> 36 #include <linux/kfifo.h>
37 37
38 #include <asm/io.h> 38 #include <asm/io.h>
39 #include <asm/machdep.h> 39 #include <asm/machdep.h>
40 #include <asm/uaccess.h> 40 #include <asm/uaccess.h>
41 41
42 #undef DEBUG_PW /* Port-Write debugging */ 42 #undef DEBUG_PW /* Port-Write debugging */
43 43
44 /* RapidIO IRQs, as read from the OF device tree */ 44 /* RapidIO IRQs, as read from the OF device tree */
45 #define IRQ_RIO_BELL(m) (((struct rio_priv *)(m->priv))->bellirq) 45 #define IRQ_RIO_BELL(m) (((struct rio_priv *)(m->priv))->bellirq)
46 #define IRQ_RIO_TX(m) (((struct rio_priv *)(m->priv))->txirq) 46 #define IRQ_RIO_TX(m) (((struct rio_priv *)(m->priv))->txirq)
47 #define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq) 47 #define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq)
48 #define IRQ_RIO_PW(m) (((struct rio_priv *)(m->priv))->pwirq) 48 #define IRQ_RIO_PW(m) (((struct rio_priv *)(m->priv))->pwirq)
49 49
50 #define IPWSR_CLEAR 0x98 50 #define IPWSR_CLEAR 0x98
51 #define OMSR_CLEAR 0x1cb3 51 #define OMSR_CLEAR 0x1cb3
52 #define IMSR_CLEAR 0x491 52 #define IMSR_CLEAR 0x491
53 #define IDSR_CLEAR 0x91 53 #define IDSR_CLEAR 0x91
54 #define ODSR_CLEAR 0x1c00 54 #define ODSR_CLEAR 0x1c00
55 #define LTLEECSR_ENABLE_ALL 0xFFC000FC 55 #define LTLEECSR_ENABLE_ALL 0xFFC000FC
56 #define ESCSR_CLEAR 0x07120204 56 #define ESCSR_CLEAR 0x07120204
57 #define IECSR_CLEAR 0x80000000 57 #define IECSR_CLEAR 0x80000000
58 58
59 #define RIO_PORT1_EDCSR 0x0640 59 #define RIO_PORT1_EDCSR 0x0640
60 #define RIO_PORT2_EDCSR 0x0680 60 #define RIO_PORT2_EDCSR 0x0680
61 #define RIO_PORT1_IECSR 0x10130 61 #define RIO_PORT1_IECSR 0x10130
62 #define RIO_PORT2_IECSR 0x101B0 62 #define RIO_PORT2_IECSR 0x101B0
63 #define RIO_IM0SR 0x13064 63 #define RIO_IM0SR 0x13064
64 #define RIO_IM1SR 0x13164 64 #define RIO_IM1SR 0x13164
65 #define RIO_OM0SR 0x13004 65 #define RIO_OM0SR 0x13004
66 #define RIO_OM1SR 0x13104 66 #define RIO_OM1SR 0x13104
67 67
68 #define RIO_ATMU_REGS_OFFSET 0x10c00 68 #define RIO_ATMU_REGS_OFFSET 0x10c00
69 #define RIO_P_MSG_REGS_OFFSET 0x11000 69 #define RIO_P_MSG_REGS_OFFSET 0x11000
70 #define RIO_S_MSG_REGS_OFFSET 0x13000 70 #define RIO_S_MSG_REGS_OFFSET 0x13000
71 #define RIO_GCCSR 0x13c 71 #define RIO_GCCSR 0x13c
72 #define RIO_ESCSR 0x158 72 #define RIO_ESCSR 0x158
73 #define RIO_PORT2_ESCSR 0x178 73 #define RIO_PORT2_ESCSR 0x178
74 #define RIO_CCSR 0x15c 74 #define RIO_CCSR 0x15c
75 #define RIO_LTLEDCSR 0x0608 75 #define RIO_LTLEDCSR 0x0608
76 #define RIO_LTLEDCSR_IER 0x80000000 76 #define RIO_LTLEDCSR_IER 0x80000000
77 #define RIO_LTLEDCSR_PRT 0x01000000 77 #define RIO_LTLEDCSR_PRT 0x01000000
78 #define RIO_LTLEECSR 0x060c 78 #define RIO_LTLEECSR 0x060c
79 #define RIO_EPWISR 0x10010 79 #define RIO_EPWISR 0x10010
80 #define RIO_ISR_AACR 0x10120 80 #define RIO_ISR_AACR 0x10120
81 #define RIO_ISR_AACR_AA 0x1 /* Accept All ID */ 81 #define RIO_ISR_AACR_AA 0x1 /* Accept All ID */
82 #define RIO_MAINT_WIN_SIZE 0x400000 82 #define RIO_MAINT_WIN_SIZE 0x400000
83 #define RIO_DBELL_WIN_SIZE 0x1000 83 #define RIO_DBELL_WIN_SIZE 0x1000
84 84
85 #define RIO_MSG_OMR_MUI 0x00000002 85 #define RIO_MSG_OMR_MUI 0x00000002
86 #define RIO_MSG_OSR_TE 0x00000080 86 #define RIO_MSG_OSR_TE 0x00000080
87 #define RIO_MSG_OSR_QOI 0x00000020 87 #define RIO_MSG_OSR_QOI 0x00000020
88 #define RIO_MSG_OSR_QFI 0x00000010 88 #define RIO_MSG_OSR_QFI 0x00000010
89 #define RIO_MSG_OSR_MUB 0x00000004 89 #define RIO_MSG_OSR_MUB 0x00000004
90 #define RIO_MSG_OSR_EOMI 0x00000002 90 #define RIO_MSG_OSR_EOMI 0x00000002
91 #define RIO_MSG_OSR_QEI 0x00000001 91 #define RIO_MSG_OSR_QEI 0x00000001
92 92
93 #define RIO_MSG_IMR_MI 0x00000002 93 #define RIO_MSG_IMR_MI 0x00000002
94 #define RIO_MSG_ISR_TE 0x00000080 94 #define RIO_MSG_ISR_TE 0x00000080
95 #define RIO_MSG_ISR_QFI 0x00000010 95 #define RIO_MSG_ISR_QFI 0x00000010
96 #define RIO_MSG_ISR_DIQI 0x00000001 96 #define RIO_MSG_ISR_DIQI 0x00000001
97 97
98 #define RIO_IPWMR_SEN 0x00100000 98 #define RIO_IPWMR_SEN 0x00100000
99 #define RIO_IPWMR_QFIE 0x00000100 99 #define RIO_IPWMR_QFIE 0x00000100
100 #define RIO_IPWMR_EIE 0x00000020 100 #define RIO_IPWMR_EIE 0x00000020
101 #define RIO_IPWMR_CQ 0x00000002 101 #define RIO_IPWMR_CQ 0x00000002
102 #define RIO_IPWMR_PWE 0x00000001 102 #define RIO_IPWMR_PWE 0x00000001
103 103
104 #define RIO_IPWSR_QF 0x00100000 104 #define RIO_IPWSR_QF 0x00100000
105 #define RIO_IPWSR_TE 0x00000080 105 #define RIO_IPWSR_TE 0x00000080
106 #define RIO_IPWSR_QFI 0x00000010 106 #define RIO_IPWSR_QFI 0x00000010
107 #define RIO_IPWSR_PWD 0x00000008 107 #define RIO_IPWSR_PWD 0x00000008
108 #define RIO_IPWSR_PWB 0x00000004 108 #define RIO_IPWSR_PWB 0x00000004
109 109
110 /* EPWISR Error match value */ 110 /* EPWISR Error match value */
111 #define RIO_EPWISR_PINT1 0x80000000 111 #define RIO_EPWISR_PINT1 0x80000000
112 #define RIO_EPWISR_PINT2 0x40000000 112 #define RIO_EPWISR_PINT2 0x40000000
113 #define RIO_EPWISR_MU 0x00000002 113 #define RIO_EPWISR_MU 0x00000002
114 #define RIO_EPWISR_PW 0x00000001 114 #define RIO_EPWISR_PW 0x00000001
115 115
116 #define RIO_MSG_DESC_SIZE 32 116 #define RIO_MSG_DESC_SIZE 32
117 #define RIO_MSG_BUFFER_SIZE 4096 117 #define RIO_MSG_BUFFER_SIZE 4096
118 #define RIO_MIN_TX_RING_SIZE 2 118 #define RIO_MIN_TX_RING_SIZE 2
119 #define RIO_MAX_TX_RING_SIZE 2048 119 #define RIO_MAX_TX_RING_SIZE 2048
120 #define RIO_MIN_RX_RING_SIZE 2 120 #define RIO_MIN_RX_RING_SIZE 2
121 #define RIO_MAX_RX_RING_SIZE 2048 121 #define RIO_MAX_RX_RING_SIZE 2048
122 122
123 #define DOORBELL_DMR_DI 0x00000002 123 #define DOORBELL_DMR_DI 0x00000002
124 #define DOORBELL_DSR_TE 0x00000080 124 #define DOORBELL_DSR_TE 0x00000080
125 #define DOORBELL_DSR_QFI 0x00000010 125 #define DOORBELL_DSR_QFI 0x00000010
126 #define DOORBELL_DSR_DIQI 0x00000001 126 #define DOORBELL_DSR_DIQI 0x00000001
127 #define DOORBELL_TID_OFFSET 0x02 127 #define DOORBELL_TID_OFFSET 0x02
128 #define DOORBELL_SID_OFFSET 0x04 128 #define DOORBELL_SID_OFFSET 0x04
129 #define DOORBELL_INFO_OFFSET 0x06 129 #define DOORBELL_INFO_OFFSET 0x06
130 130
131 #define DOORBELL_MESSAGE_SIZE 0x08 131 #define DOORBELL_MESSAGE_SIZE 0x08
132 #define DBELL_SID(x) (*(u16 *)(x + DOORBELL_SID_OFFSET)) 132 #define DBELL_SID(x) (*(u16 *)(x + DOORBELL_SID_OFFSET))
133 #define DBELL_TID(x) (*(u16 *)(x + DOORBELL_TID_OFFSET)) 133 #define DBELL_TID(x) (*(u16 *)(x + DOORBELL_TID_OFFSET))
134 #define DBELL_INF(x) (*(u16 *)(x + DOORBELL_INFO_OFFSET)) 134 #define DBELL_INF(x) (*(u16 *)(x + DOORBELL_INFO_OFFSET))
135 135
136 struct rio_atmu_regs { 136 struct rio_atmu_regs {
137 u32 rowtar; 137 u32 rowtar;
138 u32 rowtear; 138 u32 rowtear;
139 u32 rowbar; 139 u32 rowbar;
140 u32 pad2; 140 u32 pad2;
141 u32 rowar; 141 u32 rowar;
142 u32 pad3[3]; 142 u32 pad3[3];
143 }; 143 };
144 144
145 struct rio_msg_regs { 145 struct rio_msg_regs {
146 u32 omr; /* 0xD_3000 - Outbound message 0 mode register */ 146 u32 omr; /* 0xD_3000 - Outbound message 0 mode register */
147 u32 osr; /* 0xD_3004 - Outbound message 0 status register */ 147 u32 osr; /* 0xD_3004 - Outbound message 0 status register */
148 u32 pad1; 148 u32 pad1;
149 u32 odqdpar; /* 0xD_300C - Outbound message 0 descriptor queue 149 u32 odqdpar; /* 0xD_300C - Outbound message 0 descriptor queue
150 dequeue pointer address register */ 150 dequeue pointer address register */
151 u32 pad2; 151 u32 pad2;
152 u32 osar; /* 0xD_3014 - Outbound message 0 source address 152 u32 osar; /* 0xD_3014 - Outbound message 0 source address
153 register */ 153 register */
154 u32 odpr; /* 0xD_3018 - Outbound message 0 destination port 154 u32 odpr; /* 0xD_3018 - Outbound message 0 destination port
155 register */ 155 register */
156 u32 odatr; /* 0xD_301C - Outbound message 0 destination attributes 156 u32 odatr; /* 0xD_301C - Outbound message 0 destination attributes
157 Register*/ 157 Register*/
158 u32 odcr; /* 0xD_3020 - Outbound message 0 double-word count 158 u32 odcr; /* 0xD_3020 - Outbound message 0 double-word count
159 register */ 159 register */
160 u32 pad3; 160 u32 pad3;
161 u32 odqepar; /* 0xD_3028 - Outbound message 0 descriptor queue 161 u32 odqepar; /* 0xD_3028 - Outbound message 0 descriptor queue
162 enqueue pointer address register */ 162 enqueue pointer address register */
163 u32 pad4[13]; 163 u32 pad4[13];
164 u32 imr; /* 0xD_3060 - Inbound message 0 mode register */ 164 u32 imr; /* 0xD_3060 - Inbound message 0 mode register */
165 u32 isr; /* 0xD_3064 - Inbound message 0 status register */ 165 u32 isr; /* 0xD_3064 - Inbound message 0 status register */
166 u32 pad5; 166 u32 pad5;
167 u32 ifqdpar; /* 0xD_306C - Inbound message 0 frame queue dequeue 167 u32 ifqdpar; /* 0xD_306C - Inbound message 0 frame queue dequeue
168 pointer address register*/ 168 pointer address register*/
169 u32 pad6; 169 u32 pad6;
170 u32 ifqepar; /* 0xD_3074 - Inbound message 0 frame queue enqueue 170 u32 ifqepar; /* 0xD_3074 - Inbound message 0 frame queue enqueue
171 pointer address register */ 171 pointer address register */
172 u32 pad7[226]; 172 u32 pad7[226];
173 u32 odmr; /* 0xD_3400 - Outbound doorbell mode register */ 173 u32 odmr; /* 0xD_3400 - Outbound doorbell mode register */
174 u32 odsr; /* 0xD_3404 - Outbound doorbell status register */ 174 u32 odsr; /* 0xD_3404 - Outbound doorbell status register */
175 u32 res0[4]; 175 u32 res0[4];
176 u32 oddpr; /* 0xD_3418 - Outbound doorbell destination port 176 u32 oddpr; /* 0xD_3418 - Outbound doorbell destination port
177 register */ 177 register */
178 u32 oddatr; /* 0xD_341c - Outbound doorbell destination attributes 178 u32 oddatr; /* 0xD_341c - Outbound doorbell destination attributes
179 register */ 179 register */
180 u32 res1[3]; 180 u32 res1[3];
181 u32 odretcr; /* 0xD_342C - Outbound doorbell retry error threshold 181 u32 odretcr; /* 0xD_342C - Outbound doorbell retry error threshold
182 configuration register */ 182 configuration register */
183 u32 res2[12]; 183 u32 res2[12];
184 u32 dmr; /* 0xD_3460 - Inbound doorbell mode register */ 184 u32 dmr; /* 0xD_3460 - Inbound doorbell mode register */
185 u32 dsr; /* 0xD_3464 - Inbound doorbell status register */ 185 u32 dsr; /* 0xD_3464 - Inbound doorbell status register */
186 u32 pad8; 186 u32 pad8;
187 u32 dqdpar; /* 0xD_346C - Inbound doorbell queue dequeue Pointer 187 u32 dqdpar; /* 0xD_346C - Inbound doorbell queue dequeue Pointer
188 address register */ 188 address register */
189 u32 pad9; 189 u32 pad9;
190 u32 dqepar; /* 0xD_3474 - Inbound doorbell Queue enqueue pointer 190 u32 dqepar; /* 0xD_3474 - Inbound doorbell Queue enqueue pointer
191 address register */ 191 address register */
192 u32 pad10[26]; 192 u32 pad10[26];
193 u32 pwmr; /* 0xD_34E0 - Inbound port-write mode register */ 193 u32 pwmr; /* 0xD_34E0 - Inbound port-write mode register */
194 u32 pwsr; /* 0xD_34E4 - Inbound port-write status register */ 194 u32 pwsr; /* 0xD_34E4 - Inbound port-write status register */
195 u32 epwqbar; /* 0xD_34E8 - Extended Port-Write Queue Base Address 195 u32 epwqbar; /* 0xD_34E8 - Extended Port-Write Queue Base Address
196 register */ 196 register */
197 u32 pwqbar; /* 0xD_34EC - Inbound port-write queue base address 197 u32 pwqbar; /* 0xD_34EC - Inbound port-write queue base address
198 register */ 198 register */
199 }; 199 };
200 200
201 struct rio_tx_desc { 201 struct rio_tx_desc {
202 u32 res1; 202 u32 res1;
203 u32 saddr; 203 u32 saddr;
204 u32 dport; 204 u32 dport;
205 u32 dattr; 205 u32 dattr;
206 u32 res2; 206 u32 res2;
207 u32 res3; 207 u32 res3;
208 u32 dwcnt; 208 u32 dwcnt;
209 u32 res4; 209 u32 res4;
210 }; 210 };
211 211
212 struct rio_dbell_ring { 212 struct rio_dbell_ring {
213 void *virt; 213 void *virt;
214 dma_addr_t phys; 214 dma_addr_t phys;
215 }; 215 };
216 216
217 struct rio_msg_tx_ring { 217 struct rio_msg_tx_ring {
218 void *virt; 218 void *virt;
219 dma_addr_t phys; 219 dma_addr_t phys;
220 void *virt_buffer[RIO_MAX_TX_RING_SIZE]; 220 void *virt_buffer[RIO_MAX_TX_RING_SIZE];
221 dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE]; 221 dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE];
222 int tx_slot; 222 int tx_slot;
223 int size; 223 int size;
224 void *dev_id; 224 void *dev_id;
225 }; 225 };
226 226
227 struct rio_msg_rx_ring { 227 struct rio_msg_rx_ring {
228 void *virt; 228 void *virt;
229 dma_addr_t phys; 229 dma_addr_t phys;
230 void *virt_buffer[RIO_MAX_RX_RING_SIZE]; 230 void *virt_buffer[RIO_MAX_RX_RING_SIZE];
231 int rx_slot; 231 int rx_slot;
232 int size; 232 int size;
233 void *dev_id; 233 void *dev_id;
234 }; 234 };
235 235
236 struct rio_port_write_msg { 236 struct rio_port_write_msg {
237 void *virt; 237 void *virt;
238 dma_addr_t phys; 238 dma_addr_t phys;
239 u32 msg_count; 239 u32 msg_count;
240 u32 err_count; 240 u32 err_count;
241 u32 discard_count; 241 u32 discard_count;
242 }; 242 };
243 243
244 struct rio_priv { 244 struct rio_priv {
245 struct device *dev; 245 struct device *dev;
246 void __iomem *regs_win; 246 void __iomem *regs_win;
247 struct rio_atmu_regs __iomem *atmu_regs; 247 struct rio_atmu_regs __iomem *atmu_regs;
248 struct rio_atmu_regs __iomem *maint_atmu_regs; 248 struct rio_atmu_regs __iomem *maint_atmu_regs;
249 struct rio_atmu_regs __iomem *dbell_atmu_regs; 249 struct rio_atmu_regs __iomem *dbell_atmu_regs;
250 void __iomem *dbell_win; 250 void __iomem *dbell_win;
251 void __iomem *maint_win; 251 void __iomem *maint_win;
252 struct rio_msg_regs __iomem *msg_regs; 252 struct rio_msg_regs __iomem *msg_regs;
253 struct rio_dbell_ring dbell_ring; 253 struct rio_dbell_ring dbell_ring;
254 struct rio_msg_tx_ring msg_tx_ring; 254 struct rio_msg_tx_ring msg_tx_ring;
255 struct rio_msg_rx_ring msg_rx_ring; 255 struct rio_msg_rx_ring msg_rx_ring;
256 struct rio_port_write_msg port_write_msg; 256 struct rio_port_write_msg port_write_msg;
257 int bellirq; 257 int bellirq;
258 int txirq; 258 int txirq;
259 int rxirq; 259 int rxirq;
260 int pwirq; 260 int pwirq;
261 struct work_struct pw_work; 261 struct work_struct pw_work;
262 struct kfifo pw_fifo; 262 struct kfifo pw_fifo;
263 spinlock_t pw_fifo_lock; 263 spinlock_t pw_fifo_lock;
264 }; 264 };
265 265
266 #define __fsl_read_rio_config(x, addr, err, op) \ 266 #define __fsl_read_rio_config(x, addr, err, op) \
267 __asm__ __volatile__( \ 267 __asm__ __volatile__( \
268 "1: "op" %1,0(%2)\n" \ 268 "1: "op" %1,0(%2)\n" \
269 " eieio\n" \ 269 " eieio\n" \
270 "2:\n" \ 270 "2:\n" \
271 ".section .fixup,\"ax\"\n" \ 271 ".section .fixup,\"ax\"\n" \
272 "3: li %1,-1\n" \ 272 "3: li %1,-1\n" \
273 " li %0,%3\n" \ 273 " li %0,%3\n" \
274 " b 2b\n" \ 274 " b 2b\n" \
275 ".section __ex_table,\"a\"\n" \ 275 ".section __ex_table,\"a\"\n" \
276 " .align 2\n" \ 276 " .align 2\n" \
277 " .long 1b,3b\n" \ 277 " .long 1b,3b\n" \
278 ".text" \ 278 ".text" \
279 : "=r" (err), "=r" (x) \ 279 : "=r" (err), "=r" (x) \
280 : "b" (addr), "i" (-EFAULT), "0" (err)) 280 : "b" (addr), "i" (-EFAULT), "0" (err))
281 281
282 static void __iomem *rio_regs_win; 282 static void __iomem *rio_regs_win;
283 283
284 #ifdef CONFIG_E500 284 #ifdef CONFIG_E500
285 int fsl_rio_mcheck_exception(struct pt_regs *regs) 285 int fsl_rio_mcheck_exception(struct pt_regs *regs)
286 { 286 {
287 const struct exception_table_entry *entry; 287 const struct exception_table_entry *entry;
288 unsigned long reason; 288 unsigned long reason;
289 289
290 if (!rio_regs_win) 290 if (!rio_regs_win)
291 return 0; 291 return 0;
292 292
293 reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR)); 293 reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR));
294 if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) { 294 if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) {
295 /* Check if we are prepared to handle this fault */ 295 /* Check if we are prepared to handle this fault */
296 entry = search_exception_tables(regs->nip); 296 entry = search_exception_tables(regs->nip);
297 if (entry) { 297 if (entry) {
298 pr_debug("RIO: %s - MC Exception handled\n", 298 pr_debug("RIO: %s - MC Exception handled\n",
299 __func__); 299 __func__);
300 out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 300 out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR),
301 0); 301 0);
302 regs->msr |= MSR_RI; 302 regs->msr |= MSR_RI;
303 regs->nip = entry->fixup; 303 regs->nip = entry->fixup;
304 return 1; 304 return 1;
305 } 305 }
306 } 306 }
307 307
308 return 0; 308 return 0;
309 } 309 }
310 EXPORT_SYMBOL_GPL(fsl_rio_mcheck_exception); 310 EXPORT_SYMBOL_GPL(fsl_rio_mcheck_exception);
311 #endif 311 #endif
312 312
313 /** 313 /**
314 * fsl_rio_doorbell_send - Send a MPC85xx doorbell message 314 * fsl_rio_doorbell_send - Send a MPC85xx doorbell message
315 * @mport: RapidIO master port info 315 * @mport: RapidIO master port info
316 * @index: ID of RapidIO interface 316 * @index: ID of RapidIO interface
317 * @destid: Destination ID of target device 317 * @destid: Destination ID of target device
318 * @data: 16-bit info field of RapidIO doorbell message 318 * @data: 16-bit info field of RapidIO doorbell message
319 * 319 *
320 * Sends a MPC85xx doorbell message. Returns %0 on success or 320 * Sends a MPC85xx doorbell message. Returns %0 on success or
321 * %-EINVAL on failure. 321 * %-EINVAL on failure.
322 */ 322 */
323 static int fsl_rio_doorbell_send(struct rio_mport *mport, 323 static int fsl_rio_doorbell_send(struct rio_mport *mport,
324 int index, u16 destid, u16 data) 324 int index, u16 destid, u16 data)
325 { 325 {
326 struct rio_priv *priv = mport->priv; 326 struct rio_priv *priv = mport->priv;
327 pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n", 327 pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
328 index, destid, data); 328 index, destid, data);
329 switch (mport->phy_type) { 329 switch (mport->phy_type) {
330 case RIO_PHY_PARALLEL: 330 case RIO_PHY_PARALLEL:
331 out_be32(&priv->dbell_atmu_regs->rowtar, destid << 22); 331 out_be32(&priv->dbell_atmu_regs->rowtar, destid << 22);
332 out_be16(priv->dbell_win, data); 332 out_be16(priv->dbell_win, data);
333 break; 333 break;
334 case RIO_PHY_SERIAL: 334 case RIO_PHY_SERIAL:
335 /* On serial RapidIO silicon, such as MPC8548 and MPC8641, 335 /* On serial RapidIO silicon, such as MPC8548 and MPC8641,
336 * the operations below are required. 336 * the operations below are required.
337 */ 337 */
338 out_be32(&priv->msg_regs->odmr, 0x00000000); 338 out_be32(&priv->msg_regs->odmr, 0x00000000);
339 out_be32(&priv->msg_regs->odretcr, 0x00000004); 339 out_be32(&priv->msg_regs->odretcr, 0x00000004);
340 out_be32(&priv->msg_regs->oddpr, destid << 16); 340 out_be32(&priv->msg_regs->oddpr, destid << 16);
341 out_be32(&priv->msg_regs->oddatr, data); 341 out_be32(&priv->msg_regs->oddatr, data);
342 out_be32(&priv->msg_regs->odmr, 0x00000001); 342 out_be32(&priv->msg_regs->odmr, 0x00000001);
343 break; 343 break;
344 } 344 }
345 345
346 return 0; 346 return 0;
347 } 347 }
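
fsl_rio_doorbell_send() is static and is not called by client drivers directly; it is wired into the mport's doorbell-send op and reached through the generic RapidIO layer. A hedged sketch, assuming the rio_send_doorbell() helper from <linux/rio_drv.h>; the function name and the info value are illustrative.

	#include <linux/rio.h>
	#include <linux/rio_drv.h>

	static void example_notify_peer(struct rio_dev *rdev)
	{
		/* 0xbeef is an arbitrary 16-bit info field the peer must interpret;
		 * the generic layer ends up invoking this file's doorbell-send op. */
		if (rio_send_doorbell(rdev, 0xbeef))
			dev_warn(&rdev->dev, "doorbell send failed\n");
	}
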
348 348
349 /** 349 /**
350 * fsl_local_config_read - Generate a MPC85xx local config space read 350 * fsl_local_config_read - Generate a MPC85xx local config space read
351 * @mport: RapidIO master port info 351 * @mport: RapidIO master port info
352 * @index: ID of RapidIO interface 352 * @index: ID of RapidIO interface
353 * @offset: Offset into configuration space 353 * @offset: Offset into configuration space
354 * @len: Length (in bytes) of the maintenance transaction 354 * @len: Length (in bytes) of the maintenance transaction
355 * @data: Value to be read into 355 * @data: Value to be read into
356 * 356 *
357 * Generates a MPC85xx local configuration space read. Returns %0 on 357 * Generates a MPC85xx local configuration space read. Returns %0 on
358 * success or %-EINVAL on failure. 358 * success or %-EINVAL on failure.
359 */ 359 */
360 static int fsl_local_config_read(struct rio_mport *mport, 360 static int fsl_local_config_read(struct rio_mport *mport,
361 int index, u32 offset, int len, u32 *data) 361 int index, u32 offset, int len, u32 *data)
362 { 362 {
363 struct rio_priv *priv = mport->priv; 363 struct rio_priv *priv = mport->priv;
364 pr_debug("fsl_local_config_read: index %d offset %8.8x\n", index, 364 pr_debug("fsl_local_config_read: index %d offset %8.8x\n", index,
365 offset); 365 offset);
366 *data = in_be32(priv->regs_win + offset); 366 *data = in_be32(priv->regs_win + offset);
367 367
368 return 0; 368 return 0;
369 } 369 }
370 370
371 /** 371 /**
372 * fsl_local_config_write - Generate a MPC85xx local config space write 372 * fsl_local_config_write - Generate a MPC85xx local config space write
373 * @mport: RapidIO master port info 373 * @mport: RapidIO master port info
374 * @index: ID of RapidIO interface 374 * @index: ID of RapidIO interface
375 * @offset: Offset into configuration space 375 * @offset: Offset into configuration space
376 * @len: Length (in bytes) of the maintenance transaction 376 * @len: Length (in bytes) of the maintenance transaction
377 * @data: Value to be written 377 * @data: Value to be written
378 * 378 *
379 * Generates a MPC85xx local configuration space write. Returns %0 on 379 * Generates a MPC85xx local configuration space write. Returns %0 on
380 * success or %-EINVAL on failure. 380 * success or %-EINVAL on failure.
381 */ 381 */
382 static int fsl_local_config_write(struct rio_mport *mport, 382 static int fsl_local_config_write(struct rio_mport *mport,
383 int index, u32 offset, int len, u32 data) 383 int index, u32 offset, int len, u32 data)
384 { 384 {
385 struct rio_priv *priv = mport->priv; 385 struct rio_priv *priv = mport->priv;
386 pr_debug 386 pr_debug
387 ("fsl_local_config_write: index %d offset %8.8x data %8.8x\n", 387 ("fsl_local_config_write: index %d offset %8.8x data %8.8x\n",
388 index, offset, data); 388 index, offset, data);
389 out_be32(priv->regs_win + offset, data); 389 out_be32(priv->regs_win + offset, data);
390 390
391 return 0; 391 return 0;
392 } 392 }
393 393
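/*
 * A minimal caller-side sketch, assuming the generic rio_local_read_config_32()
 * wrapper from <linux/rio_drv.h>, which reaches fsl_local_config_read() through
 * the mport ops; the helper name below is illustrative, not part of this driver.
 */
static int example_read_local_did(struct rio_mport *mport)
{
        u32 did;
        int rc;

        /* Read the local Base Device ID CSR through the lcread hook */
        rc = rio_local_read_config_32(mport, RIO_DID_CSR, &did);
        if (rc)
                return rc;

        pr_debug("RIO: local base device ID CSR = 0x%08x\n", did);
        return 0;
}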
394 /** 394 /**
395 * fsl_rio_config_read - Generate an MPC85xx read maintenance transaction 395 * fsl_rio_config_read - Generate an MPC85xx read maintenance transaction
396 * @mport: RapidIO master port info 396 * @mport: RapidIO master port info
397 * @index: ID of RapidIO interface 397 * @index: ID of RapidIO interface
398 * @destid: Destination ID of transaction 398 * @destid: Destination ID of transaction
399 * @hopcount: Number of hops to target device 399 * @hopcount: Number of hops to target device
400 * @offset: Offset into configuration space 400 * @offset: Offset into configuration space
401 * @len: Length (in bytes) of the maintenance transaction 401 * @len: Length (in bytes) of the maintenance transaction
402 * @val: Location to be read into 402 * @val: Location to be read into
403 * 403 *
404 * Generates an MPC85xx read maintenance transaction. Returns %0 on 404 * Generates an MPC85xx read maintenance transaction. Returns %0 on
405 * success or %-EINVAL on failure. 405 * success or %-EINVAL on failure.
406 */ 406 */
407 static int 407 static int
408 fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid, 408 fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
409 u8 hopcount, u32 offset, int len, u32 *val) 409 u8 hopcount, u32 offset, int len, u32 *val)
410 { 410 {
411 struct rio_priv *priv = mport->priv; 411 struct rio_priv *priv = mport->priv;
412 u8 *data; 412 u8 *data;
413 u32 rval, err = 0; 413 u32 rval, err = 0;
414 414
415 pr_debug 415 pr_debug
416 ("fsl_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n", 416 ("fsl_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n",
417 index, destid, hopcount, offset, len); 417 index, destid, hopcount, offset, len);
418 418
419 /* 16MB maintenance window possible */ 419 /* 16MB maintenance window possible */
420 /* allow only aligned access to maintenance registers */ 420 /* allow only aligned access to maintenance registers */
421 if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len)) 421 if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len))
422 return -EINVAL; 422 return -EINVAL;
423 423
424 out_be32(&priv->maint_atmu_regs->rowtar, 424 out_be32(&priv->maint_atmu_regs->rowtar,
425 (destid << 22) | (hopcount << 12) | (offset >> 12)); 425 (destid << 22) | (hopcount << 12) | (offset >> 12));
426 out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); 426 out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
427 427
428 data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1)); 428 data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1));
429 switch (len) { 429 switch (len) {
430 case 1: 430 case 1:
431 __fsl_read_rio_config(rval, data, err, "lbz"); 431 __fsl_read_rio_config(rval, data, err, "lbz");
432 break; 432 break;
433 case 2: 433 case 2:
434 __fsl_read_rio_config(rval, data, err, "lhz"); 434 __fsl_read_rio_config(rval, data, err, "lhz");
435 break; 435 break;
436 case 4: 436 case 4:
437 __fsl_read_rio_config(rval, data, err, "lwz"); 437 __fsl_read_rio_config(rval, data, err, "lwz");
438 break; 438 break;
439 default: 439 default:
440 return -EINVAL; 440 return -EINVAL;
441 } 441 }
442 442
443 if (err) { 443 if (err) {
444 pr_debug("RIO: cfg_read error %d for %x:%x:%x\n", 444 pr_debug("RIO: cfg_read error %d for %x:%x:%x\n",
445 err, destid, hopcount, offset); 445 err, destid, hopcount, offset);
446 } 446 }
447 447
448 *val = rval; 448 *val = rval;
449 449
450 return err; 450 return err;
451 } 451 }
452 452
453 /** 453 /**
454 * fsl_rio_config_write - Generate an MPC85xx write maintenance transaction 454 * fsl_rio_config_write - Generate an MPC85xx write maintenance transaction
455 * @mport: RapidIO master port info 455 * @mport: RapidIO master port info
456 * @index: ID of RapidIO interface 456 * @index: ID of RapidIO interface
457 * @destid: Destination ID of transaction 457 * @destid: Destination ID of transaction
458 * @hopcount: Number of hops to target device 458 * @hopcount: Number of hops to target device
459 * @offset: Offset into configuration space 459 * @offset: Offset into configuration space
460 * @len: Length (in bytes) of the maintenance transaction 460 * @len: Length (in bytes) of the maintenance transaction
461 * @val: Value to be written 461 * @val: Value to be written
462 * 462 *
463 * Generates an MPC85xx write maintenance transaction. Returns %0 on 463 * Generates an MPC85xx write maintenance transaction. Returns %0 on
464 * success or %-EINVAL on failure. 464 * success or %-EINVAL on failure.
465 */ 465 */
466 static int 466 static int
467 fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid, 467 fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
468 u8 hopcount, u32 offset, int len, u32 val) 468 u8 hopcount, u32 offset, int len, u32 val)
469 { 469 {
470 struct rio_priv *priv = mport->priv; 470 struct rio_priv *priv = mport->priv;
471 u8 *data; 471 u8 *data;
472 pr_debug 472 pr_debug
473 ("fsl_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n", 473 ("fsl_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n",
474 index, destid, hopcount, offset, len, val); 474 index, destid, hopcount, offset, len, val);
475 475
476 /* 16MB maintenance windows possible */ 476 /* 16MB maintenance windows possible */
477 /* allow only aligned access to maintenance registers */ 477 /* allow only aligned access to maintenance registers */
478 if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len)) 478 if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len))
479 return -EINVAL; 479 return -EINVAL;
480 480
481 out_be32(&priv->maint_atmu_regs->rowtar, 481 out_be32(&priv->maint_atmu_regs->rowtar,
482 (destid << 22) | (hopcount << 12) | (offset >> 12)); 482 (destid << 22) | (hopcount << 12) | (offset >> 12));
483 out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); 483 out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
484 484
485 data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1)); 485 data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1));
486 switch (len) { 486 switch (len) {
487 case 1: 487 case 1:
488 out_8((u8 *) data, val); 488 out_8((u8 *) data, val);
489 break; 489 break;
490 case 2: 490 case 2:
491 out_be16((u16 *) data, val); 491 out_be16((u16 *) data, val);
492 break; 492 break;
493 case 4: 493 case 4:
494 out_be32((u32 *) data, val); 494 out_be32((u32 *) data, val);
495 break; 495 break;
496 default: 496 default:
497 return -EINVAL; 497 return -EINVAL;
498 } 498 }
499 499
500 return 0; 500 return 0;
501 } 501 }
502 502
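/*
 * A sketch of the outbound maintenance-window translation performed above:
 * the destination ID, hop count and upper offset bits are packed into the
 * window target register (the high destination ID bits go to rowtear), and
 * the remaining low offset bits select a location inside the window mapped
 * at maint_win. The helper name is illustrative.
 */
static inline u32 example_maint_rowtar(u16 destid, u8 hopcount, u32 offset)
{
        /* Same packing as the out_be32(&priv->maint_atmu_regs->rowtar, ...) call */
        return ((u32)destid << 22) | ((u32)hopcount << 12) | (offset >> 12);
}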
503 /** 503 /**
504 * fsl_add_outb_message - Add message to the MPC85xx outbound message queue 504 * fsl_add_outb_message - Add message to the MPC85xx outbound message queue
505 * @mport: Master port with outbound message queue 505 * @mport: Master port with outbound message queue
506 * @rdev: Target of outbound message 506 * @rdev: Target of outbound message
507 * @mbox: Outbound mailbox 507 * @mbox: Outbound mailbox
508 * @buffer: Message to add to outbound queue 508 * @buffer: Message to add to outbound queue
509 * @len: Length of message 509 * @len: Length of message
510 * 510 *
511 * Adds the @buffer message to the MPC85xx outbound message queue. Returns 511 * Adds the @buffer message to the MPC85xx outbound message queue. Returns
512 * %0 on success or %-EINVAL on failure. 512 * %0 on success or %-EINVAL on failure.
513 */ 513 */
514 static int 514 static int
515 fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox, 515 fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
516 void *buffer, size_t len) 516 void *buffer, size_t len)
517 { 517 {
518 struct rio_priv *priv = mport->priv; 518 struct rio_priv *priv = mport->priv;
519 u32 omr; 519 u32 omr;
520 struct rio_tx_desc *desc = (struct rio_tx_desc *)priv->msg_tx_ring.virt 520 struct rio_tx_desc *desc = (struct rio_tx_desc *)priv->msg_tx_ring.virt
521 + priv->msg_tx_ring.tx_slot; 521 + priv->msg_tx_ring.tx_slot;
522 int ret = 0; 522 int ret = 0;
523 523
524 pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \ 524 pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \
525 "%8.8x len %8.8x\n", rdev->destid, mbox, (int)buffer, len); 525 "%8.8x len %8.8x\n", rdev->destid, mbox, (int)buffer, len);
526 526
527 if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) { 527 if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
528 ret = -EINVAL; 528 ret = -EINVAL;
529 goto out; 529 goto out;
530 } 530 }
531 531
532 /* Copy and clear rest of buffer */ 532 /* Copy and clear rest of buffer */
533 memcpy(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot], buffer, 533 memcpy(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot], buffer,
534 len); 534 len);
535 if (len < (RIO_MAX_MSG_SIZE - 4)) 535 if (len < (RIO_MAX_MSG_SIZE - 4))
536 memset(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot] 536 memset(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot]
537 + len, 0, RIO_MAX_MSG_SIZE - len); 537 + len, 0, RIO_MAX_MSG_SIZE - len);
538 538
539 switch (mport->phy_type) { 539 switch (mport->phy_type) {
540 case RIO_PHY_PARALLEL: 540 case RIO_PHY_PARALLEL:
541 /* Set mbox field for message */ 541 /* Set mbox field for message */
542 desc->dport = mbox & 0x3; 542 desc->dport = mbox & 0x3;
543 543
544 /* Enable EOMI interrupt, set priority, and set destid */ 544 /* Enable EOMI interrupt, set priority, and set destid */
545 desc->dattr = 0x28000000 | (rdev->destid << 2); 545 desc->dattr = 0x28000000 | (rdev->destid << 2);
546 break; 546 break;
547 case RIO_PHY_SERIAL: 547 case RIO_PHY_SERIAL:
548 /* Set mbox field for message, and set destid */ 548 /* Set mbox field for message, and set destid */
549 desc->dport = (rdev->destid << 16) | (mbox & 0x3); 549 desc->dport = (rdev->destid << 16) | (mbox & 0x3);
550 550
551 /* Enable EOMI interrupt and priority */ 551 /* Enable EOMI interrupt and priority */
552 desc->dattr = 0x28000000; 552 desc->dattr = 0x28000000;
553 break; 553 break;
554 } 554 }
555 555
556 /* Set transfer size aligned to next power of 2 (in double words) */ 556 /* Set transfer size aligned to next power of 2 (in double words) */
557 desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len); 557 desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);
558 558
559 /* Set snooping and source buffer address */ 559 /* Set snooping and source buffer address */
560 desc->saddr = 0x00000004 560 desc->saddr = 0x00000004
561 | priv->msg_tx_ring.phys_buffer[priv->msg_tx_ring.tx_slot]; 561 | priv->msg_tx_ring.phys_buffer[priv->msg_tx_ring.tx_slot];
562 562
563 /* Increment enqueue pointer */ 563 /* Increment enqueue pointer */
564 omr = in_be32(&priv->msg_regs->omr); 564 omr = in_be32(&priv->msg_regs->omr);
565 out_be32(&priv->msg_regs->omr, omr | RIO_MSG_OMR_MUI); 565 out_be32(&priv->msg_regs->omr, omr | RIO_MSG_OMR_MUI);
566 566
567 /* Go to next descriptor */ 567 /* Go to next descriptor */
568 if (++priv->msg_tx_ring.tx_slot == priv->msg_tx_ring.size) 568 if (++priv->msg_tx_ring.tx_slot == priv->msg_tx_ring.size)
569 priv->msg_tx_ring.tx_slot = 0; 569 priv->msg_tx_ring.tx_slot = 0;
570 570
571 out: 571 out:
572 return ret; 572 return ret;
573 } 573 }
574 574
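/*
 * A minimal caller-side sketch, assuming the generic rio_add_outb_message()
 * wrapper from <linux/rio_drv.h>, which reaches fsl_add_outb_message() through
 * the mport ops; the helper name and mailbox number are illustrative.
 */
static int example_send_msg(struct rio_mport *mport, struct rio_dev *rdev,
                            void *msg, size_t len)
{
        /* fsl_add_outb_message() rejects messages outside 8..RIO_MAX_MSG_SIZE bytes */
        if (len < 8 || len > RIO_MAX_MSG_SIZE)
                return -EINVAL;

        /* Queue the message on outbound mailbox 0 */
        return rio_add_outb_message(mport, rdev, 0, msg, len);
}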
575 /** 575 /**
576 * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler 576 * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
577 * @irq: Linux interrupt number 577 * @irq: Linux interrupt number
578 * @dev_instance: Pointer to interrupt-specific data 578 * @dev_instance: Pointer to interrupt-specific data
579 * 579 *
580 * Handles outbound message interrupts. Executes a registered outbound 580 * Handles outbound message interrupts. Executes a registered outbound
581 * mailbox event handler and acks the interrupt occurrence. 581 * mailbox event handler and acks the interrupt occurrence.
582 */ 582 */
583 static irqreturn_t 583 static irqreturn_t
584 fsl_rio_tx_handler(int irq, void *dev_instance) 584 fsl_rio_tx_handler(int irq, void *dev_instance)
585 { 585 {
586 int osr; 586 int osr;
587 struct rio_mport *port = (struct rio_mport *)dev_instance; 587 struct rio_mport *port = (struct rio_mport *)dev_instance;
588 struct rio_priv *priv = port->priv; 588 struct rio_priv *priv = port->priv;
589 589
590 osr = in_be32(&priv->msg_regs->osr); 590 osr = in_be32(&priv->msg_regs->osr);
591 591
592 if (osr & RIO_MSG_OSR_TE) { 592 if (osr & RIO_MSG_OSR_TE) {
593 pr_info("RIO: outbound message transmission error\n"); 593 pr_info("RIO: outbound message transmission error\n");
594 out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_TE); 594 out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_TE);
595 goto out; 595 goto out;
596 } 596 }
597 597
598 if (osr & RIO_MSG_OSR_QOI) { 598 if (osr & RIO_MSG_OSR_QOI) {
599 pr_info("RIO: outbound message queue overflow\n"); 599 pr_info("RIO: outbound message queue overflow\n");
600 out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_QOI); 600 out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_QOI);
601 goto out; 601 goto out;
602 } 602 }
603 603
604 if (osr & RIO_MSG_OSR_EOMI) { 604 if (osr & RIO_MSG_OSR_EOMI) {
605 u32 dqp = in_be32(&priv->msg_regs->odqdpar); 605 u32 dqp = in_be32(&priv->msg_regs->odqdpar);
606 int slot = (dqp - priv->msg_tx_ring.phys) >> 5; 606 int slot = (dqp - priv->msg_tx_ring.phys) >> 5;
607 port->outb_msg[0].mcback(port, priv->msg_tx_ring.dev_id, -1, 607 port->outb_msg[0].mcback(port, priv->msg_tx_ring.dev_id, -1,
608 slot); 608 slot);
609 609
610 /* Ack the end-of-message interrupt */ 610 /* Ack the end-of-message interrupt */
611 out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_EOMI); 611 out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_EOMI);
612 } 612 }
613 613
614 out: 614 out:
615 return IRQ_HANDLED; 615 return IRQ_HANDLED;
616 } 616 }
617 617
618 /** 618 /**
619 * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox 619 * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox
620 * @mport: Master port implementing the outbound message unit 620 * @mport: Master port implementing the outbound message unit
621 * @dev_id: Device specific pointer to pass on event 621 * @dev_id: Device specific pointer to pass on event
622 * @mbox: Mailbox to open 622 * @mbox: Mailbox to open
623 * @entries: Number of entries in the outbound mailbox ring 623 * @entries: Number of entries in the outbound mailbox ring
624 * 624 *
625 * Initializes the buffer ring, requests the outbound message interrupt, 625 * Initializes the buffer ring, requests the outbound message interrupt,
626 * and enables the outbound message unit. Returns %0 on success and 626 * and enables the outbound message unit. Returns %0 on success and
627 * %-EINVAL or %-ENOMEM on failure. 627 * %-EINVAL or %-ENOMEM on failure.
628 */ 628 */
629 static int 629 static int
630 fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) 630 fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
631 { 631 {
632 int i, j, rc = 0; 632 int i, j, rc = 0;
633 struct rio_priv *priv = mport->priv; 633 struct rio_priv *priv = mport->priv;
634 634
635 if ((entries < RIO_MIN_TX_RING_SIZE) || 635 if ((entries < RIO_MIN_TX_RING_SIZE) ||
636 (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) { 636 (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
637 rc = -EINVAL; 637 rc = -EINVAL;
638 goto out; 638 goto out;
639 } 639 }
640 640
641 /* Initialize shadow copy ring */ 641 /* Initialize shadow copy ring */
642 priv->msg_tx_ring.dev_id = dev_id; 642 priv->msg_tx_ring.dev_id = dev_id;
643 priv->msg_tx_ring.size = entries; 643 priv->msg_tx_ring.size = entries;
644 644
645 for (i = 0; i < priv->msg_tx_ring.size; i++) { 645 for (i = 0; i < priv->msg_tx_ring.size; i++) {
646 priv->msg_tx_ring.virt_buffer[i] = 646 priv->msg_tx_ring.virt_buffer[i] =
647 dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE, 647 dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
648 &priv->msg_tx_ring.phys_buffer[i], GFP_KERNEL); 648 &priv->msg_tx_ring.phys_buffer[i], GFP_KERNEL);
649 if (!priv->msg_tx_ring.virt_buffer[i]) { 649 if (!priv->msg_tx_ring.virt_buffer[i]) {
650 rc = -ENOMEM; 650 rc = -ENOMEM;
651 for (j = 0; j < priv->msg_tx_ring.size; j++) 651 for (j = 0; j < priv->msg_tx_ring.size; j++)
652 if (priv->msg_tx_ring.virt_buffer[j]) 652 if (priv->msg_tx_ring.virt_buffer[j])
653 dma_free_coherent(priv->dev, 653 dma_free_coherent(priv->dev,
654 RIO_MSG_BUFFER_SIZE, 654 RIO_MSG_BUFFER_SIZE,
655 priv->msg_tx_ring. 655 priv->msg_tx_ring.
656 virt_buffer[j], 656 virt_buffer[j],
657 priv->msg_tx_ring. 657 priv->msg_tx_ring.
658 phys_buffer[j]); 658 phys_buffer[j]);
659 goto out; 659 goto out;
660 } 660 }
661 } 661 }
662 662
663 /* Initialize outbound message descriptor ring */ 663 /* Initialize outbound message descriptor ring */
664 priv->msg_tx_ring.virt = dma_alloc_coherent(priv->dev, 664 priv->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
665 priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE, 665 priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
666 &priv->msg_tx_ring.phys, GFP_KERNEL); 666 &priv->msg_tx_ring.phys, GFP_KERNEL);
667 if (!priv->msg_tx_ring.virt) { 667 if (!priv->msg_tx_ring.virt) {
668 rc = -ENOMEM; 668 rc = -ENOMEM;
669 goto out_dma; 669 goto out_dma;
670 } 670 }
671 memset(priv->msg_tx_ring.virt, 0, 671 memset(priv->msg_tx_ring.virt, 0,
672 priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE); 672 priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE);
673 priv->msg_tx_ring.tx_slot = 0; 673 priv->msg_tx_ring.tx_slot = 0;
674 674
675 /* Point dequeue/enqueue pointers at first entry in ring */ 675 /* Point dequeue/enqueue pointers at first entry in ring */
676 out_be32(&priv->msg_regs->odqdpar, priv->msg_tx_ring.phys); 676 out_be32(&priv->msg_regs->odqdpar, priv->msg_tx_ring.phys);
677 out_be32(&priv->msg_regs->odqepar, priv->msg_tx_ring.phys); 677 out_be32(&priv->msg_regs->odqepar, priv->msg_tx_ring.phys);
678 678
679 /* Configure for snooping */ 679 /* Configure for snooping */
680 out_be32(&priv->msg_regs->osar, 0x00000004); 680 out_be32(&priv->msg_regs->osar, 0x00000004);
681 681
682 /* Clear interrupt status */ 682 /* Clear interrupt status */
683 out_be32(&priv->msg_regs->osr, 0x000000b3); 683 out_be32(&priv->msg_regs->osr, 0x000000b3);
684 684
685 /* Hook up outbound message handler */ 685 /* Hook up outbound message handler */
686 rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0, 686 rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0,
687 "msg_tx", (void *)mport); 687 "msg_tx", (void *)mport);
688 if (rc < 0) 688 if (rc < 0)
689 goto out_irq; 689 goto out_irq;
690 690
691 /* 691 /*
692 * Configure outbound message unit 692 * Configure outbound message unit
693 * Snooping 693 * Snooping
694 * Interrupts (all enabled, except QEIE) 694 * Interrupts (all enabled, except QEIE)
695 * Chaining mode 695 * Chaining mode
696 * Disable 696 * Disable
697 */ 697 */
698 out_be32(&priv->msg_regs->omr, 0x00100220); 698 out_be32(&priv->msg_regs->omr, 0x00100220);
699 699
700 /* Set number of entries */ 700 /* Set number of entries */
701 out_be32(&priv->msg_regs->omr, 701 out_be32(&priv->msg_regs->omr,
702 in_be32(&priv->msg_regs->omr) | 702 in_be32(&priv->msg_regs->omr) |
703 ((get_bitmask_order(entries) - 2) << 12)); 703 ((get_bitmask_order(entries) - 2) << 12));
704 704
705 /* Now enable the unit */ 705 /* Now enable the unit */
706 out_be32(&priv->msg_regs->omr, in_be32(&priv->msg_regs->omr) | 0x1); 706 out_be32(&priv->msg_regs->omr, in_be32(&priv->msg_regs->omr) | 0x1);
707 707
708 out: 708 out:
709 return rc; 709 return rc;
710 710
711 out_irq: 711 out_irq:
712 dma_free_coherent(priv->dev, 712 dma_free_coherent(priv->dev,
713 priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE, 713 priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
714 priv->msg_tx_ring.virt, priv->msg_tx_ring.phys); 714 priv->msg_tx_ring.virt, priv->msg_tx_ring.phys);
715 715
716 out_dma: 716 out_dma:
717 for (i = 0; i < priv->msg_tx_ring.size; i++) 717 for (i = 0; i < priv->msg_tx_ring.size; i++)
718 dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE, 718 dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
719 priv->msg_tx_ring.virt_buffer[i], 719 priv->msg_tx_ring.virt_buffer[i],
720 priv->msg_tx_ring.phys_buffer[i]); 720 priv->msg_tx_ring.phys_buffer[i]);
721 721
722 return rc; 722 return rc;
723 } 723 }
724 724
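/*
 * A sketch of how a client driver reaches fsl_open_outb_mbox(), assuming the
 * generic rio_request_outb_mbox() API; the completion callback signature
 * matches the mcback invocation in fsl_rio_tx_handler() above, while the ring
 * size and names are illustrative.
 */
static void example_outb_done(struct rio_mport *mport, void *dev_id,
                              int mbox, int slot)
{
        /* Called from the TX interrupt for each completed descriptor slot */
        pr_debug("RIO: outbound mailbox %d slot %d completed\n", mbox, slot);
}

static int example_open_outb(struct rio_mport *mport, void *dev_id)
{
        /* entries must be a power of two within the TX ring size limits */
        return rio_request_outb_mbox(mport, dev_id, 0, 32, example_outb_done);
}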
725 /** 725 /**
726 * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox 726 * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox
727 * @mport: Master port implementing the outbound message unit 727 * @mport: Master port implementing the outbound message unit
728 * @mbox: Mailbox to close 728 * @mbox: Mailbox to close
729 * 729 *
730 * Disables the outbound message unit, frees all buffers, and 730 * Disables the outbound message unit, frees all buffers, and
731 * frees the outbound message interrupt. 731 * frees the outbound message interrupt.
732 */ 732 */
733 static void fsl_close_outb_mbox(struct rio_mport *mport, int mbox) 733 static void fsl_close_outb_mbox(struct rio_mport *mport, int mbox)
734 { 734 {
735 struct rio_priv *priv = mport->priv; 735 struct rio_priv *priv = mport->priv;
736 /* Disable outbound message unit */ 736 /* Disable outbound message unit */
737 out_be32(&priv->msg_regs->omr, 0); 737 out_be32(&priv->msg_regs->omr, 0);
738 738
739 /* Free ring */ 739 /* Free ring */
740 dma_free_coherent(priv->dev, 740 dma_free_coherent(priv->dev,
741 priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE, 741 priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
742 priv->msg_tx_ring.virt, priv->msg_tx_ring.phys); 742 priv->msg_tx_ring.virt, priv->msg_tx_ring.phys);
743 743
744 /* Free interrupt */ 744 /* Free interrupt */
745 free_irq(IRQ_RIO_TX(mport), (void *)mport); 745 free_irq(IRQ_RIO_TX(mport), (void *)mport);
746 } 746 }
747 747
748 /** 748 /**
749 * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler 749 * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler
750 * @irq: Linux interrupt number 750 * @irq: Linux interrupt number
751 * @dev_instance: Pointer to interrupt-specific data 751 * @dev_instance: Pointer to interrupt-specific data
752 * 752 *
753 * Handles inbound message interrupts. Executes a registered inbound 753 * Handles inbound message interrupts. Executes a registered inbound
754 * mailbox event handler and acks the interrupt occurrence. 754 * mailbox event handler and acks the interrupt occurrence.
755 */ 755 */
756 static irqreturn_t 756 static irqreturn_t
757 fsl_rio_rx_handler(int irq, void *dev_instance) 757 fsl_rio_rx_handler(int irq, void *dev_instance)
758 { 758 {
759 int isr; 759 int isr;
760 struct rio_mport *port = (struct rio_mport *)dev_instance; 760 struct rio_mport *port = (struct rio_mport *)dev_instance;
761 struct rio_priv *priv = port->priv; 761 struct rio_priv *priv = port->priv;
762 762
763 isr = in_be32(&priv->msg_regs->isr); 763 isr = in_be32(&priv->msg_regs->isr);
764 764
765 if (isr & RIO_MSG_ISR_TE) { 765 if (isr & RIO_MSG_ISR_TE) {
766 pr_info("RIO: inbound message reception error\n"); 766 pr_info("RIO: inbound message reception error\n");
767 out_be32((void *)&priv->msg_regs->isr, RIO_MSG_ISR_TE); 767 out_be32((void *)&priv->msg_regs->isr, RIO_MSG_ISR_TE);
768 goto out; 768 goto out;
769 } 769 }
770 770
771 /* XXX Need to check/dispatch until queue empty */ 771 /* XXX Need to check/dispatch until queue empty */
772 if (isr & RIO_MSG_ISR_DIQI) { 772 if (isr & RIO_MSG_ISR_DIQI) {
773 /* 773 /*
774 * We implement *only* mailbox 0, but can receive messages 774 * We implement *only* mailbox 0, but can receive messages
775 * for any mailbox/letter to that mailbox destination. So, 775 * for any mailbox/letter to that mailbox destination. So,
776 * make the callback with an unknown/invalid mailbox number 776 * make the callback with an unknown/invalid mailbox number
777 * argument. 777 * argument.
778 */ 778 */
779 port->inb_msg[0].mcback(port, priv->msg_rx_ring.dev_id, -1, -1); 779 port->inb_msg[0].mcback(port, priv->msg_rx_ring.dev_id, -1, -1);
780 780
781 /* Ack the queueing interrupt */ 781 /* Ack the queueing interrupt */
782 out_be32(&priv->msg_regs->isr, RIO_MSG_ISR_DIQI); 782 out_be32(&priv->msg_regs->isr, RIO_MSG_ISR_DIQI);
783 } 783 }
784 784
785 out: 785 out:
786 return IRQ_HANDLED; 786 return IRQ_HANDLED;
787 } 787 }
788 788
789 /** 789 /**
790 * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox 790 * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox
791 * @mport: Master port implementing the inbound message unit 791 * @mport: Master port implementing the inbound message unit
792 * @dev_id: Device specific pointer to pass on event 792 * @dev_id: Device specific pointer to pass on event
793 * @mbox: Mailbox to open 793 * @mbox: Mailbox to open
794 * @entries: Number of entries in the inbound mailbox ring 794 * @entries: Number of entries in the inbound mailbox ring
795 * 795 *
796 * Initializes the buffer ring, requests the inbound message interrupt, 796 * Initializes the buffer ring, requests the inbound message interrupt,
797 * and enables the inbound message unit. Returns %0 on success 797 * and enables the inbound message unit. Returns %0 on success
798 * and %-EINVAL or %-ENOMEM on failure. 798 * and %-EINVAL or %-ENOMEM on failure.
799 */ 799 */
800 static int 800 static int
801 fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) 801 fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
802 { 802 {
803 int i, rc = 0; 803 int i, rc = 0;
804 struct rio_priv *priv = mport->priv; 804 struct rio_priv *priv = mport->priv;
805 805
806 if ((entries < RIO_MIN_RX_RING_SIZE) || 806 if ((entries < RIO_MIN_RX_RING_SIZE) ||
807 (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) { 807 (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
808 rc = -EINVAL; 808 rc = -EINVAL;
809 goto out; 809 goto out;
810 } 810 }
811 811
812 /* Initialize client buffer ring */ 812 /* Initialize client buffer ring */
813 priv->msg_rx_ring.dev_id = dev_id; 813 priv->msg_rx_ring.dev_id = dev_id;
814 priv->msg_rx_ring.size = entries; 814 priv->msg_rx_ring.size = entries;
815 priv->msg_rx_ring.rx_slot = 0; 815 priv->msg_rx_ring.rx_slot = 0;
816 for (i = 0; i < priv->msg_rx_ring.size; i++) 816 for (i = 0; i < priv->msg_rx_ring.size; i++)
817 priv->msg_rx_ring.virt_buffer[i] = NULL; 817 priv->msg_rx_ring.virt_buffer[i] = NULL;
818 818
819 /* Initialize inbound message ring */ 819 /* Initialize inbound message ring */
820 priv->msg_rx_ring.virt = dma_alloc_coherent(priv->dev, 820 priv->msg_rx_ring.virt = dma_alloc_coherent(priv->dev,
821 priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE, 821 priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
822 &priv->msg_rx_ring.phys, GFP_KERNEL); 822 &priv->msg_rx_ring.phys, GFP_KERNEL);
823 if (!priv->msg_rx_ring.virt) { 823 if (!priv->msg_rx_ring.virt) {
824 rc = -ENOMEM; 824 rc = -ENOMEM;
825 goto out; 825 goto out;
826 } 826 }
827 827
828 /* Point dequeue/enqueue pointers at first entry in ring */ 828 /* Point dequeue/enqueue pointers at first entry in ring */
829 out_be32(&priv->msg_regs->ifqdpar, (u32) priv->msg_rx_ring.phys); 829 out_be32(&priv->msg_regs->ifqdpar, (u32) priv->msg_rx_ring.phys);
830 out_be32(&priv->msg_regs->ifqepar, (u32) priv->msg_rx_ring.phys); 830 out_be32(&priv->msg_regs->ifqepar, (u32) priv->msg_rx_ring.phys);
831 831
832 /* Clear interrupt status */ 832 /* Clear interrupt status */
833 out_be32(&priv->msg_regs->isr, 0x00000091); 833 out_be32(&priv->msg_regs->isr, 0x00000091);
834 834
835 /* Hook up inbound message handler */ 835 /* Hook up inbound message handler */
836 rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0, 836 rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0,
837 "msg_rx", (void *)mport); 837 "msg_rx", (void *)mport);
838 if (rc < 0) { 838 if (rc < 0) {
839 dma_free_coherent(priv->dev, priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE, 839 dma_free_coherent(priv->dev, priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
840 priv->msg_rx_ring.virt, 840 priv->msg_rx_ring.virt,
841 priv->msg_rx_ring.phys); 841 priv->msg_rx_ring.phys);
842 goto out; 842 goto out;
843 } 843 }
844 844
845 /* 845 /*
846 * Configure inbound message unit: 846 * Configure inbound message unit:
847 * Snooping 847 * Snooping
848 * 4KB max message size 848 * 4KB max message size
849 * Unmask all interrupt sources 849 * Unmask all interrupt sources
850 * Disable 850 * Disable
851 */ 851 */
852 out_be32(&priv->msg_regs->imr, 0x001b0060); 852 out_be32(&priv->msg_regs->imr, 0x001b0060);
853 853
854 /* Set number of queue entries */ 854 /* Set number of queue entries */
855 setbits32(&priv->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12); 855 setbits32(&priv->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12);
856 856
857 /* Now enable the unit */ 857 /* Now enable the unit */
858 setbits32(&priv->msg_regs->imr, 0x1); 858 setbits32(&priv->msg_regs->imr, 0x1);
859 859
860 out: 860 out:
861 return rc; 861 return rc;
862 } 862 }
863 863
864 /** 864 /**
865 * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox 865 * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox
866 * @mport: Master port implementing the inbound message unit 866 * @mport: Master port implementing the inbound message unit
867 * @mbox: Mailbox to close 867 * @mbox: Mailbox to close
868 * 868 *
869 * Disables the inbound message unit, frees all buffers, and 869 * Disables the inbound message unit, frees all buffers, and
870 * frees the inbound message interrupt. 870 * frees the inbound message interrupt.
871 */ 871 */
872 static void fsl_close_inb_mbox(struct rio_mport *mport, int mbox) 872 static void fsl_close_inb_mbox(struct rio_mport *mport, int mbox)
873 { 873 {
874 struct rio_priv *priv = mport->priv; 874 struct rio_priv *priv = mport->priv;
875 /* Disable inbound message unit */ 875 /* Disable inbound message unit */
876 out_be32(&priv->msg_regs->imr, 0); 876 out_be32(&priv->msg_regs->imr, 0);
877 877
878 /* Free ring */ 878 /* Free ring */
879 dma_free_coherent(priv->dev, priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE, 879 dma_free_coherent(priv->dev, priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
880 priv->msg_rx_ring.virt, priv->msg_rx_ring.phys); 880 priv->msg_rx_ring.virt, priv->msg_rx_ring.phys);
881 881
882 /* Free interrupt */ 882 /* Free interrupt */
883 free_irq(IRQ_RIO_RX(mport), (void *)mport); 883 free_irq(IRQ_RIO_RX(mport), (void *)mport);
884 } 884 }
885 885
886 /** 886 /**
887 * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue 887 * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
888 * @mport: Master port implementing the inbound message unit 888 * @mport: Master port implementing the inbound message unit
889 * @mbox: Inbound mailbox number 889 * @mbox: Inbound mailbox number
890 * @buf: Buffer to add to inbound queue 890 * @buf: Buffer to add to inbound queue
891 * 891 *
892 * Adds the @buf buffer to the MPC85xx inbound message queue. Returns 892 * Adds the @buf buffer to the MPC85xx inbound message queue. Returns
893 * %0 on success or %-EINVAL on failure. 893 * %0 on success or %-EINVAL on failure.
894 */ 894 */
895 static int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf) 895 static int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
896 { 896 {
897 int rc = 0; 897 int rc = 0;
898 struct rio_priv *priv = mport->priv; 898 struct rio_priv *priv = mport->priv;
899 899
900 pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n", 900 pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
901 priv->msg_rx_ring.rx_slot); 901 priv->msg_rx_ring.rx_slot);
902 902
903 if (priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot]) { 903 if (priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot]) {
904 printk(KERN_ERR 904 printk(KERN_ERR
905 "RIO: error adding inbound buffer %d, buffer exists\n", 905 "RIO: error adding inbound buffer %d, buffer exists\n",
906 priv->msg_rx_ring.rx_slot); 906 priv->msg_rx_ring.rx_slot);
907 rc = -EINVAL; 907 rc = -EINVAL;
908 goto out; 908 goto out;
909 } 909 }
910 910
911 priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot] = buf; 911 priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot] = buf;
912 if (++priv->msg_rx_ring.rx_slot == priv->msg_rx_ring.size) 912 if (++priv->msg_rx_ring.rx_slot == priv->msg_rx_ring.size)
913 priv->msg_rx_ring.rx_slot = 0; 913 priv->msg_rx_ring.rx_slot = 0;
914 914
915 out: 915 out:
916 return rc; 916 return rc;
917 } 917 }
918 918
919 /** 919 /**
920 * fsl_get_inb_message - Fetch inbound message from the MPC85xx message unit 920 * fsl_get_inb_message - Fetch inbound message from the MPC85xx message unit
921 * @mport: Master port implementing the inbound message unit 921 * @mport: Master port implementing the inbound message unit
922 * @mbox: Inbound mailbox number 922 * @mbox: Inbound mailbox number
923 * 923 *
924 * Gets the next available inbound message from the inbound message queue. 924 * Gets the next available inbound message from the inbound message queue.
925 * A pointer to the message is returned on success or NULL on failure. 925 * A pointer to the message is returned on success or NULL on failure.
926 */ 926 */
927 static void *fsl_get_inb_message(struct rio_mport *mport, int mbox) 927 static void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
928 { 928 {
929 struct rio_priv *priv = mport->priv; 929 struct rio_priv *priv = mport->priv;
930 u32 phys_buf, virt_buf; 930 u32 phys_buf, virt_buf;
931 void *buf = NULL; 931 void *buf = NULL;
932 int buf_idx; 932 int buf_idx;
933 933
934 phys_buf = in_be32(&priv->msg_regs->ifqdpar); 934 phys_buf = in_be32(&priv->msg_regs->ifqdpar);
935 935
936 /* If no more messages, then bail out */ 936 /* If no more messages, then bail out */
937 if (phys_buf == in_be32(&priv->msg_regs->ifqepar)) 937 if (phys_buf == in_be32(&priv->msg_regs->ifqepar))
938 goto out2; 938 goto out2;
939 939
940 virt_buf = (u32) priv->msg_rx_ring.virt + (phys_buf 940 virt_buf = (u32) priv->msg_rx_ring.virt + (phys_buf
941 - priv->msg_rx_ring.phys); 941 - priv->msg_rx_ring.phys);
942 buf_idx = (phys_buf - priv->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE; 942 buf_idx = (phys_buf - priv->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
943 buf = priv->msg_rx_ring.virt_buffer[buf_idx]; 943 buf = priv->msg_rx_ring.virt_buffer[buf_idx];
944 944
945 if (!buf) { 945 if (!buf) {
946 printk(KERN_ERR 946 printk(KERN_ERR
947 "RIO: inbound message copy failed, no buffers\n"); 947 "RIO: inbound message copy failed, no buffers\n");
948 goto out1; 948 goto out1;
949 } 949 }
950 950
951 /* Copy max message size, caller is expected to allocate that big */ 951 /* Copy max message size, caller is expected to allocate that big */
952 memcpy(buf, (void *)virt_buf, RIO_MAX_MSG_SIZE); 952 memcpy(buf, (void *)virt_buf, RIO_MAX_MSG_SIZE);
953 953
954 /* Clear the available buffer */ 954 /* Clear the available buffer */
955 priv->msg_rx_ring.virt_buffer[buf_idx] = NULL; 955 priv->msg_rx_ring.virt_buffer[buf_idx] = NULL;
956 956
957 out1: 957 out1:
958 setbits32(&priv->msg_regs->imr, RIO_MSG_IMR_MI); 958 setbits32(&priv->msg_regs->imr, RIO_MSG_IMR_MI);
959 959
960 out2: 960 out2:
961 return buf; 961 return buf;
962 } 962 }
963 963
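/*
 * A sketch of the client side of the inbound mailbox, assuming the generic
 * rio_request_inb_mbox()/rio_add_inb_buffer()/rio_get_inb_message() wrappers;
 * buffers must be RIO_MAX_MSG_SIZE bytes because fsl_get_inb_message() copies
 * the full maximum-size message. Names are illustrative.
 */
static void example_inb_event(struct rio_mport *mport, void *dev_id,
                              int mbox, int slot)
{
        void *msg;

        /* Drain every queued message and hand the buffer straight back */
        while ((msg = rio_get_inb_message(mport, 0)) != NULL)
                rio_add_inb_buffer(mport, 0, msg);
}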
964 /** 964 /**
965 * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler 965 * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler
966 * @irq: Linux interrupt number 966 * @irq: Linux interrupt number
967 * @dev_instance: Pointer to interrupt-specific data 967 * @dev_instance: Pointer to interrupt-specific data
968 * 968 *
969 * Handles doorbell interrupts. Parses a list of registered 969 * Handles doorbell interrupts. Parses a list of registered
970 * doorbell event handlers and executes a matching event handler. 970 * doorbell event handlers and executes a matching event handler.
971 */ 971 */
972 static irqreturn_t 972 static irqreturn_t
973 fsl_rio_dbell_handler(int irq, void *dev_instance) 973 fsl_rio_dbell_handler(int irq, void *dev_instance)
974 { 974 {
975 int dsr; 975 int dsr;
976 struct rio_mport *port = (struct rio_mport *)dev_instance; 976 struct rio_mport *port = (struct rio_mport *)dev_instance;
977 struct rio_priv *priv = port->priv; 977 struct rio_priv *priv = port->priv;
978 978
979 dsr = in_be32(&priv->msg_regs->dsr); 979 dsr = in_be32(&priv->msg_regs->dsr);
980 980
981 if (dsr & DOORBELL_DSR_TE) { 981 if (dsr & DOORBELL_DSR_TE) {
982 pr_info("RIO: doorbell reception error\n"); 982 pr_info("RIO: doorbell reception error\n");
983 out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_TE); 983 out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_TE);
984 goto out; 984 goto out;
985 } 985 }
986 986
987 if (dsr & DOORBELL_DSR_QFI) { 987 if (dsr & DOORBELL_DSR_QFI) {
988 pr_info("RIO: doorbell queue full\n"); 988 pr_info("RIO: doorbell queue full\n");
989 out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI); 989 out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI);
990 } 990 }
991 991
992 /* XXX Need to check/dispatch until queue empty */ 992 /* XXX Need to check/dispatch until queue empty */
993 if (dsr & DOORBELL_DSR_DIQI) { 993 if (dsr & DOORBELL_DSR_DIQI) {
994 u32 dmsg = 994 u32 dmsg =
995 (u32) priv->dbell_ring.virt + 995 (u32) priv->dbell_ring.virt +
996 (in_be32(&priv->msg_regs->dqdpar) & 0xfff); 996 (in_be32(&priv->msg_regs->dqdpar) & 0xfff);
997 struct rio_dbell *dbell; 997 struct rio_dbell *dbell;
998 int found = 0; 998 int found = 0;
999 999
1000 pr_debug 1000 pr_debug
1001 ("RIO: processing doorbell, sid %2.2x tid %2.2x info %4.4x\n", 1001 ("RIO: processing doorbell, sid %2.2x tid %2.2x info %4.4x\n",
1002 DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg)); 1002 DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg));
1003 1003
1004 list_for_each_entry(dbell, &port->dbells, node) { 1004 list_for_each_entry(dbell, &port->dbells, node) {
1005 if ((dbell->res->start <= DBELL_INF(dmsg)) && 1005 if ((dbell->res->start <= DBELL_INF(dmsg)) &&
1006 (dbell->res->end >= DBELL_INF(dmsg))) { 1006 (dbell->res->end >= DBELL_INF(dmsg))) {
1007 found = 1; 1007 found = 1;
1008 break; 1008 break;
1009 } 1009 }
1010 } 1010 }
1011 if (found) { 1011 if (found) {
1012 dbell->dinb(port, dbell->dev_id, DBELL_SID(dmsg), DBELL_TID(dmsg), 1012 dbell->dinb(port, dbell->dev_id, DBELL_SID(dmsg), DBELL_TID(dmsg),
1013 DBELL_INF(dmsg)); 1013 DBELL_INF(dmsg));
1014 } else { 1014 } else {
1015 pr_debug 1015 pr_debug
1016 ("RIO: spurious doorbell, sid %2.2x tid %2.2x info %4.4x\n", 1016 ("RIO: spurious doorbell, sid %2.2x tid %2.2x info %4.4x\n",
1017 DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg)); 1017 DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg));
1018 } 1018 }
1019 setbits32(&priv->msg_regs->dmr, DOORBELL_DMR_DI); 1019 setbits32(&priv->msg_regs->dmr, DOORBELL_DMR_DI);
1020 out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_DIQI); 1020 out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_DIQI);
1021 } 1021 }
1022 1022
1023 out: 1023 out:
1024 return IRQ_HANDLED; 1024 return IRQ_HANDLED;
1025 } 1025 }
1026 1026
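/*
 * A minimal doorbell usage sketch, assuming the generic rio_request_inb_dbell()
 * and rio_send_doorbell() helpers; the inbound callback signature matches the
 * dinb invocation in fsl_rio_dbell_handler() above, and the range, info value
 * and names are illustrative.
 */
static void example_dbell_in(struct rio_mport *mport, void *dev_id,
                             u16 src, u16 dst, u16 info)
{
        pr_debug("RIO: doorbell 0x%04x from destid %4.4x\n", info, src);
}

static int example_dbell_setup(struct rio_mport *mport, struct rio_dev *rdev)
{
        int rc;

        /* Listen for doorbell info values 0x0000..0x00ff */
        rc = rio_request_inb_dbell(mport, NULL, 0x0000, 0x00ff, example_dbell_in);
        if (rc)
                return rc;

        /* Ring the remote device; routed to the outbound doorbell op above */
        return rio_send_doorbell(rdev, 0x0001);
}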
1027 /** 1027 /**
1028 * fsl_rio_doorbell_init - MPC85xx doorbell interface init 1028 * fsl_rio_doorbell_init - MPC85xx doorbell interface init
1029 * @mport: Master port implementing the inbound doorbell unit 1029 * @mport: Master port implementing the inbound doorbell unit
1030 * 1030 *
1031 * Initializes doorbell unit hardware and inbound DMA buffer 1031 * Initializes doorbell unit hardware and inbound DMA buffer
1032 * ring. Called from fsl_rio_setup(). Returns %0 on success 1032 * ring. Called from fsl_rio_setup(). Returns %0 on success
1033 * or %-ENOMEM on failure. 1033 * or %-ENOMEM on failure.
1034 */ 1034 */
1035 static int fsl_rio_doorbell_init(struct rio_mport *mport) 1035 static int fsl_rio_doorbell_init(struct rio_mport *mport)
1036 { 1036 {
1037 struct rio_priv *priv = mport->priv; 1037 struct rio_priv *priv = mport->priv;
1038 int rc = 0; 1038 int rc = 0;
1039 1039
1040 /* Map outbound doorbell window immediately after maintenance window */ 1040 /* Map outbound doorbell window immediately after maintenance window */
1041 priv->dbell_win = ioremap(mport->iores.start + RIO_MAINT_WIN_SIZE, 1041 priv->dbell_win = ioremap(mport->iores.start + RIO_MAINT_WIN_SIZE,
1042 RIO_DBELL_WIN_SIZE); 1042 RIO_DBELL_WIN_SIZE);
1043 if (!priv->dbell_win) { 1043 if (!priv->dbell_win) {
1044 printk(KERN_ERR 1044 printk(KERN_ERR
1045 "RIO: unable to map outbound doorbell window\n"); 1045 "RIO: unable to map outbound doorbell window\n");
1046 rc = -ENOMEM; 1046 rc = -ENOMEM;
1047 goto out; 1047 goto out;
1048 } 1048 }
1049 1049
1050 /* Initialize inbound doorbells */ 1050 /* Initialize inbound doorbells */
1051 priv->dbell_ring.virt = dma_alloc_coherent(priv->dev, 512 * 1051 priv->dbell_ring.virt = dma_alloc_coherent(priv->dev, 512 *
1052 DOORBELL_MESSAGE_SIZE, &priv->dbell_ring.phys, GFP_KERNEL); 1052 DOORBELL_MESSAGE_SIZE, &priv->dbell_ring.phys, GFP_KERNEL);
1053 if (!priv->dbell_ring.virt) { 1053 if (!priv->dbell_ring.virt) {
1054 printk(KERN_ERR "RIO: unable to allocate inbound doorbell ring\n"); 1054 printk(KERN_ERR "RIO: unable to allocate inbound doorbell ring\n");
1055 rc = -ENOMEM; 1055 rc = -ENOMEM;
1056 iounmap(priv->dbell_win); 1056 iounmap(priv->dbell_win);
1057 goto out; 1057 goto out;
1058 } 1058 }
1059 1059
1060 /* Point dequeue/enqueue pointers at first entry in ring */ 1060 /* Point dequeue/enqueue pointers at first entry in ring */
1061 out_be32(&priv->msg_regs->dqdpar, (u32) priv->dbell_ring.phys); 1061 out_be32(&priv->msg_regs->dqdpar, (u32) priv->dbell_ring.phys);
1062 out_be32(&priv->msg_regs->dqepar, (u32) priv->dbell_ring.phys); 1062 out_be32(&priv->msg_regs->dqepar, (u32) priv->dbell_ring.phys);
1063 1063
1064 /* Clear interrupt status */ 1064 /* Clear interrupt status */
1065 out_be32(&priv->msg_regs->dsr, 0x00000091); 1065 out_be32(&priv->msg_regs->dsr, 0x00000091);
1066 1066
1067 /* Hook up doorbell handler */ 1067 /* Hook up doorbell handler */
1068 rc = request_irq(IRQ_RIO_BELL(mport), fsl_rio_dbell_handler, 0, 1068 rc = request_irq(IRQ_RIO_BELL(mport), fsl_rio_dbell_handler, 0,
1069 "dbell_rx", (void *)mport); 1069 "dbell_rx", (void *)mport);
1070 if (rc < 0) { 1070 if (rc < 0) {
1071 iounmap(priv->dbell_win); 1071 iounmap(priv->dbell_win);
1072 dma_free_coherent(priv->dev, 512 * DOORBELL_MESSAGE_SIZE, 1072 dma_free_coherent(priv->dev, 512 * DOORBELL_MESSAGE_SIZE,
1073 priv->dbell_ring.virt, priv->dbell_ring.phys); 1073 priv->dbell_ring.virt, priv->dbell_ring.phys);
1074 printk(KERN_ERR 1074 printk(KERN_ERR
1075 "MPC85xx RIO: unable to request inbound doorbell irq"); 1075 "MPC85xx RIO: unable to request inbound doorbell irq");
1076 goto out; 1076 goto out;
1077 } 1077 }
1078 1078
1079 /* Configure doorbells for snooping, 512 entries, and enable */ 1079 /* Configure doorbells for snooping, 512 entries, and enable */
1080 out_be32(&priv->msg_regs->dmr, 0x00108161); 1080 out_be32(&priv->msg_regs->dmr, 0x00108161);
1081 1081
1082 out: 1082 out:
1083 return rc; 1083 return rc;
1084 } 1084 }
1085 1085
1086 static void port_error_handler(struct rio_mport *port, int offset) 1086 static void port_error_handler(struct rio_mport *port, int offset)
1087 { 1087 {
1088 /*XXX: Error recovery is not implemented, we just clear errors */ 1088 /*XXX: Error recovery is not implemented, we just clear errors */
1089 out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0); 1089 out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);
1090 1090
1091 if (offset == 0) { 1091 if (offset == 0) {
1092 out_be32((u32 *)(rio_regs_win + RIO_PORT1_EDCSR), 0); 1092 out_be32((u32 *)(rio_regs_win + RIO_PORT1_EDCSR), 0);
1093 out_be32((u32 *)(rio_regs_win + RIO_PORT1_IECSR), IECSR_CLEAR); 1093 out_be32((u32 *)(rio_regs_win + RIO_PORT1_IECSR), IECSR_CLEAR);
1094 out_be32((u32 *)(rio_regs_win + RIO_ESCSR), ESCSR_CLEAR); 1094 out_be32((u32 *)(rio_regs_win + RIO_ESCSR), ESCSR_CLEAR);
1095 } else { 1095 } else {
1096 out_be32((u32 *)(rio_regs_win + RIO_PORT2_EDCSR), 0); 1096 out_be32((u32 *)(rio_regs_win + RIO_PORT2_EDCSR), 0);
1097 out_be32((u32 *)(rio_regs_win + RIO_PORT2_IECSR), IECSR_CLEAR); 1097 out_be32((u32 *)(rio_regs_win + RIO_PORT2_IECSR), IECSR_CLEAR);
1098 out_be32((u32 *)(rio_regs_win + RIO_PORT2_ESCSR), ESCSR_CLEAR); 1098 out_be32((u32 *)(rio_regs_win + RIO_PORT2_ESCSR), ESCSR_CLEAR);
1099 } 1099 }
1100 } 1100 }
1101 1101
1102 static void msg_unit_error_handler(struct rio_mport *port) 1102 static void msg_unit_error_handler(struct rio_mport *port)
1103 { 1103 {
1104 struct rio_priv *priv = port->priv; 1104 struct rio_priv *priv = port->priv;
1105 1105
1106 /*XXX: Error recovery is not implemented, we just clear errors */ 1106 /*XXX: Error recovery is not implemented, we just clear errors */
1107 out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0); 1107 out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);
1108 1108
1109 out_be32((u32 *)(rio_regs_win + RIO_IM0SR), IMSR_CLEAR); 1109 out_be32((u32 *)(rio_regs_win + RIO_IM0SR), IMSR_CLEAR);
1110 out_be32((u32 *)(rio_regs_win + RIO_IM1SR), IMSR_CLEAR); 1110 out_be32((u32 *)(rio_regs_win + RIO_IM1SR), IMSR_CLEAR);
1111 out_be32((u32 *)(rio_regs_win + RIO_OM0SR), OMSR_CLEAR); 1111 out_be32((u32 *)(rio_regs_win + RIO_OM0SR), OMSR_CLEAR);
1112 out_be32((u32 *)(rio_regs_win + RIO_OM1SR), OMSR_CLEAR); 1112 out_be32((u32 *)(rio_regs_win + RIO_OM1SR), OMSR_CLEAR);
1113 1113
1114 out_be32(&priv->msg_regs->odsr, ODSR_CLEAR); 1114 out_be32(&priv->msg_regs->odsr, ODSR_CLEAR);
1115 out_be32(&priv->msg_regs->dsr, IDSR_CLEAR); 1115 out_be32(&priv->msg_regs->dsr, IDSR_CLEAR);
1116 1116
1117 out_be32(&priv->msg_regs->pwsr, IPWSR_CLEAR); 1117 out_be32(&priv->msg_regs->pwsr, IPWSR_CLEAR);
1118 } 1118 }
1119 1119
1120 /** 1120 /**
1121 * fsl_rio_port_write_handler - MPC85xx port write interrupt handler 1121 * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
1122 * @irq: Linux interrupt number 1122 * @irq: Linux interrupt number
1123 * @dev_instance: Pointer to interrupt-specific data 1123 * @dev_instance: Pointer to interrupt-specific data
1124 * 1124 *
1125 * Handles port write interrupts. Parses a list of registered 1125 * Handles port write interrupts. Parses a list of registered
1126 * port write event handlers and executes a matching event handler. 1126 * port write event handlers and executes a matching event handler.
1127 */ 1127 */
1128 static irqreturn_t 1128 static irqreturn_t
1129 fsl_rio_port_write_handler(int irq, void *dev_instance) 1129 fsl_rio_port_write_handler(int irq, void *dev_instance)
1130 { 1130 {
1131 u32 ipwmr, ipwsr; 1131 u32 ipwmr, ipwsr;
1132 struct rio_mport *port = (struct rio_mport *)dev_instance; 1132 struct rio_mport *port = (struct rio_mport *)dev_instance;
1133 struct rio_priv *priv = port->priv; 1133 struct rio_priv *priv = port->priv;
1134 u32 epwisr, tmp; 1134 u32 epwisr, tmp;
1135 1135
1136 epwisr = in_be32(priv->regs_win + RIO_EPWISR); 1136 epwisr = in_be32(priv->regs_win + RIO_EPWISR);
1137 if (!(epwisr & RIO_EPWISR_PW)) 1137 if (!(epwisr & RIO_EPWISR_PW))
1138 goto pw_done; 1138 goto pw_done;
1139 1139
1140 ipwmr = in_be32(&priv->msg_regs->pwmr); 1140 ipwmr = in_be32(&priv->msg_regs->pwmr);
1141 ipwsr = in_be32(&priv->msg_regs->pwsr); 1141 ipwsr = in_be32(&priv->msg_regs->pwsr);
1142 1142
1143 #ifdef DEBUG_PW 1143 #ifdef DEBUG_PW
1144 pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr); 1144 pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
1145 if (ipwsr & RIO_IPWSR_QF) 1145 if (ipwsr & RIO_IPWSR_QF)
1146 pr_debug(" QF"); 1146 pr_debug(" QF");
1147 if (ipwsr & RIO_IPWSR_TE) 1147 if (ipwsr & RIO_IPWSR_TE)
1148 pr_debug(" TE"); 1148 pr_debug(" TE");
1149 if (ipwsr & RIO_IPWSR_QFI) 1149 if (ipwsr & RIO_IPWSR_QFI)
1150 pr_debug(" QFI"); 1150 pr_debug(" QFI");
1151 if (ipwsr & RIO_IPWSR_PWD) 1151 if (ipwsr & RIO_IPWSR_PWD)
1152 pr_debug(" PWD"); 1152 pr_debug(" PWD");
1153 if (ipwsr & RIO_IPWSR_PWB) 1153 if (ipwsr & RIO_IPWSR_PWB)
1154 pr_debug(" PWB"); 1154 pr_debug(" PWB");
1155 pr_debug(" )\n"); 1155 pr_debug(" )\n");
1156 #endif 1156 #endif
1157 /* Schedule deferred processing if PW was received */ 1157 /* Schedule deferred processing if PW was received */
1158 if (ipwsr & RIO_IPWSR_QFI) { 1158 if (ipwsr & RIO_IPWSR_QFI) {
1159 /* Save PW message (if there is room in FIFO), 1159 /* Save PW message (if there is room in FIFO),
1160 * otherwise discard it. 1160 * otherwise discard it.
1161 */ 1161 */
1162 if (kfifo_avail(&priv->pw_fifo) >= RIO_PW_MSG_SIZE) { 1162 if (kfifo_avail(&priv->pw_fifo) >= RIO_PW_MSG_SIZE) {
1163 priv->port_write_msg.msg_count++; 1163 priv->port_write_msg.msg_count++;
1164 kfifo_in(&priv->pw_fifo, priv->port_write_msg.virt, 1164 kfifo_in(&priv->pw_fifo, priv->port_write_msg.virt,
1165 RIO_PW_MSG_SIZE); 1165 RIO_PW_MSG_SIZE);
1166 } else { 1166 } else {
1167 priv->port_write_msg.discard_count++; 1167 priv->port_write_msg.discard_count++;
1168 pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n", 1168 pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
1169 priv->port_write_msg.discard_count); 1169 priv->port_write_msg.discard_count);
1170 } 1170 }
1171 /* Clear interrupt and issue Clear Queue command. This allows 1171 /* Clear interrupt and issue Clear Queue command. This allows
1172 * another port-write to be received. 1172 * another port-write to be received.
1173 */ 1173 */
1174 out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_QFI); 1174 out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_QFI);
1175 out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ); 1175 out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
1176 1176
1177 schedule_work(&priv->pw_work); 1177 schedule_work(&priv->pw_work);
1178 } 1178 }
1179 1179
1180 if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) { 1180 if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
1181 priv->port_write_msg.err_count++; 1181 priv->port_write_msg.err_count++;
1182 pr_debug("RIO: Port-Write Transaction Err (%d)\n", 1182 pr_debug("RIO: Port-Write Transaction Err (%d)\n",
1183 priv->port_write_msg.err_count); 1183 priv->port_write_msg.err_count);
1184 /* Clear Transaction Error: port-write controller should be 1184 /* Clear Transaction Error: port-write controller should be
1185 * disabled when clearing this error 1185 * disabled when clearing this error
1186 */ 1186 */
1187 out_be32(&priv->msg_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE); 1187 out_be32(&priv->msg_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE);
1188 out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_TE); 1188 out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_TE);
1189 out_be32(&priv->msg_regs->pwmr, ipwmr); 1189 out_be32(&priv->msg_regs->pwmr, ipwmr);
1190 } 1190 }
1191 1191
1192 if (ipwsr & RIO_IPWSR_PWD) { 1192 if (ipwsr & RIO_IPWSR_PWD) {
1193 priv->port_write_msg.discard_count++; 1193 priv->port_write_msg.discard_count++;
1194 pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n", 1194 pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
1195 priv->port_write_msg.discard_count); 1195 priv->port_write_msg.discard_count);
1196 out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_PWD); 1196 out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_PWD);
1197 } 1197 }
1198 1198
1199 pw_done: 1199 pw_done:
1200 if (epwisr & RIO_EPWISR_PINT1) { 1200 if (epwisr & RIO_EPWISR_PINT1) {
1201 tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); 1201 tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
1202 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); 1202 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
1203 port_error_handler(port, 0); 1203 port_error_handler(port, 0);
1204 } 1204 }
1205 1205
1206 if (epwisr & RIO_EPWISR_PINT2) { 1206 if (epwisr & RIO_EPWISR_PINT2) {
1207 tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); 1207 tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
1208 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); 1208 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
1209 port_error_handler(port, 1); 1209 port_error_handler(port, 1);
1210 } 1210 }
1211 1211
1212 if (epwisr & RIO_EPWISR_MU) { 1212 if (epwisr & RIO_EPWISR_MU) {
1213 tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); 1213 tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
1214 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); 1214 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
1215 msg_unit_error_handler(port); 1215 msg_unit_error_handler(port);
1216 } 1216 }
1217 1217
1218 return IRQ_HANDLED; 1218 return IRQ_HANDLED;
1219 } 1219 }
1220 1220
1221 static void fsl_pw_dpc(struct work_struct *work) 1221 static void fsl_pw_dpc(struct work_struct *work)
1222 { 1222 {
1223 struct rio_priv *priv = container_of(work, struct rio_priv, pw_work); 1223 struct rio_priv *priv = container_of(work, struct rio_priv, pw_work);
1224 unsigned long flags; 1224 unsigned long flags;
1225 u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)]; 1225 u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)];
1226 1226
1227 /* 1227 /*
1228 * Process port-write messages 1228 * Process port-write messages
1229 */ 1229 */
1230 spin_lock_irqsave(&priv->pw_fifo_lock, flags); 1230 spin_lock_irqsave(&priv->pw_fifo_lock, flags);
1231 while (kfifo_out(&priv->pw_fifo, (unsigned char *)msg_buffer, 1231 while (kfifo_out(&priv->pw_fifo, (unsigned char *)msg_buffer,
1232 RIO_PW_MSG_SIZE)) { 1232 RIO_PW_MSG_SIZE)) {
1233 /* Process one message */ 1233 /* Process one message */
1234 spin_unlock_irqrestore(&priv->pw_fifo_lock, flags); 1234 spin_unlock_irqrestore(&priv->pw_fifo_lock, flags);
1235 #ifdef DEBUG_PW 1235 #ifdef DEBUG_PW
1236 { 1236 {
1237 u32 i; 1237 u32 i;
1238 pr_debug("%s : Port-Write Message:", __func__); 1238 pr_debug("%s : Port-Write Message:", __func__);
1239 for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) { 1239 for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) {
1240 if ((i%4) == 0) 1240 if ((i%4) == 0)
1241 pr_debug("\n0x%02x: 0x%08x", i*4, 1241 pr_debug("\n0x%02x: 0x%08x", i*4,
1242 msg_buffer[i]); 1242 msg_buffer[i]);
1243 else 1243 else
1244 pr_debug(" 0x%08x", msg_buffer[i]); 1244 pr_debug(" 0x%08x", msg_buffer[i]);
1245 } 1245 }
1246 pr_debug("\n"); 1246 pr_debug("\n");
1247 } 1247 }
1248 #endif 1248 #endif
1249 /* Pass the port-write message to RIO core for processing */ 1249 /* Pass the port-write message to RIO core for processing */
1250 rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer); 1250 rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer);
1251 spin_lock_irqsave(&priv->pw_fifo_lock, flags); 1251 spin_lock_irqsave(&priv->pw_fifo_lock, flags);
1252 } 1252 }
1253 spin_unlock_irqrestore(&priv->pw_fifo_lock, flags); 1253 spin_unlock_irqrestore(&priv->pw_fifo_lock, flags);
1254 } 1254 }
1255 1255
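/*
 * A sketch of the consumer side of port-writes, assuming the generic
 * rio_request_inb_pwrite() API that rio_inb_pwrite_handler() dispatches to;
 * the callback and the fields it prints are illustrative.
 */
static int example_pwrite_cb(struct rio_dev *rdev, union rio_pw_msg *msg,
                             int step)
{
        /* Inspect the port-write payload delivered via fsl_pw_dpc() above */
        pr_debug("RIO: port-write comptag 0x%08x (step %d)\n",
                 msg->em.comptag, step);
        return 0;
}

/* Registration, typically done when the client driver probes the device */
static int example_pwrite_setup(struct rio_dev *rdev)
{
        return rio_request_inb_pwrite(rdev, example_pwrite_cb);
}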
1256 /** 1256 /**
1257 * fsl_rio_pw_enable - enable/disable port-write interface 1257 * fsl_rio_pw_enable - enable/disable port-write interface
1258 * @mport: Master port implementing the port write unit 1258 * @mport: Master port implementing the port write unit
1259 * @enable: 1=enable; 0=disable port-write message handling 1259 * @enable: 1=enable; 0=disable port-write message handling
1260 */ 1260 */
1261 static int fsl_rio_pw_enable(struct rio_mport *mport, int enable) 1261 static int fsl_rio_pw_enable(struct rio_mport *mport, int enable)
1262 { 1262 {
1263 struct rio_priv *priv = mport->priv; 1263 struct rio_priv *priv = mport->priv;
1264 u32 rval; 1264 u32 rval;
1265 1265
1266 rval = in_be32(&priv->msg_regs->pwmr); 1266 rval = in_be32(&priv->msg_regs->pwmr);
1267 1267
1268 if (enable) 1268 if (enable)
1269 rval |= RIO_IPWMR_PWE; 1269 rval |= RIO_IPWMR_PWE;
1270 else 1270 else
1271 rval &= ~RIO_IPWMR_PWE; 1271 rval &= ~RIO_IPWMR_PWE;
1272 1272
1273 out_be32(&priv->msg_regs->pwmr, rval); 1273 out_be32(&priv->msg_regs->pwmr, rval);
1274 1274
1275 return 0; 1275 return 0;
1276 } 1276 }
1277 1277
1278 /** 1278 /**
1279 * fsl_rio_port_write_init - MPC85xx port write interface init 1279 * fsl_rio_port_write_init - MPC85xx port write interface init
1280 * @mport: Master port implementing the port write unit 1280 * @mport: Master port implementing the port write unit
1281 * 1281 *
1282 * Initializes port write unit hardware and DMA buffer 1282 * Initializes port write unit hardware and DMA buffer
1283 * ring. Called from fsl_rio_setup(). Returns %0 on success 1283 * ring. Called from fsl_rio_setup(). Returns %0 on success
1284 * or %-ENOMEM on failure. 1284 * or %-ENOMEM on failure.
1285 */ 1285 */
1286 static int fsl_rio_port_write_init(struct rio_mport *mport) 1286 static int fsl_rio_port_write_init(struct rio_mport *mport)
1287 { 1287 {
1288 struct rio_priv *priv = mport->priv; 1288 struct rio_priv *priv = mport->priv;
1289 int rc = 0; 1289 int rc = 0;
1290 1290
1291 /* The following configuration requires a disabled port write controller */ 1291 /* The following configuration requires a disabled port write controller */
1292 out_be32(&priv->msg_regs->pwmr, 1292 out_be32(&priv->msg_regs->pwmr,
1293 in_be32(&priv->msg_regs->pwmr) & ~RIO_IPWMR_PWE); 1293 in_be32(&priv->msg_regs->pwmr) & ~RIO_IPWMR_PWE);
1294 1294
1295 /* Initialize port write */ 1295 /* Initialize port write */
1296 priv->port_write_msg.virt = dma_alloc_coherent(priv->dev, 1296 priv->port_write_msg.virt = dma_alloc_coherent(priv->dev,
1297 RIO_PW_MSG_SIZE, 1297 RIO_PW_MSG_SIZE,
1298 &priv->port_write_msg.phys, GFP_KERNEL); 1298 &priv->port_write_msg.phys, GFP_KERNEL);
1299 if (!priv->port_write_msg.virt) { 1299 if (!priv->port_write_msg.virt) {
1300 pr_err("RIO: unable to allocate port write queue\n"); 1300 pr_err("RIO: unable to allocate port write queue\n");
1301 return -ENOMEM; 1301 return -ENOMEM;
1302 } 1302 }
1303 1303
1304 priv->port_write_msg.err_count = 0; 1304 priv->port_write_msg.err_count = 0;
1305 priv->port_write_msg.discard_count = 0; 1305 priv->port_write_msg.discard_count = 0;
1306 1306
1307 /* Point dequeue/enqueue pointers at first entry */ 1307 /* Point dequeue/enqueue pointers at first entry */
1308 out_be32(&priv->msg_regs->epwqbar, 0); 1308 out_be32(&priv->msg_regs->epwqbar, 0);
1309 out_be32(&priv->msg_regs->pwqbar, (u32) priv->port_write_msg.phys); 1309 out_be32(&priv->msg_regs->pwqbar, (u32) priv->port_write_msg.phys);
1310 1310
1311 pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n", 1311 pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n",
1312 in_be32(&priv->msg_regs->epwqbar), 1312 in_be32(&priv->msg_regs->epwqbar),
1313 in_be32(&priv->msg_regs->pwqbar)); 1313 in_be32(&priv->msg_regs->pwqbar));
1314 1314
1315 /* Clear interrupt status IPWSR */ 1315 /* Clear interrupt status IPWSR */
1316 out_be32(&priv->msg_regs->pwsr, 1316 out_be32(&priv->msg_regs->pwsr,
1317 (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD)); 1317 (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
1318 1318
1319 /* Configure port write controller for snooping, enable all reporting, 1319 /* Configure port write controller for snooping, enable all reporting,
1320 clear queue full */ 1320 clear queue full */
1321 out_be32(&priv->msg_regs->pwmr, 1321 out_be32(&priv->msg_regs->pwmr,
1322 RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ); 1322 RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ);
1323 1323
1324 1324
1325 /* Hook up port-write handler */ 1325 /* Hook up port-write handler */
1326 rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler, 1326 rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler,
1327 IRQF_SHARED, "port-write", (void *)mport); 1327 IRQF_SHARED, "port-write", (void *)mport);
1328 if (rc < 0) { 1328 if (rc < 0) {
1329 pr_err("MPC85xx RIO: unable to request port-write irq\n"); 1329 pr_err("MPC85xx RIO: unable to request port-write irq\n");
1330 goto err_out; 1330 goto err_out;
1331 } 1331 }
1332 /* Enable Error Interrupt */ 1332 /* Enable Error Interrupt */
1333 out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL); 1333 out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL);
1334 1334
1335 INIT_WORK(&priv->pw_work, fsl_pw_dpc); 1335 INIT_WORK(&priv->pw_work, fsl_pw_dpc);
1336 spin_lock_init(&priv->pw_fifo_lock); 1336 spin_lock_init(&priv->pw_fifo_lock);
1337 if (kfifo_alloc(&priv->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) { 1337 if (kfifo_alloc(&priv->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
1338 pr_err("FIFO allocation failed\n"); 1338 pr_err("FIFO allocation failed\n");
1339 rc = -ENOMEM; 1339 rc = -ENOMEM;
1340 goto err_out_irq; 1340 goto err_out_irq;
1341 } 1341 }
1342 1342
1343 pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n", 1343 pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n",
1344 in_be32(&priv->msg_regs->pwmr), 1344 in_be32(&priv->msg_regs->pwmr),
1345 in_be32(&priv->msg_regs->pwsr)); 1345 in_be32(&priv->msg_regs->pwsr));
1346 1346
1347 return rc; 1347 return rc;
1348 1348
1349 err_out_irq: 1349 err_out_irq:
1350 free_irq(IRQ_RIO_PW(mport), (void *)mport); 1350 free_irq(IRQ_RIO_PW(mport), (void *)mport);
1351 err_out: 1351 err_out:
1352 dma_free_coherent(priv->dev, RIO_PW_MSG_SIZE, 1352 dma_free_coherent(priv->dev, RIO_PW_MSG_SIZE,
1353 priv->port_write_msg.virt, 1353 priv->port_write_msg.virt,
1354 priv->port_write_msg.phys); 1354 priv->port_write_msg.phys);
1355 return rc; 1355 return rc;
1356 } 1356 }
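
fsl_rio_port_write_init() acquires its resources in order (coherent DMA buffer, port-write IRQ, kfifo) and unwinds them through the goto labels above when a later step fails. The section does not show a teardown path; a hypothetical cleanup, sketched here only to illustrate the reverse-order release, might look roughly like this:

    /* Hypothetical cleanup mirroring fsl_rio_port_write_init(); the driver
     * shown here does not implement it. */
    static void example_port_write_cleanup(struct rio_mport *mport)
    {
            struct rio_priv *priv = mport->priv;

            kfifo_free(&priv->pw_fifo);
            free_irq(IRQ_RIO_PW(mport), (void *)mport);
            dma_free_coherent(priv->dev, RIO_PW_MSG_SIZE,
                              priv->port_write_msg.virt,
                              priv->port_write_msg.phys);
    }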
1357 1357
1358 static inline void fsl_rio_info(struct device *dev, u32 ccsr) 1358 static inline void fsl_rio_info(struct device *dev, u32 ccsr)
1359 { 1359 {
1360 const char *str; 1360 const char *str;
1361 if (ccsr & 1) { 1361 if (ccsr & 1) {
1362 /* Serial phy */ 1362 /* Serial phy */
1363 switch (ccsr >> 30) { 1363 switch (ccsr >> 30) {
1364 case 0: 1364 case 0:
1365 str = "1"; 1365 str = "1";
1366 break; 1366 break;
1367 case 1: 1367 case 1:
1368 str = "4"; 1368 str = "4";
1369 break; 1369 break;
1370 default: 1370 default:
1371 str = "Unknown"; 1371 str = "Unknown";
1372 break; 1372 break;
1373 } 1373 }
1374 dev_info(dev, "Hardware port width: %s\n", str); 1374 dev_info(dev, "Hardware port width: %s\n", str);
1375 1375
1376 switch ((ccsr >> 27) & 7) { 1376 switch ((ccsr >> 27) & 7) {
1377 case 0: 1377 case 0:
1378 str = "Single-lane 0"; 1378 str = "Single-lane 0";
1379 break; 1379 break;
1380 case 1: 1380 case 1:
1381 str = "Single-lane 2"; 1381 str = "Single-lane 2";
1382 break; 1382 break;
1383 case 2: 1383 case 2:
1384 str = "Four-lane"; 1384 str = "Four-lane";
1385 break; 1385 break;
1386 default: 1386 default:
1387 str = "Unknown"; 1387 str = "Unknown";
1388 break; 1388 break;
1389 } 1389 }
1390 dev_info(dev, "Training connection status: %s\n", str); 1390 dev_info(dev, "Training connection status: %s\n", str);
1391 } else { 1391 } else {
1392 /* Parallel phy */ 1392 /* Parallel phy */
1393 if (!(ccsr & 0x80000000)) 1393 if (!(ccsr & 0x80000000))
1394 dev_info(dev, "Output port operating in 8-bit mode\n"); 1394 dev_info(dev, "Output port operating in 8-bit mode\n");
1395 if (!(ccsr & 0x08000000)) 1395 if (!(ccsr & 0x08000000))
1396 dev_info(dev, "Input port operating in 8-bit mode\n"); 1396 dev_info(dev, "Input port operating in 8-bit mode\n");
1397 } 1397 }
1398 } 1398 }
1399 1399
1400 /** 1400 /**
1401 * fsl_rio_setup - Setup Freescale PowerPC RapidIO interface 1401 * fsl_rio_setup - Setup Freescale PowerPC RapidIO interface
1402 * @dev: platform_device pointer 1402 * @dev: platform_device pointer
1403 * 1403 *
1404 * Initializes MPC85xx RapidIO hardware interface, configures 1404 * Initializes MPC85xx RapidIO hardware interface, configures
1405 * master port with system-specific info, and registers the 1405 * master port with system-specific info, and registers the
1406 * master port with the RapidIO subsystem. 1406 * master port with the RapidIO subsystem.
1407 */ 1407 */
1408 int fsl_rio_setup(struct platform_device *dev) 1408 int fsl_rio_setup(struct platform_device *dev)
1409 { 1409 {
1410 struct rio_ops *ops; 1410 struct rio_ops *ops;
1411 struct rio_mport *port; 1411 struct rio_mport *port;
1412 struct rio_priv *priv; 1412 struct rio_priv *priv;
1413 int rc = 0; 1413 int rc = 0;
1414 const u32 *dt_range, *cell; 1414 const u32 *dt_range, *cell;
1415 struct resource regs; 1415 struct resource regs;
1416 int rlen; 1416 int rlen;
1417 u32 ccsr; 1417 u32 ccsr;
1418 u64 law_start, law_size; 1418 u64 law_start, law_size;
1419 int paw, aw, sw; 1419 int paw, aw, sw;
1420 1420
1421 if (!dev->dev.of_node) { 1421 if (!dev->dev.of_node) {
1422 dev_err(&dev->dev, "Device OF-Node is NULL"); 1422 dev_err(&dev->dev, "Device OF-Node is NULL");
1423 return -EFAULT; 1423 return -EFAULT;
1424 } 1424 }
1425 1425
1426 rc = of_address_to_resource(dev->dev.of_node, 0, &regs); 1426 rc = of_address_to_resource(dev->dev.of_node, 0, &regs);
1427 if (rc) { 1427 if (rc) {
1428 dev_err(&dev->dev, "Can't get %s property 'reg'\n", 1428 dev_err(&dev->dev, "Can't get %s property 'reg'\n",
1429 dev->dev.of_node->full_name); 1429 dev->dev.of_node->full_name);
1430 return -EFAULT; 1430 return -EFAULT;
1431 } 1431 }
1432 dev_info(&dev->dev, "Of-device full name %s\n", dev->dev.of_node->full_name); 1432 dev_info(&dev->dev, "Of-device full name %s\n", dev->dev.of_node->full_name);
1433 dev_info(&dev->dev, "Regs: %pR\n", &regs); 1433 dev_info(&dev->dev, "Regs: %pR\n", &regs);
1434 1434
1435 dt_range = of_get_property(dev->dev.of_node, "ranges", &rlen); 1435 dt_range = of_get_property(dev->dev.of_node, "ranges", &rlen);
1436 if (!dt_range) { 1436 if (!dt_range) {
1437 dev_err(&dev->dev, "Can't get %s property 'ranges'\n", 1437 dev_err(&dev->dev, "Can't get %s property 'ranges'\n",
1438 dev->dev.of_node->full_name); 1438 dev->dev.of_node->full_name);
1439 return -EFAULT; 1439 return -EFAULT;
1440 } 1440 }
1441 1441
1442 /* Get node address width */ 1442 /* Get node address width */
1443 cell = of_get_property(dev->dev.of_node, "#address-cells", NULL); 1443 cell = of_get_property(dev->dev.of_node, "#address-cells", NULL);
1444 if (cell) 1444 if (cell)
1445 aw = *cell; 1445 aw = *cell;
1446 else 1446 else
1447 aw = of_n_addr_cells(dev->dev.of_node); 1447 aw = of_n_addr_cells(dev->dev.of_node);
1448 /* Get node size width */ 1448 /* Get node size width */
1449 cell = of_get_property(dev->dev.of_node, "#size-cells", NULL); 1449 cell = of_get_property(dev->dev.of_node, "#size-cells", NULL);
1450 if (cell) 1450 if (cell)
1451 sw = *cell; 1451 sw = *cell;
1452 else 1452 else
1453 sw = of_n_size_cells(dev->dev.of_node); 1453 sw = of_n_size_cells(dev->dev.of_node);
1454 /* Get parent address width */ 1454 /* Get parent address width */
1455 paw = of_n_addr_cells(dev->dev.of_node); 1455 paw = of_n_addr_cells(dev->dev.of_node);
1456 1456
1457 law_start = of_read_number(dt_range + aw, paw); 1457 law_start = of_read_number(dt_range + aw, paw);
1458 law_size = of_read_number(dt_range + aw + paw, sw); 1458 law_size = of_read_number(dt_range + aw + paw, sw);
1459 1459
1460 dev_info(&dev->dev, "LAW start 0x%016llx, size 0x%016llx.\n", 1460 dev_info(&dev->dev, "LAW start 0x%016llx, size 0x%016llx.\n",
1461 law_start, law_size); 1461 law_start, law_size);
1462 1462
1463 ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL); 1463 ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL);
1464 if (!ops) { 1464 if (!ops) {
1465 rc = -ENOMEM; 1465 rc = -ENOMEM;
1466 goto err_ops; 1466 goto err_ops;
1467 } 1467 }
1468 ops->lcread = fsl_local_config_read; 1468 ops->lcread = fsl_local_config_read;
1469 ops->lcwrite = fsl_local_config_write; 1469 ops->lcwrite = fsl_local_config_write;
1470 ops->cread = fsl_rio_config_read; 1470 ops->cread = fsl_rio_config_read;
1471 ops->cwrite = fsl_rio_config_write; 1471 ops->cwrite = fsl_rio_config_write;
1472 ops->dsend = fsl_rio_doorbell_send; 1472 ops->dsend = fsl_rio_doorbell_send;
1473 ops->pwenable = fsl_rio_pw_enable; 1473 ops->pwenable = fsl_rio_pw_enable;
1474 ops->open_outb_mbox = fsl_open_outb_mbox; 1474 ops->open_outb_mbox = fsl_open_outb_mbox;
1475 ops->open_inb_mbox = fsl_open_inb_mbox; 1475 ops->open_inb_mbox = fsl_open_inb_mbox;
1476 ops->close_outb_mbox = fsl_close_outb_mbox; 1476 ops->close_outb_mbox = fsl_close_outb_mbox;
1477 ops->close_inb_mbox = fsl_close_inb_mbox; 1477 ops->close_inb_mbox = fsl_close_inb_mbox;
1478 ops->add_outb_message = fsl_add_outb_message; 1478 ops->add_outb_message = fsl_add_outb_message;
1479 ops->add_inb_buffer = fsl_add_inb_buffer; 1479 ops->add_inb_buffer = fsl_add_inb_buffer;
1480 ops->get_inb_message = fsl_get_inb_message; 1480 ops->get_inb_message = fsl_get_inb_message;
1481 1481
1482 port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL); 1482 port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL);
1483 if (!port) { 1483 if (!port) {
1484 rc = -ENOMEM; 1484 rc = -ENOMEM;
1485 goto err_port; 1485 goto err_port;
1486 } 1486 }
1487 port->index = 0; 1487 port->index = 0;
1488 1488
1489 priv = kzalloc(sizeof(struct rio_priv), GFP_KERNEL); 1489 priv = kzalloc(sizeof(struct rio_priv), GFP_KERNEL);
1490 if (!priv) { 1490 if (!priv) {
1491 printk(KERN_ERR "Can't alloc memory for 'priv'\n"); 1491 printk(KERN_ERR "Can't alloc memory for 'priv'\n");
1492 rc = -ENOMEM; 1492 rc = -ENOMEM;
1493 goto err_priv; 1493 goto err_priv;
1494 } 1494 }
1495 1495
1496 INIT_LIST_HEAD(&port->dbells); 1496 INIT_LIST_HEAD(&port->dbells);
1497 port->iores.start = law_start; 1497 port->iores.start = law_start;
1498 port->iores.end = law_start + law_size - 1; 1498 port->iores.end = law_start + law_size - 1;
1499 port->iores.flags = IORESOURCE_MEM; 1499 port->iores.flags = IORESOURCE_MEM;
1500 port->iores.name = "rio_io_win"; 1500 port->iores.name = "rio_io_win";
1501 1501
1502 if (request_resource(&iomem_resource, &port->iores) < 0) { 1502 if (request_resource(&iomem_resource, &port->iores) < 0) {
1503 dev_err(&dev->dev, "RIO: Error requesting master port region" 1503 dev_err(&dev->dev, "RIO: Error requesting master port region"
1504 " 0x%016llx-0x%016llx\n", 1504 " 0x%016llx-0x%016llx\n",
1505 (u64)port->iores.start, (u64)port->iores.end); 1505 (u64)port->iores.start, (u64)port->iores.end);
1506 rc = -ENOMEM; 1506 rc = -ENOMEM;
1507 goto err_res; 1507 goto err_res;
1508 } 1508 }
1509 1509
1510 priv->pwirq = irq_of_parse_and_map(dev->dev.of_node, 0); 1510 priv->pwirq = irq_of_parse_and_map(dev->dev.of_node, 0);
1511 priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2); 1511 priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2);
1512 priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3); 1512 priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3);
1513 priv->rxirq = irq_of_parse_and_map(dev->dev.of_node, 4); 1513 priv->rxirq = irq_of_parse_and_map(dev->dev.of_node, 4);
1514 dev_info(&dev->dev, "pwirq: %d, bellirq: %d, txirq: %d, rxirq %d\n", 1514 dev_info(&dev->dev, "pwirq: %d, bellirq: %d, txirq: %d, rxirq %d\n",
1515 priv->pwirq, priv->bellirq, priv->txirq, priv->rxirq); 1515 priv->pwirq, priv->bellirq, priv->txirq, priv->rxirq);
1516 1516
1517 rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); 1517 rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
1518 rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0); 1518 rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
1519 rio_init_mbox_res(&port->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0); 1519 rio_init_mbox_res(&port->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
1520 strcpy(port->name, "RIO0 mport"); 1520 strcpy(port->name, "RIO0 mport");
1521 1521
1522 priv->dev = &dev->dev; 1522 priv->dev = &dev->dev;
1523 1523
1524 port->ops = ops; 1524 port->ops = ops;
1525 port->priv = priv; 1525 port->priv = priv;
1526 port->phys_efptr = 0x100; 1526 port->phys_efptr = 0x100;
1527 1527
1528 priv->regs_win = ioremap(regs.start, resource_size(&regs)); 1528 priv->regs_win = ioremap(regs.start, resource_size(&regs));
1529 rio_regs_win = priv->regs_win; 1529 rio_regs_win = priv->regs_win;
1530 1530
1531 /* Probe the master port phy type */ 1531 /* Probe the master port phy type */
1532 ccsr = in_be32(priv->regs_win + RIO_CCSR); 1532 ccsr = in_be32(priv->regs_win + RIO_CCSR);
1533 port->phy_type = (ccsr & 1) ? RIO_PHY_SERIAL : RIO_PHY_PARALLEL; 1533 port->phy_type = (ccsr & 1) ? RIO_PHY_SERIAL : RIO_PHY_PARALLEL;
1534 dev_info(&dev->dev, "RapidIO PHY type: %s\n", 1534 dev_info(&dev->dev, "RapidIO PHY type: %s\n",
1535 (port->phy_type == RIO_PHY_PARALLEL) ? "parallel" : 1535 (port->phy_type == RIO_PHY_PARALLEL) ? "parallel" :
1536 ((port->phy_type == RIO_PHY_SERIAL) ? "serial" : 1536 ((port->phy_type == RIO_PHY_SERIAL) ? "serial" :
1537 "unknown")); 1537 "unknown"));
1538 /* Checking the port training status */ 1538 /* Checking the port training status */
1539 if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) { 1539 if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) {
1540 dev_err(&dev->dev, "Port is not ready. " 1540 dev_err(&dev->dev, "Port is not ready. "
1541 "Try to restart connection...\n"); 1541 "Try to restart connection...\n");
1542 switch (port->phy_type) { 1542 switch (port->phy_type) {
1543 case RIO_PHY_SERIAL: 1543 case RIO_PHY_SERIAL:
1544 /* Disable ports */ 1544 /* Disable ports */
1545 out_be32(priv->regs_win + RIO_CCSR, 0); 1545 out_be32(priv->regs_win + RIO_CCSR, 0);
1546 /* Set 1x lane */ 1546 /* Set 1x lane */
1547 setbits32(priv->regs_win + RIO_CCSR, 0x02000000); 1547 setbits32(priv->regs_win + RIO_CCSR, 0x02000000);
1548 /* Enable ports */ 1548 /* Enable ports */
1549 setbits32(priv->regs_win + RIO_CCSR, 0x00600000); 1549 setbits32(priv->regs_win + RIO_CCSR, 0x00600000);
1550 break; 1550 break;
1551 case RIO_PHY_PARALLEL: 1551 case RIO_PHY_PARALLEL:
1552 /* Disable ports */ 1552 /* Disable ports */
1553 out_be32(priv->regs_win + RIO_CCSR, 0x22000000); 1553 out_be32(priv->regs_win + RIO_CCSR, 0x22000000);
1554 /* Enable ports */ 1554 /* Enable ports */
1555 out_be32(priv->regs_win + RIO_CCSR, 0x44000000); 1555 out_be32(priv->regs_win + RIO_CCSR, 0x44000000);
1556 break; 1556 break;
1557 } 1557 }
1558 msleep(100); 1558 msleep(100);
1559 if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) { 1559 if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) {
1560 dev_err(&dev->dev, "Port restart failed.\n"); 1560 dev_err(&dev->dev, "Port restart failed.\n");
1561 rc = -ENOLINK; 1561 rc = -ENOLINK;
1562 goto err; 1562 goto err;
1563 } 1563 }
1564 dev_info(&dev->dev, "Port restart success!\n"); 1564 dev_info(&dev->dev, "Port restart success!\n");
1565 } 1565 }
1566 fsl_rio_info(&dev->dev, ccsr); 1566 fsl_rio_info(&dev->dev, ccsr);
1567 1567
1568 port->sys_size = (in_be32((priv->regs_win + RIO_PEF_CAR)) 1568 port->sys_size = (in_be32((priv->regs_win + RIO_PEF_CAR))
1569 & RIO_PEF_CTLS) >> 4; 1569 & RIO_PEF_CTLS) >> 4;
1570 dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n", 1570 dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n",
1571 port->sys_size ? 65536 : 256); 1571 port->sys_size ? 65536 : 256);
1572 1572
1573 if (rio_register_mport(port)) 1573 if (rio_register_mport(port))
1574 goto err; 1574 goto err;
1575 1575
1576 if (port->host_deviceid >= 0) 1576 if (port->host_deviceid >= 0)
1577 out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST | 1577 out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST |
1578 RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED); 1578 RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED);
1579 else 1579 else
1580 out_be32(priv->regs_win + RIO_GCCSR, 0x00000000); 1580 out_be32(priv->regs_win + RIO_GCCSR, 0x00000000);
1581 1581
1582 priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win 1582 priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win
1583 + RIO_ATMU_REGS_OFFSET); 1583 + RIO_ATMU_REGS_OFFSET);
1584 priv->maint_atmu_regs = priv->atmu_regs + 1; 1584 priv->maint_atmu_regs = priv->atmu_regs + 1;
1585 priv->dbell_atmu_regs = priv->atmu_regs + 2; 1585 priv->dbell_atmu_regs = priv->atmu_regs + 2;
1586 priv->msg_regs = (struct rio_msg_regs *)(priv->regs_win + 1586 priv->msg_regs = (struct rio_msg_regs *)(priv->regs_win +
1587 ((port->phy_type == RIO_PHY_SERIAL) ? 1587 ((port->phy_type == RIO_PHY_SERIAL) ?
1588 RIO_S_MSG_REGS_OFFSET : RIO_P_MSG_REGS_OFFSET)); 1588 RIO_S_MSG_REGS_OFFSET : RIO_P_MSG_REGS_OFFSET));
1589 1589
1590 /* Set to receive any dest ID for serial RapidIO controller. */ 1590 /* Set to receive any dest ID for serial RapidIO controller. */
1591 if (port->phy_type == RIO_PHY_SERIAL) 1591 if (port->phy_type == RIO_PHY_SERIAL)
1592 out_be32((priv->regs_win + RIO_ISR_AACR), RIO_ISR_AACR_AA); 1592 out_be32((priv->regs_win + RIO_ISR_AACR), RIO_ISR_AACR_AA);
1593 1593
1594 /* Configure maintenance transaction window */ 1594 /* Configure maintenance transaction window */
1595 out_be32(&priv->maint_atmu_regs->rowbar, law_start >> 12); 1595 out_be32(&priv->maint_atmu_regs->rowbar, law_start >> 12);
1596 out_be32(&priv->maint_atmu_regs->rowar, 1596 out_be32(&priv->maint_atmu_regs->rowar,
1597 0x80077000 | (ilog2(RIO_MAINT_WIN_SIZE) - 1)); 1597 0x80077000 | (ilog2(RIO_MAINT_WIN_SIZE) - 1));
1598 1598
1599 priv->maint_win = ioremap(law_start, RIO_MAINT_WIN_SIZE); 1599 priv->maint_win = ioremap(law_start, RIO_MAINT_WIN_SIZE);
1600 1600
1601 /* Configure outbound doorbell window */ 1601 /* Configure outbound doorbell window */
1602 out_be32(&priv->dbell_atmu_regs->rowbar, 1602 out_be32(&priv->dbell_atmu_regs->rowbar,
1603 (law_start + RIO_MAINT_WIN_SIZE) >> 12); 1603 (law_start + RIO_MAINT_WIN_SIZE) >> 12);
1604 out_be32(&priv->dbell_atmu_regs->rowar, 0x8004200b); /* 4k */ 1604 out_be32(&priv->dbell_atmu_regs->rowar, 0x8004200b); /* 4k */
1605 fsl_rio_doorbell_init(port); 1605 fsl_rio_doorbell_init(port);
1606 fsl_rio_port_write_init(port); 1606 fsl_rio_port_write_init(port);
1607 1607
1608 return 0; 1608 return 0;
1609 err: 1609 err:
1610 iounmap(priv->regs_win); 1610 iounmap(priv->regs_win);
1611 err_res: 1611 err_res:
1612 kfree(priv); 1612 kfree(priv);
1613 err_priv: 1613 err_priv:
1614 kfree(port); 1614 kfree(port);
1615 err_port: 1615 err_port:
1616 kfree(ops); 1616 kfree(ops);
1617 err_ops: 1617 err_ops:
1618 return rc; 1618 return rc;
1619 } 1619 }
1620 1620
1621 /* The probe function for RapidIO peer-to-peer network. 1621 /* The probe function for RapidIO peer-to-peer network.
1622 */ 1622 */
1623 static int __devinit fsl_of_rio_rpn_probe(struct platform_device *dev) 1623 static int __devinit fsl_of_rio_rpn_probe(struct platform_device *dev)
1624 { 1624 {
1625 printk(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n", 1625 printk(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n",
1626 dev->dev.of_node->full_name); 1626 dev->dev.of_node->full_name);
1627 1627
1628 return fsl_rio_setup(dev); 1628 return fsl_rio_setup(dev);
1629 }; 1629 };
1630 1630
1631 static const struct of_device_id fsl_of_rio_rpn_ids[] = { 1631 static const struct of_device_id fsl_of_rio_rpn_ids[] = {
1632 { 1632 {
1633 .compatible = "fsl,rapidio-delta", 1633 .compatible = "fsl,rapidio-delta",
1634 }, 1634 },
1635 {}, 1635 {},
1636 }; 1636 };
1637 1637
1638 static struct platform_driver fsl_of_rio_rpn_driver = { 1638 static struct platform_driver fsl_of_rio_rpn_driver = {
1639 .driver = { 1639 .driver = {
1640 .name = "fsl-of-rio", 1640 .name = "fsl-of-rio",
1641 .owner = THIS_MODULE, 1641 .owner = THIS_MODULE,
1642 .of_match_table = fsl_of_rio_rpn_ids, 1642 .of_match_table = fsl_of_rio_rpn_ids,
1643 }, 1643 },
1644 .probe = fsl_of_rio_rpn_probe, 1644 .probe = fsl_of_rio_rpn_probe,
1645 }; 1645 };
1646 1646
1647 static __init int fsl_of_rio_rpn_init(void) 1647 static __init int fsl_of_rio_rpn_init(void)
1648 { 1648 {
1649 return platform_driver_register(&fsl_of_rio_rpn_driver); 1649 return platform_driver_register(&fsl_of_rio_rpn_driver);
1650 } 1650 }
1651 1651
1652 subsys_initcall(fsl_of_rio_rpn_init); 1652 subsys_initcall(fsl_of_rio_rpn_init);
1653 1653
arch/powerpc/sysdev/fsl_soc.c
1 /* 1 /*
2 * FSL SoC setup code 2 * FSL SoC setup code
3 * 3 *
4 * Maintained by Kumar Gala (see MAINTAINERS for contact information) 4 * Maintained by Kumar Gala (see MAINTAINERS for contact information)
5 * 5 *
6 * 2006 (c) MontaVista Software, Inc. 6 * 2006 (c) MontaVista Software, Inc.
7 * Vitaly Bordug <vbordug@ru.mvista.com> 7 * Vitaly Bordug <vbordug@ru.mvista.com>
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify it 9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the 10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your 11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version. 12 * option) any later version.
13 */ 13 */
14 14
15 #include <linux/stddef.h> 15 #include <linux/stddef.h>
16 #include <linux/kernel.h> 16 #include <linux/kernel.h>
17 #include <linux/init.h> 17 #include <linux/init.h>
18 #include <linux/errno.h> 18 #include <linux/errno.h>
19 #include <linux/major.h> 19 #include <linux/major.h>
20 #include <linux/delay.h> 20 #include <linux/delay.h>
21 #include <linux/irq.h> 21 #include <linux/irq.h>
22 #include <linux/module.h> 22 #include <linux/export.h>
23 #include <linux/device.h> 23 #include <linux/device.h>
24 #include <linux/platform_device.h> 24 #include <linux/platform_device.h>
25 #include <linux/of.h> 25 #include <linux/of.h>
26 #include <linux/of_platform.h> 26 #include <linux/of_platform.h>
27 #include <linux/phy.h> 27 #include <linux/phy.h>
28 #include <linux/phy_fixed.h> 28 #include <linux/phy_fixed.h>
29 #include <linux/spi/spi.h> 29 #include <linux/spi/spi.h>
30 #include <linux/fsl_devices.h> 30 #include <linux/fsl_devices.h>
31 #include <linux/fs_enet_pd.h> 31 #include <linux/fs_enet_pd.h>
32 #include <linux/fs_uart_pd.h> 32 #include <linux/fs_uart_pd.h>
33 33
34 #include <asm/system.h> 34 #include <asm/system.h>
35 #include <linux/atomic.h> 35 #include <linux/atomic.h>
36 #include <asm/io.h> 36 #include <asm/io.h>
37 #include <asm/irq.h> 37 #include <asm/irq.h>
38 #include <asm/time.h> 38 #include <asm/time.h>
39 #include <asm/prom.h> 39 #include <asm/prom.h>
40 #include <asm/machdep.h> 40 #include <asm/machdep.h>
41 #include <sysdev/fsl_soc.h> 41 #include <sysdev/fsl_soc.h>
42 #include <mm/mmu_decl.h> 42 #include <mm/mmu_decl.h>
43 #include <asm/cpm2.h> 43 #include <asm/cpm2.h>
44 #include <asm/fsl_hcalls.h> /* For the Freescale hypervisor */ 44 #include <asm/fsl_hcalls.h> /* For the Freescale hypervisor */
45 45
46 extern void init_fcc_ioports(struct fs_platform_info*); 46 extern void init_fcc_ioports(struct fs_platform_info*);
47 extern void init_fec_ioports(struct fs_platform_info*); 47 extern void init_fec_ioports(struct fs_platform_info*);
48 extern void init_smc_ioports(struct fs_uart_platform_info*); 48 extern void init_smc_ioports(struct fs_uart_platform_info*);
49 static phys_addr_t immrbase = -1; 49 static phys_addr_t immrbase = -1;
50 50
51 phys_addr_t get_immrbase(void) 51 phys_addr_t get_immrbase(void)
52 { 52 {
53 struct device_node *soc; 53 struct device_node *soc;
54 54
55 if (immrbase != -1) 55 if (immrbase != -1)
56 return immrbase; 56 return immrbase;
57 57
58 soc = of_find_node_by_type(NULL, "soc"); 58 soc = of_find_node_by_type(NULL, "soc");
59 if (soc) { 59 if (soc) {
60 int size; 60 int size;
61 u32 naddr; 61 u32 naddr;
62 const u32 *prop = of_get_property(soc, "#address-cells", &size); 62 const u32 *prop = of_get_property(soc, "#address-cells", &size);
63 63
64 if (prop && size == 4) 64 if (prop && size == 4)
65 naddr = *prop; 65 naddr = *prop;
66 else 66 else
67 naddr = 2; 67 naddr = 2;
68 68
69 prop = of_get_property(soc, "ranges", &size); 69 prop = of_get_property(soc, "ranges", &size);
70 if (prop) 70 if (prop)
71 immrbase = of_translate_address(soc, prop + naddr); 71 immrbase = of_translate_address(soc, prop + naddr);
72 72
73 of_node_put(soc); 73 of_node_put(soc);
74 } 74 }
75 75
76 return immrbase; 76 return immrbase;
77 } 77 }
78 78
79 EXPORT_SYMBOL(get_immrbase); 79 EXPORT_SYMBOL(get_immrbase);
80 80
81 static u32 sysfreq = -1; 81 static u32 sysfreq = -1;
82 82
83 u32 fsl_get_sys_freq(void) 83 u32 fsl_get_sys_freq(void)
84 { 84 {
85 struct device_node *soc; 85 struct device_node *soc;
86 const u32 *prop; 86 const u32 *prop;
87 int size; 87 int size;
88 88
89 if (sysfreq != -1) 89 if (sysfreq != -1)
90 return sysfreq; 90 return sysfreq;
91 91
92 soc = of_find_node_by_type(NULL, "soc"); 92 soc = of_find_node_by_type(NULL, "soc");
93 if (!soc) 93 if (!soc)
94 return -1; 94 return -1;
95 95
96 prop = of_get_property(soc, "clock-frequency", &size); 96 prop = of_get_property(soc, "clock-frequency", &size);
97 if (!prop || size != sizeof(*prop) || *prop == 0) 97 if (!prop || size != sizeof(*prop) || *prop == 0)
98 prop = of_get_property(soc, "bus-frequency", &size); 98 prop = of_get_property(soc, "bus-frequency", &size);
99 99
100 if (prop && size == sizeof(*prop)) 100 if (prop && size == sizeof(*prop))
101 sysfreq = *prop; 101 sysfreq = *prop;
102 102
103 of_node_put(soc); 103 of_node_put(soc);
104 return sysfreq; 104 return sysfreq;
105 } 105 }
106 EXPORT_SYMBOL(fsl_get_sys_freq); 106 EXPORT_SYMBOL(fsl_get_sys_freq);
107 107
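
Both get_immrbase() and fsl_get_sys_freq() cache the value found on the first device-tree walk and simply return it afterwards, so callers can invoke them freely. A small usage sketch follows; the function and variable names are made up for illustration, and the usual printk headers are assumed.

    static int __init example_show_soc_info(void)
    {
            phys_addr_t immr = get_immrbase();
            u32 freq = fsl_get_sys_freq();

            /* Both helpers return -1 when the property lookup failed. */
            if (immr == (phys_addr_t)-1 || freq == (u32)-1)
                    return -ENODEV;

            pr_info("IMMR at 0x%llx, system clock %u Hz\n",
                    (unsigned long long)immr, freq);
            return 0;
    }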
108 #if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx) 108 #if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx)
109 109
110 static u32 brgfreq = -1; 110 static u32 brgfreq = -1;
111 111
112 u32 get_brgfreq(void) 112 u32 get_brgfreq(void)
113 { 113 {
114 struct device_node *node; 114 struct device_node *node;
115 const unsigned int *prop; 115 const unsigned int *prop;
116 int size; 116 int size;
117 117
118 if (brgfreq != -1) 118 if (brgfreq != -1)
119 return brgfreq; 119 return brgfreq;
120 120
121 node = of_find_compatible_node(NULL, NULL, "fsl,cpm-brg"); 121 node = of_find_compatible_node(NULL, NULL, "fsl,cpm-brg");
122 if (node) { 122 if (node) {
123 prop = of_get_property(node, "clock-frequency", &size); 123 prop = of_get_property(node, "clock-frequency", &size);
124 if (prop && size == 4) 124 if (prop && size == 4)
125 brgfreq = *prop; 125 brgfreq = *prop;
126 126
127 of_node_put(node); 127 of_node_put(node);
128 return brgfreq; 128 return brgfreq;
129 } 129 }
130 130
131 /* Legacy device binding -- will go away when no users are left. */ 131 /* Legacy device binding -- will go away when no users are left. */
132 node = of_find_node_by_type(NULL, "cpm"); 132 node = of_find_node_by_type(NULL, "cpm");
133 if (!node) 133 if (!node)
134 node = of_find_compatible_node(NULL, NULL, "fsl,qe"); 134 node = of_find_compatible_node(NULL, NULL, "fsl,qe");
135 if (!node) 135 if (!node)
136 node = of_find_node_by_type(NULL, "qe"); 136 node = of_find_node_by_type(NULL, "qe");
137 137
138 if (node) { 138 if (node) {
139 prop = of_get_property(node, "brg-frequency", &size); 139 prop = of_get_property(node, "brg-frequency", &size);
140 if (prop && size == 4) 140 if (prop && size == 4)
141 brgfreq = *prop; 141 brgfreq = *prop;
142 142
143 if (brgfreq == -1 || brgfreq == 0) { 143 if (brgfreq == -1 || brgfreq == 0) {
144 prop = of_get_property(node, "bus-frequency", &size); 144 prop = of_get_property(node, "bus-frequency", &size);
145 if (prop && size == 4) 145 if (prop && size == 4)
146 brgfreq = *prop / 2; 146 brgfreq = *prop / 2;
147 } 147 }
148 of_node_put(node); 148 of_node_put(node);
149 } 149 }
150 150
151 return brgfreq; 151 return brgfreq;
152 } 152 }
153 153
154 EXPORT_SYMBOL(get_brgfreq); 154 EXPORT_SYMBOL(get_brgfreq);
155 155
156 static u32 fs_baudrate = -1; 156 static u32 fs_baudrate = -1;
157 157
158 u32 get_baudrate(void) 158 u32 get_baudrate(void)
159 { 159 {
160 struct device_node *node; 160 struct device_node *node;
161 161
162 if (fs_baudrate != -1) 162 if (fs_baudrate != -1)
163 return fs_baudrate; 163 return fs_baudrate;
164 164
165 node = of_find_node_by_type(NULL, "serial"); 165 node = of_find_node_by_type(NULL, "serial");
166 if (node) { 166 if (node) {
167 int size; 167 int size;
168 const unsigned int *prop = of_get_property(node, 168 const unsigned int *prop = of_get_property(node,
169 "current-speed", &size); 169 "current-speed", &size);
170 170
171 if (prop) 171 if (prop)
172 fs_baudrate = *prop; 172 fs_baudrate = *prop;
173 of_node_put(node); 173 of_node_put(node);
174 } 174 }
175 175
176 return fs_baudrate; 176 return fs_baudrate;
177 } 177 }
178 178
179 EXPORT_SYMBOL(get_baudrate); 179 EXPORT_SYMBOL(get_baudrate);
180 #endif /* CONFIG_CPM2 || CONFIG_QUICC_ENGINE || CONFIG_8xx */ 180 #endif /* CONFIG_CPM2 || CONFIG_QUICC_ENGINE || CONFIG_8xx */
181 181
182 #ifdef CONFIG_FIXED_PHY 182 #ifdef CONFIG_FIXED_PHY
183 static int __init of_add_fixed_phys(void) 183 static int __init of_add_fixed_phys(void)
184 { 184 {
185 int ret; 185 int ret;
186 struct device_node *np; 186 struct device_node *np;
187 u32 *fixed_link; 187 u32 *fixed_link;
188 struct fixed_phy_status status = {}; 188 struct fixed_phy_status status = {};
189 189
190 for_each_node_by_name(np, "ethernet") { 190 for_each_node_by_name(np, "ethernet") {
191 fixed_link = (u32 *)of_get_property(np, "fixed-link", NULL); 191 fixed_link = (u32 *)of_get_property(np, "fixed-link", NULL);
192 if (!fixed_link) 192 if (!fixed_link)
193 continue; 193 continue;
194 194
195 status.link = 1; 195 status.link = 1;
196 status.duplex = fixed_link[1]; 196 status.duplex = fixed_link[1];
197 status.speed = fixed_link[2]; 197 status.speed = fixed_link[2];
198 status.pause = fixed_link[3]; 198 status.pause = fixed_link[3];
199 status.asym_pause = fixed_link[4]; 199 status.asym_pause = fixed_link[4];
200 200
201 ret = fixed_phy_add(PHY_POLL, fixed_link[0], &status); 201 ret = fixed_phy_add(PHY_POLL, fixed_link[0], &status);
202 if (ret) { 202 if (ret) {
203 of_node_put(np); 203 of_node_put(np);
204 return ret; 204 return ret;
205 } 205 }
206 } 206 }
207 207
208 return 0; 208 return 0;
209 } 209 }
210 arch_initcall(of_add_fixed_phys); 210 arch_initcall(of_add_fixed_phys);
211 #endif /* CONFIG_FIXED_PHY */ 211 #endif /* CONFIG_FIXED_PHY */
212 212
213 #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx) 213 #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
214 static __be32 __iomem *rstcr; 214 static __be32 __iomem *rstcr;
215 215
216 static int __init setup_rstcr(void) 216 static int __init setup_rstcr(void)
217 { 217 {
218 struct device_node *np; 218 struct device_node *np;
219 219
220 for_each_node_by_name(np, "global-utilities") { 220 for_each_node_by_name(np, "global-utilities") {
221 if ((of_get_property(np, "fsl,has-rstcr", NULL))) { 221 if ((of_get_property(np, "fsl,has-rstcr", NULL))) {
222 rstcr = of_iomap(np, 0) + 0xb0; 222 rstcr = of_iomap(np, 0) + 0xb0;
223 if (!rstcr) 223 if (!rstcr)
224 printk (KERN_ERR "Error: reset control " 224 printk (KERN_ERR "Error: reset control "
225 "register not mapped!\n"); 225 "register not mapped!\n");
226 break; 226 break;
227 } 227 }
228 } 228 }
229 229
230 if (!rstcr && ppc_md.restart == fsl_rstcr_restart) 230 if (!rstcr && ppc_md.restart == fsl_rstcr_restart)
231 printk(KERN_ERR "No RSTCR register, warm reboot won't work\n"); 231 printk(KERN_ERR "No RSTCR register, warm reboot won't work\n");
232 232
233 if (np) 233 if (np)
234 of_node_put(np); 234 of_node_put(np);
235 235
236 return 0; 236 return 0;
237 } 237 }
238 238
239 arch_initcall(setup_rstcr); 239 arch_initcall(setup_rstcr);
240 240
241 void fsl_rstcr_restart(char *cmd) 241 void fsl_rstcr_restart(char *cmd)
242 { 242 {
243 local_irq_disable(); 243 local_irq_disable();
244 if (rstcr) 244 if (rstcr)
245 /* set reset control register */ 245 /* set reset control register */
246 out_be32(rstcr, 0x2); /* HRESET_REQ */ 246 out_be32(rstcr, 0x2); /* HRESET_REQ */
247 247
248 while (1) ; 248 while (1) ;
249 } 249 }
250 #endif 250 #endif
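
fsl_rstcr_restart() only takes effect when a board's machine description points ppc_md.restart at it (setup_rstcr() checks exactly that before warning). A minimal sketch of that wiring is shown below; the board name, compatible string and probe function are hypothetical, and the usual machdep/of_fdt/time headers are assumed.

    static int __init example_board_probe(void)
    {
            /* Match a hypothetical board compatible string. */
            return of_flat_dt_is_compatible(of_get_flat_dt_root(),
                                            "example,board");
    }

    define_machine(example_board) {
            .name           = "Example 85xx board",
            .probe          = example_board_probe,
            .restart        = fsl_rstcr_restart,
            .calibrate_decr = generic_calibrate_decr,
    };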
251 251
252 #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) 252 #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
253 struct platform_diu_data_ops diu_ops; 253 struct platform_diu_data_ops diu_ops;
254 EXPORT_SYMBOL(diu_ops); 254 EXPORT_SYMBOL(diu_ops);
255 #endif 255 #endif
256 256
257 /* 257 /*
258 * Restart the current partition 258 * Restart the current partition
259 * 259 *
260 * This function should be assigned to the ppc_md.restart function pointer, 260 * This function should be assigned to the ppc_md.restart function pointer,
261 * to initiate a partition restart when we're running under the Freescale 261 * to initiate a partition restart when we're running under the Freescale
262 * hypervisor. 262 * hypervisor.
263 */ 263 */
264 void fsl_hv_restart(char *cmd) 264 void fsl_hv_restart(char *cmd)
265 { 265 {
266 pr_info("hv restart\n"); 266 pr_info("hv restart\n");
267 fh_partition_restart(-1); 267 fh_partition_restart(-1);
268 } 268 }
269 269
270 /* 270 /*
271 * Halt the current partition 271 * Halt the current partition
272 * 272 *
273 * This function should be assigned to the ppc_md.power_off and ppc_md.halt 273 * This function should be assigned to the ppc_md.power_off and ppc_md.halt
274 * function pointers, to shut down the partition when we're running under 274 * function pointers, to shut down the partition when we're running under
275 * the Freescale hypervisor. 275 * the Freescale hypervisor.
276 */ 276 */
277 void fsl_hv_halt(void) 277 void fsl_hv_halt(void)
278 { 278 {
279 pr_info("hv exit\n"); 279 pr_info("hv exit\n");
280 fh_partition_stop(-1); 280 fh_partition_stop(-1);
281 } 281 }
282 282
arch/powerpc/sysdev/qe_lib/ucc.c
1 /* 1 /*
2 * arch/powerpc/sysdev/qe_lib/ucc.c 2 * arch/powerpc/sysdev/qe_lib/ucc.c
3 * 3 *
4 * QE UCC API Set - UCC specific routines implementations. 4 * QE UCC API Set - UCC specific routines implementations.
5 * 5 *
6 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. 6 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
7 * 7 *
8 * Authors: Shlomi Gridish <gridish@freescale.com> 8 * Authors: Shlomi Gridish <gridish@freescale.com>
9 * Li Yang <leoli@freescale.com> 9 * Li Yang <leoli@freescale.com>
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify it 11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the 12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your 13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version. 14 * option) any later version.
15 */ 15 */
16 #include <linux/kernel.h> 16 #include <linux/kernel.h>
17 #include <linux/init.h> 17 #include <linux/init.h>
18 #include <linux/errno.h> 18 #include <linux/errno.h>
19 #include <linux/stddef.h> 19 #include <linux/stddef.h>
20 #include <linux/spinlock.h> 20 #include <linux/spinlock.h>
21 #include <linux/module.h> 21 #include <linux/export.h>
22 22
23 #include <asm/irq.h> 23 #include <asm/irq.h>
24 #include <asm/io.h> 24 #include <asm/io.h>
25 #include <asm/immap_qe.h> 25 #include <asm/immap_qe.h>
26 #include <asm/qe.h> 26 #include <asm/qe.h>
27 #include <asm/ucc.h> 27 #include <asm/ucc.h>
28 28
29 int ucc_set_qe_mux_mii_mng(unsigned int ucc_num) 29 int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
30 { 30 {
31 unsigned long flags; 31 unsigned long flags;
32 32
33 if (ucc_num > UCC_MAX_NUM - 1) 33 if (ucc_num > UCC_MAX_NUM - 1)
34 return -EINVAL; 34 return -EINVAL;
35 35
36 spin_lock_irqsave(&cmxgcr_lock, flags); 36 spin_lock_irqsave(&cmxgcr_lock, flags);
37 clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG, 37 clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
38 ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT); 38 ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
39 spin_unlock_irqrestore(&cmxgcr_lock, flags); 39 spin_unlock_irqrestore(&cmxgcr_lock, flags);
40 40
41 return 0; 41 return 0;
42 } 42 }
43 EXPORT_SYMBOL(ucc_set_qe_mux_mii_mng); 43 EXPORT_SYMBOL(ucc_set_qe_mux_mii_mng);
44 44
45 /* Configure the UCC to either Slow or Fast. 45 /* Configure the UCC to either Slow or Fast.
46 * 46 *
47 * A given UCC can be configured to support either "slow" devices (e.g. UART) 47 * A given UCC can be configured to support either "slow" devices (e.g. UART)
48 * or "fast" devices (e.g. Ethernet). 48 * or "fast" devices (e.g. Ethernet).
49 * 49 *
50 * 'ucc_num' is the UCC number, from 0 - 7. 50 * 'ucc_num' is the UCC number, from 0 - 7.
51 * 51 *
52 * This function also sets the UCC_GUEMR_SET_RESERVED3 bit because that bit 52 * This function also sets the UCC_GUEMR_SET_RESERVED3 bit because that bit
53 * must always be set to 1. 53 * must always be set to 1.
54 */ 54 */
55 int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed) 55 int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed)
56 { 56 {
57 u8 __iomem *guemr; 57 u8 __iomem *guemr;
58 58
59 /* The GUEMR register is at the same location for both slow and fast 59 /* The GUEMR register is at the same location for both slow and fast
60 devices, so we just use uccX.slow.guemr. */ 60 devices, so we just use uccX.slow.guemr. */
61 switch (ucc_num) { 61 switch (ucc_num) {
62 case 0: guemr = &qe_immr->ucc1.slow.guemr; 62 case 0: guemr = &qe_immr->ucc1.slow.guemr;
63 break; 63 break;
64 case 1: guemr = &qe_immr->ucc2.slow.guemr; 64 case 1: guemr = &qe_immr->ucc2.slow.guemr;
65 break; 65 break;
66 case 2: guemr = &qe_immr->ucc3.slow.guemr; 66 case 2: guemr = &qe_immr->ucc3.slow.guemr;
67 break; 67 break;
68 case 3: guemr = &qe_immr->ucc4.slow.guemr; 68 case 3: guemr = &qe_immr->ucc4.slow.guemr;
69 break; 69 break;
70 case 4: guemr = &qe_immr->ucc5.slow.guemr; 70 case 4: guemr = &qe_immr->ucc5.slow.guemr;
71 break; 71 break;
72 case 5: guemr = &qe_immr->ucc6.slow.guemr; 72 case 5: guemr = &qe_immr->ucc6.slow.guemr;
73 break; 73 break;
74 case 6: guemr = &qe_immr->ucc7.slow.guemr; 74 case 6: guemr = &qe_immr->ucc7.slow.guemr;
75 break; 75 break;
76 case 7: guemr = &qe_immr->ucc8.slow.guemr; 76 case 7: guemr = &qe_immr->ucc8.slow.guemr;
77 break; 77 break;
78 default: 78 default:
79 return -EINVAL; 79 return -EINVAL;
80 } 80 }
81 81
82 clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK, 82 clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
83 UCC_GUEMR_SET_RESERVED3 | speed); 83 UCC_GUEMR_SET_RESERVED3 | speed);
84 84
85 return 0; 85 return 0;
86 } 86 }
87 87
88 static void get_cmxucr_reg(unsigned int ucc_num, __be32 __iomem **cmxucr, 88 static void get_cmxucr_reg(unsigned int ucc_num, __be32 __iomem **cmxucr,
89 unsigned int *reg_num, unsigned int *shift) 89 unsigned int *reg_num, unsigned int *shift)
90 { 90 {
91 unsigned int cmx = ((ucc_num & 1) << 1) + (ucc_num > 3); 91 unsigned int cmx = ((ucc_num & 1) << 1) + (ucc_num > 3);
92 92
93 *reg_num = cmx + 1; 93 *reg_num = cmx + 1;
94 *cmxucr = &qe_immr->qmx.cmxucr[cmx]; 94 *cmxucr = &qe_immr->qmx.cmxucr[cmx];
95 *shift = 16 - 8 * (ucc_num & 2); 95 *shift = 16 - 8 * (ucc_num & 2);
96 } 96 }
97 97
98 int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask) 98 int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask)
99 { 99 {
100 __be32 __iomem *cmxucr; 100 __be32 __iomem *cmxucr;
101 unsigned int reg_num; 101 unsigned int reg_num;
102 unsigned int shift; 102 unsigned int shift;
103 103
104 /* check if the UCC number is in range. */ 104 /* check if the UCC number is in range. */
105 if (ucc_num > UCC_MAX_NUM - 1) 105 if (ucc_num > UCC_MAX_NUM - 1)
106 return -EINVAL; 106 return -EINVAL;
107 107
108 get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift); 108 get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
109 109
110 if (set) 110 if (set)
111 setbits32(cmxucr, mask << shift); 111 setbits32(cmxucr, mask << shift);
112 else 112 else
113 clrbits32(cmxucr, mask << shift); 113 clrbits32(cmxucr, mask << shift);
114 114
115 return 0; 115 return 0;
116 } 116 }
117 117
118 int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock, 118 int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
119 enum comm_dir mode) 119 enum comm_dir mode)
120 { 120 {
121 __be32 __iomem *cmxucr; 121 __be32 __iomem *cmxucr;
122 unsigned int reg_num; 122 unsigned int reg_num;
123 unsigned int shift; 123 unsigned int shift;
124 u32 clock_bits = 0; 124 u32 clock_bits = 0;
125 125
126 /* check if the UCC number is in range. */ 126 /* check if the UCC number is in range. */
127 if (ucc_num > UCC_MAX_NUM - 1) 127 if (ucc_num > UCC_MAX_NUM - 1)
128 return -EINVAL; 128 return -EINVAL;
129 129
130 /* The communications direction must be RX or TX */ 130 /* The communications direction must be RX or TX */
131 if (!((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX))) 131 if (!((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX)))
132 return -EINVAL; 132 return -EINVAL;
133 133
134 get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift); 134 get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
135 135
136 switch (reg_num) { 136 switch (reg_num) {
137 case 1: 137 case 1:
138 switch (clock) { 138 switch (clock) {
139 case QE_BRG1: clock_bits = 1; break; 139 case QE_BRG1: clock_bits = 1; break;
140 case QE_BRG2: clock_bits = 2; break; 140 case QE_BRG2: clock_bits = 2; break;
141 case QE_BRG7: clock_bits = 3; break; 141 case QE_BRG7: clock_bits = 3; break;
142 case QE_BRG8: clock_bits = 4; break; 142 case QE_BRG8: clock_bits = 4; break;
143 case QE_CLK9: clock_bits = 5; break; 143 case QE_CLK9: clock_bits = 5; break;
144 case QE_CLK10: clock_bits = 6; break; 144 case QE_CLK10: clock_bits = 6; break;
145 case QE_CLK11: clock_bits = 7; break; 145 case QE_CLK11: clock_bits = 7; break;
146 case QE_CLK12: clock_bits = 8; break; 146 case QE_CLK12: clock_bits = 8; break;
147 case QE_CLK15: clock_bits = 9; break; 147 case QE_CLK15: clock_bits = 9; break;
148 case QE_CLK16: clock_bits = 10; break; 148 case QE_CLK16: clock_bits = 10; break;
149 default: break; 149 default: break;
150 } 150 }
151 break; 151 break;
152 case 2: 152 case 2:
153 switch (clock) { 153 switch (clock) {
154 case QE_BRG5: clock_bits = 1; break; 154 case QE_BRG5: clock_bits = 1; break;
155 case QE_BRG6: clock_bits = 2; break; 155 case QE_BRG6: clock_bits = 2; break;
156 case QE_BRG7: clock_bits = 3; break; 156 case QE_BRG7: clock_bits = 3; break;
157 case QE_BRG8: clock_bits = 4; break; 157 case QE_BRG8: clock_bits = 4; break;
158 case QE_CLK13: clock_bits = 5; break; 158 case QE_CLK13: clock_bits = 5; break;
159 case QE_CLK14: clock_bits = 6; break; 159 case QE_CLK14: clock_bits = 6; break;
160 case QE_CLK19: clock_bits = 7; break; 160 case QE_CLK19: clock_bits = 7; break;
161 case QE_CLK20: clock_bits = 8; break; 161 case QE_CLK20: clock_bits = 8; break;
162 case QE_CLK15: clock_bits = 9; break; 162 case QE_CLK15: clock_bits = 9; break;
163 case QE_CLK16: clock_bits = 10; break; 163 case QE_CLK16: clock_bits = 10; break;
164 default: break; 164 default: break;
165 } 165 }
166 break; 166 break;
167 case 3: 167 case 3:
168 switch (clock) { 168 switch (clock) {
169 case QE_BRG9: clock_bits = 1; break; 169 case QE_BRG9: clock_bits = 1; break;
170 case QE_BRG10: clock_bits = 2; break; 170 case QE_BRG10: clock_bits = 2; break;
171 case QE_BRG15: clock_bits = 3; break; 171 case QE_BRG15: clock_bits = 3; break;
172 case QE_BRG16: clock_bits = 4; break; 172 case QE_BRG16: clock_bits = 4; break;
173 case QE_CLK3: clock_bits = 5; break; 173 case QE_CLK3: clock_bits = 5; break;
174 case QE_CLK4: clock_bits = 6; break; 174 case QE_CLK4: clock_bits = 6; break;
175 case QE_CLK17: clock_bits = 7; break; 175 case QE_CLK17: clock_bits = 7; break;
176 case QE_CLK18: clock_bits = 8; break; 176 case QE_CLK18: clock_bits = 8; break;
177 case QE_CLK7: clock_bits = 9; break; 177 case QE_CLK7: clock_bits = 9; break;
178 case QE_CLK8: clock_bits = 10; break; 178 case QE_CLK8: clock_bits = 10; break;
179 case QE_CLK16: clock_bits = 11; break; 179 case QE_CLK16: clock_bits = 11; break;
180 default: break; 180 default: break;
181 } 181 }
182 break; 182 break;
183 case 4: 183 case 4:
184 switch (clock) { 184 switch (clock) {
185 case QE_BRG13: clock_bits = 1; break; 185 case QE_BRG13: clock_bits = 1; break;
186 case QE_BRG14: clock_bits = 2; break; 186 case QE_BRG14: clock_bits = 2; break;
187 case QE_BRG15: clock_bits = 3; break; 187 case QE_BRG15: clock_bits = 3; break;
188 case QE_BRG16: clock_bits = 4; break; 188 case QE_BRG16: clock_bits = 4; break;
189 case QE_CLK5: clock_bits = 5; break; 189 case QE_CLK5: clock_bits = 5; break;
190 case QE_CLK6: clock_bits = 6; break; 190 case QE_CLK6: clock_bits = 6; break;
191 case QE_CLK21: clock_bits = 7; break; 191 case QE_CLK21: clock_bits = 7; break;
192 case QE_CLK22: clock_bits = 8; break; 192 case QE_CLK22: clock_bits = 8; break;
193 case QE_CLK7: clock_bits = 9; break; 193 case QE_CLK7: clock_bits = 9; break;
194 case QE_CLK8: clock_bits = 10; break; 194 case QE_CLK8: clock_bits = 10; break;
195 case QE_CLK16: clock_bits = 11; break; 195 case QE_CLK16: clock_bits = 11; break;
196 default: break; 196 default: break;
197 } 197 }
198 break; 198 break;
199 default: break; 199 default: break;
200 } 200 }
201 201
202 /* Check for invalid combination of clock and UCC number */ 202 /* Check for invalid combination of clock and UCC number */
203 if (!clock_bits) 203 if (!clock_bits)
204 return -ENOENT; 204 return -ENOENT;
205 205
206 if (mode == COMM_DIR_RX) 206 if (mode == COMM_DIR_RX)
207 shift += 4; 207 shift += 4;
208 208
209 clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift, 209 clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
210 clock_bits << shift); 210 clock_bits << shift);
211 211
212 return 0; 212 return 0;
213 } 213 }
214 214
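
As a usage note, the clock routing above is typically driven from a UCC client driver once per direction. A short caller sketch follows, with a made-up function name, assuming UCC1 is clocked from BRG1 in both directions (ucc_num is zero-based, so UCC1 is 0).

    static int example_route_ucc1_clocks(void)
    {
            int ret;

            ret = ucc_set_qe_mux_rxtx(0, QE_BRG1, COMM_DIR_RX);
            if (ret)
                    return ret;
            return ucc_set_qe_mux_rxtx(0, QE_BRG1, COMM_DIR_TX);
    }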
arch/powerpc/sysdev/qe_lib/ucc_fast.c
1 /* 1 /*
2 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. 2 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
3 * 3 *
4 * Authors: Shlomi Gridish <gridish@freescale.com> 4 * Authors: Shlomi Gridish <gridish@freescale.com>
5 * Li Yang <leoli@freescale.com> 5 * Li Yang <leoli@freescale.com>
6 * 6 *
7 * Description: 7 * Description:
8 * QE UCC Fast API Set - UCC Fast specific routines implementations. 8 * QE UCC Fast API Set - UCC Fast specific routines implementations.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the 11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your 12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version. 13 * option) any later version.
14 */ 14 */
15 #include <linux/kernel.h> 15 #include <linux/kernel.h>
16 #include <linux/init.h> 16 #include <linux/init.h>
17 #include <linux/errno.h> 17 #include <linux/errno.h>
18 #include <linux/slab.h> 18 #include <linux/slab.h>
19 #include <linux/stddef.h> 19 #include <linux/stddef.h>
20 #include <linux/interrupt.h> 20 #include <linux/interrupt.h>
21 #include <linux/err.h> 21 #include <linux/err.h>
22 #include <linux/module.h> 22 #include <linux/export.h>
23 23
24 #include <asm/io.h> 24 #include <asm/io.h>
25 #include <asm/immap_qe.h> 25 #include <asm/immap_qe.h>
26 #include <asm/qe.h> 26 #include <asm/qe.h>
27 27
28 #include <asm/ucc.h> 28 #include <asm/ucc.h>
29 #include <asm/ucc_fast.h> 29 #include <asm/ucc_fast.h>
30 30
31 void ucc_fast_dump_regs(struct ucc_fast_private * uccf) 31 void ucc_fast_dump_regs(struct ucc_fast_private * uccf)
32 { 32 {
33 printk(KERN_INFO "UCC%u Fast registers:\n", uccf->uf_info->ucc_num); 33 printk(KERN_INFO "UCC%u Fast registers:\n", uccf->uf_info->ucc_num);
34 printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs); 34 printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
35 35
36 printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n", 36 printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n",
37 &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr)); 37 &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
38 printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n", 38 printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
39 &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr)); 39 &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
40 printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n", 40 printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
41 &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr)); 41 &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
42 printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n", 42 printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n",
43 &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr)); 43 &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
44 printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n", 44 printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n",
45 &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce)); 45 &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
46 printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n", 46 printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n",
47 &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm)); 47 &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
48 printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n", 48 printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n",
49 &uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs)); 49 &uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs));
50 printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n", 50 printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n",
51 &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb)); 51 &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
52 printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n", 52 printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n",
53 &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs)); 53 &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
54 printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n", 54 printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
55 &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet)); 55 &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
56 printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n", 56 printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
57 &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset)); 57 &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset));
58 printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n", 58 printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n",
59 &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb)); 59 &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
60 printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n", 60 printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n",
61 &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs)); 61 &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
62 printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n", 62 printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
63 &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet)); 63 &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
64 printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n", 64 printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
65 &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt)); 65 &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
66 printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n", 66 printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n",
67 &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt)); 67 &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
68 printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n", 68 printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
69 &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry)); 69 &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
70 printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n", 70 printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
71 &uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr)); 71 &uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr));
72 } 72 }
73 EXPORT_SYMBOL(ucc_fast_dump_regs); 73 EXPORT_SYMBOL(ucc_fast_dump_regs);
74 74
75 u32 ucc_fast_get_qe_cr_subblock(int uccf_num) 75 u32 ucc_fast_get_qe_cr_subblock(int uccf_num)
76 { 76 {
77 switch (uccf_num) { 77 switch (uccf_num) {
78 case 0: return QE_CR_SUBBLOCK_UCCFAST1; 78 case 0: return QE_CR_SUBBLOCK_UCCFAST1;
79 case 1: return QE_CR_SUBBLOCK_UCCFAST2; 79 case 1: return QE_CR_SUBBLOCK_UCCFAST2;
80 case 2: return QE_CR_SUBBLOCK_UCCFAST3; 80 case 2: return QE_CR_SUBBLOCK_UCCFAST3;
81 case 3: return QE_CR_SUBBLOCK_UCCFAST4; 81 case 3: return QE_CR_SUBBLOCK_UCCFAST4;
82 case 4: return QE_CR_SUBBLOCK_UCCFAST5; 82 case 4: return QE_CR_SUBBLOCK_UCCFAST5;
83 case 5: return QE_CR_SUBBLOCK_UCCFAST6; 83 case 5: return QE_CR_SUBBLOCK_UCCFAST6;
84 case 6: return QE_CR_SUBBLOCK_UCCFAST7; 84 case 6: return QE_CR_SUBBLOCK_UCCFAST7;
85 case 7: return QE_CR_SUBBLOCK_UCCFAST8; 85 case 7: return QE_CR_SUBBLOCK_UCCFAST8;
86 default: return QE_CR_SUBBLOCK_INVALID; 86 default: return QE_CR_SUBBLOCK_INVALID;
87 } 87 }
88 } 88 }
89 EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock); 89 EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock);
90 90
91 void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf) 91 void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
92 { 92 {
93 out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD); 93 out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
94 } 94 }
95 EXPORT_SYMBOL(ucc_fast_transmit_on_demand); 95 EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
96 96
97 void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode) 97 void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
98 { 98 {
99 struct ucc_fast __iomem *uf_regs; 99 struct ucc_fast __iomem *uf_regs;
100 u32 gumr; 100 u32 gumr;
101 101
102 uf_regs = uccf->uf_regs; 102 uf_regs = uccf->uf_regs;
103 103
104 /* Enable reception and/or transmission on this UCC. */ 104 /* Enable reception and/or transmission on this UCC. */
105 gumr = in_be32(&uf_regs->gumr); 105 gumr = in_be32(&uf_regs->gumr);
106 if (mode & COMM_DIR_TX) { 106 if (mode & COMM_DIR_TX) {
107 gumr |= UCC_FAST_GUMR_ENT; 107 gumr |= UCC_FAST_GUMR_ENT;
108 uccf->enabled_tx = 1; 108 uccf->enabled_tx = 1;
109 } 109 }
110 if (mode & COMM_DIR_RX) { 110 if (mode & COMM_DIR_RX) {
111 gumr |= UCC_FAST_GUMR_ENR; 111 gumr |= UCC_FAST_GUMR_ENR;
112 uccf->enabled_rx = 1; 112 uccf->enabled_rx = 1;
113 } 113 }
114 out_be32(&uf_regs->gumr, gumr); 114 out_be32(&uf_regs->gumr, gumr);
115 } 115 }
116 EXPORT_SYMBOL(ucc_fast_enable); 116 EXPORT_SYMBOL(ucc_fast_enable);
117 117
118 void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode) 118 void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
119 { 119 {
120 struct ucc_fast __iomem *uf_regs; 120 struct ucc_fast __iomem *uf_regs;
121 u32 gumr; 121 u32 gumr;
122 122
123 uf_regs = uccf->uf_regs; 123 uf_regs = uccf->uf_regs;
124 124
125 /* Disable reception and/or transmission on this UCC. */ 125 /* Disable reception and/or transmission on this UCC. */
126 gumr = in_be32(&uf_regs->gumr); 126 gumr = in_be32(&uf_regs->gumr);
127 if (mode & COMM_DIR_TX) { 127 if (mode & COMM_DIR_TX) {
128 gumr &= ~UCC_FAST_GUMR_ENT; 128 gumr &= ~UCC_FAST_GUMR_ENT;
129 uccf->enabled_tx = 0; 129 uccf->enabled_tx = 0;
130 } 130 }
131 if (mode & COMM_DIR_RX) { 131 if (mode & COMM_DIR_RX) {
132 gumr &= ~UCC_FAST_GUMR_ENR; 132 gumr &= ~UCC_FAST_GUMR_ENR;
133 uccf->enabled_rx = 0; 133 uccf->enabled_rx = 0;
134 } 134 }
135 out_be32(&uf_regs->gumr, gumr); 135 out_be32(&uf_regs->gumr, gumr);
136 } 136 }
137 EXPORT_SYMBOL(ucc_fast_disable); 137 EXPORT_SYMBOL(ucc_fast_disable);
138 138
139 int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** uccf_ret) 139 int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** uccf_ret)
140 { 140 {
141 struct ucc_fast_private *uccf; 141 struct ucc_fast_private *uccf;
142 struct ucc_fast __iomem *uf_regs; 142 struct ucc_fast __iomem *uf_regs;
143 u32 gumr; 143 u32 gumr;
144 int ret; 144 int ret;
145 145
146 if (!uf_info) 146 if (!uf_info)
147 return -EINVAL; 147 return -EINVAL;
148 148
149 /* check if the UCC port number is in range. */ 149 /* check if the UCC port number is in range. */
150 if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) { 150 if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) {
151 printk(KERN_ERR "%s: illegal UCC number\n", __func__); 151 printk(KERN_ERR "%s: illegal UCC number\n", __func__);
152 return -EINVAL; 152 return -EINVAL;
153 } 153 }
154 154
155 /* Check that 'max_rx_buf_length' is properly aligned (4). */ 155 /* Check that 'max_rx_buf_length' is properly aligned (4). */
156 if (uf_info->max_rx_buf_length & (UCC_FAST_MRBLR_ALIGNMENT - 1)) { 156 if (uf_info->max_rx_buf_length & (UCC_FAST_MRBLR_ALIGNMENT - 1)) {
157 printk(KERN_ERR "%s: max_rx_buf_length not aligned\n", 157 printk(KERN_ERR "%s: max_rx_buf_length not aligned\n",
158 __func__); 158 __func__);
159 return -EINVAL; 159 return -EINVAL;
160 } 160 }
161 161
162 /* Validate Virtual Fifo register values */ 162 /* Validate Virtual Fifo register values */
163 if (uf_info->urfs < UCC_FAST_URFS_MIN_VAL) { 163 if (uf_info->urfs < UCC_FAST_URFS_MIN_VAL) {
164 printk(KERN_ERR "%s: urfs is too small\n", __func__); 164 printk(KERN_ERR "%s: urfs is too small\n", __func__);
165 return -EINVAL; 165 return -EINVAL;
166 } 166 }
167 167
168 if (uf_info->urfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) { 168 if (uf_info->urfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
169 printk(KERN_ERR "%s: urfs is not aligned\n", __func__); 169 printk(KERN_ERR "%s: urfs is not aligned\n", __func__);
170 return -EINVAL; 170 return -EINVAL;
171 } 171 }
172 172
173 if (uf_info->urfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) { 173 if (uf_info->urfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
174 printk(KERN_ERR "%s: urfet is not aligned.\n", __func__); 174 printk(KERN_ERR "%s: urfet is not aligned.\n", __func__);
175 return -EINVAL; 175 return -EINVAL;
176 } 176 }
177 177
178 if (uf_info->urfset & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) { 178 if (uf_info->urfset & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
179 printk(KERN_ERR "%s: urfset is not aligned\n", __func__); 179 printk(KERN_ERR "%s: urfset is not aligned\n", __func__);
180 return -EINVAL; 180 return -EINVAL;
181 } 181 }
182 182
183 if (uf_info->utfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) { 183 if (uf_info->utfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
184 printk(KERN_ERR "%s: utfs is not aligned\n", __func__); 184 printk(KERN_ERR "%s: utfs is not aligned\n", __func__);
185 return -EINVAL; 185 return -EINVAL;
186 } 186 }
187 187
188 if (uf_info->utfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) { 188 if (uf_info->utfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
189 printk(KERN_ERR "%s: utfet is not aligned\n", __func__); 189 printk(KERN_ERR "%s: utfet is not aligned\n", __func__);
190 return -EINVAL; 190 return -EINVAL;
191 } 191 }
192 192
193 if (uf_info->utftt & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) { 193 if (uf_info->utftt & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
194 printk(KERN_ERR "%s: utftt is not aligned\n", __func__); 194 printk(KERN_ERR "%s: utftt is not aligned\n", __func__);
195 return -EINVAL; 195 return -EINVAL;
196 } 196 }
197 197
198 uccf = kzalloc(sizeof(struct ucc_fast_private), GFP_KERNEL); 198 uccf = kzalloc(sizeof(struct ucc_fast_private), GFP_KERNEL);
199 if (!uccf) { 199 if (!uccf) {
200 printk(KERN_ERR "%s: Cannot allocate private data\n", 200 printk(KERN_ERR "%s: Cannot allocate private data\n",
201 __func__); 201 __func__);
202 return -ENOMEM; 202 return -ENOMEM;
203 } 203 }
204 204
205 /* Fill fast UCC structure */ 205 /* Fill fast UCC structure */
206 uccf->uf_info = uf_info; 206 uccf->uf_info = uf_info;
207 /* Set the PHY base address */ 207 /* Set the PHY base address */
208 uccf->uf_regs = ioremap(uf_info->regs, sizeof(struct ucc_fast)); 208 uccf->uf_regs = ioremap(uf_info->regs, sizeof(struct ucc_fast));
209 if (uccf->uf_regs == NULL) { 209 if (uccf->uf_regs == NULL) {
210 printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__); 210 printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
211 kfree(uccf); 211 kfree(uccf);
212 return -ENOMEM; 212 return -ENOMEM;
213 } 213 }
214 214
215 uccf->enabled_tx = 0; 215 uccf->enabled_tx = 0;
216 uccf->enabled_rx = 0; 216 uccf->enabled_rx = 0;
217 uccf->stopped_tx = 0; 217 uccf->stopped_tx = 0;
218 uccf->stopped_rx = 0; 218 uccf->stopped_rx = 0;
219 uf_regs = uccf->uf_regs; 219 uf_regs = uccf->uf_regs;
220 uccf->p_ucce = &uf_regs->ucce; 220 uccf->p_ucce = &uf_regs->ucce;
221 uccf->p_uccm = &uf_regs->uccm; 221 uccf->p_uccm = &uf_regs->uccm;
222 #ifdef CONFIG_UGETH_TX_ON_DEMAND 222 #ifdef CONFIG_UGETH_TX_ON_DEMAND
223 uccf->p_utodr = &uf_regs->utodr; 223 uccf->p_utodr = &uf_regs->utodr;
224 #endif 224 #endif
225 #ifdef STATISTICS 225 #ifdef STATISTICS
226 uccf->tx_frames = 0; 226 uccf->tx_frames = 0;
227 uccf->rx_frames = 0; 227 uccf->rx_frames = 0;
228 uccf->rx_discarded = 0; 228 uccf->rx_discarded = 0;
229 #endif /* STATISTICS */ 229 #endif /* STATISTICS */
230 230
231 /* Set UCC to fast type */ 231 /* Set UCC to fast type */
232 ret = ucc_set_type(uf_info->ucc_num, UCC_SPEED_TYPE_FAST); 232 ret = ucc_set_type(uf_info->ucc_num, UCC_SPEED_TYPE_FAST);
233 if (ret) { 233 if (ret) {
234 printk(KERN_ERR "%s: cannot set UCC type\n", __func__); 234 printk(KERN_ERR "%s: cannot set UCC type\n", __func__);
235 ucc_fast_free(uccf); 235 ucc_fast_free(uccf);
236 return ret; 236 return ret;
237 } 237 }
238 238
239 uccf->mrblr = uf_info->max_rx_buf_length; 239 uccf->mrblr = uf_info->max_rx_buf_length;
240 240
241 /* Set GUMR */ 241 /* Set GUMR */
242 /* For more details see the hardware spec. */ 242 /* For more details see the hardware spec. */
243 gumr = uf_info->ttx_trx; 243 gumr = uf_info->ttx_trx;
244 if (uf_info->tci) 244 if (uf_info->tci)
245 gumr |= UCC_FAST_GUMR_TCI; 245 gumr |= UCC_FAST_GUMR_TCI;
246 if (uf_info->cdp) 246 if (uf_info->cdp)
247 gumr |= UCC_FAST_GUMR_CDP; 247 gumr |= UCC_FAST_GUMR_CDP;
248 if (uf_info->ctsp) 248 if (uf_info->ctsp)
249 gumr |= UCC_FAST_GUMR_CTSP; 249 gumr |= UCC_FAST_GUMR_CTSP;
250 if (uf_info->cds) 250 if (uf_info->cds)
251 gumr |= UCC_FAST_GUMR_CDS; 251 gumr |= UCC_FAST_GUMR_CDS;
252 if (uf_info->ctss) 252 if (uf_info->ctss)
253 gumr |= UCC_FAST_GUMR_CTSS; 253 gumr |= UCC_FAST_GUMR_CTSS;
254 if (uf_info->txsy) 254 if (uf_info->txsy)
255 gumr |= UCC_FAST_GUMR_TXSY; 255 gumr |= UCC_FAST_GUMR_TXSY;
256 if (uf_info->rsyn) 256 if (uf_info->rsyn)
257 gumr |= UCC_FAST_GUMR_RSYN; 257 gumr |= UCC_FAST_GUMR_RSYN;
258 gumr |= uf_info->synl; 258 gumr |= uf_info->synl;
259 if (uf_info->rtsm) 259 if (uf_info->rtsm)
260 gumr |= UCC_FAST_GUMR_RTSM; 260 gumr |= UCC_FAST_GUMR_RTSM;
261 gumr |= uf_info->renc; 261 gumr |= uf_info->renc;
262 if (uf_info->revd) 262 if (uf_info->revd)
263 gumr |= UCC_FAST_GUMR_REVD; 263 gumr |= UCC_FAST_GUMR_REVD;
264 gumr |= uf_info->tenc; 264 gumr |= uf_info->tenc;
265 gumr |= uf_info->tcrc; 265 gumr |= uf_info->tcrc;
266 gumr |= uf_info->mode; 266 gumr |= uf_info->mode;
267 out_be32(&uf_regs->gumr, gumr); 267 out_be32(&uf_regs->gumr, gumr);
268 268
269 /* Allocate memory for Tx Virtual Fifo */ 269 /* Allocate memory for Tx Virtual Fifo */
270 uccf->ucc_fast_tx_virtual_fifo_base_offset = 270 uccf->ucc_fast_tx_virtual_fifo_base_offset =
271 qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT); 271 qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
272 if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) { 272 if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
273 printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n", 273 printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
274 __func__); 274 __func__);
275 uccf->ucc_fast_tx_virtual_fifo_base_offset = 0; 275 uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
276 ucc_fast_free(uccf); 276 ucc_fast_free(uccf);
277 return -ENOMEM; 277 return -ENOMEM;
278 } 278 }
279 279
280 /* Allocate memory for Rx Virtual Fifo */ 280 /* Allocate memory for Rx Virtual Fifo */
281 uccf->ucc_fast_rx_virtual_fifo_base_offset = 281 uccf->ucc_fast_rx_virtual_fifo_base_offset =
282 qe_muram_alloc(uf_info->urfs + 282 qe_muram_alloc(uf_info->urfs +
283 UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR, 283 UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
284 UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT); 284 UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
285 if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) { 285 if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
286 printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n", 286 printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
287 __func__); 287 __func__);
288 uccf->ucc_fast_rx_virtual_fifo_base_offset = 0; 288 uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
289 ucc_fast_free(uccf); 289 ucc_fast_free(uccf);
290 return -ENOMEM; 290 return -ENOMEM;
291 } 291 }
292 292
293 /* Set Virtual Fifo registers */ 293 /* Set Virtual Fifo registers */
294 out_be16(&uf_regs->urfs, uf_info->urfs); 294 out_be16(&uf_regs->urfs, uf_info->urfs);
295 out_be16(&uf_regs->urfet, uf_info->urfet); 295 out_be16(&uf_regs->urfet, uf_info->urfet);
296 out_be16(&uf_regs->urfset, uf_info->urfset); 296 out_be16(&uf_regs->urfset, uf_info->urfset);
297 out_be16(&uf_regs->utfs, uf_info->utfs); 297 out_be16(&uf_regs->utfs, uf_info->utfs);
298 out_be16(&uf_regs->utfet, uf_info->utfet); 298 out_be16(&uf_regs->utfet, uf_info->utfet);
299 out_be16(&uf_regs->utftt, uf_info->utftt); 299 out_be16(&uf_regs->utftt, uf_info->utftt);
300 /* utfb, urfb are offsets from MURAM base */ 300 /* utfb, urfb are offsets from MURAM base */
301 out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset); 301 out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset);
302 out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset); 302 out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset);
303 303
304 /* Mux clocking */ 304 /* Mux clocking */
305 /* Grant Support */ 305 /* Grant Support */
306 ucc_set_qe_mux_grant(uf_info->ucc_num, uf_info->grant_support); 306 ucc_set_qe_mux_grant(uf_info->ucc_num, uf_info->grant_support);
307 /* Breakpoint Support */ 307 /* Breakpoint Support */
308 ucc_set_qe_mux_bkpt(uf_info->ucc_num, uf_info->brkpt_support); 308 ucc_set_qe_mux_bkpt(uf_info->ucc_num, uf_info->brkpt_support);
309 /* Set Tsa or NMSI mode. */ 309 /* Set Tsa or NMSI mode. */
310 ucc_set_qe_mux_tsa(uf_info->ucc_num, uf_info->tsa); 310 ucc_set_qe_mux_tsa(uf_info->ucc_num, uf_info->tsa);
311 /* If NMSI (not Tsa), set Tx and Rx clock. */ 311 /* If NMSI (not Tsa), set Tx and Rx clock. */
312 if (!uf_info->tsa) { 312 if (!uf_info->tsa) {
313 /* Rx clock routing */ 313 /* Rx clock routing */
314 if ((uf_info->rx_clock != QE_CLK_NONE) && 314 if ((uf_info->rx_clock != QE_CLK_NONE) &&
315 ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->rx_clock, 315 ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->rx_clock,
316 COMM_DIR_RX)) { 316 COMM_DIR_RX)) {
317 printk(KERN_ERR "%s: illegal value for RX clock\n", 317 printk(KERN_ERR "%s: illegal value for RX clock\n",
318 __func__); 318 __func__);
319 ucc_fast_free(uccf); 319 ucc_fast_free(uccf);
320 return -EINVAL; 320 return -EINVAL;
321 } 321 }
322 /* Tx clock routing */ 322 /* Tx clock routing */
323 if ((uf_info->tx_clock != QE_CLK_NONE) && 323 if ((uf_info->tx_clock != QE_CLK_NONE) &&
324 ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->tx_clock, 324 ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->tx_clock,
325 COMM_DIR_TX)) { 325 COMM_DIR_TX)) {
326 printk(KERN_ERR "%s: illegal value for TX clock\n", 326 printk(KERN_ERR "%s: illegal value for TX clock\n",
327 __func__); 327 __func__);
328 ucc_fast_free(uccf); 328 ucc_fast_free(uccf);
329 return -EINVAL; 329 return -EINVAL;
330 } 330 }
331 } 331 }
332 332
333 /* Set interrupt mask register at UCC level. */ 333 /* Set interrupt mask register at UCC level. */
334 out_be32(&uf_regs->uccm, uf_info->uccm_mask); 334 out_be32(&uf_regs->uccm, uf_info->uccm_mask);
335 335
336 /* First, clear anything pending at UCC level, 336 /* First, clear anything pending at UCC level,
337 * otherwise, old garbage may come through 337 * otherwise, old garbage may come through
338 * as soon as the dam is opened. */ 338 * as soon as the dam is opened. */
339 339
340 /* Writing '1' clears */ 340 /* Writing '1' clears */
341 out_be32(&uf_regs->ucce, 0xffffffff); 341 out_be32(&uf_regs->ucce, 0xffffffff);
342 342
343 *uccf_ret = uccf; 343 *uccf_ret = uccf;
344 return 0; 344 return 0;
345 } 345 }
346 EXPORT_SYMBOL(ucc_fast_init); 346 EXPORT_SYMBOL(ucc_fast_init);
347 347
348 void ucc_fast_free(struct ucc_fast_private * uccf) 348 void ucc_fast_free(struct ucc_fast_private * uccf)
349 { 349 {
350 if (!uccf) 350 if (!uccf)
351 return; 351 return;
352 352
353 if (uccf->ucc_fast_tx_virtual_fifo_base_offset) 353 if (uccf->ucc_fast_tx_virtual_fifo_base_offset)
354 qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset); 354 qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset);
355 355
356 if (uccf->ucc_fast_rx_virtual_fifo_base_offset) 356 if (uccf->ucc_fast_rx_virtual_fifo_base_offset)
357 qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset); 357 qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);
358 358
359 if (uccf->uf_regs) 359 if (uccf->uf_regs)
360 iounmap(uccf->uf_regs); 360 iounmap(uccf->uf_regs);
361 361
362 kfree(uccf); 362 kfree(uccf);
363 } 363 }
364 EXPORT_SYMBOL(ucc_fast_free); 364 EXPORT_SYMBOL(ucc_fast_free);
365 365
arch/powerpc/sysdev/qe_lib/ucc_slow.c
1 /* 1 /*
2 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. 2 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
3 * 3 *
4 * Authors: Shlomi Gridish <gridish@freescale.com> 4 * Authors: Shlomi Gridish <gridish@freescale.com>
5 * Li Yang <leoli@freescale.com> 5 * Li Yang <leoli@freescale.com>
6 * 6 *
7 * Description: 7 * Description:
8 * QE UCC Slow API Set - UCC Slow specific routines implementations. 8 * QE UCC Slow API Set - UCC Slow specific routines implementations.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the 11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your 12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version. 13 * option) any later version.
14 */ 14 */
15 #include <linux/kernel.h> 15 #include <linux/kernel.h>
16 #include <linux/init.h> 16 #include <linux/init.h>
17 #include <linux/errno.h> 17 #include <linux/errno.h>
18 #include <linux/slab.h> 18 #include <linux/slab.h>
19 #include <linux/stddef.h> 19 #include <linux/stddef.h>
20 #include <linux/interrupt.h> 20 #include <linux/interrupt.h>
21 #include <linux/err.h> 21 #include <linux/err.h>
22 #include <linux/module.h> 22 #include <linux/export.h>
23 23
24 #include <asm/io.h> 24 #include <asm/io.h>
25 #include <asm/immap_qe.h> 25 #include <asm/immap_qe.h>
26 #include <asm/qe.h> 26 #include <asm/qe.h>
27 27
28 #include <asm/ucc.h> 28 #include <asm/ucc.h>
29 #include <asm/ucc_slow.h> 29 #include <asm/ucc_slow.h>
30 30
31 u32 ucc_slow_get_qe_cr_subblock(int uccs_num) 31 u32 ucc_slow_get_qe_cr_subblock(int uccs_num)
32 { 32 {
33 switch (uccs_num) { 33 switch (uccs_num) {
34 case 0: return QE_CR_SUBBLOCK_UCCSLOW1; 34 case 0: return QE_CR_SUBBLOCK_UCCSLOW1;
35 case 1: return QE_CR_SUBBLOCK_UCCSLOW2; 35 case 1: return QE_CR_SUBBLOCK_UCCSLOW2;
36 case 2: return QE_CR_SUBBLOCK_UCCSLOW3; 36 case 2: return QE_CR_SUBBLOCK_UCCSLOW3;
37 case 3: return QE_CR_SUBBLOCK_UCCSLOW4; 37 case 3: return QE_CR_SUBBLOCK_UCCSLOW4;
38 case 4: return QE_CR_SUBBLOCK_UCCSLOW5; 38 case 4: return QE_CR_SUBBLOCK_UCCSLOW5;
39 case 5: return QE_CR_SUBBLOCK_UCCSLOW6; 39 case 5: return QE_CR_SUBBLOCK_UCCSLOW6;
40 case 6: return QE_CR_SUBBLOCK_UCCSLOW7; 40 case 6: return QE_CR_SUBBLOCK_UCCSLOW7;
41 case 7: return QE_CR_SUBBLOCK_UCCSLOW8; 41 case 7: return QE_CR_SUBBLOCK_UCCSLOW8;
42 default: return QE_CR_SUBBLOCK_INVALID; 42 default: return QE_CR_SUBBLOCK_INVALID;
43 } 43 }
44 } 44 }
45 EXPORT_SYMBOL(ucc_slow_get_qe_cr_subblock); 45 EXPORT_SYMBOL(ucc_slow_get_qe_cr_subblock);
46 46
47 void ucc_slow_poll_transmitter_now(struct ucc_slow_private * uccs) 47 void ucc_slow_poll_transmitter_now(struct ucc_slow_private * uccs)
48 { 48 {
49 out_be16(&uccs->us_regs->utodr, UCC_SLOW_TOD); 49 out_be16(&uccs->us_regs->utodr, UCC_SLOW_TOD);
50 } 50 }
51 51
52 void ucc_slow_graceful_stop_tx(struct ucc_slow_private * uccs) 52 void ucc_slow_graceful_stop_tx(struct ucc_slow_private * uccs)
53 { 53 {
54 struct ucc_slow_info *us_info = uccs->us_info; 54 struct ucc_slow_info *us_info = uccs->us_info;
55 u32 id; 55 u32 id;
56 56
57 id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num); 57 id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
58 qe_issue_cmd(QE_GRACEFUL_STOP_TX, id, 58 qe_issue_cmd(QE_GRACEFUL_STOP_TX, id,
59 QE_CR_PROTOCOL_UNSPECIFIED, 0); 59 QE_CR_PROTOCOL_UNSPECIFIED, 0);
60 } 60 }
61 EXPORT_SYMBOL(ucc_slow_graceful_stop_tx); 61 EXPORT_SYMBOL(ucc_slow_graceful_stop_tx);
62 62
63 void ucc_slow_stop_tx(struct ucc_slow_private * uccs) 63 void ucc_slow_stop_tx(struct ucc_slow_private * uccs)
64 { 64 {
65 struct ucc_slow_info *us_info = uccs->us_info; 65 struct ucc_slow_info *us_info = uccs->us_info;
66 u32 id; 66 u32 id;
67 67
68 id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num); 68 id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
69 qe_issue_cmd(QE_STOP_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0); 69 qe_issue_cmd(QE_STOP_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
70 } 70 }
71 EXPORT_SYMBOL(ucc_slow_stop_tx); 71 EXPORT_SYMBOL(ucc_slow_stop_tx);
72 72
73 void ucc_slow_restart_tx(struct ucc_slow_private * uccs) 73 void ucc_slow_restart_tx(struct ucc_slow_private * uccs)
74 { 74 {
75 struct ucc_slow_info *us_info = uccs->us_info; 75 struct ucc_slow_info *us_info = uccs->us_info;
76 u32 id; 76 u32 id;
77 77
78 id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num); 78 id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
79 qe_issue_cmd(QE_RESTART_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0); 79 qe_issue_cmd(QE_RESTART_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
80 } 80 }
81 EXPORT_SYMBOL(ucc_slow_restart_tx); 81 EXPORT_SYMBOL(ucc_slow_restart_tx);
82 82
83 void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode) 83 void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
84 { 84 {
85 struct ucc_slow *us_regs; 85 struct ucc_slow *us_regs;
86 u32 gumr_l; 86 u32 gumr_l;
87 87
88 us_regs = uccs->us_regs; 88 us_regs = uccs->us_regs;
89 89
90 /* Enable reception and/or transmission on this UCC. */ 90 /* Enable reception and/or transmission on this UCC. */
91 gumr_l = in_be32(&us_regs->gumr_l); 91 gumr_l = in_be32(&us_regs->gumr_l);
92 if (mode & COMM_DIR_TX) { 92 if (mode & COMM_DIR_TX) {
93 gumr_l |= UCC_SLOW_GUMR_L_ENT; 93 gumr_l |= UCC_SLOW_GUMR_L_ENT;
94 uccs->enabled_tx = 1; 94 uccs->enabled_tx = 1;
95 } 95 }
96 if (mode & COMM_DIR_RX) { 96 if (mode & COMM_DIR_RX) {
97 gumr_l |= UCC_SLOW_GUMR_L_ENR; 97 gumr_l |= UCC_SLOW_GUMR_L_ENR;
98 uccs->enabled_rx = 1; 98 uccs->enabled_rx = 1;
99 } 99 }
100 out_be32(&us_regs->gumr_l, gumr_l); 100 out_be32(&us_regs->gumr_l, gumr_l);
101 } 101 }
102 EXPORT_SYMBOL(ucc_slow_enable); 102 EXPORT_SYMBOL(ucc_slow_enable);
103 103
104 void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode) 104 void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
105 { 105 {
106 struct ucc_slow *us_regs; 106 struct ucc_slow *us_regs;
107 u32 gumr_l; 107 u32 gumr_l;
108 108
109 us_regs = uccs->us_regs; 109 us_regs = uccs->us_regs;
110 110
111 /* Disable reception and/or transmission on this UCC. */ 111 /* Disable reception and/or transmission on this UCC. */
112 gumr_l = in_be32(&us_regs->gumr_l); 112 gumr_l = in_be32(&us_regs->gumr_l);
113 if (mode & COMM_DIR_TX) { 113 if (mode & COMM_DIR_TX) {
114 gumr_l &= ~UCC_SLOW_GUMR_L_ENT; 114 gumr_l &= ~UCC_SLOW_GUMR_L_ENT;
115 uccs->enabled_tx = 0; 115 uccs->enabled_tx = 0;
116 } 116 }
117 if (mode & COMM_DIR_RX) { 117 if (mode & COMM_DIR_RX) {
118 gumr_l &= ~UCC_SLOW_GUMR_L_ENR; 118 gumr_l &= ~UCC_SLOW_GUMR_L_ENR;
119 uccs->enabled_rx = 0; 119 uccs->enabled_rx = 0;
120 } 120 }
121 out_be32(&us_regs->gumr_l, gumr_l); 121 out_be32(&us_regs->gumr_l, gumr_l);
122 } 122 }
123 EXPORT_SYMBOL(ucc_slow_disable); 123 EXPORT_SYMBOL(ucc_slow_disable);
124 124
125 /* Initialize the UCC for Slow operations 125 /* Initialize the UCC for Slow operations
126 * 126 *
127 * The caller should initialize the following us_info 127 * The caller should initialize the following us_info
128 */ 128 */
129 int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret) 129 int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret)
130 { 130 {
131 struct ucc_slow_private *uccs; 131 struct ucc_slow_private *uccs;
132 u32 i; 132 u32 i;
133 struct ucc_slow __iomem *us_regs; 133 struct ucc_slow __iomem *us_regs;
134 u32 gumr; 134 u32 gumr;
135 struct qe_bd *bd; 135 struct qe_bd *bd;
136 u32 id; 136 u32 id;
137 u32 command; 137 u32 command;
138 int ret = 0; 138 int ret = 0;
139 139
140 if (!us_info) 140 if (!us_info)
141 return -EINVAL; 141 return -EINVAL;
142 142
143 /* check if the UCC port number is in range. */ 143 /* check if the UCC port number is in range. */
144 if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) { 144 if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) {
145 printk(KERN_ERR "%s: illegal UCC number\n", __func__); 145 printk(KERN_ERR "%s: illegal UCC number\n", __func__);
146 return -EINVAL; 146 return -EINVAL;
147 } 147 }
148 148
149 /* 149 /*
150 * Set mrblr 150 * Set mrblr
151 * Check that 'max_rx_buf_length' is properly aligned (4), unless 151 * Check that 'max_rx_buf_length' is properly aligned (4), unless
152 * rfw is 1, meaning that QE accepts one byte at a time, unlike normal 152 * rfw is 1, meaning that QE accepts one byte at a time, unlike normal
153 * case when QE accepts 32 bits at a time. 153 * case when QE accepts 32 bits at a time.
154 */ 154 */
155 if ((!us_info->rfw) && 155 if ((!us_info->rfw) &&
156 (us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) { 156 (us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) {
157 printk(KERN_ERR "max_rx_buf_length not aligned.\n"); 157 printk(KERN_ERR "max_rx_buf_length not aligned.\n");
158 return -EINVAL; 158 return -EINVAL;
159 } 159 }
160 160
161 uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL); 161 uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL);
162 if (!uccs) { 162 if (!uccs) {
163 printk(KERN_ERR "%s: Cannot allocate private data\n", 163 printk(KERN_ERR "%s: Cannot allocate private data\n",
164 __func__); 164 __func__);
165 return -ENOMEM; 165 return -ENOMEM;
166 } 166 }
167 167
168 /* Fill slow UCC structure */ 168 /* Fill slow UCC structure */
169 uccs->us_info = us_info; 169 uccs->us_info = us_info;
170 /* Set the PHY base address */ 170 /* Set the PHY base address */
171 uccs->us_regs = ioremap(us_info->regs, sizeof(struct ucc_slow)); 171 uccs->us_regs = ioremap(us_info->regs, sizeof(struct ucc_slow));
172 if (uccs->us_regs == NULL) { 172 if (uccs->us_regs == NULL) {
173 printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__); 173 printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
174 kfree(uccs); 174 kfree(uccs);
175 return -ENOMEM; 175 return -ENOMEM;
176 } 176 }
177 177
178 uccs->saved_uccm = 0; 178 uccs->saved_uccm = 0;
179 uccs->p_rx_frame = 0; 179 uccs->p_rx_frame = 0;
180 us_regs = uccs->us_regs; 180 us_regs = uccs->us_regs;
181 uccs->p_ucce = (u16 *) & (us_regs->ucce); 181 uccs->p_ucce = (u16 *) & (us_regs->ucce);
182 uccs->p_uccm = (u16 *) & (us_regs->uccm); 182 uccs->p_uccm = (u16 *) & (us_regs->uccm);
183 #ifdef STATISTICS 183 #ifdef STATISTICS
184 uccs->rx_frames = 0; 184 uccs->rx_frames = 0;
185 uccs->tx_frames = 0; 185 uccs->tx_frames = 0;
186 uccs->rx_discarded = 0; 186 uccs->rx_discarded = 0;
187 #endif /* STATISTICS */ 187 #endif /* STATISTICS */
188 188
189 /* Get PRAM base */ 189 /* Get PRAM base */
190 uccs->us_pram_offset = 190 uccs->us_pram_offset =
191 qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM); 191 qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM);
192 if (IS_ERR_VALUE(uccs->us_pram_offset)) { 192 if (IS_ERR_VALUE(uccs->us_pram_offset)) {
193 printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __func__); 193 printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __func__);
194 ucc_slow_free(uccs); 194 ucc_slow_free(uccs);
195 return -ENOMEM; 195 return -ENOMEM;
196 } 196 }
197 id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num); 197 id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
198 qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, us_info->protocol, 198 qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, us_info->protocol,
199 uccs->us_pram_offset); 199 uccs->us_pram_offset);
200 200
201 uccs->us_pram = qe_muram_addr(uccs->us_pram_offset); 201 uccs->us_pram = qe_muram_addr(uccs->us_pram_offset);
202 202
203 /* Set UCC to slow type */ 203 /* Set UCC to slow type */
204 ret = ucc_set_type(us_info->ucc_num, UCC_SPEED_TYPE_SLOW); 204 ret = ucc_set_type(us_info->ucc_num, UCC_SPEED_TYPE_SLOW);
205 if (ret) { 205 if (ret) {
206 printk(KERN_ERR "%s: cannot set UCC type", __func__); 206 printk(KERN_ERR "%s: cannot set UCC type", __func__);
207 ucc_slow_free(uccs); 207 ucc_slow_free(uccs);
208 return ret; 208 return ret;
209 } 209 }
210 210
211 out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length); 211 out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length);
212 212
213 INIT_LIST_HEAD(&uccs->confQ); 213 INIT_LIST_HEAD(&uccs->confQ);
214 214
215 /* Allocate BDs. */ 215 /* Allocate BDs. */
216 uccs->rx_base_offset = 216 uccs->rx_base_offset =
217 qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd), 217 qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
218 QE_ALIGNMENT_OF_BD); 218 QE_ALIGNMENT_OF_BD);
219 if (IS_ERR_VALUE(uccs->rx_base_offset)) { 219 if (IS_ERR_VALUE(uccs->rx_base_offset)) {
220 printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __func__, 220 printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __func__,
221 us_info->rx_bd_ring_len); 221 us_info->rx_bd_ring_len);
222 uccs->rx_base_offset = 0; 222 uccs->rx_base_offset = 0;
223 ucc_slow_free(uccs); 223 ucc_slow_free(uccs);
224 return -ENOMEM; 224 return -ENOMEM;
225 } 225 }
226 226
227 uccs->tx_base_offset = 227 uccs->tx_base_offset =
228 qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd), 228 qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd),
229 QE_ALIGNMENT_OF_BD); 229 QE_ALIGNMENT_OF_BD);
230 if (IS_ERR_VALUE(uccs->tx_base_offset)) { 230 if (IS_ERR_VALUE(uccs->tx_base_offset)) {
231 printk(KERN_ERR "%s: cannot allocate TX BDs", __func__); 231 printk(KERN_ERR "%s: cannot allocate TX BDs", __func__);
232 uccs->tx_base_offset = 0; 232 uccs->tx_base_offset = 0;
233 ucc_slow_free(uccs); 233 ucc_slow_free(uccs);
234 return -ENOMEM; 234 return -ENOMEM;
235 } 235 }
236 236
237 /* Init Tx bds */ 237 /* Init Tx bds */
238 bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset); 238 bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
239 for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) { 239 for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) {
240 /* clear bd buffer */ 240 /* clear bd buffer */
241 out_be32(&bd->buf, 0); 241 out_be32(&bd->buf, 0);
242 /* set bd status and length */ 242 /* set bd status and length */
243 out_be32((u32 *) bd, 0); 243 out_be32((u32 *) bd, 0);
244 bd++; 244 bd++;
245 } 245 }
246 /* for last BD set Wrap bit */ 246 /* for last BD set Wrap bit */
247 out_be32(&bd->buf, 0); 247 out_be32(&bd->buf, 0);
248 out_be32((u32 *) bd, cpu_to_be32(T_W)); 248 out_be32((u32 *) bd, cpu_to_be32(T_W));
249 249
250 /* Init Rx bds */ 250 /* Init Rx bds */
251 bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset); 251 bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
252 for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) { 252 for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
253 /* set bd status and length */ 253 /* set bd status and length */
254 out_be32((u32*)bd, 0); 254 out_be32((u32*)bd, 0);
255 /* clear bd buffer */ 255 /* clear bd buffer */
256 out_be32(&bd->buf, 0); 256 out_be32(&bd->buf, 0);
257 bd++; 257 bd++;
258 } 258 }
259 /* for last BD set Wrap bit */ 259 /* for last BD set Wrap bit */
260 out_be32((u32*)bd, cpu_to_be32(R_W)); 260 out_be32((u32*)bd, cpu_to_be32(R_W));
261 out_be32(&bd->buf, 0); 261 out_be32(&bd->buf, 0);
262 262
263 /* Set GUMR (For more details see the hardware spec.). */ 263 /* Set GUMR (For more details see the hardware spec.). */
264 /* gumr_h */ 264 /* gumr_h */
265 gumr = us_info->tcrc; 265 gumr = us_info->tcrc;
266 if (us_info->cdp) 266 if (us_info->cdp)
267 gumr |= UCC_SLOW_GUMR_H_CDP; 267 gumr |= UCC_SLOW_GUMR_H_CDP;
268 if (us_info->ctsp) 268 if (us_info->ctsp)
269 gumr |= UCC_SLOW_GUMR_H_CTSP; 269 gumr |= UCC_SLOW_GUMR_H_CTSP;
270 if (us_info->cds) 270 if (us_info->cds)
271 gumr |= UCC_SLOW_GUMR_H_CDS; 271 gumr |= UCC_SLOW_GUMR_H_CDS;
272 if (us_info->ctss) 272 if (us_info->ctss)
273 gumr |= UCC_SLOW_GUMR_H_CTSS; 273 gumr |= UCC_SLOW_GUMR_H_CTSS;
274 if (us_info->tfl) 274 if (us_info->tfl)
275 gumr |= UCC_SLOW_GUMR_H_TFL; 275 gumr |= UCC_SLOW_GUMR_H_TFL;
276 if (us_info->rfw) 276 if (us_info->rfw)
277 gumr |= UCC_SLOW_GUMR_H_RFW; 277 gumr |= UCC_SLOW_GUMR_H_RFW;
278 if (us_info->txsy) 278 if (us_info->txsy)
279 gumr |= UCC_SLOW_GUMR_H_TXSY; 279 gumr |= UCC_SLOW_GUMR_H_TXSY;
280 if (us_info->rtsm) 280 if (us_info->rtsm)
281 gumr |= UCC_SLOW_GUMR_H_RTSM; 281 gumr |= UCC_SLOW_GUMR_H_RTSM;
282 out_be32(&us_regs->gumr_h, gumr); 282 out_be32(&us_regs->gumr_h, gumr);
283 283
284 /* gumr_l */ 284 /* gumr_l */
285 gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc | 285 gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc |
286 us_info->diag | us_info->mode; 286 us_info->diag | us_info->mode;
287 if (us_info->tci) 287 if (us_info->tci)
288 gumr |= UCC_SLOW_GUMR_L_TCI; 288 gumr |= UCC_SLOW_GUMR_L_TCI;
289 if (us_info->rinv) 289 if (us_info->rinv)
290 gumr |= UCC_SLOW_GUMR_L_RINV; 290 gumr |= UCC_SLOW_GUMR_L_RINV;
291 if (us_info->tinv) 291 if (us_info->tinv)
292 gumr |= UCC_SLOW_GUMR_L_TINV; 292 gumr |= UCC_SLOW_GUMR_L_TINV;
293 if (us_info->tend) 293 if (us_info->tend)
294 gumr |= UCC_SLOW_GUMR_L_TEND; 294 gumr |= UCC_SLOW_GUMR_L_TEND;
295 out_be32(&us_regs->gumr_l, gumr); 295 out_be32(&us_regs->gumr_l, gumr);
296 296
297 /* Function code registers */ 297 /* Function code registers */
298 298
299 /* if the data is in cachable memory, the 'global' */ 299 /* if the data is in cachable memory, the 'global' */
300 /* in the function code should be set. */ 300 /* in the function code should be set. */
301 uccs->us_pram->tbmr = UCC_BMR_BO_BE; 301 uccs->us_pram->tbmr = UCC_BMR_BO_BE;
302 uccs->us_pram->rbmr = UCC_BMR_BO_BE; 302 uccs->us_pram->rbmr = UCC_BMR_BO_BE;
303 303
304 /* rbase, tbase are offsets from MURAM base */ 304 /* rbase, tbase are offsets from MURAM base */
305 out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset); 305 out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset);
306 out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset); 306 out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset);
307 307
308 /* Mux clocking */ 308 /* Mux clocking */
309 /* Grant Support */ 309 /* Grant Support */
310 ucc_set_qe_mux_grant(us_info->ucc_num, us_info->grant_support); 310 ucc_set_qe_mux_grant(us_info->ucc_num, us_info->grant_support);
311 /* Breakpoint Support */ 311 /* Breakpoint Support */
312 ucc_set_qe_mux_bkpt(us_info->ucc_num, us_info->brkpt_support); 312 ucc_set_qe_mux_bkpt(us_info->ucc_num, us_info->brkpt_support);
313 /* Set Tsa or NMSI mode. */ 313 /* Set Tsa or NMSI mode. */
314 ucc_set_qe_mux_tsa(us_info->ucc_num, us_info->tsa); 314 ucc_set_qe_mux_tsa(us_info->ucc_num, us_info->tsa);
315 /* If NMSI (not Tsa), set Tx and Rx clock. */ 315 /* If NMSI (not Tsa), set Tx and Rx clock. */
316 if (!us_info->tsa) { 316 if (!us_info->tsa) {
317 /* Rx clock routing */ 317 /* Rx clock routing */
318 if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->rx_clock, 318 if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->rx_clock,
319 COMM_DIR_RX)) { 319 COMM_DIR_RX)) {
320 printk(KERN_ERR "%s: illegal value for RX clock\n", 320 printk(KERN_ERR "%s: illegal value for RX clock\n",
321 __func__); 321 __func__);
322 ucc_slow_free(uccs); 322 ucc_slow_free(uccs);
323 return -EINVAL; 323 return -EINVAL;
324 } 324 }
325 /* Tx clock routing */ 325 /* Tx clock routing */
326 if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->tx_clock, 326 if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->tx_clock,
327 COMM_DIR_TX)) { 327 COMM_DIR_TX)) {
328 printk(KERN_ERR "%s: illegal value for TX clock\n", 328 printk(KERN_ERR "%s: illegal value for TX clock\n",
329 __func__); 329 __func__);
330 ucc_slow_free(uccs); 330 ucc_slow_free(uccs);
331 return -EINVAL; 331 return -EINVAL;
332 } 332 }
333 } 333 }
334 334
335 /* Set interrupt mask register at UCC level. */ 335 /* Set interrupt mask register at UCC level. */
336 out_be16(&us_regs->uccm, us_info->uccm_mask); 336 out_be16(&us_regs->uccm, us_info->uccm_mask);
337 337
338 /* First, clear anything pending at UCC level, 338 /* First, clear anything pending at UCC level,
339 * otherwise, old garbage may come through 339 * otherwise, old garbage may come through
340 * as soon as the dam is opened. */ 340 * as soon as the dam is opened. */
341 341
342 /* Writing '1' clears */ 342 /* Writing '1' clears */
343 out_be16(&us_regs->ucce, 0xffff); 343 out_be16(&us_regs->ucce, 0xffff);
344 344
345 /* Issue QE Init command */ 345 /* Issue QE Init command */
346 if (us_info->init_tx && us_info->init_rx) 346 if (us_info->init_tx && us_info->init_rx)
347 command = QE_INIT_TX_RX; 347 command = QE_INIT_TX_RX;
348 else if (us_info->init_tx) 348 else if (us_info->init_tx)
349 command = QE_INIT_TX; 349 command = QE_INIT_TX;
350 else 350 else
351 command = QE_INIT_RX; /* We know at least one is TRUE */ 351 command = QE_INIT_RX; /* We know at least one is TRUE */
352 352
353 qe_issue_cmd(command, id, us_info->protocol, 0); 353 qe_issue_cmd(command, id, us_info->protocol, 0);
354 354
355 *uccs_ret = uccs; 355 *uccs_ret = uccs;
356 return 0; 356 return 0;
357 } 357 }
358 EXPORT_SYMBOL(ucc_slow_init); 358 EXPORT_SYMBOL(ucc_slow_init);
359 359
360 void ucc_slow_free(struct ucc_slow_private * uccs) 360 void ucc_slow_free(struct ucc_slow_private * uccs)
361 { 361 {
362 if (!uccs) 362 if (!uccs)
363 return; 363 return;
364 364
365 if (uccs->rx_base_offset) 365 if (uccs->rx_base_offset)
366 qe_muram_free(uccs->rx_base_offset); 366 qe_muram_free(uccs->rx_base_offset);
367 367
368 if (uccs->tx_base_offset) 368 if (uccs->tx_base_offset)
369 qe_muram_free(uccs->tx_base_offset); 369 qe_muram_free(uccs->tx_base_offset);
370 370
371 if (uccs->us_pram) 371 if (uccs->us_pram)
372 qe_muram_free(uccs->us_pram_offset); 372 qe_muram_free(uccs->us_pram_offset);
373 373
374 if (uccs->us_regs) 374 if (uccs->us_regs)
375 iounmap(uccs->us_regs); 375 iounmap(uccs->us_regs);
376 376
377 kfree(uccs); 377 kfree(uccs);
378 } 378 }
379 EXPORT_SYMBOL(ucc_slow_free); 379 EXPORT_SYMBOL(ucc_slow_free);
380 380
381 381
arch/powerpc/sysdev/tsi108_dev.c
1 /* 1 /*
2 * tsi108/109 device setup code 2 * tsi108/109 device setup code
3 * 3 *
4 * Maintained by Roy Zang < tie-fei.zang@freescale.com > 4 * Maintained by Roy Zang < tie-fei.zang@freescale.com >
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the 7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your 8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version. 9 * option) any later version.
10 */ 10 */
11 11
12 #include <linux/stddef.h> 12 #include <linux/stddef.h>
13 #include <linux/kernel.h> 13 #include <linux/kernel.h>
14 #include <linux/init.h> 14 #include <linux/init.h>
15 #include <linux/errno.h> 15 #include <linux/errno.h>
16 #include <linux/major.h> 16 #include <linux/major.h>
17 #include <linux/delay.h> 17 #include <linux/delay.h>
18 #include <linux/irq.h> 18 #include <linux/irq.h>
19 #include <linux/module.h> 19 #include <linux/export.h>
20 #include <linux/device.h> 20 #include <linux/device.h>
21 #include <linux/platform_device.h> 21 #include <linux/platform_device.h>
22 #include <linux/of_net.h> 22 #include <linux/of_net.h>
23 #include <asm/tsi108.h> 23 #include <asm/tsi108.h>
24 24
25 #include <asm/system.h> 25 #include <asm/system.h>
26 #include <linux/atomic.h> 26 #include <linux/atomic.h>
27 #include <asm/io.h> 27 #include <asm/io.h>
28 #include <asm/irq.h> 28 #include <asm/irq.h>
29 #include <asm/prom.h> 29 #include <asm/prom.h>
30 #include <mm/mmu_decl.h> 30 #include <mm/mmu_decl.h>
31 31
32 #undef DEBUG 32 #undef DEBUG
33 33
34 #ifdef DEBUG 34 #ifdef DEBUG
35 #define DBG(fmt...) do { printk(fmt); } while(0) 35 #define DBG(fmt...) do { printk(fmt); } while(0)
36 #else 36 #else
37 #define DBG(fmt...) do { } while(0) 37 #define DBG(fmt...) do { } while(0)
38 #endif 38 #endif
39 39
40 static phys_addr_t tsi108_csr_base = -1; 40 static phys_addr_t tsi108_csr_base = -1;
41 41
42 phys_addr_t get_csrbase(void) 42 phys_addr_t get_csrbase(void)
43 { 43 {
44 struct device_node *tsi; 44 struct device_node *tsi;
45 45
46 if (tsi108_csr_base != -1) 46 if (tsi108_csr_base != -1)
47 return tsi108_csr_base; 47 return tsi108_csr_base;
48 48
49 tsi = of_find_node_by_type(NULL, "tsi-bridge"); 49 tsi = of_find_node_by_type(NULL, "tsi-bridge");
50 if (tsi) { 50 if (tsi) {
51 unsigned int size; 51 unsigned int size;
52 const void *prop = of_get_property(tsi, "reg", &size); 52 const void *prop = of_get_property(tsi, "reg", &size);
53 tsi108_csr_base = of_translate_address(tsi, prop); 53 tsi108_csr_base = of_translate_address(tsi, prop);
54 of_node_put(tsi); 54 of_node_put(tsi);
55 }; 55 };
56 return tsi108_csr_base; 56 return tsi108_csr_base;
57 } 57 }
58 58
59 u32 get_vir_csrbase(void) 59 u32 get_vir_csrbase(void)
60 { 60 {
61 return (u32) (ioremap(get_csrbase(), 0x10000)); 61 return (u32) (ioremap(get_csrbase(), 0x10000));
62 } 62 }
63 63
64 EXPORT_SYMBOL(get_csrbase); 64 EXPORT_SYMBOL(get_csrbase);
65 EXPORT_SYMBOL(get_vir_csrbase); 65 EXPORT_SYMBOL(get_vir_csrbase);
66 66
67 static int __init tsi108_eth_of_init(void) 67 static int __init tsi108_eth_of_init(void)
68 { 68 {
69 struct device_node *np; 69 struct device_node *np;
70 unsigned int i = 0; 70 unsigned int i = 0;
71 struct platform_device *tsi_eth_dev; 71 struct platform_device *tsi_eth_dev;
72 struct resource res; 72 struct resource res;
73 int ret; 73 int ret;
74 74
75 for_each_compatible_node(np, "network", "tsi108-ethernet") { 75 for_each_compatible_node(np, "network", "tsi108-ethernet") {
76 struct resource r[2]; 76 struct resource r[2];
77 struct device_node *phy, *mdio; 77 struct device_node *phy, *mdio;
78 hw_info tsi_eth_data; 78 hw_info tsi_eth_data;
79 const unsigned int *phy_id; 79 const unsigned int *phy_id;
80 const void *mac_addr; 80 const void *mac_addr;
81 const phandle *ph; 81 const phandle *ph;
82 82
83 memset(r, 0, sizeof(r)); 83 memset(r, 0, sizeof(r));
84 memset(&tsi_eth_data, 0, sizeof(tsi_eth_data)); 84 memset(&tsi_eth_data, 0, sizeof(tsi_eth_data));
85 85
86 ret = of_address_to_resource(np, 0, &r[0]); 86 ret = of_address_to_resource(np, 0, &r[0]);
87 DBG("%s: name:start->end = %s:%pR\n", 87 DBG("%s: name:start->end = %s:%pR\n",
88 __func__, r[0].name, &r[0]); 88 __func__, r[0].name, &r[0]);
89 if (ret) 89 if (ret)
90 goto err; 90 goto err;
91 91
92 r[1].name = "tx"; 92 r[1].name = "tx";
93 r[1].start = irq_of_parse_and_map(np, 0); 93 r[1].start = irq_of_parse_and_map(np, 0);
94 r[1].end = irq_of_parse_and_map(np, 0); 94 r[1].end = irq_of_parse_and_map(np, 0);
95 r[1].flags = IORESOURCE_IRQ; 95 r[1].flags = IORESOURCE_IRQ;
96 DBG("%s: name:start->end = %s:%pR\n", 96 DBG("%s: name:start->end = %s:%pR\n",
97 __func__, r[1].name, &r[1]); 97 __func__, r[1].name, &r[1]);
98 98
99 tsi_eth_dev = 99 tsi_eth_dev =
100 platform_device_register_simple("tsi-ethernet", i++, &r[0], 100 platform_device_register_simple("tsi-ethernet", i++, &r[0],
101 1); 101 1);
102 102
103 if (IS_ERR(tsi_eth_dev)) { 103 if (IS_ERR(tsi_eth_dev)) {
104 ret = PTR_ERR(tsi_eth_dev); 104 ret = PTR_ERR(tsi_eth_dev);
105 goto err; 105 goto err;
106 } 106 }
107 107
108 mac_addr = of_get_mac_address(np); 108 mac_addr = of_get_mac_address(np);
109 if (mac_addr) 109 if (mac_addr)
110 memcpy(tsi_eth_data.mac_addr, mac_addr, 6); 110 memcpy(tsi_eth_data.mac_addr, mac_addr, 6);
111 111
112 ph = of_get_property(np, "mdio-handle", NULL); 112 ph = of_get_property(np, "mdio-handle", NULL);
113 mdio = of_find_node_by_phandle(*ph); 113 mdio = of_find_node_by_phandle(*ph);
114 ret = of_address_to_resource(mdio, 0, &res); 114 ret = of_address_to_resource(mdio, 0, &res);
115 of_node_put(mdio); 115 of_node_put(mdio);
116 if (ret) 116 if (ret)
117 goto unreg; 117 goto unreg;
118 118
119 ph = of_get_property(np, "phy-handle", NULL); 119 ph = of_get_property(np, "phy-handle", NULL);
120 phy = of_find_node_by_phandle(*ph); 120 phy = of_find_node_by_phandle(*ph);
121 121
122 if (phy == NULL) { 122 if (phy == NULL) {
123 ret = -ENODEV; 123 ret = -ENODEV;
124 goto unreg; 124 goto unreg;
125 } 125 }
126 126
127 phy_id = of_get_property(phy, "reg", NULL); 127 phy_id = of_get_property(phy, "reg", NULL);
128 128
129 tsi_eth_data.regs = r[0].start; 129 tsi_eth_data.regs = r[0].start;
130 tsi_eth_data.phyregs = res.start; 130 tsi_eth_data.phyregs = res.start;
131 tsi_eth_data.phy = *phy_id; 131 tsi_eth_data.phy = *phy_id;
132 tsi_eth_data.irq_num = irq_of_parse_and_map(np, 0); 132 tsi_eth_data.irq_num = irq_of_parse_and_map(np, 0);
133 133
134 /* Some boards with the TSI108 bridge (e.g. Holly) 134 /* Some boards with the TSI108 bridge (e.g. Holly)
135 * have a miswiring of the ethernet PHYs which 135 * have a miswiring of the ethernet PHYs which
136 * requires a workaround. The special 136 * requires a workaround. The special
137 * "txc-rxc-delay-disable" property enables this 137 * "txc-rxc-delay-disable" property enables this
138 * workaround. FIXME: Need to port the tsi108_eth 138 * workaround. FIXME: Need to port the tsi108_eth
139 * driver itself to phylib and use a non-misleading 139 * driver itself to phylib and use a non-misleading
140 * name for the workaround flag - it's not actually to 140 * name for the workaround flag - it's not actually to
141 * do with the model of PHY in use */ 141 * do with the model of PHY in use */
142 if (of_get_property(phy, "txc-rxc-delay-disable", NULL)) 142 if (of_get_property(phy, "txc-rxc-delay-disable", NULL))
143 tsi_eth_data.phy_type = TSI108_PHY_BCM54XX; 143 tsi_eth_data.phy_type = TSI108_PHY_BCM54XX;
144 of_node_put(phy); 144 of_node_put(phy);
145 145
146 ret = 146 ret =
147 platform_device_add_data(tsi_eth_dev, &tsi_eth_data, 147 platform_device_add_data(tsi_eth_dev, &tsi_eth_data,
148 sizeof(hw_info)); 148 sizeof(hw_info));
149 if (ret) 149 if (ret)
150 goto unreg; 150 goto unreg;
151 } 151 }
152 return 0; 152 return 0;
153 unreg: 153 unreg:
154 platform_device_unregister(tsi_eth_dev); 154 platform_device_unregister(tsi_eth_dev);
155 err: 155 err:
156 of_node_put(np); 156 of_node_put(np);
157 return ret; 157 return ret;
158 } 158 }
159 159
160 arch_initcall(tsi108_eth_of_init); 160 arch_initcall(tsi108_eth_of_init);
161 161
arch/powerpc/xmon/xmon.c
1 /* 1 /*
2 * Routines providing a simple monitor for use on the PowerMac. 2 * Routines providing a simple monitor for use on the PowerMac.
3 * 3 *
4 * Copyright (C) 1996-2005 Paul Mackerras. 4 * Copyright (C) 1996-2005 Paul Mackerras.
5 * Copyright (C) 2001 PPC64 Team, IBM Corp 5 * Copyright (C) 2001 PPC64 Team, IBM Corp
6 * Copyright (C) 2006 Michael Ellerman, IBM Corp 6 * Copyright (C) 2006 Michael Ellerman, IBM Corp
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version. 11 * 2 of the License, or (at your option) any later version.
12 */ 12 */
13 #include <linux/errno.h> 13 #include <linux/errno.h>
14 #include <linux/sched.h> 14 #include <linux/sched.h>
15 #include <linux/smp.h> 15 #include <linux/smp.h>
16 #include <linux/mm.h> 16 #include <linux/mm.h>
17 #include <linux/reboot.h> 17 #include <linux/reboot.h>
18 #include <linux/delay.h> 18 #include <linux/delay.h>
19 #include <linux/kallsyms.h> 19 #include <linux/kallsyms.h>
20 #include <linux/cpumask.h> 20 #include <linux/cpumask.h>
21 #include <linux/module.h> 21 #include <linux/export.h>
22 #include <linux/sysrq.h> 22 #include <linux/sysrq.h>
23 #include <linux/interrupt.h> 23 #include <linux/interrupt.h>
24 #include <linux/irq.h> 24 #include <linux/irq.h>
25 #include <linux/bug.h> 25 #include <linux/bug.h>
26 26
27 #include <asm/ptrace.h> 27 #include <asm/ptrace.h>
28 #include <asm/string.h> 28 #include <asm/string.h>
29 #include <asm/prom.h> 29 #include <asm/prom.h>
30 #include <asm/machdep.h> 30 #include <asm/machdep.h>
31 #include <asm/xmon.h> 31 #include <asm/xmon.h>
32 #include <asm/processor.h> 32 #include <asm/processor.h>
33 #include <asm/pgtable.h> 33 #include <asm/pgtable.h>
34 #include <asm/mmu.h> 34 #include <asm/mmu.h>
35 #include <asm/mmu_context.h> 35 #include <asm/mmu_context.h>
36 #include <asm/cputable.h> 36 #include <asm/cputable.h>
37 #include <asm/rtas.h> 37 #include <asm/rtas.h>
38 #include <asm/sstep.h> 38 #include <asm/sstep.h>
39 #include <asm/irq_regs.h> 39 #include <asm/irq_regs.h>
40 #include <asm/spu.h> 40 #include <asm/spu.h>
41 #include <asm/spu_priv1.h> 41 #include <asm/spu_priv1.h>
42 #include <asm/firmware.h> 42 #include <asm/firmware.h>
43 #include <asm/setjmp.h> 43 #include <asm/setjmp.h>
44 #include <asm/reg.h> 44 #include <asm/reg.h>
45 45
46 #ifdef CONFIG_PPC64 46 #ifdef CONFIG_PPC64
47 #include <asm/hvcall.h> 47 #include <asm/hvcall.h>
48 #include <asm/paca.h> 48 #include <asm/paca.h>
49 #endif 49 #endif
50 50
51 #include "nonstdio.h" 51 #include "nonstdio.h"
52 #include "dis-asm.h" 52 #include "dis-asm.h"
53 53
54 #define scanhex xmon_scanhex 54 #define scanhex xmon_scanhex
55 #define skipbl xmon_skipbl 55 #define skipbl xmon_skipbl
56 56
57 #ifdef CONFIG_SMP 57 #ifdef CONFIG_SMP
58 static cpumask_t cpus_in_xmon = CPU_MASK_NONE; 58 static cpumask_t cpus_in_xmon = CPU_MASK_NONE;
59 static unsigned long xmon_taken = 1; 59 static unsigned long xmon_taken = 1;
60 static int xmon_owner; 60 static int xmon_owner;
61 static int xmon_gate; 61 static int xmon_gate;
62 #endif /* CONFIG_SMP */ 62 #endif /* CONFIG_SMP */
63 63
64 static unsigned long in_xmon __read_mostly = 0; 64 static unsigned long in_xmon __read_mostly = 0;
65 65
66 static unsigned long adrs; 66 static unsigned long adrs;
67 static int size = 1; 67 static int size = 1;
68 #define MAX_DUMP (128 * 1024) 68 #define MAX_DUMP (128 * 1024)
69 static unsigned long ndump = 64; 69 static unsigned long ndump = 64;
70 static unsigned long nidump = 16; 70 static unsigned long nidump = 16;
71 static unsigned long ncsum = 4096; 71 static unsigned long ncsum = 4096;
72 static int termch; 72 static int termch;
73 static char tmpstr[128]; 73 static char tmpstr[128];
74 74
75 static long bus_error_jmp[JMP_BUF_LEN]; 75 static long bus_error_jmp[JMP_BUF_LEN];
76 static int catch_memory_errors; 76 static int catch_memory_errors;
77 static long *xmon_fault_jmp[NR_CPUS]; 77 static long *xmon_fault_jmp[NR_CPUS];
78 78
79 /* Breakpoint stuff */ 79 /* Breakpoint stuff */
80 struct bpt { 80 struct bpt {
81 unsigned long address; 81 unsigned long address;
82 unsigned int instr[2]; 82 unsigned int instr[2];
83 atomic_t ref_count; 83 atomic_t ref_count;
84 int enabled; 84 int enabled;
85 unsigned long pad; 85 unsigned long pad;
86 }; 86 };
87 87
88 /* Bits in bpt.enabled */ 88 /* Bits in bpt.enabled */
89 #define BP_IABR_TE 1 /* IABR translation enabled */ 89 #define BP_IABR_TE 1 /* IABR translation enabled */
90 #define BP_IABR 2 90 #define BP_IABR 2
91 #define BP_TRAP 8 91 #define BP_TRAP 8
92 #define BP_DABR 0x10 92 #define BP_DABR 0x10
93 93
94 #define NBPTS 256 94 #define NBPTS 256
95 static struct bpt bpts[NBPTS]; 95 static struct bpt bpts[NBPTS];
96 static struct bpt dabr; 96 static struct bpt dabr;
97 static struct bpt *iabr; 97 static struct bpt *iabr;
98 static unsigned bpinstr = 0x7fe00008; /* trap */ 98 static unsigned bpinstr = 0x7fe00008; /* trap */
99 99
100 #define BP_NUM(bp) ((bp) - bpts + 1) 100 #define BP_NUM(bp) ((bp) - bpts + 1)
101 101
102 /* Prototypes */ 102 /* Prototypes */
103 static int cmds(struct pt_regs *); 103 static int cmds(struct pt_regs *);
104 static int mread(unsigned long, void *, int); 104 static int mread(unsigned long, void *, int);
105 static int mwrite(unsigned long, void *, int); 105 static int mwrite(unsigned long, void *, int);
106 static int handle_fault(struct pt_regs *); 106 static int handle_fault(struct pt_regs *);
107 static void byterev(unsigned char *, int); 107 static void byterev(unsigned char *, int);
108 static void memex(void); 108 static void memex(void);
109 static int bsesc(void); 109 static int bsesc(void);
110 static void dump(void); 110 static void dump(void);
111 static void prdump(unsigned long, long); 111 static void prdump(unsigned long, long);
112 static int ppc_inst_dump(unsigned long, long, int); 112 static int ppc_inst_dump(unsigned long, long, int);
113 static void dump_log_buf(void); 113 static void dump_log_buf(void);
114 static void backtrace(struct pt_regs *); 114 static void backtrace(struct pt_regs *);
115 static void excprint(struct pt_regs *); 115 static void excprint(struct pt_regs *);
116 static void prregs(struct pt_regs *); 116 static void prregs(struct pt_regs *);
117 static void memops(int); 117 static void memops(int);
118 static void memlocate(void); 118 static void memlocate(void);
119 static void memzcan(void); 119 static void memzcan(void);
120 static void memdiffs(unsigned char *, unsigned char *, unsigned, unsigned); 120 static void memdiffs(unsigned char *, unsigned char *, unsigned, unsigned);
121 int skipbl(void); 121 int skipbl(void);
122 int scanhex(unsigned long *valp); 122 int scanhex(unsigned long *valp);
123 static void scannl(void); 123 static void scannl(void);
124 static int hexdigit(int); 124 static int hexdigit(int);
125 void getstring(char *, int); 125 void getstring(char *, int);
126 static void flush_input(void); 126 static void flush_input(void);
127 static int inchar(void); 127 static int inchar(void);
128 static void take_input(char *); 128 static void take_input(char *);
129 static unsigned long read_spr(int); 129 static unsigned long read_spr(int);
130 static void write_spr(int, unsigned long); 130 static void write_spr(int, unsigned long);
131 static void super_regs(void); 131 static void super_regs(void);
132 static void remove_bpts(void); 132 static void remove_bpts(void);
133 static void insert_bpts(void); 133 static void insert_bpts(void);
134 static void remove_cpu_bpts(void); 134 static void remove_cpu_bpts(void);
135 static void insert_cpu_bpts(void); 135 static void insert_cpu_bpts(void);
136 static struct bpt *at_breakpoint(unsigned long pc); 136 static struct bpt *at_breakpoint(unsigned long pc);
137 static struct bpt *in_breakpoint_table(unsigned long pc, unsigned long *offp); 137 static struct bpt *in_breakpoint_table(unsigned long pc, unsigned long *offp);
138 static int do_step(struct pt_regs *); 138 static int do_step(struct pt_regs *);
139 static void bpt_cmds(void); 139 static void bpt_cmds(void);
140 static void cacheflush(void); 140 static void cacheflush(void);
141 static int cpu_cmd(void); 141 static int cpu_cmd(void);
142 static void csum(void); 142 static void csum(void);
143 static void bootcmds(void); 143 static void bootcmds(void);
144 static void proccall(void); 144 static void proccall(void);
145 void dump_segments(void); 145 void dump_segments(void);
146 static void symbol_lookup(void); 146 static void symbol_lookup(void);
147 static void xmon_show_stack(unsigned long sp, unsigned long lr, 147 static void xmon_show_stack(unsigned long sp, unsigned long lr,
148 unsigned long pc); 148 unsigned long pc);
149 static void xmon_print_symbol(unsigned long address, const char *mid, 149 static void xmon_print_symbol(unsigned long address, const char *mid,
150 const char *after); 150 const char *after);
151 static const char *getvecname(unsigned long vec); 151 static const char *getvecname(unsigned long vec);
152 152
153 static int do_spu_cmd(void); 153 static int do_spu_cmd(void);
154 154
155 #ifdef CONFIG_44x 155 #ifdef CONFIG_44x
156 static void dump_tlb_44x(void); 156 static void dump_tlb_44x(void);
157 #endif 157 #endif
158 #ifdef CONFIG_PPC_BOOK3E 158 #ifdef CONFIG_PPC_BOOK3E
159 static void dump_tlb_book3e(void); 159 static void dump_tlb_book3e(void);
160 #endif 160 #endif
161 161
162 static int xmon_no_auto_backtrace; 162 static int xmon_no_auto_backtrace;
163 163
164 extern void xmon_enter(void); 164 extern void xmon_enter(void);
165 extern void xmon_leave(void); 165 extern void xmon_leave(void);
166 166
167 #ifdef CONFIG_PPC64 167 #ifdef CONFIG_PPC64
168 #define REG "%.16lx" 168 #define REG "%.16lx"
169 #define REGS_PER_LINE 4 169 #define REGS_PER_LINE 4
170 #define LAST_VOLATILE 13 170 #define LAST_VOLATILE 13
171 #else 171 #else
172 #define REG "%.8lx" 172 #define REG "%.8lx"
173 #define REGS_PER_LINE 8 173 #define REGS_PER_LINE 8
174 #define LAST_VOLATILE 12 174 #define LAST_VOLATILE 12
175 #endif 175 #endif
176 176
177 #define GETWORD(v) (((v)[0] << 24) + ((v)[1] << 16) + ((v)[2] << 8) + (v)[3]) 177 #define GETWORD(v) (((v)[0] << 24) + ((v)[1] << 16) + ((v)[2] << 8) + (v)[3])
178 178
179 #define isxdigit(c) (('0' <= (c) && (c) <= '9') \ 179 #define isxdigit(c) (('0' <= (c) && (c) <= '9') \
180 || ('a' <= (c) && (c) <= 'f') \ 180 || ('a' <= (c) && (c) <= 'f') \
181 || ('A' <= (c) && (c) <= 'F')) 181 || ('A' <= (c) && (c) <= 'F'))
182 #define isalnum(c) (('0' <= (c) && (c) <= '9') \ 182 #define isalnum(c) (('0' <= (c) && (c) <= '9') \
183 || ('a' <= (c) && (c) <= 'z') \ 183 || ('a' <= (c) && (c) <= 'z') \
184 || ('A' <= (c) && (c) <= 'Z')) 184 || ('A' <= (c) && (c) <= 'Z'))
185 #define isspace(c) (c == ' ' || c == '\t' || c == 10 || c == 13 || c == 0) 185 #define isspace(c) (c == ' ' || c == '\t' || c == 10 || c == 13 || c == 0)
186 186
187 static char *help_string = "\ 187 static char *help_string = "\
188 Commands:\n\ 188 Commands:\n\
189 b show breakpoints\n\ 189 b show breakpoints\n\
190 bd set data breakpoint\n\ 190 bd set data breakpoint\n\
191 bi set instruction breakpoint\n\ 191 bi set instruction breakpoint\n\
192 bc clear breakpoint\n" 192 bc clear breakpoint\n"
193 #ifdef CONFIG_SMP 193 #ifdef CONFIG_SMP
194 "\ 194 "\
195 c print cpus stopped in xmon\n\ 195 c print cpus stopped in xmon\n\
196 c# try to switch to cpu number h (in hex)\n" 196 c# try to switch to cpu number h (in hex)\n"
197 #endif 197 #endif
198 "\ 198 "\
199 C checksum\n\ 199 C checksum\n\
200 d dump bytes\n\ 200 d dump bytes\n\
201 di dump instructions\n\ 201 di dump instructions\n\
202 df dump float values\n\ 202 df dump float values\n\
203 dd dump double values\n\ 203 dd dump double values\n\
204 dl dump the kernel log buffer\n\ 204 dl dump the kernel log buffer\n\
205 dr dump stream of raw bytes\n\ 205 dr dump stream of raw bytes\n\
206 e print exception information\n\ 206 e print exception information\n\
207 f flush cache\n\ 207 f flush cache\n\
208 la lookup symbol+offset of specified address\n\ 208 la lookup symbol+offset of specified address\n\
209 ls lookup address of specified symbol\n\ 209 ls lookup address of specified symbol\n\
210 m examine/change memory\n\ 210 m examine/change memory\n\
211 mm move a block of memory\n\ 211 mm move a block of memory\n\
212 ms set a block of memory\n\ 212 ms set a block of memory\n\
213 md compare two blocks of memory\n\ 213 md compare two blocks of memory\n\
214 ml locate a block of memory\n\ 214 ml locate a block of memory\n\
215 mz zero a block of memory\n\ 215 mz zero a block of memory\n\
216 mi show information about memory allocation\n\ 216 mi show information about memory allocation\n\
217 p call a procedure\n\ 217 p call a procedure\n\
218 r print registers\n\ 218 r print registers\n\
219 s single step\n" 219 s single step\n"
220 #ifdef CONFIG_SPU_BASE 220 #ifdef CONFIG_SPU_BASE
221 " ss stop execution on all spus\n\ 221 " ss stop execution on all spus\n\
222 sr restore execution on stopped spus\n\ 222 sr restore execution on stopped spus\n\
223 sf # dump spu fields for spu # (in hex)\n\ 223 sf # dump spu fields for spu # (in hex)\n\
224 sd # dump spu local store for spu # (in hex)\n\ 224 sd # dump spu local store for spu # (in hex)\n\
225 sdi # disassemble spu local store for spu # (in hex)\n" 225 sdi # disassemble spu local store for spu # (in hex)\n"
226 #endif 226 #endif
227 " S print special registers\n\ 227 " S print special registers\n\
228 t print backtrace\n\ 228 t print backtrace\n\
229 x exit monitor and recover\n\ 229 x exit monitor and recover\n\
230 X exit monitor and dont recover\n" 230 X exit monitor and dont recover\n"
231 #ifdef CONFIG_PPC64 231 #ifdef CONFIG_PPC64
232 " u dump segment table or SLB\n" 232 " u dump segment table or SLB\n"
233 #endif 233 #endif
234 #ifdef CONFIG_PPC_STD_MMU_32 234 #ifdef CONFIG_PPC_STD_MMU_32
235 " u dump segment registers\n" 235 " u dump segment registers\n"
236 #endif 236 #endif
237 #ifdef CONFIG_44x 237 #ifdef CONFIG_44x
238 " u dump TLB\n" 238 " u dump TLB\n"
239 #endif 239 #endif
240 " ? help\n" 240 " ? help\n"
241 " zr reboot\n\ 241 " zr reboot\n\
242 zh halt\n" 242 zh halt\n"
243 ; 243 ;
244 244
245 static struct pt_regs *xmon_regs; 245 static struct pt_regs *xmon_regs;
246 246
247 static inline void sync(void) 247 static inline void sync(void)
248 { 248 {
249 asm volatile("sync; isync"); 249 asm volatile("sync; isync");
250 } 250 }
251 251
252 static inline void store_inst(void *p) 252 static inline void store_inst(void *p)
253 { 253 {
254 asm volatile ("dcbst 0,%0; sync; icbi 0,%0; isync" : : "r" (p)); 254 asm volatile ("dcbst 0,%0; sync; icbi 0,%0; isync" : : "r" (p));
255 } 255 }
256 256
257 static inline void cflush(void *p) 257 static inline void cflush(void *p)
258 { 258 {
259 asm volatile ("dcbf 0,%0; icbi 0,%0" : : "r" (p)); 259 asm volatile ("dcbf 0,%0; icbi 0,%0" : : "r" (p));
260 } 260 }
261 261
262 static inline void cinval(void *p) 262 static inline void cinval(void *p)
263 { 263 {
264 asm volatile ("dcbi 0,%0; icbi 0,%0" : : "r" (p)); 264 asm volatile ("dcbi 0,%0; icbi 0,%0" : : "r" (p));
265 } 265 }
266 266
267 /* 267 /*
268 * Disable surveillance (the service processor watchdog function) 268 * Disable surveillance (the service processor watchdog function)
269 * while we are in xmon. 269 * while we are in xmon.
270 * XXX we should re-enable it when we leave. :) 270 * XXX we should re-enable it when we leave. :)
271 */ 271 */
272 #define SURVEILLANCE_TOKEN 9000 272 #define SURVEILLANCE_TOKEN 9000
273 273
274 static inline void disable_surveillance(void) 274 static inline void disable_surveillance(void)
275 { 275 {
276 #ifdef CONFIG_PPC_PSERIES 276 #ifdef CONFIG_PPC_PSERIES
277 /* Since this can't be a module, args should end up below 4GB. */ 277 /* Since this can't be a module, args should end up below 4GB. */
278 static struct rtas_args args; 278 static struct rtas_args args;
279 279
280 /* 280 /*
281 * At this point we have got all the cpus we can into 281 * At this point we have got all the cpus we can into
282 * xmon, so there is hopefully no other cpu calling RTAS 282 * xmon, so there is hopefully no other cpu calling RTAS
283 * at the moment, even though we don't take rtas.lock. 283 * at the moment, even though we don't take rtas.lock.
284 * If we did try to take rtas.lock there would be a 284 * If we did try to take rtas.lock there would be a
285 * real possibility of deadlock. 285 * real possibility of deadlock.
286 */ 286 */
287 args.token = rtas_token("set-indicator"); 287 args.token = rtas_token("set-indicator");
288 if (args.token == RTAS_UNKNOWN_SERVICE) 288 if (args.token == RTAS_UNKNOWN_SERVICE)
289 return; 289 return;
290 args.nargs = 3; 290 args.nargs = 3;
291 args.nret = 1; 291 args.nret = 1;
292 args.rets = &args.args[3]; 292 args.rets = &args.args[3];
293 args.args[0] = SURVEILLANCE_TOKEN; 293 args.args[0] = SURVEILLANCE_TOKEN;
294 args.args[1] = 0; 294 args.args[1] = 0;
295 args.args[2] = 0; 295 args.args[2] = 0;
296 enter_rtas(__pa(&args)); 296 enter_rtas(__pa(&args));
297 #endif /* CONFIG_PPC_PSERIES */ 297 #endif /* CONFIG_PPC_PSERIES */
298 } 298 }
299 299
300 #ifdef CONFIG_SMP 300 #ifdef CONFIG_SMP
301 static int xmon_speaker; 301 static int xmon_speaker;
302 302
303 static void get_output_lock(void) 303 static void get_output_lock(void)
304 { 304 {
305 int me = smp_processor_id() + 0x100; 305 int me = smp_processor_id() + 0x100;
306 int last_speaker = 0, prev; 306 int last_speaker = 0, prev;
307 long timeout; 307 long timeout;
308 308
309 if (xmon_speaker == me) 309 if (xmon_speaker == me)
310 return; 310 return;
311 for (;;) { 311 for (;;) {
312 if (xmon_speaker == 0) { 312 if (xmon_speaker == 0) {
313 last_speaker = cmpxchg(&xmon_speaker, 0, me); 313 last_speaker = cmpxchg(&xmon_speaker, 0, me);
314 if (last_speaker == 0) 314 if (last_speaker == 0)
315 return; 315 return;
316 } 316 }
317 timeout = 10000000; 317 timeout = 10000000;
318 while (xmon_speaker == last_speaker) { 318 while (xmon_speaker == last_speaker) {
319 if (--timeout > 0) 319 if (--timeout > 0)
320 continue; 320 continue;
321 /* hostile takeover */ 321 /* hostile takeover */
322 prev = cmpxchg(&xmon_speaker, last_speaker, me); 322 prev = cmpxchg(&xmon_speaker, last_speaker, me);
323 if (prev == last_speaker) 323 if (prev == last_speaker)
324 return; 324 return;
325 break; 325 break;
326 } 326 }
327 } 327 }
328 } 328 }
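Aside: get_output_lock() above is a tiny lock-free console lock. xmon_speaker doubles as the lock word, claimed with cmpxchg(); a CPU that has spun for ~10,000,000 iterations simply steals the word (the "hostile takeover"), so a wedged owner can never block debugger output. A minimal user-space sketch of the same claim-or-steal protocol, using the GCC __sync_val_compare_and_swap() builtin in place of the kernel's cmpxchg() (names and the timeout constant are illustrative only, not from the source):

    static volatile int speaker;                    /* 0 == lock free */

    static void claim_output(int me)                /* me must be non-zero */
    {
            int last = 0, prev;
            long timeout;

            if (speaker == me)
                    return;                         /* already held by us */
            for (;;) {
                    if (speaker == 0) {
                            last = __sync_val_compare_and_swap(&speaker, 0, me);
                            if (last == 0)
                                    return;         /* claimed the free lock */
                    }
                    timeout = 10000000;
                    while (speaker == last) {
                            if (--timeout > 0)
                                    continue;
                            /* holder looks stuck: steal the lock */
                            prev = __sync_val_compare_and_swap(&speaker, last, me);
                            if (prev == last)
                                    return;
                            break;                  /* lost the race; retry */
                    }
            }
    }

Release is a plain store of zero, exactly as release_output_lock() does below.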
329 329
330 static void release_output_lock(void) 330 static void release_output_lock(void)
331 { 331 {
332 xmon_speaker = 0; 332 xmon_speaker = 0;
333 } 333 }
334 334
335 int cpus_are_in_xmon(void) 335 int cpus_are_in_xmon(void)
336 { 336 {
337 return !cpumask_empty(&cpus_in_xmon); 337 return !cpumask_empty(&cpus_in_xmon);
338 } 338 }
339 #endif 339 #endif
340 340
341 static inline int unrecoverable_excp(struct pt_regs *regs) 341 static inline int unrecoverable_excp(struct pt_regs *regs)
342 { 342 {
343 #ifdef CONFIG_4xx 343 #ifdef CONFIG_4xx
344 /* We have no MSR_RI bit on 4xx, so we simply return false */ 344 /* We have no MSR_RI bit on 4xx, so we simply return false */
345 return 0; 345 return 0;
346 #else 346 #else
347 return ((regs->msr & MSR_RI) == 0); 347 return ((regs->msr & MSR_RI) == 0);
348 #endif 348 #endif
349 } 349 }
350 350
351 static int xmon_core(struct pt_regs *regs, int fromipi) 351 static int xmon_core(struct pt_regs *regs, int fromipi)
352 { 352 {
353 int cmd = 0; 353 int cmd = 0;
354 struct bpt *bp; 354 struct bpt *bp;
355 long recurse_jmp[JMP_BUF_LEN]; 355 long recurse_jmp[JMP_BUF_LEN];
356 unsigned long offset; 356 unsigned long offset;
357 unsigned long flags; 357 unsigned long flags;
358 #ifdef CONFIG_SMP 358 #ifdef CONFIG_SMP
359 int cpu; 359 int cpu;
360 int secondary; 360 int secondary;
361 unsigned long timeout; 361 unsigned long timeout;
362 #endif 362 #endif
363 363
364 local_irq_save(flags); 364 local_irq_save(flags);
365 365
366 bp = in_breakpoint_table(regs->nip, &offset); 366 bp = in_breakpoint_table(regs->nip, &offset);
367 if (bp != NULL) { 367 if (bp != NULL) {
368 regs->nip = bp->address + offset; 368 regs->nip = bp->address + offset;
369 atomic_dec(&bp->ref_count); 369 atomic_dec(&bp->ref_count);
370 } 370 }
371 371
372 remove_cpu_bpts(); 372 remove_cpu_bpts();
373 373
374 #ifdef CONFIG_SMP 374 #ifdef CONFIG_SMP
375 cpu = smp_processor_id(); 375 cpu = smp_processor_id();
376 if (cpumask_test_cpu(cpu, &cpus_in_xmon)) { 376 if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
377 get_output_lock(); 377 get_output_lock();
378 excprint(regs); 378 excprint(regs);
379 printf("cpu 0x%x: Exception %lx %s in xmon, " 379 printf("cpu 0x%x: Exception %lx %s in xmon, "
380 "returning to main loop\n", 380 "returning to main loop\n",
381 cpu, regs->trap, getvecname(TRAP(regs))); 381 cpu, regs->trap, getvecname(TRAP(regs)));
382 release_output_lock(); 382 release_output_lock();
383 longjmp(xmon_fault_jmp[cpu], 1); 383 longjmp(xmon_fault_jmp[cpu], 1);
384 } 384 }
385 385
386 if (setjmp(recurse_jmp) != 0) { 386 if (setjmp(recurse_jmp) != 0) {
387 if (!in_xmon || !xmon_gate) { 387 if (!in_xmon || !xmon_gate) {
388 get_output_lock(); 388 get_output_lock();
389 printf("xmon: WARNING: bad recursive fault " 389 printf("xmon: WARNING: bad recursive fault "
390 "on cpu 0x%x\n", cpu); 390 "on cpu 0x%x\n", cpu);
391 release_output_lock(); 391 release_output_lock();
392 goto waiting; 392 goto waiting;
393 } 393 }
394 secondary = !(xmon_taken && cpu == xmon_owner); 394 secondary = !(xmon_taken && cpu == xmon_owner);
395 goto cmdloop; 395 goto cmdloop;
396 } 396 }
397 397
398 xmon_fault_jmp[cpu] = recurse_jmp; 398 xmon_fault_jmp[cpu] = recurse_jmp;
399 cpumask_set_cpu(cpu, &cpus_in_xmon); 399 cpumask_set_cpu(cpu, &cpus_in_xmon);
400 400
401 bp = NULL; 401 bp = NULL;
402 if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) 402 if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT))
403 bp = at_breakpoint(regs->nip); 403 bp = at_breakpoint(regs->nip);
404 if (bp || unrecoverable_excp(regs)) 404 if (bp || unrecoverable_excp(regs))
405 fromipi = 0; 405 fromipi = 0;
406 406
407 if (!fromipi) { 407 if (!fromipi) {
408 get_output_lock(); 408 get_output_lock();
409 excprint(regs); 409 excprint(regs);
410 if (bp) { 410 if (bp) {
411 printf("cpu 0x%x stopped at breakpoint 0x%x (", 411 printf("cpu 0x%x stopped at breakpoint 0x%x (",
412 cpu, BP_NUM(bp)); 412 cpu, BP_NUM(bp));
413 xmon_print_symbol(regs->nip, " ", ")\n"); 413 xmon_print_symbol(regs->nip, " ", ")\n");
414 } 414 }
415 if (unrecoverable_excp(regs)) 415 if (unrecoverable_excp(regs))
416 printf("WARNING: exception is not recoverable, " 416 printf("WARNING: exception is not recoverable, "
417 "can't continue\n"); 417 "can't continue\n");
418 release_output_lock(); 418 release_output_lock();
419 } 419 }
420 420
421 waiting: 421 waiting:
422 secondary = 1; 422 secondary = 1;
423 while (secondary && !xmon_gate) { 423 while (secondary && !xmon_gate) {
424 if (in_xmon == 0) { 424 if (in_xmon == 0) {
425 if (fromipi) 425 if (fromipi)
426 goto leave; 426 goto leave;
427 secondary = test_and_set_bit(0, &in_xmon); 427 secondary = test_and_set_bit(0, &in_xmon);
428 } 428 }
429 barrier(); 429 barrier();
430 } 430 }
431 431
432 if (!secondary && !xmon_gate) { 432 if (!secondary && !xmon_gate) {
433 /* we are the first cpu to come in */ 433 /* we are the first cpu to come in */
434 /* interrupt other cpu(s) */ 434 /* interrupt other cpu(s) */
435 int ncpus = num_online_cpus(); 435 int ncpus = num_online_cpus();
436 436
437 xmon_owner = cpu; 437 xmon_owner = cpu;
438 mb(); 438 mb();
439 if (ncpus > 1) { 439 if (ncpus > 1) {
440 smp_send_debugger_break(); 440 smp_send_debugger_break();
441 /* wait for other cpus to come in */ 441 /* wait for other cpus to come in */
442 for (timeout = 100000000; timeout != 0; --timeout) { 442 for (timeout = 100000000; timeout != 0; --timeout) {
443 if (cpumask_weight(&cpus_in_xmon) >= ncpus) 443 if (cpumask_weight(&cpus_in_xmon) >= ncpus)
444 break; 444 break;
445 barrier(); 445 barrier();
446 } 446 }
447 } 447 }
448 remove_bpts(); 448 remove_bpts();
449 disable_surveillance(); 449 disable_surveillance();
450 /* for breakpoint or single step, print the current instr. */ 450 /* for breakpoint or single step, print the current instr. */
451 if (bp || TRAP(regs) == 0xd00) 451 if (bp || TRAP(regs) == 0xd00)
452 ppc_inst_dump(regs->nip, 1, 0); 452 ppc_inst_dump(regs->nip, 1, 0);
453 printf("enter ? for help\n"); 453 printf("enter ? for help\n");
454 mb(); 454 mb();
455 xmon_gate = 1; 455 xmon_gate = 1;
456 barrier(); 456 barrier();
457 } 457 }
458 458
459 cmdloop: 459 cmdloop:
460 while (in_xmon) { 460 while (in_xmon) {
461 if (secondary) { 461 if (secondary) {
462 if (cpu == xmon_owner) { 462 if (cpu == xmon_owner) {
463 if (!test_and_set_bit(0, &xmon_taken)) { 463 if (!test_and_set_bit(0, &xmon_taken)) {
464 secondary = 0; 464 secondary = 0;
465 continue; 465 continue;
466 } 466 }
467 /* missed it */ 467 /* missed it */
468 while (cpu == xmon_owner) 468 while (cpu == xmon_owner)
469 barrier(); 469 barrier();
470 } 470 }
471 barrier(); 471 barrier();
472 } else { 472 } else {
473 cmd = cmds(regs); 473 cmd = cmds(regs);
474 if (cmd != 0) { 474 if (cmd != 0) {
475 /* exiting xmon */ 475 /* exiting xmon */
476 insert_bpts(); 476 insert_bpts();
477 xmon_gate = 0; 477 xmon_gate = 0;
478 wmb(); 478 wmb();
479 in_xmon = 0; 479 in_xmon = 0;
480 break; 480 break;
481 } 481 }
482 /* have switched to some other cpu */ 482 /* have switched to some other cpu */
483 secondary = 1; 483 secondary = 1;
484 } 484 }
485 } 485 }
486 leave: 486 leave:
487 cpumask_clear_cpu(cpu, &cpus_in_xmon); 487 cpumask_clear_cpu(cpu, &cpus_in_xmon);
488 xmon_fault_jmp[cpu] = NULL; 488 xmon_fault_jmp[cpu] = NULL;
489 #else 489 #else
490 /* UP is simple... */ 490 /* UP is simple... */
491 if (in_xmon) { 491 if (in_xmon) {
492 printf("Exception %lx %s in xmon, returning to main loop\n", 492 printf("Exception %lx %s in xmon, returning to main loop\n",
493 regs->trap, getvecname(TRAP(regs))); 493 regs->trap, getvecname(TRAP(regs)));
494 longjmp(xmon_fault_jmp[0], 1); 494 longjmp(xmon_fault_jmp[0], 1);
495 } 495 }
496 if (setjmp(recurse_jmp) == 0) { 496 if (setjmp(recurse_jmp) == 0) {
497 xmon_fault_jmp[0] = recurse_jmp; 497 xmon_fault_jmp[0] = recurse_jmp;
498 in_xmon = 1; 498 in_xmon = 1;
499 499
500 excprint(regs); 500 excprint(regs);
501 bp = at_breakpoint(regs->nip); 501 bp = at_breakpoint(regs->nip);
502 if (bp) { 502 if (bp) {
503 printf("Stopped at breakpoint %x (", BP_NUM(bp)); 503 printf("Stopped at breakpoint %x (", BP_NUM(bp));
504 xmon_print_symbol(regs->nip, " ", ")\n"); 504 xmon_print_symbol(regs->nip, " ", ")\n");
505 } 505 }
506 if (unrecoverable_excp(regs)) 506 if (unrecoverable_excp(regs))
507 printf("WARNING: exception is not recoverable, " 507 printf("WARNING: exception is not recoverable, "
508 "can't continue\n"); 508 "can't continue\n");
509 remove_bpts(); 509 remove_bpts();
510 disable_surveillance(); 510 disable_surveillance();
511 /* for breakpoint or single step, print the current instr. */ 511 /* for breakpoint or single step, print the current instr. */
512 if (bp || TRAP(regs) == 0xd00) 512 if (bp || TRAP(regs) == 0xd00)
513 ppc_inst_dump(regs->nip, 1, 0); 513 ppc_inst_dump(regs->nip, 1, 0);
514 printf("enter ? for help\n"); 514 printf("enter ? for help\n");
515 } 515 }
516 516
517 cmd = cmds(regs); 517 cmd = cmds(regs);
518 518
519 insert_bpts(); 519 insert_bpts();
520 in_xmon = 0; 520 in_xmon = 0;
521 #endif 521 #endif
522 522
523 #ifdef CONFIG_BOOKE 523 #ifdef CONFIG_BOOKE
524 if (regs->msr & MSR_DE) { 524 if (regs->msr & MSR_DE) {
525 bp = at_breakpoint(regs->nip); 525 bp = at_breakpoint(regs->nip);
526 if (bp != NULL) { 526 if (bp != NULL) {
527 regs->nip = (unsigned long) &bp->instr[0]; 527 regs->nip = (unsigned long) &bp->instr[0];
528 atomic_inc(&bp->ref_count); 528 atomic_inc(&bp->ref_count);
529 } 529 }
530 } 530 }
531 #else 531 #else
532 if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) { 532 if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) {
533 bp = at_breakpoint(regs->nip); 533 bp = at_breakpoint(regs->nip);
534 if (bp != NULL) { 534 if (bp != NULL) {
535 int stepped = emulate_step(regs, bp->instr[0]); 535 int stepped = emulate_step(regs, bp->instr[0]);
536 if (stepped == 0) { 536 if (stepped == 0) {
537 regs->nip = (unsigned long) &bp->instr[0]; 537 regs->nip = (unsigned long) &bp->instr[0];
538 atomic_inc(&bp->ref_count); 538 atomic_inc(&bp->ref_count);
539 } else if (stepped < 0) { 539 } else if (stepped < 0) {
540 printf("Couldn't single-step %s instruction\n", 540 printf("Couldn't single-step %s instruction\n",
541 (IS_RFID(bp->instr[0])? "rfid": "mtmsrd")); 541 (IS_RFID(bp->instr[0])? "rfid": "mtmsrd"));
542 } 542 }
543 } 543 }
544 } 544 }
545 #endif 545 #endif
546 insert_cpu_bpts(); 546 insert_cpu_bpts();
547 547
548 local_irq_restore(flags); 548 local_irq_restore(flags);
549 549
550 return cmd != 'X' && cmd != EOF; 550 return cmd != 'X' && cmd != EOF;
551 } 551 }
552 552
553 int xmon(struct pt_regs *excp) 553 int xmon(struct pt_regs *excp)
554 { 554 {
555 struct pt_regs regs; 555 struct pt_regs regs;
556 556
557 if (excp == NULL) { 557 if (excp == NULL) {
558 ppc_save_regs(&regs); 558 ppc_save_regs(&regs);
559 excp = &regs; 559 excp = &regs;
560 } 560 }
561 561
562 return xmon_core(excp, 0); 562 return xmon_core(excp, 0);
563 } 563 }
564 EXPORT_SYMBOL(xmon); 564 EXPORT_SYMBOL(xmon);
565 565
566 irqreturn_t xmon_irq(int irq, void *d) 566 irqreturn_t xmon_irq(int irq, void *d)
567 { 567 {
568 unsigned long flags; 568 unsigned long flags;
569 local_irq_save(flags); 569 local_irq_save(flags);
570 printf("Keyboard interrupt\n"); 570 printf("Keyboard interrupt\n");
571 xmon(get_irq_regs()); 571 xmon(get_irq_regs());
572 local_irq_restore(flags); 572 local_irq_restore(flags);
573 return IRQ_HANDLED; 573 return IRQ_HANDLED;
574 } 574 }
575 575
576 static int xmon_bpt(struct pt_regs *regs) 576 static int xmon_bpt(struct pt_regs *regs)
577 { 577 {
578 struct bpt *bp; 578 struct bpt *bp;
579 unsigned long offset; 579 unsigned long offset;
580 580
581 if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT)) 581 if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT))
582 return 0; 582 return 0;
583 583
584 /* Are we at the trap at bp->instr[1] for some bp? */ 584 /* Are we at the trap at bp->instr[1] for some bp? */
585 bp = in_breakpoint_table(regs->nip, &offset); 585 bp = in_breakpoint_table(regs->nip, &offset);
586 if (bp != NULL && offset == 4) { 586 if (bp != NULL && offset == 4) {
587 regs->nip = bp->address + 4; 587 regs->nip = bp->address + 4;
588 atomic_dec(&bp->ref_count); 588 atomic_dec(&bp->ref_count);
589 return 1; 589 return 1;
590 } 590 }
591 591
592 /* Are we at a breakpoint? */ 592 /* Are we at a breakpoint? */
593 bp = at_breakpoint(regs->nip); 593 bp = at_breakpoint(regs->nip);
594 if (!bp) 594 if (!bp)
595 return 0; 595 return 0;
596 596
597 xmon_core(regs, 0); 597 xmon_core(regs, 0);
598 598
599 return 1; 599 return 1;
600 } 600 }
601 601
602 static int xmon_sstep(struct pt_regs *regs) 602 static int xmon_sstep(struct pt_regs *regs)
603 { 603 {
604 if (user_mode(regs)) 604 if (user_mode(regs))
605 return 0; 605 return 0;
606 xmon_core(regs, 0); 606 xmon_core(regs, 0);
607 return 1; 607 return 1;
608 } 608 }
609 609
610 static int xmon_dabr_match(struct pt_regs *regs) 610 static int xmon_dabr_match(struct pt_regs *regs)
611 { 611 {
612 if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT)) 612 if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT))
613 return 0; 613 return 0;
614 if (dabr.enabled == 0) 614 if (dabr.enabled == 0)
615 return 0; 615 return 0;
616 xmon_core(regs, 0); 616 xmon_core(regs, 0);
617 return 1; 617 return 1;
618 } 618 }
619 619
620 static int xmon_iabr_match(struct pt_regs *regs) 620 static int xmon_iabr_match(struct pt_regs *regs)
621 { 621 {
622 if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT)) 622 if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT))
623 return 0; 623 return 0;
624 if (iabr == NULL) 624 if (iabr == NULL)
625 return 0; 625 return 0;
626 xmon_core(regs, 0); 626 xmon_core(regs, 0);
627 return 1; 627 return 1;
628 } 628 }
629 629
630 static int xmon_ipi(struct pt_regs *regs) 630 static int xmon_ipi(struct pt_regs *regs)
631 { 631 {
632 #ifdef CONFIG_SMP 632 #ifdef CONFIG_SMP
633 if (in_xmon && !cpumask_test_cpu(smp_processor_id(), &cpus_in_xmon)) 633 if (in_xmon && !cpumask_test_cpu(smp_processor_id(), &cpus_in_xmon))
634 xmon_core(regs, 1); 634 xmon_core(regs, 1);
635 #endif 635 #endif
636 return 0; 636 return 0;
637 } 637 }
638 638
639 static int xmon_fault_handler(struct pt_regs *regs) 639 static int xmon_fault_handler(struct pt_regs *regs)
640 { 640 {
641 struct bpt *bp; 641 struct bpt *bp;
642 unsigned long offset; 642 unsigned long offset;
643 643
644 if (in_xmon && catch_memory_errors) 644 if (in_xmon && catch_memory_errors)
645 handle_fault(regs); /* doesn't return */ 645 handle_fault(regs); /* doesn't return */
646 646
647 if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) { 647 if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) {
648 bp = in_breakpoint_table(regs->nip, &offset); 648 bp = in_breakpoint_table(regs->nip, &offset);
649 if (bp != NULL) { 649 if (bp != NULL) {
650 regs->nip = bp->address + offset; 650 regs->nip = bp->address + offset;
651 atomic_dec(&bp->ref_count); 651 atomic_dec(&bp->ref_count);
652 } 652 }
653 } 653 }
654 654
655 return 0; 655 return 0;
656 } 656 }
657 657
658 static struct bpt *at_breakpoint(unsigned long pc) 658 static struct bpt *at_breakpoint(unsigned long pc)
659 { 659 {
660 int i; 660 int i;
661 struct bpt *bp; 661 struct bpt *bp;
662 662
663 bp = bpts; 663 bp = bpts;
664 for (i = 0; i < NBPTS; ++i, ++bp) 664 for (i = 0; i < NBPTS; ++i, ++bp)
665 if (bp->enabled && pc == bp->address) 665 if (bp->enabled && pc == bp->address)
666 return bp; 666 return bp;
667 return NULL; 667 return NULL;
668 } 668 }
669 669
670 static struct bpt *in_breakpoint_table(unsigned long nip, unsigned long *offp) 670 static struct bpt *in_breakpoint_table(unsigned long nip, unsigned long *offp)
671 { 671 {
672 unsigned long off; 672 unsigned long off;
673 673
674 off = nip - (unsigned long) bpts; 674 off = nip - (unsigned long) bpts;
675 if (off >= sizeof(bpts)) 675 if (off >= sizeof(bpts))
676 return NULL; 676 return NULL;
677 off %= sizeof(struct bpt); 677 off %= sizeof(struct bpt);
678 if (off != offsetof(struct bpt, instr[0]) 678 if (off != offsetof(struct bpt, instr[0])
679 && off != offsetof(struct bpt, instr[1])) 679 && off != offsetof(struct bpt, instr[1]))
680 return NULL; 680 return NULL;
681 *offp = off - offsetof(struct bpt, instr[0]); 681 *offp = off - offsetof(struct bpt, instr[0]);
682 return (struct bpt *) (nip - off); 682 return (struct bpt *) (nip - off);
683 } 683 }
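The pointer arithmetic in in_breakpoint_table() is worth spelling out: it decides whether a faulting nip lands on one of the two instruction words embedded in some bpts[] entry using only a subtraction, a bounds check, a modulo by the element size, and offsetof() comparisons. A self-contained sketch of the same test with hypothetical names (struct elem and find_slot() are illustrative, not from the kernel source):

    #include <stddef.h>

    struct elem {
            unsigned long   key;
            unsigned int    slot[2];        /* the embedded words we look for */
    };

    static struct elem table[16];

    /* Return the containing element if p points at table[i].slot[0] or
     * table[i].slot[1] for some i, else NULL; *offp is 0 or sizeof(int). */
    static struct elem *find_slot(void *p, unsigned long *offp)
    {
            unsigned long off = (char *)p - (char *)table;

            if (off >= sizeof(table))
                    return NULL;                    /* outside the array */
            off %= sizeof(struct elem);             /* offset within one element */
            if (off != offsetof(struct elem, slot[0]) &&
                off != offsetof(struct elem, slot[1]))
                    return NULL;                    /* inside, but not on a slot */
            *offp = off - offsetof(struct elem, slot[0]);
            return (struct elem *)((char *)p - off);
    }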
684 684
685 static struct bpt *new_breakpoint(unsigned long a) 685 static struct bpt *new_breakpoint(unsigned long a)
686 { 686 {
687 struct bpt *bp; 687 struct bpt *bp;
688 688
689 a &= ~3UL; 689 a &= ~3UL;
690 bp = at_breakpoint(a); 690 bp = at_breakpoint(a);
691 if (bp) 691 if (bp)
692 return bp; 692 return bp;
693 693
694 for (bp = bpts; bp < &bpts[NBPTS]; ++bp) { 694 for (bp = bpts; bp < &bpts[NBPTS]; ++bp) {
695 if (!bp->enabled && atomic_read(&bp->ref_count) == 0) { 695 if (!bp->enabled && atomic_read(&bp->ref_count) == 0) {
696 bp->address = a; 696 bp->address = a;
697 bp->instr[1] = bpinstr; 697 bp->instr[1] = bpinstr;
698 store_inst(&bp->instr[1]); 698 store_inst(&bp->instr[1]);
699 return bp; 699 return bp;
700 } 700 }
701 } 701 }
702 702
703 printf("Sorry, no free breakpoints. Please clear one first.\n"); 703 printf("Sorry, no free breakpoints. Please clear one first.\n");
704 return NULL; 704 return NULL;
705 } 705 }
706 706
707 static void insert_bpts(void) 707 static void insert_bpts(void)
708 { 708 {
709 int i; 709 int i;
710 struct bpt *bp; 710 struct bpt *bp;
711 711
712 bp = bpts; 712 bp = bpts;
713 for (i = 0; i < NBPTS; ++i, ++bp) { 713 for (i = 0; i < NBPTS; ++i, ++bp) {
714 if ((bp->enabled & (BP_TRAP|BP_IABR)) == 0) 714 if ((bp->enabled & (BP_TRAP|BP_IABR)) == 0)
715 continue; 715 continue;
716 if (mread(bp->address, &bp->instr[0], 4) != 4) { 716 if (mread(bp->address, &bp->instr[0], 4) != 4) {
717 printf("Couldn't read instruction at %lx, " 717 printf("Couldn't read instruction at %lx, "
718 "disabling breakpoint there\n", bp->address); 718 "disabling breakpoint there\n", bp->address);
719 bp->enabled = 0; 719 bp->enabled = 0;
720 continue; 720 continue;
721 } 721 }
722 if (IS_MTMSRD(bp->instr[0]) || IS_RFID(bp->instr[0])) { 722 if (IS_MTMSRD(bp->instr[0]) || IS_RFID(bp->instr[0])) {
723 printf("Breakpoint at %lx is on an mtmsrd or rfid " 723 printf("Breakpoint at %lx is on an mtmsrd or rfid "
724 "instruction, disabling it\n", bp->address); 724 "instruction, disabling it\n", bp->address);
725 bp->enabled = 0; 725 bp->enabled = 0;
726 continue; 726 continue;
727 } 727 }
728 store_inst(&bp->instr[0]); 728 store_inst(&bp->instr[0]);
729 if (bp->enabled & BP_IABR) 729 if (bp->enabled & BP_IABR)
730 continue; 730 continue;
731 if (mwrite(bp->address, &bpinstr, 4) != 4) { 731 if (mwrite(bp->address, &bpinstr, 4) != 4) {
732 printf("Couldn't write instruction at %lx, " 732 printf("Couldn't write instruction at %lx, "
733 "disabling breakpoint there\n", bp->address); 733 "disabling breakpoint there\n", bp->address);
734 bp->enabled &= ~BP_TRAP; 734 bp->enabled &= ~BP_TRAP;
735 continue; 735 continue;
736 } 736 }
737 store_inst((void *)bp->address); 737 store_inst((void *)bp->address);
738 } 738 }
739 } 739 }
740 740
741 static void insert_cpu_bpts(void) 741 static void insert_cpu_bpts(void)
742 { 742 {
743 if (dabr.enabled) 743 if (dabr.enabled)
744 set_dabr(dabr.address | (dabr.enabled & 7)); 744 set_dabr(dabr.address | (dabr.enabled & 7));
745 if (iabr && cpu_has_feature(CPU_FTR_IABR)) 745 if (iabr && cpu_has_feature(CPU_FTR_IABR))
746 mtspr(SPRN_IABR, iabr->address 746 mtspr(SPRN_IABR, iabr->address
747 | (iabr->enabled & (BP_IABR|BP_IABR_TE))); 747 | (iabr->enabled & (BP_IABR|BP_IABR_TE)));
748 } 748 }
749 749
750 static void remove_bpts(void) 750 static void remove_bpts(void)
751 { 751 {
752 int i; 752 int i;
753 struct bpt *bp; 753 struct bpt *bp;
754 unsigned instr; 754 unsigned instr;
755 755
756 bp = bpts; 756 bp = bpts;
757 for (i = 0; i < NBPTS; ++i, ++bp) { 757 for (i = 0; i < NBPTS; ++i, ++bp) {
758 if ((bp->enabled & (BP_TRAP|BP_IABR)) != BP_TRAP) 758 if ((bp->enabled & (BP_TRAP|BP_IABR)) != BP_TRAP)
759 continue; 759 continue;
760 if (mread(bp->address, &instr, 4) == 4 760 if (mread(bp->address, &instr, 4) == 4
761 && instr == bpinstr 761 && instr == bpinstr
762 && mwrite(bp->address, &bp->instr, 4) != 4) 762 && mwrite(bp->address, &bp->instr, 4) != 4)
763 printf("Couldn't remove breakpoint at %lx\n", 763 printf("Couldn't remove breakpoint at %lx\n",
764 bp->address); 764 bp->address);
765 else 765 else
766 store_inst((void *)bp->address); 766 store_inst((void *)bp->address);
767 } 767 }
768 } 768 }
769 769
770 static void remove_cpu_bpts(void) 770 static void remove_cpu_bpts(void)
771 { 771 {
772 set_dabr(0); 772 set_dabr(0);
773 if (cpu_has_feature(CPU_FTR_IABR)) 773 if (cpu_has_feature(CPU_FTR_IABR))
774 mtspr(SPRN_IABR, 0); 774 mtspr(SPRN_IABR, 0);
775 } 775 }
776 776
777 /* Command interpreting routine */ 777 /* Command interpreting routine */
778 static char *last_cmd; 778 static char *last_cmd;
779 779
780 static int 780 static int
781 cmds(struct pt_regs *excp) 781 cmds(struct pt_regs *excp)
782 { 782 {
783 int cmd = 0; 783 int cmd = 0;
784 784
785 last_cmd = NULL; 785 last_cmd = NULL;
786 xmon_regs = excp; 786 xmon_regs = excp;
787 787
788 if (!xmon_no_auto_backtrace) { 788 if (!xmon_no_auto_backtrace) {
789 xmon_no_auto_backtrace = 1; 789 xmon_no_auto_backtrace = 1;
790 xmon_show_stack(excp->gpr[1], excp->link, excp->nip); 790 xmon_show_stack(excp->gpr[1], excp->link, excp->nip);
791 } 791 }
792 792
793 for(;;) { 793 for(;;) {
794 #ifdef CONFIG_SMP 794 #ifdef CONFIG_SMP
795 printf("%x:", smp_processor_id()); 795 printf("%x:", smp_processor_id());
796 #endif /* CONFIG_SMP */ 796 #endif /* CONFIG_SMP */
797 printf("mon> "); 797 printf("mon> ");
798 flush_input(); 798 flush_input();
799 termch = 0; 799 termch = 0;
800 cmd = skipbl(); 800 cmd = skipbl();
801 if( cmd == '\n' ) { 801 if( cmd == '\n' ) {
802 if (last_cmd == NULL) 802 if (last_cmd == NULL)
803 continue; 803 continue;
804 take_input(last_cmd); 804 take_input(last_cmd);
805 last_cmd = NULL; 805 last_cmd = NULL;
806 cmd = inchar(); 806 cmd = inchar();
807 } 807 }
808 switch (cmd) { 808 switch (cmd) {
809 case 'm': 809 case 'm':
810 cmd = inchar(); 810 cmd = inchar();
811 switch (cmd) { 811 switch (cmd) {
812 case 'm': 812 case 'm':
813 case 's': 813 case 's':
814 case 'd': 814 case 'd':
815 memops(cmd); 815 memops(cmd);
816 break; 816 break;
817 case 'l': 817 case 'l':
818 memlocate(); 818 memlocate();
819 break; 819 break;
820 case 'z': 820 case 'z':
821 memzcan(); 821 memzcan();
822 break; 822 break;
823 case 'i': 823 case 'i':
824 show_mem(0); 824 show_mem(0);
825 break; 825 break;
826 default: 826 default:
827 termch = cmd; 827 termch = cmd;
828 memex(); 828 memex();
829 } 829 }
830 break; 830 break;
831 case 'd': 831 case 'd':
832 dump(); 832 dump();
833 break; 833 break;
834 case 'l': 834 case 'l':
835 symbol_lookup(); 835 symbol_lookup();
836 break; 836 break;
837 case 'r': 837 case 'r':
838 prregs(excp); /* print regs */ 838 prregs(excp); /* print regs */
839 break; 839 break;
840 case 'e': 840 case 'e':
841 excprint(excp); 841 excprint(excp);
842 break; 842 break;
843 case 'S': 843 case 'S':
844 super_regs(); 844 super_regs();
845 break; 845 break;
846 case 't': 846 case 't':
847 backtrace(excp); 847 backtrace(excp);
848 break; 848 break;
849 case 'f': 849 case 'f':
850 cacheflush(); 850 cacheflush();
851 break; 851 break;
852 case 's': 852 case 's':
853 if (do_spu_cmd() == 0) 853 if (do_spu_cmd() == 0)
854 break; 854 break;
855 if (do_step(excp)) 855 if (do_step(excp))
856 return cmd; 856 return cmd;
857 break; 857 break;
858 case 'x': 858 case 'x':
859 case 'X': 859 case 'X':
860 return cmd; 860 return cmd;
861 case EOF: 861 case EOF:
862 printf(" <no input ...>\n"); 862 printf(" <no input ...>\n");
863 mdelay(2000); 863 mdelay(2000);
864 return cmd; 864 return cmd;
865 case '?': 865 case '?':
866 xmon_puts(help_string); 866 xmon_puts(help_string);
867 break; 867 break;
868 case 'b': 868 case 'b':
869 bpt_cmds(); 869 bpt_cmds();
870 break; 870 break;
871 case 'C': 871 case 'C':
872 csum(); 872 csum();
873 break; 873 break;
874 case 'c': 874 case 'c':
875 if (cpu_cmd()) 875 if (cpu_cmd())
876 return 0; 876 return 0;
877 break; 877 break;
878 case 'z': 878 case 'z':
879 bootcmds(); 879 bootcmds();
880 break; 880 break;
881 case 'p': 881 case 'p':
882 proccall(); 882 proccall();
883 break; 883 break;
884 #ifdef CONFIG_PPC_STD_MMU 884 #ifdef CONFIG_PPC_STD_MMU
885 case 'u': 885 case 'u':
886 dump_segments(); 886 dump_segments();
887 break; 887 break;
888 #endif 888 #endif
889 #ifdef CONFIG_4xx 889 #ifdef CONFIG_4xx
890 case 'u': 890 case 'u':
891 dump_tlb_44x(); 891 dump_tlb_44x();
892 break; 892 break;
893 #endif 893 #endif
894 #ifdef CONFIG_PPC_BOOK3E 894 #ifdef CONFIG_PPC_BOOK3E
895 case 'u': 895 case 'u':
896 dump_tlb_book3e(); 896 dump_tlb_book3e();
897 break; 897 break;
898 #endif 898 #endif
899 default: 899 default:
900 printf("Unrecognized command: "); 900 printf("Unrecognized command: ");
901 do { 901 do {
902 if (' ' < cmd && cmd <= '~') 902 if (' ' < cmd && cmd <= '~')
903 putchar(cmd); 903 putchar(cmd);
904 else 904 else
905 printf("\\x%x", cmd); 905 printf("\\x%x", cmd);
906 cmd = inchar(); 906 cmd = inchar();
907 } while (cmd != '\n'); 907 } while (cmd != '\n');
908 printf(" (type ? for help)\n"); 908 printf(" (type ? for help)\n");
909 break; 909 break;
910 } 910 }
911 } 911 }
912 } 912 }
913 913
914 #ifdef CONFIG_BOOKE 914 #ifdef CONFIG_BOOKE
915 static int do_step(struct pt_regs *regs) 915 static int do_step(struct pt_regs *regs)
916 { 916 {
917 regs->msr |= MSR_DE; 917 regs->msr |= MSR_DE;
918 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM); 918 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
919 return 1; 919 return 1;
920 } 920 }
921 #else 921 #else
922 /* 922 /*
923 * Step a single instruction. 923 * Step a single instruction.
924 * Some instructions we emulate, others we execute with MSR_SE set. 924 * Some instructions we emulate, others we execute with MSR_SE set.
925 */ 925 */
926 static int do_step(struct pt_regs *regs) 926 static int do_step(struct pt_regs *regs)
927 { 927 {
928 unsigned int instr; 928 unsigned int instr;
929 int stepped; 929 int stepped;
930 930
931 /* check we are in 64-bit kernel mode, translation enabled */ 931 /* check we are in 64-bit kernel mode, translation enabled */
932 if ((regs->msr & (MSR_64BIT|MSR_PR|MSR_IR)) == (MSR_64BIT|MSR_IR)) { 932 if ((regs->msr & (MSR_64BIT|MSR_PR|MSR_IR)) == (MSR_64BIT|MSR_IR)) {
933 if (mread(regs->nip, &instr, 4) == 4) { 933 if (mread(regs->nip, &instr, 4) == 4) {
934 stepped = emulate_step(regs, instr); 934 stepped = emulate_step(regs, instr);
935 if (stepped < 0) { 935 if (stepped < 0) {
936 printf("Couldn't single-step %s instruction\n", 936 printf("Couldn't single-step %s instruction\n",
937 (IS_RFID(instr)? "rfid": "mtmsrd")); 937 (IS_RFID(instr)? "rfid": "mtmsrd"));
938 return 0; 938 return 0;
939 } 939 }
940 if (stepped > 0) { 940 if (stepped > 0) {
941 regs->trap = 0xd00 | (regs->trap & 1); 941 regs->trap = 0xd00 | (regs->trap & 1);
942 printf("stepped to "); 942 printf("stepped to ");
943 xmon_print_symbol(regs->nip, " ", "\n"); 943 xmon_print_symbol(regs->nip, " ", "\n");
944 ppc_inst_dump(regs->nip, 1, 0); 944 ppc_inst_dump(regs->nip, 1, 0);
945 return 0; 945 return 0;
946 } 946 }
947 } 947 }
948 } 948 }
949 regs->msr |= MSR_SE; 949 regs->msr |= MSR_SE;
950 return 1; 950 return 1;
951 } 951 }
952 #endif 952 #endif
953 953
954 static void bootcmds(void) 954 static void bootcmds(void)
955 { 955 {
956 int cmd; 956 int cmd;
957 957
958 cmd = inchar(); 958 cmd = inchar();
959 if (cmd == 'r') 959 if (cmd == 'r')
960 ppc_md.restart(NULL); 960 ppc_md.restart(NULL);
961 else if (cmd == 'h') 961 else if (cmd == 'h')
962 ppc_md.halt(); 962 ppc_md.halt();
963 else if (cmd == 'p') 963 else if (cmd == 'p')
964 ppc_md.power_off(); 964 ppc_md.power_off();
965 } 965 }
966 966
967 static int cpu_cmd(void) 967 static int cpu_cmd(void)
968 { 968 {
969 #ifdef CONFIG_SMP 969 #ifdef CONFIG_SMP
970 unsigned long cpu; 970 unsigned long cpu;
971 int timeout; 971 int timeout;
972 int count; 972 int count;
973 973
974 if (!scanhex(&cpu)) { 974 if (!scanhex(&cpu)) {
975 /* print cpus waiting or in xmon */ 975 /* print cpus waiting or in xmon */
976 printf("cpus stopped:"); 976 printf("cpus stopped:");
977 count = 0; 977 count = 0;
978 for (cpu = 0; cpu < NR_CPUS; ++cpu) { 978 for (cpu = 0; cpu < NR_CPUS; ++cpu) {
979 if (cpumask_test_cpu(cpu, &cpus_in_xmon)) { 979 if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
980 if (count == 0) 980 if (count == 0)
981 printf(" %x", cpu); 981 printf(" %x", cpu);
982 ++count; 982 ++count;
983 } else { 983 } else {
984 if (count > 1) 984 if (count > 1)
985 printf("-%x", cpu - 1); 985 printf("-%x", cpu - 1);
986 count = 0; 986 count = 0;
987 } 987 }
988 } 988 }
989 if (count > 1) 989 if (count > 1)
990 printf("-%x", NR_CPUS - 1); 990 printf("-%x", NR_CPUS - 1);
991 printf("\n"); 991 printf("\n");
992 return 0; 992 return 0;
993 } 993 }
994 /* try to switch to cpu specified */ 994 /* try to switch to cpu specified */
995 if (!cpumask_test_cpu(cpu, &cpus_in_xmon)) { 995 if (!cpumask_test_cpu(cpu, &cpus_in_xmon)) {
996 printf("cpu 0x%x isn't in xmon\n", cpu); 996 printf("cpu 0x%x isn't in xmon\n", cpu);
997 return 0; 997 return 0;
998 } 998 }
999 xmon_taken = 0; 999 xmon_taken = 0;
1000 mb(); 1000 mb();
1001 xmon_owner = cpu; 1001 xmon_owner = cpu;
1002 timeout = 10000000; 1002 timeout = 10000000;
1003 while (!xmon_taken) { 1003 while (!xmon_taken) {
1004 if (--timeout == 0) { 1004 if (--timeout == 0) {
1005 if (test_and_set_bit(0, &xmon_taken)) 1005 if (test_and_set_bit(0, &xmon_taken))
1006 break; 1006 break;
1007 /* take control back */ 1007 /* take control back */
1008 mb(); 1008 mb();
1009 xmon_owner = smp_processor_id(); 1009 xmon_owner = smp_processor_id();
1010 printf("cpu %u didn't take control\n", cpu); 1010 printf("cpu %u didn't take control\n", cpu);
1011 return 0; 1011 return 0;
1012 } 1012 }
1013 barrier(); 1013 barrier();
1014 } 1014 }
1015 return 1; 1015 return 1;
1016 #else 1016 #else
1017 return 0; 1017 return 0;
1018 #endif /* CONFIG_SMP */ 1018 #endif /* CONFIG_SMP */
1019 } 1019 }
1020 1020
1021 static unsigned short fcstab[256] = { 1021 static unsigned short fcstab[256] = {
1022 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf, 1022 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
1023 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7, 1023 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
1024 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e, 1024 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
1025 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876, 1025 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
1026 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd, 1026 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
1027 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5, 1027 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
1028 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c, 1028 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
1029 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974, 1029 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
1030 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb, 1030 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
1031 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3, 1031 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
1032 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a, 1032 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
1033 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72, 1033 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
1034 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9, 1034 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
1035 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1, 1035 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
1036 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738, 1036 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
1037 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70, 1037 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
1038 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7, 1038 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
1039 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff, 1039 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
1040 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036, 1040 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
1041 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e, 1041 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
1042 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5, 1042 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
1043 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd, 1043 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
1044 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134, 1044 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
1045 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c, 1045 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
1046 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3, 1046 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
1047 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb, 1047 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
1048 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232, 1048 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
1049 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a, 1049 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
1050 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1, 1050 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
1051 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9, 1051 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
1052 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330, 1052 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
1053 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78 1053 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
1054 }; 1054 };
1055 1055
1056 #define FCS(fcs, c) (((fcs) >> 8) ^ fcstab[((fcs) ^ (c)) & 0xff]) 1056 #define FCS(fcs, c) (((fcs) >> 8) ^ fcstab[((fcs) ^ (c)) & 0xff])
1057 1057
1058 static void 1058 static void
1059 csum(void) 1059 csum(void)
1060 { 1060 {
1061 unsigned int i; 1061 unsigned int i;
1062 unsigned short fcs; 1062 unsigned short fcs;
1063 unsigned char v; 1063 unsigned char v;
1064 1064
1065 if (!scanhex(&adrs)) 1065 if (!scanhex(&adrs))
1066 return; 1066 return;
1067 if (!scanhex(&ncsum)) 1067 if (!scanhex(&ncsum))
1068 return; 1068 return;
1069 fcs = 0xffff; 1069 fcs = 0xffff;
1070 for (i = 0; i < ncsum; ++i) { 1070 for (i = 0; i < ncsum; ++i) {
1071 if (mread(adrs+i, &v, 1) == 0) { 1071 if (mread(adrs+i, &v, 1) == 0) {
1072 printf("csum stopped at %x\n", adrs+i); 1072 printf("csum stopped at %x\n", adrs+i);
1073 break; 1073 break;
1074 } 1074 }
1075 fcs = FCS(fcs, v); 1075 fcs = FCS(fcs, v);
1076 } 1076 }
1077 printf("%x\n", fcs); 1077 printf("%x\n", fcs);
1078 } 1078 }
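The 'C' command above computes the classic HDLC/PPP 16-bit frame check sequence over a memory range: fcstab[] is the byte-at-a-time lookup table for the reflected CRC-16 polynomial 0x1021 (0x8408 in reflected form), and the FCS() macro folds in one byte at a time starting from 0xffff. A small stand-alone sketch showing how such a table can be generated and applied to a buffer (an illustration, not part of xmon; each table[] entry it produces matches the corresponding fcstab[] value above):

    #include <stdint.h>
    #include <stddef.h>

    static uint16_t table[256];

    static void fcs_init(void)
    {
            unsigned int i, bit;
            uint16_t v;

            for (i = 0; i < 256; i++) {
                    v = i;
                    for (bit = 0; bit < 8; bit++)
                            v = (v & 1) ? (v >> 1) ^ 0x8408 : v >> 1;
                    table[i] = v;           /* e.g. table[1] == 0x1189 */
            }
    }

    static uint16_t fcs_buf(const uint8_t *p, size_t len)
    {
            uint16_t fcs = 0xffff;          /* same initial value as csum() */

            while (len--)
                    fcs = (fcs >> 8) ^ table[(fcs ^ *p++) & 0xff];
            return fcs;
    }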
1079 1079
1080 /* 1080 /*
1081 * Check if this is a suitable place to put a breakpoint. 1081 * Check if this is a suitable place to put a breakpoint.
1082 */ 1082 */
1083 static long check_bp_loc(unsigned long addr) 1083 static long check_bp_loc(unsigned long addr)
1084 { 1084 {
1085 unsigned int instr; 1085 unsigned int instr;
1086 1086
1087 addr &= ~3; 1087 addr &= ~3;
1088 if (!is_kernel_addr(addr)) { 1088 if (!is_kernel_addr(addr)) {
1089 printf("Breakpoints may only be placed at kernel addresses\n"); 1089 printf("Breakpoints may only be placed at kernel addresses\n");
1090 return 0; 1090 return 0;
1091 } 1091 }
1092 if (!mread(addr, &instr, sizeof(instr))) { 1092 if (!mread(addr, &instr, sizeof(instr))) {
1093 printf("Can't read instruction at address %lx\n", addr); 1093 printf("Can't read instruction at address %lx\n", addr);
1094 return 0; 1094 return 0;
1095 } 1095 }
1096 if (IS_MTMSRD(instr) || IS_RFID(instr)) { 1096 if (IS_MTMSRD(instr) || IS_RFID(instr)) {
1097 printf("Breakpoints may not be placed on mtmsrd or rfid " 1097 printf("Breakpoints may not be placed on mtmsrd or rfid "
1098 "instructions\n"); 1098 "instructions\n");
1099 return 0; 1099 return 0;
1100 } 1100 }
1101 return 1; 1101 return 1;
1102 } 1102 }
1103 1103
1104 static char *breakpoint_help_string = 1104 static char *breakpoint_help_string =
1105 "Breakpoint command usage:\n" 1105 "Breakpoint command usage:\n"
1106 "b show breakpoints\n" 1106 "b show breakpoints\n"
1107 "b <addr> [cnt] set breakpoint at given instr addr\n" 1107 "b <addr> [cnt] set breakpoint at given instr addr\n"
1108 "bc clear all breakpoints\n" 1108 "bc clear all breakpoints\n"
1109 "bc <n/addr> clear breakpoint number n or at addr\n" 1109 "bc <n/addr> clear breakpoint number n or at addr\n"
1110 "bi <addr> [cnt] set hardware instr breakpoint (POWER3/RS64 only)\n" 1110 "bi <addr> [cnt] set hardware instr breakpoint (POWER3/RS64 only)\n"
1111 "bd <addr> [cnt] set hardware data breakpoint\n" 1111 "bd <addr> [cnt] set hardware data breakpoint\n"
1112 ""; 1112 "";
1113 1113
1114 static void 1114 static void
1115 bpt_cmds(void) 1115 bpt_cmds(void)
1116 { 1116 {
1117 int cmd; 1117 int cmd;
1118 unsigned long a; 1118 unsigned long a;
1119 int mode, i; 1119 int mode, i;
1120 struct bpt *bp; 1120 struct bpt *bp;
1121 const char badaddr[] = "Only kernel addresses are permitted " 1121 const char badaddr[] = "Only kernel addresses are permitted "
1122 "for breakpoints\n"; 1122 "for breakpoints\n";
1123 1123
1124 cmd = inchar(); 1124 cmd = inchar();
1125 switch (cmd) { 1125 switch (cmd) {
1126 #ifndef CONFIG_8xx 1126 #ifndef CONFIG_8xx
1127 case 'd': /* bd - hardware data breakpoint */ 1127 case 'd': /* bd - hardware data breakpoint */
1128 mode = 7; 1128 mode = 7;
1129 cmd = inchar(); 1129 cmd = inchar();
1130 if (cmd == 'r') 1130 if (cmd == 'r')
1131 mode = 5; 1131 mode = 5;
1132 else if (cmd == 'w') 1132 else if (cmd == 'w')
1133 mode = 6; 1133 mode = 6;
1134 else 1134 else
1135 termch = cmd; 1135 termch = cmd;
1136 dabr.address = 0; 1136 dabr.address = 0;
1137 dabr.enabled = 0; 1137 dabr.enabled = 0;
1138 if (scanhex(&dabr.address)) { 1138 if (scanhex(&dabr.address)) {
1139 if (!is_kernel_addr(dabr.address)) { 1139 if (!is_kernel_addr(dabr.address)) {
1140 printf(badaddr); 1140 printf(badaddr);
1141 break; 1141 break;
1142 } 1142 }
1143 dabr.address &= ~7; 1143 dabr.address &= ~7;
1144 dabr.enabled = mode | BP_DABR; 1144 dabr.enabled = mode | BP_DABR;
1145 } 1145 }
1146 break; 1146 break;
1147 1147
1148 case 'i': /* bi - hardware instr breakpoint */ 1148 case 'i': /* bi - hardware instr breakpoint */
1149 if (!cpu_has_feature(CPU_FTR_IABR)) { 1149 if (!cpu_has_feature(CPU_FTR_IABR)) {
1150 printf("Hardware instruction breakpoint " 1150 printf("Hardware instruction breakpoint "
1151 "not supported on this cpu\n"); 1151 "not supported on this cpu\n");
1152 break; 1152 break;
1153 } 1153 }
1154 if (iabr) { 1154 if (iabr) {
1155 iabr->enabled &= ~(BP_IABR | BP_IABR_TE); 1155 iabr->enabled &= ~(BP_IABR | BP_IABR_TE);
1156 iabr = NULL; 1156 iabr = NULL;
1157 } 1157 }
1158 if (!scanhex(&a)) 1158 if (!scanhex(&a))
1159 break; 1159 break;
1160 if (!check_bp_loc(a)) 1160 if (!check_bp_loc(a))
1161 break; 1161 break;
1162 bp = new_breakpoint(a); 1162 bp = new_breakpoint(a);
1163 if (bp != NULL) { 1163 if (bp != NULL) {
1164 bp->enabled |= BP_IABR | BP_IABR_TE; 1164 bp->enabled |= BP_IABR | BP_IABR_TE;
1165 iabr = bp; 1165 iabr = bp;
1166 } 1166 }
1167 break; 1167 break;
1168 #endif 1168 #endif
1169 1169
1170 case 'c': 1170 case 'c':
1171 if (!scanhex(&a)) { 1171 if (!scanhex(&a)) {
1172 /* clear all breakpoints */ 1172 /* clear all breakpoints */
1173 for (i = 0; i < NBPTS; ++i) 1173 for (i = 0; i < NBPTS; ++i)
1174 bpts[i].enabled = 0; 1174 bpts[i].enabled = 0;
1175 iabr = NULL; 1175 iabr = NULL;
1176 dabr.enabled = 0; 1176 dabr.enabled = 0;
1177 printf("All breakpoints cleared\n"); 1177 printf("All breakpoints cleared\n");
1178 break; 1178 break;
1179 } 1179 }
1180 1180
1181 if (a <= NBPTS && a >= 1) { 1181 if (a <= NBPTS && a >= 1) {
1182 /* assume a breakpoint number */ 1182 /* assume a breakpoint number */
1183 bp = &bpts[a-1]; /* bp nums are 1 based */ 1183 bp = &bpts[a-1]; /* bp nums are 1 based */
1184 } else { 1184 } else {
1185 /* assume a breakpoint address */ 1185 /* assume a breakpoint address */
1186 bp = at_breakpoint(a); 1186 bp = at_breakpoint(a);
1187 if (bp == NULL) { 1187 if (bp == NULL) {
1188 printf("No breakpoint at %x\n", a); 1188 printf("No breakpoint at %x\n", a);
1189 break; 1189 break;
1190 } 1190 }
1191 } 1191 }
1192 1192
1193 printf("Cleared breakpoint %x (", BP_NUM(bp)); 1193 printf("Cleared breakpoint %x (", BP_NUM(bp));
1194 xmon_print_symbol(bp->address, " ", ")\n"); 1194 xmon_print_symbol(bp->address, " ", ")\n");
1195 bp->enabled = 0; 1195 bp->enabled = 0;
1196 break; 1196 break;
1197 1197
1198 default: 1198 default:
1199 termch = cmd; 1199 termch = cmd;
1200 cmd = skipbl(); 1200 cmd = skipbl();
1201 if (cmd == '?') { 1201 if (cmd == '?') {
1202 printf(breakpoint_help_string); 1202 printf(breakpoint_help_string);
1203 break; 1203 break;
1204 } 1204 }
1205 termch = cmd; 1205 termch = cmd;
1206 if (!scanhex(&a)) { 1206 if (!scanhex(&a)) {
1207 /* print all breakpoints */ 1207 /* print all breakpoints */
1208 printf(" type address\n"); 1208 printf(" type address\n");
1209 if (dabr.enabled) { 1209 if (dabr.enabled) {
1210 printf(" data "REG" [", dabr.address); 1210 printf(" data "REG" [", dabr.address);
1211 if (dabr.enabled & 1) 1211 if (dabr.enabled & 1)
1212 printf("r"); 1212 printf("r");
1213 if (dabr.enabled & 2) 1213 if (dabr.enabled & 2)
1214 printf("w"); 1214 printf("w");
1215 printf("]\n"); 1215 printf("]\n");
1216 } 1216 }
1217 for (bp = bpts; bp < &bpts[NBPTS]; ++bp) { 1217 for (bp = bpts; bp < &bpts[NBPTS]; ++bp) {
1218 if (!bp->enabled) 1218 if (!bp->enabled)
1219 continue; 1219 continue;
1220 printf("%2x %s ", BP_NUM(bp), 1220 printf("%2x %s ", BP_NUM(bp),
1221 (bp->enabled & BP_IABR)? "inst": "trap"); 1221 (bp->enabled & BP_IABR)? "inst": "trap");
1222 xmon_print_symbol(bp->address, " ", "\n"); 1222 xmon_print_symbol(bp->address, " ", "\n");
1223 } 1223 }
1224 break; 1224 break;
1225 } 1225 }
1226 1226
1227 if (!check_bp_loc(a)) 1227 if (!check_bp_loc(a))
1228 break; 1228 break;
1229 bp = new_breakpoint(a); 1229 bp = new_breakpoint(a);
1230 if (bp != NULL) 1230 if (bp != NULL)
1231 bp->enabled |= BP_TRAP; 1231 bp->enabled |= BP_TRAP;
1232 break; 1232 break;
1233 } 1233 }
1234 } 1234 }
1235 1235
1236 /* Very cheap human name for vector lookup. */ 1236 /* Very cheap human name for vector lookup. */
1237 static 1237 static
1238 const char *getvecname(unsigned long vec) 1238 const char *getvecname(unsigned long vec)
1239 { 1239 {
1240 char *ret; 1240 char *ret;
1241 1241
1242 switch (vec) { 1242 switch (vec) {
1243 case 0x100: ret = "(System Reset)"; break; 1243 case 0x100: ret = "(System Reset)"; break;
1244 case 0x200: ret = "(Machine Check)"; break; 1244 case 0x200: ret = "(Machine Check)"; break;
1245 case 0x300: ret = "(Data Access)"; break; 1245 case 0x300: ret = "(Data Access)"; break;
1246 case 0x380: ret = "(Data SLB Access)"; break; 1246 case 0x380: ret = "(Data SLB Access)"; break;
1247 case 0x400: ret = "(Instruction Access)"; break; 1247 case 0x400: ret = "(Instruction Access)"; break;
1248 case 0x480: ret = "(Instruction SLB Access)"; break; 1248 case 0x480: ret = "(Instruction SLB Access)"; break;
1249 case 0x500: ret = "(Hardware Interrupt)"; break; 1249 case 0x500: ret = "(Hardware Interrupt)"; break;
1250 case 0x600: ret = "(Alignment)"; break; 1250 case 0x600: ret = "(Alignment)"; break;
1251 case 0x700: ret = "(Program Check)"; break; 1251 case 0x700: ret = "(Program Check)"; break;
1252 case 0x800: ret = "(FPU Unavailable)"; break; 1252 case 0x800: ret = "(FPU Unavailable)"; break;
1253 case 0x900: ret = "(Decrementer)"; break; 1253 case 0x900: ret = "(Decrementer)"; break;
1254 case 0xc00: ret = "(System Call)"; break; 1254 case 0xc00: ret = "(System Call)"; break;
1255 case 0xd00: ret = "(Single Step)"; break; 1255 case 0xd00: ret = "(Single Step)"; break;
1256 case 0xf00: ret = "(Performance Monitor)"; break; 1256 case 0xf00: ret = "(Performance Monitor)"; break;
1257 case 0xf20: ret = "(Altivec Unavailable)"; break; 1257 case 0xf20: ret = "(Altivec Unavailable)"; break;
1258 case 0x1300: ret = "(Instruction Breakpoint)"; break; 1258 case 0x1300: ret = "(Instruction Breakpoint)"; break;
1259 default: ret = ""; 1259 default: ret = "";
1260 } 1260 }
1261 return ret; 1261 return ret;
1262 } 1262 }
1263 1263
1264 static void get_function_bounds(unsigned long pc, unsigned long *startp, 1264 static void get_function_bounds(unsigned long pc, unsigned long *startp,
1265 unsigned long *endp) 1265 unsigned long *endp)
1266 { 1266 {
1267 unsigned long size, offset; 1267 unsigned long size, offset;
1268 const char *name; 1268 const char *name;
1269 1269
1270 *startp = *endp = 0; 1270 *startp = *endp = 0;
1271 if (pc == 0) 1271 if (pc == 0)
1272 return; 1272 return;
1273 if (setjmp(bus_error_jmp) == 0) { 1273 if (setjmp(bus_error_jmp) == 0) {
1274 catch_memory_errors = 1; 1274 catch_memory_errors = 1;
1275 sync(); 1275 sync();
1276 name = kallsyms_lookup(pc, &size, &offset, NULL, tmpstr); 1276 name = kallsyms_lookup(pc, &size, &offset, NULL, tmpstr);
1277 if (name != NULL) { 1277 if (name != NULL) {
1278 *startp = pc - offset; 1278 *startp = pc - offset;
1279 *endp = pc - offset + size; 1279 *endp = pc - offset + size;
1280 } 1280 }
1281 sync(); 1281 sync();
1282 } 1282 }
1283 catch_memory_errors = 0; 1283 catch_memory_errors = 0;
1284 } 1284 }
1285 1285
1286 static int xmon_depth_to_print = 64; 1286 static int xmon_depth_to_print = 64;
1287 1287
1288 #define LRSAVE_OFFSET (STACK_FRAME_LR_SAVE * sizeof(unsigned long)) 1288 #define LRSAVE_OFFSET (STACK_FRAME_LR_SAVE * sizeof(unsigned long))
1289 #define MARKER_OFFSET (STACK_FRAME_MARKER * sizeof(unsigned long)) 1289 #define MARKER_OFFSET (STACK_FRAME_MARKER * sizeof(unsigned long))
1290 1290
1291 #ifdef __powerpc64__ 1291 #ifdef __powerpc64__
1292 #define REGS_OFFSET 0x70 1292 #define REGS_OFFSET 0x70
1293 #else 1293 #else
1294 #define REGS_OFFSET 16 1294 #define REGS_OFFSET 16
1295 #endif 1295 #endif
1296 1296
1297 static void xmon_show_stack(unsigned long sp, unsigned long lr, 1297 static void xmon_show_stack(unsigned long sp, unsigned long lr,
1298 unsigned long pc) 1298 unsigned long pc)
1299 { 1299 {
1300 unsigned long ip; 1300 unsigned long ip;
1301 unsigned long newsp; 1301 unsigned long newsp;
1302 unsigned long marker; 1302 unsigned long marker;
1303 int count = 0; 1303 int count = 0;
1304 struct pt_regs regs; 1304 struct pt_regs regs;
1305 1305
1306 do { 1306 do {
1307 if (sp < PAGE_OFFSET) { 1307 if (sp < PAGE_OFFSET) {
1308 if (sp != 0) 1308 if (sp != 0)
1309 printf("SP (%lx) is in userspace\n", sp); 1309 printf("SP (%lx) is in userspace\n", sp);
1310 break; 1310 break;
1311 } 1311 }
1312 1312
1313 if (!mread(sp + LRSAVE_OFFSET, &ip, sizeof(unsigned long)) 1313 if (!mread(sp + LRSAVE_OFFSET, &ip, sizeof(unsigned long))
1314 || !mread(sp, &newsp, sizeof(unsigned long))) { 1314 || !mread(sp, &newsp, sizeof(unsigned long))) {
1315 printf("Couldn't read stack frame at %lx\n", sp); 1315 printf("Couldn't read stack frame at %lx\n", sp);
1316 break; 1316 break;
1317 } 1317 }
1318 1318
1319 /* 1319 /*
1320 * For the first stack frame, try to work out if 1320 * For the first stack frame, try to work out if
1321 * LR and/or the saved LR value in the bottommost 1321 * LR and/or the saved LR value in the bottommost
1322 * stack frame are valid. 1322 * stack frame are valid.
1323 */ 1323 */
1324 if ((pc | lr) != 0) { 1324 if ((pc | lr) != 0) {
1325 unsigned long fnstart, fnend; 1325 unsigned long fnstart, fnend;
1326 unsigned long nextip; 1326 unsigned long nextip;
1327 int printip = 1; 1327 int printip = 1;
1328 1328
1329 get_function_bounds(pc, &fnstart, &fnend); 1329 get_function_bounds(pc, &fnstart, &fnend);
1330 nextip = 0; 1330 nextip = 0;
1331 if (newsp > sp) 1331 if (newsp > sp)
1332 mread(newsp + LRSAVE_OFFSET, &nextip, 1332 mread(newsp + LRSAVE_OFFSET, &nextip,
1333 sizeof(unsigned long)); 1333 sizeof(unsigned long));
1334 if (lr == ip) { 1334 if (lr == ip) {
1335 if (lr < PAGE_OFFSET 1335 if (lr < PAGE_OFFSET
1336 || (fnstart <= lr && lr < fnend)) 1336 || (fnstart <= lr && lr < fnend))
1337 printip = 0; 1337 printip = 0;
1338 } else if (lr == nextip) { 1338 } else if (lr == nextip) {
1339 printip = 0; 1339 printip = 0;
1340 } else if (lr >= PAGE_OFFSET 1340 } else if (lr >= PAGE_OFFSET
1341 && !(fnstart <= lr && lr < fnend)) { 1341 && !(fnstart <= lr && lr < fnend)) {
1342 printf("[link register ] "); 1342 printf("[link register ] ");
1343 xmon_print_symbol(lr, " ", "\n"); 1343 xmon_print_symbol(lr, " ", "\n");
1344 } 1344 }
1345 if (printip) { 1345 if (printip) {
1346 printf("["REG"] ", sp); 1346 printf("["REG"] ", sp);
1347 xmon_print_symbol(ip, " ", " (unreliable)\n"); 1347 xmon_print_symbol(ip, " ", " (unreliable)\n");
1348 } 1348 }
1349 pc = lr = 0; 1349 pc = lr = 0;
1350 1350
1351 } else { 1351 } else {
1352 printf("["REG"] ", sp); 1352 printf("["REG"] ", sp);
1353 xmon_print_symbol(ip, " ", "\n"); 1353 xmon_print_symbol(ip, " ", "\n");
1354 } 1354 }
1355 1355
1356 /* Look for "regshere" marker to see if this is 1356 /* Look for "regshere" marker to see if this is
1357 an exception frame. */ 1357 an exception frame. */
1358 if (mread(sp + MARKER_OFFSET, &marker, sizeof(unsigned long)) 1358 if (mread(sp + MARKER_OFFSET, &marker, sizeof(unsigned long))
1359 && marker == STACK_FRAME_REGS_MARKER) { 1359 && marker == STACK_FRAME_REGS_MARKER) {
1360 if (mread(sp + REGS_OFFSET, &regs, sizeof(regs)) 1360 if (mread(sp + REGS_OFFSET, &regs, sizeof(regs))
1361 != sizeof(regs)) { 1361 != sizeof(regs)) {
1362 printf("Couldn't read registers at %lx\n", 1362 printf("Couldn't read registers at %lx\n",
1363 sp + REGS_OFFSET); 1363 sp + REGS_OFFSET);
1364 break; 1364 break;
1365 } 1365 }
1366 printf("--- Exception: %lx %s at ", regs.trap, 1366 printf("--- Exception: %lx %s at ", regs.trap,
1367 getvecname(TRAP(&regs))); 1367 getvecname(TRAP(&regs)));
1368 pc = regs.nip; 1368 pc = regs.nip;
1369 lr = regs.link; 1369 lr = regs.link;
1370 xmon_print_symbol(pc, " ", "\n"); 1370 xmon_print_symbol(pc, " ", "\n");
1371 } 1371 }
1372 1372
1373 if (newsp == 0) 1373 if (newsp == 0)
1374 break; 1374 break;
1375 1375
1376 sp = newsp; 1376 sp = newsp;
1377 } while (count++ < xmon_depth_to_print); 1377 } while (count++ < xmon_depth_to_print);
1378 } 1378 }
1379 1379
1380 static void backtrace(struct pt_regs *excp) 1380 static void backtrace(struct pt_regs *excp)
1381 { 1381 {
1382 unsigned long sp; 1382 unsigned long sp;
1383 1383
1384 if (scanhex(&sp)) 1384 if (scanhex(&sp))
1385 xmon_show_stack(sp, 0, 0); 1385 xmon_show_stack(sp, 0, 0);
1386 else 1386 else
1387 xmon_show_stack(excp->gpr[1], excp->link, excp->nip); 1387 xmon_show_stack(excp->gpr[1], excp->link, excp->nip);
1388 scannl(); 1388 scannl();
1389 } 1389 }
1390 1390
1391 static void print_bug_trap(struct pt_regs *regs) 1391 static void print_bug_trap(struct pt_regs *regs)
1392 { 1392 {
1393 #ifdef CONFIG_BUG 1393 #ifdef CONFIG_BUG
1394 const struct bug_entry *bug; 1394 const struct bug_entry *bug;
1395 unsigned long addr; 1395 unsigned long addr;
1396 1396
1397 if (regs->msr & MSR_PR) 1397 if (regs->msr & MSR_PR)
1398 return; /* not in kernel */ 1398 return; /* not in kernel */
1399 addr = regs->nip; /* address of trap instruction */ 1399 addr = regs->nip; /* address of trap instruction */
1400 if (addr < PAGE_OFFSET) 1400 if (addr < PAGE_OFFSET)
1401 return; 1401 return;
1402 bug = find_bug(regs->nip); 1402 bug = find_bug(regs->nip);
1403 if (bug == NULL) 1403 if (bug == NULL)
1404 return; 1404 return;
1405 if (is_warning_bug(bug)) 1405 if (is_warning_bug(bug))
1406 return; 1406 return;
1407 1407
1408 #ifdef CONFIG_DEBUG_BUGVERBOSE 1408 #ifdef CONFIG_DEBUG_BUGVERBOSE
1409 printf("kernel BUG at %s:%u!\n", 1409 printf("kernel BUG at %s:%u!\n",
1410 bug->file, bug->line); 1410 bug->file, bug->line);
1411 #else 1411 #else
1412 printf("kernel BUG at %p!\n", (void *)bug->bug_addr); 1412 printf("kernel BUG at %p!\n", (void *)bug->bug_addr);
1413 #endif 1413 #endif
1414 #endif /* CONFIG_BUG */ 1414 #endif /* CONFIG_BUG */
1415 } 1415 }
1416 1416
1417 static void excprint(struct pt_regs *fp) 1417 static void excprint(struct pt_regs *fp)
1418 { 1418 {
1419 unsigned long trap; 1419 unsigned long trap;
1420 1420
1421 #ifdef CONFIG_SMP 1421 #ifdef CONFIG_SMP
1422 printf("cpu 0x%x: ", smp_processor_id()); 1422 printf("cpu 0x%x: ", smp_processor_id());
1423 #endif /* CONFIG_SMP */ 1423 #endif /* CONFIG_SMP */
1424 1424
1425 trap = TRAP(fp); 1425 trap = TRAP(fp);
1426 printf("Vector: %lx %s at [%lx]\n", fp->trap, getvecname(trap), fp); 1426 printf("Vector: %lx %s at [%lx]\n", fp->trap, getvecname(trap), fp);
1427 printf(" pc: "); 1427 printf(" pc: ");
1428 xmon_print_symbol(fp->nip, ": ", "\n"); 1428 xmon_print_symbol(fp->nip, ": ", "\n");
1429 1429
1430 printf(" lr: ", fp->link); 1430 printf(" lr: ", fp->link);
1431 xmon_print_symbol(fp->link, ": ", "\n"); 1431 xmon_print_symbol(fp->link, ": ", "\n");
1432 1432
1433 printf(" sp: %lx\n", fp->gpr[1]); 1433 printf(" sp: %lx\n", fp->gpr[1]);
1434 printf(" msr: %lx\n", fp->msr); 1434 printf(" msr: %lx\n", fp->msr);
1435 1435
1436 if (trap == 0x300 || trap == 0x380 || trap == 0x600) { 1436 if (trap == 0x300 || trap == 0x380 || trap == 0x600) {
1437 printf(" dar: %lx\n", fp->dar); 1437 printf(" dar: %lx\n", fp->dar);
1438 if (trap != 0x380) 1438 if (trap != 0x380)
1439 printf(" dsisr: %lx\n", fp->dsisr); 1439 printf(" dsisr: %lx\n", fp->dsisr);
1440 } 1440 }
1441 1441
1442 printf(" current = 0x%lx\n", current); 1442 printf(" current = 0x%lx\n", current);
1443 #ifdef CONFIG_PPC64 1443 #ifdef CONFIG_PPC64
1444 printf(" paca = 0x%lx\n", get_paca()); 1444 printf(" paca = 0x%lx\n", get_paca());
1445 #endif 1445 #endif
1446 if (current) { 1446 if (current) {
1447 printf(" pid = %ld, comm = %s\n", 1447 printf(" pid = %ld, comm = %s\n",
1448 current->pid, current->comm); 1448 current->pid, current->comm);
1449 } 1449 }
1450 1450
1451 if (trap == 0x700) 1451 if (trap == 0x700)
1452 print_bug_trap(fp); 1452 print_bug_trap(fp);
1453 } 1453 }
1454 1454
1455 static void prregs(struct pt_regs *fp) 1455 static void prregs(struct pt_regs *fp)
1456 { 1456 {
1457 int n, trap; 1457 int n, trap;
1458 unsigned long base; 1458 unsigned long base;
1459 struct pt_regs regs; 1459 struct pt_regs regs;
1460 1460
1461 if (scanhex(&base)) { 1461 if (scanhex(&base)) {
1462 if (setjmp(bus_error_jmp) == 0) { 1462 if (setjmp(bus_error_jmp) == 0) {
1463 catch_memory_errors = 1; 1463 catch_memory_errors = 1;
1464 sync(); 1464 sync();
1465 regs = *(struct pt_regs *)base; 1465 regs = *(struct pt_regs *)base;
1466 sync(); 1466 sync();
1467 __delay(200); 1467 __delay(200);
1468 } else { 1468 } else {
1469 catch_memory_errors = 0; 1469 catch_memory_errors = 0;
1470 printf("*** Error reading registers from "REG"\n", 1470 printf("*** Error reading registers from "REG"\n",
1471 base); 1471 base);
1472 return; 1472 return;
1473 } 1473 }
1474 catch_memory_errors = 0; 1474 catch_memory_errors = 0;
1475 fp = &regs; 1475 fp = &regs;
1476 } 1476 }
1477 1477
1478 #ifdef CONFIG_PPC64 1478 #ifdef CONFIG_PPC64
1479 if (FULL_REGS(fp)) { 1479 if (FULL_REGS(fp)) {
1480 for (n = 0; n < 16; ++n) 1480 for (n = 0; n < 16; ++n)
1481 printf("R%.2ld = "REG" R%.2ld = "REG"\n", 1481 printf("R%.2ld = "REG" R%.2ld = "REG"\n",
1482 n, fp->gpr[n], n+16, fp->gpr[n+16]); 1482 n, fp->gpr[n], n+16, fp->gpr[n+16]);
1483 } else { 1483 } else {
1484 for (n = 0; n < 7; ++n) 1484 for (n = 0; n < 7; ++n)
1485 printf("R%.2ld = "REG" R%.2ld = "REG"\n", 1485 printf("R%.2ld = "REG" R%.2ld = "REG"\n",
1486 n, fp->gpr[n], n+7, fp->gpr[n+7]); 1486 n, fp->gpr[n], n+7, fp->gpr[n+7]);
1487 } 1487 }
1488 #else 1488 #else
1489 for (n = 0; n < 32; ++n) { 1489 for (n = 0; n < 32; ++n) {
1490 printf("R%.2d = %.8x%s", n, fp->gpr[n], 1490 printf("R%.2d = %.8x%s", n, fp->gpr[n],
1491 (n & 3) == 3? "\n": " "); 1491 (n & 3) == 3? "\n": " ");
1492 if (n == 12 && !FULL_REGS(fp)) { 1492 if (n == 12 && !FULL_REGS(fp)) {
1493 printf("\n"); 1493 printf("\n");
1494 break; 1494 break;
1495 } 1495 }
1496 } 1496 }
1497 #endif 1497 #endif
1498 printf("pc = "); 1498 printf("pc = ");
1499 xmon_print_symbol(fp->nip, " ", "\n"); 1499 xmon_print_symbol(fp->nip, " ", "\n");
1500 if (TRAP(fp) != 0xc00 && cpu_has_feature(CPU_FTR_CFAR)) { 1500 if (TRAP(fp) != 0xc00 && cpu_has_feature(CPU_FTR_CFAR)) {
1501 printf("cfar= "); 1501 printf("cfar= ");
1502 xmon_print_symbol(fp->orig_gpr3, " ", "\n"); 1502 xmon_print_symbol(fp->orig_gpr3, " ", "\n");
1503 } 1503 }
1504 printf("lr = "); 1504 printf("lr = ");
1505 xmon_print_symbol(fp->link, " ", "\n"); 1505 xmon_print_symbol(fp->link, " ", "\n");
1506 printf("msr = "REG" cr = %.8lx\n", fp->msr, fp->ccr); 1506 printf("msr = "REG" cr = %.8lx\n", fp->msr, fp->ccr);
1507 printf("ctr = "REG" xer = "REG" trap = %4lx\n", 1507 printf("ctr = "REG" xer = "REG" trap = %4lx\n",
1508 fp->ctr, fp->xer, fp->trap); 1508 fp->ctr, fp->xer, fp->trap);
1509 trap = TRAP(fp); 1509 trap = TRAP(fp);
1510 if (trap == 0x300 || trap == 0x380 || trap == 0x600) 1510 if (trap == 0x300 || trap == 0x380 || trap == 0x600)
1511 printf("dar = "REG" dsisr = %.8lx\n", fp->dar, fp->dsisr); 1511 printf("dar = "REG" dsisr = %.8lx\n", fp->dar, fp->dsisr);
1512 } 1512 }
1513 1513
1514 static void cacheflush(void) 1514 static void cacheflush(void)
1515 { 1515 {
1516 int cmd; 1516 int cmd;
1517 unsigned long nflush; 1517 unsigned long nflush;
1518 1518
1519 cmd = inchar(); 1519 cmd = inchar();
1520 if (cmd != 'i') 1520 if (cmd != 'i')
1521 termch = cmd; 1521 termch = cmd;
1522 scanhex((void *)&adrs); 1522 scanhex((void *)&adrs);
1523 if (termch != '\n') 1523 if (termch != '\n')
1524 termch = 0; 1524 termch = 0;
1525 nflush = 1; 1525 nflush = 1;
1526 scanhex(&nflush); 1526 scanhex(&nflush);
1527 nflush = (nflush + L1_CACHE_BYTES - 1) / L1_CACHE_BYTES; 1527 nflush = (nflush + L1_CACHE_BYTES - 1) / L1_CACHE_BYTES;
1528 if (setjmp(bus_error_jmp) == 0) { 1528 if (setjmp(bus_error_jmp) == 0) {
1529 catch_memory_errors = 1; 1529 catch_memory_errors = 1;
1530 sync(); 1530 sync();
1531 1531
1532 if (cmd != 'i') { 1532 if (cmd != 'i') {
1533 for (; nflush > 0; --nflush, adrs += L1_CACHE_BYTES) 1533 for (; nflush > 0; --nflush, adrs += L1_CACHE_BYTES)
1534 cflush((void *) adrs); 1534 cflush((void *) adrs);
1535 } else { 1535 } else {
1536 for (; nflush > 0; --nflush, adrs += L1_CACHE_BYTES) 1536 for (; nflush > 0; --nflush, adrs += L1_CACHE_BYTES)
1537 cinval((void *) adrs); 1537 cinval((void *) adrs);
1538 } 1538 }
1539 sync(); 1539 sync();
1540 /* wait a little while to see if we get a machine check */ 1540 /* wait a little while to see if we get a machine check */
1541 __delay(200); 1541 __delay(200);
1542 } 1542 }
1543 catch_memory_errors = 0; 1543 catch_memory_errors = 0;
1544 } 1544 }
1545 1545
1546 static unsigned long 1546 static unsigned long
1547 read_spr(int n) 1547 read_spr(int n)
1548 { 1548 {
1549 unsigned int instrs[2]; 1549 unsigned int instrs[2];
1550 unsigned long (*code)(void); 1550 unsigned long (*code)(void);
1551 unsigned long ret = -1UL; 1551 unsigned long ret = -1UL;
1552 #ifdef CONFIG_PPC64 1552 #ifdef CONFIG_PPC64
1553 unsigned long opd[3]; 1553 unsigned long opd[3];
1554 1554
1555 opd[0] = (unsigned long)instrs; 1555 opd[0] = (unsigned long)instrs;
1556 opd[1] = 0; 1556 opd[1] = 0;
1557 opd[2] = 0; 1557 opd[2] = 0;
1558 code = (unsigned long (*)(void)) opd; 1558 code = (unsigned long (*)(void)) opd;
1559 #else 1559 #else
1560 code = (unsigned long (*)(void)) instrs; 1560 code = (unsigned long (*)(void)) instrs;
1561 #endif 1561 #endif
1562 1562
1563 /* mfspr r3,n; blr */ 1563 /* mfspr r3,n; blr */
1564 instrs[0] = 0x7c6002a6 + ((n & 0x1F) << 16) + ((n & 0x3e0) << 6); 1564 instrs[0] = 0x7c6002a6 + ((n & 0x1F) << 16) + ((n & 0x3e0) << 6);
1565 instrs[1] = 0x4e800020; 1565 instrs[1] = 0x4e800020;
1566 store_inst(instrs); 1566 store_inst(instrs);
1567 store_inst(instrs+1); 1567 store_inst(instrs+1);
1568 1568
1569 if (setjmp(bus_error_jmp) == 0) { 1569 if (setjmp(bus_error_jmp) == 0) {
1570 catch_memory_errors = 1; 1570 catch_memory_errors = 1;
1571 sync(); 1571 sync();
1572 1572
1573 ret = code(); 1573 ret = code();
1574 1574
1575 sync(); 1575 sync();
1576 /* wait a little while to see if we get a machine check */ 1576 /* wait a little while to see if we get a machine check */
1577 __delay(200); 1577 __delay(200);
1578 n = size; 1578 n = size;
1579 } 1579 }
1580 1580
1581 return ret; 1581 return ret;
1582 } 1582 }
1583 1583
1584 static void 1584 static void
1585 write_spr(int n, unsigned long val) 1585 write_spr(int n, unsigned long val)
1586 { 1586 {
1587 unsigned int instrs[2]; 1587 unsigned int instrs[2];
1588 unsigned long (*code)(unsigned long); 1588 unsigned long (*code)(unsigned long);
1589 #ifdef CONFIG_PPC64 1589 #ifdef CONFIG_PPC64
1590 unsigned long opd[3]; 1590 unsigned long opd[3];
1591 1591
1592 opd[0] = (unsigned long)instrs; 1592 opd[0] = (unsigned long)instrs;
1593 opd[1] = 0; 1593 opd[1] = 0;
1594 opd[2] = 0; 1594 opd[2] = 0;
1595 code = (unsigned long (*)(unsigned long)) opd; 1595 code = (unsigned long (*)(unsigned long)) opd;
1596 #else 1596 #else
1597 code = (unsigned long (*)(unsigned long)) instrs; 1597 code = (unsigned long (*)(unsigned long)) instrs;
1598 #endif 1598 #endif
1599 1599
1600 instrs[0] = 0x7c6003a6 + ((n & 0x1F) << 16) + ((n & 0x3e0) << 6); 1600 instrs[0] = 0x7c6003a6 + ((n & 0x1F) << 16) + ((n & 0x3e0) << 6);
1601 instrs[1] = 0x4e800020; 1601 instrs[1] = 0x4e800020;
1602 store_inst(instrs); 1602 store_inst(instrs);
1603 store_inst(instrs+1); 1603 store_inst(instrs+1);
1604 1604
1605 if (setjmp(bus_error_jmp) == 0) { 1605 if (setjmp(bus_error_jmp) == 0) {
1606 catch_memory_errors = 1; 1606 catch_memory_errors = 1;
1607 sync(); 1607 sync();
1608 1608
1609 code(val); 1609 code(val);
1610 1610
1611 sync(); 1611 sync();
1612 /* wait a little while to see if we get a machine check */ 1612 /* wait a little while to see if we get a machine check */
1613 __delay(200); 1613 __delay(200);
1614 n = size; 1614 n = size;
1615 } 1615 }
1616 } 1616 }
1617 1617
1618 static unsigned long regno; 1618 static unsigned long regno;
1619 extern char exc_prolog; 1619 extern char exc_prolog;
1620 extern char dec_exc; 1620 extern char dec_exc;
1621 1621
1622 static void super_regs(void) 1622 static void super_regs(void)
1623 { 1623 {
1624 int cmd; 1624 int cmd;
1625 unsigned long val; 1625 unsigned long val;
1626 1626
1627 cmd = skipbl(); 1627 cmd = skipbl();
1628 if (cmd == '\n') { 1628 if (cmd == '\n') {
1629 unsigned long sp, toc; 1629 unsigned long sp, toc;
1630 asm("mr %0,1" : "=r" (sp) :); 1630 asm("mr %0,1" : "=r" (sp) :);
1631 asm("mr %0,2" : "=r" (toc) :); 1631 asm("mr %0,2" : "=r" (toc) :);
1632 1632
1633 printf("msr = "REG" sprg0= "REG"\n", 1633 printf("msr = "REG" sprg0= "REG"\n",
1634 mfmsr(), mfspr(SPRN_SPRG0)); 1634 mfmsr(), mfspr(SPRN_SPRG0));
1635 printf("pvr = "REG" sprg1= "REG"\n", 1635 printf("pvr = "REG" sprg1= "REG"\n",
1636 mfspr(SPRN_PVR), mfspr(SPRN_SPRG1)); 1636 mfspr(SPRN_PVR), mfspr(SPRN_SPRG1));
1637 printf("dec = "REG" sprg2= "REG"\n", 1637 printf("dec = "REG" sprg2= "REG"\n",
1638 mfspr(SPRN_DEC), mfspr(SPRN_SPRG2)); 1638 mfspr(SPRN_DEC), mfspr(SPRN_SPRG2));
1639 printf("sp = "REG" sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3)); 1639 printf("sp = "REG" sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3));
1640 printf("toc = "REG" dar = "REG"\n", toc, mfspr(SPRN_DAR)); 1640 printf("toc = "REG" dar = "REG"\n", toc, mfspr(SPRN_DAR));
1641 #ifdef CONFIG_PPC_ISERIES 1641 #ifdef CONFIG_PPC_ISERIES
1642 if (firmware_has_feature(FW_FEATURE_ISERIES)) { 1642 if (firmware_has_feature(FW_FEATURE_ISERIES)) {
1643 struct paca_struct *ptrPaca; 1643 struct paca_struct *ptrPaca;
1644 struct lppaca *ptrLpPaca; 1644 struct lppaca *ptrLpPaca;
1645 1645
1646 /* Dump out relevant Paca data areas. */ 1646 /* Dump out relevant Paca data areas. */
1647 printf("Paca: \n"); 1647 printf("Paca: \n");
1648 ptrPaca = get_paca(); 1648 ptrPaca = get_paca();
1649 1649
1650 printf(" Local Processor Control Area (LpPaca): \n"); 1650 printf(" Local Processor Control Area (LpPaca): \n");
1651 ptrLpPaca = ptrPaca->lppaca_ptr; 1651 ptrLpPaca = ptrPaca->lppaca_ptr;
1652 printf(" Saved Srr0=%.16lx Saved Srr1=%.16lx \n", 1652 printf(" Saved Srr0=%.16lx Saved Srr1=%.16lx \n",
1653 ptrLpPaca->saved_srr0, ptrLpPaca->saved_srr1); 1653 ptrLpPaca->saved_srr0, ptrLpPaca->saved_srr1);
1654 printf(" Saved Gpr3=%.16lx Saved Gpr4=%.16lx \n", 1654 printf(" Saved Gpr3=%.16lx Saved Gpr4=%.16lx \n",
1655 ptrLpPaca->saved_gpr3, ptrLpPaca->saved_gpr4); 1655 ptrLpPaca->saved_gpr3, ptrLpPaca->saved_gpr4);
1656 printf(" Saved Gpr5=%.16lx \n", 1656 printf(" Saved Gpr5=%.16lx \n",
1657 ptrLpPaca->gpr5_dword.saved_gpr5); 1657 ptrLpPaca->gpr5_dword.saved_gpr5);
1658 } 1658 }
1659 #endif 1659 #endif
1660 1660
1661 return; 1661 return;
1662 } 1662 }
1663 1663
1664 scanhex(&regno); 1664 scanhex(&regno);
1665 switch (cmd) { 1665 switch (cmd) {
1666 case 'w': 1666 case 'w':
1667 val = read_spr(regno); 1667 val = read_spr(regno);
1668 scanhex(&val); 1668 scanhex(&val);
1669 write_spr(regno, val); 1669 write_spr(regno, val);
1670 /* fall through */ 1670 /* fall through */
1671 case 'r': 1671 case 'r':
1672 printf("spr %lx = %lx\n", regno, read_spr(regno)); 1672 printf("spr %lx = %lx\n", regno, read_spr(regno));
1673 break; 1673 break;
1674 } 1674 }
1675 scannl(); 1675 scannl();
1676 } 1676 }
1677 1677
1678 /* 1678 /*
1679 * Stuff for reading and writing memory safely 1679 * Stuff for reading and writing memory safely
1680 */ 1680 */
1681 static int 1681 static int
1682 mread(unsigned long adrs, void *buf, int size) 1682 mread(unsigned long adrs, void *buf, int size)
1683 { 1683 {
1684 volatile int n; 1684 volatile int n;
1685 char *p, *q; 1685 char *p, *q;
1686 1686
1687 n = 0; 1687 n = 0;
1688 if (setjmp(bus_error_jmp) == 0) { 1688 if (setjmp(bus_error_jmp) == 0) {
1689 catch_memory_errors = 1; 1689 catch_memory_errors = 1;
1690 sync(); 1690 sync();
1691 p = (char *)adrs; 1691 p = (char *)adrs;
1692 q = (char *)buf; 1692 q = (char *)buf;
1693 switch (size) { 1693 switch (size) {
1694 case 2: 1694 case 2:
1695 *(u16 *)q = *(u16 *)p; 1695 *(u16 *)q = *(u16 *)p;
1696 break; 1696 break;
1697 case 4: 1697 case 4:
1698 *(u32 *)q = *(u32 *)p; 1698 *(u32 *)q = *(u32 *)p;
1699 break; 1699 break;
1700 case 8: 1700 case 8:
1701 *(u64 *)q = *(u64 *)p; 1701 *(u64 *)q = *(u64 *)p;
1702 break; 1702 break;
1703 default: 1703 default:
1704 for( ; n < size; ++n) { 1704 for( ; n < size; ++n) {
1705 *q++ = *p++; 1705 *q++ = *p++;
1706 sync(); 1706 sync();
1707 } 1707 }
1708 } 1708 }
1709 sync(); 1709 sync();
1710 /* wait a little while to see if we get a machine check */ 1710 /* wait a little while to see if we get a machine check */
1711 __delay(200); 1711 __delay(200);
1712 n = size; 1712 n = size;
1713 } 1713 }
1714 catch_memory_errors = 0; 1714 catch_memory_errors = 0;
1715 return n; 1715 return n;
1716 } 1716 }
1717 1717
1718 static int 1718 static int
1719 mwrite(unsigned long adrs, void *buf, int size) 1719 mwrite(unsigned long adrs, void *buf, int size)
1720 { 1720 {
1721 volatile int n; 1721 volatile int n;
1722 char *p, *q; 1722 char *p, *q;
1723 1723
1724 n = 0; 1724 n = 0;
1725 if (setjmp(bus_error_jmp) == 0) { 1725 if (setjmp(bus_error_jmp) == 0) {
1726 catch_memory_errors = 1; 1726 catch_memory_errors = 1;
1727 sync(); 1727 sync();
1728 p = (char *) adrs; 1728 p = (char *) adrs;
1729 q = (char *) buf; 1729 q = (char *) buf;
1730 switch (size) { 1730 switch (size) {
1731 case 2: 1731 case 2:
1732 *(u16 *)p = *(u16 *)q; 1732 *(u16 *)p = *(u16 *)q;
1733 break; 1733 break;
1734 case 4: 1734 case 4:
1735 *(u32 *)p = *(u32 *)q; 1735 *(u32 *)p = *(u32 *)q;
1736 break; 1736 break;
1737 case 8: 1737 case 8:
1738 *(u64 *)p = *(u64 *)q; 1738 *(u64 *)p = *(u64 *)q;
1739 break; 1739 break;
1740 default: 1740 default:
1741 for ( ; n < size; ++n) { 1741 for ( ; n < size; ++n) {
1742 *p++ = *q++; 1742 *p++ = *q++;
1743 sync(); 1743 sync();
1744 } 1744 }
1745 } 1745 }
1746 sync(); 1746 sync();
1747 /* wait a little while to see if we get a machine check */ 1747 /* wait a little while to see if we get a machine check */
1748 __delay(200); 1748 __delay(200);
1749 n = size; 1749 n = size;
1750 } else { 1750 } else {
1751 printf("*** Error writing address %x\n", adrs + n); 1751 printf("*** Error writing address %x\n", adrs + n);
1752 } 1752 }
1753 catch_memory_errors = 0; 1753 catch_memory_errors = 0;
1754 return n; 1754 return n;
1755 } 1755 }
1756 1756
1757 static int fault_type; 1757 static int fault_type;
1758 static int fault_except; 1758 static int fault_except;
1759 static char *fault_chars[] = { "--", "**", "##" }; 1759 static char *fault_chars[] = { "--", "**", "##" };
1760 1760
1761 static int handle_fault(struct pt_regs *regs) 1761 static int handle_fault(struct pt_regs *regs)
1762 { 1762 {
1763 fault_except = TRAP(regs); 1763 fault_except = TRAP(regs);
1764 switch (TRAP(regs)) { 1764 switch (TRAP(regs)) {
1765 case 0x200: 1765 case 0x200:
1766 fault_type = 0; 1766 fault_type = 0;
1767 break; 1767 break;
1768 case 0x300: 1768 case 0x300:
1769 case 0x380: 1769 case 0x380:
1770 fault_type = 1; 1770 fault_type = 1;
1771 break; 1771 break;
1772 default: 1772 default:
1773 fault_type = 2; 1773 fault_type = 2;
1774 } 1774 }
1775 1775
1776 longjmp(bus_error_jmp, 1); 1776 longjmp(bus_error_jmp, 1);
1777 1777
1778 return 0; 1778 return 0;
1779 } 1779 }
1780 1780
1781 #define SWAP(a, b, t) ((t) = (a), (a) = (b), (b) = (t)) 1781 #define SWAP(a, b, t) ((t) = (a), (a) = (b), (b) = (t))
1782 1782
1783 static void 1783 static void
1784 byterev(unsigned char *val, int size) 1784 byterev(unsigned char *val, int size)
1785 { 1785 {
1786 int t; 1786 int t;
1787 1787
1788 switch (size) { 1788 switch (size) {
1789 case 2: 1789 case 2:
1790 SWAP(val[0], val[1], t); 1790 SWAP(val[0], val[1], t);
1791 break; 1791 break;
1792 case 4: 1792 case 4:
1793 SWAP(val[0], val[3], t); 1793 SWAP(val[0], val[3], t);
1794 SWAP(val[1], val[2], t); 1794 SWAP(val[1], val[2], t);
1795 break; 1795 break;
1796 case 8: /* is there really any use for this? */ 1796 case 8: /* is there really any use for this? */
1797 SWAP(val[0], val[7], t); 1797 SWAP(val[0], val[7], t);
1798 SWAP(val[1], val[6], t); 1798 SWAP(val[1], val[6], t);
1799 SWAP(val[2], val[5], t); 1799 SWAP(val[2], val[5], t);
1800 SWAP(val[3], val[4], t); 1800 SWAP(val[3], val[4], t);
1801 break; 1801 break;
1802 } 1802 }
1803 } 1803 }
1804 1804
1805 static int brev; 1805 static int brev;
1806 static int mnoread; 1806 static int mnoread;
1807 1807
1808 static char *memex_help_string = 1808 static char *memex_help_string =
1809 "Memory examine command usage:\n" 1809 "Memory examine command usage:\n"
1810 "m [addr] [flags] examine/change memory\n" 1810 "m [addr] [flags] examine/change memory\n"
1811 " addr is optional. will start where left off.\n" 1811 " addr is optional. will start where left off.\n"
1812 " flags may include chars from this set:\n" 1812 " flags may include chars from this set:\n"
1813 " b modify by bytes (default)\n" 1813 " b modify by bytes (default)\n"
1814 " w modify by words (2 byte)\n" 1814 " w modify by words (2 byte)\n"
1815 " l modify by longs (4 byte)\n" 1815 " l modify by longs (4 byte)\n"
1816 " d modify by doubleword (8 byte)\n" 1816 " d modify by doubleword (8 byte)\n"
1817 " r toggle reverse byte order mode\n" 1817 " r toggle reverse byte order mode\n"
1818 " n do not read memory (for i/o spaces)\n" 1818 " n do not read memory (for i/o spaces)\n"
1819 " . ok to read (default)\n" 1819 " . ok to read (default)\n"
1820 "NOTE: flags are saved as defaults\n" 1820 "NOTE: flags are saved as defaults\n"
1821 ""; 1821 "";
1822 1822
1823 static char *memex_subcmd_help_string = 1823 static char *memex_subcmd_help_string =
1824 "Memory examine subcommands:\n" 1824 "Memory examine subcommands:\n"
1825 " hexval write this val to current location\n" 1825 " hexval write this val to current location\n"
1826 " 'string' write chars from string to this location\n" 1826 " 'string' write chars from string to this location\n"
1827 " ' increment address\n" 1827 " ' increment address\n"
1828 " ^ decrement address\n" 1828 " ^ decrement address\n"
1829 " / increment addr by 0x10. //=0x100, ///=0x1000, etc\n" 1829 " / increment addr by 0x10. //=0x100, ///=0x1000, etc\n"
1830 " \\ decrement addr by 0x10. \\\\=0x100, \\\\\\=0x1000, etc\n" 1830 " \\ decrement addr by 0x10. \\\\=0x100, \\\\\\=0x1000, etc\n"
1831 " ` clear no-read flag\n" 1831 " ` clear no-read flag\n"
1832 " ; stay at this addr\n" 1832 " ; stay at this addr\n"
1833 " v change to byte mode\n" 1833 " v change to byte mode\n"
1834 " w change to word (2 byte) mode\n" 1834 " w change to word (2 byte) mode\n"
1835 " l change to long (4 byte) mode\n" 1835 " l change to long (4 byte) mode\n"
1836 " u change to doubleword (8 byte) mode\n" 1836 " u change to doubleword (8 byte) mode\n"
1837 " m addr change current addr\n" 1837 " m addr change current addr\n"
1838 " n toggle no-read flag\n" 1838 " n toggle no-read flag\n"
1839 " r toggle byte reverse flag\n" 1839 " r toggle byte reverse flag\n"
1840 " < count back up count bytes\n" 1840 " < count back up count bytes\n"
1841 " > count skip forward count bytes\n" 1841 " > count skip forward count bytes\n"
1842 " x exit this mode\n" 1842 " x exit this mode\n"
1843 ""; 1843 "";
1844 1844
1845 static void 1845 static void
1846 memex(void) 1846 memex(void)
1847 { 1847 {
1848 int cmd, inc, i, nslash; 1848 int cmd, inc, i, nslash;
1849 unsigned long n; 1849 unsigned long n;
1850 unsigned char val[16]; 1850 unsigned char val[16];
1851 1851
1852 scanhex((void *)&adrs); 1852 scanhex((void *)&adrs);
1853 cmd = skipbl(); 1853 cmd = skipbl();
1854 if (cmd == '?') { 1854 if (cmd == '?') {
1855 printf(memex_help_string); 1855 printf(memex_help_string);
1856 return; 1856 return;
1857 } else { 1857 } else {
1858 termch = cmd; 1858 termch = cmd;
1859 } 1859 }
1860 last_cmd = "m\n"; 1860 last_cmd = "m\n";
1861 while ((cmd = skipbl()) != '\n') { 1861 while ((cmd = skipbl()) != '\n') {
1862 switch( cmd ){ 1862 switch( cmd ){
1863 case 'b': size = 1; break; 1863 case 'b': size = 1; break;
1864 case 'w': size = 2; break; 1864 case 'w': size = 2; break;
1865 case 'l': size = 4; break; 1865 case 'l': size = 4; break;
1866 case 'd': size = 8; break; 1866 case 'd': size = 8; break;
1867 case 'r': brev = !brev; break; 1867 case 'r': brev = !brev; break;
1868 case 'n': mnoread = 1; break; 1868 case 'n': mnoread = 1; break;
1869 case '.': mnoread = 0; break; 1869 case '.': mnoread = 0; break;
1870 } 1870 }
1871 } 1871 }
1872 if( size <= 0 ) 1872 if( size <= 0 )
1873 size = 1; 1873 size = 1;
1874 else if( size > 8 ) 1874 else if( size > 8 )
1875 size = 8; 1875 size = 8;
1876 for(;;){ 1876 for(;;){
1877 if (!mnoread) 1877 if (!mnoread)
1878 n = mread(adrs, val, size); 1878 n = mread(adrs, val, size);
1879 printf(REG"%c", adrs, brev? 'r': ' '); 1879 printf(REG"%c", adrs, brev? 'r': ' ');
1880 if (!mnoread) { 1880 if (!mnoread) {
1881 if (brev) 1881 if (brev)
1882 byterev(val, size); 1882 byterev(val, size);
1883 putchar(' '); 1883 putchar(' ');
1884 for (i = 0; i < n; ++i) 1884 for (i = 0; i < n; ++i)
1885 printf("%.2x", val[i]); 1885 printf("%.2x", val[i]);
1886 for (; i < size; ++i) 1886 for (; i < size; ++i)
1887 printf("%s", fault_chars[fault_type]); 1887 printf("%s", fault_chars[fault_type]);
1888 } 1888 }
1889 putchar(' '); 1889 putchar(' ');
1890 inc = size; 1890 inc = size;
1891 nslash = 0; 1891 nslash = 0;
1892 for(;;){ 1892 for(;;){
1893 if( scanhex(&n) ){ 1893 if( scanhex(&n) ){
1894 for (i = 0; i < size; ++i) 1894 for (i = 0; i < size; ++i)
1895 val[i] = n >> (i * 8); 1895 val[i] = n >> (i * 8);
1896 if (!brev) 1896 if (!brev)
1897 byterev(val, size); 1897 byterev(val, size);
1898 mwrite(adrs, val, size); 1898 mwrite(adrs, val, size);
1899 inc = size; 1899 inc = size;
1900 } 1900 }
1901 cmd = skipbl(); 1901 cmd = skipbl();
1902 if (cmd == '\n') 1902 if (cmd == '\n')
1903 break; 1903 break;
1904 inc = 0; 1904 inc = 0;
1905 switch (cmd) { 1905 switch (cmd) {
1906 case '\'': 1906 case '\'':
1907 for(;;){ 1907 for(;;){
1908 n = inchar(); 1908 n = inchar();
1909 if( n == '\\' ) 1909 if( n == '\\' )
1910 n = bsesc(); 1910 n = bsesc();
1911 else if( n == '\'' ) 1911 else if( n == '\'' )
1912 break; 1912 break;
1913 for (i = 0; i < size; ++i) 1913 for (i = 0; i < size; ++i)
1914 val[i] = n >> (i * 8); 1914 val[i] = n >> (i * 8);
1915 if (!brev) 1915 if (!brev)
1916 byterev(val, size); 1916 byterev(val, size);
1917 mwrite(adrs, val, size); 1917 mwrite(adrs, val, size);
1918 adrs += size; 1918 adrs += size;
1919 } 1919 }
1920 adrs -= size; 1920 adrs -= size;
1921 inc = size; 1921 inc = size;
1922 break; 1922 break;
1923 case ',': 1923 case ',':
1924 adrs += size; 1924 adrs += size;
1925 break; 1925 break;
1926 case '.': 1926 case '.':
1927 mnoread = 0; 1927 mnoread = 0;
1928 break; 1928 break;
1929 case ';': 1929 case ';':
1930 break; 1930 break;
1931 case 'x': 1931 case 'x':
1932 case EOF: 1932 case EOF:
1933 scannl(); 1933 scannl();
1934 return; 1934 return;
1935 case 'b': 1935 case 'b':
1936 case 'v': 1936 case 'v':
1937 size = 1; 1937 size = 1;
1938 break; 1938 break;
1939 case 'w': 1939 case 'w':
1940 size = 2; 1940 size = 2;
1941 break; 1941 break;
1942 case 'l': 1942 case 'l':
1943 size = 4; 1943 size = 4;
1944 break; 1944 break;
1945 case 'u': 1945 case 'u':
1946 size = 8; 1946 size = 8;
1947 break; 1947 break;
1948 case '^': 1948 case '^':
1949 adrs -= size; 1949 adrs -= size;
1950 break; 1950 break;
1951 break; 1951 break;
1952 case '/': 1952 case '/':
1953 if (nslash > 0) 1953 if (nslash > 0)
1954 adrs -= 1 << nslash; 1954 adrs -= 1 << nslash;
1955 else 1955 else
1956 nslash = 0; 1956 nslash = 0;
1957 nslash += 4; 1957 nslash += 4;
1958 adrs += 1 << nslash; 1958 adrs += 1 << nslash;
1959 break; 1959 break;
1960 case '\\': 1960 case '\\':
1961 if (nslash < 0) 1961 if (nslash < 0)
1962 adrs += 1 << -nslash; 1962 adrs += 1 << -nslash;
1963 else 1963 else
1964 nslash = 0; 1964 nslash = 0;
1965 nslash -= 4; 1965 nslash -= 4;
1966 adrs -= 1 << -nslash; 1966 adrs -= 1 << -nslash;
1967 break; 1967 break;
1968 case 'm': 1968 case 'm':
1969 scanhex((void *)&adrs); 1969 scanhex((void *)&adrs);
1970 break; 1970 break;
1971 case 'n': 1971 case 'n':
1972 mnoread = 1; 1972 mnoread = 1;
1973 break; 1973 break;
1974 case 'r': 1974 case 'r':
1975 brev = !brev; 1975 brev = !brev;
1976 break; 1976 break;
1977 case '<': 1977 case '<':
1978 n = size; 1978 n = size;
1979 scanhex(&n); 1979 scanhex(&n);
1980 adrs -= n; 1980 adrs -= n;
1981 break; 1981 break;
1982 case '>': 1982 case '>':
1983 n = size; 1983 n = size;
1984 scanhex(&n); 1984 scanhex(&n);
1985 adrs += n; 1985 adrs += n;
1986 break; 1986 break;
1987 case '?': 1987 case '?':
1988 printf(memex_subcmd_help_string); 1988 printf(memex_subcmd_help_string);
1989 break; 1989 break;
1990 } 1990 }
1991 } 1991 }
1992 adrs += inc; 1992 adrs += inc;
1993 } 1993 }
1994 } 1994 }
1995 1995
1996 static int 1996 static int
1997 bsesc(void) 1997 bsesc(void)
1998 { 1998 {
1999 int c; 1999 int c;
2000 2000
2001 c = inchar(); 2001 c = inchar();
2002 switch( c ){ 2002 switch( c ){
2003 case 'n': c = '\n'; break; 2003 case 'n': c = '\n'; break;
2004 case 'r': c = '\r'; break; 2004 case 'r': c = '\r'; break;
2005 case 'b': c = '\b'; break; 2005 case 'b': c = '\b'; break;
2006 case 't': c = '\t'; break; 2006 case 't': c = '\t'; break;
2007 } 2007 }
2008 return c; 2008 return c;
2009 } 2009 }
2010 2010
2011 static void xmon_rawdump (unsigned long adrs, long ndump) 2011 static void xmon_rawdump (unsigned long adrs, long ndump)
2012 { 2012 {
2013 long n, m, r, nr; 2013 long n, m, r, nr;
2014 unsigned char temp[16]; 2014 unsigned char temp[16];
2015 2015
2016 for (n = ndump; n > 0;) { 2016 for (n = ndump; n > 0;) {
2017 r = n < 16? n: 16; 2017 r = n < 16? n: 16;
2018 nr = mread(adrs, temp, r); 2018 nr = mread(adrs, temp, r);
2019 adrs += nr; 2019 adrs += nr;
2020 for (m = 0; m < r; ++m) { 2020 for (m = 0; m < r; ++m) {
2021 if (m < nr) 2021 if (m < nr)
2022 printf("%.2x", temp[m]); 2022 printf("%.2x", temp[m]);
2023 else 2023 else
2024 printf("%s", fault_chars[fault_type]); 2024 printf("%s", fault_chars[fault_type]);
2025 } 2025 }
2026 n -= r; 2026 n -= r;
2027 if (nr < r) 2027 if (nr < r)
2028 break; 2028 break;
2029 } 2029 }
2030 printf("\n"); 2030 printf("\n");
2031 } 2031 }
2032 2032
2033 #define isxdigit(c) (('0' <= (c) && (c) <= '9') \ 2033 #define isxdigit(c) (('0' <= (c) && (c) <= '9') \
2034 || ('a' <= (c) && (c) <= 'f') \ 2034 || ('a' <= (c) && (c) <= 'f') \
2035 || ('A' <= (c) && (c) <= 'F')) 2035 || ('A' <= (c) && (c) <= 'F'))
2036 static void 2036 static void
2037 dump(void) 2037 dump(void)
2038 { 2038 {
2039 int c; 2039 int c;
2040 2040
2041 c = inchar(); 2041 c = inchar();
2042 if ((isxdigit(c) && c != 'f' && c != 'd') || c == '\n') 2042 if ((isxdigit(c) && c != 'f' && c != 'd') || c == '\n')
2043 termch = c; 2043 termch = c;
2044 scanhex((void *)&adrs); 2044 scanhex((void *)&adrs);
2045 if (termch != '\n') 2045 if (termch != '\n')
2046 termch = 0; 2046 termch = 0;
2047 if (c == 'i') { 2047 if (c == 'i') {
2048 scanhex(&nidump); 2048 scanhex(&nidump);
2049 if (nidump == 0) 2049 if (nidump == 0)
2050 nidump = 16; 2050 nidump = 16;
2051 else if (nidump > MAX_DUMP) 2051 else if (nidump > MAX_DUMP)
2052 nidump = MAX_DUMP; 2052 nidump = MAX_DUMP;
2053 adrs += ppc_inst_dump(adrs, nidump, 1); 2053 adrs += ppc_inst_dump(adrs, nidump, 1);
2054 last_cmd = "di\n"; 2054 last_cmd = "di\n";
2055 } else if (c == 'l') { 2055 } else if (c == 'l') {
2056 dump_log_buf(); 2056 dump_log_buf();
2057 } else if (c == 'r') { 2057 } else if (c == 'r') {
2058 scanhex(&ndump); 2058 scanhex(&ndump);
2059 if (ndump == 0) 2059 if (ndump == 0)
2060 ndump = 64; 2060 ndump = 64;
2061 xmon_rawdump(adrs, ndump); 2061 xmon_rawdump(adrs, ndump);
2062 adrs += ndump; 2062 adrs += ndump;
2063 last_cmd = "dr\n"; 2063 last_cmd = "dr\n";
2064 } else { 2064 } else {
2065 scanhex(&ndump); 2065 scanhex(&ndump);
2066 if (ndump == 0) 2066 if (ndump == 0)
2067 ndump = 64; 2067 ndump = 64;
2068 else if (ndump > MAX_DUMP) 2068 else if (ndump > MAX_DUMP)
2069 ndump = MAX_DUMP; 2069 ndump = MAX_DUMP;
2070 prdump(adrs, ndump); 2070 prdump(adrs, ndump);
2071 adrs += ndump; 2071 adrs += ndump;
2072 last_cmd = "d\n"; 2072 last_cmd = "d\n";
2073 } 2073 }
2074 } 2074 }
2075 2075
2076 static void 2076 static void
2077 prdump(unsigned long adrs, long ndump) 2077 prdump(unsigned long adrs, long ndump)
2078 { 2078 {
2079 long n, m, c, r, nr; 2079 long n, m, c, r, nr;
2080 unsigned char temp[16]; 2080 unsigned char temp[16];
2081 2081
2082 for (n = ndump; n > 0;) { 2082 for (n = ndump; n > 0;) {
2083 printf(REG, adrs); 2083 printf(REG, adrs);
2084 putchar(' '); 2084 putchar(' ');
2085 r = n < 16? n: 16; 2085 r = n < 16? n: 16;
2086 nr = mread(adrs, temp, r); 2086 nr = mread(adrs, temp, r);
2087 adrs += nr; 2087 adrs += nr;
2088 for (m = 0; m < r; ++m) { 2088 for (m = 0; m < r; ++m) {
2089 if ((m & (sizeof(long) - 1)) == 0 && m > 0) 2089 if ((m & (sizeof(long) - 1)) == 0 && m > 0)
2090 putchar(' '); 2090 putchar(' ');
2091 if (m < nr) 2091 if (m < nr)
2092 printf("%.2x", temp[m]); 2092 printf("%.2x", temp[m]);
2093 else 2093 else
2094 printf("%s", fault_chars[fault_type]); 2094 printf("%s", fault_chars[fault_type]);
2095 } 2095 }
2096 for (; m < 16; ++m) { 2096 for (; m < 16; ++m) {
2097 if ((m & (sizeof(long) - 1)) == 0) 2097 if ((m & (sizeof(long) - 1)) == 0)
2098 putchar(' '); 2098 putchar(' ');
2099 printf(" "); 2099 printf(" ");
2100 } 2100 }
2101 printf(" |"); 2101 printf(" |");
2102 for (m = 0; m < r; ++m) { 2102 for (m = 0; m < r; ++m) {
2103 if (m < nr) { 2103 if (m < nr) {
2104 c = temp[m]; 2104 c = temp[m];
2105 putchar(' ' <= c && c <= '~'? c: '.'); 2105 putchar(' ' <= c && c <= '~'? c: '.');
2106 } else 2106 } else
2107 putchar(' '); 2107 putchar(' ');
2108 } 2108 }
2109 n -= r; 2109 n -= r;
2110 for (; m < 16; ++m) 2110 for (; m < 16; ++m)
2111 putchar(' '); 2111 putchar(' ');
2112 printf("|\n"); 2112 printf("|\n");
2113 if (nr < r) 2113 if (nr < r)
2114 break; 2114 break;
2115 } 2115 }
2116 } 2116 }
2117 2117
2118 typedef int (*instruction_dump_func)(unsigned long inst, unsigned long addr); 2118 typedef int (*instruction_dump_func)(unsigned long inst, unsigned long addr);
2119 2119
2120 static int 2120 static int
2121 generic_inst_dump(unsigned long adr, long count, int praddr, 2121 generic_inst_dump(unsigned long adr, long count, int praddr,
2122 instruction_dump_func dump_func) 2122 instruction_dump_func dump_func)
2123 { 2123 {
2124 int nr, dotted; 2124 int nr, dotted;
2125 unsigned long first_adr; 2125 unsigned long first_adr;
2126 unsigned long inst, last_inst = 0; 2126 unsigned long inst, last_inst = 0;
2127 unsigned char val[4]; 2127 unsigned char val[4];
2128 2128
2129 dotted = 0; 2129 dotted = 0;
2130 for (first_adr = adr; count > 0; --count, adr += 4) { 2130 for (first_adr = adr; count > 0; --count, adr += 4) {
2131 nr = mread(adr, val, 4); 2131 nr = mread(adr, val, 4);
2132 if (nr == 0) { 2132 if (nr == 0) {
2133 if (praddr) { 2133 if (praddr) {
2134 const char *x = fault_chars[fault_type]; 2134 const char *x = fault_chars[fault_type];
2135 printf(REG" %s%s%s%s\n", adr, x, x, x, x); 2135 printf(REG" %s%s%s%s\n", adr, x, x, x, x);
2136 } 2136 }
2137 break; 2137 break;
2138 } 2138 }
2139 inst = GETWORD(val); 2139 inst = GETWORD(val);
2140 if (adr > first_adr && inst == last_inst) { 2140 if (adr > first_adr && inst == last_inst) {
2141 if (!dotted) { 2141 if (!dotted) {
2142 printf(" ...\n"); 2142 printf(" ...\n");
2143 dotted = 1; 2143 dotted = 1;
2144 } 2144 }
2145 continue; 2145 continue;
2146 } 2146 }
2147 dotted = 0; 2147 dotted = 0;
2148 last_inst = inst; 2148 last_inst = inst;
2149 if (praddr) 2149 if (praddr)
2150 printf(REG" %.8x", adr, inst); 2150 printf(REG" %.8x", adr, inst);
2151 printf("\t"); 2151 printf("\t");
2152 dump_func(inst, adr); 2152 dump_func(inst, adr);
2153 printf("\n"); 2153 printf("\n");
2154 } 2154 }
2155 return adr - first_adr; 2155 return adr - first_adr;
2156 } 2156 }
2157 2157
2158 static int 2158 static int
2159 ppc_inst_dump(unsigned long adr, long count, int praddr) 2159 ppc_inst_dump(unsigned long adr, long count, int praddr)
2160 { 2160 {
2161 return generic_inst_dump(adr, count, praddr, print_insn_powerpc); 2161 return generic_inst_dump(adr, count, praddr, print_insn_powerpc);
2162 } 2162 }
2163 2163
2164 void 2164 void
2165 print_address(unsigned long addr) 2165 print_address(unsigned long addr)
2166 { 2166 {
2167 xmon_print_symbol(addr, "\t# ", ""); 2167 xmon_print_symbol(addr, "\t# ", "");
2168 } 2168 }
2169 2169
2170 void 2170 void
2171 dump_log_buf(void) 2171 dump_log_buf(void)
2172 { 2172 {
2173 const unsigned long size = 128; 2173 const unsigned long size = 128;
2174 unsigned long end, addr; 2174 unsigned long end, addr;
2175 unsigned char buf[size + 1]; 2175 unsigned char buf[size + 1];
2176 2176
2177 addr = 0; 2177 addr = 0;
2178 buf[size] = '\0'; 2178 buf[size] = '\0';
2179 2179
2180 if (setjmp(bus_error_jmp) != 0) { 2180 if (setjmp(bus_error_jmp) != 0) {
2181 printf("Unable to lookup symbol __log_buf!\n"); 2181 printf("Unable to lookup symbol __log_buf!\n");
2182 return; 2182 return;
2183 } 2183 }
2184 2184
2185 catch_memory_errors = 1; 2185 catch_memory_errors = 1;
2186 sync(); 2186 sync();
2187 addr = kallsyms_lookup_name("__log_buf"); 2187 addr = kallsyms_lookup_name("__log_buf");
2188 2188
2189 if (! addr) 2189 if (! addr)
2190 printf("Symbol __log_buf not found!\n"); 2190 printf("Symbol __log_buf not found!\n");
2191 else { 2191 else {
2192 end = addr + (1 << CONFIG_LOG_BUF_SHIFT); 2192 end = addr + (1 << CONFIG_LOG_BUF_SHIFT);
2193 while (addr < end) { 2193 while (addr < end) {
2194 if (! mread(addr, buf, size)) { 2194 if (! mread(addr, buf, size)) {
2195 printf("Can't read memory at address 0x%lx\n", addr); 2195 printf("Can't read memory at address 0x%lx\n", addr);
2196 break; 2196 break;
2197 } 2197 }
2198 2198
2199 printf("%s", buf); 2199 printf("%s", buf);
2200 2200
2201 if (strlen(buf) < size) 2201 if (strlen(buf) < size)
2202 break; 2202 break;
2203 2203
2204 addr += size; 2204 addr += size;
2205 } 2205 }
2206 } 2206 }
2207 2207
2208 sync(); 2208 sync();
2209 /* wait a little while to see if we get a machine check */ 2209 /* wait a little while to see if we get a machine check */
2210 __delay(200); 2210 __delay(200);
2211 catch_memory_errors = 0; 2211 catch_memory_errors = 0;
2212 } 2212 }
2213 2213
2214 /* 2214 /*
2215 * Memory operations - move, set, print differences 2215 * Memory operations - move, set, print differences
2216 */ 2216 */
2217 static unsigned long mdest; /* destination address */ 2217 static unsigned long mdest; /* destination address */
2218 static unsigned long msrc; /* source address */ 2218 static unsigned long msrc; /* source address */
2219 static unsigned long mval; /* byte value to set memory to */ 2219 static unsigned long mval; /* byte value to set memory to */
2220 static unsigned long mcount; /* # bytes to affect */ 2220 static unsigned long mcount; /* # bytes to affect */
2221 static unsigned long mdiffs; /* max # differences to print */ 2221 static unsigned long mdiffs; /* max # differences to print */
2222 2222
2223 static void 2223 static void
2224 memops(int cmd) 2224 memops(int cmd)
2225 { 2225 {
2226 scanhex((void *)&mdest); 2226 scanhex((void *)&mdest);
2227 if( termch != '\n' ) 2227 if( termch != '\n' )
2228 termch = 0; 2228 termch = 0;
2229 scanhex((void *)(cmd == 's'? &mval: &msrc)); 2229 scanhex((void *)(cmd == 's'? &mval: &msrc));
2230 if( termch != '\n' ) 2230 if( termch != '\n' )
2231 termch = 0; 2231 termch = 0;
2232 scanhex((void *)&mcount); 2232 scanhex((void *)&mcount);
2233 switch( cmd ){ 2233 switch( cmd ){
2234 case 'm': 2234 case 'm':
2235 memmove((void *)mdest, (void *)msrc, mcount); 2235 memmove((void *)mdest, (void *)msrc, mcount);
2236 break; 2236 break;
2237 case 's': 2237 case 's':
2238 memset((void *)mdest, mval, mcount); 2238 memset((void *)mdest, mval, mcount);
2239 break; 2239 break;
2240 case 'd': 2240 case 'd':
2241 if( termch != '\n' ) 2241 if( termch != '\n' )
2242 termch = 0; 2242 termch = 0;
2243 scanhex((void *)&mdiffs); 2243 scanhex((void *)&mdiffs);
2244 memdiffs((unsigned char *)mdest, (unsigned char *)msrc, mcount, mdiffs); 2244 memdiffs((unsigned char *)mdest, (unsigned char *)msrc, mcount, mdiffs);
2245 break; 2245 break;
2246 } 2246 }
2247 } 2247 }
2248 2248
2249 static void 2249 static void
2250 memdiffs(unsigned char *p1, unsigned char *p2, unsigned nb, unsigned maxpr) 2250 memdiffs(unsigned char *p1, unsigned char *p2, unsigned nb, unsigned maxpr)
2251 { 2251 {
2252 unsigned n, prt; 2252 unsigned n, prt;
2253 2253
2254 prt = 0; 2254 prt = 0;
2255 for( n = nb; n > 0; --n ) 2255 for( n = nb; n > 0; --n )
2256 if( *p1++ != *p2++ ) 2256 if( *p1++ != *p2++ )
2257 if( ++prt <= maxpr ) 2257 if( ++prt <= maxpr )
2258 printf("%.16x %.2x # %.16x %.2x\n", p1 - 1, 2258 printf("%.16x %.2x # %.16x %.2x\n", p1 - 1,
2259 p1[-1], p2 - 1, p2[-1]); 2259 p1[-1], p2 - 1, p2[-1]);
2260 if( prt > maxpr ) 2260 if( prt > maxpr )
2261 printf("Total of %d differences\n", prt); 2261 printf("Total of %d differences\n", prt);
2262 } 2262 }
2263 2263
2264 static unsigned mend; 2264 static unsigned mend;
2265 static unsigned mask; 2265 static unsigned mask;
2266 2266
2267 static void 2267 static void
2268 memlocate(void) 2268 memlocate(void)
2269 { 2269 {
2270 unsigned a, n; 2270 unsigned a, n;
2271 unsigned char val[4]; 2271 unsigned char val[4];
2272 2272
2273 last_cmd = "ml"; 2273 last_cmd = "ml";
2274 scanhex((void *)&mdest); 2274 scanhex((void *)&mdest);
2275 if (termch != '\n') { 2275 if (termch != '\n') {
2276 termch = 0; 2276 termch = 0;
2277 scanhex((void *)&mend); 2277 scanhex((void *)&mend);
2278 if (termch != '\n') { 2278 if (termch != '\n') {
2279 termch = 0; 2279 termch = 0;
2280 scanhex((void *)&mval); 2280 scanhex((void *)&mval);
2281 mask = ~0; 2281 mask = ~0;
2282 if (termch != '\n') termch = 0; 2282 if (termch != '\n') termch = 0;
2283 scanhex((void *)&mask); 2283 scanhex((void *)&mask);
2284 } 2284 }
2285 } 2285 }
2286 n = 0; 2286 n = 0;
2287 for (a = mdest; a < mend; a += 4) { 2287 for (a = mdest; a < mend; a += 4) {
2288 if (mread(a, val, 4) == 4 2288 if (mread(a, val, 4) == 4
2289 && ((GETWORD(val) ^ mval) & mask) == 0) { 2289 && ((GETWORD(val) ^ mval) & mask) == 0) {
2290 printf("%.16x: %.16x\n", a, GETWORD(val)); 2290 printf("%.16x: %.16x\n", a, GETWORD(val));
2291 if (++n >= 10) 2291 if (++n >= 10)
2292 break; 2292 break;
2293 } 2293 }
2294 } 2294 }
2295 } 2295 }
2296 2296
2297 static unsigned long mskip = 0x1000; 2297 static unsigned long mskip = 0x1000;
2298 static unsigned long mlim = 0xffffffff; 2298 static unsigned long mlim = 0xffffffff;
2299 2299
2300 static void 2300 static void
2301 memzcan(void) 2301 memzcan(void)
2302 { 2302 {
2303 unsigned char v; 2303 unsigned char v;
2304 unsigned a; 2304 unsigned a;
2305 int ok, ook; 2305 int ok, ook;
2306 2306
2307 scanhex(&mdest); 2307 scanhex(&mdest);
2308 if (termch != '\n') termch = 0; 2308 if (termch != '\n') termch = 0;
2309 scanhex(&mskip); 2309 scanhex(&mskip);
2310 if (termch != '\n') termch = 0; 2310 if (termch != '\n') termch = 0;
2311 scanhex(&mlim); 2311 scanhex(&mlim);
2312 ook = 0; 2312 ook = 0;
2313 for (a = mdest; a < mlim; a += mskip) { 2313 for (a = mdest; a < mlim; a += mskip) {
2314 ok = mread(a, &v, 1); 2314 ok = mread(a, &v, 1);
2315 if (ok && !ook) { 2315 if (ok && !ook) {
2316 printf("%.8x .. ", a); 2316 printf("%.8x .. ", a);
2317 } else if (!ok && ook) 2317 } else if (!ok && ook)
2318 printf("%.8x\n", a - mskip); 2318 printf("%.8x\n", a - mskip);
2319 ook = ok; 2319 ook = ok;
2320 if (a + mskip < a) 2320 if (a + mskip < a)
2321 break; 2321 break;
2322 } 2322 }
2323 if (ook) 2323 if (ook)
2324 printf("%.8x\n", a - mskip); 2324 printf("%.8x\n", a - mskip);
2325 } 2325 }
2326 2326
2327 static void proccall(void) 2327 static void proccall(void)
2328 { 2328 {
2329 unsigned long args[8]; 2329 unsigned long args[8];
2330 unsigned long ret; 2330 unsigned long ret;
2331 int i; 2331 int i;
2332 typedef unsigned long (*callfunc_t)(unsigned long, unsigned long, 2332 typedef unsigned long (*callfunc_t)(unsigned long, unsigned long,
2333 unsigned long, unsigned long, unsigned long, 2333 unsigned long, unsigned long, unsigned long,
2334 unsigned long, unsigned long, unsigned long); 2334 unsigned long, unsigned long, unsigned long);
2335 callfunc_t func; 2335 callfunc_t func;
2336 2336
2337 if (!scanhex(&adrs)) 2337 if (!scanhex(&adrs))
2338 return; 2338 return;
2339 if (termch != '\n') 2339 if (termch != '\n')
2340 termch = 0; 2340 termch = 0;
2341 for (i = 0; i < 8; ++i) 2341 for (i = 0; i < 8; ++i)
2342 args[i] = 0; 2342 args[i] = 0;
2343 for (i = 0; i < 8; ++i) { 2343 for (i = 0; i < 8; ++i) {
2344 if (!scanhex(&args[i]) || termch == '\n') 2344 if (!scanhex(&args[i]) || termch == '\n')
2345 break; 2345 break;
2346 termch = 0; 2346 termch = 0;
2347 } 2347 }
2348 func = (callfunc_t) adrs; 2348 func = (callfunc_t) adrs;
2349 ret = 0; 2349 ret = 0;
2350 if (setjmp(bus_error_jmp) == 0) { 2350 if (setjmp(bus_error_jmp) == 0) {
2351 catch_memory_errors = 1; 2351 catch_memory_errors = 1;
2352 sync(); 2352 sync();
2353 ret = func(args[0], args[1], args[2], args[3], 2353 ret = func(args[0], args[1], args[2], args[3],
2354 args[4], args[5], args[6], args[7]); 2354 args[4], args[5], args[6], args[7]);
2355 sync(); 2355 sync();
2356 printf("return value is %x\n", ret); 2356 printf("return value is %x\n", ret);
2357 } else { 2357 } else {
2358 printf("*** %x exception occurred\n", fault_except); 2358 printf("*** %x exception occurred\n", fault_except);
2359 } 2359 }
2360 catch_memory_errors = 0; 2360 catch_memory_errors = 0;
2361 } 2361 }
2362 2362
2363 /* Input scanning routines */ 2363 /* Input scanning routines */
2364 int 2364 int
2365 skipbl(void) 2365 skipbl(void)
2366 { 2366 {
2367 int c; 2367 int c;
2368 2368
2369 if( termch != 0 ){ 2369 if( termch != 0 ){
2370 c = termch; 2370 c = termch;
2371 termch = 0; 2371 termch = 0;
2372 } else 2372 } else
2373 c = inchar(); 2373 c = inchar();
2374 while( c == ' ' || c == '\t' ) 2374 while( c == ' ' || c == '\t' )
2375 c = inchar(); 2375 c = inchar();
2376 return c; 2376 return c;
2377 } 2377 }
2378 2378
2379 #define N_PTREGS 44 2379 #define N_PTREGS 44
2380 static char *regnames[N_PTREGS] = { 2380 static char *regnames[N_PTREGS] = {
2381 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", 2381 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
2382 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", 2382 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
2383 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", 2383 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
2384 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", 2384 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
2385 "pc", "msr", "or3", "ctr", "lr", "xer", "ccr", 2385 "pc", "msr", "or3", "ctr", "lr", "xer", "ccr",
2386 #ifdef CONFIG_PPC64 2386 #ifdef CONFIG_PPC64
2387 "softe", 2387 "softe",
2388 #else 2388 #else
2389 "mq", 2389 "mq",
2390 #endif 2390 #endif
2391 "trap", "dar", "dsisr", "res" 2391 "trap", "dar", "dsisr", "res"
2392 }; 2392 };
2393 2393
2394 int 2394 int
2395 scanhex(unsigned long *vp) 2395 scanhex(unsigned long *vp)
2396 { 2396 {
2397 int c, d; 2397 int c, d;
2398 unsigned long v; 2398 unsigned long v;
2399 2399
2400 c = skipbl(); 2400 c = skipbl();
2401 if (c == '%') { 2401 if (c == '%') {
2402 /* parse register name */ 2402 /* parse register name */
2403 char regname[8]; 2403 char regname[8];
2404 int i; 2404 int i;
2405 2405
2406 for (i = 0; i < sizeof(regname) - 1; ++i) { 2406 for (i = 0; i < sizeof(regname) - 1; ++i) {
2407 c = inchar(); 2407 c = inchar();
2408 if (!isalnum(c)) { 2408 if (!isalnum(c)) {
2409 termch = c; 2409 termch = c;
2410 break; 2410 break;
2411 } 2411 }
2412 regname[i] = c; 2412 regname[i] = c;
2413 } 2413 }
2414 regname[i] = 0; 2414 regname[i] = 0;
2415 for (i = 0; i < N_PTREGS; ++i) { 2415 for (i = 0; i < N_PTREGS; ++i) {
2416 if (strcmp(regnames[i], regname) == 0) { 2416 if (strcmp(regnames[i], regname) == 0) {
2417 if (xmon_regs == NULL) { 2417 if (xmon_regs == NULL) {
2418 printf("regs not available\n"); 2418 printf("regs not available\n");
2419 return 0; 2419 return 0;
2420 } 2420 }
2421 *vp = ((unsigned long *)xmon_regs)[i]; 2421 *vp = ((unsigned long *)xmon_regs)[i];
2422 return 1; 2422 return 1;
2423 } 2423 }
2424 } 2424 }
2425 printf("invalid register name '%%%s'\n", regname); 2425 printf("invalid register name '%%%s'\n", regname);
2426 return 0; 2426 return 0;
2427 } 2427 }
2428 2428
2429 /* skip leading "0x" if any */ 2429 /* skip leading "0x" if any */
2430 2430
2431 if (c == '0') { 2431 if (c == '0') {
2432 c = inchar(); 2432 c = inchar();
2433 if (c == 'x') { 2433 if (c == 'x') {
2434 c = inchar(); 2434 c = inchar();
2435 } else { 2435 } else {
2436 d = hexdigit(c); 2436 d = hexdigit(c);
2437 if (d == EOF) { 2437 if (d == EOF) {
2438 termch = c; 2438 termch = c;
2439 *vp = 0; 2439 *vp = 0;
2440 return 1; 2440 return 1;
2441 } 2441 }
2442 } 2442 }
2443 } else if (c == '$') { 2443 } else if (c == '$') {
2444 int i; 2444 int i;
2445 for (i=0; i<63; i++) { 2445 for (i=0; i<63; i++) {
2446 c = inchar(); 2446 c = inchar();
2447 if (isspace(c)) { 2447 if (isspace(c)) {
2448 termch = c; 2448 termch = c;
2449 break; 2449 break;
2450 } 2450 }
2451 tmpstr[i] = c; 2451 tmpstr[i] = c;
2452 } 2452 }
2453 tmpstr[i++] = 0; 2453 tmpstr[i++] = 0;
2454 *vp = 0; 2454 *vp = 0;
2455 if (setjmp(bus_error_jmp) == 0) { 2455 if (setjmp(bus_error_jmp) == 0) {
2456 catch_memory_errors = 1; 2456 catch_memory_errors = 1;
2457 sync(); 2457 sync();
2458 *vp = kallsyms_lookup_name(tmpstr); 2458 *vp = kallsyms_lookup_name(tmpstr);
2459 sync(); 2459 sync();
2460 } 2460 }
2461 catch_memory_errors = 0; 2461 catch_memory_errors = 0;
2462 if (!(*vp)) { 2462 if (!(*vp)) {
2463 printf("unknown symbol '%s'\n", tmpstr); 2463 printf("unknown symbol '%s'\n", tmpstr);
2464 return 0; 2464 return 0;
2465 } 2465 }
2466 return 1; 2466 return 1;
2467 } 2467 }
2468 2468
2469 d = hexdigit(c); 2469 d = hexdigit(c);
2470 if (d == EOF) { 2470 if (d == EOF) {
2471 termch = c; 2471 termch = c;
2472 return 0; 2472 return 0;
2473 } 2473 }
2474 v = 0; 2474 v = 0;
2475 do { 2475 do {
2476 v = (v << 4) + d; 2476 v = (v << 4) + d;
2477 c = inchar(); 2477 c = inchar();
2478 d = hexdigit(c); 2478 d = hexdigit(c);
2479 } while (d != EOF); 2479 } while (d != EOF);
2480 termch = c; 2480 termch = c;
2481 *vp = v; 2481 *vp = v;
2482 return 1; 2482 return 1;
2483 } 2483 }
2484 2484
2485 static void 2485 static void
2486 scannl(void) 2486 scannl(void)
2487 { 2487 {
2488 int c; 2488 int c;
2489 2489
2490 c = termch; 2490 c = termch;
2491 termch = 0; 2491 termch = 0;
2492 while( c != '\n' ) 2492 while( c != '\n' )
2493 c = inchar(); 2493 c = inchar();
2494 } 2494 }
2495 2495
2496 static int hexdigit(int c) 2496 static int hexdigit(int c)
2497 { 2497 {
2498 if( '0' <= c && c <= '9' ) 2498 if( '0' <= c && c <= '9' )
2499 return c - '0'; 2499 return c - '0';
2500 if( 'A' <= c && c <= 'F' ) 2500 if( 'A' <= c && c <= 'F' )
2501 return c - ('A' - 10); 2501 return c - ('A' - 10);
2502 if( 'a' <= c && c <= 'f' ) 2502 if( 'a' <= c && c <= 'f' )
2503 return c - ('a' - 10); 2503 return c - ('a' - 10);
2504 return EOF; 2504 return EOF;
2505 } 2505 }
2506 2506
2507 void 2507 void
2508 getstring(char *s, int size) 2508 getstring(char *s, int size)
2509 { 2509 {
2510 int c; 2510 int c;
2511 2511
2512 c = skipbl(); 2512 c = skipbl();
2513 do { 2513 do {
2514 if( size > 1 ){ 2514 if( size > 1 ){
2515 *s++ = c; 2515 *s++ = c;
2516 --size; 2516 --size;
2517 } 2517 }
2518 c = inchar(); 2518 c = inchar();
2519 } while( c != ' ' && c != '\t' && c != '\n' ); 2519 } while( c != ' ' && c != '\t' && c != '\n' );
2520 termch = c; 2520 termch = c;
2521 *s = 0; 2521 *s = 0;
2522 } 2522 }
2523 2523
2524 static char line[256]; 2524 static char line[256];
2525 static char *lineptr; 2525 static char *lineptr;
2526 2526
2527 static void 2527 static void
2528 flush_input(void) 2528 flush_input(void)
2529 { 2529 {
2530 lineptr = NULL; 2530 lineptr = NULL;
2531 } 2531 }
2532 2532
2533 static int 2533 static int
2534 inchar(void) 2534 inchar(void)
2535 { 2535 {
2536 if (lineptr == NULL || *lineptr == 0) { 2536 if (lineptr == NULL || *lineptr == 0) {
2537 if (xmon_gets(line, sizeof(line)) == NULL) { 2537 if (xmon_gets(line, sizeof(line)) == NULL) {
2538 lineptr = NULL; 2538 lineptr = NULL;
2539 return EOF; 2539 return EOF;
2540 } 2540 }
2541 lineptr = line; 2541 lineptr = line;
2542 } 2542 }
2543 return *lineptr++; 2543 return *lineptr++;
2544 } 2544 }
2545 2545
2546 static void 2546 static void
2547 take_input(char *str) 2547 take_input(char *str)
2548 { 2548 {
2549 lineptr = str; 2549 lineptr = str;
2550 } 2550 }
2551 2551
2552 2552
2553 static void 2553 static void
2554 symbol_lookup(void) 2554 symbol_lookup(void)
2555 { 2555 {
2556 int type = inchar(); 2556 int type = inchar();
2557 unsigned long addr; 2557 unsigned long addr;
2558 static char tmp[64]; 2558 static char tmp[64];
2559 2559
2560 switch (type) { 2560 switch (type) {
2561 case 'a': 2561 case 'a':
2562 if (scanhex(&addr)) 2562 if (scanhex(&addr))
2563 xmon_print_symbol(addr, ": ", "\n"); 2563 xmon_print_symbol(addr, ": ", "\n");
2564 termch = 0; 2564 termch = 0;
2565 break; 2565 break;
2566 case 's': 2566 case 's':
2567 getstring(tmp, 64); 2567 getstring(tmp, 64);
2568 if (setjmp(bus_error_jmp) == 0) { 2568 if (setjmp(bus_error_jmp) == 0) {
2569 catch_memory_errors = 1; 2569 catch_memory_errors = 1;
2570 sync(); 2570 sync();
2571 addr = kallsyms_lookup_name(tmp); 2571 addr = kallsyms_lookup_name(tmp);
2572 if (addr) 2572 if (addr)
2573 printf("%s: %lx\n", tmp, addr); 2573 printf("%s: %lx\n", tmp, addr);
2574 else 2574 else
2575 printf("Symbol '%s' not found.\n", tmp); 2575 printf("Symbol '%s' not found.\n", tmp);
2576 sync(); 2576 sync();
2577 } 2577 }
2578 catch_memory_errors = 0; 2578 catch_memory_errors = 0;
2579 termch = 0; 2579 termch = 0;
2580 break; 2580 break;
2581 } 2581 }
2582 } 2582 }
2583 2583
2584 2584
2585 /* Print an address in numeric and symbolic form (if possible) */ 2585 /* Print an address in numeric and symbolic form (if possible) */
2586 static void xmon_print_symbol(unsigned long address, const char *mid, 2586 static void xmon_print_symbol(unsigned long address, const char *mid,
2587 const char *after) 2587 const char *after)
2588 { 2588 {
2589 char *modname; 2589 char *modname;
2590 const char *name = NULL; 2590 const char *name = NULL;
2591 unsigned long offset, size; 2591 unsigned long offset, size;
2592 2592
2593 printf(REG, address); 2593 printf(REG, address);
2594 if (setjmp(bus_error_jmp) == 0) { 2594 if (setjmp(bus_error_jmp) == 0) {
2595 catch_memory_errors = 1; 2595 catch_memory_errors = 1;
2596 sync(); 2596 sync();
2597 name = kallsyms_lookup(address, &size, &offset, &modname, 2597 name = kallsyms_lookup(address, &size, &offset, &modname,
2598 tmpstr); 2598 tmpstr);
2599 sync(); 2599 sync();
2600 /* wait a little while to see if we get a machine check */ 2600 /* wait a little while to see if we get a machine check */
2601 __delay(200); 2601 __delay(200);
2602 } 2602 }
2603 2603
2604 catch_memory_errors = 0; 2604 catch_memory_errors = 0;
2605 2605
2606 if (name) { 2606 if (name) {
2607 printf("%s%s+%#lx/%#lx", mid, name, offset, size); 2607 printf("%s%s+%#lx/%#lx", mid, name, offset, size);
2608 if (modname) 2608 if (modname)
2609 printf(" [%s]", modname); 2609 printf(" [%s]", modname);
2610 } 2610 }
2611 printf("%s", after); 2611 printf("%s", after);
2612 } 2612 }
2613 2613
2614 #ifdef CONFIG_PPC_BOOK3S_64 2614 #ifdef CONFIG_PPC_BOOK3S_64
2615 static void dump_slb(void) 2615 static void dump_slb(void)
2616 { 2616 {
2617 int i; 2617 int i;
2618 unsigned long esid,vsid,valid; 2618 unsigned long esid,vsid,valid;
2619 unsigned long llp; 2619 unsigned long llp;
2620 2620
2621 printf("SLB contents of cpu %x\n", smp_processor_id()); 2621 printf("SLB contents of cpu %x\n", smp_processor_id());
2622 2622
2623 for (i = 0; i < mmu_slb_size; i++) { 2623 for (i = 0; i < mmu_slb_size; i++) {
2624 asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i)); 2624 asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i));
2625 asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i)); 2625 asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i));
2626 valid = (esid & SLB_ESID_V); 2626 valid = (esid & SLB_ESID_V);
2627 if (valid | esid | vsid) { 2627 if (valid | esid | vsid) {
2628 printf("%02d %016lx %016lx", i, esid, vsid); 2628 printf("%02d %016lx %016lx", i, esid, vsid);
2629 if (valid) { 2629 if (valid) {
2630 llp = vsid & SLB_VSID_LLP; 2630 llp = vsid & SLB_VSID_LLP;
2631 if (vsid & SLB_VSID_B_1T) { 2631 if (vsid & SLB_VSID_B_1T) {
2632 printf(" 1T ESID=%9lx VSID=%13lx LLP:%3lx \n", 2632 printf(" 1T ESID=%9lx VSID=%13lx LLP:%3lx \n",
2633 GET_ESID_1T(esid), 2633 GET_ESID_1T(esid),
2634 (vsid & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T, 2634 (vsid & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T,
2635 llp); 2635 llp);
2636 } else { 2636 } else {
2637 printf(" 256M ESID=%9lx VSID=%13lx LLP:%3lx \n", 2637 printf(" 256M ESID=%9lx VSID=%13lx LLP:%3lx \n",
2638 GET_ESID(esid), 2638 GET_ESID(esid),
2639 (vsid & ~SLB_VSID_B) >> SLB_VSID_SHIFT, 2639 (vsid & ~SLB_VSID_B) >> SLB_VSID_SHIFT,
2640 llp); 2640 llp);
2641 } 2641 }
2642 } else 2642 } else
2643 printf("\n"); 2643 printf("\n");
2644 } 2644 }
2645 } 2645 }
2646 } 2646 }
2647 2647
2648 static void dump_stab(void) 2648 static void dump_stab(void)
2649 { 2649 {
2650 int i; 2650 int i;
2651 unsigned long *tmp = (unsigned long *)get_paca()->stab_addr; 2651 unsigned long *tmp = (unsigned long *)get_paca()->stab_addr;
2652 2652
2653 printf("Segment table contents of cpu %x\n", smp_processor_id()); 2653 printf("Segment table contents of cpu %x\n", smp_processor_id());
2654 2654
2655 for (i = 0; i < PAGE_SIZE/16; i++) { 2655 for (i = 0; i < PAGE_SIZE/16; i++) {
2656 unsigned long a, b; 2656 unsigned long a, b;
2657 2657
2658 a = *tmp++; 2658 a = *tmp++;
2659 b = *tmp++; 2659 b = *tmp++;
2660 2660
2661 if (a || b) { 2661 if (a || b) {
2662 printf("%03d %016lx ", i, a); 2662 printf("%03d %016lx ", i, a);
2663 printf("%016lx\n", b); 2663 printf("%016lx\n", b);
2664 } 2664 }
2665 } 2665 }
2666 } 2666 }
2667 2667
2668 void dump_segments(void) 2668 void dump_segments(void)
2669 { 2669 {
2670 if (mmu_has_feature(MMU_FTR_SLB)) 2670 if (mmu_has_feature(MMU_FTR_SLB))
2671 dump_slb(); 2671 dump_slb();
2672 else 2672 else
2673 dump_stab(); 2673 dump_stab();
2674 } 2674 }
2675 #endif 2675 #endif
2676 2676
2677 #ifdef CONFIG_PPC_STD_MMU_32 2677 #ifdef CONFIG_PPC_STD_MMU_32
2678 void dump_segments(void) 2678 void dump_segments(void)
2679 { 2679 {
2680 int i; 2680 int i;
2681 2681
2682 printf("sr0-15 ="); 2682 printf("sr0-15 =");
2683 for (i = 0; i < 16; ++i) 2683 for (i = 0; i < 16; ++i)
2684 printf(" %x", mfsrin(i)); 2684 printf(" %x", mfsrin(i));
2685 printf("\n"); 2685 printf("\n");
2686 } 2686 }
2687 #endif 2687 #endif
2688 2688
2689 #ifdef CONFIG_44x 2689 #ifdef CONFIG_44x
2690 static void dump_tlb_44x(void) 2690 static void dump_tlb_44x(void)
2691 { 2691 {
2692 int i; 2692 int i;
2693 2693
2694 for (i = 0; i < PPC44x_TLB_SIZE; i++) { 2694 for (i = 0; i < PPC44x_TLB_SIZE; i++) {
2695 unsigned long w0,w1,w2; 2695 unsigned long w0,w1,w2;
2696 asm volatile("tlbre %0,%1,0" : "=r" (w0) : "r" (i)); 2696 asm volatile("tlbre %0,%1,0" : "=r" (w0) : "r" (i));
2697 asm volatile("tlbre %0,%1,1" : "=r" (w1) : "r" (i)); 2697 asm volatile("tlbre %0,%1,1" : "=r" (w1) : "r" (i));
2698 asm volatile("tlbre %0,%1,2" : "=r" (w2) : "r" (i)); 2698 asm volatile("tlbre %0,%1,2" : "=r" (w2) : "r" (i));
2699 printf("[%02x] %08x %08x %08x ", i, w0, w1, w2); 2699 printf("[%02x] %08x %08x %08x ", i, w0, w1, w2);
2700 if (w0 & PPC44x_TLB_VALID) { 2700 if (w0 & PPC44x_TLB_VALID) {
2701 printf("V %08x -> %01x%08x %c%c%c%c%c", 2701 printf("V %08x -> %01x%08x %c%c%c%c%c",
2702 w0 & PPC44x_TLB_EPN_MASK, 2702 w0 & PPC44x_TLB_EPN_MASK,
2703 w1 & PPC44x_TLB_ERPN_MASK, 2703 w1 & PPC44x_TLB_ERPN_MASK,
2704 w1 & PPC44x_TLB_RPN_MASK, 2704 w1 & PPC44x_TLB_RPN_MASK,
2705 (w2 & PPC44x_TLB_W) ? 'W' : 'w', 2705 (w2 & PPC44x_TLB_W) ? 'W' : 'w',
2706 (w2 & PPC44x_TLB_I) ? 'I' : 'i', 2706 (w2 & PPC44x_TLB_I) ? 'I' : 'i',
2707 (w2 & PPC44x_TLB_M) ? 'M' : 'm', 2707 (w2 & PPC44x_TLB_M) ? 'M' : 'm',
2708 (w2 & PPC44x_TLB_G) ? 'G' : 'g', 2708 (w2 & PPC44x_TLB_G) ? 'G' : 'g',
2709 (w2 & PPC44x_TLB_E) ? 'E' : 'e'); 2709 (w2 & PPC44x_TLB_E) ? 'E' : 'e');
2710 } 2710 }
2711 printf("\n"); 2711 printf("\n");
2712 } 2712 }
2713 } 2713 }
2714 #endif /* CONFIG_44x */ 2714 #endif /* CONFIG_44x */
2715 2715
2716 #ifdef CONFIG_PPC_BOOK3E 2716 #ifdef CONFIG_PPC_BOOK3E
2717 static void dump_tlb_book3e(void) 2717 static void dump_tlb_book3e(void)
2718 { 2718 {
2719 u32 mmucfg, pidmask, lpidmask; 2719 u32 mmucfg, pidmask, lpidmask;
2720 u64 ramask; 2720 u64 ramask;
2721 int i, tlb, ntlbs, pidsz, lpidsz, rasz, lrat = 0; 2721 int i, tlb, ntlbs, pidsz, lpidsz, rasz, lrat = 0;
2722 int mmu_version; 2722 int mmu_version;
2723 static const char *pgsz_names[] = { 2723 static const char *pgsz_names[] = {
2724 " 1K", 2724 " 1K",
2725 " 2K", 2725 " 2K",
2726 " 4K", 2726 " 4K",
2727 " 8K", 2727 " 8K",
2728 " 16K", 2728 " 16K",
2729 " 32K", 2729 " 32K",
2730 " 64K", 2730 " 64K",
2731 "128K", 2731 "128K",
2732 "256K", 2732 "256K",
2733 "512K", 2733 "512K",
2734 " 1M", 2734 " 1M",
2735 " 2M", 2735 " 2M",
2736 " 4M", 2736 " 4M",
2737 " 8M", 2737 " 8M",
2738 " 16M", 2738 " 16M",
2739 " 32M", 2739 " 32M",
2740 " 64M", 2740 " 64M",
2741 "128M", 2741 "128M",
2742 "256M", 2742 "256M",
2743 "512M", 2743 "512M",
2744 " 1G", 2744 " 1G",
2745 " 2G", 2745 " 2G",
2746 " 4G", 2746 " 4G",
2747 " 8G", 2747 " 8G",
2748 " 16G", 2748 " 16G",
2749 " 32G", 2749 " 32G",
2750 " 64G", 2750 " 64G",
2751 "128G", 2751 "128G",
2752 "256G", 2752 "256G",
2753 "512G", 2753 "512G",
2754 " 1T", 2754 " 1T",
2755 " 2T", 2755 " 2T",
2756 }; 2756 };
2757 2757
2758 /* Gather some info about the MMU */ 2758 /* Gather some info about the MMU */
2759 mmucfg = mfspr(SPRN_MMUCFG); 2759 mmucfg = mfspr(SPRN_MMUCFG);
2760 mmu_version = (mmucfg & 3) + 1; 2760 mmu_version = (mmucfg & 3) + 1;
2761 ntlbs = ((mmucfg >> 2) & 3) + 1; 2761 ntlbs = ((mmucfg >> 2) & 3) + 1;
2762 pidsz = ((mmucfg >> 6) & 0x1f) + 1; 2762 pidsz = ((mmucfg >> 6) & 0x1f) + 1;
2763 lpidsz = (mmucfg >> 24) & 0xf; 2763 lpidsz = (mmucfg >> 24) & 0xf;
2764 rasz = (mmucfg >> 16) & 0x7f; 2764 rasz = (mmucfg >> 16) & 0x7f;
2765 if ((mmu_version > 1) && (mmucfg & 0x10000)) 2765 if ((mmu_version > 1) && (mmucfg & 0x10000))
2766 lrat = 1; 2766 lrat = 1;
2767 printf("Book3E MMU MAV=%d.0,%d TLBs,%d-bit PID,%d-bit LPID,%d-bit RA\n", 2767 printf("Book3E MMU MAV=%d.0,%d TLBs,%d-bit PID,%d-bit LPID,%d-bit RA\n",
2768 mmu_version, ntlbs, pidsz, lpidsz, rasz); 2768 mmu_version, ntlbs, pidsz, lpidsz, rasz);
2769 pidmask = (1ul << pidsz) - 1; 2769 pidmask = (1ul << pidsz) - 1;
2770 lpidmask = (1ul << lpidsz) - 1; 2770 lpidmask = (1ul << lpidsz) - 1;
2771 ramask = (1ull << rasz) - 1; 2771 ramask = (1ull << rasz) - 1;
2772 2772
2773 for (tlb = 0; tlb < ntlbs; tlb++) { 2773 for (tlb = 0; tlb < ntlbs; tlb++) {
2774 u32 tlbcfg; 2774 u32 tlbcfg;
2775 int nent, assoc, new_cc = 1; 2775 int nent, assoc, new_cc = 1;
2776 printf("TLB %d:\n------\n", tlb); 2776 printf("TLB %d:\n------\n", tlb);
2777 switch(tlb) { 2777 switch(tlb) {
2778 case 0: 2778 case 0:
2779 tlbcfg = mfspr(SPRN_TLB0CFG); 2779 tlbcfg = mfspr(SPRN_TLB0CFG);
2780 break; 2780 break;
2781 case 1: 2781 case 1:
2782 tlbcfg = mfspr(SPRN_TLB1CFG); 2782 tlbcfg = mfspr(SPRN_TLB1CFG);
2783 break; 2783 break;
2784 case 2: 2784 case 2:
2785 tlbcfg = mfspr(SPRN_TLB2CFG); 2785 tlbcfg = mfspr(SPRN_TLB2CFG);
2786 break; 2786 break;
2787 case 3: 2787 case 3:
2788 tlbcfg = mfspr(SPRN_TLB3CFG); 2788 tlbcfg = mfspr(SPRN_TLB3CFG);
2789 break; 2789 break;
2790 default: 2790 default:
2791 printf("Unsupported TLB number !\n"); 2791 printf("Unsupported TLB number !\n");
2792 continue; 2792 continue;
2793 } 2793 }
2794 nent = tlbcfg & 0xfff; 2794 nent = tlbcfg & 0xfff;
2795 assoc = (tlbcfg >> 24) & 0xff; 2795 assoc = (tlbcfg >> 24) & 0xff;
2796 for (i = 0; i < nent; i++) { 2796 for (i = 0; i < nent; i++) {
2797 u32 mas0 = MAS0_TLBSEL(tlb); 2797 u32 mas0 = MAS0_TLBSEL(tlb);
2798 u32 mas1 = MAS1_TSIZE(BOOK3E_PAGESZ_4K); 2798 u32 mas1 = MAS1_TSIZE(BOOK3E_PAGESZ_4K);
2799 u64 mas2 = 0; 2799 u64 mas2 = 0;
2800 u64 mas7_mas3; 2800 u64 mas7_mas3;
2801 int esel = i, cc = i; 2801 int esel = i, cc = i;
2802 2802
2803 if (assoc != 0) { 2803 if (assoc != 0) {
2804 cc = i / assoc; 2804 cc = i / assoc;
2805 esel = i % assoc; 2805 esel = i % assoc;
2806 mas2 = cc * 0x1000; 2806 mas2 = cc * 0x1000;
2807 } 2807 }
2808 2808
2809 mas0 |= MAS0_ESEL(esel); 2809 mas0 |= MAS0_ESEL(esel);
2810 mtspr(SPRN_MAS0, mas0); 2810 mtspr(SPRN_MAS0, mas0);
2811 mtspr(SPRN_MAS1, mas1); 2811 mtspr(SPRN_MAS1, mas1);
2812 mtspr(SPRN_MAS2, mas2); 2812 mtspr(SPRN_MAS2, mas2);
2813 asm volatile("tlbre 0,0,0" : : : "memory"); 2813 asm volatile("tlbre 0,0,0" : : : "memory");
2814 mas1 = mfspr(SPRN_MAS1); 2814 mas1 = mfspr(SPRN_MAS1);
2815 mas2 = mfspr(SPRN_MAS2); 2815 mas2 = mfspr(SPRN_MAS2);
2816 mas7_mas3 = mfspr(SPRN_MAS7_MAS3); 2816 mas7_mas3 = mfspr(SPRN_MAS7_MAS3);
2817 if (assoc && (i % assoc) == 0) 2817 if (assoc && (i % assoc) == 0)
2818 new_cc = 1; 2818 new_cc = 1;
2819 if (!(mas1 & MAS1_VALID)) 2819 if (!(mas1 & MAS1_VALID))
2820 continue; 2820 continue;
2821 if (assoc == 0) 2821 if (assoc == 0)
2822 printf("%04x- ", i); 2822 printf("%04x- ", i);
2823 else if (new_cc) 2823 else if (new_cc)
2824 printf("%04x-%c", cc, 'A' + esel); 2824 printf("%04x-%c", cc, 'A' + esel);
2825 else 2825 else
2826 printf(" |%c", 'A' + esel); 2826 printf(" |%c", 'A' + esel);
2827 new_cc = 0; 2827 new_cc = 0;
2828 printf(" %016llx %04x %s %c%c AS%c", 2828 printf(" %016llx %04x %s %c%c AS%c",
2829 mas2 & ~0x3ffull, 2829 mas2 & ~0x3ffull,
2830 (mas1 >> 16) & 0x3fff, 2830 (mas1 >> 16) & 0x3fff,
2831 pgsz_names[(mas1 >> 7) & 0x1f], 2831 pgsz_names[(mas1 >> 7) & 0x1f],
2832 mas1 & MAS1_IND ? 'I' : ' ', 2832 mas1 & MAS1_IND ? 'I' : ' ',
2833 mas1 & MAS1_IPROT ? 'P' : ' ', 2833 mas1 & MAS1_IPROT ? 'P' : ' ',
2834 mas1 & MAS1_TS ? '1' : '0'); 2834 mas1 & MAS1_TS ? '1' : '0');
2835 printf(" %c%c%c%c%c%c%c", 2835 printf(" %c%c%c%c%c%c%c",
2836 mas2 & MAS2_X0 ? 'a' : ' ', 2836 mas2 & MAS2_X0 ? 'a' : ' ',
2837 mas2 & MAS2_X1 ? 'v' : ' ', 2837 mas2 & MAS2_X1 ? 'v' : ' ',
2838 mas2 & MAS2_W ? 'w' : ' ', 2838 mas2 & MAS2_W ? 'w' : ' ',
2839 mas2 & MAS2_I ? 'i' : ' ', 2839 mas2 & MAS2_I ? 'i' : ' ',
2840 mas2 & MAS2_M ? 'm' : ' ', 2840 mas2 & MAS2_M ? 'm' : ' ',
2841 mas2 & MAS2_G ? 'g' : ' ', 2841 mas2 & MAS2_G ? 'g' : ' ',
2842 mas2 & MAS2_E ? 'e' : ' '); 2842 mas2 & MAS2_E ? 'e' : ' ');
2843 printf(" %016llx", mas7_mas3 & ramask & ~0x7ffull); 2843 printf(" %016llx", mas7_mas3 & ramask & ~0x7ffull);
2844 if (mas1 & MAS1_IND) 2844 if (mas1 & MAS1_IND)
2845 printf(" %s\n", 2845 printf(" %s\n",
2846 pgsz_names[(mas7_mas3 >> 1) & 0x1f]); 2846 pgsz_names[(mas7_mas3 >> 1) & 0x1f]);
2847 else 2847 else
2848 printf(" U%c%c%c S%c%c%c\n", 2848 printf(" U%c%c%c S%c%c%c\n",
2849 mas7_mas3 & MAS3_UX ? 'x' : ' ', 2849 mas7_mas3 & MAS3_UX ? 'x' : ' ',
2850 mas7_mas3 & MAS3_UW ? 'w' : ' ', 2850 mas7_mas3 & MAS3_UW ? 'w' : ' ',
2851 mas7_mas3 & MAS3_UR ? 'r' : ' ', 2851 mas7_mas3 & MAS3_UR ? 'r' : ' ',
2852 mas7_mas3 & MAS3_SX ? 'x' : ' ', 2852 mas7_mas3 & MAS3_SX ? 'x' : ' ',
2853 mas7_mas3 & MAS3_SW ? 'w' : ' ', 2853 mas7_mas3 & MAS3_SW ? 'w' : ' ',
2854 mas7_mas3 & MAS3_SR ? 'r' : ' '); 2854 mas7_mas3 & MAS3_SR ? 'r' : ' ');
2855 } 2855 }
2856 } 2856 }
2857 } 2857 }
2858 #endif /* CONFIG_PPC_BOOK3E */ 2858 #endif /* CONFIG_PPC_BOOK3E */
2859 2859
2860 static void xmon_init(int enable) 2860 static void xmon_init(int enable)
2861 { 2861 {
2862 #ifdef CONFIG_PPC_ISERIES 2862 #ifdef CONFIG_PPC_ISERIES
2863 if (firmware_has_feature(FW_FEATURE_ISERIES)) 2863 if (firmware_has_feature(FW_FEATURE_ISERIES))
2864 return; 2864 return;
2865 #endif 2865 #endif
2866 if (enable) { 2866 if (enable) {
2867 __debugger = xmon; 2867 __debugger = xmon;
2868 __debugger_ipi = xmon_ipi; 2868 __debugger_ipi = xmon_ipi;
2869 __debugger_bpt = xmon_bpt; 2869 __debugger_bpt = xmon_bpt;
2870 __debugger_sstep = xmon_sstep; 2870 __debugger_sstep = xmon_sstep;
2871 __debugger_iabr_match = xmon_iabr_match; 2871 __debugger_iabr_match = xmon_iabr_match;
2872 __debugger_dabr_match = xmon_dabr_match; 2872 __debugger_dabr_match = xmon_dabr_match;
2873 __debugger_fault_handler = xmon_fault_handler; 2873 __debugger_fault_handler = xmon_fault_handler;
2874 } else { 2874 } else {
2875 __debugger = NULL; 2875 __debugger = NULL;
2876 __debugger_ipi = NULL; 2876 __debugger_ipi = NULL;
2877 __debugger_bpt = NULL; 2877 __debugger_bpt = NULL;
2878 __debugger_sstep = NULL; 2878 __debugger_sstep = NULL;
2879 __debugger_iabr_match = NULL; 2879 __debugger_iabr_match = NULL;
2880 __debugger_dabr_match = NULL; 2880 __debugger_dabr_match = NULL;
2881 __debugger_fault_handler = NULL; 2881 __debugger_fault_handler = NULL;
2882 } 2882 }
2883 xmon_map_scc(); 2883 xmon_map_scc();
2884 } 2884 }
2885 2885
2886 #ifdef CONFIG_MAGIC_SYSRQ 2886 #ifdef CONFIG_MAGIC_SYSRQ
2887 static void sysrq_handle_xmon(int key) 2887 static void sysrq_handle_xmon(int key)
2888 { 2888 {
2889 /* ensure xmon is enabled */ 2889 /* ensure xmon is enabled */
2890 xmon_init(1); 2890 xmon_init(1);
2891 debugger(get_irq_regs()); 2891 debugger(get_irq_regs());
2892 } 2892 }
2893 2893
2894 static struct sysrq_key_op sysrq_xmon_op = { 2894 static struct sysrq_key_op sysrq_xmon_op = {
2895 .handler = sysrq_handle_xmon, 2895 .handler = sysrq_handle_xmon,
2896 .help_msg = "Xmon", 2896 .help_msg = "Xmon",
2897 .action_msg = "Entering xmon", 2897 .action_msg = "Entering xmon",
2898 }; 2898 };
2899 2899
2900 static int __init setup_xmon_sysrq(void) 2900 static int __init setup_xmon_sysrq(void)
2901 { 2901 {
2902 #ifdef CONFIG_PPC_ISERIES 2902 #ifdef CONFIG_PPC_ISERIES
2903 if (firmware_has_feature(FW_FEATURE_ISERIES)) 2903 if (firmware_has_feature(FW_FEATURE_ISERIES))
2904 return 0; 2904 return 0;
2905 #endif 2905 #endif
2906 register_sysrq_key('x', &sysrq_xmon_op); 2906 register_sysrq_key('x', &sysrq_xmon_op);
2907 return 0; 2907 return 0;
2908 } 2908 }
2909 __initcall(setup_xmon_sysrq); 2909 __initcall(setup_xmon_sysrq);
2910 #endif /* CONFIG_MAGIC_SYSRQ */ 2910 #endif /* CONFIG_MAGIC_SYSRQ */
2911 2911
2912 static int __initdata xmon_early, xmon_off; 2912 static int __initdata xmon_early, xmon_off;
2913 2913
2914 static int __init early_parse_xmon(char *p) 2914 static int __init early_parse_xmon(char *p)
2915 { 2915 {
2916 if (!p || strncmp(p, "early", 5) == 0) { 2916 if (!p || strncmp(p, "early", 5) == 0) {
2917 /* just "xmon" is equivalent to "xmon=early" */ 2917 /* just "xmon" is equivalent to "xmon=early" */
2918 xmon_init(1); 2918 xmon_init(1);
2919 xmon_early = 1; 2919 xmon_early = 1;
2920 } else if (strncmp(p, "on", 2) == 0) 2920 } else if (strncmp(p, "on", 2) == 0)
2921 xmon_init(1); 2921 xmon_init(1);
2922 else if (strncmp(p, "off", 3) == 0) 2922 else if (strncmp(p, "off", 3) == 0)
2923 xmon_off = 1; 2923 xmon_off = 1;
2924 else if (strncmp(p, "nobt", 4) == 0) 2924 else if (strncmp(p, "nobt", 4) == 0)
2925 xmon_no_auto_backtrace = 1; 2925 xmon_no_auto_backtrace = 1;
2926 else 2926 else
2927 return 1; 2927 return 1;
2928 2928
2929 return 0; 2929 return 0;
2930 } 2930 }
2931 early_param("xmon", early_parse_xmon); 2931 early_param("xmon", early_parse_xmon);
2932 2932
2933 void __init xmon_setup(void) 2933 void __init xmon_setup(void)
2934 { 2934 {
2935 #ifdef CONFIG_XMON_DEFAULT 2935 #ifdef CONFIG_XMON_DEFAULT
2936 if (!xmon_off) 2936 if (!xmon_off)
2937 xmon_init(1); 2937 xmon_init(1);
2938 #endif 2938 #endif
2939 if (xmon_early) 2939 if (xmon_early)
2940 debugger(NULL); 2940 debugger(NULL);
2941 } 2941 }
2942 2942
2943 #ifdef CONFIG_SPU_BASE 2943 #ifdef CONFIG_SPU_BASE
2944 2944
2945 struct spu_info { 2945 struct spu_info {
2946 struct spu *spu; 2946 struct spu *spu;
2947 u64 saved_mfc_sr1_RW; 2947 u64 saved_mfc_sr1_RW;
2948 u32 saved_spu_runcntl_RW; 2948 u32 saved_spu_runcntl_RW;
2949 unsigned long dump_addr; 2949 unsigned long dump_addr;
2950 u8 stopped_ok; 2950 u8 stopped_ok;
2951 }; 2951 };
2952 2952
2953 #define XMON_NUM_SPUS 16 /* Enough for current hardware */ 2953 #define XMON_NUM_SPUS 16 /* Enough for current hardware */
2954 2954
2955 static struct spu_info spu_info[XMON_NUM_SPUS]; 2955 static struct spu_info spu_info[XMON_NUM_SPUS];
2956 2956
2957 void xmon_register_spus(struct list_head *list) 2957 void xmon_register_spus(struct list_head *list)
2958 { 2958 {
2959 struct spu *spu; 2959 struct spu *spu;
2960 2960
2961 list_for_each_entry(spu, list, full_list) { 2961 list_for_each_entry(spu, list, full_list) {
2962 if (spu->number >= XMON_NUM_SPUS) { 2962 if (spu->number >= XMON_NUM_SPUS) {
2963 WARN_ON(1); 2963 WARN_ON(1);
2964 continue; 2964 continue;
2965 } 2965 }
2966 2966
2967 spu_info[spu->number].spu = spu; 2967 spu_info[spu->number].spu = spu;
2968 spu_info[spu->number].stopped_ok = 0; 2968 spu_info[spu->number].stopped_ok = 0;
2969 spu_info[spu->number].dump_addr = (unsigned long) 2969 spu_info[spu->number].dump_addr = (unsigned long)
2970 spu_info[spu->number].spu->local_store; 2970 spu_info[spu->number].spu->local_store;
2971 } 2971 }
2972 } 2972 }
2973 2973
2974 static void stop_spus(void) 2974 static void stop_spus(void)
2975 { 2975 {
2976 struct spu *spu; 2976 struct spu *spu;
2977 int i; 2977 int i;
2978 u64 tmp; 2978 u64 tmp;
2979 2979
2980 for (i = 0; i < XMON_NUM_SPUS; i++) { 2980 for (i = 0; i < XMON_NUM_SPUS; i++) {
2981 if (!spu_info[i].spu) 2981 if (!spu_info[i].spu)
2982 continue; 2982 continue;
2983 2983
2984 if (setjmp(bus_error_jmp) == 0) { 2984 if (setjmp(bus_error_jmp) == 0) {
2985 catch_memory_errors = 1; 2985 catch_memory_errors = 1;
2986 sync(); 2986 sync();
2987 2987
2988 spu = spu_info[i].spu; 2988 spu = spu_info[i].spu;
2989 2989
2990 spu_info[i].saved_spu_runcntl_RW = 2990 spu_info[i].saved_spu_runcntl_RW =
2991 in_be32(&spu->problem->spu_runcntl_RW); 2991 in_be32(&spu->problem->spu_runcntl_RW);
2992 2992
2993 tmp = spu_mfc_sr1_get(spu); 2993 tmp = spu_mfc_sr1_get(spu);
2994 spu_info[i].saved_mfc_sr1_RW = tmp; 2994 spu_info[i].saved_mfc_sr1_RW = tmp;
2995 2995
2996 tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK; 2996 tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
2997 spu_mfc_sr1_set(spu, tmp); 2997 spu_mfc_sr1_set(spu, tmp);
2998 2998
2999 sync(); 2999 sync();
3000 __delay(200); 3000 __delay(200);
3001 3001
3002 spu_info[i].stopped_ok = 1; 3002 spu_info[i].stopped_ok = 1;
3003 3003
3004 printf("Stopped spu %.2d (was %s)\n", i, 3004 printf("Stopped spu %.2d (was %s)\n", i,
3005 spu_info[i].saved_spu_runcntl_RW ? 3005 spu_info[i].saved_spu_runcntl_RW ?
3006 "running" : "stopped"); 3006 "running" : "stopped");
3007 } else { 3007 } else {
3008 catch_memory_errors = 0; 3008 catch_memory_errors = 0;
3009 printf("*** Error stopping spu %.2d\n", i); 3009 printf("*** Error stopping spu %.2d\n", i);
3010 } 3010 }
3011 catch_memory_errors = 0; 3011 catch_memory_errors = 0;
3012 } 3012 }
3013 } 3013 }
3014 3014
3015 static void restart_spus(void) 3015 static void restart_spus(void)
3016 { 3016 {
3017 struct spu *spu; 3017 struct spu *spu;
3018 int i; 3018 int i;
3019 3019
3020 for (i = 0; i < XMON_NUM_SPUS; i++) { 3020 for (i = 0; i < XMON_NUM_SPUS; i++) {
3021 if (!spu_info[i].spu) 3021 if (!spu_info[i].spu)
3022 continue; 3022 continue;
3023 3023
3024 if (!spu_info[i].stopped_ok) { 3024 if (!spu_info[i].stopped_ok) {
3025 printf("*** Error, spu %d was not successfully stopped" 3025 printf("*** Error, spu %d was not successfully stopped"
3026 ", not restarting\n", i); 3026 ", not restarting\n", i);
3027 continue; 3027 continue;
3028 } 3028 }
3029 3029
3030 if (setjmp(bus_error_jmp) == 0) { 3030 if (setjmp(bus_error_jmp) == 0) {
3031 catch_memory_errors = 1; 3031 catch_memory_errors = 1;
3032 sync(); 3032 sync();
3033 3033
3034 spu = spu_info[i].spu; 3034 spu = spu_info[i].spu;
3035 spu_mfc_sr1_set(spu, spu_info[i].saved_mfc_sr1_RW); 3035 spu_mfc_sr1_set(spu, spu_info[i].saved_mfc_sr1_RW);
3036 out_be32(&spu->problem->spu_runcntl_RW, 3036 out_be32(&spu->problem->spu_runcntl_RW,
3037 spu_info[i].saved_spu_runcntl_RW); 3037 spu_info[i].saved_spu_runcntl_RW);
3038 3038
3039 sync(); 3039 sync();
3040 __delay(200); 3040 __delay(200);
3041 3041
3042 printf("Restarted spu %.2d\n", i); 3042 printf("Restarted spu %.2d\n", i);
3043 } else { 3043 } else {
3044 catch_memory_errors = 0; 3044 catch_memory_errors = 0;
3045 printf("*** Error restarting spu %.2d\n", i); 3045 printf("*** Error restarting spu %.2d\n", i);
3046 } 3046 }
3047 catch_memory_errors = 0; 3047 catch_memory_errors = 0;
3048 } 3048 }
3049 } 3049 }
3050 3050
3051 #define DUMP_WIDTH 23 3051 #define DUMP_WIDTH 23
3052 #define DUMP_VALUE(format, field, value) \ 3052 #define DUMP_VALUE(format, field, value) \
3053 do { \ 3053 do { \
3054 if (setjmp(bus_error_jmp) == 0) { \ 3054 if (setjmp(bus_error_jmp) == 0) { \
3055 catch_memory_errors = 1; \ 3055 catch_memory_errors = 1; \
3056 sync(); \ 3056 sync(); \
3057 printf(" %-*s = "format"\n", DUMP_WIDTH, \ 3057 printf(" %-*s = "format"\n", DUMP_WIDTH, \
3058 #field, value); \ 3058 #field, value); \
3059 sync(); \ 3059 sync(); \
3060 __delay(200); \ 3060 __delay(200); \
3061 } else { \ 3061 } else { \
3062 catch_memory_errors = 0; \ 3062 catch_memory_errors = 0; \
3063 printf(" %-*s = *** Error reading field.\n", \ 3063 printf(" %-*s = *** Error reading field.\n", \
3064 DUMP_WIDTH, #field); \ 3064 DUMP_WIDTH, #field); \
3065 } \ 3065 } \
3066 catch_memory_errors = 0; \ 3066 catch_memory_errors = 0; \
3067 } while (0) 3067 } while (0)
3068 3068
3069 #define DUMP_FIELD(obj, format, field) \ 3069 #define DUMP_FIELD(obj, format, field) \
3070 DUMP_VALUE(format, field, obj->field) 3070 DUMP_VALUE(format, field, obj->field)
3071 3071
3072 static void dump_spu_fields(struct spu *spu) 3072 static void dump_spu_fields(struct spu *spu)
3073 { 3073 {
3074 printf("Dumping spu fields at address %p:\n", spu); 3074 printf("Dumping spu fields at address %p:\n", spu);
3075 3075
3076 DUMP_FIELD(spu, "0x%x", number); 3076 DUMP_FIELD(spu, "0x%x", number);
3077 DUMP_FIELD(spu, "%s", name); 3077 DUMP_FIELD(spu, "%s", name);
3078 DUMP_FIELD(spu, "0x%lx", local_store_phys); 3078 DUMP_FIELD(spu, "0x%lx", local_store_phys);
3079 DUMP_FIELD(spu, "0x%p", local_store); 3079 DUMP_FIELD(spu, "0x%p", local_store);
3080 DUMP_FIELD(spu, "0x%lx", ls_size); 3080 DUMP_FIELD(spu, "0x%lx", ls_size);
3081 DUMP_FIELD(spu, "0x%x", node); 3081 DUMP_FIELD(spu, "0x%x", node);
3082 DUMP_FIELD(spu, "0x%lx", flags); 3082 DUMP_FIELD(spu, "0x%lx", flags);
3083 DUMP_FIELD(spu, "%d", class_0_pending); 3083 DUMP_FIELD(spu, "%d", class_0_pending);
3084 DUMP_FIELD(spu, "0x%lx", class_0_dar); 3084 DUMP_FIELD(spu, "0x%lx", class_0_dar);
3085 DUMP_FIELD(spu, "0x%lx", class_1_dar); 3085 DUMP_FIELD(spu, "0x%lx", class_1_dar);
3086 DUMP_FIELD(spu, "0x%lx", class_1_dsisr); 3086 DUMP_FIELD(spu, "0x%lx", class_1_dsisr);
3087 DUMP_FIELD(spu, "0x%lx", irqs[0]); 3087 DUMP_FIELD(spu, "0x%lx", irqs[0]);
3088 DUMP_FIELD(spu, "0x%lx", irqs[1]); 3088 DUMP_FIELD(spu, "0x%lx", irqs[1]);
3089 DUMP_FIELD(spu, "0x%lx", irqs[2]); 3089 DUMP_FIELD(spu, "0x%lx", irqs[2]);
3090 DUMP_FIELD(spu, "0x%x", slb_replace); 3090 DUMP_FIELD(spu, "0x%x", slb_replace);
3091 DUMP_FIELD(spu, "%d", pid); 3091 DUMP_FIELD(spu, "%d", pid);
3092 DUMP_FIELD(spu, "0x%p", mm); 3092 DUMP_FIELD(spu, "0x%p", mm);
3093 DUMP_FIELD(spu, "0x%p", ctx); 3093 DUMP_FIELD(spu, "0x%p", ctx);
3094 DUMP_FIELD(spu, "0x%p", rq); 3094 DUMP_FIELD(spu, "0x%p", rq);
3095 DUMP_FIELD(spu, "0x%p", timestamp); 3095 DUMP_FIELD(spu, "0x%p", timestamp);
3096 DUMP_FIELD(spu, "0x%lx", problem_phys); 3096 DUMP_FIELD(spu, "0x%lx", problem_phys);
3097 DUMP_FIELD(spu, "0x%p", problem); 3097 DUMP_FIELD(spu, "0x%p", problem);
3098 DUMP_VALUE("0x%x", problem->spu_runcntl_RW, 3098 DUMP_VALUE("0x%x", problem->spu_runcntl_RW,
3099 in_be32(&spu->problem->spu_runcntl_RW)); 3099 in_be32(&spu->problem->spu_runcntl_RW));
3100 DUMP_VALUE("0x%x", problem->spu_status_R, 3100 DUMP_VALUE("0x%x", problem->spu_status_R,
3101 in_be32(&spu->problem->spu_status_R)); 3101 in_be32(&spu->problem->spu_status_R));
3102 DUMP_VALUE("0x%x", problem->spu_npc_RW, 3102 DUMP_VALUE("0x%x", problem->spu_npc_RW,
3103 in_be32(&spu->problem->spu_npc_RW)); 3103 in_be32(&spu->problem->spu_npc_RW));
3104 DUMP_FIELD(spu, "0x%p", priv2); 3104 DUMP_FIELD(spu, "0x%p", priv2);
3105 DUMP_FIELD(spu, "0x%p", pdata); 3105 DUMP_FIELD(spu, "0x%p", pdata);
3106 } 3106 }
3107 3107
3108 int 3108 int
3109 spu_inst_dump(unsigned long adr, long count, int praddr) 3109 spu_inst_dump(unsigned long adr, long count, int praddr)
3110 { 3110 {
3111 return generic_inst_dump(adr, count, praddr, print_insn_spu); 3111 return generic_inst_dump(adr, count, praddr, print_insn_spu);
3112 } 3112 }
3113 3113
3114 static void dump_spu_ls(unsigned long num, int subcmd) 3114 static void dump_spu_ls(unsigned long num, int subcmd)
3115 { 3115 {
3116 unsigned long offset, addr, ls_addr; 3116 unsigned long offset, addr, ls_addr;
3117 3117
3118 if (setjmp(bus_error_jmp) == 0) { 3118 if (setjmp(bus_error_jmp) == 0) {
3119 catch_memory_errors = 1; 3119 catch_memory_errors = 1;
3120 sync(); 3120 sync();
3121 ls_addr = (unsigned long)spu_info[num].spu->local_store; 3121 ls_addr = (unsigned long)spu_info[num].spu->local_store;
3122 sync(); 3122 sync();
3123 __delay(200); 3123 __delay(200);
3124 } else { 3124 } else {
3125 catch_memory_errors = 0; 3125 catch_memory_errors = 0;
3126 printf("*** Error: accessing spu info for spu %d\n", num); 3126 printf("*** Error: accessing spu info for spu %d\n", num);
3127 return; 3127 return;
3128 } 3128 }
3129 catch_memory_errors = 0; 3129 catch_memory_errors = 0;
3130 3130
3131 if (scanhex(&offset)) 3131 if (scanhex(&offset))
3132 addr = ls_addr + offset; 3132 addr = ls_addr + offset;
3133 else 3133 else
3134 addr = spu_info[num].dump_addr; 3134 addr = spu_info[num].dump_addr;
3135 3135
3136 if (addr >= ls_addr + LS_SIZE) { 3136 if (addr >= ls_addr + LS_SIZE) {
3137 printf("*** Error: address outside of local store\n"); 3137 printf("*** Error: address outside of local store\n");
3138 return; 3138 return;
3139 } 3139 }
3140 3140
3141 switch (subcmd) { 3141 switch (subcmd) {
3142 case 'i': 3142 case 'i':
3143 addr += spu_inst_dump(addr, 16, 1); 3143 addr += spu_inst_dump(addr, 16, 1);
3144 last_cmd = "sdi\n"; 3144 last_cmd = "sdi\n";
3145 break; 3145 break;
3146 default: 3146 default:
3147 prdump(addr, 64); 3147 prdump(addr, 64);
3148 addr += 64; 3148 addr += 64;
3149 last_cmd = "sd\n"; 3149 last_cmd = "sd\n";
3150 break; 3150 break;
3151 } 3151 }
3152 3152
3153 spu_info[num].dump_addr = addr; 3153 spu_info[num].dump_addr = addr;
3154 } 3154 }
3155 3155
3156 static int do_spu_cmd(void) 3156 static int do_spu_cmd(void)
3157 { 3157 {
3158 static unsigned long num = 0; 3158 static unsigned long num = 0;
3159 int cmd, subcmd = 0; 3159 int cmd, subcmd = 0;
3160 3160
3161 cmd = inchar(); 3161 cmd = inchar();
3162 switch (cmd) { 3162 switch (cmd) {
3163 case 's': 3163 case 's':
3164 stop_spus(); 3164 stop_spus();
3165 break; 3165 break;
3166 case 'r': 3166 case 'r':
3167 restart_spus(); 3167 restart_spus();
3168 break; 3168 break;
3169 case 'd': 3169 case 'd':
3170 subcmd = inchar(); 3170 subcmd = inchar();
3171 if (isxdigit(subcmd) || subcmd == '\n') 3171 if (isxdigit(subcmd) || subcmd == '\n')
3172 termch = subcmd; 3172 termch = subcmd;
3173 case 'f': 3173 case 'f':
3174 scanhex(&num); 3174 scanhex(&num);
3175 if (num >= XMON_NUM_SPUS || !spu_info[num].spu) { 3175 if (num >= XMON_NUM_SPUS || !spu_info[num].spu) {
3176 printf("*** Error: invalid spu number\n"); 3176 printf("*** Error: invalid spu number\n");
3177 return 0; 3177 return 0;
3178 } 3178 }
3179 3179
3180 switch (cmd) { 3180 switch (cmd) {
3181 case 'f': 3181 case 'f':
3182 dump_spu_fields(spu_info[num].spu); 3182 dump_spu_fields(spu_info[num].spu);
3183 break; 3183 break;
3184 default: 3184 default:
3185 dump_spu_ls(num, subcmd); 3185 dump_spu_ls(num, subcmd);
3186 break; 3186 break;
3187 } 3187 }
3188 3188
3189 break; 3189 break;
3190 default: 3190 default:
3191 return -1; 3191 return -1;
3192 } 3192 }
3193 3193
3194 return 0; 3194 return 0;
3195 } 3195 }
3196 #else /* ! CONFIG_SPU_BASE */ 3196 #else /* ! CONFIG_SPU_BASE */
3197 static int do_spu_cmd(void) 3197 static int do_spu_cmd(void)
3198 { 3198 {
3199 return -1; 3199 return -1;
3200 } 3200 }
3201 #endif 3201 #endif
3202 3202