Commit 5c200197130e307de6eba72fc335c83c9dd6a5bc
Committed by
Ralf Baechle
1 parent
b2f909419b
Exists in
master
and in
6 other branches
MIPS: ASID conflict after CPU hotplug
I am running SMP Linux 2.6.37-rc1 on BMIPS5000 (single core dual thread) and observe some abnormalities when doing system suspend/resume which I narrowed down to cpu hotplugging. The suspend brings the second thread processor down and then restarts it, after which I see memory corruption in userspace. I started digging and found out that problem occurs because while doing execve() the child process is getting the same ASID as the parent, which obviously corrupts parent's address space. Further digging showed that activate_mm() calls get_new_mmu_context() to get a new ASID, but at this time ASID field in entryHi is 1, and asid_cache(cpu) is 0x100 (it was just reset to ASID_FIRST_VERSION when the secondary TP was booting). So, get_new_mmu_context() increments the asid_cache(cpu) value to 0x101, and thus puts 0x01 into entryHi. The result - ASID field does not get changed as it was supposed to. My solution is very simple - do not reset asid_cache(cpu) on TP warm restart. Patchwork: https://patchwork.linux-mips.org/patch/1797/ Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Showing 1 changed file with 2 additions and 1 deletion (inline diff)
arch/mips/kernel/traps.c
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle | 6 | * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle |
7 | * Copyright (C) 1995, 1996 Paul M. Antoine | 7 | * Copyright (C) 1995, 1996 Paul M. Antoine |
8 | * Copyright (C) 1998 Ulf Carlsson | 8 | * Copyright (C) 1998 Ulf Carlsson |
9 | * Copyright (C) 1999 Silicon Graphics, Inc. | 9 | * Copyright (C) 1999 Silicon Graphics, Inc. |
10 | * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com | 10 | * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com |
11 | * Copyright (C) 2000, 01 MIPS Technologies, Inc. | 11 | * Copyright (C) 2000, 01 MIPS Technologies, Inc. |
12 | * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki | 12 | * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki |
13 | */ | 13 | */ |
14 | #include <linux/bug.h> | 14 | #include <linux/bug.h> |
15 | #include <linux/compiler.h> | 15 | #include <linux/compiler.h> |
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/mm.h> | 18 | #include <linux/mm.h> |
19 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
20 | #include <linux/smp.h> | 20 | #include <linux/smp.h> |
21 | #include <linux/spinlock.h> | 21 | #include <linux/spinlock.h> |
22 | #include <linux/kallsyms.h> | 22 | #include <linux/kallsyms.h> |
23 | #include <linux/bootmem.h> | 23 | #include <linux/bootmem.h> |
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | #include <linux/ptrace.h> | 25 | #include <linux/ptrace.h> |
26 | #include <linux/kgdb.h> | 26 | #include <linux/kgdb.h> |
27 | #include <linux/kdebug.h> | 27 | #include <linux/kdebug.h> |
28 | #include <linux/kprobes.h> | 28 | #include <linux/kprobes.h> |
29 | #include <linux/notifier.h> | 29 | #include <linux/notifier.h> |
30 | #include <linux/kdb.h> | 30 | #include <linux/kdb.h> |
31 | #include <linux/irq.h> | 31 | #include <linux/irq.h> |
32 | #include <linux/perf_event.h> | 32 | #include <linux/perf_event.h> |
33 | 33 | ||
34 | #include <asm/bootinfo.h> | 34 | #include <asm/bootinfo.h> |
35 | #include <asm/branch.h> | 35 | #include <asm/branch.h> |
36 | #include <asm/break.h> | 36 | #include <asm/break.h> |
37 | #include <asm/cop2.h> | 37 | #include <asm/cop2.h> |
38 | #include <asm/cpu.h> | 38 | #include <asm/cpu.h> |
39 | #include <asm/dsp.h> | 39 | #include <asm/dsp.h> |
40 | #include <asm/fpu.h> | 40 | #include <asm/fpu.h> |
41 | #include <asm/fpu_emulator.h> | 41 | #include <asm/fpu_emulator.h> |
42 | #include <asm/mipsregs.h> | 42 | #include <asm/mipsregs.h> |
43 | #include <asm/mipsmtregs.h> | 43 | #include <asm/mipsmtregs.h> |
44 | #include <asm/module.h> | 44 | #include <asm/module.h> |
45 | #include <asm/pgtable.h> | 45 | #include <asm/pgtable.h> |
46 | #include <asm/ptrace.h> | 46 | #include <asm/ptrace.h> |
47 | #include <asm/sections.h> | 47 | #include <asm/sections.h> |
48 | #include <asm/system.h> | 48 | #include <asm/system.h> |
49 | #include <asm/tlbdebug.h> | 49 | #include <asm/tlbdebug.h> |
50 | #include <asm/traps.h> | 50 | #include <asm/traps.h> |
51 | #include <asm/uaccess.h> | 51 | #include <asm/uaccess.h> |
52 | #include <asm/watch.h> | 52 | #include <asm/watch.h> |
53 | #include <asm/mmu_context.h> | 53 | #include <asm/mmu_context.h> |
54 | #include <asm/types.h> | 54 | #include <asm/types.h> |
55 | #include <asm/stacktrace.h> | 55 | #include <asm/stacktrace.h> |
56 | #include <asm/uasm.h> | 56 | #include <asm/uasm.h> |
57 | 57 | ||
58 | extern void check_wait(void); | 58 | extern void check_wait(void); |
59 | extern asmlinkage void r4k_wait(void); | 59 | extern asmlinkage void r4k_wait(void); |
60 | extern asmlinkage void rollback_handle_int(void); | 60 | extern asmlinkage void rollback_handle_int(void); |
61 | extern asmlinkage void handle_int(void); | 61 | extern asmlinkage void handle_int(void); |
62 | extern asmlinkage void handle_tlbm(void); | 62 | extern asmlinkage void handle_tlbm(void); |
63 | extern asmlinkage void handle_tlbl(void); | 63 | extern asmlinkage void handle_tlbl(void); |
64 | extern asmlinkage void handle_tlbs(void); | 64 | extern asmlinkage void handle_tlbs(void); |
65 | extern asmlinkage void handle_adel(void); | 65 | extern asmlinkage void handle_adel(void); |
66 | extern asmlinkage void handle_ades(void); | 66 | extern asmlinkage void handle_ades(void); |
67 | extern asmlinkage void handle_ibe(void); | 67 | extern asmlinkage void handle_ibe(void); |
68 | extern asmlinkage void handle_dbe(void); | 68 | extern asmlinkage void handle_dbe(void); |
69 | extern asmlinkage void handle_sys(void); | 69 | extern asmlinkage void handle_sys(void); |
70 | extern asmlinkage void handle_bp(void); | 70 | extern asmlinkage void handle_bp(void); |
71 | extern asmlinkage void handle_ri(void); | 71 | extern asmlinkage void handle_ri(void); |
72 | extern asmlinkage void handle_ri_rdhwr_vivt(void); | 72 | extern asmlinkage void handle_ri_rdhwr_vivt(void); |
73 | extern asmlinkage void handle_ri_rdhwr(void); | 73 | extern asmlinkage void handle_ri_rdhwr(void); |
74 | extern asmlinkage void handle_cpu(void); | 74 | extern asmlinkage void handle_cpu(void); |
75 | extern asmlinkage void handle_ov(void); | 75 | extern asmlinkage void handle_ov(void); |
76 | extern asmlinkage void handle_tr(void); | 76 | extern asmlinkage void handle_tr(void); |
77 | extern asmlinkage void handle_fpe(void); | 77 | extern asmlinkage void handle_fpe(void); |
78 | extern asmlinkage void handle_mdmx(void); | 78 | extern asmlinkage void handle_mdmx(void); |
79 | extern asmlinkage void handle_watch(void); | 79 | extern asmlinkage void handle_watch(void); |
80 | extern asmlinkage void handle_mt(void); | 80 | extern asmlinkage void handle_mt(void); |
81 | extern asmlinkage void handle_dsp(void); | 81 | extern asmlinkage void handle_dsp(void); |
82 | extern asmlinkage void handle_mcheck(void); | 82 | extern asmlinkage void handle_mcheck(void); |
83 | extern asmlinkage void handle_reserved(void); | 83 | extern asmlinkage void handle_reserved(void); |
84 | 84 | ||
85 | extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, | 85 | extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, |
86 | struct mips_fpu_struct *ctx, int has_fpu, | 86 | struct mips_fpu_struct *ctx, int has_fpu, |
87 | void *__user *fault_addr); | 87 | void *__user *fault_addr); |
88 | 88 | ||
89 | void (*board_be_init)(void); | 89 | void (*board_be_init)(void); |
90 | int (*board_be_handler)(struct pt_regs *regs, int is_fixup); | 90 | int (*board_be_handler)(struct pt_regs *regs, int is_fixup); |
91 | void (*board_nmi_handler_setup)(void); | 91 | void (*board_nmi_handler_setup)(void); |
92 | void (*board_ejtag_handler_setup)(void); | 92 | void (*board_ejtag_handler_setup)(void); |
93 | void (*board_bind_eic_interrupt)(int irq, int regset); | 93 | void (*board_bind_eic_interrupt)(int irq, int regset); |
94 | 94 | ||
95 | 95 | ||
/*
 * Print a raw "Call Trace:" by scanning the kernel stack upward from
 * @reg29 ($sp) and printing every word that looks like a kernel text
 * address.  Used when no reliable unwind information is available.
 */
static void show_raw_backtrace(unsigned long reg29)
{
	/* Force 4-byte alignment of the stack pointer before scanning. */
	unsigned long *sp = (unsigned long *)(reg29 & ~3);
	unsigned long addr;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	/* With kallsyms each entry is symbolized on its own line. */
	printk("\n");
#endif
	while (!kstack_end(sp)) {
		unsigned long __user *p =
			(unsigned long __user *)(unsigned long)sp++;
		if (__get_user(addr, p)) {
			printk(" (Bad stack address)");
			break;
		}
		/* Only print words that point into kernel text. */
		if (__kernel_text_address(addr))
			print_ip_sym(addr);
	}
	printk("\n");
}
117 | 117 | ||
#ifdef CONFIG_KALLSYMS
/* Non-zero forces show_backtrace() to use the raw stack scan instead
   of the frame unwinder. */
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
	raw_show_trace = 1;
	return 1;
}
/* Enable by booting with "raw_show_trace" on the kernel command line. */
__setup("raw_show_trace", set_raw_show_trace);
#endif
127 | 127 | ||
/*
 * Print a symbolized backtrace for @task starting from the register
 * state in @regs.  Falls back to the raw stack scan when the user
 * requested it (raw_show_trace) or when the PC is not in kernel text.
 */
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
{
	unsigned long sp = regs->regs[29];	/* stack pointer */
	unsigned long ra = regs->regs[31];	/* return address */
	unsigned long pc = regs->cp0_epc;

	if (raw_show_trace || !__kernel_text_address(pc)) {
		show_raw_backtrace(sp);
		return;
	}
	printk("Call Trace:\n");
	do {
		print_ip_sym(pc);
		/* unwind_stack() yields 0 once no caller frame is found. */
		pc = unwind_stack(task, &sp, pc, &ra);
	} while (pc);
	printk("\n");
}
145 | 145 | ||
146 | /* | 146 | /* |
147 | * This routine abuses get_user()/put_user() to reference pointers | 147 | * This routine abuses get_user()/put_user() to reference pointers |
148 | * with at least a bit of error checking ... | 148 | * with at least a bit of error checking ... |
149 | */ | 149 | */ |
/*
 * Hex-dump the stack of @task starting at regs->regs[29], then print
 * the backtrace.  Uses __get_user() so a bad stack pointer is reported
 * instead of faulting.
 */
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs)
{
	/* Width in hex digits of one printed word. */
	const int field = 2 * sizeof(unsigned long);
	long stackdata;
	int i;
	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];

	printk("Stack :");
	i = 0;
	/* Dump words until the pointer reaches a page boundary. */
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0))
			printk("\n       ");
		if (i > 39) {	/* cap the dump at 40 words */
			printk(" ...");
			break;
		}

		if (__get_user(stackdata, sp++)) {
			printk(" (Bad stack address)");
			break;
		}

		printk(" %0*lx", field, stackdata);
		i++;
	}
	printk("\n");
	show_backtrace(task, regs);
}
179 | 179 | ||
/*
 * Dump the stack of @task (or the current context).  If @sp is given
 * it is used directly; otherwise a register snapshot is synthesized
 * from the task's saved thread state, the kdb-saved registers, or the
 * current frame.
 */
void show_stack(struct task_struct *task, unsigned long *sp)
{
	struct pt_regs regs;
	if (sp) {
		regs.regs[29] = (unsigned long)sp;
		regs.regs[31] = 0;
		regs.cp0_epc = 0;
	} else {
		if (task && task != current) {
			/* Sleeping task: use the context saved at switch. */
			regs.regs[29] = task->thread.reg29;
			regs.regs[31] = 0;
			regs.cp0_epc = task->thread.reg31;
#ifdef CONFIG_KGDB_KDB
		} else if (atomic_read(&kgdb_active) != -1 &&
			   kdb_current_regs) {
			/* Inside the kernel debugger: use its saved regs. */
			memcpy(&regs, kdb_current_regs, sizeof(regs));
#endif /* CONFIG_KGDB_KDB */
		} else {
			prepare_frametrace(&regs);
		}
	}
	show_stacktrace(task, &regs);
}
203 | 203 | ||
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	struct pt_regs regs;

	/* Snapshot the current frame and walk it. */
	prepare_frametrace(&regs);
	show_backtrace(current, &regs);
}

EXPORT_SYMBOL(dump_stack);
216 | 216 | ||
/*
 * Dump the instructions around the faulting PC: three before and six
 * after, with the faulting one bracketed as <...>.  A PC with bit 0
 * set is treated as a 16-bit instruction stream (ISA-mode bit —
 * presumably MIPS16; confirm against the CPU variant).
 */
static void show_code(unsigned int __user *pc)
{
	long i;
	unsigned short __user *pc16 = NULL;

	printk("\nCode:");

	/* Bit 0 of the PC selects the 16-bit instruction decode path. */
	if ((unsigned long)pc & 1)
		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
	for(i = -3 ; i < 6 ; i++) {
		unsigned int insn;
		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
			printk(" (Bad address in epc)\n");
			break;
		}
		/* i == 0 is the faulting instruction: bracket it. */
		printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
	}
}
235 | 235 | ||
/*
 * Print the full saved register state: all 32 GPRs, hi/lo (plus ACX on
 * SmartMIPS), and the interesting cp0 registers with a decoded Status
 * field.  The Status decode differs between MIPS I and later ISAs.
 */
static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;
	int i;

	printk("Cpu %d\n", smp_processor_id());

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d   :", i);
		if (i == 0)
			printk(" %0*lx", field, 0UL);	/* $0 is hardwired zero */
		else if (i == 26 || i == 27)
			printk(" %*s", field, "");	/* k0/k1 are not saved */
		else
			printk(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			printk("\n");
	}

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	printk("Acx    : %0*lx\n", field, regs->acx);
#endif
	printk("Hi    : %0*lx\n", field, regs->hi);
	printk("Lo    : %0*lx\n", field, regs->lo);

	/*
	 * Saved cp0 registers
	 */
	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
	       (void *) regs->cp0_epc);
	printk("    %s\n", print_tainted());
	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
	       (void *) regs->regs[31]);

	printk("Status: %08x    ", (uint32_t) regs->cp0_status);

	/* Decode the Status bits; MIPS I has the KU/IE stack layout. */
	if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
		if (regs->cp0_status & ST0_KUO)
			printk("KUo ");
		if (regs->cp0_status & ST0_IEO)
			printk("IEo ");
		if (regs->cp0_status & ST0_KUP)
			printk("KUp ");
		if (regs->cp0_status & ST0_IEP)
			printk("IEp ");
		if (regs->cp0_status & ST0_KUC)
			printk("KUc ");
		if (regs->cp0_status & ST0_IEC)
			printk("IEc ");
	} else {
		if (regs->cp0_status & ST0_KX)
			printk("KX ");
		if (regs->cp0_status & ST0_SX)
			printk("SX ");
		if (regs->cp0_status & ST0_UX)
			printk("UX ");
		switch (regs->cp0_status & ST0_KSU) {
		case KSU_USER:
			printk("USER ");
			break;
		case KSU_SUPERVISOR:
			printk("SUPERVISOR ");
			break;
		case KSU_KERNEL:
			printk("KERNEL ");
			break;
		default:
			printk("BAD_MODE ");
			break;
		}
		if (regs->cp0_status & ST0_ERL)
			printk("ERL ");
		if (regs->cp0_status & ST0_EXL)
			printk("EXL ");
		if (regs->cp0_status & ST0_IE)
			printk("IE ");
	}
	printk("\n");

	printk("Cause : %08x\n", cause);

	cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	/* ExcCode 1..5 are TLB/address-error exceptions: BadVAddr is valid. */
	if (1 <= cause && cause <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId  : %08x (%s)\n", read_c0_prid(),
	       cpu_name_string());
}
331 | 331 | ||
/*
 * FIXME: really the generic show_regs should take a const pointer argument.
 */
void show_regs(struct pt_regs *regs)
{
	/* A non-const pointer converts implicitly to const; no cast needed. */
	__show_regs(regs);
}
339 | 339 | ||
/*
 * Extended crash dump: register state plus module list, process
 * identity, a hardware-TLS consistency check, the stack trace and the
 * code bytes around the faulting PC.
 */
void show_registers(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);

	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
	       current->comm, current->pid, current_thread_info(), current,
	       field, current_thread_info()->tp_value);
	if (cpu_has_userlocal) {
		unsigned long tls;

		tls = read_c0_userlocal();
		/* Flag a mismatch between the UserLocal register and the
		   kernel's recorded thread-pointer value. */
		if (tls != current_thread_info()->tp_value)
			printk("*HwTLS: %0*lx\n", field, tls);
	}

	show_stacktrace(current, regs);
	show_code((unsigned int __user *) regs->cp0_epc);
	printk("\n");
}
361 | 361 | ||
362 | static int regs_to_trapnr(struct pt_regs *regs) | 362 | static int regs_to_trapnr(struct pt_regs *regs) |
363 | { | 363 | { |
364 | return (regs->cp0_cause >> 2) & 0x1f; | 364 | return (regs->cp0_cause >> 2) & 0x1f; |
365 | } | 365 | } |
366 | 366 | ||
/* Serializes concurrent oopses so their output does not interleave. */
static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * Terminal error path: print an oops banner plus the full register and
 * stack state, then kill the current task — or panic when the oops
 * happened in interrupt context or panic_on_oops is set.  Never
 * returns.
 */
void __noreturn die(const char *str, struct pt_regs *regs)
{
	static int die_counter;
	int sig = SIGSEGV;
#ifdef CONFIG_MIPS_MT_SMTC
	unsigned long dvpret;
#endif /* CONFIG_MIPS_MT_SMTC */

	oops_enter();

	/* A die notifier may claim the event; then no signal is raised. */
	if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
		sig = 0;

	console_verbose();
	raw_spin_lock_irq(&die_lock);
#ifdef CONFIG_MIPS_MT_SMTC
	/* Disable multi-VPE execution while the state is dumped. */
	dvpret = dvpe();
#endif /* CONFIG_MIPS_MT_SMTC */
	bust_spinlocks(1);
#ifdef CONFIG_MIPS_MT_SMTC
	mips_mt_regdump(dvpret);
#endif /* CONFIG_MIPS_MT_SMTC */

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
		ssleep(5);
		panic("Fatal exception");
	}

	do_exit(sig);
}
410 | 410 | ||
411 | extern struct exception_table_entry __start___dbe_table[]; | 411 | extern struct exception_table_entry __start___dbe_table[]; |
412 | extern struct exception_table_entry __stop___dbe_table[]; | 412 | extern struct exception_table_entry __stop___dbe_table[]; |
413 | 413 | ||
414 | __asm__( | 414 | __asm__( |
415 | " .section __dbe_table, \"a\"\n" | 415 | " .section __dbe_table, \"a\"\n" |
416 | " .previous \n"); | 416 | " .previous \n"); |
417 | 417 | ||
/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	/* Built-in __dbe_table first, then module bus-error tables. */
	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
	if (!e)
		e = search_module_dbetables(addr);
	return e;
}
428 | 428 | ||
/*
 * Bus-error exception handler.  Try a fixup from the __dbe_table
 * exception tables, give the board-specific handler the final say, and
 * otherwise treat the error as fatal: oops in kernel mode, SIGBUS for
 * user mode.
 */
asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	int data = regs->cp0_cause & 4;	/* set: data access; clear: ifetch */
	int action = MIPS_BE_FATAL;

	/* XXX For now. Fixme, this searches the wrong table ... */
	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

	if (fixup)
		action = MIPS_BE_FIXUP;

	/* The board handler, if registered, may override the action. */
	if (board_be_handler)
		action = board_be_handler(regs, fixup != NULL);

	switch (action) {
	case MIPS_BE_DISCARD:
		return;
	case MIPS_BE_FIXUP:
		if (fixup) {
			/* Resume at the fixup's continuation address. */
			regs->cp0_epc = fixup->nextinsn;
			return;
		}
		break;
	default:
		break;
	}

	/*
	 * Assume it would be too dangerous to continue ...
	 */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs), SIGBUS)
	    == NOTIFY_STOP)
		return;

	die_if_kernel("Oops", regs);
	force_sig(SIGBUS, current);
}
472 | 472 | ||
473 | /* | 473 | /* |
474 | * ll/sc, rdhwr, sync emulation | 474 | * ll/sc, rdhwr, sync emulation |
475 | */ | 475 | */ |
476 | 476 | ||
477 | #define OPCODE 0xfc000000 | 477 | #define OPCODE 0xfc000000 |
478 | #define BASE 0x03e00000 | 478 | #define BASE 0x03e00000 |
479 | #define RT 0x001f0000 | 479 | #define RT 0x001f0000 |
480 | #define OFFSET 0x0000ffff | 480 | #define OFFSET 0x0000ffff |
481 | #define LL 0xc0000000 | 481 | #define LL 0xc0000000 |
482 | #define SC 0xe0000000 | 482 | #define SC 0xe0000000 |
483 | #define SPEC0 0x00000000 | 483 | #define SPEC0 0x00000000 |
484 | #define SPEC3 0x7c000000 | 484 | #define SPEC3 0x7c000000 |
485 | #define RD 0x0000f800 | 485 | #define RD 0x0000f800 |
486 | #define FUNC 0x0000003f | 486 | #define FUNC 0x0000003f |
487 | #define SYNC 0x0000000f | 487 | #define SYNC 0x0000000f |
488 | #define RDHWR 0x0000003b | 488 | #define RDHWR 0x0000003b |
489 | 489 | ||
490 | /* | 490 | /* |
491 | * The ll_bit is cleared by r*_switch.S | 491 | * The ll_bit is cleared by r*_switch.S |
492 | */ | 492 | */ |
493 | 493 | ||
494 | unsigned int ll_bit; | 494 | unsigned int ll_bit; |
495 | struct task_struct *ll_task; | 495 | struct task_struct *ll_task; |
496 | 496 | ||
/*
 * Emulate the ll (load-linked) instruction for CPUs that raise a
 * reserved-instruction exception on it.  Returns 0 on success, or a
 * signal number: SIGBUS for a misaligned address, SIGSEGV for a bad
 * user pointer.
 */
static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long value, __user *vaddr;
	long offset;

	/*
	 * analyse the ll instruction that just caused a ri exception
	 * and put the referenced address to addr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	/* Effective address = base register + sign-extended offset. */
	vaddr = (unsigned long __user *)
	        ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	if ((unsigned long)vaddr & 3)	/* ll requires word alignment */
		return SIGBUS;
	if (get_user(value, vaddr))
		return SIGSEGV;

	preempt_disable();

	/* Record the link; only one task can hold ll_bit at a time. */
	if (ll_task == NULL || ll_task == current) {
		ll_bit = 1;
	} else {
		ll_bit = 0;
	}
	ll_task = current;

	preempt_enable();

	/* Deliver the loaded value to the instruction's rt register. */
	regs->regs[(opcode & RT) >> 16] = value;

	return 0;
}
535 | 535 | ||
536 | static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode) | 536 | static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode) |
537 | { | 537 | { |
538 | unsigned long __user *vaddr; | 538 | unsigned long __user *vaddr; |
539 | unsigned long reg; | 539 | unsigned long reg; |
540 | long offset; | 540 | long offset; |
541 | 541 | ||
542 | /* | 542 | /* |
543 | * analyse the sc instruction that just caused a ri exception | 543 | * analyse the sc instruction that just caused a ri exception |
544 | * and put the referenced address to addr. | 544 | * and put the referenced address to addr. |
545 | */ | 545 | */ |
546 | 546 | ||
547 | /* sign extend offset */ | 547 | /* sign extend offset */ |
548 | offset = opcode & OFFSET; | 548 | offset = opcode & OFFSET; |
549 | offset <<= 16; | 549 | offset <<= 16; |
550 | offset >>= 16; | 550 | offset >>= 16; |
551 | 551 | ||
552 | vaddr = (unsigned long __user *) | 552 | vaddr = (unsigned long __user *) |
553 | ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset); | 553 | ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset); |
554 | reg = (opcode & RT) >> 16; | 554 | reg = (opcode & RT) >> 16; |
555 | 555 | ||
556 | if ((unsigned long)vaddr & 3) | 556 | if ((unsigned long)vaddr & 3) |
557 | return SIGBUS; | 557 | return SIGBUS; |
558 | 558 | ||
559 | preempt_disable(); | 559 | preempt_disable(); |
560 | 560 | ||
561 | if (ll_bit == 0 || ll_task != current) { | 561 | if (ll_bit == 0 || ll_task != current) { |
562 | regs->regs[reg] = 0; | 562 | regs->regs[reg] = 0; |
563 | preempt_enable(); | 563 | preempt_enable(); |
564 | return 0; | 564 | return 0; |
565 | } | 565 | } |
566 | 566 | ||
567 | preempt_enable(); | 567 | preempt_enable(); |
568 | 568 | ||
569 | if (put_user(regs->regs[reg], vaddr)) | 569 | if (put_user(regs->regs[reg], vaddr)) |
570 | return SIGSEGV; | 570 | return SIGSEGV; |
571 | 571 | ||
572 | regs->regs[reg] = 1; | 572 | regs->regs[reg] = 1; |
573 | 573 | ||
574 | return 0; | 574 | return 0; |
575 | } | 575 | } |
576 | 576 | ||
577 | /* | 577 | /* |
578 | * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is both | 578 | * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is both |
579 | * opcodes are supposed to result in coprocessor unusable exceptions if | 579 | * opcodes are supposed to result in coprocessor unusable exceptions if |
580 | * executed on ll/sc-less processors. That's the theory. In practice a | 580 | * executed on ll/sc-less processors. That's the theory. In practice a |
581 | * few processors such as NEC's VR4100 throw reserved instruction exceptions | 581 | * few processors such as NEC's VR4100 throw reserved instruction exceptions |
582 | * instead, so we're doing the emulation thing in both exception handlers. | 582 | * instead, so we're doing the emulation thing in both exception handlers. |
583 | */ | 583 | */ |
584 | static int simulate_llsc(struct pt_regs *regs, unsigned int opcode) | 584 | static int simulate_llsc(struct pt_regs *regs, unsigned int opcode) |
585 | { | 585 | { |
586 | if ((opcode & OPCODE) == LL) { | 586 | if ((opcode & OPCODE) == LL) { |
587 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, | 587 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, |
588 | 1, regs, 0); | 588 | 1, regs, 0); |
589 | return simulate_ll(regs, opcode); | 589 | return simulate_ll(regs, opcode); |
590 | } | 590 | } |
591 | if ((opcode & OPCODE) == SC) { | 591 | if ((opcode & OPCODE) == SC) { |
592 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, | 592 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, |
593 | 1, regs, 0); | 593 | 1, regs, 0); |
594 | return simulate_sc(regs, opcode); | 594 | return simulate_sc(regs, opcode); |
595 | } | 595 | } |
596 | 596 | ||
597 | return -1; /* Must be something else ... */ | 597 | return -1; /* Must be something else ... */ |
598 | } | 598 | } |
599 | 599 | ||
600 | /* | 600 | /* |
601 | * Simulate trapping 'rdhwr' instructions to provide user accessible | 601 | * Simulate trapping 'rdhwr' instructions to provide user accessible |
602 | * registers not implemented in hardware. | 602 | * registers not implemented in hardware. |
603 | */ | 603 | */ |
604 | static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode) | 604 | static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode) |
605 | { | 605 | { |
606 | struct thread_info *ti = task_thread_info(current); | 606 | struct thread_info *ti = task_thread_info(current); |
607 | 607 | ||
608 | if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) { | 608 | if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) { |
609 | int rd = (opcode & RD) >> 11; | 609 | int rd = (opcode & RD) >> 11; |
610 | int rt = (opcode & RT) >> 16; | 610 | int rt = (opcode & RT) >> 16; |
611 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, | 611 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, |
612 | 1, regs, 0); | 612 | 1, regs, 0); |
613 | switch (rd) { | 613 | switch (rd) { |
614 | case 0: /* CPU number */ | 614 | case 0: /* CPU number */ |
615 | regs->regs[rt] = smp_processor_id(); | 615 | regs->regs[rt] = smp_processor_id(); |
616 | return 0; | 616 | return 0; |
617 | case 1: /* SYNCI length */ | 617 | case 1: /* SYNCI length */ |
618 | regs->regs[rt] = min(current_cpu_data.dcache.linesz, | 618 | regs->regs[rt] = min(current_cpu_data.dcache.linesz, |
619 | current_cpu_data.icache.linesz); | 619 | current_cpu_data.icache.linesz); |
620 | return 0; | 620 | return 0; |
621 | case 2: /* Read count register */ | 621 | case 2: /* Read count register */ |
622 | regs->regs[rt] = read_c0_count(); | 622 | regs->regs[rt] = read_c0_count(); |
623 | return 0; | 623 | return 0; |
624 | case 3: /* Count register resolution */ | 624 | case 3: /* Count register resolution */ |
625 | switch (current_cpu_data.cputype) { | 625 | switch (current_cpu_data.cputype) { |
626 | case CPU_20KC: | 626 | case CPU_20KC: |
627 | case CPU_25KF: | 627 | case CPU_25KF: |
628 | regs->regs[rt] = 1; | 628 | regs->regs[rt] = 1; |
629 | break; | 629 | break; |
630 | default: | 630 | default: |
631 | regs->regs[rt] = 2; | 631 | regs->regs[rt] = 2; |
632 | } | 632 | } |
633 | return 0; | 633 | return 0; |
634 | case 29: | 634 | case 29: |
635 | regs->regs[rt] = ti->tp_value; | 635 | regs->regs[rt] = ti->tp_value; |
636 | return 0; | 636 | return 0; |
637 | default: | 637 | default: |
638 | return -1; | 638 | return -1; |
639 | } | 639 | } |
640 | } | 640 | } |
641 | 641 | ||
642 | /* Not ours. */ | 642 | /* Not ours. */ |
643 | return -1; | 643 | return -1; |
644 | } | 644 | } |
645 | 645 | ||
646 | static int simulate_sync(struct pt_regs *regs, unsigned int opcode) | 646 | static int simulate_sync(struct pt_regs *regs, unsigned int opcode) |
647 | { | 647 | { |
648 | if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) { | 648 | if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) { |
649 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, | 649 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, |
650 | 1, regs, 0); | 650 | 1, regs, 0); |
651 | return 0; | 651 | return 0; |
652 | } | 652 | } |
653 | 653 | ||
654 | return -1; /* Must be something else ... */ | 654 | return -1; /* Must be something else ... */ |
655 | } | 655 | } |
656 | 656 | ||
657 | asmlinkage void do_ov(struct pt_regs *regs) | 657 | asmlinkage void do_ov(struct pt_regs *regs) |
658 | { | 658 | { |
659 | siginfo_t info; | 659 | siginfo_t info; |
660 | 660 | ||
661 | die_if_kernel("Integer overflow", regs); | 661 | die_if_kernel("Integer overflow", regs); |
662 | 662 | ||
663 | info.si_code = FPE_INTOVF; | 663 | info.si_code = FPE_INTOVF; |
664 | info.si_signo = SIGFPE; | 664 | info.si_signo = SIGFPE; |
665 | info.si_errno = 0; | 665 | info.si_errno = 0; |
666 | info.si_addr = (void __user *) regs->cp0_epc; | 666 | info.si_addr = (void __user *) regs->cp0_epc; |
667 | force_sig_info(SIGFPE, &info, current); | 667 | force_sig_info(SIGFPE, &info, current); |
668 | } | 668 | } |
669 | 669 | ||
670 | static int process_fpemu_return(int sig, void __user *fault_addr) | 670 | static int process_fpemu_return(int sig, void __user *fault_addr) |
671 | { | 671 | { |
672 | if (sig == SIGSEGV || sig == SIGBUS) { | 672 | if (sig == SIGSEGV || sig == SIGBUS) { |
673 | struct siginfo si = {0}; | 673 | struct siginfo si = {0}; |
674 | si.si_addr = fault_addr; | 674 | si.si_addr = fault_addr; |
675 | si.si_signo = sig; | 675 | si.si_signo = sig; |
676 | if (sig == SIGSEGV) { | 676 | if (sig == SIGSEGV) { |
677 | if (find_vma(current->mm, (unsigned long)fault_addr)) | 677 | if (find_vma(current->mm, (unsigned long)fault_addr)) |
678 | si.si_code = SEGV_ACCERR; | 678 | si.si_code = SEGV_ACCERR; |
679 | else | 679 | else |
680 | si.si_code = SEGV_MAPERR; | 680 | si.si_code = SEGV_MAPERR; |
681 | } else { | 681 | } else { |
682 | si.si_code = BUS_ADRERR; | 682 | si.si_code = BUS_ADRERR; |
683 | } | 683 | } |
684 | force_sig_info(sig, &si, current); | 684 | force_sig_info(sig, &si, current); |
685 | return 1; | 685 | return 1; |
686 | } else if (sig) { | 686 | } else if (sig) { |
687 | force_sig(sig, current); | 687 | force_sig(sig, current); |
688 | return 1; | 688 | return 1; |
689 | } else { | 689 | } else { |
690 | return 0; | 690 | return 0; |
691 | } | 691 | } |
692 | } | 692 | } |
693 | 693 | ||
/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
/*
 * Floating point exception handler.  Either hands an "unimplemented
 * operation" off to the software FPU emulator, or translates the cause
 * bits in fcr31 into the appropriate SIGFPE si_code.
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	siginfo_t info = {0};

	if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE)
	    == NOTIFY_STOP)
		return;
	die_if_kernel("FP exception in kernel code", regs);

	if (fcr31 & FPU_CSR_UNI_X) {
		int sig;
		void __user *fault_addr = NULL;

		/*
		 * Unimplemented operation exception.  If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context.  We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */
		/* Ensure 'resume' not overwrite saved fp context again. */
		lose_fpu(1);

		/* Run the emulator */
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);

		/*
		 * We can't allow the emulated instruction to leave any of
		 * the cause bit set in $fcr31.
		 */
		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

		/* Restore the hardware register state */
		own_fpu(1);	/* Using the FPU again. */

		/* If something went wrong, signal */
		process_fpemu_return(sig, fault_addr);

		return;
	} else if (fcr31 & FPU_CSR_INV_X)
		info.si_code = FPE_FLTINV;
	else if (fcr31 & FPU_CSR_DIV_X)
		info.si_code = FPE_FLTDIV;
	else if (fcr31 & FPU_CSR_OVF_X)
		info.si_code = FPE_FLTOVF;
	else if (fcr31 & FPU_CSR_UDF_X)
		info.si_code = FPE_FLTUND;
	else if (fcr31 & FPU_CSR_INE_X)
		info.si_code = FPE_FLTRES;
	else
		/* No recognizable cause bit set. */
		info.si_code = __SI_FAULT;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *) regs->cp0_epc;
	force_sig_info(SIGFPE, &info, current);
}
757 | 757 | ||
758 | static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, | 758 | static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, |
759 | const char *str) | 759 | const char *str) |
760 | { | 760 | { |
761 | siginfo_t info; | 761 | siginfo_t info; |
762 | char b[40]; | 762 | char b[40]; |
763 | 763 | ||
764 | #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP | 764 | #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP |
765 | if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) | 765 | if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) |
766 | return; | 766 | return; |
767 | #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ | 767 | #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ |
768 | 768 | ||
769 | if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) | 769 | if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) |
770 | return; | 770 | return; |
771 | 771 | ||
772 | /* | 772 | /* |
773 | * A short test says that IRIX 5.3 sends SIGTRAP for all trap | 773 | * A short test says that IRIX 5.3 sends SIGTRAP for all trap |
774 | * insns, even for trap and break codes that indicate arithmetic | 774 | * insns, even for trap and break codes that indicate arithmetic |
775 | * failures. Weird ... | 775 | * failures. Weird ... |
776 | * But should we continue the brokenness??? --macro | 776 | * But should we continue the brokenness??? --macro |
777 | */ | 777 | */ |
778 | switch (code) { | 778 | switch (code) { |
779 | case BRK_OVERFLOW: | 779 | case BRK_OVERFLOW: |
780 | case BRK_DIVZERO: | 780 | case BRK_DIVZERO: |
781 | scnprintf(b, sizeof(b), "%s instruction in kernel code", str); | 781 | scnprintf(b, sizeof(b), "%s instruction in kernel code", str); |
782 | die_if_kernel(b, regs); | 782 | die_if_kernel(b, regs); |
783 | if (code == BRK_DIVZERO) | 783 | if (code == BRK_DIVZERO) |
784 | info.si_code = FPE_INTDIV; | 784 | info.si_code = FPE_INTDIV; |
785 | else | 785 | else |
786 | info.si_code = FPE_INTOVF; | 786 | info.si_code = FPE_INTOVF; |
787 | info.si_signo = SIGFPE; | 787 | info.si_signo = SIGFPE; |
788 | info.si_errno = 0; | 788 | info.si_errno = 0; |
789 | info.si_addr = (void __user *) regs->cp0_epc; | 789 | info.si_addr = (void __user *) regs->cp0_epc; |
790 | force_sig_info(SIGFPE, &info, current); | 790 | force_sig_info(SIGFPE, &info, current); |
791 | break; | 791 | break; |
792 | case BRK_BUG: | 792 | case BRK_BUG: |
793 | die_if_kernel("Kernel bug detected", regs); | 793 | die_if_kernel("Kernel bug detected", regs); |
794 | force_sig(SIGTRAP, current); | 794 | force_sig(SIGTRAP, current); |
795 | break; | 795 | break; |
796 | case BRK_MEMU: | 796 | case BRK_MEMU: |
797 | /* | 797 | /* |
798 | * Address errors may be deliberately induced by the FPU | 798 | * Address errors may be deliberately induced by the FPU |
799 | * emulator to retake control of the CPU after executing the | 799 | * emulator to retake control of the CPU after executing the |
800 | * instruction in the delay slot of an emulated branch. | 800 | * instruction in the delay slot of an emulated branch. |
801 | * | 801 | * |
802 | * Terminate if exception was recognized as a delay slot return | 802 | * Terminate if exception was recognized as a delay slot return |
803 | * otherwise handle as normal. | 803 | * otherwise handle as normal. |
804 | */ | 804 | */ |
805 | if (do_dsemulret(regs)) | 805 | if (do_dsemulret(regs)) |
806 | return; | 806 | return; |
807 | 807 | ||
808 | die_if_kernel("Math emu break/trap", regs); | 808 | die_if_kernel("Math emu break/trap", regs); |
809 | force_sig(SIGTRAP, current); | 809 | force_sig(SIGTRAP, current); |
810 | break; | 810 | break; |
811 | default: | 811 | default: |
812 | scnprintf(b, sizeof(b), "%s instruction in kernel code", str); | 812 | scnprintf(b, sizeof(b), "%s instruction in kernel code", str); |
813 | die_if_kernel(b, regs); | 813 | die_if_kernel(b, regs); |
814 | force_sig(SIGTRAP, current); | 814 | force_sig(SIGTRAP, current); |
815 | } | 815 | } |
816 | } | 816 | } |
817 | 817 | ||
818 | asmlinkage void do_bp(struct pt_regs *regs) | 818 | asmlinkage void do_bp(struct pt_regs *regs) |
819 | { | 819 | { |
820 | unsigned int opcode, bcode; | 820 | unsigned int opcode, bcode; |
821 | 821 | ||
822 | if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) | 822 | if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) |
823 | goto out_sigsegv; | 823 | goto out_sigsegv; |
824 | 824 | ||
825 | /* | 825 | /* |
826 | * There is the ancient bug in the MIPS assemblers that the break | 826 | * There is the ancient bug in the MIPS assemblers that the break |
827 | * code starts left to bit 16 instead to bit 6 in the opcode. | 827 | * code starts left to bit 16 instead to bit 6 in the opcode. |
828 | * Gas is bug-compatible, but not always, grrr... | 828 | * Gas is bug-compatible, but not always, grrr... |
829 | * We handle both cases with a simple heuristics. --macro | 829 | * We handle both cases with a simple heuristics. --macro |
830 | */ | 830 | */ |
831 | bcode = ((opcode >> 6) & ((1 << 20) - 1)); | 831 | bcode = ((opcode >> 6) & ((1 << 20) - 1)); |
832 | if (bcode >= (1 << 10)) | 832 | if (bcode >= (1 << 10)) |
833 | bcode >>= 10; | 833 | bcode >>= 10; |
834 | 834 | ||
835 | /* | 835 | /* |
836 | * notify the kprobe handlers, if instruction is likely to | 836 | * notify the kprobe handlers, if instruction is likely to |
837 | * pertain to them. | 837 | * pertain to them. |
838 | */ | 838 | */ |
839 | switch (bcode) { | 839 | switch (bcode) { |
840 | case BRK_KPROBE_BP: | 840 | case BRK_KPROBE_BP: |
841 | if (notify_die(DIE_BREAK, "debug", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) | 841 | if (notify_die(DIE_BREAK, "debug", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) |
842 | return; | 842 | return; |
843 | else | 843 | else |
844 | break; | 844 | break; |
845 | case BRK_KPROBE_SSTEPBP: | 845 | case BRK_KPROBE_SSTEPBP: |
846 | if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) | 846 | if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) |
847 | return; | 847 | return; |
848 | else | 848 | else |
849 | break; | 849 | break; |
850 | default: | 850 | default: |
851 | break; | 851 | break; |
852 | } | 852 | } |
853 | 853 | ||
854 | do_trap_or_bp(regs, bcode, "Break"); | 854 | do_trap_or_bp(regs, bcode, "Break"); |
855 | return; | 855 | return; |
856 | 856 | ||
857 | out_sigsegv: | 857 | out_sigsegv: |
858 | force_sig(SIGSEGV, current); | 858 | force_sig(SIGSEGV, current); |
859 | } | 859 | } |
860 | 860 | ||
861 | asmlinkage void do_tr(struct pt_regs *regs) | 861 | asmlinkage void do_tr(struct pt_regs *regs) |
862 | { | 862 | { |
863 | unsigned int opcode, tcode = 0; | 863 | unsigned int opcode, tcode = 0; |
864 | 864 | ||
865 | if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) | 865 | if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) |
866 | goto out_sigsegv; | 866 | goto out_sigsegv; |
867 | 867 | ||
868 | /* Immediate versions don't provide a code. */ | 868 | /* Immediate versions don't provide a code. */ |
869 | if (!(opcode & OPCODE)) | 869 | if (!(opcode & OPCODE)) |
870 | tcode = ((opcode >> 6) & ((1 << 10) - 1)); | 870 | tcode = ((opcode >> 6) & ((1 << 10) - 1)); |
871 | 871 | ||
872 | do_trap_or_bp(regs, tcode, "Trap"); | 872 | do_trap_or_bp(regs, tcode, "Trap"); |
873 | return; | 873 | return; |
874 | 874 | ||
875 | out_sigsegv: | 875 | out_sigsegv: |
876 | force_sig(SIGSEGV, current); | 876 | force_sig(SIGSEGV, current); |
877 | } | 877 | } |
878 | 878 | ||
879 | asmlinkage void do_ri(struct pt_regs *regs) | 879 | asmlinkage void do_ri(struct pt_regs *regs) |
880 | { | 880 | { |
881 | unsigned int __user *epc = (unsigned int __user *)exception_epc(regs); | 881 | unsigned int __user *epc = (unsigned int __user *)exception_epc(regs); |
882 | unsigned long old_epc = regs->cp0_epc; | 882 | unsigned long old_epc = regs->cp0_epc; |
883 | unsigned int opcode = 0; | 883 | unsigned int opcode = 0; |
884 | int status = -1; | 884 | int status = -1; |
885 | 885 | ||
886 | if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL) | 886 | if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL) |
887 | == NOTIFY_STOP) | 887 | == NOTIFY_STOP) |
888 | return; | 888 | return; |
889 | 889 | ||
890 | die_if_kernel("Reserved instruction in kernel code", regs); | 890 | die_if_kernel("Reserved instruction in kernel code", regs); |
891 | 891 | ||
892 | if (unlikely(compute_return_epc(regs) < 0)) | 892 | if (unlikely(compute_return_epc(regs) < 0)) |
893 | return; | 893 | return; |
894 | 894 | ||
895 | if (unlikely(get_user(opcode, epc) < 0)) | 895 | if (unlikely(get_user(opcode, epc) < 0)) |
896 | status = SIGSEGV; | 896 | status = SIGSEGV; |
897 | 897 | ||
898 | if (!cpu_has_llsc && status < 0) | 898 | if (!cpu_has_llsc && status < 0) |
899 | status = simulate_llsc(regs, opcode); | 899 | status = simulate_llsc(regs, opcode); |
900 | 900 | ||
901 | if (status < 0) | 901 | if (status < 0) |
902 | status = simulate_rdhwr(regs, opcode); | 902 | status = simulate_rdhwr(regs, opcode); |
903 | 903 | ||
904 | if (status < 0) | 904 | if (status < 0) |
905 | status = simulate_sync(regs, opcode); | 905 | status = simulate_sync(regs, opcode); |
906 | 906 | ||
907 | if (status < 0) | 907 | if (status < 0) |
908 | status = SIGILL; | 908 | status = SIGILL; |
909 | 909 | ||
910 | if (unlikely(status > 0)) { | 910 | if (unlikely(status > 0)) { |
911 | regs->cp0_epc = old_epc; /* Undo skip-over. */ | 911 | regs->cp0_epc = old_epc; /* Undo skip-over. */ |
912 | force_sig(status, current); | 912 | force_sig(status, current); |
913 | } | 913 | } |
914 | } | 914 | } |
915 | 915 | ||
/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads.  If we've
 * emulated more than some threshold number of instructions, force migration to
 * a "CPU" that has FP support.
 */
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
	/* Only act once the per-task emulation count passes the threshold
	 * (a threshold of 0 disables the mechanism entirely). */
	if (mt_fpemul_threshold > 0 &&
	    ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
		/*
		 * If there's no FPU present, or if the application has already
		 * restricted the allowed set to exclude any CPUs with FPUs,
		 * we'll skip the procedure.
		 */
		if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
			cpumask_t tmask;

			/* Remember the user's mask so it can be restored
			 * later, then bind to the FPU-capable subset. */
			current->thread.user_cpus_allowed
				= current->cpus_allowed;
			cpus_and(tmask, current->cpus_allowed,
				mt_fpu_cpumask);
			set_cpus_allowed_ptr(current, &tmask);
			set_thread_flag(TIF_FPUBOUND);
		}
	}
#endif /* CONFIG_MIPS_MT_FPAFF */
}
944 | 944 | ||
/*
 * No lock; only written during early bootup by CPU 0.
 */
/* Notifier chain consulted on coprocessor 2 (CU2) exceptions. */
static RAW_NOTIFIER_HEAD(cu2_chain);

/* Register a handler for CU2 exceptions (e.g. a platform CP2 driver). */
int __ref register_cu2_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cu2_chain, nb);
}

/* Invoke all registered CU2 handlers with the given action and data. */
int cu2_notifier_call_chain(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&cu2_chain, val, v);
}
959 | 959 | ||
/*
 * Fallback CU2 notifier: no platform handler claimed the exception, so
 * treat it as an illegal instruction.  For unknown actions we addition-
 * ally oops if it happened in kernel mode, then fall through to the
 * same SIGILL delivery as CU2_EXCEPTION.
 */
static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
	void *data)
{
	struct pt_regs *regs = data;

	switch (action) {
	default:
		die_if_kernel("Unhandled kernel unaligned access or invalid "
			      "instruction", regs);
		/* Fall through */

	case CU2_EXCEPTION:
		force_sig(SIGILL, current);
	}

	return NOTIFY_OK;
}
977 | 977 | ||
/*
 * Coprocessor unusable exception handler, dispatched on the coprocessor
 * number recorded in the CE field of the Cause register:
 *   CP0 - try the opcode emulators (ll/sc, rdhwr), as some CPUs raise
 *         CpU instead of RI for those;
 *   CP1 - enable/initialize the FPU, or run the FP emulator if the CPU
 *         has no hardware FPU;
 *   CP2 - delegate to the CU2 notifier chain;
 *   CP3 - unsupported, falls through to SIGILL.
 */
asmlinkage void do_cpu(struct pt_regs *regs)
{
	unsigned int __user *epc;
	unsigned long old_epc;
	unsigned int opcode;
	unsigned int cpid;
	int status;
	unsigned long __maybe_unused flags;

	die_if_kernel("do_cpu invoked from kernel context!", regs);

	/* CE field of Cause: which coprocessor raised the exception. */
	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	switch (cpid) {
	case 0:
		epc = (unsigned int __user *)exception_epc(regs);
		old_epc = regs->cp0_epc;
		opcode = 0;
		status = -1;	/* -1 = not handled yet */

		if (unlikely(compute_return_epc(regs) < 0))
			return;

		if (unlikely(get_user(opcode, epc) < 0))
			status = SIGSEGV;

		if (!cpu_has_llsc && status < 0)
			status = simulate_llsc(regs, opcode);

		if (status < 0)
			status = simulate_rdhwr(regs, opcode);

		if (status < 0)
			status = SIGILL;

		if (unlikely(status > 0)) {
			regs->cp0_epc = old_epc;	/* Undo skip-over. */
			force_sig(status, current);
		}

		return;

	case 1:
		if (used_math())	/* Using the FPU again. */
			own_fpu(1);
		else {			/* First time FPU user. */
			init_fpu();
			set_used_math();
		}

		if (!raw_cpu_has_fpu) {
			/* No hardware FPU: emulate the instruction stream. */
			int sig;
			void __user *fault_addr = NULL;
			sig = fpu_emulator_cop1Handler(regs,
						       &current->thread.fpu,
						       0, &fault_addr);
			/* If no signal was delivered, consider migrating to
			 * an FPU-capable CPU (MIPS MT only). */
			if (!process_fpemu_return(sig, fault_addr))
				mt_ase_fp_affinity();
		}

		return;

	case 2:
		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
		return;

	case 3:
		break;
	}

	force_sig(SIGILL, current);
}
1050 | 1050 | ||
/*
 * MDMX (MIPS Digital Media Extension) unusable exception handler.
 *
 * The kernel provides no MDMX support or emulation, so any attempt to
 * use MDMX instructions is reported to the offending task as an
 * illegal instruction.
 */
asmlinkage void do_mdmx(struct pt_regs *regs)
{
	force_sig(SIGILL, current);
}
1055 | 1055 | ||
/*
 * Watch (hardware watchpoint) exception handler.
 *
 * Called with interrupts disabled.
 */
asmlinkage void do_watch(struct pt_regs *regs)
{
	u32 cause;

	/*
	 * Clear WP (bit 22) bit of cause register so we don't loop
	 * forever.
	 */
	cause = read_c0_cause();
	cause &= ~(1 << 22);
	write_c0_cause(cause);

	/*
	 * If the current thread has the watch registers loaded, save
	 * their values and send SIGTRAP.  Otherwise another thread
	 * left the registers set, clear them and continue.
	 */
	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
		/* Latch the register state before re-enabling interrupts. */
		mips_read_watch_registers();
		local_irq_enable();
		force_sig(SIGTRAP, current);
	} else {
		mips_clear_watch_registers();
		local_irq_enable();
	}
}
1085 | 1085 | ||
/*
 * Machine check exception handler.
 *
 * Dumps as much diagnostic state as possible (registers, TLB contents
 * when the ST0_TS "TLB shutdown" bit indicates multiple matching TLB
 * entries, and the code around EPC), then panics: there is no way to
 * recover from a machine check.
 */
asmlinkage void do_mcheck(struct pt_regs *regs)
{
	/* Width (in hex digits) of a native register for %0*lx printing. */
	const int field = 2 * sizeof(unsigned long);
	int multi_match = regs->cp0_status & ST0_TS;

	show_regs(regs);

	if (multi_match) {
		/* Dump the TLB probe state that triggered the check. */
		printk("Index : %0x\n", read_c0_index());
		printk("Pagemask: %0x\n", read_c0_pagemask());
		printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
		printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
		printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
		printk("\n");
		dump_tlb_all();
	}

	show_code((unsigned int __user *) regs->cp0_epc);

	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timer)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
}
1113 | 1113 | ||
1114 | asmlinkage void do_mt(struct pt_regs *regs) | 1114 | asmlinkage void do_mt(struct pt_regs *regs) |
1115 | { | 1115 | { |
1116 | int subcode; | 1116 | int subcode; |
1117 | 1117 | ||
1118 | subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT) | 1118 | subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT) |
1119 | >> VPECONTROL_EXCPT_SHIFT; | 1119 | >> VPECONTROL_EXCPT_SHIFT; |
1120 | switch (subcode) { | 1120 | switch (subcode) { |
1121 | case 0: | 1121 | case 0: |
1122 | printk(KERN_DEBUG "Thread Underflow\n"); | 1122 | printk(KERN_DEBUG "Thread Underflow\n"); |
1123 | break; | 1123 | break; |
1124 | case 1: | 1124 | case 1: |
1125 | printk(KERN_DEBUG "Thread Overflow\n"); | 1125 | printk(KERN_DEBUG "Thread Overflow\n"); |
1126 | break; | 1126 | break; |
1127 | case 2: | 1127 | case 2: |
1128 | printk(KERN_DEBUG "Invalid YIELD Qualifier\n"); | 1128 | printk(KERN_DEBUG "Invalid YIELD Qualifier\n"); |
1129 | break; | 1129 | break; |
1130 | case 3: | 1130 | case 3: |
1131 | printk(KERN_DEBUG "Gating Storage Exception\n"); | 1131 | printk(KERN_DEBUG "Gating Storage Exception\n"); |
1132 | break; | 1132 | break; |
1133 | case 4: | 1133 | case 4: |
1134 | printk(KERN_DEBUG "YIELD Scheduler Exception\n"); | 1134 | printk(KERN_DEBUG "YIELD Scheduler Exception\n"); |
1135 | break; | 1135 | break; |
1136 | case 5: | 1136 | case 5: |
1137 | printk(KERN_DEBUG "Gating Storage Schedulier Exception\n"); | 1137 | printk(KERN_DEBUG "Gating Storage Schedulier Exception\n"); |
1138 | break; | 1138 | break; |
1139 | default: | 1139 | default: |
1140 | printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n", | 1140 | printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n", |
1141 | subcode); | 1141 | subcode); |
1142 | break; | 1142 | break; |
1143 | } | 1143 | } |
1144 | die_if_kernel("MIPS MT Thread exception in kernel", regs); | 1144 | die_if_kernel("MIPS MT Thread exception in kernel", regs); |
1145 | 1145 | ||
1146 | force_sig(SIGILL, current); | 1146 | force_sig(SIGILL, current); |
1147 | } | 1147 | } |
1148 | 1148 | ||
1149 | 1149 | ||
1150 | asmlinkage void do_dsp(struct pt_regs *regs) | 1150 | asmlinkage void do_dsp(struct pt_regs *regs) |
1151 | { | 1151 | { |
1152 | if (cpu_has_dsp) | 1152 | if (cpu_has_dsp) |
1153 | panic("Unexpected DSP exception\n"); | 1153 | panic("Unexpected DSP exception\n"); |
1154 | 1154 | ||
1155 | force_sig(SIGILL, current); | 1155 | force_sig(SIGILL, current); |
1156 | } | 1156 | } |
1157 | 1157 | ||
1158 | asmlinkage void do_reserved(struct pt_regs *regs) | 1158 | asmlinkage void do_reserved(struct pt_regs *regs) |
1159 | { | 1159 | { |
1160 | /* | 1160 | /* |
1161 | * Game over - no way to handle this if it ever occurs. Most probably | 1161 | * Game over - no way to handle this if it ever occurs. Most probably |
1162 | * caused by a new unknown cpu type or after another deadly | 1162 | * caused by a new unknown cpu type or after another deadly |
1163 | * hard/software error. | 1163 | * hard/software error. |
1164 | */ | 1164 | */ |
1165 | show_regs(regs); | 1165 | show_regs(regs); |
1166 | panic("Caught reserved exception %ld - should not happen.", | 1166 | panic("Caught reserved exception %ld - should not happen.", |
1167 | (regs->cp0_cause & 0x7f) >> 2); | 1167 | (regs->cp0_cause & 0x7f) >> 2); |
1168 | } | 1168 | } |
1169 | 1169 | ||
/* L1 cache parity checking defaults to on; "nol1par" on the command line disables it. */
static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
	l1parity = 0;
	return 1;	/* consume the option */
}
__setup("nol1par", nol1parity);
/* L2 cache parity checking defaults to on; "nol2par" on the command line disables it. */
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
	l2parity = 0;
	return 1;	/* consume the option */
}
__setup("nol2par", nol2parity);
1184 | 1184 | ||
/*
 * Some MIPS CPUs can enable/disable for cache parity detection, but do
 * it different ways.
 */
static inline void parity_protection_init(void)
{
	switch (current_cpu_type()) {
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
		{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000
			unsigned long errctl;
			unsigned int l1parity_present, l2parity_present;

			errctl = read_c0_ecc();
			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

			/* probe L1 parity support */
			write_c0_ecc(errctl | ERRCTL_PE);
			back_to_back_c0_hazard();
			l1parity_present = (read_c0_ecc() & ERRCTL_PE);

			/* probe L2 parity support */
			write_c0_ecc(errctl|ERRCTL_L2P);
			back_to_back_c0_hazard();
			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

			if (l1parity_present && l2parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
				/*
				 * With both present, L2P is relative to PE:
				 * set it only when the L2 request differs
				 * from the L1 one.
				 */
				if (l1parity ^ l2parity)
					errctl |= ERRCTL_L2P;
			} else if (l1parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
			} else if (l2parity_present) {
				if (l2parity)
					errctl |= ERRCTL_L2P;
			} else {
				/* No parity available */
			}

			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

			write_c0_ecc(errctl);
			back_to_back_c0_hazard();
			/* Read back what the hardware actually accepted. */
			errctl = read_c0_ecc();
			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

			if (l1parity_present)
				printk(KERN_INFO "Cache parity protection %sabled\n",
				       (errctl & ERRCTL_PE) ? "en" : "dis");

			if (l2parity_present) {
				/* Undo the relative encoding for reporting. */
				if (l1parity_present && l1parity)
					errctl ^= ERRCTL_L2P;
				printk(KERN_INFO "L2 cache parity protection %sabled\n",
				       (errctl & ERRCTL_L2P) ? "en" : "dis");
			}
		}
		break;

	case CPU_5KC:
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}
1268 | 1268 | ||
/*
 * Cache error exception handler: decode c0_cacheerr, report and halt.
 */
asmlinkage void cache_parity_error(void)
{
	/* Width (in hex digits) of a native register for %0*lx printing. */
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	/* Decode the individual error status bits. */
	printk("Error bits: %s%s%s%s%s%s%s\n",
	       reg_val & (1<<29) ? "ED " : "",
	       reg_val & (1<<28) ? "ET " : "",
	       reg_val & (1<<26) ? "EE " : "",
	       reg_val & (1<<25) ? "EB " : "",
	       reg_val & (1<<24) ? "EI " : "",
	       reg_val & (1<<23) ? "E1 " : "",
	       reg_val & (1<<22) ? "E0 " : "");
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}
1303 | 1303 | ||
/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
	/* Width (in hex digits) of a native register for %0*lx printing. */
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc;
	unsigned int debug;

	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC). EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		regs->cp0_epc = depc;
		__compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
	} else
		/* Not in a delay slot: simply step over the SDBBP. */
		depc += 4;
	write_c0_depc(depc);

#if 0
	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}
1339 | 1339 | ||
/*
 * NMI exception handler.  Never returns: dumps state and dies.
 */
NORET_TYPE void ATTRIB_NORET nmi_exception_handler(struct pt_regs *regs)
{
	/* Make sure the console lock cannot block the report. */
	bust_spinlocks(1);
	printk("NMI taken!!!!\n");
	die("NMI", regs);
}
1349 | 1349 | ||
#define VECTORSPACING 0x100	/* for EI/VI mode */

/* Exception vector base address, set up during trap_init(). */
unsigned long ebase;
/* One handler slot per ExcCode (0..31). */
unsigned long exception_handlers[32];
/* Vectored-interrupt handler table. */
unsigned long vi_handlers[64];
1355 | 1355 | ||
/*
 * Install @addr as the handler for exception code @n and return the
 * previous handler.  For exception 0 (interrupt) on CPUs with a
 * dedicated interrupt vector, a trampoline to the handler is also
 * assembled at runtime at ebase + 0x200.
 */
void __init *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler = exception_handlers[n];

	exception_handlers[n] = handler;
	if (n == 0 && cpu_has_divec) {
		/* j <target> can only reach within the same 256MB segment. */
		unsigned long jump_mask = ~((1 << 28) - 1);
		u32 *buf = (u32 *)(ebase + 0x200);
		unsigned int k0 = 26;	/* scratch register $k0 */
		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
			/* In range: a single jump instruction suffices. */
			uasm_i_j(&buf, handler & ~jump_mask);
			uasm_i_nop(&buf);
		} else {
			/* Out of range: load address into $k0, jr $k0. */
			UASM_i_LA(&buf, k0, handler);
			uasm_i_jr(&buf, k0);
			uasm_i_nop(&buf);
		}
		/* The freshly written code must be visible to instruction fetch. */
		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
	}
	return (void *)old_handler;
}
1378 | 1378 | ||
/*
 * Fallback handler installed for vectored interrupts that have no
 * registered handler; hitting it is always a bug.
 */
static asmlinkage void do_default_vi(void)
{
	show_regs(get_irq_regs());
	panic("Caught unexpected vectored interrupt.");
}
1384 | 1384 | ||
/*
 * Install @addr as the handler for vectored interrupt @n using shadow
 * register set @srs, patching the vector code at runtime.  Returns the
 * previous handler.  A NULL @addr installs the catch-all do_default_vi.
 */
static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
	int srssets = current_cpu_data.srsets;
	u32 *w;
	unsigned char *b;

	BUG_ON(!cpu_has_veic && !cpu_has_vint);

	if (addr == NULL) {
		handler = (unsigned long) do_default_vi;
		srs = 0;
	} else
		handler = (unsigned long) addr;
	vi_handlers[n] = (unsigned long) addr;

	/* Address of this interrupt's vector slot. */
	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

	if (srs >= srssets)
		panic("Shadow register set %d not supported", srs);

	if (cpu_has_veic) {
		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt(n, srs);
	} else if (cpu_has_vint) {
		/* SRSMap is only defined if shadow sets are implemented */
		if (srssets > 1)
			change_c0_srsmap(0xf << n*4, srs << n*4);
	}

	if (srs == 0) {
		/*
		 * If no shadow set is selected then use the default handler
		 * that does normal register saving and a standard interrupt exit
		 */

		extern char except_vec_vi, except_vec_vi_lui;
		extern char except_vec_vi_ori, except_vec_vi_end;
		extern char rollback_except_vec_vi;
		char *vec_start = (cpu_wait == r4k_wait) ?
			&rollback_except_vec_vi : &except_vec_vi;
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * We need to provide the SMTC vectored interrupt handler
		 * not only with the address of the handler, but with the
		 * Status.IM bit to be masked before going there.
		 */
		extern char except_vec_vi_mori;
		const int mori_offset = &except_vec_vi_mori - vec_start;
#endif /* CONFIG_MIPS_MT_SMTC */
		/* Offsets of the instructions to patch within the stub. */
		const int handler_len = &except_vec_vi_end - vec_start;
		const int lui_offset = &except_vec_vi_lui - vec_start;
		const int ori_offset = &except_vec_vi_ori - vec_start;

		if (handler_len > VECTORSPACING) {
			/*
			 * Sigh... panicing won't help as the console
			 * is probably not configured :(
			 */
			panic("VECTORSPACING too small");
		}

		memcpy(b, vec_start, handler_len);
#ifdef CONFIG_MIPS_MT_SMTC
		BUG_ON(n > 7);	/* Vector index %d exceeds SMTC maximum. */

		/* Patch the IM mask bit into the mori instruction. */
		w = (u32 *)(b + mori_offset);
		*w = (*w & 0xffff0000) | (0x100 << n);
#endif /* CONFIG_MIPS_MT_SMTC */
		/* Patch the handler address into the lui/ori pair. */
		w = (u32 *)(b + lui_offset);
		*w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
		w = (u32 *)(b + ori_offset);
		*w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+handler_len));
	}
	else {
		/*
		 * In other cases jump directly to the interrupt handler
		 *
		 * It is the handlers responsibility to save registers if required
		 * (eg hi/lo) and return from the exception using "eret"
		 */
		w = (u32 *)b;
		*w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
		*w = 0;	/* nop in the delay slot */
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+8));
	}

	return (void *)old_handler;
}
1478 | 1478 | ||
/*
 * Install a vectored-interrupt handler using shadow register set 0.
 * Returns the previous handler.
 */
void *set_vi_handler(int n, vi_handler_t addr)
{
	return set_vi_srs_handler(n, addr, 0);
}
1483 | 1483 | ||
extern void cpu_cache_init(void);
extern void tlb_init(void);
extern void flush_tlb_handlers(void);

/*
 * Timer interrupt
 */
int cp0_compare_irq;
int cp0_compare_irq_shift;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
1499 | 1499 | ||
/* Non-zero when "noulri" was given: disables RDHWR UserLocal support. */
static int __cpuinitdata noulri;

static int __init ulri_disable(char *s)
{
	pr_info("Disabling ulri\n");
	noulri = 1;

	return 1;	/* consume the option */
}
__setup("noulri", ulri_disable);
1510 | 1510 | ||
1511 | void __cpuinit per_cpu_trap_init(void) | 1511 | void __cpuinit per_cpu_trap_init(void) |
1512 | { | 1512 | { |
1513 | unsigned int cpu = smp_processor_id(); | 1513 | unsigned int cpu = smp_processor_id(); |
1514 | unsigned int status_set = ST0_CU0; | 1514 | unsigned int status_set = ST0_CU0; |
1515 | unsigned int hwrena = cpu_hwrena_impl_bits; | 1515 | unsigned int hwrena = cpu_hwrena_impl_bits; |
1516 | #ifdef CONFIG_MIPS_MT_SMTC | 1516 | #ifdef CONFIG_MIPS_MT_SMTC |
1517 | int secondaryTC = 0; | 1517 | int secondaryTC = 0; |
1518 | int bootTC = (cpu == 0); | 1518 | int bootTC = (cpu == 0); |
1519 | 1519 | ||
1520 | /* | 1520 | /* |
1521 | * Only do per_cpu_trap_init() for first TC of Each VPE. | 1521 | * Only do per_cpu_trap_init() for first TC of Each VPE. |
1522 | * Note that this hack assumes that the SMTC init code | 1522 | * Note that this hack assumes that the SMTC init code |
1523 | * assigns TCs consecutively and in ascending order. | 1523 | * assigns TCs consecutively and in ascending order. |
1524 | */ | 1524 | */ |
1525 | 1525 | ||
1526 | if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && | 1526 | if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && |
1527 | ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id)) | 1527 | ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id)) |
1528 | secondaryTC = 1; | 1528 | secondaryTC = 1; |
1529 | #endif /* CONFIG_MIPS_MT_SMTC */ | 1529 | #endif /* CONFIG_MIPS_MT_SMTC */ |
1530 | 1530 | ||
1531 | /* | 1531 | /* |
1532 | * Disable coprocessors and select 32-bit or 64-bit addressing | 1532 | * Disable coprocessors and select 32-bit or 64-bit addressing |
1533 | * and the 16/32 or 32/32 FPR register model. Reset the BEV | 1533 | * and the 16/32 or 32/32 FPR register model. Reset the BEV |
1534 | * flag that some firmware may have left set and the TS bit (for | 1534 | * flag that some firmware may have left set and the TS bit (for |
1535 | * IP27). Set XX for ISA IV code to work. | 1535 | * IP27). Set XX for ISA IV code to work. |
1536 | */ | 1536 | */ |
1537 | #ifdef CONFIG_64BIT | 1537 | #ifdef CONFIG_64BIT |
1538 | status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; | 1538 | status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; |
1539 | #endif | 1539 | #endif |
1540 | if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV) | 1540 | if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV) |
1541 | status_set |= ST0_XX; | 1541 | status_set |= ST0_XX; |
1542 | if (cpu_has_dsp) | 1542 | if (cpu_has_dsp) |
1543 | status_set |= ST0_MX; | 1543 | status_set |= ST0_MX; |
1544 | 1544 | ||
1545 | change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, | 1545 | change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, |
1546 | status_set); | 1546 | status_set); |
1547 | 1547 | ||
1548 | if (cpu_has_mips_r2) | 1548 | if (cpu_has_mips_r2) |
1549 | hwrena |= 0x0000000f; | 1549 | hwrena |= 0x0000000f; |
1550 | 1550 | ||
1551 | if (!noulri && cpu_has_userlocal) | 1551 | if (!noulri && cpu_has_userlocal) |
1552 | hwrena |= (1 << 29); | 1552 | hwrena |= (1 << 29); |
1553 | 1553 | ||
1554 | if (hwrena) | 1554 | if (hwrena) |
1555 | write_c0_hwrena(hwrena); | 1555 | write_c0_hwrena(hwrena); |
1556 | 1556 | ||
1557 | #ifdef CONFIG_MIPS_MT_SMTC | 1557 | #ifdef CONFIG_MIPS_MT_SMTC |
1558 | if (!secondaryTC) { | 1558 | if (!secondaryTC) { |
1559 | #endif /* CONFIG_MIPS_MT_SMTC */ | 1559 | #endif /* CONFIG_MIPS_MT_SMTC */ |
1560 | 1560 | ||
1561 | if (cpu_has_veic || cpu_has_vint) { | 1561 | if (cpu_has_veic || cpu_has_vint) { |
1562 | unsigned long sr = set_c0_status(ST0_BEV); | 1562 | unsigned long sr = set_c0_status(ST0_BEV); |
1563 | write_c0_ebase(ebase); | 1563 | write_c0_ebase(ebase); |
1564 | write_c0_status(sr); | 1564 | write_c0_status(sr); |
1565 | /* Setting vector spacing enables EI/VI mode */ | 1565 | /* Setting vector spacing enables EI/VI mode */ |
1566 | change_c0_intctl(0x3e0, VECTORSPACING); | 1566 | change_c0_intctl(0x3e0, VECTORSPACING); |
1567 | } | 1567 | } |
1568 | if (cpu_has_divec) { | 1568 | if (cpu_has_divec) { |
1569 | if (cpu_has_mipsmt) { | 1569 | if (cpu_has_mipsmt) { |
1570 | unsigned int vpflags = dvpe(); | 1570 | unsigned int vpflags = dvpe(); |
1571 | set_c0_cause(CAUSEF_IV); | 1571 | set_c0_cause(CAUSEF_IV); |
1572 | evpe(vpflags); | 1572 | evpe(vpflags); |
1573 | } else | 1573 | } else |
1574 | set_c0_cause(CAUSEF_IV); | 1574 | set_c0_cause(CAUSEF_IV); |
1575 | } | 1575 | } |
1576 | 1576 | ||
1577 | /* | 1577 | /* |
1578 | * Before R2 both interrupt numbers were fixed to 7, so on R2 only: | 1578 | * Before R2 both interrupt numbers were fixed to 7, so on R2 only: |
1579 | * | 1579 | * |
1580 | * o read IntCtl.IPTI to determine the timer interrupt | 1580 | * o read IntCtl.IPTI to determine the timer interrupt |
1581 | * o read IntCtl.IPPCI to determine the performance counter interrupt | 1581 | * o read IntCtl.IPPCI to determine the performance counter interrupt |
1582 | */ | 1582 | */ |
1583 | if (cpu_has_mips_r2) { | 1583 | if (cpu_has_mips_r2) { |
1584 | cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; | 1584 | cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; |
1585 | cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; | 1585 | cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; |
1586 | cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; | 1586 | cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; |
1587 | if (cp0_perfcount_irq == cp0_compare_irq) | 1587 | if (cp0_perfcount_irq == cp0_compare_irq) |
1588 | cp0_perfcount_irq = -1; | 1588 | cp0_perfcount_irq = -1; |
1589 | } else { | 1589 | } else { |
1590 | cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ; | 1590 | cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ; |
1591 | cp0_compare_irq_shift = cp0_compare_irq; | 1591 | cp0_compare_irq_shift = cp0_compare_irq; |
1592 | cp0_perfcount_irq = -1; | 1592 | cp0_perfcount_irq = -1; |
1593 | } | 1593 | } |
1594 | 1594 | ||
1595 | #ifdef CONFIG_MIPS_MT_SMTC | 1595 | #ifdef CONFIG_MIPS_MT_SMTC |
1596 | } | 1596 | } |
1597 | #endif /* CONFIG_MIPS_MT_SMTC */ | 1597 | #endif /* CONFIG_MIPS_MT_SMTC */ |
1598 | 1598 | ||
1599 | cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; | 1599 | if (!cpu_data[cpu].asid_cache) |
1600 | cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; | ||
1600 | 1601 | ||
1601 | atomic_inc(&init_mm.mm_count); | 1602 | atomic_inc(&init_mm.mm_count); |
1602 | current->active_mm = &init_mm; | 1603 | current->active_mm = &init_mm; |
1603 | BUG_ON(current->mm); | 1604 | BUG_ON(current->mm); |
1604 | enter_lazy_tlb(&init_mm, current); | 1605 | enter_lazy_tlb(&init_mm, current); |
1605 | 1606 | ||
1606 | #ifdef CONFIG_MIPS_MT_SMTC | 1607 | #ifdef CONFIG_MIPS_MT_SMTC |
1607 | if (bootTC) { | 1608 | if (bootTC) { |
1608 | #endif /* CONFIG_MIPS_MT_SMTC */ | 1609 | #endif /* CONFIG_MIPS_MT_SMTC */ |
1609 | cpu_cache_init(); | 1610 | cpu_cache_init(); |
1610 | tlb_init(); | 1611 | tlb_init(); |
1611 | #ifdef CONFIG_MIPS_MT_SMTC | 1612 | #ifdef CONFIG_MIPS_MT_SMTC |
1612 | } else if (!secondaryTC) { | 1613 | } else if (!secondaryTC) { |
1613 | /* | 1614 | /* |
1614 | * First TC in non-boot VPE must do subset of tlb_init() | 1615 | * First TC in non-boot VPE must do subset of tlb_init() |
1614 | * for MMU control registers. | 1615 | * for MMU control registers. |
1616 | */ | 1617 | */ |
1617 | write_c0_pagemask(PM_DEFAULT_MASK); | 1618 | write_c0_pagemask(PM_DEFAULT_MASK); |
1618 | write_c0_wired(0); | 1619 | write_c0_wired(0); |
1619 | } | 1620 | } |
1620 | #endif /* CONFIG_MIPS_MT_SMTC */ | 1621 | #endif /* CONFIG_MIPS_MT_SMTC */ |
1621 | TLBMISS_HANDLER_SETUP(); | 1622 | TLBMISS_HANDLER_SETUP(); |
1622 | } | 1623 | } |
1623 | 1624 | ||
1624 | /* Install CPU exception handler */ | 1625 | /* Install CPU exception handler */ |
1625 | void __init set_handler(unsigned long offset, void *addr, unsigned long size) | 1626 | void __init set_handler(unsigned long offset, void *addr, unsigned long size) |
1626 | { | 1627 | { |
1627 | memcpy((void *)(ebase + offset), addr, size); | 1628 | memcpy((void *)(ebase + offset), addr, size); |
1628 | local_flush_icache_range(ebase + offset, ebase + offset + size); | 1629 | local_flush_icache_range(ebase + offset, ebase + offset + size); |
1629 | } | 1630 | } |
1630 | 1631 | ||
1631 | static char panic_null_cerr[] __cpuinitdata = | 1632 | static char panic_null_cerr[] __cpuinitdata = |
1632 | "Trying to set NULL cache error exception handler"; | 1633 | "Trying to set NULL cache error exception handler"; |
1633 | 1634 | ||
1634 | /* | 1635 | /* |
1635 | * Install uncached CPU exception handler. | 1636 | * Install uncached CPU exception handler. |
1636 | * This is suitable only for the cache error exception which is the only | 1637 | * This is suitable only for the cache error exception which is the only |
1637 | * exception handler that is being run uncached. | 1638 | * exception handler that is being run uncached. |
1638 | */ | 1639 | */ |
1639 | void __cpuinit set_uncached_handler(unsigned long offset, void *addr, | 1640 | void __cpuinit set_uncached_handler(unsigned long offset, void *addr, |
1640 | unsigned long size) | 1641 | unsigned long size) |
1641 | { | 1642 | { |
1642 | unsigned long uncached_ebase = CKSEG1ADDR(ebase); | 1643 | unsigned long uncached_ebase = CKSEG1ADDR(ebase); |
1643 | 1644 | ||
1644 | if (!addr) | 1645 | if (!addr) |
1645 | panic(panic_null_cerr); | 1646 | panic(panic_null_cerr); |
1646 | 1647 | ||
1647 | memcpy((void *)(uncached_ebase + offset), addr, size); | 1648 | memcpy((void *)(uncached_ebase + offset), addr, size); |
1648 | } | 1649 | } |
1649 | 1650 | ||
1650 | static int __initdata rdhwr_noopt; | 1651 | static int __initdata rdhwr_noopt; |
1651 | static int __init set_rdhwr_noopt(char *str) | 1652 | static int __init set_rdhwr_noopt(char *str) |
1652 | { | 1653 | { |
1653 | rdhwr_noopt = 1; | 1654 | rdhwr_noopt = 1; |
1654 | return 1; | 1655 | return 1; |
1655 | } | 1656 | } |
1656 | 1657 | ||
1657 | __setup("rdhwr_noopt", set_rdhwr_noopt); | 1658 | __setup("rdhwr_noopt", set_rdhwr_noopt); |
1658 | 1659 | ||
1659 | void __init trap_init(void) | 1660 | void __init trap_init(void) |
1660 | { | 1661 | { |
1661 | extern char except_vec3_generic, except_vec3_r4000; | 1662 | extern char except_vec3_generic, except_vec3_r4000; |
1662 | extern char except_vec4; | 1663 | extern char except_vec4; |
1663 | unsigned long i; | 1664 | unsigned long i; |
1664 | int rollback; | 1665 | int rollback; |
1665 | 1666 | ||
1666 | check_wait(); | 1667 | check_wait(); |
1667 | rollback = (cpu_wait == r4k_wait); | 1668 | rollback = (cpu_wait == r4k_wait); |
1668 | 1669 | ||
1669 | #if defined(CONFIG_KGDB) | 1670 | #if defined(CONFIG_KGDB) |
1670 | if (kgdb_early_setup) | 1671 | if (kgdb_early_setup) |
1671 | return; /* Already done */ | 1672 | return; /* Already done */ |
1672 | #endif | 1673 | #endif |
1673 | 1674 | ||
1674 | if (cpu_has_veic || cpu_has_vint) { | 1675 | if (cpu_has_veic || cpu_has_vint) { |
1675 | unsigned long size = 0x200 + VECTORSPACING*64; | 1676 | unsigned long size = 0x200 + VECTORSPACING*64; |
1676 | ebase = (unsigned long) | 1677 | ebase = (unsigned long) |
1677 | __alloc_bootmem(size, 1 << fls(size), 0); | 1678 | __alloc_bootmem(size, 1 << fls(size), 0); |
1678 | } else { | 1679 | } else { |
1679 | ebase = CKSEG0; | 1680 | ebase = CKSEG0; |
1680 | if (cpu_has_mips_r2) | 1681 | if (cpu_has_mips_r2) |
1681 | ebase += (read_c0_ebase() & 0x3ffff000); | 1682 | ebase += (read_c0_ebase() & 0x3ffff000); |
1682 | } | 1683 | } |
1683 | 1684 | ||
1684 | per_cpu_trap_init(); | 1685 | per_cpu_trap_init(); |
1685 | 1686 | ||
1686 | /* | 1687 | /* |
1687 | * Copy the generic exception handlers to their final destination. | 1688 | * Copy the generic exception handlers to their final destination. |
1688 | * This will be overridden later as suitable for a particular | 1689 | * This will be overridden later as suitable for a particular |
1689 | * configuration. | 1690 | * configuration. |
1690 | */ | 1691 | */ |
1691 | set_handler(0x180, &except_vec3_generic, 0x80); | 1692 | set_handler(0x180, &except_vec3_generic, 0x80); |
1692 | 1693 | ||
1693 | /* | 1694 | /* |
1694 | * Setup default vectors | 1695 | * Setup default vectors |
1695 | */ | 1696 | */ |
1696 | for (i = 0; i <= 31; i++) | 1697 | for (i = 0; i <= 31; i++) |
1697 | set_except_vector(i, handle_reserved); | 1698 | set_except_vector(i, handle_reserved); |
1698 | 1699 | ||
1699 | /* | 1700 | /* |
1700 | * Copy the EJTAG debug exception vector handler code to its final | 1701 | * Copy the EJTAG debug exception vector handler code to its final |
1701 | * destination. | 1702 | * destination. |
1702 | */ | 1703 | */ |
1703 | if (cpu_has_ejtag && board_ejtag_handler_setup) | 1704 | if (cpu_has_ejtag && board_ejtag_handler_setup) |
1704 | board_ejtag_handler_setup(); | 1705 | board_ejtag_handler_setup(); |
1705 | 1706 | ||
1706 | /* | 1707 | /* |
1707 | * Only some CPUs have the watch exceptions. | 1708 | * Only some CPUs have the watch exceptions. |
1708 | */ | 1709 | */ |
1709 | if (cpu_has_watch) | 1710 | if (cpu_has_watch) |
1710 | set_except_vector(23, handle_watch); | 1711 | set_except_vector(23, handle_watch); |
1711 | 1712 | ||
1712 | /* | 1713 | /* |
1713 | * Initialise interrupt handlers | 1714 | * Initialise interrupt handlers |
1714 | */ | 1715 | */ |
1715 | if (cpu_has_veic || cpu_has_vint) { | 1716 | if (cpu_has_veic || cpu_has_vint) { |
1716 | int nvec = cpu_has_veic ? 64 : 8; | 1717 | int nvec = cpu_has_veic ? 64 : 8; |
1717 | for (i = 0; i < nvec; i++) | 1718 | for (i = 0; i < nvec; i++) |
1718 | set_vi_handler(i, NULL); | 1719 | set_vi_handler(i, NULL); |
1719 | } | 1720 | } |
1720 | else if (cpu_has_divec) | 1721 | else if (cpu_has_divec) |
1721 | set_handler(0x200, &except_vec4, 0x8); | 1722 | set_handler(0x200, &except_vec4, 0x8); |
1722 | 1723 | ||
1723 | /* | 1724 | /* |
1724 | * Some CPUs can enable/disable cache parity detection, but do | 1725 | * Some CPUs can enable/disable cache parity detection, but do |
1725 | * it in different ways. | 1726 | * it in different ways. |
1726 | */ | 1727 | */ |
1727 | parity_protection_init(); | 1728 | parity_protection_init(); |
1728 | 1729 | ||
1729 | /* | 1730 | /* |
1730 | * The Data Bus Errors / Instruction Bus Errors are signaled | 1731 | * The Data Bus Errors / Instruction Bus Errors are signaled |
1731 | * by external hardware. Therefore these two exceptions | 1732 | * by external hardware. Therefore these two exceptions |
1732 | * may have board specific handlers. | 1733 | * may have board specific handlers. |
1733 | */ | 1734 | */ |
1734 | if (board_be_init) | 1735 | if (board_be_init) |
1735 | board_be_init(); | 1736 | board_be_init(); |
1736 | 1737 | ||
1737 | set_except_vector(0, rollback ? rollback_handle_int : handle_int); | 1738 | set_except_vector(0, rollback ? rollback_handle_int : handle_int); |
1738 | set_except_vector(1, handle_tlbm); | 1739 | set_except_vector(1, handle_tlbm); |
1739 | set_except_vector(2, handle_tlbl); | 1740 | set_except_vector(2, handle_tlbl); |
1740 | set_except_vector(3, handle_tlbs); | 1741 | set_except_vector(3, handle_tlbs); |
1741 | 1742 | ||
1742 | set_except_vector(4, handle_adel); | 1743 | set_except_vector(4, handle_adel); |
1743 | set_except_vector(5, handle_ades); | 1744 | set_except_vector(5, handle_ades); |
1744 | 1745 | ||
1745 | set_except_vector(6, handle_ibe); | 1746 | set_except_vector(6, handle_ibe); |
1746 | set_except_vector(7, handle_dbe); | 1747 | set_except_vector(7, handle_dbe); |
1747 | 1748 | ||
1748 | set_except_vector(8, handle_sys); | 1749 | set_except_vector(8, handle_sys); |
1749 | set_except_vector(9, handle_bp); | 1750 | set_except_vector(9, handle_bp); |
1750 | set_except_vector(10, rdhwr_noopt ? handle_ri : | 1751 | set_except_vector(10, rdhwr_noopt ? handle_ri : |
1751 | (cpu_has_vtag_icache ? | 1752 | (cpu_has_vtag_icache ? |
1752 | handle_ri_rdhwr_vivt : handle_ri_rdhwr)); | 1753 | handle_ri_rdhwr_vivt : handle_ri_rdhwr)); |
1753 | set_except_vector(11, handle_cpu); | 1754 | set_except_vector(11, handle_cpu); |
1754 | set_except_vector(12, handle_ov); | 1755 | set_except_vector(12, handle_ov); |
1755 | set_except_vector(13, handle_tr); | 1756 | set_except_vector(13, handle_tr); |
1756 | 1757 | ||
1757 | if (current_cpu_type() == CPU_R6000 || | 1758 | if (current_cpu_type() == CPU_R6000 || |
1758 | current_cpu_type() == CPU_R6000A) { | 1759 | current_cpu_type() == CPU_R6000A) { |
1759 | /* | 1760 | /* |
1760 | * The R6000 is the only R-series CPU that features a machine | 1761 | * The R6000 is the only R-series CPU that features a machine |
1761 | * check exception (similar to the R4000 cache error) and | 1762 | * check exception (similar to the R4000 cache error) and |
1762 | * unaligned ldc1/sdc1 exception. The handlers have not been | 1763 | * unaligned ldc1/sdc1 exception. The handlers have not been |
1763 | * written yet. Well, anyway there is no R6000 machine on the | 1764 | * written yet. Well, anyway there is no R6000 machine on the |
1764 | * current list of targets for Linux/MIPS. | 1765 | * current list of targets for Linux/MIPS. |
1765 | * (Duh, crap, there is someone with a triple R6k machine) | 1766 | * (Duh, crap, there is someone with a triple R6k machine) |
1766 | */ | 1767 | */ |
1767 | //set_except_vector(14, handle_mc); | 1768 | //set_except_vector(14, handle_mc); |
1768 | //set_except_vector(15, handle_ndc); | 1769 | //set_except_vector(15, handle_ndc); |
1769 | } | 1770 | } |
1770 | 1771 | ||
1771 | 1772 | ||
1772 | if (board_nmi_handler_setup) | 1773 | if (board_nmi_handler_setup) |
1773 | board_nmi_handler_setup(); | 1774 | board_nmi_handler_setup(); |
1774 | 1775 | ||
1775 | if (cpu_has_fpu && !cpu_has_nofpuex) | 1776 | if (cpu_has_fpu && !cpu_has_nofpuex) |
1776 | set_except_vector(15, handle_fpe); | 1777 | set_except_vector(15, handle_fpe); |
1777 | 1778 | ||
1778 | set_except_vector(22, handle_mdmx); | 1779 | set_except_vector(22, handle_mdmx); |
1779 | 1780 | ||
1780 | if (cpu_has_mcheck) | 1781 | if (cpu_has_mcheck) |
1781 | set_except_vector(24, handle_mcheck); | 1782 | set_except_vector(24, handle_mcheck); |
1782 | 1783 | ||
1783 | if (cpu_has_mipsmt) | 1784 | if (cpu_has_mipsmt) |
1784 | set_except_vector(25, handle_mt); | 1785 | set_except_vector(25, handle_mt); |
1785 | 1786 | ||
1786 | set_except_vector(26, handle_dsp); | 1787 | set_except_vector(26, handle_dsp); |
1787 | 1788 | ||
1788 | if (cpu_has_vce) | 1789 | if (cpu_has_vce) |
1789 | /* Special exception: R4[04]00 uses also the divec space. */ | 1790 | /* Special exception: R4[04]00 uses also the divec space. */ |
1790 | memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100); | 1791 | memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100); |
1791 | else if (cpu_has_4kex) | 1792 | else if (cpu_has_4kex) |
1792 | memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80); | 1793 | memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80); |
1793 | else | 1794 | else |
1794 | memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80); | 1795 | memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80); |
1795 | 1796 | ||
1796 | local_flush_icache_range(ebase, ebase + 0x400); | 1797 | local_flush_icache_range(ebase, ebase + 0x400); |
1797 | flush_tlb_handlers(); | 1798 | flush_tlb_handlers(); |
1798 | 1799 | ||
1799 | sort_extable(__start___dbe_table, __stop___dbe_table); | 1800 | sort_extable(__start___dbe_table, __stop___dbe_table); |
1800 | 1801 | ||
1801 | cu2_notifier(default_cu2_call, 0x80000000); /* Run last */ | 1802 | cu2_notifier(default_cu2_call, 0x80000000); /* Run last */ |
1802 | } | 1803 | } |
1803 | 1804 |