Commit 6d888d1ab0000dff8ea2901bcdf5d213f2a54e8b
Committed by Benjamin Herrenschmidt
1 parent: 84b073868b
Exists in smarc-imx_3.14.28_1.0.0_ga and in 1 other branch
powerpc: Only print PACATMSCRATCH in oops when TM is active
If TM is not active there is no need to print PACATMSCRATCH so we can save ourselves a line.

Signed-off-by: Anton Blanchard <anton@samba.org>
Acked-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
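The net effect in show_regs() is that the PACATMSCRATCH value is only dumped when the saved regs->msr shows a transaction in progress. A short excerpt, paraphrased from the diff below (the guard reuses the existing MSR_TM_ACTIVE() helper):

	#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		/* Only worth printing when a transaction was active at the time of the oops */
		if (MSR_TM_ACTIVE(regs->msr))
			printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
	#endif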
Showing 1 changed file with 2 additions and 1 deletion (inline diff)
arch/powerpc/kernel/process.c
1 | /* | 1 | /* |
2 | * Derived from "arch/i386/kernel/process.c" | 2 | * Derived from "arch/i386/kernel/process.c" |
3 | * Copyright (C) 1995 Linus Torvalds | 3 | * Copyright (C) 1995 Linus Torvalds |
4 | * | 4 | * |
5 | * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and | 5 | * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and |
6 | * Paul Mackerras (paulus@cs.anu.edu.au) | 6 | * Paul Mackerras (paulus@cs.anu.edu.au) |
7 | * | 7 | * |
8 | * PowerPC version | 8 | * PowerPC version |
9 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | 9 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or | 11 | * This program is free software; you can redistribute it and/or |
12 | * modify it under the terms of the GNU General Public License | 12 | * modify it under the terms of the GNU General Public License |
13 | * as published by the Free Software Foundation; either version | 13 | * as published by the Free Software Foundation; either version |
14 | * 2 of the License, or (at your option) any later version. | 14 | * 2 of the License, or (at your option) any later version. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/errno.h> | 17 | #include <linux/errno.h> |
18 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/mm.h> | 20 | #include <linux/mm.h> |
21 | #include <linux/smp.h> | 21 | #include <linux/smp.h> |
22 | #include <linux/stddef.h> | 22 | #include <linux/stddef.h> |
23 | #include <linux/unistd.h> | 23 | #include <linux/unistd.h> |
24 | #include <linux/ptrace.h> | 24 | #include <linux/ptrace.h> |
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | #include <linux/user.h> | 26 | #include <linux/user.h> |
27 | #include <linux/elf.h> | 27 | #include <linux/elf.h> |
28 | #include <linux/init.h> | 28 | #include <linux/init.h> |
29 | #include <linux/prctl.h> | 29 | #include <linux/prctl.h> |
30 | #include <linux/init_task.h> | 30 | #include <linux/init_task.h> |
31 | #include <linux/export.h> | 31 | #include <linux/export.h> |
32 | #include <linux/kallsyms.h> | 32 | #include <linux/kallsyms.h> |
33 | #include <linux/mqueue.h> | 33 | #include <linux/mqueue.h> |
34 | #include <linux/hardirq.h> | 34 | #include <linux/hardirq.h> |
35 | #include <linux/utsname.h> | 35 | #include <linux/utsname.h> |
36 | #include <linux/ftrace.h> | 36 | #include <linux/ftrace.h> |
37 | #include <linux/kernel_stat.h> | 37 | #include <linux/kernel_stat.h> |
38 | #include <linux/personality.h> | 38 | #include <linux/personality.h> |
39 | #include <linux/random.h> | 39 | #include <linux/random.h> |
40 | #include <linux/hw_breakpoint.h> | 40 | #include <linux/hw_breakpoint.h> |
41 | 41 | ||
42 | #include <asm/pgtable.h> | 42 | #include <asm/pgtable.h> |
43 | #include <asm/uaccess.h> | 43 | #include <asm/uaccess.h> |
44 | #include <asm/io.h> | 44 | #include <asm/io.h> |
45 | #include <asm/processor.h> | 45 | #include <asm/processor.h> |
46 | #include <asm/mmu.h> | 46 | #include <asm/mmu.h> |
47 | #include <asm/prom.h> | 47 | #include <asm/prom.h> |
48 | #include <asm/machdep.h> | 48 | #include <asm/machdep.h> |
49 | #include <asm/time.h> | 49 | #include <asm/time.h> |
50 | #include <asm/runlatch.h> | 50 | #include <asm/runlatch.h> |
51 | #include <asm/syscalls.h> | 51 | #include <asm/syscalls.h> |
52 | #include <asm/switch_to.h> | 52 | #include <asm/switch_to.h> |
53 | #include <asm/tm.h> | 53 | #include <asm/tm.h> |
54 | #include <asm/debug.h> | 54 | #include <asm/debug.h> |
55 | #ifdef CONFIG_PPC64 | 55 | #ifdef CONFIG_PPC64 |
56 | #include <asm/firmware.h> | 56 | #include <asm/firmware.h> |
57 | #endif | 57 | #endif |
58 | #include <linux/kprobes.h> | 58 | #include <linux/kprobes.h> |
59 | #include <linux/kdebug.h> | 59 | #include <linux/kdebug.h> |
60 | 60 | ||
61 | /* Transactional Memory debug */ | 61 | /* Transactional Memory debug */ |
62 | #ifdef TM_DEBUG_SW | 62 | #ifdef TM_DEBUG_SW |
63 | #define TM_DEBUG(x...) printk(KERN_INFO x) | 63 | #define TM_DEBUG(x...) printk(KERN_INFO x) |
64 | #else | 64 | #else |
65 | #define TM_DEBUG(x...) do { } while(0) | 65 | #define TM_DEBUG(x...) do { } while(0) |
66 | #endif | 66 | #endif |
67 | 67 | ||
68 | extern unsigned long _get_SP(void); | 68 | extern unsigned long _get_SP(void); |
69 | 69 | ||
70 | #ifndef CONFIG_SMP | 70 | #ifndef CONFIG_SMP |
71 | struct task_struct *last_task_used_math = NULL; | 71 | struct task_struct *last_task_used_math = NULL; |
72 | struct task_struct *last_task_used_altivec = NULL; | 72 | struct task_struct *last_task_used_altivec = NULL; |
73 | struct task_struct *last_task_used_vsx = NULL; | 73 | struct task_struct *last_task_used_vsx = NULL; |
74 | struct task_struct *last_task_used_spe = NULL; | 74 | struct task_struct *last_task_used_spe = NULL; |
75 | #endif | 75 | #endif |
76 | 76 | ||
77 | #ifdef CONFIG_PPC_FPU | 77 | #ifdef CONFIG_PPC_FPU |
78 | /* | 78 | /* |
79 | * Make sure the floating-point register state in the | 79 | * Make sure the floating-point register state in the |
80 | * the thread_struct is up to date for task tsk. | 80 | * the thread_struct is up to date for task tsk. |
81 | */ | 81 | */ |
82 | void flush_fp_to_thread(struct task_struct *tsk) | 82 | void flush_fp_to_thread(struct task_struct *tsk) |
83 | { | 83 | { |
84 | if (tsk->thread.regs) { | 84 | if (tsk->thread.regs) { |
85 | /* | 85 | /* |
86 | * We need to disable preemption here because if we didn't, | 86 | * We need to disable preemption here because if we didn't, |
87 | * another process could get scheduled after the regs->msr | 87 | * another process could get scheduled after the regs->msr |
88 | * test but before we have finished saving the FP registers | 88 | * test but before we have finished saving the FP registers |
89 | * to the thread_struct. That process could take over the | 89 | * to the thread_struct. That process could take over the |
90 | * FPU, and then when we get scheduled again we would store | 90 | * FPU, and then when we get scheduled again we would store |
91 | * bogus values for the remaining FP registers. | 91 | * bogus values for the remaining FP registers. |
92 | */ | 92 | */ |
93 | preempt_disable(); | 93 | preempt_disable(); |
94 | if (tsk->thread.regs->msr & MSR_FP) { | 94 | if (tsk->thread.regs->msr & MSR_FP) { |
95 | #ifdef CONFIG_SMP | 95 | #ifdef CONFIG_SMP |
96 | /* | 96 | /* |
97 | * This should only ever be called for current or | 97 | * This should only ever be called for current or |
98 | * for a stopped child process. Since we save away | 98 | * for a stopped child process. Since we save away |
99 | * the FP register state on context switch on SMP, | 99 | * the FP register state on context switch on SMP, |
100 | * there is something wrong if a stopped child appears | 100 | * there is something wrong if a stopped child appears |
101 | * to still have its FP state in the CPU registers. | 101 | * to still have its FP state in the CPU registers. |
102 | */ | 102 | */ |
103 | BUG_ON(tsk != current); | 103 | BUG_ON(tsk != current); |
104 | #endif | 104 | #endif |
105 | giveup_fpu(tsk); | 105 | giveup_fpu(tsk); |
106 | } | 106 | } |
107 | preempt_enable(); | 107 | preempt_enable(); |
108 | } | 108 | } |
109 | } | 109 | } |
110 | EXPORT_SYMBOL_GPL(flush_fp_to_thread); | 110 | EXPORT_SYMBOL_GPL(flush_fp_to_thread); |
111 | #endif | 111 | #endif |
112 | 112 | ||
113 | void enable_kernel_fp(void) | 113 | void enable_kernel_fp(void) |
114 | { | 114 | { |
115 | WARN_ON(preemptible()); | 115 | WARN_ON(preemptible()); |
116 | 116 | ||
117 | #ifdef CONFIG_SMP | 117 | #ifdef CONFIG_SMP |
118 | if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) | 118 | if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) |
119 | giveup_fpu(current); | 119 | giveup_fpu(current); |
120 | else | 120 | else |
121 | giveup_fpu(NULL); /* just enables FP for kernel */ | 121 | giveup_fpu(NULL); /* just enables FP for kernel */ |
122 | #else | 122 | #else |
123 | giveup_fpu(last_task_used_math); | 123 | giveup_fpu(last_task_used_math); |
124 | #endif /* CONFIG_SMP */ | 124 | #endif /* CONFIG_SMP */ |
125 | } | 125 | } |
126 | EXPORT_SYMBOL(enable_kernel_fp); | 126 | EXPORT_SYMBOL(enable_kernel_fp); |
127 | 127 | ||
128 | #ifdef CONFIG_ALTIVEC | 128 | #ifdef CONFIG_ALTIVEC |
129 | void enable_kernel_altivec(void) | 129 | void enable_kernel_altivec(void) |
130 | { | 130 | { |
131 | WARN_ON(preemptible()); | 131 | WARN_ON(preemptible()); |
132 | 132 | ||
133 | #ifdef CONFIG_SMP | 133 | #ifdef CONFIG_SMP |
134 | if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) | 134 | if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) |
135 | giveup_altivec(current); | 135 | giveup_altivec(current); |
136 | else | 136 | else |
137 | giveup_altivec_notask(); | 137 | giveup_altivec_notask(); |
138 | #else | 138 | #else |
139 | giveup_altivec(last_task_used_altivec); | 139 | giveup_altivec(last_task_used_altivec); |
140 | #endif /* CONFIG_SMP */ | 140 | #endif /* CONFIG_SMP */ |
141 | } | 141 | } |
142 | EXPORT_SYMBOL(enable_kernel_altivec); | 142 | EXPORT_SYMBOL(enable_kernel_altivec); |
143 | 143 | ||
144 | /* | 144 | /* |
145 | * Make sure the VMX/Altivec register state in the | 145 | * Make sure the VMX/Altivec register state in the |
146 | * the thread_struct is up to date for task tsk. | 146 | * the thread_struct is up to date for task tsk. |
147 | */ | 147 | */ |
148 | void flush_altivec_to_thread(struct task_struct *tsk) | 148 | void flush_altivec_to_thread(struct task_struct *tsk) |
149 | { | 149 | { |
150 | if (tsk->thread.regs) { | 150 | if (tsk->thread.regs) { |
151 | preempt_disable(); | 151 | preempt_disable(); |
152 | if (tsk->thread.regs->msr & MSR_VEC) { | 152 | if (tsk->thread.regs->msr & MSR_VEC) { |
153 | #ifdef CONFIG_SMP | 153 | #ifdef CONFIG_SMP |
154 | BUG_ON(tsk != current); | 154 | BUG_ON(tsk != current); |
155 | #endif | 155 | #endif |
156 | giveup_altivec(tsk); | 156 | giveup_altivec(tsk); |
157 | } | 157 | } |
158 | preempt_enable(); | 158 | preempt_enable(); |
159 | } | 159 | } |
160 | } | 160 | } |
161 | EXPORT_SYMBOL_GPL(flush_altivec_to_thread); | 161 | EXPORT_SYMBOL_GPL(flush_altivec_to_thread); |
162 | #endif /* CONFIG_ALTIVEC */ | 162 | #endif /* CONFIG_ALTIVEC */ |
163 | 163 | ||
164 | #ifdef CONFIG_VSX | 164 | #ifdef CONFIG_VSX |
165 | #if 0 | 165 | #if 0 |
166 | /* not currently used, but some crazy RAID module might want to later */ | 166 | /* not currently used, but some crazy RAID module might want to later */ |
167 | void enable_kernel_vsx(void) | 167 | void enable_kernel_vsx(void) |
168 | { | 168 | { |
169 | WARN_ON(preemptible()); | 169 | WARN_ON(preemptible()); |
170 | 170 | ||
171 | #ifdef CONFIG_SMP | 171 | #ifdef CONFIG_SMP |
172 | if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) | 172 | if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) |
173 | giveup_vsx(current); | 173 | giveup_vsx(current); |
174 | else | 174 | else |
175 | giveup_vsx(NULL); /* just enable vsx for kernel - force */ | 175 | giveup_vsx(NULL); /* just enable vsx for kernel - force */ |
176 | #else | 176 | #else |
177 | giveup_vsx(last_task_used_vsx); | 177 | giveup_vsx(last_task_used_vsx); |
178 | #endif /* CONFIG_SMP */ | 178 | #endif /* CONFIG_SMP */ |
179 | } | 179 | } |
180 | EXPORT_SYMBOL(enable_kernel_vsx); | 180 | EXPORT_SYMBOL(enable_kernel_vsx); |
181 | #endif | 181 | #endif |
182 | 182 | ||
183 | void giveup_vsx(struct task_struct *tsk) | 183 | void giveup_vsx(struct task_struct *tsk) |
184 | { | 184 | { |
185 | giveup_fpu(tsk); | 185 | giveup_fpu(tsk); |
186 | giveup_altivec(tsk); | 186 | giveup_altivec(tsk); |
187 | __giveup_vsx(tsk); | 187 | __giveup_vsx(tsk); |
188 | } | 188 | } |
189 | 189 | ||
190 | void flush_vsx_to_thread(struct task_struct *tsk) | 190 | void flush_vsx_to_thread(struct task_struct *tsk) |
191 | { | 191 | { |
192 | if (tsk->thread.regs) { | 192 | if (tsk->thread.regs) { |
193 | preempt_disable(); | 193 | preempt_disable(); |
194 | if (tsk->thread.regs->msr & MSR_VSX) { | 194 | if (tsk->thread.regs->msr & MSR_VSX) { |
195 | #ifdef CONFIG_SMP | 195 | #ifdef CONFIG_SMP |
196 | BUG_ON(tsk != current); | 196 | BUG_ON(tsk != current); |
197 | #endif | 197 | #endif |
198 | giveup_vsx(tsk); | 198 | giveup_vsx(tsk); |
199 | } | 199 | } |
200 | preempt_enable(); | 200 | preempt_enable(); |
201 | } | 201 | } |
202 | } | 202 | } |
203 | EXPORT_SYMBOL_GPL(flush_vsx_to_thread); | 203 | EXPORT_SYMBOL_GPL(flush_vsx_to_thread); |
204 | #endif /* CONFIG_VSX */ | 204 | #endif /* CONFIG_VSX */ |
205 | 205 | ||
206 | #ifdef CONFIG_SPE | 206 | #ifdef CONFIG_SPE |
207 | 207 | ||
208 | void enable_kernel_spe(void) | 208 | void enable_kernel_spe(void) |
209 | { | 209 | { |
210 | WARN_ON(preemptible()); | 210 | WARN_ON(preemptible()); |
211 | 211 | ||
212 | #ifdef CONFIG_SMP | 212 | #ifdef CONFIG_SMP |
213 | if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) | 213 | if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) |
214 | giveup_spe(current); | 214 | giveup_spe(current); |
215 | else | 215 | else |
216 | giveup_spe(NULL); /* just enable SPE for kernel - force */ | 216 | giveup_spe(NULL); /* just enable SPE for kernel - force */ |
217 | #else | 217 | #else |
218 | giveup_spe(last_task_used_spe); | 218 | giveup_spe(last_task_used_spe); |
219 | #endif /* __SMP __ */ | 219 | #endif /* __SMP __ */ |
220 | } | 220 | } |
221 | EXPORT_SYMBOL(enable_kernel_spe); | 221 | EXPORT_SYMBOL(enable_kernel_spe); |
222 | 222 | ||
223 | void flush_spe_to_thread(struct task_struct *tsk) | 223 | void flush_spe_to_thread(struct task_struct *tsk) |
224 | { | 224 | { |
225 | if (tsk->thread.regs) { | 225 | if (tsk->thread.regs) { |
226 | preempt_disable(); | 226 | preempt_disable(); |
227 | if (tsk->thread.regs->msr & MSR_SPE) { | 227 | if (tsk->thread.regs->msr & MSR_SPE) { |
228 | #ifdef CONFIG_SMP | 228 | #ifdef CONFIG_SMP |
229 | BUG_ON(tsk != current); | 229 | BUG_ON(tsk != current); |
230 | #endif | 230 | #endif |
231 | tsk->thread.spefscr = mfspr(SPRN_SPEFSCR); | 231 | tsk->thread.spefscr = mfspr(SPRN_SPEFSCR); |
232 | giveup_spe(tsk); | 232 | giveup_spe(tsk); |
233 | } | 233 | } |
234 | preempt_enable(); | 234 | preempt_enable(); |
235 | } | 235 | } |
236 | } | 236 | } |
237 | #endif /* CONFIG_SPE */ | 237 | #endif /* CONFIG_SPE */ |
238 | 238 | ||
239 | #ifndef CONFIG_SMP | 239 | #ifndef CONFIG_SMP |
240 | /* | 240 | /* |
241 | * If we are doing lazy switching of CPU state (FP, altivec or SPE), | 241 | * If we are doing lazy switching of CPU state (FP, altivec or SPE), |
242 | * and the current task has some state, discard it. | 242 | * and the current task has some state, discard it. |
243 | */ | 243 | */ |
244 | void discard_lazy_cpu_state(void) | 244 | void discard_lazy_cpu_state(void) |
245 | { | 245 | { |
246 | preempt_disable(); | 246 | preempt_disable(); |
247 | if (last_task_used_math == current) | 247 | if (last_task_used_math == current) |
248 | last_task_used_math = NULL; | 248 | last_task_used_math = NULL; |
249 | #ifdef CONFIG_ALTIVEC | 249 | #ifdef CONFIG_ALTIVEC |
250 | if (last_task_used_altivec == current) | 250 | if (last_task_used_altivec == current) |
251 | last_task_used_altivec = NULL; | 251 | last_task_used_altivec = NULL; |
252 | #endif /* CONFIG_ALTIVEC */ | 252 | #endif /* CONFIG_ALTIVEC */ |
253 | #ifdef CONFIG_VSX | 253 | #ifdef CONFIG_VSX |
254 | if (last_task_used_vsx == current) | 254 | if (last_task_used_vsx == current) |
255 | last_task_used_vsx = NULL; | 255 | last_task_used_vsx = NULL; |
256 | #endif /* CONFIG_VSX */ | 256 | #endif /* CONFIG_VSX */ |
257 | #ifdef CONFIG_SPE | 257 | #ifdef CONFIG_SPE |
258 | if (last_task_used_spe == current) | 258 | if (last_task_used_spe == current) |
259 | last_task_used_spe = NULL; | 259 | last_task_used_spe = NULL; |
260 | #endif | 260 | #endif |
261 | preempt_enable(); | 261 | preempt_enable(); |
262 | } | 262 | } |
263 | #endif /* CONFIG_SMP */ | 263 | #endif /* CONFIG_SMP */ |
264 | 264 | ||
265 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | 265 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
266 | void do_send_trap(struct pt_regs *regs, unsigned long address, | 266 | void do_send_trap(struct pt_regs *regs, unsigned long address, |
267 | unsigned long error_code, int signal_code, int breakpt) | 267 | unsigned long error_code, int signal_code, int breakpt) |
268 | { | 268 | { |
269 | siginfo_t info; | 269 | siginfo_t info; |
270 | 270 | ||
271 | current->thread.trap_nr = signal_code; | 271 | current->thread.trap_nr = signal_code; |
272 | if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, | 272 | if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, |
273 | 11, SIGSEGV) == NOTIFY_STOP) | 273 | 11, SIGSEGV) == NOTIFY_STOP) |
274 | return; | 274 | return; |
275 | 275 | ||
276 | /* Deliver the signal to userspace */ | 276 | /* Deliver the signal to userspace */ |
277 | info.si_signo = SIGTRAP; | 277 | info.si_signo = SIGTRAP; |
278 | info.si_errno = breakpt; /* breakpoint or watchpoint id */ | 278 | info.si_errno = breakpt; /* breakpoint or watchpoint id */ |
279 | info.si_code = signal_code; | 279 | info.si_code = signal_code; |
280 | info.si_addr = (void __user *)address; | 280 | info.si_addr = (void __user *)address; |
281 | force_sig_info(SIGTRAP, &info, current); | 281 | force_sig_info(SIGTRAP, &info, current); |
282 | } | 282 | } |
283 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ | 283 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ |
284 | void do_break (struct pt_regs *regs, unsigned long address, | 284 | void do_break (struct pt_regs *regs, unsigned long address, |
285 | unsigned long error_code) | 285 | unsigned long error_code) |
286 | { | 286 | { |
287 | siginfo_t info; | 287 | siginfo_t info; |
288 | 288 | ||
289 | current->thread.trap_nr = TRAP_HWBKPT; | 289 | current->thread.trap_nr = TRAP_HWBKPT; |
290 | if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, | 290 | if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, |
291 | 11, SIGSEGV) == NOTIFY_STOP) | 291 | 11, SIGSEGV) == NOTIFY_STOP) |
292 | return; | 292 | return; |
293 | 293 | ||
294 | if (debugger_break_match(regs)) | 294 | if (debugger_break_match(regs)) |
295 | return; | 295 | return; |
296 | 296 | ||
297 | /* Clear the breakpoint */ | 297 | /* Clear the breakpoint */ |
298 | hw_breakpoint_disable(); | 298 | hw_breakpoint_disable(); |
299 | 299 | ||
300 | /* Deliver the signal to userspace */ | 300 | /* Deliver the signal to userspace */ |
301 | info.si_signo = SIGTRAP; | 301 | info.si_signo = SIGTRAP; |
302 | info.si_errno = 0; | 302 | info.si_errno = 0; |
303 | info.si_code = TRAP_HWBKPT; | 303 | info.si_code = TRAP_HWBKPT; |
304 | info.si_addr = (void __user *)address; | 304 | info.si_addr = (void __user *)address; |
305 | force_sig_info(SIGTRAP, &info, current); | 305 | force_sig_info(SIGTRAP, &info, current); |
306 | } | 306 | } |
307 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ | 307 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ |
308 | 308 | ||
309 | static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk); | 309 | static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk); |
310 | 310 | ||
311 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | 311 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
312 | /* | 312 | /* |
313 | * Set the debug registers back to their default "safe" values. | 313 | * Set the debug registers back to their default "safe" values. |
314 | */ | 314 | */ |
315 | static void set_debug_reg_defaults(struct thread_struct *thread) | 315 | static void set_debug_reg_defaults(struct thread_struct *thread) |
316 | { | 316 | { |
317 | thread->debug.iac1 = thread->debug.iac2 = 0; | 317 | thread->debug.iac1 = thread->debug.iac2 = 0; |
318 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | 318 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 |
319 | thread->debug.iac3 = thread->debug.iac4 = 0; | 319 | thread->debug.iac3 = thread->debug.iac4 = 0; |
320 | #endif | 320 | #endif |
321 | thread->debug.dac1 = thread->debug.dac2 = 0; | 321 | thread->debug.dac1 = thread->debug.dac2 = 0; |
322 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | 322 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 |
323 | thread->debug.dvc1 = thread->debug.dvc2 = 0; | 323 | thread->debug.dvc1 = thread->debug.dvc2 = 0; |
324 | #endif | 324 | #endif |
325 | thread->debug.dbcr0 = 0; | 325 | thread->debug.dbcr0 = 0; |
326 | #ifdef CONFIG_BOOKE | 326 | #ifdef CONFIG_BOOKE |
327 | /* | 327 | /* |
328 | * Force User/Supervisor bits to b11 (user-only MSR[PR]=1) | 328 | * Force User/Supervisor bits to b11 (user-only MSR[PR]=1) |
329 | */ | 329 | */ |
330 | thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | | 330 | thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | |
331 | DBCR1_IAC3US | DBCR1_IAC4US; | 331 | DBCR1_IAC3US | DBCR1_IAC4US; |
332 | /* | 332 | /* |
333 | * Force Data Address Compare User/Supervisor bits to be User-only | 333 | * Force Data Address Compare User/Supervisor bits to be User-only |
334 | * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0. | 334 | * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0. |
335 | */ | 335 | */ |
336 | thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US; | 336 | thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US; |
337 | #else | 337 | #else |
338 | thread->debug.dbcr1 = 0; | 338 | thread->debug.dbcr1 = 0; |
339 | #endif | 339 | #endif |
340 | } | 340 | } |
341 | 341 | ||
342 | static void prime_debug_regs(struct thread_struct *thread) | 342 | static void prime_debug_regs(struct thread_struct *thread) |
343 | { | 343 | { |
344 | /* | 344 | /* |
345 | * We could have inherited MSR_DE from userspace, since | 345 | * We could have inherited MSR_DE from userspace, since |
346 | * it doesn't get cleared on exception entry. Make sure | 346 | * it doesn't get cleared on exception entry. Make sure |
347 | * MSR_DE is clear before we enable any debug events. | 347 | * MSR_DE is clear before we enable any debug events. |
348 | */ | 348 | */ |
349 | mtmsr(mfmsr() & ~MSR_DE); | 349 | mtmsr(mfmsr() & ~MSR_DE); |
350 | 350 | ||
351 | mtspr(SPRN_IAC1, thread->debug.iac1); | 351 | mtspr(SPRN_IAC1, thread->debug.iac1); |
352 | mtspr(SPRN_IAC2, thread->debug.iac2); | 352 | mtspr(SPRN_IAC2, thread->debug.iac2); |
353 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | 353 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 |
354 | mtspr(SPRN_IAC3, thread->debug.iac3); | 354 | mtspr(SPRN_IAC3, thread->debug.iac3); |
355 | mtspr(SPRN_IAC4, thread->debug.iac4); | 355 | mtspr(SPRN_IAC4, thread->debug.iac4); |
356 | #endif | 356 | #endif |
357 | mtspr(SPRN_DAC1, thread->debug.dac1); | 357 | mtspr(SPRN_DAC1, thread->debug.dac1); |
358 | mtspr(SPRN_DAC2, thread->debug.dac2); | 358 | mtspr(SPRN_DAC2, thread->debug.dac2); |
359 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | 359 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 |
360 | mtspr(SPRN_DVC1, thread->debug.dvc1); | 360 | mtspr(SPRN_DVC1, thread->debug.dvc1); |
361 | mtspr(SPRN_DVC2, thread->debug.dvc2); | 361 | mtspr(SPRN_DVC2, thread->debug.dvc2); |
362 | #endif | 362 | #endif |
363 | mtspr(SPRN_DBCR0, thread->debug.dbcr0); | 363 | mtspr(SPRN_DBCR0, thread->debug.dbcr0); |
364 | mtspr(SPRN_DBCR1, thread->debug.dbcr1); | 364 | mtspr(SPRN_DBCR1, thread->debug.dbcr1); |
365 | #ifdef CONFIG_BOOKE | 365 | #ifdef CONFIG_BOOKE |
366 | mtspr(SPRN_DBCR2, thread->debug.dbcr2); | 366 | mtspr(SPRN_DBCR2, thread->debug.dbcr2); |
367 | #endif | 367 | #endif |
368 | } | 368 | } |
369 | /* | 369 | /* |
370 | * Unless neither the old or new thread are making use of the | 370 | * Unless neither the old or new thread are making use of the |
371 | * debug registers, set the debug registers from the values | 371 | * debug registers, set the debug registers from the values |
372 | * stored in the new thread. | 372 | * stored in the new thread. |
373 | */ | 373 | */ |
374 | void switch_booke_debug_regs(struct thread_struct *new_thread) | 374 | void switch_booke_debug_regs(struct thread_struct *new_thread) |
375 | { | 375 | { |
376 | if ((current->thread.debug.dbcr0 & DBCR0_IDM) | 376 | if ((current->thread.debug.dbcr0 & DBCR0_IDM) |
377 | || (new_thread->debug.dbcr0 & DBCR0_IDM)) | 377 | || (new_thread->debug.dbcr0 & DBCR0_IDM)) |
378 | prime_debug_regs(new_thread); | 378 | prime_debug_regs(new_thread); |
379 | } | 379 | } |
380 | EXPORT_SYMBOL_GPL(switch_booke_debug_regs); | 380 | EXPORT_SYMBOL_GPL(switch_booke_debug_regs); |
381 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ | 381 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ |
382 | #ifndef CONFIG_HAVE_HW_BREAKPOINT | 382 | #ifndef CONFIG_HAVE_HW_BREAKPOINT |
383 | static void set_debug_reg_defaults(struct thread_struct *thread) | 383 | static void set_debug_reg_defaults(struct thread_struct *thread) |
384 | { | 384 | { |
385 | thread->hw_brk.address = 0; | 385 | thread->hw_brk.address = 0; |
386 | thread->hw_brk.type = 0; | 386 | thread->hw_brk.type = 0; |
387 | set_breakpoint(&thread->hw_brk); | 387 | set_breakpoint(&thread->hw_brk); |
388 | } | 388 | } |
389 | #endif /* !CONFIG_HAVE_HW_BREAKPOINT */ | 389 | #endif /* !CONFIG_HAVE_HW_BREAKPOINT */ |
390 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ | 390 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ |
391 | 391 | ||
392 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | 392 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
393 | static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) | 393 | static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) |
394 | { | 394 | { |
395 | mtspr(SPRN_DAC1, dabr); | 395 | mtspr(SPRN_DAC1, dabr); |
396 | #ifdef CONFIG_PPC_47x | 396 | #ifdef CONFIG_PPC_47x |
397 | isync(); | 397 | isync(); |
398 | #endif | 398 | #endif |
399 | return 0; | 399 | return 0; |
400 | } | 400 | } |
401 | #elif defined(CONFIG_PPC_BOOK3S) | 401 | #elif defined(CONFIG_PPC_BOOK3S) |
402 | static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) | 402 | static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) |
403 | { | 403 | { |
404 | mtspr(SPRN_DABR, dabr); | 404 | mtspr(SPRN_DABR, dabr); |
405 | if (cpu_has_feature(CPU_FTR_DABRX)) | 405 | if (cpu_has_feature(CPU_FTR_DABRX)) |
406 | mtspr(SPRN_DABRX, dabrx); | 406 | mtspr(SPRN_DABRX, dabrx); |
407 | return 0; | 407 | return 0; |
408 | } | 408 | } |
409 | #else | 409 | #else |
410 | static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) | 410 | static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) |
411 | { | 411 | { |
412 | return -EINVAL; | 412 | return -EINVAL; |
413 | } | 413 | } |
414 | #endif | 414 | #endif |
415 | 415 | ||
416 | static inline int set_dabr(struct arch_hw_breakpoint *brk) | 416 | static inline int set_dabr(struct arch_hw_breakpoint *brk) |
417 | { | 417 | { |
418 | unsigned long dabr, dabrx; | 418 | unsigned long dabr, dabrx; |
419 | 419 | ||
420 | dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR); | 420 | dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR); |
421 | dabrx = ((brk->type >> 3) & 0x7); | 421 | dabrx = ((brk->type >> 3) & 0x7); |
422 | 422 | ||
423 | if (ppc_md.set_dabr) | 423 | if (ppc_md.set_dabr) |
424 | return ppc_md.set_dabr(dabr, dabrx); | 424 | return ppc_md.set_dabr(dabr, dabrx); |
425 | 425 | ||
426 | return __set_dabr(dabr, dabrx); | 426 | return __set_dabr(dabr, dabrx); |
427 | } | 427 | } |
428 | 428 | ||
429 | static inline int set_dawr(struct arch_hw_breakpoint *brk) | 429 | static inline int set_dawr(struct arch_hw_breakpoint *brk) |
430 | { | 430 | { |
431 | unsigned long dawr, dawrx, mrd; | 431 | unsigned long dawr, dawrx, mrd; |
432 | 432 | ||
433 | dawr = brk->address; | 433 | dawr = brk->address; |
434 | 434 | ||
435 | dawrx = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)) \ | 435 | dawrx = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)) \ |
436 | << (63 - 58); //* read/write bits */ | 436 | << (63 - 58); //* read/write bits */ |
437 | dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) \ | 437 | dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) \ |
438 | << (63 - 59); //* translate */ | 438 | << (63 - 59); //* translate */ |
439 | dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) \ | 439 | dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) \ |
440 | >> 3; //* PRIM bits */ | 440 | >> 3; //* PRIM bits */ |
441 | /* dawr length is stored in field MDR bits 48:53. Matches range in | 441 | /* dawr length is stored in field MDR bits 48:53. Matches range in |
442 | doublewords (64 bits) baised by -1 eg. 0b000000=1DW and | 442 | doublewords (64 bits) baised by -1 eg. 0b000000=1DW and |
443 | 0b111111=64DW. | 443 | 0b111111=64DW. |
444 | brk->len is in bytes. | 444 | brk->len is in bytes. |
445 | This aligns up to double word size, shifts and does the bias. | 445 | This aligns up to double word size, shifts and does the bias. |
446 | */ | 446 | */ |
447 | mrd = ((brk->len + 7) >> 3) - 1; | 447 | mrd = ((brk->len + 7) >> 3) - 1; |
448 | dawrx |= (mrd & 0x3f) << (63 - 53); | 448 | dawrx |= (mrd & 0x3f) << (63 - 53); |
449 | 449 | ||
450 | if (ppc_md.set_dawr) | 450 | if (ppc_md.set_dawr) |
451 | return ppc_md.set_dawr(dawr, dawrx); | 451 | return ppc_md.set_dawr(dawr, dawrx); |
452 | mtspr(SPRN_DAWR, dawr); | 452 | mtspr(SPRN_DAWR, dawr); |
453 | mtspr(SPRN_DAWRX, dawrx); | 453 | mtspr(SPRN_DAWRX, dawrx); |
454 | return 0; | 454 | return 0; |
455 | } | 455 | } |
456 | 456 | ||
457 | int set_breakpoint(struct arch_hw_breakpoint *brk) | 457 | int set_breakpoint(struct arch_hw_breakpoint *brk) |
458 | { | 458 | { |
459 | __get_cpu_var(current_brk) = *brk; | 459 | __get_cpu_var(current_brk) = *brk; |
460 | 460 | ||
461 | if (cpu_has_feature(CPU_FTR_DAWR)) | 461 | if (cpu_has_feature(CPU_FTR_DAWR)) |
462 | return set_dawr(brk); | 462 | return set_dawr(brk); |
463 | 463 | ||
464 | return set_dabr(brk); | 464 | return set_dabr(brk); |
465 | } | 465 | } |
466 | 466 | ||
467 | #ifdef CONFIG_PPC64 | 467 | #ifdef CONFIG_PPC64 |
468 | DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array); | 468 | DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array); |
469 | #endif | 469 | #endif |
470 | 470 | ||
471 | static inline bool hw_brk_match(struct arch_hw_breakpoint *a, | 471 | static inline bool hw_brk_match(struct arch_hw_breakpoint *a, |
472 | struct arch_hw_breakpoint *b) | 472 | struct arch_hw_breakpoint *b) |
473 | { | 473 | { |
474 | if (a->address != b->address) | 474 | if (a->address != b->address) |
475 | return false; | 475 | return false; |
476 | if (a->type != b->type) | 476 | if (a->type != b->type) |
477 | return false; | 477 | return false; |
478 | if (a->len != b->len) | 478 | if (a->len != b->len) |
479 | return false; | 479 | return false; |
480 | return true; | 480 | return true; |
481 | } | 481 | } |
482 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 482 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
483 | static inline void tm_reclaim_task(struct task_struct *tsk) | 483 | static inline void tm_reclaim_task(struct task_struct *tsk) |
484 | { | 484 | { |
485 | /* We have to work out if we're switching from/to a task that's in the | 485 | /* We have to work out if we're switching from/to a task that's in the |
486 | * middle of a transaction. | 486 | * middle of a transaction. |
487 | * | 487 | * |
488 | * In switching we need to maintain a 2nd register state as | 488 | * In switching we need to maintain a 2nd register state as |
489 | * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the | 489 | * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the |
490 | * checkpointed (tbegin) state in ckpt_regs and saves the transactional | 490 | * checkpointed (tbegin) state in ckpt_regs and saves the transactional |
491 | * (current) FPRs into oldtask->thread.transact_fpr[]. | 491 | * (current) FPRs into oldtask->thread.transact_fpr[]. |
492 | * | 492 | * |
493 | * We also context switch (save) TFHAR/TEXASR/TFIAR in here. | 493 | * We also context switch (save) TFHAR/TEXASR/TFIAR in here. |
494 | */ | 494 | */ |
495 | struct thread_struct *thr = &tsk->thread; | 495 | struct thread_struct *thr = &tsk->thread; |
496 | 496 | ||
497 | if (!thr->regs) | 497 | if (!thr->regs) |
498 | return; | 498 | return; |
499 | 499 | ||
500 | if (!MSR_TM_ACTIVE(thr->regs->msr)) | 500 | if (!MSR_TM_ACTIVE(thr->regs->msr)) |
501 | goto out_and_saveregs; | 501 | goto out_and_saveregs; |
502 | 502 | ||
503 | /* Stash the original thread MSR, as giveup_fpu et al will | 503 | /* Stash the original thread MSR, as giveup_fpu et al will |
504 | * modify it. We hold onto it to see whether the task used | 504 | * modify it. We hold onto it to see whether the task used |
505 | * FP & vector regs. | 505 | * FP & vector regs. |
506 | */ | 506 | */ |
507 | thr->tm_orig_msr = thr->regs->msr; | 507 | thr->tm_orig_msr = thr->regs->msr; |
508 | 508 | ||
509 | TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, " | 509 | TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, " |
510 | "ccr=%lx, msr=%lx, trap=%lx)\n", | 510 | "ccr=%lx, msr=%lx, trap=%lx)\n", |
511 | tsk->pid, thr->regs->nip, | 511 | tsk->pid, thr->regs->nip, |
512 | thr->regs->ccr, thr->regs->msr, | 512 | thr->regs->ccr, thr->regs->msr, |
513 | thr->regs->trap); | 513 | thr->regs->trap); |
514 | 514 | ||
515 | tm_reclaim(thr, thr->regs->msr, TM_CAUSE_RESCHED); | 515 | tm_reclaim(thr, thr->regs->msr, TM_CAUSE_RESCHED); |
516 | 516 | ||
517 | TM_DEBUG("--- tm_reclaim on pid %d complete\n", | 517 | TM_DEBUG("--- tm_reclaim on pid %d complete\n", |
518 | tsk->pid); | 518 | tsk->pid); |
519 | 519 | ||
520 | out_and_saveregs: | 520 | out_and_saveregs: |
521 | /* Always save the regs here, even if a transaction's not active. | 521 | /* Always save the regs here, even if a transaction's not active. |
522 | * This context-switches a thread's TM info SPRs. We do it here to | 522 | * This context-switches a thread's TM info SPRs. We do it here to |
523 | * be consistent with the restore path (in recheckpoint) which | 523 | * be consistent with the restore path (in recheckpoint) which |
524 | * cannot happen later in _switch(). | 524 | * cannot happen later in _switch(). |
525 | */ | 525 | */ |
526 | tm_save_sprs(thr); | 526 | tm_save_sprs(thr); |
527 | } | 527 | } |
528 | 528 | ||
529 | static inline void tm_recheckpoint_new_task(struct task_struct *new) | 529 | static inline void tm_recheckpoint_new_task(struct task_struct *new) |
530 | { | 530 | { |
531 | unsigned long msr; | 531 | unsigned long msr; |
532 | 532 | ||
533 | if (!cpu_has_feature(CPU_FTR_TM)) | 533 | if (!cpu_has_feature(CPU_FTR_TM)) |
534 | return; | 534 | return; |
535 | 535 | ||
536 | /* Recheckpoint the registers of the thread we're about to switch to. | 536 | /* Recheckpoint the registers of the thread we're about to switch to. |
537 | * | 537 | * |
538 | * If the task was using FP, we non-lazily reload both the original and | 538 | * If the task was using FP, we non-lazily reload both the original and |
539 | * the speculative FP register states. This is because the kernel | 539 | * the speculative FP register states. This is because the kernel |
540 | * doesn't see if/when a TM rollback occurs, so if we take an FP | 540 | * doesn't see if/when a TM rollback occurs, so if we take an FP |
541 | * unavoidable later, we are unable to determine which set of FP regs | 541 | * unavoidable later, we are unable to determine which set of FP regs |
542 | * need to be restored. | 542 | * need to be restored. |
543 | */ | 543 | */ |
544 | if (!new->thread.regs) | 544 | if (!new->thread.regs) |
545 | return; | 545 | return; |
546 | 546 | ||
547 | /* The TM SPRs are restored here, so that TEXASR.FS can be set | 547 | /* The TM SPRs are restored here, so that TEXASR.FS can be set |
548 | * before the trecheckpoint and no explosion occurs. | 548 | * before the trecheckpoint and no explosion occurs. |
549 | */ | 549 | */ |
550 | tm_restore_sprs(&new->thread); | 550 | tm_restore_sprs(&new->thread); |
551 | 551 | ||
552 | if (!MSR_TM_ACTIVE(new->thread.regs->msr)) | 552 | if (!MSR_TM_ACTIVE(new->thread.regs->msr)) |
553 | return; | 553 | return; |
554 | msr = new->thread.tm_orig_msr; | 554 | msr = new->thread.tm_orig_msr; |
555 | /* Recheckpoint to restore original checkpointed register state. */ | 555 | /* Recheckpoint to restore original checkpointed register state. */ |
556 | TM_DEBUG("*** tm_recheckpoint of pid %d " | 556 | TM_DEBUG("*** tm_recheckpoint of pid %d " |
557 | "(new->msr 0x%lx, new->origmsr 0x%lx)\n", | 557 | "(new->msr 0x%lx, new->origmsr 0x%lx)\n", |
558 | new->pid, new->thread.regs->msr, msr); | 558 | new->pid, new->thread.regs->msr, msr); |
559 | 559 | ||
560 | /* This loads the checkpointed FP/VEC state, if used */ | 560 | /* This loads the checkpointed FP/VEC state, if used */ |
561 | tm_recheckpoint(&new->thread, msr); | 561 | tm_recheckpoint(&new->thread, msr); |
562 | 562 | ||
563 | /* This loads the speculative FP/VEC state, if used */ | 563 | /* This loads the speculative FP/VEC state, if used */ |
564 | if (msr & MSR_FP) { | 564 | if (msr & MSR_FP) { |
565 | do_load_up_transact_fpu(&new->thread); | 565 | do_load_up_transact_fpu(&new->thread); |
566 | new->thread.regs->msr |= | 566 | new->thread.regs->msr |= |
567 | (MSR_FP | new->thread.fpexc_mode); | 567 | (MSR_FP | new->thread.fpexc_mode); |
568 | } | 568 | } |
569 | #ifdef CONFIG_ALTIVEC | 569 | #ifdef CONFIG_ALTIVEC |
570 | if (msr & MSR_VEC) { | 570 | if (msr & MSR_VEC) { |
571 | do_load_up_transact_altivec(&new->thread); | 571 | do_load_up_transact_altivec(&new->thread); |
572 | new->thread.regs->msr |= MSR_VEC; | 572 | new->thread.regs->msr |= MSR_VEC; |
573 | } | 573 | } |
574 | #endif | 574 | #endif |
575 | /* We may as well turn on VSX too since all the state is restored now */ | 575 | /* We may as well turn on VSX too since all the state is restored now */ |
576 | if (msr & MSR_VSX) | 576 | if (msr & MSR_VSX) |
577 | new->thread.regs->msr |= MSR_VSX; | 577 | new->thread.regs->msr |= MSR_VSX; |
578 | 578 | ||
579 | TM_DEBUG("*** tm_recheckpoint of pid %d complete " | 579 | TM_DEBUG("*** tm_recheckpoint of pid %d complete " |
580 | "(kernel msr 0x%lx)\n", | 580 | "(kernel msr 0x%lx)\n", |
581 | new->pid, mfmsr()); | 581 | new->pid, mfmsr()); |
582 | } | 582 | } |
583 | 583 | ||
584 | static inline void __switch_to_tm(struct task_struct *prev) | 584 | static inline void __switch_to_tm(struct task_struct *prev) |
585 | { | 585 | { |
586 | if (cpu_has_feature(CPU_FTR_TM)) { | 586 | if (cpu_has_feature(CPU_FTR_TM)) { |
587 | tm_enable(); | 587 | tm_enable(); |
588 | tm_reclaim_task(prev); | 588 | tm_reclaim_task(prev); |
589 | } | 589 | } |
590 | } | 590 | } |
591 | #else | 591 | #else |
592 | #define tm_recheckpoint_new_task(new) | 592 | #define tm_recheckpoint_new_task(new) |
593 | #define __switch_to_tm(prev) | 593 | #define __switch_to_tm(prev) |
594 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ | 594 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ |
595 | 595 | ||
596 | struct task_struct *__switch_to(struct task_struct *prev, | 596 | struct task_struct *__switch_to(struct task_struct *prev, |
597 | struct task_struct *new) | 597 | struct task_struct *new) |
598 | { | 598 | { |
599 | struct thread_struct *new_thread, *old_thread; | 599 | struct thread_struct *new_thread, *old_thread; |
600 | struct task_struct *last; | 600 | struct task_struct *last; |
601 | #ifdef CONFIG_PPC_BOOK3S_64 | 601 | #ifdef CONFIG_PPC_BOOK3S_64 |
602 | struct ppc64_tlb_batch *batch; | 602 | struct ppc64_tlb_batch *batch; |
603 | #endif | 603 | #endif |
604 | 604 | ||
605 | WARN_ON(!irqs_disabled()); | 605 | WARN_ON(!irqs_disabled()); |
606 | 606 | ||
607 | /* Back up the TAR across context switches. | 607 | /* Back up the TAR across context switches. |
608 | * Note that the TAR is not available for use in the kernel. (To | 608 | * Note that the TAR is not available for use in the kernel. (To |
609 | * provide this, the TAR should be backed up/restored on exception | 609 | * provide this, the TAR should be backed up/restored on exception |
610 | * entry/exit instead, and be in pt_regs. FIXME, this should be in | 610 | * entry/exit instead, and be in pt_regs. FIXME, this should be in |
611 | * pt_regs anyway (for debug).) | 611 | * pt_regs anyway (for debug).) |
612 | * Save the TAR here before we do treclaim/trecheckpoint as these | 612 | * Save the TAR here before we do treclaim/trecheckpoint as these |
613 | * will change the TAR. | 613 | * will change the TAR. |
614 | */ | 614 | */ |
615 | save_tar(&prev->thread); | 615 | save_tar(&prev->thread); |
616 | 616 | ||
617 | __switch_to_tm(prev); | 617 | __switch_to_tm(prev); |
618 | 618 | ||
619 | #ifdef CONFIG_SMP | 619 | #ifdef CONFIG_SMP |
620 | /* avoid complexity of lazy save/restore of fpu | 620 | /* avoid complexity of lazy save/restore of fpu |
621 | * by just saving it every time we switch out if | 621 | * by just saving it every time we switch out if |
622 | * this task used the fpu during the last quantum. | 622 | * this task used the fpu during the last quantum. |
623 | * | 623 | * |
624 | * If it tries to use the fpu again, it'll trap and | 624 | * If it tries to use the fpu again, it'll trap and |
625 | * reload its fp regs. So we don't have to do a restore | 625 | * reload its fp regs. So we don't have to do a restore |
626 | * every switch, just a save. | 626 | * every switch, just a save. |
627 | * -- Cort | 627 | * -- Cort |
628 | */ | 628 | */ |
629 | if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP)) | 629 | if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP)) |
630 | giveup_fpu(prev); | 630 | giveup_fpu(prev); |
631 | #ifdef CONFIG_ALTIVEC | 631 | #ifdef CONFIG_ALTIVEC |
632 | /* | 632 | /* |
633 | * If the previous thread used altivec in the last quantum | 633 | * If the previous thread used altivec in the last quantum |
634 | * (thus changing altivec regs) then save them. | 634 | * (thus changing altivec regs) then save them. |
635 | * We used to check the VRSAVE register but not all apps | 635 | * We used to check the VRSAVE register but not all apps |
636 | * set it, so we don't rely on it now (and in fact we need | 636 | * set it, so we don't rely on it now (and in fact we need |
637 | * to save & restore VSCR even if VRSAVE == 0). -- paulus | 637 | * to save & restore VSCR even if VRSAVE == 0). -- paulus |
638 | * | 638 | * |
639 | * On SMP we always save/restore altivec regs just to avoid the | 639 | * On SMP we always save/restore altivec regs just to avoid the |
640 | * complexity of changing processors. | 640 | * complexity of changing processors. |
641 | * -- Cort | 641 | * -- Cort |
642 | */ | 642 | */ |
643 | if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)) | 643 | if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)) |
644 | giveup_altivec(prev); | 644 | giveup_altivec(prev); |
645 | #endif /* CONFIG_ALTIVEC */ | 645 | #endif /* CONFIG_ALTIVEC */ |
646 | #ifdef CONFIG_VSX | 646 | #ifdef CONFIG_VSX |
647 | if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX)) | 647 | if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX)) |
648 | /* VMX and FPU registers are already save here */ | 648 | /* VMX and FPU registers are already save here */ |
649 | __giveup_vsx(prev); | 649 | __giveup_vsx(prev); |
650 | #endif /* CONFIG_VSX */ | 650 | #endif /* CONFIG_VSX */ |
651 | #ifdef CONFIG_SPE | 651 | #ifdef CONFIG_SPE |
652 | /* | 652 | /* |
653 | * If the previous thread used spe in the last quantum | 653 | * If the previous thread used spe in the last quantum |
654 | * (thus changing spe regs) then save them. | 654 | * (thus changing spe regs) then save them. |
655 | * | 655 | * |
656 | * On SMP we always save/restore spe regs just to avoid the | 656 | * On SMP we always save/restore spe regs just to avoid the |
657 | * complexity of changing processors. | 657 | * complexity of changing processors. |
658 | */ | 658 | */ |
659 | if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE))) | 659 | if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE))) |
660 | giveup_spe(prev); | 660 | giveup_spe(prev); |
661 | #endif /* CONFIG_SPE */ | 661 | #endif /* CONFIG_SPE */ |
662 | 662 | ||
663 | #else /* CONFIG_SMP */ | 663 | #else /* CONFIG_SMP */ |
664 | #ifdef CONFIG_ALTIVEC | 664 | #ifdef CONFIG_ALTIVEC |
665 | /* Avoid the trap. On smp this this never happens since | 665 | /* Avoid the trap. On smp this this never happens since |
666 | * we don't set last_task_used_altivec -- Cort | 666 | * we don't set last_task_used_altivec -- Cort |
667 | */ | 667 | */ |
668 | if (new->thread.regs && last_task_used_altivec == new) | 668 | if (new->thread.regs && last_task_used_altivec == new) |
669 | new->thread.regs->msr |= MSR_VEC; | 669 | new->thread.regs->msr |= MSR_VEC; |
670 | #endif /* CONFIG_ALTIVEC */ | 670 | #endif /* CONFIG_ALTIVEC */ |
671 | #ifdef CONFIG_VSX | 671 | #ifdef CONFIG_VSX |
672 | if (new->thread.regs && last_task_used_vsx == new) | 672 | if (new->thread.regs && last_task_used_vsx == new) |
673 | new->thread.regs->msr |= MSR_VSX; | 673 | new->thread.regs->msr |= MSR_VSX; |
674 | #endif /* CONFIG_VSX */ | 674 | #endif /* CONFIG_VSX */ |
675 | #ifdef CONFIG_SPE | 675 | #ifdef CONFIG_SPE |
676 | /* Avoid the trap. On smp this this never happens since | 676 | /* Avoid the trap. On smp this this never happens since |
677 | * we don't set last_task_used_spe | 677 | * we don't set last_task_used_spe |
678 | */ | 678 | */ |
679 | if (new->thread.regs && last_task_used_spe == new) | 679 | if (new->thread.regs && last_task_used_spe == new) |
680 | new->thread.regs->msr |= MSR_SPE; | 680 | new->thread.regs->msr |= MSR_SPE; |
681 | #endif /* CONFIG_SPE */ | 681 | #endif /* CONFIG_SPE */ |
682 | 682 | ||
683 | #endif /* CONFIG_SMP */ | 683 | #endif /* CONFIG_SMP */ |
684 | 684 | ||
685 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | 685 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
686 | switch_booke_debug_regs(&new->thread); | 686 | switch_booke_debug_regs(&new->thread); |
687 | #else | 687 | #else |
688 | /* | 688 | /* |
689 | * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would | 689 | * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would |
690 | * schedule DABR | 690 | * schedule DABR |
691 | */ | 691 | */ |
692 | #ifndef CONFIG_HAVE_HW_BREAKPOINT | 692 | #ifndef CONFIG_HAVE_HW_BREAKPOINT |
693 | if (unlikely(hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk))) | 693 | if (unlikely(hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk))) |
694 | set_breakpoint(&new->thread.hw_brk); | 694 | set_breakpoint(&new->thread.hw_brk); |
695 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ | 695 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |
696 | #endif | 696 | #endif |
697 | 697 | ||
698 | 698 | ||
699 | new_thread = &new->thread; | 699 | new_thread = &new->thread; |
700 | old_thread = ¤t->thread; | 700 | old_thread = ¤t->thread; |
701 | 701 | ||
702 | #ifdef CONFIG_PPC64 | 702 | #ifdef CONFIG_PPC64 |
703 | /* | 703 | /* |
704 | * Collect processor utilization data per process | 704 | * Collect processor utilization data per process |
705 | */ | 705 | */ |
706 | if (firmware_has_feature(FW_FEATURE_SPLPAR)) { | 706 | if (firmware_has_feature(FW_FEATURE_SPLPAR)) { |
707 | struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); | 707 | struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); |
708 | long unsigned start_tb, current_tb; | 708 | long unsigned start_tb, current_tb; |
709 | start_tb = old_thread->start_tb; | 709 | start_tb = old_thread->start_tb; |
710 | cu->current_tb = current_tb = mfspr(SPRN_PURR); | 710 | cu->current_tb = current_tb = mfspr(SPRN_PURR); |
711 | old_thread->accum_tb += (current_tb - start_tb); | 711 | old_thread->accum_tb += (current_tb - start_tb); |
712 | new_thread->start_tb = current_tb; | 712 | new_thread->start_tb = current_tb; |
713 | } | 713 | } |
714 | #endif /* CONFIG_PPC64 */ | 714 | #endif /* CONFIG_PPC64 */ |
715 | 715 | ||
716 | #ifdef CONFIG_PPC_BOOK3S_64 | 716 | #ifdef CONFIG_PPC_BOOK3S_64 |
717 | batch = &__get_cpu_var(ppc64_tlb_batch); | 717 | batch = &__get_cpu_var(ppc64_tlb_batch); |
718 | if (batch->active) { | 718 | if (batch->active) { |
719 | current_thread_info()->local_flags |= _TLF_LAZY_MMU; | 719 | current_thread_info()->local_flags |= _TLF_LAZY_MMU; |
720 | if (batch->index) | 720 | if (batch->index) |
721 | __flush_tlb_pending(batch); | 721 | __flush_tlb_pending(batch); |
722 | batch->active = 0; | 722 | batch->active = 0; |
723 | } | 723 | } |
724 | #endif /* CONFIG_PPC_BOOK3S_64 */ | 724 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
725 | 725 | ||
726 | /* | 726 | /* |
727 | * We can't take a PMU exception inside _switch() since there is a | 727 | * We can't take a PMU exception inside _switch() since there is a |
728 | * window where the kernel stack SLB and the kernel stack are out | 728 | * window where the kernel stack SLB and the kernel stack are out |
729 | * of sync. Hard disable here. | 729 | * of sync. Hard disable here. |
730 | */ | 730 | */ |
731 | hard_irq_disable(); | 731 | hard_irq_disable(); |
732 | 732 | ||
733 | tm_recheckpoint_new_task(new); | 733 | tm_recheckpoint_new_task(new); |
734 | 734 | ||
735 | last = _switch(old_thread, new_thread); | 735 | last = _switch(old_thread, new_thread); |
736 | 736 | ||
737 | #ifdef CONFIG_PPC_BOOK3S_64 | 737 | #ifdef CONFIG_PPC_BOOK3S_64 |
738 | if (current_thread_info()->local_flags & _TLF_LAZY_MMU) { | 738 | if (current_thread_info()->local_flags & _TLF_LAZY_MMU) { |
739 | current_thread_info()->local_flags &= ~_TLF_LAZY_MMU; | 739 | current_thread_info()->local_flags &= ~_TLF_LAZY_MMU; |
740 | batch = &__get_cpu_var(ppc64_tlb_batch); | 740 | batch = &__get_cpu_var(ppc64_tlb_batch); |
741 | batch->active = 1; | 741 | batch->active = 1; |
742 | } | 742 | } |
743 | #endif /* CONFIG_PPC_BOOK3S_64 */ | 743 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
744 | 744 | ||
745 | return last; | 745 | return last; |
746 | } | 746 | } |
747 | 747 | ||
748 | static int instructions_to_print = 16; | 748 | static int instructions_to_print = 16; |
749 | 749 | ||
750 | static void show_instructions(struct pt_regs *regs) | 750 | static void show_instructions(struct pt_regs *regs) |
751 | { | 751 | { |
752 | int i; | 752 | int i; |
753 | unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 * | 753 | unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 * |
754 | sizeof(int)); | 754 | sizeof(int)); |
755 | 755 | ||
756 | printk("Instruction dump:"); | 756 | printk("Instruction dump:"); |
757 | 757 | ||
758 | for (i = 0; i < instructions_to_print; i++) { | 758 | for (i = 0; i < instructions_to_print; i++) { |
759 | int instr; | 759 | int instr; |
760 | 760 | ||
761 | if (!(i % 8)) | 761 | if (!(i % 8)) |
762 | printk("\n"); | 762 | printk("\n"); |
763 | 763 | ||
764 | #if !defined(CONFIG_BOOKE) | 764 | #if !defined(CONFIG_BOOKE) |
765 | /* If executing with the IMMU off, adjust pc rather | 765 | /* If executing with the IMMU off, adjust pc rather |
766 | * than print XXXXXXXX. | 766 | * than print XXXXXXXX. |
767 | */ | 767 | */ |
768 | if (!(regs->msr & MSR_IR)) | 768 | if (!(regs->msr & MSR_IR)) |
769 | pc = (unsigned long)phys_to_virt(pc); | 769 | pc = (unsigned long)phys_to_virt(pc); |
770 | #endif | 770 | #endif |
771 | 771 | ||
772 | /* We use __get_user here *only* to avoid an OOPS on a | 772 | /* We use __get_user here *only* to avoid an OOPS on a |
773 | * bad address because the pc *should* only be a | 773 | * bad address because the pc *should* only be a |
774 | * kernel address. | 774 | * kernel address. |
775 | */ | 775 | */ |
776 | if (!__kernel_text_address(pc) || | 776 | if (!__kernel_text_address(pc) || |
777 | __get_user(instr, (unsigned int __user *)pc)) { | 777 | __get_user(instr, (unsigned int __user *)pc)) { |
778 | printk(KERN_CONT "XXXXXXXX "); | 778 | printk(KERN_CONT "XXXXXXXX "); |
779 | } else { | 779 | } else { |
780 | if (regs->nip == pc) | 780 | if (regs->nip == pc) |
781 | printk(KERN_CONT "<%08x> ", instr); | 781 | printk(KERN_CONT "<%08x> ", instr); |
782 | else | 782 | else |
783 | printk(KERN_CONT "%08x ", instr); | 783 | printk(KERN_CONT "%08x ", instr); |
784 | } | 784 | } |
785 | 785 | ||
786 | pc += sizeof(int); | 786 | pc += sizeof(int); |
787 | } | 787 | } |
788 | 788 | ||
789 | printk("\n"); | 789 | printk("\n"); |
790 | } | 790 | } |
791 | 791 | ||
792 | static struct regbit { | 792 | static struct regbit { |
793 | unsigned long bit; | 793 | unsigned long bit; |
794 | const char *name; | 794 | const char *name; |
795 | } msr_bits[] = { | 795 | } msr_bits[] = { |
796 | #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE) | 796 | #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE) |
797 | {MSR_SF, "SF"}, | 797 | {MSR_SF, "SF"}, |
798 | {MSR_HV, "HV"}, | 798 | {MSR_HV, "HV"}, |
799 | #endif | 799 | #endif |
800 | {MSR_VEC, "VEC"}, | 800 | {MSR_VEC, "VEC"}, |
801 | {MSR_VSX, "VSX"}, | 801 | {MSR_VSX, "VSX"}, |
802 | #ifdef CONFIG_BOOKE | 802 | #ifdef CONFIG_BOOKE |
803 | {MSR_CE, "CE"}, | 803 | {MSR_CE, "CE"}, |
804 | #endif | 804 | #endif |
805 | {MSR_EE, "EE"}, | 805 | {MSR_EE, "EE"}, |
806 | {MSR_PR, "PR"}, | 806 | {MSR_PR, "PR"}, |
807 | {MSR_FP, "FP"}, | 807 | {MSR_FP, "FP"}, |
808 | {MSR_ME, "ME"}, | 808 | {MSR_ME, "ME"}, |
809 | #ifdef CONFIG_BOOKE | 809 | #ifdef CONFIG_BOOKE |
810 | {MSR_DE, "DE"}, | 810 | {MSR_DE, "DE"}, |
811 | #else | 811 | #else |
812 | {MSR_SE, "SE"}, | 812 | {MSR_SE, "SE"}, |
813 | {MSR_BE, "BE"}, | 813 | {MSR_BE, "BE"}, |
814 | #endif | 814 | #endif |
815 | {MSR_IR, "IR"}, | 815 | {MSR_IR, "IR"}, |
816 | {MSR_DR, "DR"}, | 816 | {MSR_DR, "DR"}, |
817 | {MSR_PMM, "PMM"}, | 817 | {MSR_PMM, "PMM"}, |
818 | #ifndef CONFIG_BOOKE | 818 | #ifndef CONFIG_BOOKE |
819 | {MSR_RI, "RI"}, | 819 | {MSR_RI, "RI"}, |
820 | {MSR_LE, "LE"}, | 820 | {MSR_LE, "LE"}, |
821 | #endif | 821 | #endif |
822 | {0, NULL} | 822 | {0, NULL} |
823 | }; | 823 | }; |
824 | 824 | ||
825 | static void printbits(unsigned long val, struct regbit *bits) | 825 | static void printbits(unsigned long val, struct regbit *bits) |
826 | { | 826 | { |
827 | const char *sep = ""; | 827 | const char *sep = ""; |
828 | 828 | ||
829 | printk("<"); | 829 | printk("<"); |
830 | for (; bits->bit; ++bits) | 830 | for (; bits->bit; ++bits) |
831 | if (val & bits->bit) { | 831 | if (val & bits->bit) { |
832 | printk("%s%s", sep, bits->name); | 832 | printk("%s%s", sep, bits->name); |
833 | sep = ","; | 833 | sep = ","; |
834 | } | 834 | } |
835 | printk(">"); | 835 | printk(">"); |
836 | } | 836 | } |
837 | 837 | ||
838 | #ifdef CONFIG_PPC64 | 838 | #ifdef CONFIG_PPC64 |
839 | #define REG "%016lx" | 839 | #define REG "%016lx" |
840 | #define REGS_PER_LINE 4 | 840 | #define REGS_PER_LINE 4 |
841 | #define LAST_VOLATILE 13 | 841 | #define LAST_VOLATILE 13 |
842 | #else | 842 | #else |
843 | #define REG "%08lx" | 843 | #define REG "%08lx" |
844 | #define REGS_PER_LINE 8 | 844 | #define REGS_PER_LINE 8 |
845 | #define LAST_VOLATILE 12 | 845 | #define LAST_VOLATILE 12 |
846 | #endif | 846 | #endif |
847 | 847 | ||
848 | void show_regs(struct pt_regs * regs) | 848 | void show_regs(struct pt_regs * regs) |
849 | { | 849 | { |
850 | int i, trap; | 850 | int i, trap; |
851 | 851 | ||
852 | show_regs_print_info(KERN_DEFAULT); | 852 | show_regs_print_info(KERN_DEFAULT); |
853 | 853 | ||
854 | printk("NIP: "REG" LR: "REG" CTR: "REG"\n", | 854 | printk("NIP: "REG" LR: "REG" CTR: "REG"\n", |
855 | regs->nip, regs->link, regs->ctr); | 855 | regs->nip, regs->link, regs->ctr); |
856 | printk("REGS: %p TRAP: %04lx %s (%s)\n", | 856 | printk("REGS: %p TRAP: %04lx %s (%s)\n", |
857 | regs, regs->trap, print_tainted(), init_utsname()->release); | 857 | regs, regs->trap, print_tainted(), init_utsname()->release); |
858 | printk("MSR: "REG" ", regs->msr); | 858 | printk("MSR: "REG" ", regs->msr); |
859 | printbits(regs->msr, msr_bits); | 859 | printbits(regs->msr, msr_bits); |
860 | printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); | 860 | printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); |
861 | trap = TRAP(regs); | 861 | trap = TRAP(regs); |
862 | if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) | 862 | if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) |
863 | printk("CFAR: "REG" ", regs->orig_gpr3); | 863 | printk("CFAR: "REG" ", regs->orig_gpr3); |
864 | if (trap == 0x200 || trap == 0x300 || trap == 0x600) | 864 | if (trap == 0x200 || trap == 0x300 || trap == 0x600) |
865 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | 865 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) |
866 | printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr); | 866 | printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr); |
867 | #else | 867 | #else |
868 | printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr); | 868 | printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr); |
869 | #endif | 869 | #endif |
870 | #ifdef CONFIG_PPC64 | 870 | #ifdef CONFIG_PPC64 |
871 | printk("SOFTE: %ld ", regs->softe); | 871 | printk("SOFTE: %ld ", regs->softe); |
872 | #endif | 872 | #endif |
873 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 873 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
874 | printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch); | 874 | if (MSR_TM_ACTIVE(regs->msr)) |
| | 875 | printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch); |
875 | #endif | 876 | #endif |
876 | 877 | ||
877 | for (i = 0; i < 32; i++) { | 878 | for (i = 0; i < 32; i++) { |
878 | if ((i % REGS_PER_LINE) == 0) | 879 | if ((i % REGS_PER_LINE) == 0) |
879 | printk("\nGPR%02d: ", i); | 880 | printk("\nGPR%02d: ", i); |
880 | printk(REG " ", regs->gpr[i]); | 881 | printk(REG " ", regs->gpr[i]); |
881 | if (i == LAST_VOLATILE && !FULL_REGS(regs)) | 882 | if (i == LAST_VOLATILE && !FULL_REGS(regs)) |
882 | break; | 883 | break; |
883 | } | 884 | } |
884 | printk("\n"); | 885 | printk("\n"); |
885 | #ifdef CONFIG_KALLSYMS | 886 | #ifdef CONFIG_KALLSYMS |
886 | /* | 887 | /* |
887 | * Lookup NIP late so we have the best chance of getting the | 888 | * Lookup NIP late so we have the best chance of getting the |
888 | * above info out without failing | 889 | * above info out without failing |
889 | */ | 890 | */ |
890 | printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip); | 891 | printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip); |
891 | printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link); | 892 | printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link); |
892 | #endif | 893 | #endif |
893 | show_stack(current, (unsigned long *) regs->gpr[1]); | 894 | show_stack(current, (unsigned long *) regs->gpr[1]); |
894 | if (!user_mode(regs)) | 895 | if (!user_mode(regs)) |
895 | show_instructions(regs); | 896 | show_instructions(regs); |
896 | } | 897 | } |
897 | 898 | ||
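The hunk above (old line 874 versus new lines 874-875) is the functional change in this diff: the PACATMSCRATCH print is now guarded by MSR_TM_ACTIVE(regs->msr). Read as plain C, the post-patch fragment is:

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Only add the extra oops line when MSR_TM_ACTIVE() reports a
	 * transaction in progress; to the best of my understanding the
	 * macro tests the MSR[TS] (transaction state) field, so it is
	 * true for both transactional and suspended states.
	 */
	if (MSR_TM_ACTIVE(regs->msr))
		printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
#endif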
898 | void exit_thread(void) | 899 | void exit_thread(void) |
899 | { | 900 | { |
900 | discard_lazy_cpu_state(); | 901 | discard_lazy_cpu_state(); |
901 | } | 902 | } |
902 | 903 | ||
903 | void flush_thread(void) | 904 | void flush_thread(void) |
904 | { | 905 | { |
905 | discard_lazy_cpu_state(); | 906 | discard_lazy_cpu_state(); |
906 | 907 | ||
907 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 908 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
908 | flush_ptrace_hw_breakpoint(current); | 909 | flush_ptrace_hw_breakpoint(current); |
909 | #else /* CONFIG_HAVE_HW_BREAKPOINT */ | 910 | #else /* CONFIG_HAVE_HW_BREAKPOINT */ |
910 | set_debug_reg_defaults(¤t->thread); | 911 | set_debug_reg_defaults(¤t->thread); |
911 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ | 912 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |
912 | } | 913 | } |
913 | 914 | ||
914 | void | 915 | void |
915 | release_thread(struct task_struct *t) | 916 | release_thread(struct task_struct *t) |
916 | { | 917 | { |
917 | } | 918 | } |
918 | 919 | ||
919 | /* | 920 | /* |
920 | * this gets called so that we can store coprocessor state into memory and | 921 | * this gets called so that we can store coprocessor state into memory and |
921 | * copy the current task into the new thread. | 922 | * copy the current task into the new thread. |
922 | */ | 923 | */ |
923 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) | 924 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) |
924 | { | 925 | { |
925 | flush_fp_to_thread(src); | 926 | flush_fp_to_thread(src); |
926 | flush_altivec_to_thread(src); | 927 | flush_altivec_to_thread(src); |
927 | flush_vsx_to_thread(src); | 928 | flush_vsx_to_thread(src); |
928 | flush_spe_to_thread(src); | 929 | flush_spe_to_thread(src); |
929 | 930 | ||
930 | *dst = *src; | 931 | *dst = *src; |
931 | 932 | ||
932 | clear_task_ebb(dst); | 933 | clear_task_ebb(dst); |
933 | 934 | ||
934 | return 0; | 935 | return 0; |
935 | } | 936 | } |
936 | 937 | ||
937 | /* | 938 | /* |
938 | * Copy a thread.. | 939 | * Copy a thread.. |
939 | */ | 940 | */ |
940 | extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */ | 941 | extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */ |
941 | 942 | ||
942 | int copy_thread(unsigned long clone_flags, unsigned long usp, | 943 | int copy_thread(unsigned long clone_flags, unsigned long usp, |
943 | unsigned long arg, struct task_struct *p) | 944 | unsigned long arg, struct task_struct *p) |
944 | { | 945 | { |
945 | struct pt_regs *childregs, *kregs; | 946 | struct pt_regs *childregs, *kregs; |
946 | extern void ret_from_fork(void); | 947 | extern void ret_from_fork(void); |
947 | extern void ret_from_kernel_thread(void); | 948 | extern void ret_from_kernel_thread(void); |
948 | void (*f)(void); | 949 | void (*f)(void); |
949 | unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE; | 950 | unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE; |
950 | 951 | ||
951 | /* Copy registers */ | 952 | /* Copy registers */ |
952 | sp -= sizeof(struct pt_regs); | 953 | sp -= sizeof(struct pt_regs); |
953 | childregs = (struct pt_regs *) sp; | 954 | childregs = (struct pt_regs *) sp; |
954 | if (unlikely(p->flags & PF_KTHREAD)) { | 955 | if (unlikely(p->flags & PF_KTHREAD)) { |
955 | struct thread_info *ti = (void *)task_stack_page(p); | 956 | struct thread_info *ti = (void *)task_stack_page(p); |
956 | memset(childregs, 0, sizeof(struct pt_regs)); | 957 | memset(childregs, 0, sizeof(struct pt_regs)); |
957 | childregs->gpr[1] = sp + sizeof(struct pt_regs); | 958 | childregs->gpr[1] = sp + sizeof(struct pt_regs); |
958 | childregs->gpr[14] = usp; /* function */ | 959 | childregs->gpr[14] = usp; /* function */ |
959 | #ifdef CONFIG_PPC64 | 960 | #ifdef CONFIG_PPC64 |
960 | clear_tsk_thread_flag(p, TIF_32BIT); | 961 | clear_tsk_thread_flag(p, TIF_32BIT); |
961 | childregs->softe = 1; | 962 | childregs->softe = 1; |
962 | #endif | 963 | #endif |
963 | childregs->gpr[15] = arg; | 964 | childregs->gpr[15] = arg; |
964 | p->thread.regs = NULL; /* no user register state */ | 965 | p->thread.regs = NULL; /* no user register state */ |
965 | ti->flags |= _TIF_RESTOREALL; | 966 | ti->flags |= _TIF_RESTOREALL; |
966 | f = ret_from_kernel_thread; | 967 | f = ret_from_kernel_thread; |
967 | } else { | 968 | } else { |
968 | struct pt_regs *regs = current_pt_regs(); | 969 | struct pt_regs *regs = current_pt_regs(); |
969 | CHECK_FULL_REGS(regs); | 970 | CHECK_FULL_REGS(regs); |
970 | *childregs = *regs; | 971 | *childregs = *regs; |
971 | if (usp) | 972 | if (usp) |
972 | childregs->gpr[1] = usp; | 973 | childregs->gpr[1] = usp; |
973 | p->thread.regs = childregs; | 974 | p->thread.regs = childregs; |
974 | childregs->gpr[3] = 0; /* Result from fork() */ | 975 | childregs->gpr[3] = 0; /* Result from fork() */ |
975 | if (clone_flags & CLONE_SETTLS) { | 976 | if (clone_flags & CLONE_SETTLS) { |
976 | #ifdef CONFIG_PPC64 | 977 | #ifdef CONFIG_PPC64 |
977 | if (!is_32bit_task()) | 978 | if (!is_32bit_task()) |
978 | childregs->gpr[13] = childregs->gpr[6]; | 979 | childregs->gpr[13] = childregs->gpr[6]; |
979 | else | 980 | else |
980 | #endif | 981 | #endif |
981 | childregs->gpr[2] = childregs->gpr[6]; | 982 | childregs->gpr[2] = childregs->gpr[6]; |
982 | } | 983 | } |
983 | 984 | ||
984 | f = ret_from_fork; | 985 | f = ret_from_fork; |
985 | } | 986 | } |
986 | sp -= STACK_FRAME_OVERHEAD; | 987 | sp -= STACK_FRAME_OVERHEAD; |
987 | 988 | ||
988 | /* | 989 | /* |
989 | * The way this works is that at some point in the future | 990 | * The way this works is that at some point in the future |
990 | * some task will call _switch to switch to the new task. | 991 | * some task will call _switch to switch to the new task. |
991 | * That will pop off the stack frame created below and start | 992 | * That will pop off the stack frame created below and start |
992 | * the new task running at ret_from_fork. The new task will | 993 | * the new task running at ret_from_fork. The new task will |
993 | * do some housekeeping and then return from the fork or clone | 994 | * do some housekeeping and then return from the fork or clone |
994 | * system call, using the stack frame created above. | 995 | * system call, using the stack frame created above. |
995 | */ | 996 | */ |
996 | ((unsigned long *)sp)[0] = 0; | 997 | ((unsigned long *)sp)[0] = 0; |
997 | sp -= sizeof(struct pt_regs); | 998 | sp -= sizeof(struct pt_regs); |
998 | kregs = (struct pt_regs *) sp; | 999 | kregs = (struct pt_regs *) sp; |
999 | sp -= STACK_FRAME_OVERHEAD; | 1000 | sp -= STACK_FRAME_OVERHEAD; |
1000 | p->thread.ksp = sp; | 1001 | p->thread.ksp = sp; |
1001 | #ifdef CONFIG_PPC32 | 1002 | #ifdef CONFIG_PPC32 |
1002 | p->thread.ksp_limit = (unsigned long)task_stack_page(p) + | 1003 | p->thread.ksp_limit = (unsigned long)task_stack_page(p) + |
1003 | _ALIGN_UP(sizeof(struct thread_info), 16); | 1004 | _ALIGN_UP(sizeof(struct thread_info), 16); |
1004 | #endif | 1005 | #endif |
1005 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 1006 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
1006 | p->thread.ptrace_bps[0] = NULL; | 1007 | p->thread.ptrace_bps[0] = NULL; |
1007 | #endif | 1008 | #endif |
1008 | 1009 | ||
1009 | p->thread.fp_save_area = NULL; | 1010 | p->thread.fp_save_area = NULL; |
1010 | #ifdef CONFIG_ALTIVEC | 1011 | #ifdef CONFIG_ALTIVEC |
1011 | p->thread.vr_save_area = NULL; | 1012 | p->thread.vr_save_area = NULL; |
1012 | #endif | 1013 | #endif |
1013 | 1014 | ||
1014 | #ifdef CONFIG_PPC_STD_MMU_64 | 1015 | #ifdef CONFIG_PPC_STD_MMU_64 |
1015 | if (mmu_has_feature(MMU_FTR_SLB)) { | 1016 | if (mmu_has_feature(MMU_FTR_SLB)) { |
1016 | unsigned long sp_vsid; | 1017 | unsigned long sp_vsid; |
1017 | unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp; | 1018 | unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp; |
1018 | 1019 | ||
1019 | if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) | 1020 | if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) |
1020 | sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T) | 1021 | sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T) |
1021 | << SLB_VSID_SHIFT_1T; | 1022 | << SLB_VSID_SHIFT_1T; |
1022 | else | 1023 | else |
1023 | sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M) | 1024 | sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M) |
1024 | << SLB_VSID_SHIFT; | 1025 | << SLB_VSID_SHIFT; |
1025 | sp_vsid |= SLB_VSID_KERNEL | llp; | 1026 | sp_vsid |= SLB_VSID_KERNEL | llp; |
1026 | p->thread.ksp_vsid = sp_vsid; | 1027 | p->thread.ksp_vsid = sp_vsid; |
1027 | } | 1028 | } |
1028 | #endif /* CONFIG_PPC_STD_MMU_64 */ | 1029 | #endif /* CONFIG_PPC_STD_MMU_64 */ |
1029 | #ifdef CONFIG_PPC64 | 1030 | #ifdef CONFIG_PPC64 |
1030 | if (cpu_has_feature(CPU_FTR_DSCR)) { | 1031 | if (cpu_has_feature(CPU_FTR_DSCR)) { |
1031 | p->thread.dscr_inherit = current->thread.dscr_inherit; | 1032 | p->thread.dscr_inherit = current->thread.dscr_inherit; |
1032 | p->thread.dscr = current->thread.dscr; | 1033 | p->thread.dscr = current->thread.dscr; |
1033 | } | 1034 | } |
1034 | if (cpu_has_feature(CPU_FTR_HAS_PPR)) | 1035 | if (cpu_has_feature(CPU_FTR_HAS_PPR)) |
1035 | p->thread.ppr = INIT_PPR; | 1036 | p->thread.ppr = INIT_PPR; |
1036 | #endif | 1037 | #endif |
1037 | /* | 1038 | /* |
1038 | * The PPC64 ABI makes use of a TOC to contain function | 1039 | * The PPC64 ABI makes use of a TOC to contain function |
1039 | * pointers. The function (ret_from_except) is actually a pointer | 1040 | * pointers. The function (ret_from_except) is actually a pointer |
1040 | * to the TOC entry. The first entry is a pointer to the actual | 1041 | * to the TOC entry. The first entry is a pointer to the actual |
1041 | * function. | 1042 | * function. |
1042 | */ | 1043 | */ |
1043 | #ifdef CONFIG_PPC64 | 1044 | #ifdef CONFIG_PPC64 |
1044 | kregs->nip = *((unsigned long *)f); | 1045 | kregs->nip = *((unsigned long *)f); |
1045 | #else | 1046 | #else |
1046 | kregs->nip = (unsigned long)f; | 1047 | kregs->nip = (unsigned long)f; |
1047 | #endif | 1048 | #endif |
1048 | return 0; | 1049 | return 0; |
1049 | } | 1050 | } |
1050 | 1051 | ||
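The TOC comment above explains why copy_thread() dereferences f on PPC64 before storing it into kregs->nip: under the big-endian ELFv1 ABI a function symbol points at a descriptor, not at code. A hedged sketch of that layout (the struct and field names are hypothetical; the kernel simply reads the first word):

/* Hypothetical layout of a PPC64 ELFv1 function descriptor. */
struct func_desc_sketch {
	unsigned long entry;	/* address of the first instruction */
	unsigned long toc;	/* TOC (r2) value the function expects */
	unsigned long env;	/* environment pointer, unused by C code */
};

/*
 * Under this assumption,
 *	kregs->nip = *((unsigned long *)f);
 * is equivalent to
 *	kregs->nip = ((struct func_desc_sketch *)f)->entry;
 * while 32-bit builds can use the symbol address directly.
 */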
1051 | /* | 1052 | /* |
1052 | * Set up a thread for executing a new program | 1053 | * Set up a thread for executing a new program |
1053 | */ | 1054 | */ |
1054 | void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) | 1055 | void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) |
1055 | { | 1056 | { |
1056 | #ifdef CONFIG_PPC64 | 1057 | #ifdef CONFIG_PPC64 |
1057 | unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */ | 1058 | unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */ |
1058 | #endif | 1059 | #endif |
1059 | 1060 | ||
1060 | /* | 1061 | /* |
1061 | * If we exec out of a kernel thread then thread.regs will not be | 1062 | * If we exec out of a kernel thread then thread.regs will not be |
1062 | * set. Do it now. | 1063 | * set. Do it now. |
1063 | */ | 1064 | */ |
1064 | if (!current->thread.regs) { | 1065 | if (!current->thread.regs) { |
1065 | struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE; | 1066 | struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE; |
1066 | current->thread.regs = regs - 1; | 1067 | current->thread.regs = regs - 1; |
1067 | } | 1068 | } |
1068 | 1069 | ||
1069 | memset(regs->gpr, 0, sizeof(regs->gpr)); | 1070 | memset(regs->gpr, 0, sizeof(regs->gpr)); |
1070 | regs->ctr = 0; | 1071 | regs->ctr = 0; |
1071 | regs->link = 0; | 1072 | regs->link = 0; |
1072 | regs->xer = 0; | 1073 | regs->xer = 0; |
1073 | regs->ccr = 0; | 1074 | regs->ccr = 0; |
1074 | regs->gpr[1] = sp; | 1075 | regs->gpr[1] = sp; |
1075 | 1076 | ||
1076 | /* | 1077 | /* |
1077 | * We have just cleared all the nonvolatile GPRs, so make | 1078 | * We have just cleared all the nonvolatile GPRs, so make |
1078 | * FULL_REGS(regs) return true. This is necessary to allow | 1079 | * FULL_REGS(regs) return true. This is necessary to allow |
1079 | * ptrace to examine the thread immediately after exec. | 1080 | * ptrace to examine the thread immediately after exec. |
1080 | */ | 1081 | */ |
1081 | regs->trap &= ~1UL; | 1082 | regs->trap &= ~1UL; |
1082 | 1083 | ||
1083 | #ifdef CONFIG_PPC32 | 1084 | #ifdef CONFIG_PPC32 |
1084 | regs->mq = 0; | 1085 | regs->mq = 0; |
1085 | regs->nip = start; | 1086 | regs->nip = start; |
1086 | regs->msr = MSR_USER; | 1087 | regs->msr = MSR_USER; |
1087 | #else | 1088 | #else |
1088 | if (!is_32bit_task()) { | 1089 | if (!is_32bit_task()) { |
1089 | unsigned long entry, toc; | 1090 | unsigned long entry, toc; |
1090 | 1091 | ||
1091 | /* start is a relocated pointer to the function descriptor for | 1092 | /* start is a relocated pointer to the function descriptor for |
1092 | * the elf _start routine. The first entry in the function | 1093 | * the elf _start routine. The first entry in the function |
1093 | * descriptor is the entry address of _start and the second | 1094 | * descriptor is the entry address of _start and the second |
1094 | * entry is the TOC value we need to use. | 1095 | * entry is the TOC value we need to use. |
1095 | */ | 1096 | */ |
1096 | __get_user(entry, (unsigned long __user *)start); | 1097 | __get_user(entry, (unsigned long __user *)start); |
1097 | __get_user(toc, (unsigned long __user *)start+1); | 1098 | __get_user(toc, (unsigned long __user *)start+1); |
1098 | 1099 | ||
1099 | /* Check whether the e_entry function descriptor entries | 1100 | /* Check whether the e_entry function descriptor entries |
1100 | * need to be relocated before we can use them. | 1101 | * need to be relocated before we can use them. |
1101 | */ | 1102 | */ |
1102 | if (load_addr != 0) { | 1103 | if (load_addr != 0) { |
1103 | entry += load_addr; | 1104 | entry += load_addr; |
1104 | toc += load_addr; | 1105 | toc += load_addr; |
1105 | } | 1106 | } |
1106 | regs->nip = entry; | 1107 | regs->nip = entry; |
1107 | regs->gpr[2] = toc; | 1108 | regs->gpr[2] = toc; |
1108 | regs->msr = MSR_USER64; | 1109 | regs->msr = MSR_USER64; |
1109 | } else { | 1110 | } else { |
1110 | regs->nip = start; | 1111 | regs->nip = start; |
1111 | regs->gpr[2] = 0; | 1112 | regs->gpr[2] = 0; |
1112 | regs->msr = MSR_USER32; | 1113 | regs->msr = MSR_USER32; |
1113 | } | 1114 | } |
1114 | #endif | 1115 | #endif |
1115 | discard_lazy_cpu_state(); | 1116 | discard_lazy_cpu_state(); |
1116 | #ifdef CONFIG_VSX | 1117 | #ifdef CONFIG_VSX |
1117 | current->thread.used_vsr = 0; | 1118 | current->thread.used_vsr = 0; |
1118 | #endif | 1119 | #endif |
1119 | memset(¤t->thread.fp_state, 0, sizeof(current->thread.fp_state)); | 1120 | memset(¤t->thread.fp_state, 0, sizeof(current->thread.fp_state)); |
1120 | current->thread.fp_save_area = NULL; | 1121 | current->thread.fp_save_area = NULL; |
1121 | #ifdef CONFIG_ALTIVEC | 1122 | #ifdef CONFIG_ALTIVEC |
1122 | memset(¤t->thread.vr_state, 0, sizeof(current->thread.vr_state)); | 1123 | memset(¤t->thread.vr_state, 0, sizeof(current->thread.vr_state)); |
1123 | current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */ | 1124 | current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */ |
1124 | current->thread.vr_save_area = NULL; | 1125 | current->thread.vr_save_area = NULL; |
1125 | current->thread.vrsave = 0; | 1126 | current->thread.vrsave = 0; |
1126 | current->thread.used_vr = 0; | 1127 | current->thread.used_vr = 0; |
1127 | #endif /* CONFIG_ALTIVEC */ | 1128 | #endif /* CONFIG_ALTIVEC */ |
1128 | #ifdef CONFIG_SPE | 1129 | #ifdef CONFIG_SPE |
1129 | memset(current->thread.evr, 0, sizeof(current->thread.evr)); | 1130 | memset(current->thread.evr, 0, sizeof(current->thread.evr)); |
1130 | current->thread.acc = 0; | 1131 | current->thread.acc = 0; |
1131 | current->thread.spefscr = 0; | 1132 | current->thread.spefscr = 0; |
1132 | current->thread.used_spe = 0; | 1133 | current->thread.used_spe = 0; |
1133 | #endif /* CONFIG_SPE */ | 1134 | #endif /* CONFIG_SPE */ |
1134 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 1135 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
1135 | if (cpu_has_feature(CPU_FTR_TM)) | 1136 | if (cpu_has_feature(CPU_FTR_TM)) |
1136 | regs->msr |= MSR_TM; | 1137 | regs->msr |= MSR_TM; |
1137 | current->thread.tm_tfhar = 0; | 1138 | current->thread.tm_tfhar = 0; |
1138 | current->thread.tm_texasr = 0; | 1139 | current->thread.tm_texasr = 0; |
1139 | current->thread.tm_tfiar = 0; | 1140 | current->thread.tm_tfiar = 0; |
1140 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ | 1141 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ |
1141 | } | 1142 | } |
1142 | 1143 | ||
1143 | #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \ | 1144 | #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \ |
1144 | | PR_FP_EXC_RES | PR_FP_EXC_INV) | 1145 | | PR_FP_EXC_RES | PR_FP_EXC_INV) |
1145 | 1146 | ||
1146 | int set_fpexc_mode(struct task_struct *tsk, unsigned int val) | 1147 | int set_fpexc_mode(struct task_struct *tsk, unsigned int val) |
1147 | { | 1148 | { |
1148 | struct pt_regs *regs = tsk->thread.regs; | 1149 | struct pt_regs *regs = tsk->thread.regs; |
1149 | 1150 | ||
1150 | /* This is a bit hairy. If we are an SPE enabled processor | 1151 | /* This is a bit hairy. If we are an SPE enabled processor |
1151 | * (have embedded fp) we store the IEEE exception enable flags in | 1152 | * (have embedded fp) we store the IEEE exception enable flags in |
1152 | * fpexc_mode. fpexc_mode is also used for setting FP exception | 1153 | * fpexc_mode. fpexc_mode is also used for setting FP exception |
1153 | * mode (async, precise, disabled) for 'Classic' FP. */ | 1154 | * mode (async, precise, disabled) for 'Classic' FP. */ |
1154 | if (val & PR_FP_EXC_SW_ENABLE) { | 1155 | if (val & PR_FP_EXC_SW_ENABLE) { |
1155 | #ifdef CONFIG_SPE | 1156 | #ifdef CONFIG_SPE |
1156 | if (cpu_has_feature(CPU_FTR_SPE)) { | 1157 | if (cpu_has_feature(CPU_FTR_SPE)) { |
1157 | tsk->thread.fpexc_mode = val & | 1158 | tsk->thread.fpexc_mode = val & |
1158 | (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT); | 1159 | (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT); |
1159 | return 0; | 1160 | return 0; |
1160 | } else { | 1161 | } else { |
1161 | return -EINVAL; | 1162 | return -EINVAL; |
1162 | } | 1163 | } |
1163 | #else | 1164 | #else |
1164 | return -EINVAL; | 1165 | return -EINVAL; |
1165 | #endif | 1166 | #endif |
1166 | } | 1167 | } |
1167 | 1168 | ||
1168 | /* on a CONFIG_SPE kernel this does not hurt us. The bits that | 1169 | /* on a CONFIG_SPE kernel this does not hurt us. The bits that |
1169 | * __pack_fe01 use do not overlap with bits used for | 1170 | * __pack_fe01 use do not overlap with bits used for |
1170 | * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits | 1171 | * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits |
1171 | * on CONFIG_SPE implementations are reserved so writing to | 1172 | * on CONFIG_SPE implementations are reserved so writing to |
1172 | * them does not change anything */ | 1173 | * them does not change anything */ |
1173 | if (val > PR_FP_EXC_PRECISE) | 1174 | if (val > PR_FP_EXC_PRECISE) |
1174 | return -EINVAL; | 1175 | return -EINVAL; |
1175 | tsk->thread.fpexc_mode = __pack_fe01(val); | 1176 | tsk->thread.fpexc_mode = __pack_fe01(val); |
1176 | if (regs != NULL && (regs->msr & MSR_FP) != 0) | 1177 | if (regs != NULL && (regs->msr & MSR_FP) != 0) |
1177 | regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1)) | 1178 | regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1)) |
1178 | | tsk->thread.fpexc_mode; | 1179 | | tsk->thread.fpexc_mode; |
1179 | return 0; | 1180 | return 0; |
1180 | } | 1181 | } |
1181 | 1182 | ||
1182 | int get_fpexc_mode(struct task_struct *tsk, unsigned long adr) | 1183 | int get_fpexc_mode(struct task_struct *tsk, unsigned long adr) |
1183 | { | 1184 | { |
1184 | unsigned int val; | 1185 | unsigned int val; |
1185 | 1186 | ||
1186 | if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) | 1187 | if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) |
1187 | #ifdef CONFIG_SPE | 1188 | #ifdef CONFIG_SPE |
1188 | if (cpu_has_feature(CPU_FTR_SPE)) | 1189 | if (cpu_has_feature(CPU_FTR_SPE)) |
1189 | val = tsk->thread.fpexc_mode; | 1190 | val = tsk->thread.fpexc_mode; |
1190 | else | 1191 | else |
1191 | return -EINVAL; | 1192 | return -EINVAL; |
1192 | #else | 1193 | #else |
1193 | return -EINVAL; | 1194 | return -EINVAL; |
1194 | #endif | 1195 | #endif |
1195 | else | 1196 | else |
1196 | val = __unpack_fe01(tsk->thread.fpexc_mode); | 1197 | val = __unpack_fe01(tsk->thread.fpexc_mode); |
1197 | return put_user(val, (unsigned int __user *) adr); | 1198 | return put_user(val, (unsigned int __user *) adr); |
1198 | } | 1199 | } |
1199 | 1200 | ||
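set_fpexc_mode() and get_fpexc_mode() are reached from userspace through prctl(). A hedged usage sketch, assuming the usual PR_SET_FPEXC/PR_GET_FPEXC plumbing in kernel/sys.c routes to these handlers (error handling kept minimal):

#include <stdio.h>
#include <sys/prctl.h>		/* pulls in PR_*_FPEXC and PR_FP_EXC_* */

int main(void)
{
	unsigned int mode = 0;

	/* Request precise floating-point exception reporting. */
	if (prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE) != 0)
		perror("PR_SET_FPEXC");

	/* Read the mode back; the kernel stores it with put_user(). */
	if (prctl(PR_GET_FPEXC, &mode) != 0)
		perror("PR_GET_FPEXC");
	else
		printf("fpexc mode: %#x\n", mode);

	return 0;
}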
1200 | int set_endian(struct task_struct *tsk, unsigned int val) | 1201 | int set_endian(struct task_struct *tsk, unsigned int val) |
1201 | { | 1202 | { |
1202 | struct pt_regs *regs = tsk->thread.regs; | 1203 | struct pt_regs *regs = tsk->thread.regs; |
1203 | 1204 | ||
1204 | if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) || | 1205 | if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) || |
1205 | (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE))) | 1206 | (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE))) |
1206 | return -EINVAL; | 1207 | return -EINVAL; |
1207 | 1208 | ||
1208 | if (regs == NULL) | 1209 | if (regs == NULL) |
1209 | return -EINVAL; | 1210 | return -EINVAL; |
1210 | 1211 | ||
1211 | if (val == PR_ENDIAN_BIG) | 1212 | if (val == PR_ENDIAN_BIG) |
1212 | regs->msr &= ~MSR_LE; | 1213 | regs->msr &= ~MSR_LE; |
1213 | else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE) | 1214 | else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE) |
1214 | regs->msr |= MSR_LE; | 1215 | regs->msr |= MSR_LE; |
1215 | else | 1216 | else |
1216 | return -EINVAL; | 1217 | return -EINVAL; |
1217 | 1218 | ||
1218 | return 0; | 1219 | return 0; |
1219 | } | 1220 | } |
1220 | 1221 | ||
1221 | int get_endian(struct task_struct *tsk, unsigned long adr) | 1222 | int get_endian(struct task_struct *tsk, unsigned long adr) |
1222 | { | 1223 | { |
1223 | struct pt_regs *regs = tsk->thread.regs; | 1224 | struct pt_regs *regs = tsk->thread.regs; |
1224 | unsigned int val; | 1225 | unsigned int val; |
1225 | 1226 | ||
1226 | if (!cpu_has_feature(CPU_FTR_PPC_LE) && | 1227 | if (!cpu_has_feature(CPU_FTR_PPC_LE) && |
1227 | !cpu_has_feature(CPU_FTR_REAL_LE)) | 1228 | !cpu_has_feature(CPU_FTR_REAL_LE)) |
1228 | return -EINVAL; | 1229 | return -EINVAL; |
1229 | 1230 | ||
1230 | if (regs == NULL) | 1231 | if (regs == NULL) |
1231 | return -EINVAL; | 1232 | return -EINVAL; |
1232 | 1233 | ||
1233 | if (regs->msr & MSR_LE) { | 1234 | if (regs->msr & MSR_LE) { |
1234 | if (cpu_has_feature(CPU_FTR_REAL_LE)) | 1235 | if (cpu_has_feature(CPU_FTR_REAL_LE)) |
1235 | val = PR_ENDIAN_LITTLE; | 1236 | val = PR_ENDIAN_LITTLE; |
1236 | else | 1237 | else |
1237 | val = PR_ENDIAN_PPC_LITTLE; | 1238 | val = PR_ENDIAN_PPC_LITTLE; |
1238 | } else | 1239 | } else |
1239 | val = PR_ENDIAN_BIG; | 1240 | val = PR_ENDIAN_BIG; |
1240 | 1241 | ||
1241 | return put_user(val, (unsigned int __user *)adr); | 1242 | return put_user(val, (unsigned int __user *)adr); |
1242 | } | 1243 | } |
1243 | 1244 | ||
1244 | int set_unalign_ctl(struct task_struct *tsk, unsigned int val) | 1245 | int set_unalign_ctl(struct task_struct *tsk, unsigned int val) |
1245 | { | 1246 | { |
1246 | tsk->thread.align_ctl = val; | 1247 | tsk->thread.align_ctl = val; |
1247 | return 0; | 1248 | return 0; |
1248 | } | 1249 | } |
1249 | 1250 | ||
1250 | int get_unalign_ctl(struct task_struct *tsk, unsigned long adr) | 1251 | int get_unalign_ctl(struct task_struct *tsk, unsigned long adr) |
1251 | { | 1252 | { |
1252 | return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr); | 1253 | return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr); |
1253 | } | 1254 | } |
1254 | 1255 | ||
1255 | static inline int valid_irq_stack(unsigned long sp, struct task_struct *p, | 1256 | static inline int valid_irq_stack(unsigned long sp, struct task_struct *p, |
1256 | unsigned long nbytes) | 1257 | unsigned long nbytes) |
1257 | { | 1258 | { |
1258 | unsigned long stack_page; | 1259 | unsigned long stack_page; |
1259 | unsigned long cpu = task_cpu(p); | 1260 | unsigned long cpu = task_cpu(p); |
1260 | 1261 | ||
1261 | /* | 1262 | /* |
1262 | * Avoid crashing if the stack has overflowed and corrupted | 1263 | * Avoid crashing if the stack has overflowed and corrupted |
1263 | * task_cpu(p), which is in the thread_info struct. | 1264 | * task_cpu(p), which is in the thread_info struct. |
1264 | */ | 1265 | */ |
1265 | if (cpu < NR_CPUS && cpu_possible(cpu)) { | 1266 | if (cpu < NR_CPUS && cpu_possible(cpu)) { |
1266 | stack_page = (unsigned long) hardirq_ctx[cpu]; | 1267 | stack_page = (unsigned long) hardirq_ctx[cpu]; |
1267 | if (sp >= stack_page + sizeof(struct thread_struct) | 1268 | if (sp >= stack_page + sizeof(struct thread_struct) |
1268 | && sp <= stack_page + THREAD_SIZE - nbytes) | 1269 | && sp <= stack_page + THREAD_SIZE - nbytes) |
1269 | return 1; | 1270 | return 1; |
1270 | 1271 | ||
1271 | stack_page = (unsigned long) softirq_ctx[cpu]; | 1272 | stack_page = (unsigned long) softirq_ctx[cpu]; |
1272 | if (sp >= stack_page + sizeof(struct thread_struct) | 1273 | if (sp >= stack_page + sizeof(struct thread_struct) |
1273 | && sp <= stack_page + THREAD_SIZE - nbytes) | 1274 | && sp <= stack_page + THREAD_SIZE - nbytes) |
1274 | return 1; | 1275 | return 1; |
1275 | } | 1276 | } |
1276 | return 0; | 1277 | return 0; |
1277 | } | 1278 | } |
1278 | 1279 | ||
1279 | int validate_sp(unsigned long sp, struct task_struct *p, | 1280 | int validate_sp(unsigned long sp, struct task_struct *p, |
1280 | unsigned long nbytes) | 1281 | unsigned long nbytes) |
1281 | { | 1282 | { |
1282 | unsigned long stack_page = (unsigned long)task_stack_page(p); | 1283 | unsigned long stack_page = (unsigned long)task_stack_page(p); |
1283 | 1284 | ||
1284 | if (sp >= stack_page + sizeof(struct thread_struct) | 1285 | if (sp >= stack_page + sizeof(struct thread_struct) |
1285 | && sp <= stack_page + THREAD_SIZE - nbytes) | 1286 | && sp <= stack_page + THREAD_SIZE - nbytes) |
1286 | return 1; | 1287 | return 1; |
1287 | 1288 | ||
1288 | return valid_irq_stack(sp, p, nbytes); | 1289 | return valid_irq_stack(sp, p, nbytes); |
1289 | } | 1290 | } |
1290 | 1291 | ||
1291 | EXPORT_SYMBOL(validate_sp); | 1292 | EXPORT_SYMBOL(validate_sp); |
1292 | 1293 | ||
1293 | unsigned long get_wchan(struct task_struct *p) | 1294 | unsigned long get_wchan(struct task_struct *p) |
1294 | { | 1295 | { |
1295 | unsigned long ip, sp; | 1296 | unsigned long ip, sp; |
1296 | int count = 0; | 1297 | int count = 0; |
1297 | 1298 | ||
1298 | if (!p || p == current || p->state == TASK_RUNNING) | 1299 | if (!p || p == current || p->state == TASK_RUNNING) |
1299 | return 0; | 1300 | return 0; |
1300 | 1301 | ||
1301 | sp = p->thread.ksp; | 1302 | sp = p->thread.ksp; |
1302 | if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD)) | 1303 | if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD)) |
1303 | return 0; | 1304 | return 0; |
1304 | 1305 | ||
1305 | do { | 1306 | do { |
1306 | sp = *(unsigned long *)sp; | 1307 | sp = *(unsigned long *)sp; |
1307 | if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD)) | 1308 | if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD)) |
1308 | return 0; | 1309 | return 0; |
1309 | if (count > 0) { | 1310 | if (count > 0) { |
1310 | ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE]; | 1311 | ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE]; |
1311 | if (!in_sched_functions(ip)) | 1312 | if (!in_sched_functions(ip)) |
1312 | return ip; | 1313 | return ip; |
1313 | } | 1314 | } |
1314 | } while (count++ < 16); | 1315 | } while (count++ < 16); |
1315 | return 0; | 1316 | return 0; |
1316 | } | 1317 | } |
1317 | 1318 | ||
1318 | static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH; | 1319 | static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH; |
1319 | 1320 | ||
1320 | void show_stack(struct task_struct *tsk, unsigned long *stack) | 1321 | void show_stack(struct task_struct *tsk, unsigned long *stack) |
1321 | { | 1322 | { |
1322 | unsigned long sp, ip, lr, newsp; | 1323 | unsigned long sp, ip, lr, newsp; |
1323 | int count = 0; | 1324 | int count = 0; |
1324 | int firstframe = 1; | 1325 | int firstframe = 1; |
1325 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 1326 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1326 | int curr_frame = current->curr_ret_stack; | 1327 | int curr_frame = current->curr_ret_stack; |
1327 | extern void return_to_handler(void); | 1328 | extern void return_to_handler(void); |
1328 | unsigned long rth = (unsigned long)return_to_handler; | 1329 | unsigned long rth = (unsigned long)return_to_handler; |
1329 | unsigned long mrth = -1; | 1330 | unsigned long mrth = -1; |
1330 | #ifdef CONFIG_PPC64 | 1331 | #ifdef CONFIG_PPC64 |
1331 | extern void mod_return_to_handler(void); | 1332 | extern void mod_return_to_handler(void); |
1332 | rth = *(unsigned long *)rth; | 1333 | rth = *(unsigned long *)rth; |
1333 | mrth = (unsigned long)mod_return_to_handler; | 1334 | mrth = (unsigned long)mod_return_to_handler; |
1334 | mrth = *(unsigned long *)mrth; | 1335 | mrth = *(unsigned long *)mrth; |
1335 | #endif | 1336 | #endif |
1336 | #endif | 1337 | #endif |
1337 | 1338 | ||
1338 | sp = (unsigned long) stack; | 1339 | sp = (unsigned long) stack; |
1339 | if (tsk == NULL) | 1340 | if (tsk == NULL) |
1340 | tsk = current; | 1341 | tsk = current; |
1341 | if (sp == 0) { | 1342 | if (sp == 0) { |
1342 | if (tsk == current) | 1343 | if (tsk == current) |
1343 | asm("mr %0,1" : "=r" (sp)); | 1344 | asm("mr %0,1" : "=r" (sp)); |
1344 | else | 1345 | else |
1345 | sp = tsk->thread.ksp; | 1346 | sp = tsk->thread.ksp; |
1346 | } | 1347 | } |
1347 | 1348 | ||
1348 | lr = 0; | 1349 | lr = 0; |
1349 | printk("Call Trace:\n"); | 1350 | printk("Call Trace:\n"); |
1350 | do { | 1351 | do { |
1351 | if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD)) | 1352 | if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD)) |
1352 | return; | 1353 | return; |
1353 | 1354 | ||
1354 | stack = (unsigned long *) sp; | 1355 | stack = (unsigned long *) sp; |
1355 | newsp = stack[0]; | 1356 | newsp = stack[0]; |
1356 | ip = stack[STACK_FRAME_LR_SAVE]; | 1357 | ip = stack[STACK_FRAME_LR_SAVE]; |
1357 | if (!firstframe || ip != lr) { | 1358 | if (!firstframe || ip != lr) { |
1358 | printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); | 1359 | printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); |
1359 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 1360 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1360 | if ((ip == rth || ip == mrth) && curr_frame >= 0) { | 1361 | if ((ip == rth || ip == mrth) && curr_frame >= 0) { |
1361 | printk(" (%pS)", | 1362 | printk(" (%pS)", |
1362 | (void *)current->ret_stack[curr_frame].ret); | 1363 | (void *)current->ret_stack[curr_frame].ret); |
1363 | curr_frame--; | 1364 | curr_frame--; |
1364 | } | 1365 | } |
1365 | #endif | 1366 | #endif |
1366 | if (firstframe) | 1367 | if (firstframe) |
1367 | printk(" (unreliable)"); | 1368 | printk(" (unreliable)"); |
1368 | printk("\n"); | 1369 | printk("\n"); |
1369 | } | 1370 | } |
1370 | firstframe = 0; | 1371 | firstframe = 0; |
1371 | 1372 | ||
1372 | /* | 1373 | /* |
1373 | * See if this is an exception frame. | 1374 | * See if this is an exception frame. |
1374 | * We look for the "regshere" marker in the current frame. | 1375 | * We look for the "regshere" marker in the current frame. |
1375 | */ | 1376 | */ |
1376 | if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE) | 1377 | if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE) |
1377 | && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { | 1378 | && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { |
1378 | struct pt_regs *regs = (struct pt_regs *) | 1379 | struct pt_regs *regs = (struct pt_regs *) |
1379 | (sp + STACK_FRAME_OVERHEAD); | 1380 | (sp + STACK_FRAME_OVERHEAD); |
1380 | lr = regs->link; | 1381 | lr = regs->link; |
1381 | printk("--- Exception: %lx at %pS\n LR = %pS\n", | 1382 | printk("--- Exception: %lx at %pS\n LR = %pS\n", |
1382 | regs->trap, (void *)regs->nip, (void *)lr); | 1383 | regs->trap, (void *)regs->nip, (void *)lr); |
1383 | firstframe = 1; | 1384 | firstframe = 1; |
1384 | } | 1385 | } |
1385 | 1386 | ||
1386 | sp = newsp; | 1387 | sp = newsp; |
1387 | } while (count++ < kstack_depth_to_print); | 1388 | } while (count++ < kstack_depth_to_print); |
1388 | } | 1389 | } |
1389 | 1390 | ||
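show_stack() decides that a frame holds saved registers by validating STACK_INT_FRAME_SIZE bytes and comparing stack[STACK_FRAME_MARKER] against STACK_FRAME_REGS_MARKER, the "regshere" marker mentioned in the comment. As an illustration only, and assuming the 64-bit marker really is the ASCII string "regshere" packed into one word, the value decodes like this:

#include <stdio.h>

int main(void)
{
	/* Assumed marker value; not taken from this diff. */
	unsigned long long marker = 0x7265677368657265ULL;
	char buf[9] = {0};
	int i;

	/* Unpack most-significant byte first. */
	for (i = 0; i < 8; i++)
		buf[i] = (char)((marker >> (8 * (7 - i))) & 0xff);

	printf("%s\n", buf);	/* prints: regshere */
	return 0;
}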
1390 | #ifdef CONFIG_PPC64 | 1391 | #ifdef CONFIG_PPC64 |
1391 | /* Called with hard IRQs off */ | 1392 | /* Called with hard IRQs off */ |
1392 | void notrace __ppc64_runlatch_on(void) | 1393 | void notrace __ppc64_runlatch_on(void) |
1393 | { | 1394 | { |
1394 | struct thread_info *ti = current_thread_info(); | 1395 | struct thread_info *ti = current_thread_info(); |
1395 | unsigned long ctrl; | 1396 | unsigned long ctrl; |
1396 | 1397 | ||
1397 | ctrl = mfspr(SPRN_CTRLF); | 1398 | ctrl = mfspr(SPRN_CTRLF); |
1398 | ctrl |= CTRL_RUNLATCH; | 1399 | ctrl |= CTRL_RUNLATCH; |
1399 | mtspr(SPRN_CTRLT, ctrl); | 1400 | mtspr(SPRN_CTRLT, ctrl); |
1400 | 1401 | ||
1401 | ti->local_flags |= _TLF_RUNLATCH; | 1402 | ti->local_flags |= _TLF_RUNLATCH; |
1402 | } | 1403 | } |
1403 | 1404 | ||
1404 | /* Called with hard IRQs off */ | 1405 | /* Called with hard IRQs off */ |
1405 | void notrace __ppc64_runlatch_off(void) | 1406 | void notrace __ppc64_runlatch_off(void) |
1406 | { | 1407 | { |
1407 | struct thread_info *ti = current_thread_info(); | 1408 | struct thread_info *ti = current_thread_info(); |
1408 | unsigned long ctrl; | 1409 | unsigned long ctrl; |
1409 | 1410 | ||
1410 | ti->local_flags &= ~_TLF_RUNLATCH; | 1411 | ti->local_flags &= ~_TLF_RUNLATCH; |
1411 | 1412 | ||
1412 | ctrl = mfspr(SPRN_CTRLF); | 1413 | ctrl = mfspr(SPRN_CTRLF); |
1413 | ctrl &= ~CTRL_RUNLATCH; | 1414 | ctrl &= ~CTRL_RUNLATCH; |
1414 | mtspr(SPRN_CTRLT, ctrl); | 1415 | mtspr(SPRN_CTRLT, ctrl); |
1415 | } | 1416 | } |
1416 | #endif /* CONFIG_PPC64 */ | 1417 | #endif /* CONFIG_PPC64 */ |
1417 | 1418 | ||
1418 | unsigned long arch_align_stack(unsigned long sp) | 1419 | unsigned long arch_align_stack(unsigned long sp) |
1419 | { | 1420 | { |
1420 | if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) | 1421 | if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) |
1421 | sp -= get_random_int() & ~PAGE_MASK; | 1422 | sp -= get_random_int() & ~PAGE_MASK; |
1422 | return sp & ~0xf; | 1423 | return sp & ~0xf; |
1423 | } | 1424 | } |
1424 | 1425 | ||
1425 | static inline unsigned long brk_rnd(void) | 1426 | static inline unsigned long brk_rnd(void) |
1426 | { | 1427 | { |
1427 | unsigned long rnd = 0; | 1428 | unsigned long rnd = 0; |
1428 | 1429 | ||
1429 | /* 8MB for 32bit, 1GB for 64bit */ | 1430 | /* 8MB for 32bit, 1GB for 64bit */ |
1430 | if (is_32bit_task()) | 1431 | if (is_32bit_task()) |
1431 | rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT))); | 1432 | rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT))); |
1432 | else | 1433 | else |
1433 | rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT))); | 1434 | rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT))); |
1434 | 1435 | ||
1435 | return rnd << PAGE_SHIFT; | 1436 | return rnd << PAGE_SHIFT; |
1436 | } | 1437 | } |
1437 | 1438 | ||
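brk_rnd() picks a random number of pages below 2^(23 - PAGE_SHIFT) for 32-bit tasks and 2^(30 - PAGE_SHIFT) for 64-bit tasks, then converts back to bytes, which is where the "8MB / 1GB" comment comes from. A small worked example, assuming 4K pages (PAGE_SHIFT == 12):

#include <stdio.h>

int main(void)
{
	const int page_shift = 12;
	unsigned long pages32 = 1UL << (23 - page_shift);	/* 2048 pages   */
	unsigned long pages64 = 1UL << (30 - page_shift);	/* 262144 pages */

	printf("32-bit: up to %lu pages = %lu MB of brk randomisation\n",
	       pages32, (pages32 << page_shift) >> 20);	/* 8 MB    */
	printf("64-bit: up to %lu pages = %lu MB of brk randomisation\n",
	       pages64, (pages64 << page_shift) >> 20);	/* 1024 MB */
	return 0;
}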
1438 | unsigned long arch_randomize_brk(struct mm_struct *mm) | 1439 | unsigned long arch_randomize_brk(struct mm_struct *mm) |
1439 | { | 1440 | { |
1440 | unsigned long base = mm->brk; | 1441 | unsigned long base = mm->brk; |
1441 | unsigned long ret; | 1442 | unsigned long ret; |
1442 | 1443 | ||
1443 | #ifdef CONFIG_PPC_STD_MMU_64 | 1444 | #ifdef CONFIG_PPC_STD_MMU_64 |
1444 | /* | 1445 | /* |
1445 | * If we are using 1TB segments and we are allowed to randomise | 1446 | * If we are using 1TB segments and we are allowed to randomise |
1446 | * the heap, we can put it above 1TB so it is backed by a 1TB | 1447 | * the heap, we can put it above 1TB so it is backed by a 1TB |
1447 | * segment. Otherwise the heap will be in the bottom 1TB | 1448 | * segment. Otherwise the heap will be in the bottom 1TB |
1448 | * which always uses 256MB segments and this may result in a | 1449 | * which always uses 256MB segments and this may result in a |
1449 | * performance penalty. | 1450 | * performance penalty. |
1450 | */ | 1451 | */ |
1451 | if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T)) | 1452 | if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T)) |
1452 | base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T); | 1453 | base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T); |
1453 | #endif | 1454 | #endif |
1454 | 1455 | ||
1455 | ret = PAGE_ALIGN(base + brk_rnd()); | 1456 | ret = PAGE_ALIGN(base + brk_rnd()); |
1456 | 1457 | ||
1457 | if (ret < mm->brk) | 1458 | if (ret < mm->brk) |
1458 | return mm->brk; | 1459 | return mm->brk; |
1459 | 1460 | ||
1460 | return ret; | 1461 | return ret; |
1461 | } | 1462 | } |
1462 | 1463 | ||
1463 | unsigned long randomize_et_dyn(unsigned long base) | 1464 | unsigned long randomize_et_dyn(unsigned long base) |
1464 | { | 1465 | { |
1465 | unsigned long ret = PAGE_ALIGN(base + brk_rnd()); | 1466 | unsigned long ret = PAGE_ALIGN(base + brk_rnd()); |
1466 | 1467 | ||
1467 | if (ret < base) | 1468 | if (ret < base) |
1468 | return base; | 1469 | return base; |
1469 | 1470 | ||
1470 | return ret; | 1471 | return ret; |
1471 | } | 1472 | } |
1472 | 1473 |