Commit 5388fb1025443ec223ba556b10efc4c5f83f8682
1 parent 593195f9b2
[PATCH] powerpc: Avoid potential FP corruption with preempt and UP
Heikki Lindholm pointed out that there was a potential race with the lazy CPU state (FP, VR, EVR) stuff if preempt is enabled. The race is that in the process of restoring FP state on sigreturn, the task gets preempted by a user task that wants to use the FPU. It will take an FP unavailable exception, which will write the current FPU state to the thread_struct, overwriting the values which sigreturn has stored. Note that this can only happen on UP since we don't implement lazy CPU state on SMP.

The fix is to flush the lazy CPU state before updating the thread_struct. To do this we re-use the discard_lazy_cpu_state() function from process.c.

Signed-off-by: Paul Mackerras <paulus@samba.org>
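To make the ordering concrete, here is a minimal user-space model of the UP lazy-FPU scheme and of the fixed sigreturn ordering. This is a sketch only: the task and thread_struct types and the restore_sigcontext_fp() helper are simplified stand-ins, not the kernel's own definitions, and the real discard_lazy_cpu_state() additionally brackets its work with preempt_disable()/preempt_enable().

#include <stdio.h>
#include <string.h>

struct thread_struct { double fpr[2]; };        /* toy FP save area */
struct task { struct thread_struct thread; };

/* UP lazy switching: the task whose FP state is live in the CPU. */
static struct task *last_task_used_math;

static void discard_lazy_cpu_state(void)
{
        /* The kernel version wraps this in preempt_disable()/enable(). */
        last_task_used_math = NULL;
}

static void restore_sigcontext_fp(struct task *tsk, const double *saved)
{
        /* The fix: give up lazy ownership first, so a preempting task's
         * FP-unavailable handler has no stale state to spill over the
         * values written below. */
        discard_lazy_cpu_state();
        memcpy(tsk->thread.fpr, saved, sizeof tsk->thread.fpr);
}

int main(void)
{
        struct task t = { { { 0.0, 0.0 } } };
        double saved[2] = { 1.5, 2.5 };

        last_task_used_math = &t;       /* t "owns" the live FP state */
        restore_sigcontext_fp(&t, saved);
        printf("fpr[0]=%g fpr[1]=%g owner=%p\n",
               t.thread.fpr[0], t.thread.fpr[1], (void *)last_task_used_math);
        return 0;
}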
4 changed files with 31 additions and 24 deletions
arch/powerpc/kernel/process.c
@@ -201,13 +201,13 @@
 }
 #endif /* CONFIG_SPE */
 
+#ifndef CONFIG_SMP
 /*
  * If we are doing lazy switching of CPU state (FP, altivec or SPE),
  * and the current task has some state, discard it.
  */
-static inline void discard_lazy_cpu_state(void)
+void discard_lazy_cpu_state(void)
 {
-#ifndef CONFIG_SMP
         preempt_disable();
         if (last_task_used_math == current)
                 last_task_used_math = NULL;
 
@@ -220,8 +220,8 @@
                 last_task_used_spe = NULL;
 #endif
         preempt_enable();
-#endif /* CONFIG_SMP */
 }
+#endif /* CONFIG_SMP */
 
 int set_dabr(unsigned long dabr)
 {
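For context, the other half of the race is the FP unavailable exception path that the commit message describes. In rough, compilable-sketch form it looks something like the following; save_fpu() and load_fpu() are hypothetical stand-ins for the real low-level helpers, and the types are pared down:

struct thread_struct { double fpr[32]; };       /* pared-down save area */
struct task_struct { struct thread_struct thread; };

extern struct task_struct *last_task_used_math;
void save_fpu(struct thread_struct *t);         /* hypothetical helper */
void load_fpu(struct thread_struct *t);         /* hypothetical helper */

void handle_fp_unavailable(struct task_struct *tsk)
{
        /* Spill the previous owner's live registers into its
         * thread_struct -- this is the store that can clobber a
         * concurrent sigreturn update if ownership was not discarded. */
        if (last_task_used_math && last_task_used_math != tsk)
                save_fpu(&last_task_used_math->thread);
        load_fpu(&tsk->thread);
        last_task_used_math = tsk;              /* take over ownership */
}

Once sigreturn has called discard_lazy_cpu_state(), last_task_used_math is NULL, so the save_fpu() branch cannot fire and the preempting task can no longer write stale registers over the freshly restored values.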
arch/powerpc/kernel/signal_32.c
@@ -497,6 +497,15 @@
         if (err)
                 return 1;
 
+        /*
+         * Do this before updating the thread state in
+         * current->thread.fpr/vr/evr. That way, if we get preempted
+         * and another task grabs the FPU/Altivec/SPE, it won't be
+         * tempted to save the current CPU state into the thread_struct
+         * and corrupt what we are writing there.
+         */
+        discard_lazy_cpu_state();
+
         /* force the process to reload the FP registers from
            current->thread when it next does FP instructions */
         regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
@@ -538,18 +547,6 @@
                 return 1;
 #endif /* CONFIG_SPE */
 
-#ifndef CONFIG_SMP
-        preempt_disable();
-        if (last_task_used_math == current)
-                last_task_used_math = NULL;
-        if (last_task_used_altivec == current)
-                last_task_used_altivec = NULL;
-#ifdef CONFIG_SPE
-        if (last_task_used_spe == current)
-                last_task_used_spe = NULL;
-#endif
-        preempt_enable();
-#endif
         return 0;
 }
 
arch/powerpc/kernel/signal_64.c
@@ -207,10 +207,20 @@
 
         if (!sig)
                 regs->gpr[13] = save_r13;
-        err |= __copy_from_user(&current->thread.fpr, &sc->fp_regs, FP_REGS_SIZE);
         if (set != NULL)
                 err |= __get_user(set->sig[0], &sc->oldmask);
 
+        /*
+         * Do this before updating the thread state in
+         * current->thread.fpr/vr. That way, if we get preempted
+         * and another task grabs the FPU/Altivec, it won't be
+         * tempted to save the current CPU state into the thread_struct
+         * and corrupt what we are writing there.
+         */
+        discard_lazy_cpu_state();
+
+        err |= __copy_from_user(&current->thread.fpr, &sc->fp_regs, FP_REGS_SIZE);
+
 #ifdef CONFIG_ALTIVEC
         err |= __get_user(v_regs, &sc->v_regs);
         err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
@@ -229,14 +239,6 @@
                 current->thread.vrsave = 0;
 #endif /* CONFIG_ALTIVEC */
 
-#ifndef CONFIG_SMP
-        preempt_disable();
-        if (last_task_used_math == current)
-                last_task_used_math = NULL;
-        if (last_task_used_altivec == current)
-                last_task_used_altivec = NULL;
-        preempt_enable();
-#endif
         /* Force reload of FP/VEC */
         regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC);
 
include/asm-powerpc/system.h
@@ -133,6 +133,14 @@
 extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
 extern void cvt_df(double *from, float *to, struct thread_struct *thread);
 
+#ifndef CONFIG_SMP
+extern void discard_lazy_cpu_state(void);
+#else
+static inline void discard_lazy_cpu_state(void)
+{
+}
+#endif
+
 #ifdef CONFIG_ALTIVEC
 extern void flush_altivec_to_thread(struct task_struct *);
 #else
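The #else stub above is what lets common code stay free of #ifdef CONFIG_SMP at the call sites. A hedged sketch of the pattern (restore_fp_example() is a hypothetical caller, not a function in the tree):

#ifndef CONFIG_SMP
extern void discard_lazy_cpu_state(void);       /* real version, process.c */
#else
static inline void discard_lazy_cpu_state(void)
{
}
#endif

static void restore_fp_example(void)            /* hypothetical caller */
{
        /* No #ifdef needed here: on SMP builds the empty inline
         * compiles away to nothing. */
        discard_lazy_cpu_state();
        /* ... now safe to write the saved FP values to thread_struct ... */
}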