Commit eb05df9e7e793f3134dbb574c7ccc05f7932bc59

Authored by Linus Torvalds

Merge branch 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cleanups from Peter Anvin:
 "The biggest textual change is the cleanup to use symbolic constants
  for x86 trap values.

  The only *functional* change and the reason for the x86/x32 dependency
  is the move of is_ia32_task() into <asm/thread_info.h> so that it can
  be used in other code that needs to understand if a system call comes
  from the compat entry point (and therefore uses i386 system call
  numbers) or not.  One intended user for that is the BPF system call
  filter.  Moving it out of <asm/compat.h> means we can define it
  unconditionally, returning always true on i386."

* 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86: Move is_ia32_task to asm/thread_info.h from asm/compat.h
  x86: Rename trap_no to trap_nr in thread_struct
  x86: Use enum instead of literals for trap values
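The pull message above names the BPF system call filter as the intended user of is_ia32_task(). Purely as a sketch of that use (the helper name and the audit-arch constants are illustrative here, not part of this series), a filtering path could key the syscall-number space off the new helper:

    #include <linux/types.h>
    #include <linux/audit.h>          /* AUDIT_ARCH_I386, AUDIT_ARCH_X86_64 */
    #include <asm/thread_info.h>      /* is_ia32_task(), after this series */

    /*
     * Hypothetical helper: report which syscall ABI the current task
     * entered through.  On CONFIG_X86_32 is_ia32_task() is now
     * unconditionally true, so the same code builds on 32-bit and
     * 64-bit kernels.
     */
    static inline u32 filter_syscall_arch(void)
    {
            if (is_ia32_task())
                    return AUDIT_ARCH_I386;   /* i386 syscall numbers */
            return AUDIT_ARCH_X86_64;         /* native 64-bit numbers */
    }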

14 changed files:

arch/x86/ia32/ia32_signal.c
... ... @@ -346,7 +346,7 @@
346 346 put_user_ex(regs->dx, &sc->dx);
347 347 put_user_ex(regs->cx, &sc->cx);
348 348 put_user_ex(regs->ax, &sc->ax);
349   - put_user_ex(current->thread.trap_no, &sc->trapno);
  349 + put_user_ex(current->thread.trap_nr, &sc->trapno);
350 350 put_user_ex(current->thread.error_code, &sc->err);
351 351 put_user_ex(regs->ip, &sc->ip);
352 352 put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
arch/x86/include/asm/compat.h
... ... @@ -235,15 +235,6 @@
235 235 return (void __user *)round_down(sp - len, 16);
236 236 }
237 237  
238   -static inline bool is_ia32_task(void)
239   -{
240   -#ifdef CONFIG_IA32_EMULATION
241   - if (current_thread_info()->status & TS_COMPAT)
242   - return true;
243   -#endif
244   - return false;
245   -}
246   -
247 238 static inline bool is_x32_task(void)
248 239 {
249 240 #ifdef CONFIG_X86_X32_ABI
arch/x86/include/asm/processor.h
... ... @@ -463,7 +463,7 @@
463 463 unsigned long ptrace_dr7;
464 464 /* Fault info: */
465 465 unsigned long cr2;
466   - unsigned long trap_no;
  466 + unsigned long trap_nr;
467 467 unsigned long error_code;
468 468 /* floating point and extended processor state */
469 469 struct fpu fpu;
arch/x86/include/asm/thread_info.h
... ... @@ -266,6 +266,18 @@
266 266 ti->status |= TS_RESTORE_SIGMASK;
267 267 set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
268 268 }
  269 +
  270 +static inline bool is_ia32_task(void)
  271 +{
  272 +#ifdef CONFIG_X86_32
  273 + return true;
  274 +#endif
  275 +#ifdef CONFIG_IA32_EMULATION
  276 + if (current_thread_info()->status & TS_COMPAT)
  277 + return true;
  278 +#endif
  279 + return false;
  280 +}
269 281 #endif /* !__ASSEMBLY__ */
270 282  
271 283 #ifndef __ASSEMBLY__
arch/x86/include/asm/traps.h
... ... @@ -89,5 +89,30 @@
89 89 asmlinkage void mce_threshold_interrupt(void);
90 90 #endif
91 91  
  92 +/* Interrupts/Exceptions */
  93 +enum {
  94 + X86_TRAP_DE = 0, /* 0, Divide-by-zero */
  95 + X86_TRAP_DB, /* 1, Debug */
  96 + X86_TRAP_NMI, /* 2, Non-maskable Interrupt */
  97 + X86_TRAP_BP, /* 3, Breakpoint */
  98 + X86_TRAP_OF, /* 4, Overflow */
  99 + X86_TRAP_BR, /* 5, Bound Range Exceeded */
  100 + X86_TRAP_UD, /* 6, Invalid Opcode */
  101 + X86_TRAP_NM, /* 7, Device Not Available */
  102 + X86_TRAP_DF, /* 8, Double Fault */
  103 + X86_TRAP_OLD_MF, /* 9, Coprocessor Segment Overrun */
  104 + X86_TRAP_TS, /* 10, Invalid TSS */
  105 + X86_TRAP_NP, /* 11, Segment Not Present */
  106 + X86_TRAP_SS, /* 12, Stack Segment Fault */
  107 + X86_TRAP_GP, /* 13, General Protection Fault */
  108 + X86_TRAP_PF, /* 14, Page Fault */
  109 + X86_TRAP_SPURIOUS, /* 15, Spurious Interrupt */
  110 + X86_TRAP_MF, /* 16, x87 Floating-Point Exception */
  111 + X86_TRAP_AC, /* 17, Alignment Check */
  112 + X86_TRAP_MC, /* 18, Machine Check */
  113 + X86_TRAP_XF, /* 19, SIMD Floating-Point Exception */
  114 + X86_TRAP_IRET = 32, /* 32, IRET Exception */
  115 +};
  116 +
92 117 #endif /* _ASM_X86_TRAPS_H */
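The enum above only gives the vectors symbolic names; the series does not add string names for them. As an illustration of how code can now be written against the constants, a diagnostic helper might map a recorded trap_nr back to a label like this (the table and function are hypothetical, not part of the patch):

    #include <linux/kernel.h>   /* ARRAY_SIZE() */
    #include <asm/traps.h>      /* X86_TRAP_* after this series */

    /* Illustrative only: short labels for a few of the new symbolic vectors. */
    static const char *const trap_names[] = {
            [X86_TRAP_DE] = "divide error",
            [X86_TRAP_DB] = "debug",
            [X86_TRAP_BP] = "int3",
            [X86_TRAP_GP] = "general protection fault",
            [X86_TRAP_PF] = "page fault",
            [X86_TRAP_MF] = "x87 exception",
            [X86_TRAP_XF] = "simd exception",
    };

    static const char *trap_name(unsigned long trap_nr)
    {
            if (trap_nr < ARRAY_SIZE(trap_names) && trap_names[trap_nr])
                    return trap_names[trap_nr];
            return "unknown";
    }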
arch/x86/kernel/dumpstack.c
... ... @@ -268,7 +268,7 @@
268 268 #endif
269 269 printk("\n");
270 270 if (notify_die(DIE_OOPS, str, regs, err,
271   - current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
  271 + current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
272 272 return 1;
273 273  
274 274 show_registers(regs);
arch/x86/kernel/irqinit.c
... ... @@ -60,7 +60,7 @@
60 60 outb(0, 0xF0);
61 61 if (ignore_fpu_irq || !boot_cpu_data.hard_math)
62 62 return IRQ_NONE;
63   - math_error(get_irq_regs(), 0, 16);
  63 + math_error(get_irq_regs(), 0, X86_TRAP_MF);
64 64 return IRQ_HANDLED;
65 65 }
66 66  
arch/x86/kernel/ptrace.c
... ... @@ -33,6 +33,7 @@
33 33 #include <asm/prctl.h>
34 34 #include <asm/proto.h>
35 35 #include <asm/hw_breakpoint.h>
  36 +#include <asm/traps.h>
36 37  
37 38 #include "tls.h"
38 39  
... ... @@ -1425,7 +1426,7 @@
1425 1426 int error_code, int si_code,
1426 1427 struct siginfo *info)
1427 1428 {
1428   - tsk->thread.trap_no = 1;
  1429 + tsk->thread.trap_nr = X86_TRAP_DB;
1429 1430 tsk->thread.error_code = error_code;
1430 1431  
1431 1432 memset(info, 0, sizeof(*info));
arch/x86/kernel/signal.c
... ... @@ -151,7 +151,7 @@
151 151 put_user_ex(regs->r15, &sc->r15);
152 152 #endif /* CONFIG_X86_64 */
153 153  
154   - put_user_ex(current->thread.trap_no, &sc->trapno);
  154 + put_user_ex(current->thread.trap_nr, &sc->trapno);
155 155 put_user_ex(current->thread.error_code, &sc->err);
156 156 put_user_ex(regs->ip, &sc->ip);
157 157 #ifdef CONFIG_X86_32
arch/x86/kernel/traps.c
... ... @@ -119,7 +119,7 @@
119 119 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
120 120 * On nmi (interrupt 2), do_trap should not be called.
121 121 */
122   - if (trapnr < 6)
  122 + if (trapnr < X86_TRAP_UD)
123 123 goto vm86_trap;
124 124 goto trap_signal;
125 125 }
... ... @@ -132,7 +132,7 @@
132 132 trap_signal:
133 133 #endif
134 134 /*
135   - * We want error_code and trap_no set for userspace faults and
  135 + * We want error_code and trap_nr set for userspace faults and
136 136 * kernelspace faults which result in die(), but not
137 137 * kernelspace faults which are fixed up. die() gives the
138 138 * process no chance to handle the signal and notice the
... ... @@ -141,7 +141,7 @@
141 141 * delivered, faults. See also do_general_protection below.
142 142 */
143 143 tsk->thread.error_code = error_code;
144   - tsk->thread.trap_no = trapnr;
  144 + tsk->thread.trap_nr = trapnr;
145 145  
146 146 #ifdef CONFIG_X86_64
147 147 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
... ... @@ -164,7 +164,7 @@
164 164 kernel_trap:
165 165 if (!fixup_exception(regs)) {
166 166 tsk->thread.error_code = error_code;
167   - tsk->thread.trap_no = trapnr;
  167 + tsk->thread.trap_nr = trapnr;
168 168 die(str, regs, error_code);
169 169 }
170 170 return;
171 171  
172 172  
173 173  
174 174  
... ... @@ -203,27 +203,31 @@
203 203 do_trap(trapnr, signr, str, regs, error_code, &info); \
204 204 }
205 205  
206   -DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
207   -DO_ERROR(4, SIGSEGV, "overflow", overflow)
208   -DO_ERROR(5, SIGSEGV, "bounds", bounds)
209   -DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
210   -DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
211   -DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
212   -DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
  206 +DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV,
  207 + regs->ip)
  208 +DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
  209 +DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds)
  210 +DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN,
  211 + regs->ip)
  212 +DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",
  213 + coprocessor_segment_overrun)
  214 +DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
  215 +DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
213 216 #ifdef CONFIG_X86_32
214   -DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
  217 +DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
215 218 #endif
216   -DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
  219 +DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
  220 + BUS_ADRALN, 0)
217 221  
218 222 #ifdef CONFIG_X86_64
219 223 /* Runs on IST stack */
220 224 dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
221 225 {
222 226 if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
223   - 12, SIGBUS) == NOTIFY_STOP)
  227 + X86_TRAP_SS, SIGBUS) == NOTIFY_STOP)
224 228 return;
225 229 preempt_conditional_sti(regs);
226   - do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
  230 + do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
227 231 preempt_conditional_cli(regs);
228 232 }
229 233  
230 234  
... ... @@ -233,10 +237,10 @@
233 237 struct task_struct *tsk = current;
234 238  
235 239 /* Return not checked because double check cannot be ignored */
236   - notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);
  240 + notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
237 241  
238 242 tsk->thread.error_code = error_code;
239   - tsk->thread.trap_no = 8;
  243 + tsk->thread.trap_nr = X86_TRAP_DF;
240 244  
241 245 /*
242 246 * This is always a kernel trap and never fixable (and thus must
... ... @@ -264,7 +268,7 @@
264 268 goto gp_in_kernel;
265 269  
266 270 tsk->thread.error_code = error_code;
267   - tsk->thread.trap_no = 13;
  271 + tsk->thread.trap_nr = X86_TRAP_GP;
268 272  
269 273 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
270 274 printk_ratelimit()) {
... ... @@ -291,9 +295,9 @@
291 295 return;
292 296  
293 297 tsk->thread.error_code = error_code;
294   - tsk->thread.trap_no = 13;
295   - if (notify_die(DIE_GPF, "general protection fault", regs,
296   - error_code, 13, SIGSEGV) == NOTIFY_STOP)
  298 + tsk->thread.trap_nr = X86_TRAP_GP;
  299 + if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
  300 + X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
297 301 return;
298 302 die("general protection fault", regs, error_code);
299 303 }
300 304  
... ... @@ -302,13 +306,13 @@
302 306 dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
303 307 {
304 308 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
305   - if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
306   - == NOTIFY_STOP)
  309 + if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
  310 + SIGTRAP) == NOTIFY_STOP)
307 311 return;
308 312 #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
309 313  
310   - if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
311   - == NOTIFY_STOP)
  314 + if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
  315 + SIGTRAP) == NOTIFY_STOP)
312 316 return;
313 317  
314 318 /*
... ... @@ -317,7 +321,7 @@
317 321 */
318 322 debug_stack_usage_inc();
319 323 preempt_conditional_sti(regs);
320   - do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
  324 + do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
321 325 preempt_conditional_cli(regs);
322 326 debug_stack_usage_dec();
323 327 }
... ... @@ -422,8 +426,8 @@
422 426 preempt_conditional_sti(regs);
423 427  
424 428 if (regs->flags & X86_VM_MASK) {
425   - handle_vm86_trap((struct kernel_vm86_regs *) regs,
426   - error_code, 1);
  429 + handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
  430 + X86_TRAP_DB);
427 431 preempt_conditional_cli(regs);
428 432 debug_stack_usage_dec();
429 433 return;
... ... @@ -460,7 +464,8 @@
460 464 struct task_struct *task = current;
461 465 siginfo_t info;
462 466 unsigned short err;
463   - char *str = (trapnr == 16) ? "fpu exception" : "simd exception";
  467 + char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
  468 + "simd exception";
464 469  
465 470 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
466 471 return;
... ... @@ -470,7 +475,7 @@
470 475 {
471 476 if (!fixup_exception(regs)) {
472 477 task->thread.error_code = error_code;
473   - task->thread.trap_no = trapnr;
  478 + task->thread.trap_nr = trapnr;
474 479 die(str, regs, error_code);
475 480 }
476 481 return;
477 482  
... ... @@ -480,12 +485,12 @@
480 485 * Save the info for the exception handler and clear the error.
481 486 */
482 487 save_init_fpu(task);
483   - task->thread.trap_no = trapnr;
  488 + task->thread.trap_nr = trapnr;
484 489 task->thread.error_code = error_code;
485 490 info.si_signo = SIGFPE;
486 491 info.si_errno = 0;
487 492 info.si_addr = (void __user *)regs->ip;
488   - if (trapnr == 16) {
  493 + if (trapnr == X86_TRAP_MF) {
489 494 unsigned short cwd, swd;
490 495 /*
491 496 * (~cwd & swd) will mask out exceptions that are not set to unmasked
492 497  
... ... @@ -529,10 +534,11 @@
529 534 info.si_code = FPE_FLTRES;
530 535 } else {
531 536 /*
532   - * If we're using IRQ 13, or supposedly even some trap 16
533   - * implementations, it's possible we get a spurious trap...
  537 + * If we're using IRQ 13, or supposedly even some trap
  538 + * X86_TRAP_MF implementations, it's possible
  539 + * we get a spurious trap, which is not an error.
534 540 */
535   - return; /* Spurious trap, no error */
  541 + return;
536 542 }
537 543 force_sig_info(SIGFPE, &info, task);
538 544 }
539 545  
... ... @@ -543,13 +549,13 @@
543 549 ignore_fpu_irq = 1;
544 550 #endif
545 551  
546   - math_error(regs, error_code, 16);
  552 + math_error(regs, error_code, X86_TRAP_MF);
547 553 }
548 554  
549 555 dotraplinkage void
550 556 do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
551 557 {
552   - math_error(regs, error_code, 19);
  558 + math_error(regs, error_code, X86_TRAP_XF);
553 559 }
554 560  
555 561 dotraplinkage void
556 562  
557 563  
558 564  
... ... @@ -643,20 +649,21 @@
643 649 info.si_errno = 0;
644 650 info.si_code = ILL_BADSTK;
645 651 info.si_addr = NULL;
646   - if (notify_die(DIE_TRAP, "iret exception",
647   - regs, error_code, 32, SIGILL) == NOTIFY_STOP)
  652 + if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
  653 + X86_TRAP_IRET, SIGILL) == NOTIFY_STOP)
648 654 return;
649   - do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
  655 + do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
  656 + &info);
650 657 }
651 658 #endif
652 659  
653 660 /* Set of traps needed for early debugging. */
654 661 void __init early_trap_init(void)
655 662 {
656   - set_intr_gate_ist(1, &debug, DEBUG_STACK);
  663 + set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
657 664 /* int3 can be called from all */
658   - set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
659   - set_intr_gate(14, &page_fault);
  665 + set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
  666 + set_intr_gate(X86_TRAP_PF, &page_fault);
660 667 load_idt(&idt_descr);
661 668 }
662 669  
663 670  
664 671  
665 672  
666 673  
667 674  
668 675  
... ... @@ -672,30 +679,30 @@
672 679 early_iounmap(p, 4);
673 680 #endif
674 681  
675   - set_intr_gate(0, &divide_error);
676   - set_intr_gate_ist(2, &nmi, NMI_STACK);
  682 + set_intr_gate(X86_TRAP_DE, &divide_error);
  683 + set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
677 684 /* int4 can be called from all */
678   - set_system_intr_gate(4, &overflow);
679   - set_intr_gate(5, &bounds);
680   - set_intr_gate(6, &invalid_op);
681   - set_intr_gate(7, &device_not_available);
  685 + set_system_intr_gate(X86_TRAP_OF, &overflow);
  686 + set_intr_gate(X86_TRAP_BR, &bounds);
  687 + set_intr_gate(X86_TRAP_UD, &invalid_op);
  688 + set_intr_gate(X86_TRAP_NM, &device_not_available);
682 689 #ifdef CONFIG_X86_32
683   - set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
  690 + set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
684 691 #else
685   - set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
  692 + set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
686 693 #endif
687   - set_intr_gate(9, &coprocessor_segment_overrun);
688   - set_intr_gate(10, &invalid_TSS);
689   - set_intr_gate(11, &segment_not_present);
690   - set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
691   - set_intr_gate(13, &general_protection);
692   - set_intr_gate(15, &spurious_interrupt_bug);
693   - set_intr_gate(16, &coprocessor_error);
694   - set_intr_gate(17, &alignment_check);
  694 + set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun);
  695 + set_intr_gate(X86_TRAP_TS, &invalid_TSS);
  696 + set_intr_gate(X86_TRAP_NP, &segment_not_present);
  697 + set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
  698 + set_intr_gate(X86_TRAP_GP, &general_protection);
  699 + set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug);
  700 + set_intr_gate(X86_TRAP_MF, &coprocessor_error);
  701 + set_intr_gate(X86_TRAP_AC, &alignment_check);
695 702 #ifdef CONFIG_X86_MCE
696   - set_intr_gate_ist(18, &machine_check, MCE_STACK);
  703 + set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
697 704 #endif
698   - set_intr_gate(19, &simd_coprocessor_error);
  705 + set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error);
699 706  
700 707 /* Reserve all the builtin and the syscall vector: */
701 708 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
... ... @@ -720,8 +727,8 @@
720 727  
721 728 #ifdef CONFIG_X86_64
722 729 memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16);
723   - set_nmi_gate(1, &debug);
724   - set_nmi_gate(3, &int3);
  730 + set_nmi_gate(X86_TRAP_DB, &debug);
  731 + set_nmi_gate(X86_TRAP_BP, &int3);
725 732 #endif
726 733 }
arch/x86/kernel/vm86_32.c
... ... @@ -569,7 +569,7 @@
569 569 }
570 570 if (trapno != 1)
571 571 return 1; /* we let this handle by the calling routine */
572   - current->thread.trap_no = trapno;
  572 + current->thread.trap_nr = trapno;
573 573 current->thread.error_code = error_code;
574 574 force_sig(SIGTRAP, current);
575 575 return 0;
arch/x86/kernel/vsyscall_64.c
... ... @@ -152,7 +152,7 @@
152 152  
153 153 thread->error_code = 6; /* user fault, no page, write */
154 154 thread->cr2 = ptr;
155   - thread->trap_no = 14;
  155 + thread->trap_nr = X86_TRAP_PF;
156 156  
157 157 memset(&info, 0, sizeof(info));
158 158 info.si_signo = SIGSEGV;
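For context on the literal error_code = 6 kept in the vsyscall hunk above: the x86 page-fault error code is a bit mask, and 6 encodes a user-mode write to a not-present page, which is exactly what the "user fault, no page, write" comment says. A sketch of that decoding (the kernel keeps its own names for these bits, e.g. the PF_USER seen a few hunks below; the defines here only restate the hardware meaning):

    /* Hardware-defined page-fault error code bits (illustrative names). */
    #define PFEC_PRESENT    (1UL << 0)  /* set: protection fault; clear: page not present */
    #define PFEC_WRITE      (1UL << 1)  /* set: write access */
    #define PFEC_USER       (1UL << 2)  /* set: fault from user mode */

    /* 6 == PFEC_WRITE | PFEC_USER: user-mode write to a not-present page. */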
arch/x86/math-emu/fpu_entry.c
... ... @@ -28,6 +28,7 @@
28 28 #include <linux/regset.h>
29 29  
30 30 #include <asm/uaccess.h>
  31 +#include <asm/traps.h>
31 32 #include <asm/desc.h>
32 33 #include <asm/user.h>
33 34 #include <asm/i387.h>
... ... @@ -269,7 +270,7 @@
269 270 FPU_EIP = FPU_ORIG_EIP; /* Point to current FPU instruction. */
270 271  
271 272 RE_ENTRANT_CHECK_OFF;
272   - current->thread.trap_no = 16;
  273 + current->thread.trap_nr = X86_TRAP_MF;
273 274 current->thread.error_code = 0;
274 275 send_sig(SIGFPE, current, 1);
275 276 return;
... ... @@ -662,7 +663,7 @@
662 663 void math_abort(struct math_emu_info *info, unsigned int signal)
663 664 {
664 665 FPU_EIP = FPU_ORIG_EIP;
665   - current->thread.trap_no = 16;
  666 + current->thread.trap_nr = X86_TRAP_MF;
666 667 current->thread.error_code = 0;
667 668 send_sig(signal, current, 1);
668 669 RE_ENTRANT_CHECK_OFF;
arch/x86/mm/fault.c
... ... @@ -615,7 +615,7 @@
615 615 dump_pagetable(address);
616 616  
617 617 tsk->thread.cr2 = address;
618   - tsk->thread.trap_no = 14;
  618 + tsk->thread.trap_nr = X86_TRAP_PF;
619 619 tsk->thread.error_code = error_code;
620 620  
621 621 if (__die("Bad pagetable", regs, error_code))
... ... @@ -636,7 +636,7 @@
636 636 /* Are we prepared to handle this kernel fault? */
637 637 if (fixup_exception(regs)) {
638 638 if (current_thread_info()->sig_on_uaccess_error && signal) {
639   - tsk->thread.trap_no = 14;
  639 + tsk->thread.trap_nr = X86_TRAP_PF;
640 640 tsk->thread.error_code = error_code | PF_USER;
641 641 tsk->thread.cr2 = address;
642 642  
... ... @@ -676,7 +676,7 @@
676 676 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
677 677  
678 678 tsk->thread.cr2 = address;
679   - tsk->thread.trap_no = 14;
  679 + tsk->thread.trap_nr = X86_TRAP_PF;
680 680 tsk->thread.error_code = error_code;
681 681  
682 682 sig = SIGKILL;
... ... @@ -754,7 +754,7 @@
754 754 /* Kernel addresses are always protection faults: */
755 755 tsk->thread.cr2 = address;
756 756 tsk->thread.error_code = error_code | (address >= TASK_SIZE);
757   - tsk->thread.trap_no = 14;
  757 + tsk->thread.trap_nr = X86_TRAP_PF;
758 758  
759 759 force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
760 760  
... ... @@ -838,7 +838,7 @@
838 838  
839 839 tsk->thread.cr2 = address;
840 840 tsk->thread.error_code = error_code;
841   - tsk->thread.trap_no = 14;
  841 + tsk->thread.trap_nr = X86_TRAP_PF;
842 842  
843 843 #ifdef CONFIG_MEMORY_FAILURE
844 844 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {