Commit e996e58133c475bcf3a229f716b9457267cbaa0a

Authored by Vivek Goyal
Committed by Linus Torvalds
1 parent 35ed319a36

[PATCH] kdump: save registers early (inline functions)

- If the system panics then CPU register states are captured through the function
  crash_get_current_regs().  This is not an inline function, hence a stack frame
  is pushed onto the stack and then the CPU register state is captured.  Later
  this frame is popped and new frames are pushed (machine_kexec).

- In theory this is not quite right, as we are capturing register states for a
  frame that is no longer valid.  This seems to have created backtrace
  problems for ppc64.

- This patch fixes it up.  The very first thing it does after entering
  crash_kexec() is to capture the register states.  In any case we don't want
  the backtrace beyond crash_kexec().  crash_get_current_regs() has been made
  inline.

- crash_setup_regs() is the top-level architecture-dependent function which is
  responsible for capturing the register states as well as performing some
  architecture-dependent tricks, for example fixing up ss and esp for i386.
  crash_setup_regs() has also been made inline to ensure no new call frame is
  pushed onto the stack.

Signed-off-by: Vivek Goyal <vgoyal@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 3 changed files with 51 additions and 45 deletions Side-by-side Diff

arch/i386/kernel/crash.c
... ... @@ -82,53 +82,12 @@
82 82 final_note(buf);
83 83 }
84 84  
85   -static void crash_get_current_regs(struct pt_regs *regs)
  85 +static void crash_save_self(struct pt_regs *regs)
86 86 {
87   - __asm__ __volatile__("movl %%ebx,%0" : "=m"(regs->ebx));
88   - __asm__ __volatile__("movl %%ecx,%0" : "=m"(regs->ecx));
89   - __asm__ __volatile__("movl %%edx,%0" : "=m"(regs->edx));
90   - __asm__ __volatile__("movl %%esi,%0" : "=m"(regs->esi));
91   - __asm__ __volatile__("movl %%edi,%0" : "=m"(regs->edi));
92   - __asm__ __volatile__("movl %%ebp,%0" : "=m"(regs->ebp));
93   - __asm__ __volatile__("movl %%eax,%0" : "=m"(regs->eax));
94   - __asm__ __volatile__("movl %%esp,%0" : "=m"(regs->esp));
95   - __asm__ __volatile__("movw %%ss, %%ax;" :"=a"(regs->xss));
96   - __asm__ __volatile__("movw %%cs, %%ax;" :"=a"(regs->xcs));
97   - __asm__ __volatile__("movw %%ds, %%ax;" :"=a"(regs->xds));
98   - __asm__ __volatile__("movw %%es, %%ax;" :"=a"(regs->xes));
99   - __asm__ __volatile__("pushfl; popl %0" :"=m"(regs->eflags));
100   -
101   - regs->eip = (unsigned long)current_text_addr();
102   -}
103   -
104   -/* CPU does not save ss and esp on stack if execution is already
105   - * running in kernel mode at the time of NMI occurrence. This code
106   - * fixes it.
107   - */
108   -static void crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs)
109   -{
110   - memcpy(newregs, oldregs, sizeof(*newregs));
111   - newregs->esp = (unsigned long)&(oldregs->esp);
112   - __asm__ __volatile__(
113   - "xorl %%eax, %%eax\n\t"
114   - "movw %%ss, %%ax\n\t"
115   - :"=a"(newregs->xss));
116   -}
117   -
118   -/* We may have saved_regs from where the error came from
119   - * or it is NULL if via a direct panic().
120   - */
121   -static void crash_save_self(struct pt_regs *saved_regs)
122   -{
123   - struct pt_regs regs;
124 87 int cpu;
125 88  
126 89 cpu = smp_processor_id();
127   - if (saved_regs)
128   - crash_setup_regs(&regs, saved_regs);
129   - else
130   - crash_get_current_regs(&regs);
131   - crash_save_this_cpu(&regs, cpu);
  90 + crash_save_this_cpu(regs, cpu);
132 91 }
133 92  
134 93 #ifdef CONFIG_SMP
... ... @@ -147,7 +106,7 @@
147 106 local_irq_disable();
148 107  
149 108 if (!user_mode(regs)) {
150   - crash_setup_regs(&fixed_regs, regs);
  109 + crash_fixup_ss_esp(&fixed_regs, regs);
151 110 regs = &fixed_regs;
152 111 }
153 112 crash_save_this_cpu(regs, cpu);
include/asm-i386/kexec.h
... ... @@ -2,6 +2,7 @@
2 2 #define _I386_KEXEC_H
3 3  
4 4 #include <asm/fixmap.h>
  5 +#include <asm/ptrace.h>
5 6  
6 7 /*
7 8 * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
... ... @@ -26,6 +27,50 @@
26 27 #define KEXEC_ARCH KEXEC_ARCH_386
27 28  
28 29 #define MAX_NOTE_BYTES 1024
  30 +
  31 +/* CPU does not save ss and esp on stack if execution is already
  32 + * running in kernel mode at the time of NMI occurrence. This code
  33 + * fixes it.
  34 + */
  35 +static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
  36 + struct pt_regs *oldregs)
  37 +{
  38 + memcpy(newregs, oldregs, sizeof(*newregs));
  39 + newregs->esp = (unsigned long)&(oldregs->esp);
  40 + __asm__ __volatile__(
  41 + "xorl %%eax, %%eax\n\t"
  42 + "movw %%ss, %%ax\n\t"
  43 + :"=a"(newregs->xss));
  44 +}
  45 +
  46 +/*
  47 + * This function is responsible for capturing register states if coming
  48 + * via panic otherwise just fix up the ss and esp if coming via kernel
  49 + * mode exception.
  50 + */
  51 +static inline void crash_setup_regs(struct pt_regs *newregs,
  52 + struct pt_regs *oldregs)
  53 +{
  54 + if (oldregs)
  55 + crash_fixup_ss_esp(newregs, oldregs);
  56 + else {
  57 + __asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->ebx));
  58 + __asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->ecx));
  59 + __asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->edx));
  60 + __asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->esi));
  61 + __asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->edi));
  62 + __asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->ebp));
  63 + __asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->eax));
  64 + __asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->esp));
  65 + __asm__ __volatile__("movw %%ss, %%ax;" :"=a"(newregs->xss));
  66 + __asm__ __volatile__("movw %%cs, %%ax;" :"=a"(newregs->xcs));
  67 + __asm__ __volatile__("movw %%ds, %%ax;" :"=a"(newregs->xds));
  68 + __asm__ __volatile__("movw %%es, %%ax;" :"=a"(newregs->xes));
  69 + __asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->eflags));
  70 +
  71 + newregs->eip = (unsigned long)current_text_addr();
  72 + }
  73 +}
29 74  
30 75 #endif /* _I386_KEXEC_H */
... ... @@ -1057,7 +1057,9 @@
1057 1057 if (!locked) {
1058 1058 image = xchg(&kexec_crash_image, NULL);
1059 1059 if (image) {
1060   - machine_crash_shutdown(regs);
  1060 + struct pt_regs fixed_regs;
  1061 + crash_setup_regs(&fixed_regs, regs);
  1062 + machine_crash_shutdown(&fixed_regs);
1061 1063 machine_kexec(image);
1062 1064 }
1063 1065 xchg(&kexec_lock, 0);