Commit 729aa21ab85b5d24f3c2b1e581d71c2333f46628

Authored by Ingo Molnar

Merge branch 'perf/stacktrace' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing into perf/core

Showing 3 changed files (side-by-side diff)

arch/x86/include/asm/perf_event.h
... ... @@ -152,6 +152,11 @@
152 152 (regs)->bp = caller_frame_pointer(); \
153 153 (regs)->cs = __KERNEL_CS; \
154 154 regs->flags = 0; \
  155 + asm volatile( \
  156 + _ASM_MOV "%%"_ASM_SP ", %0\n" \
  157 + : "=m" ((regs)->sp) \
  158 + :: "memory" \
  159 + ); \
155 160 }
156 161  
157 162 #else
arch/x86/kernel/dumpstack_64.c
... ... @@ -105,34 +105,6 @@
105 105 }
106 106  
107 107 /*
108   - * We are returning from the irq stack and go to the previous one.
109   - * If the previous stack is also in the irq stack, then bp in the first
110   - * frame of the irq stack points to the previous, interrupted one.
111   - * Otherwise we have another level of indirection: We first save
112   - * the bp of the previous stack, then we switch the stack to the irq one
113   - * and save a new bp that links to the previous one.
114   - * (See save_args())
115   - */
116   -static inline unsigned long
117   -fixup_bp_irq_link(unsigned long bp, unsigned long *stack,
118   - unsigned long *irq_stack, unsigned long *irq_stack_end)
119   -{
120   -#ifdef CONFIG_FRAME_POINTER
121   - struct stack_frame *frame = (struct stack_frame *)bp;
122   - unsigned long next;
123   -
124   - if (!in_irq_stack(stack, irq_stack, irq_stack_end)) {
125   - if (!probe_kernel_address(&frame->next_frame, next))
126   - return next;
127   - else
128   - WARN_ONCE(1, "Perf: bad frame pointer = %p in "
129   - "callchain\n", &frame->next_frame);
130   - }
131   -#endif
132   - return bp;
133   -}
134   -
135   -/*
136 108 * x86-64 can have up to three kernel stacks:
137 109 * process stack
138 110 * interrupt stack
139 111  
... ... @@ -155,9 +127,12 @@
155 127 task = current;
156 128  
157 129 if (!stack) {
158   - stack = &dummy;
159   - if (task && task != current)
  130 + if (regs)
  131 + stack = (unsigned long *)regs->sp;
  132 + else if (task && task != current)
160 133 stack = (unsigned long *)task->thread.sp;
  134 + else
  135 + stack = &dummy;
161 136 }
162 137  
163 138 if (!bp)
... ... @@ -205,8 +180,6 @@
205 180 * pointer (index -1 to end) in the IRQ stack:
206 181 */
207 182 stack = (unsigned long *) (irq_stack_end[-1]);
208   - bp = fixup_bp_irq_link(bp, stack, irq_stack,
209   - irq_stack_end);
210 183 irq_stack_end = NULL;
211 184 ops->stack(data, "EOI");
212 185 continue;
arch/x86/kernel/entry_64.S
... ... @@ -297,27 +297,26 @@
297 297 .endm
298 298  
299 299 /* save partial stack frame */
300   - .pushsection .kprobes.text, "ax"
301   -ENTRY(save_args)
302   - XCPT_FRAME
  300 + .macro SAVE_ARGS_IRQ
303 301 cld
304   - /*
305   - * start from rbp in pt_regs and jump over
306   - * return address.
307   - */
308   - movq_cfi rdi, RDI+8-RBP
309   - movq_cfi rsi, RSI+8-RBP
310   - movq_cfi rdx, RDX+8-RBP
311   - movq_cfi rcx, RCX+8-RBP
312   - movq_cfi rax, RAX+8-RBP
313   - movq_cfi r8, R8+8-RBP
314   - movq_cfi r9, R9+8-RBP
315   - movq_cfi r10, R10+8-RBP
316   - movq_cfi r11, R11+8-RBP
  302 + /* start from rbp in pt_regs and jump over */
  303 + movq_cfi rdi, RDI-RBP
  304 + movq_cfi rsi, RSI-RBP
  305 + movq_cfi rdx, RDX-RBP
  306 + movq_cfi rcx, RCX-RBP
  307 + movq_cfi rax, RAX-RBP
  308 + movq_cfi r8, R8-RBP
  309 + movq_cfi r9, R9-RBP
  310 + movq_cfi r10, R10-RBP
  311 + movq_cfi r11, R11-RBP
317 312  
318   - leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
319   - movq_cfi rbp, 8 /* push %rbp */
320   - leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
  313 + /* Save rbp so that we can unwind from get_irq_regs() */
  314 + movq_cfi rbp, 0
  315 +
  316 + /* Save previous stack value */
  317 + movq %rsp, %rsi
  318 +
  319 + leaq -RBP(%rsp),%rdi /* arg1 for handler */
321 320 testl $3, CS(%rdi)
322 321 je 1f
323 322 SWAPGS
324 323  
325 324  
... ... @@ -329,20 +328,15 @@
329 328 */
330 329 1: incl PER_CPU_VAR(irq_count)
331 330 jne 2f
332   - popq_cfi %rax /* move return address... */
333 331 mov PER_CPU_VAR(irq_stack_ptr),%rsp
334 332 EMPTY_FRAME 0
335   - pushq_cfi %rbp /* backlink for unwinder */
336   - pushq_cfi %rax /* ... to the new stack */
337   - /*
338   - * We entered an interrupt context - irqs are off:
339   - */
340   -2: TRACE_IRQS_OFF
341   - ret
342   - CFI_ENDPROC
343   -END(save_args)
344   - .popsection
345 333  
  334 +2: /* Store previous stack value */
  335 + pushq %rsi
  336 + /* We entered an interrupt context - irqs are off: */
  337 + TRACE_IRQS_OFF
  338 + .endm
  339 +
346 340 ENTRY(save_rest)
347 341 PARTIAL_FRAME 1 REST_SKIP+8
348 342 movq 5*8+16(%rsp), %r11 /* save return address */
... ... @@ -791,7 +785,7 @@
791 785 /* reserve pt_regs for scratch regs and rbp */
792 786 subq $ORIG_RAX-RBP, %rsp
793 787 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
794   - call save_args
  788 + SAVE_ARGS_IRQ
795 789 PARTIAL_FRAME 0
796 790 call \func
797 791 .endm
798 792  
799 793  
800 794  
... ... @@ -814,15 +808,14 @@
814 808 DISABLE_INTERRUPTS(CLBR_NONE)
815 809 TRACE_IRQS_OFF
816 810 decl PER_CPU_VAR(irq_count)
817   - leaveq
818 811  
819   - CFI_RESTORE rbp
  812 + /* Restore saved previous stack */
  813 + popq %rsi
  814 + leaq 16(%rsi), %rsp
  815 +
820 816 CFI_DEF_CFA_REGISTER rsp
821   - CFI_ADJUST_CFA_OFFSET -8
  817 + CFI_ADJUST_CFA_OFFSET -16
822 818  
823   - /* we did not save rbx, restore only from ARGOFFSET */
824   - addq $8, %rsp
825   - CFI_ADJUST_CFA_OFFSET -8
826 819 exit_intr:
827 820 GET_THREAD_INFO(%rcx)
828 821 testl $3,CS-ARGOFFSET(%rsp)