Commit 61c1917f47f73c968e92d04d15370b1dc3ec4592

Authored by Frederic Weisbecker
Committed by Ingo Molnar
1 parent 5b74ed4729

perf events, x86/stacktrace: Make stack walking optional

The current print_context_stack() helper, which does the stack
walking job, is fine for usual stacktraces: it walks through the
whole stack and reports even addresses that look unreliable, which
is useful when we don't have frame pointers, for example.

But some users, such as perf, only want reliable stacktraces and
may prefer a stack walker better suited to that, so let's make this
function a callback in stacktrace_ops that users can tune for their
needs.
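
As a hypothetical illustration (not part of this patch), a
"reliable only" walker that a user like perf could later plug into
the new ->walk_stack callback might look like the sketch below. The
function name walk_context_stack_reliable is made up here, and the
sketch assumes the struct stack_frame layout, the valid_stack_ptr()
helper and __kernel_text_address() that dumpstack.c already relies
on internally:

static unsigned long
walk_context_stack_reliable(struct thread_info *tinfo,
			    unsigned long *stack, unsigned long bp,
			    const struct stacktrace_ops *ops, void *data,
			    unsigned long *end, int *graph)
{
	/*
	 * Follow only the frame pointer chain; "stack" and "graph"
	 * are ignored in this simplified sketch.
	 */
	struct stack_frame *frame = (struct stack_frame *)bp;

	while (valid_stack_ptr(tinfo, frame, sizeof(*frame), end)) {
		unsigned long addr = frame->return_address;

		if (!__kernel_text_address(addr))
			break;

		/* Frame pointer based, so report the address as reliable. */
		ops->address(data, addr, 1);
		frame = frame->next_frame;
	}

	return (unsigned long)frame;
}

Such a walker would then be registered the same way the existing
users below register print_context_stack, i.e. via
.walk_stack = walk_context_stack_reliable in their stacktrace_ops.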

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1261024834-5336-1-git-send-regression-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Showing 9 changed files with 43 additions and 25 deletions

arch/x86/include/asm/stacktrace.h
... ... @@ -5,6 +5,23 @@
5 5  
6 6 int x86_is_stack_id(int id, char *name);
7 7  
  8 +struct thread_info;
  9 +struct stacktrace_ops;
  10 +
  11 +typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
  12 + unsigned long *stack,
  13 + unsigned long bp,
  14 + const struct stacktrace_ops *ops,
  15 + void *data,
  16 + unsigned long *end,
  17 + int *graph);
  18 +
  19 +extern unsigned long
  20 +print_context_stack(struct thread_info *tinfo,
  21 + unsigned long *stack, unsigned long bp,
  22 + const struct stacktrace_ops *ops, void *data,
  23 + unsigned long *end, int *graph);
  24 +
8 25 /* Generic stack tracer with callbacks */
9 26  
10 27 struct stacktrace_ops {
... ... @@ -14,6 +31,7 @@
14 31 void (*address)(void *data, unsigned long address, int reliable);
15 32 /* On negative return stop dumping */
16 33 int (*stack)(void *data, char *name);
  34 + walk_stack_t walk_stack;
17 35 };
18 36  
19 37 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
arch/x86/kernel/cpu/perf_event.c
... ... @@ -2336,6 +2336,7 @@
2336 2336 .warning_symbol = backtrace_warning_symbol,
2337 2337 .stack = backtrace_stack,
2338 2338 .address = backtrace_address,
  2339 + .walk_stack = print_context_stack,
2339 2340 };
2340 2341  
2341 2342 #include "../dumpstack.h"
arch/x86/kernel/dumpstack.c
... ... @@ -141,10 +141,11 @@
141 141 }
142 142  
143 143 static const struct stacktrace_ops print_trace_ops = {
144   - .warning = print_trace_warning,
145   - .warning_symbol = print_trace_warning_symbol,
146   - .stack = print_trace_stack,
147   - .address = print_trace_address,
  144 + .warning = print_trace_warning,
  145 + .warning_symbol = print_trace_warning_symbol,
  146 + .stack = print_trace_stack,
  147 + .address = print_trace_address,
  148 + .walk_stack = print_context_stack,
148 149 };
149 150  
150 151 void
arch/x86/kernel/dumpstack.h
... ... @@ -14,12 +14,6 @@
14 14 #define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :)
15 15 #endif
16 16  
17   -extern unsigned long
18   -print_context_stack(struct thread_info *tinfo,
19   - unsigned long *stack, unsigned long bp,
20   - const struct stacktrace_ops *ops, void *data,
21   - unsigned long *end, int *graph);
22   -
23 17 extern void
24 18 show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
25 19 unsigned long *stack, unsigned long bp, char *log_lvl);
arch/x86/kernel/dumpstack_32.c
... ... @@ -58,7 +58,7 @@
58 58  
59 59 context = (struct thread_info *)
60 60 ((unsigned long)stack & (~(THREAD_SIZE - 1)));
61   - bp = print_context_stack(context, stack, bp, ops, data, NULL, &graph);
  61 + bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
62 62  
63 63 stack = (unsigned long *)context->previous_esp;
64 64 if (!stack)
arch/x86/kernel/dumpstack_64.c
... ... @@ -188,8 +188,8 @@
188 188 if (ops->stack(data, id) < 0)
189 189 break;
190 190  
191   - bp = print_context_stack(tinfo, stack, bp, ops,
192   - data, estack_end, &graph);
  191 + bp = ops->walk_stack(tinfo, stack, bp, ops,
  192 + data, estack_end, &graph);
193 193 ops->stack(data, "<EOE>");
194 194 /*
195 195 * We link to the next stack via the
arch/x86/kernel/stacktrace.c
... ... @@ -53,17 +53,19 @@
53 53 }
54 54  
55 55 static const struct stacktrace_ops save_stack_ops = {
56   - .warning = save_stack_warning,
57   - .warning_symbol = save_stack_warning_symbol,
58   - .stack = save_stack_stack,
59   - .address = save_stack_address,
  56 + .warning = save_stack_warning,
  57 + .warning_symbol = save_stack_warning_symbol,
  58 + .stack = save_stack_stack,
  59 + .address = save_stack_address,
  60 + .walk_stack = print_context_stack,
60 61 };
61 62  
62 63 static const struct stacktrace_ops save_stack_ops_nosched = {
63   - .warning = save_stack_warning,
64   - .warning_symbol = save_stack_warning_symbol,
65   - .stack = save_stack_stack,
66   - .address = save_stack_address_nosched,
  64 + .warning = save_stack_warning,
  65 + .warning_symbol = save_stack_warning_symbol,
  66 + .stack = save_stack_stack,
  67 + .address = save_stack_address_nosched,
  68 + .walk_stack = print_context_stack,
67 69 };
68 70  
69 71 /*
arch/x86/oprofile/backtrace.c
... ... @@ -41,10 +41,11 @@
41 41 }
42 42  
43 43 static struct stacktrace_ops backtrace_ops = {
44   - .warning = backtrace_warning,
45   - .warning_symbol = backtrace_warning_symbol,
46   - .stack = backtrace_stack,
47   - .address = backtrace_address,
  44 + .warning = backtrace_warning,
  45 + .warning_symbol = backtrace_warning_symbol,
  46 + .stack = backtrace_stack,
  47 + .address = backtrace_address,
  48 + .walk_stack = print_context_stack,
48 49 };
49 50  
50 51 struct frame_head {
kernel/trace/trace_sysprof.c
... ... @@ -93,6 +93,7 @@
93 93 .warning_symbol = backtrace_warning_symbol,
94 94 .stack = backtrace_stack,
95 95 .address = backtrace_address,
  96 + .walk_stack = print_context_stack,
96 97 };
97 98  
98 99 static int