Commit 9c0729dc8062bed96189bd14ac6d4920f3958743
Committed by
Frederic Weisbecker
1 parent
3985c7ce85
Exists in
master
and in
4 other branches
x86: Eliminate bp argument from the stack tracing routines
The various stack tracing routines take a 'bp' argument in which the caller is supposed to provide the base pointer to use, or 0 if it doesn't have one. Since bp is garbage whenever CONFIG_FRAME_POINTER is not defined, this means all callers in principle should either always pass 0, or be conditional on CONFIG_FRAME_POINTER. However, there are only really three use cases for stack tracing: (a) Trace the current task, including IRQ stack if any (b) Trace the current task, but skip IRQ stack (c) Trace some other task In all cases, if CONFIG_FRAME_POINTER is not defined, bp should just be 0. If it _is_ defined, then - in case (a) bp should be gotten directly from the CPU's register, so the caller should pass NULL for regs, - in case (b) the caller should pass the IRQ registers to dump_trace(), - in case (c) bp should be gotten from the top of the task's stack, so the caller should pass NULL for regs. Hence, the bp argument is not necessary because the combination of task and regs is sufficient to determine an appropriate value for bp. This patch introduces a new inline function stack_frame(task, regs) that computes the desired bp. This function is then called from the two versions of dump_trace(). Signed-off-by: Soren Sandmann <ssp@redhat.com> Acked-by: Steven Rostedt <rostedt@goodmis.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Ingo Molnar <mingo@redhat.com> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Arjan van de Ven <arjan@infradead.org> Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> LKML-Reference: <m3oc9rop28.fsf@dhcp-100-3-82.bos.redhat.com> Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Showing 11 changed files with 62 additions and 55 deletions Side-by-side Diff
- arch/x86/include/asm/kdebug.h
- arch/x86/include/asm/stacktrace.h
- arch/x86/kernel/cpu/perf_event.c
- arch/x86/kernel/dumpstack.c
- arch/x86/kernel/dumpstack_32.c
- arch/x86/kernel/dumpstack_64.c
- arch/x86/kernel/process.c
- arch/x86/kernel/stacktrace.c
- arch/x86/mm/kmemcheck/error.c
- arch/x86/oprofile/backtrace.c
- include/linux/stacktrace.h
arch/x86/include/asm/kdebug.h
... | ... | @@ -28,7 +28,7 @@ |
28 | 28 | extern int __must_check __die(const char *, struct pt_regs *, long); |
29 | 29 | extern void show_registers(struct pt_regs *regs); |
30 | 30 | extern void show_trace(struct task_struct *t, struct pt_regs *regs, |
31 | - unsigned long *sp, unsigned long bp); | |
31 | + unsigned long *sp); | |
32 | 32 | extern void __show_regs(struct pt_regs *regs, int all); |
33 | 33 | extern void show_regs(struct pt_regs *regs); |
34 | 34 | extern unsigned long oops_begin(void); |
arch/x86/include/asm/stacktrace.h
... | ... | @@ -7,6 +7,7 @@ |
7 | 7 | #define _ASM_X86_STACKTRACE_H |
8 | 8 | |
9 | 9 | #include <linux/uaccess.h> |
10 | +#include <linux/ptrace.h> | |
10 | 11 | |
11 | 12 | extern int kstack_depth_to_print; |
12 | 13 | |
... | ... | @@ -46,7 +47,7 @@ |
46 | 47 | }; |
47 | 48 | |
48 | 49 | void dump_trace(struct task_struct *tsk, struct pt_regs *regs, |
49 | - unsigned long *stack, unsigned long bp, | |
50 | + unsigned long *stack, | |
50 | 51 | const struct stacktrace_ops *ops, void *data); |
51 | 52 | |
52 | 53 | #ifdef CONFIG_X86_32 |
53 | 54 | |
54 | 55 | |
... | ... | @@ -57,13 +58,39 @@ |
57 | 58 | #define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :) |
58 | 59 | #endif |
59 | 60 | |
61 | +#ifdef CONFIG_FRAME_POINTER | |
62 | +static inline unsigned long | |
63 | +stack_frame(struct task_struct *task, struct pt_regs *regs) | |
64 | +{ | |
65 | + unsigned long bp; | |
66 | + | |
67 | + if (regs) | |
68 | + return regs->bp; | |
69 | + | |
70 | + if (task == current) { | |
71 | + /* Grab bp right from our regs */ | |
72 | + get_bp(bp); | |
73 | + return bp; | |
74 | + } | |
75 | + | |
76 | + /* bp is the last reg pushed by switch_to */ | |
77 | + return *(unsigned long *)task->thread.sp; | |
78 | +} | |
79 | +#else | |
80 | +static inline unsigned long | |
81 | +stack_frame(struct task_struct *task, struct pt_regs *regs) | |
82 | +{ | |
83 | + return 0; | |
84 | +} | |
85 | +#endif | |
86 | + | |
60 | 87 | extern void |
61 | 88 | show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, |
62 | - unsigned long *stack, unsigned long bp, char *log_lvl); | |
89 | + unsigned long *stack, char *log_lvl); | |
63 | 90 | |
64 | 91 | extern void |
65 | 92 | show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, |
66 | - unsigned long *sp, unsigned long bp, char *log_lvl); | |
93 | + unsigned long *sp, char *log_lvl); | |
67 | 94 | |
68 | 95 | extern unsigned int code_bytes; |
69 | 96 |
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/dumpstack.c
... | ... | @@ -175,21 +175,21 @@ |
175 | 175 | |
176 | 176 | void |
177 | 177 | show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, |
178 | - unsigned long *stack, unsigned long bp, char *log_lvl) | |
178 | + unsigned long *stack, char *log_lvl) | |
179 | 179 | { |
180 | 180 | printk("%sCall Trace:\n", log_lvl); |
181 | - dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl); | |
181 | + dump_trace(task, regs, stack, &print_trace_ops, log_lvl); | |
182 | 182 | } |
183 | 183 | |
184 | 184 | void show_trace(struct task_struct *task, struct pt_regs *regs, |
185 | - unsigned long *stack, unsigned long bp) | |
185 | + unsigned long *stack) | |
186 | 186 | { |
187 | - show_trace_log_lvl(task, regs, stack, bp, ""); | |
187 | + show_trace_log_lvl(task, regs, stack, ""); | |
188 | 188 | } |
189 | 189 | |
190 | 190 | void show_stack(struct task_struct *task, unsigned long *sp) |
191 | 191 | { |
192 | - show_stack_log_lvl(task, NULL, sp, 0, ""); | |
192 | + show_stack_log_lvl(task, NULL, sp, ""); | |
193 | 193 | } |
194 | 194 | |
195 | 195 | /* |
... | ... | @@ -210,7 +210,7 @@ |
210 | 210 | init_utsname()->release, |
211 | 211 | (int)strcspn(init_utsname()->version, " "), |
212 | 212 | init_utsname()->version); |
213 | - show_trace(NULL, NULL, &stack, bp); | |
213 | + show_trace(NULL, NULL, &stack); | |
214 | 214 | } |
215 | 215 | EXPORT_SYMBOL(dump_stack); |
216 | 216 |
arch/x86/kernel/dumpstack_32.c
... | ... | @@ -17,11 +17,12 @@ |
17 | 17 | #include <asm/stacktrace.h> |
18 | 18 | |
19 | 19 | |
20 | -void dump_trace(struct task_struct *task, struct pt_regs *regs, | |
21 | - unsigned long *stack, unsigned long bp, | |
20 | +void dump_trace(struct task_struct *task, | |
21 | + struct pt_regs *regs, unsigned long *stack, | |
22 | 22 | const struct stacktrace_ops *ops, void *data) |
23 | 23 | { |
24 | 24 | int graph = 0; |
25 | + unsigned long bp; | |
25 | 26 | |
26 | 27 | if (!task) |
27 | 28 | task = current; |
... | ... | @@ -34,18 +35,7 @@ |
34 | 35 | stack = (unsigned long *)task->thread.sp; |
35 | 36 | } |
36 | 37 | |
37 | -#ifdef CONFIG_FRAME_POINTER | |
38 | - if (!bp) { | |
39 | - if (task == current) { | |
40 | - /* Grab bp right from our regs */ | |
41 | - get_bp(bp); | |
42 | - } else { | |
43 | - /* bp is the last reg pushed by switch_to */ | |
44 | - bp = *(unsigned long *) task->thread.sp; | |
45 | - } | |
46 | - } | |
47 | -#endif | |
48 | - | |
38 | + bp = stack_frame(task, regs); | |
49 | 39 | for (;;) { |
50 | 40 | struct thread_info *context; |
51 | 41 | |
... | ... | @@ -65,7 +55,7 @@ |
65 | 55 | |
66 | 56 | void |
67 | 57 | show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, |
68 | - unsigned long *sp, unsigned long bp, char *log_lvl) | |
58 | + unsigned long *sp, char *log_lvl) | |
69 | 59 | { |
70 | 60 | unsigned long *stack; |
71 | 61 | int i; |
... | ... | @@ -87,7 +77,7 @@ |
87 | 77 | touch_nmi_watchdog(); |
88 | 78 | } |
89 | 79 | printk(KERN_CONT "\n"); |
90 | - show_trace_log_lvl(task, regs, sp, bp, log_lvl); | |
80 | + show_trace_log_lvl(task, regs, sp, log_lvl); | |
91 | 81 | } |
92 | 82 | |
93 | 83 | |
... | ... | @@ -112,8 +102,7 @@ |
112 | 102 | u8 *ip; |
113 | 103 | |
114 | 104 | printk(KERN_EMERG "Stack:\n"); |
115 | - show_stack_log_lvl(NULL, regs, ®s->sp, | |
116 | - 0, KERN_EMERG); | |
105 | + show_stack_log_lvl(NULL, regs, ®s->sp, KERN_EMERG); | |
117 | 106 | |
118 | 107 | printk(KERN_EMERG "Code: "); |
119 | 108 |
arch/x86/kernel/dumpstack_64.c
... | ... | @@ -139,8 +139,8 @@ |
139 | 139 | * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack |
140 | 140 | */ |
141 | 141 | |
142 | -void dump_trace(struct task_struct *task, struct pt_regs *regs, | |
143 | - unsigned long *stack, unsigned long bp, | |
142 | +void dump_trace(struct task_struct *task, | |
143 | + struct pt_regs *regs, unsigned long *stack, | |
144 | 144 | const struct stacktrace_ops *ops, void *data) |
145 | 145 | { |
146 | 146 | const unsigned cpu = get_cpu(); |
... | ... | @@ -149,6 +149,7 @@ |
149 | 149 | unsigned used = 0; |
150 | 150 | struct thread_info *tinfo; |
151 | 151 | int graph = 0; |
152 | + unsigned long bp; | |
152 | 153 | |
153 | 154 | if (!task) |
154 | 155 | task = current; |
... | ... | @@ -160,18 +161,7 @@ |
160 | 161 | stack = (unsigned long *)task->thread.sp; |
161 | 162 | } |
162 | 163 | |
163 | -#ifdef CONFIG_FRAME_POINTER | |
164 | - if (!bp) { | |
165 | - if (task == current) { | |
166 | - /* Grab bp right from our regs */ | |
167 | - get_bp(bp); | |
168 | - } else { | |
169 | - /* bp is the last reg pushed by switch_to */ | |
170 | - bp = *(unsigned long *) task->thread.sp; | |
171 | - } | |
172 | - } | |
173 | -#endif | |
174 | - | |
164 | + bp = stack_frame(task, regs); | |
175 | 165 | /* |
176 | 166 | * Print function call entries in all stacks, starting at the |
177 | 167 | * current stack address. If the stacks consist of nested |
... | ... | @@ -235,7 +225,7 @@ |
235 | 225 | |
236 | 226 | void |
237 | 227 | show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, |
238 | - unsigned long *sp, unsigned long bp, char *log_lvl) | |
228 | + unsigned long *sp, char *log_lvl) | |
239 | 229 | { |
240 | 230 | unsigned long *irq_stack_end; |
241 | 231 | unsigned long *irq_stack; |
... | ... | @@ -279,7 +269,7 @@ |
279 | 269 | preempt_enable(); |
280 | 270 | |
281 | 271 | printk(KERN_CONT "\n"); |
282 | - show_trace_log_lvl(task, regs, sp, bp, log_lvl); | |
272 | + show_trace_log_lvl(task, regs, sp, log_lvl); | |
283 | 273 | } |
284 | 274 | |
285 | 275 | void show_registers(struct pt_regs *regs) |
... | ... | @@ -308,7 +298,7 @@ |
308 | 298 | |
309 | 299 | printk(KERN_EMERG "Stack:\n"); |
310 | 300 | show_stack_log_lvl(NULL, regs, (unsigned long *)sp, |
311 | - regs->bp, KERN_EMERG); | |
301 | + KERN_EMERG); | |
312 | 302 | |
313 | 303 | printk(KERN_EMERG "Code: "); |
314 | 304 |
arch/x86/kernel/process.c
... | ... | @@ -91,8 +91,7 @@ |
91 | 91 | void show_regs(struct pt_regs *regs) |
92 | 92 | { |
93 | 93 | show_registers(regs); |
94 | - show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs), | |
95 | - regs->bp); | |
94 | + show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs)); | |
96 | 95 | } |
97 | 96 | |
98 | 97 | void show_regs_common(void) |
arch/x86/kernel/stacktrace.c
... | ... | @@ -73,22 +73,22 @@ |
73 | 73 | */ |
74 | 74 | void save_stack_trace(struct stack_trace *trace) |
75 | 75 | { |
76 | - dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace); | |
76 | + dump_trace(current, NULL, NULL, &save_stack_ops, trace); | |
77 | 77 | if (trace->nr_entries < trace->max_entries) |
78 | 78 | trace->entries[trace->nr_entries++] = ULONG_MAX; |
79 | 79 | } |
80 | 80 | EXPORT_SYMBOL_GPL(save_stack_trace); |
81 | 81 | |
82 | -void save_stack_trace_bp(struct stack_trace *trace, unsigned long bp) | |
82 | +void save_stack_trace_regs(struct stack_trace *trace, struct pt_regs *regs) | |
83 | 83 | { |
84 | - dump_trace(current, NULL, NULL, bp, &save_stack_ops, trace); | |
84 | + dump_trace(current, regs, NULL, &save_stack_ops, trace); | |
85 | 85 | if (trace->nr_entries < trace->max_entries) |
86 | 86 | trace->entries[trace->nr_entries++] = ULONG_MAX; |
87 | 87 | } |
88 | 88 | |
89 | 89 | void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) |
90 | 90 | { |
91 | - dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace); | |
91 | + dump_trace(tsk, NULL, NULL, &save_stack_ops_nosched, trace); | |
92 | 92 | if (trace->nr_entries < trace->max_entries) |
93 | 93 | trace->entries[trace->nr_entries++] = ULONG_MAX; |
94 | 94 | } |
arch/x86/mm/kmemcheck/error.c
... | ... | @@ -185,7 +185,7 @@ |
185 | 185 | e->trace.entries = e->trace_entries; |
186 | 186 | e->trace.max_entries = ARRAY_SIZE(e->trace_entries); |
187 | 187 | e->trace.skip = 0; |
188 | - save_stack_trace_bp(&e->trace, regs->bp); | |
188 | + save_stack_trace_regs(&e->trace, regs); | |
189 | 189 | |
190 | 190 | /* Round address down to nearest 16 bytes */ |
191 | 191 | shadow_copy = kmemcheck_shadow_lookup(address |
arch/x86/oprofile/backtrace.c
... | ... | @@ -126,7 +126,7 @@ |
126 | 126 | if (!user_mode_vm(regs)) { |
127 | 127 | unsigned long stack = kernel_stack_pointer(regs); |
128 | 128 | if (depth) |
129 | - dump_trace(NULL, regs, (unsigned long *)stack, 0, | |
129 | + dump_trace(NULL, regs, (unsigned long *)stack, | |
130 | 130 | &backtrace_ops, &depth); |
131 | 131 | return; |
132 | 132 | } |
include/linux/stacktrace.h
... | ... | @@ -2,6 +2,7 @@ |
2 | 2 | #define __LINUX_STACKTRACE_H |
3 | 3 | |
4 | 4 | struct task_struct; |
5 | +struct pt_regs; | |
5 | 6 | |
6 | 7 | #ifdef CONFIG_STACKTRACE |
7 | 8 | struct task_struct; |
... | ... | @@ -13,7 +14,8 @@ |
13 | 14 | }; |
14 | 15 | |
15 | 16 | extern void save_stack_trace(struct stack_trace *trace); |
16 | -extern void save_stack_trace_bp(struct stack_trace *trace, unsigned long bp); | |
17 | +extern void save_stack_trace_regs(struct stack_trace *trace, | |
18 | + struct pt_regs *regs); | |
17 | 19 | extern void save_stack_trace_tsk(struct task_struct *tsk, |
18 | 20 | struct stack_trace *trace); |
19 | 21 |