Blame view
kernel/trace/trace_stack.c
10.9 KB
b24413180
|
1 |
// SPDX-License-Identifier: GPL-2.0 |
e5a81b629
|
2 3 4 5 |
/* * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> * */ |
68db0cf10
|
6 |
#include <linux/sched/task_stack.h> |
e5a81b629
|
7 8 9 10 11 |
#include <linux/stacktrace.h> #include <linux/kallsyms.h> #include <linux/seq_file.h> #include <linux/spinlock.h> #include <linux/uaccess.h> |
e5a81b629
|
12 13 |
#include <linux/ftrace.h> #include <linux/module.h> |
f38f1d2aa
|
14 |
#include <linux/sysctl.h> |
e5a81b629
|
15 |
#include <linux/init.h> |
762e12078
|
16 17 |
#include <asm/setup.h> |
e5a81b629
|
18 |
#include "trace.h" |
1b6cced6e
|
19 20 |
/*
 * The saved addresses of the deepest stack seen so far.  One extra slot
 * is reserved so that slot [nr_entries] always holds the ULONG_MAX
 * terminator even when the trace array is completely full.
 */
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
/* stack_trace_index[i] = stack depth (bytes from the top) at entry i */
unsigned stack_trace_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
struct stack_trace stack_trace_max = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[0],
};

/* Largest stack usage (in bytes) recorded so far */
unsigned long stack_trace_max_size;
/* Protects stack_trace_max, stack_trace_index[] and stack_trace_max_size */
arch_spinlock_t stack_trace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

/* Per-cpu recursion/re-entrancy guard for the stack tracer callback */
DEFINE_PER_CPU(int, disable_stack_tracer);
/* Serializes enable/disable via the stack_tracer_enabled sysctl */
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
/* Last value written via sysctl, to detect actual on/off transitions */
static int last_stack_tracer_enabled;
bb99d8cce
|
42 |
/*
 * Dump the current max-stack trace to the console with pr_emerg().
 * Called (e.g. on detected stack corruption) while the trace data is
 * stable; each entry prints its depth, the size of its frame, and the
 * symbolized return address.
 */
void stack_trace_print(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_max.nr_entries);

	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		/* ULONG_MAX is the terminator sentinel in stack_dump_trace[] */
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		/*
		 * Frame size = depth delta to the next entry; the last
		 * entry's size is its full remaining depth.
		 */
		if (i+1 == stack_trace_max.nr_entries ||
		    stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
			 size, (void *)stack_dump_trace[i]);
	}
}
bb99d8cce
|
67 |
/* |
505d3085d
|
68 |
* When arch-specific code overrides this function, the following |
d332736df
|
69 |
* data should be filled up, assuming stack_trace_max_lock is held to |
bb99d8cce
|
70 71 72 73 74 75 |
* prevent concurrent updates. * stack_trace_index[] * stack_trace_max * stack_trace_max_size */ void __weak |
d4ecbfc49
|
76 |
check_stack(unsigned long ip, unsigned long *stack) |
e5a81b629
|
77 |
{ |
e31721819
|
78 |
unsigned long this_size, flags; unsigned long *p, *top, *start; |
4df297129
|
79 80 |
static int tracer_frame; int frame_size = ACCESS_ONCE(tracer_frame); |
72ac426a5
|
81 |
int i, x; |
e5a81b629
|
82 |
|
87889501d
|
83 |
this_size = ((unsigned long)stack) & (THREAD_SIZE-1); |
e5a81b629
|
84 |
this_size = THREAD_SIZE - this_size; |
4df297129
|
85 86 |
/* Remove the frame of the tracer */ this_size -= frame_size; |
e5a81b629
|
87 |
|
bb99d8cce
|
88 |
if (this_size <= stack_trace_max_size) |
e5a81b629
|
89 |
return; |
81520a1b0
|
90 |
/* we do not handle interrupt stacks yet */ |
87889501d
|
91 |
if (!object_is_on_stack(stack)) |
81520a1b0
|
92 |
return; |
1904be1b6
|
93 94 95 |
/* Can't do this from NMI context (can cause deadlocks) */ if (in_nmi()) return; |
a5e25883a
|
96 |
local_irq_save(flags); |
d332736df
|
97 |
arch_spin_lock(&stack_trace_max_lock); |
e5a81b629
|
98 |
|
4df297129
|
99 100 101 |
/* In case another CPU set the tracer_frame on us */ if (unlikely(!frame_size)) this_size -= tracer_frame; |
e5a81b629
|
102 |
/* a race could have already updated it */ |
bb99d8cce
|
103 |
if (this_size <= stack_trace_max_size) |
e5a81b629
|
104 |
goto out; |
bb99d8cce
|
105 |
stack_trace_max_size = this_size; |
e5a81b629
|
106 |
|
bb99d8cce
|
107 108 |
stack_trace_max.nr_entries = 0; stack_trace_max.skip = 3; |
e5a81b629
|
109 |
|
bb99d8cce
|
110 |
save_stack_trace(&stack_trace_max); |
e5a81b629
|
111 |
|
72ac426a5
|
112 |
/* Skip over the overhead of the stack tracer itself */ |
bb99d8cce
|
113 |
for (i = 0; i < stack_trace_max.nr_entries; i++) { |
72ac426a5
|
114 115 116 |
if (stack_dump_trace[i] == ip) break; } |
d4ecbfc49
|
117 118 |
/* |
6ccd83714
|
119 120 121 122 123 124 125 |
* Some archs may not have the passed in ip in the dump. * If that happens, we need to show everything. */ if (i == stack_trace_max.nr_entries) i = 0; /* |
1b6cced6e
|
126 127 |
* Now find where in the stack these are. */ |
72ac426a5
|
128 |
x = 0; |
87889501d
|
129 |
start = stack; |
1b6cced6e
|
130 131 132 133 134 135 136 137 138 139 |
top = (unsigned long *) (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE); /* * Loop through all the entries. One of the entries may * for some reason be missed on the stack, so we may * have to account for them. If they are all there, this * loop will only happen once. This code only takes place * on a new max, so it is far from a fast path. */ |
bb99d8cce
|
140 |
while (i < stack_trace_max.nr_entries) { |
0a37119d9
|
141 |
int found = 0; |
1b6cced6e
|
142 |
|
bb99d8cce
|
143 |
stack_trace_index[x] = this_size; |
1b6cced6e
|
144 |
p = start; |
bb99d8cce
|
145 |
for (; p < top && i < stack_trace_max.nr_entries; p++) { |
72ac426a5
|
146 147 |
if (stack_dump_trace[i] == ULONG_MAX) break; |
6e22c8366
|
148 149 150 151 152 |
/* * The READ_ONCE_NOCHECK is used to let KASAN know that * this is not a stack-out-of-bounds error. */ if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) { |
72ac426a5
|
153 |
stack_dump_trace[x] = stack_dump_trace[i++]; |
bb99d8cce
|
154 |
this_size = stack_trace_index[x++] = |
1b6cced6e
|
155 |
(top - p) * sizeof(unsigned long); |
0a37119d9
|
156 |
found = 1; |
1b6cced6e
|
157 158 |
/* Start the search from here */ start = p + 1; |
4df297129
|
159 160 161 162 163 164 165 |
/* * We do not want to show the overhead * of the stack tracer stack in the * max stack. If we haven't figured * out what that is, then figure it out * now. */ |
72ac426a5
|
166 |
if (unlikely(!tracer_frame)) { |
4df297129
|
167 168 |
tracer_frame = (p - stack) * sizeof(unsigned long); |
bb99d8cce
|
169 |
stack_trace_max_size -= tracer_frame; |
4df297129
|
170 |
} |
1b6cced6e
|
171 172 |
} } |
0a37119d9
|
173 174 |
if (!found) i++; |
1b6cced6e
|
175 |
} |
bb99d8cce
|
176 |
stack_trace_max.nr_entries = x; |
72ac426a5
|
177 178 |
for (; x < i; x++) stack_dump_trace[x] = ULONG_MAX; |
a70857e46
|
179 |
if (task_stack_end_corrupted(current)) { |
bb99d8cce
|
180 |
stack_trace_print(); |
e31721819
|
181 182 |
BUG(); } |
e5a81b629
|
183 |
out: |
d332736df
|
184 |
arch_spin_unlock(&stack_trace_max_lock); |
a5e25883a
|
185 |
local_irq_restore(flags); |
e5a81b629
|
186 187 188 |
} static void |
/*
 * Ftrace callback: invoked on every traced function entry.  Uses the
 * address of the local variable @stack as a marker for the current
 * stack position and hands off to check_stack().
 */
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;

	preempt_disable_notrace();

	/* no atomic needed, we only modify this variable by this cpu */
	__this_cpu_inc(disable_stack_tracer);
	/* Already inside the tracer on this CPU: avoid recursion */
	if (__this_cpu_read(disable_stack_tracer) != 1)
		goto out;

	/* Point past the mcount/fentry call site */
	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	__this_cpu_dec(disable_stack_tracer);
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

/*
 * Read handler for the "stack_max_size" tracefs file: formats the
 * current maximum (filp->private_data points at stack_trace_max_size)
 * as a decimal line.
 */
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	/* NOTE(review): %ld on an unsigned long — harmless for realistic
	 * stack sizes, but %lu would match the type; confirm upstream. */
	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

/*
 * Write handler for "stack_max_size": parses a decimal value and stores
 * it under stack_trace_max_lock with the stack tracer locally disabled,
 * so the tracer cannot recurse into the lock we hold.
 */
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu disable_stack_tracer here.
	 */
	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);
	*ptr = val;
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);
	local_irq_restore(flags);

	return count;
}
f38f1d2aa
|
259 |
/* File operations for the "stack_max_size" tracefs file */
static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};
2fc5f0cff
|
267 |
/*
 * Map a seq_file position onto a trace entry.  Position 0 is the
 * header token (handled in t_start()), so entry index is *pos - 1.
 * Returns NULL past the last valid entry; stack_dump_trace[] has one
 * spare sentinel slot, so indexing at nr_entries is safe.
 */
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long idx = *pos - 1;

	if (idx > stack_trace_max.nr_entries ||
	    stack_dump_trace[idx] == ULONG_MAX)
		return NULL;

	/* Stash the index in m->private and hand back its address */
	m->private = (void *)idx;
	return &m->private;
}
2fc5f0cff
|
276 277 |
/* seq_file .next: advance the position and resolve it via __next() */
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}
e5a81b629
|
282 |
|
2fc5f0cff
|
283 284 |
/*
 * seq_file .start: freeze the trace data for the whole traversal.
 * Order matters — IRQs off, then the per-cpu tracer guard, then the
 * arch spinlock; t_stop() releases in exactly the reverse order.
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	local_irq_disable();

	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);

	/* Position 0 is the table header */
	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}
/* seq_file .stop: undo t_start() in reverse order */
static void t_stop(struct seq_file *m, void *p)
{
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);

	local_irq_enable();
}
962e3707d
|
304 |
static void trace_lookup_stack(struct seq_file *m, long i) |
e5a81b629
|
305 |
{ |
1b6cced6e
|
306 |
unsigned long addr = stack_dump_trace[i]; |
e5a81b629
|
307 |
|
962e3707d
|
308 309 |
seq_printf(m, "%pS ", (void *)addr); |
e5a81b629
|
310 |
} |
e447e1df2
|
311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 |
/* Emit a help banner when the stack tracer has never been enabled. */
static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}
e5a81b629
|
328 329 |
/*
 * seq_file .show: print the table header (for SEQ_START_TOKEN) or one
 * trace entry — its depth, frame size and symbolized location.
 */
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_max.nr_entries);

		if (!stack_tracer_enabled && !stack_trace_max_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	/* Past the end, or at the ULONG_MAX terminator: nothing to show */
	if (i >= stack_trace_max.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	/*
	 * Frame size = depth delta to the next entry; the last entry's
	 * size is its full remaining depth (same logic as
	 * stack_trace_print()).
	 */
	if (i+1 == stack_trace_max.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_trace_index[i];
	else
		size = stack_trace_index[i] - stack_trace_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}
f38f1d2aa
|
361 |
/* seq_file iterator for the "stack_trace" tracefs file */
static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}
f38f1d2aa
|
372 |
/* File operations for the read-only "stack_trace" tracefs file */
static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
bbd1d27d8
|
378 |
#ifdef CONFIG_DYNAMIC_FTRACE |
d2d45c7a0
|
379 380 381 |
static int stack_trace_filter_open(struct inode *inode, struct file *file) { |
0f1797656
|
382 383 384 |
struct ftrace_ops *ops = inode->i_private; return ftrace_regex_open(ops, FTRACE_ITER_FILTER, |
d2d45c7a0
|
385 386 387 388 389 390 391 |
inode, file); } static const struct file_operations stack_trace_filter_fops = { .open = stack_trace_filter_open, .read = seq_read, .write = ftrace_filter_write, |
098c879e1
|
392 |
.llseek = tracing_lseek, |
d2d45c7a0
|
393 394 |
.release = ftrace_regex_release, }; |
bbd1d27d8
|
395 |
#endif /* CONFIG_DYNAMIC_FTRACE */ |
f38f1d2aa
|
396 397 |
/*
 * Handler for the kernel.stack_tracer_enabled sysctl.  On an actual
 * 0<->1 transition, (un)registers the ftrace callback; reads and
 * writes that do not change the effective state fall through to "out".
 */
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	/* Nothing to do on error, on a read, or if the state is unchanged */
	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}
762e12078
|
420 |
/*
 * Early command-line filter, applied in stack_trace_init().  The +1
 * sizing plus static (zeroed) storage guarantees NUL termination even
 * when strncpy() copies a full COMMAND_LINE_SIZE bytes.
 */
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

/*
 * Parse "stacktrace" / "stacktrace_filter=..." on the kernel command
 * line: save any filter string and enable the tracer at boot.
 */
static __init int enable_stacktrace(char *str)
{
	/* str holds what follows "stacktrace", e.g. "_filter=foo*" */
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);
e5a81b629
|
430 431 432 |
/*
 * Boot-time setup: create the tracefs control files, apply any early
 * command-line filter, and register the tracer if "stacktrace" was on
 * the command line.
 */
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	/* i_private = &trace_ops, consumed by stack_trace_filter_open() */
	trace_create_file("stack_trace_filter", 0444, d_tracer,
			  &trace_ops, &stack_trace_filter_fops);
#endif

	/* Apply "stacktrace_filter=" from the command line, if given */
	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);