kernel/trace/trace_stack.c
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>
#include "trace.h"

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
         { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
        .max_entries            = STACK_TRACE_ENTRIES,
        .entries                = stack_dump_trace,
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;
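/*
 * Measure how much of the current thread's stack is in use.  When a new
 * maximum is seen, record the stack trace and, for each entry, how deep
 * the stack was at that frame (used later by the seq_file output).
 */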
static inline void check_stack(void)
{
        unsigned long this_size, flags;
        unsigned long *p, *top, *start;
        int i;

        this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
        this_size = THREAD_SIZE - this_size;

        if (this_size <= max_stack_size)
                return;

        /* we do not handle interrupt stacks yet */
        if (!object_is_on_stack(&this_size))
                return;

        local_irq_save(flags);
        arch_spin_lock(&max_stack_lock);

        /* a race could have already updated it */
        if (this_size <= max_stack_size)
                goto out;

        max_stack_size = this_size;

        max_stack_trace.nr_entries      = 0;
        max_stack_trace.skip            = 3;

        save_stack_trace(&max_stack_trace);

        /*
         * Now find where in the stack these are.
         */
        i = 0;
        start = &this_size;
        top = (unsigned long *)
                (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

        /*
         * Loop through all the entries. One of the entries may
         * for some reason be missed on the stack, so we may
         * have to account for them. If they are all there, this
         * loop will only happen once. This code only takes place
         * on a new max, so it is far from a fast path.
         */
        while (i < max_stack_trace.nr_entries) {
                int found = 0;

                stack_dump_index[i] = this_size;
                p = start;

                for (; p < top && i < max_stack_trace.nr_entries; p++) {
                        if (*p == stack_dump_trace[i]) {
                                this_size = stack_dump_index[i++] =
                                        (top - p) * sizeof(unsigned long);
                                found = 1;
                                /* Start the search from here */
                                start = p + 1;
                        }
                }

                if (!found)
                        i++;
        }

 out:
        arch_spin_unlock(&max_stack_lock);
        local_irq_restore(flags);
}

static void stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
        int cpu;

        if (unlikely(!ftrace_enabled || stack_trace_disabled))
                return;

        preempt_disable_notrace();

        cpu = raw_smp_processor_id();
        /* no atomic needed, we only modify this variable by this cpu */
        if (per_cpu(trace_active, cpu)++ != 0)
                goto out;

        check_stack();

 out:
        per_cpu(trace_active, cpu)--;
        /* prevent recursion in schedule */
        preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = stack_trace_call,
        .flags = FTRACE_OPS_FL_GLOBAL,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
                    size_t count, loff_t *ppos)
{
        unsigned long *ptr = filp->private_data;
        char buf[64];
        int r;

        r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
        if (r > sizeof(buf))
                r = sizeof(buf);
        return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
                     size_t count, loff_t *ppos)
{
        long *ptr = filp->private_data;
        unsigned long val, flags;
        int ret;
        int cpu;

        ret = kstrtoul_from_user(ubuf, count, 10, &val);
        if (ret)
                return ret;

        local_irq_save(flags);

        /*
         * In case we trace inside arch_spin_lock() or after (NMI),
         * we will cause circular lock, so we also need to increase
         * the percpu trace_active here.
         */
        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)++;

        arch_spin_lock(&max_stack_lock);
        *ptr = val;
        arch_spin_unlock(&max_stack_lock);

        per_cpu(trace_active, cpu)--;
        local_irq_restore(flags);

        return count;
}
static const struct file_operations stack_max_size_fops = {
        .open           = tracing_open_generic,
        .read           = stack_max_size_read,
        .write          = stack_max_size_write,
        .llseek         = default_llseek,
};

static void *
__next(struct seq_file *m, loff_t *pos)
{
        long n = *pos - 1;

        if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
                return NULL;

        m->private = (void *)n;
        return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        (*pos)++;
        return __next(m, pos);
}
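/*
 * The seq_file walk runs with max_stack_lock held and trace_active
 * bumped for this CPU, so the saved trace cannot change underneath us
 * and the stack tracer callback will not recurse while we dump it.
 */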
static void *t_start(struct seq_file *m, loff_t *pos)
{
        int cpu;

        local_irq_disable();

        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)++;

        arch_spin_lock(&max_stack_lock);

        if (*pos == 0)
                return SEQ_START_TOKEN;

        return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
        int cpu;

        arch_spin_unlock(&max_stack_lock);

        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)--;

        local_irq_enable();
}
static int trace_lookup_stack(struct seq_file *m, long i)
{
        unsigned long addr = stack_dump_trace[i];

        return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
        seq_puts(m, "#\n"
                 "#  Stack tracer disabled\n"
                 "#\n"
                 "# To enable the stack tracer, either add 'stacktrace' to the\n"
                 "# kernel command line\n"
                 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
                 "#\n");
}
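/*
 * One line of output per recorded entry: the entry's index, the stack
 * depth at that point, the size consumed by that frame (difference to
 * the next entry), and the resolved function name.
 */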
static int t_show(struct seq_file *m, void *v)
{
        long i;
        int size;

        if (v == SEQ_START_TOKEN) {
                seq_printf(m, "        Depth    Size   Location"
                           "    (%d entries)\n"
                           "        -----    ----   --------\n",
                           max_stack_trace.nr_entries - 1);

                if (!stack_tracer_enabled && !max_stack_size)
                        print_disabled(m);

                return 0;
        }

        i = *(long *)v;

        if (i >= max_stack_trace.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
                return 0;

        if (i+1 == max_stack_trace.nr_entries ||
            stack_dump_trace[i+1] == ULONG_MAX)
                size = stack_dump_index[i];
        else
                size = stack_dump_index[i] - stack_dump_index[i+1];

        seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

        trace_lookup_stack(m, i);

        return 0;
}
static const struct seq_operations stack_trace_seq_ops = {
        .start          = t_start,
        .next           = t_next,
        .stop           = t_stop,
        .show           = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
        .open           = stack_trace_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};
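/*
 * Handler for the stack_tracer_enabled sysctl: when a write actually
 * changes the value, register or unregister the ftrace callback
 * accordingly.
 */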
int stack_trace_sysctl(struct ctl_table *table, int write,
                       void __user *buffer, size_t *lenp,
                       loff_t *ppos)
{
        int ret;

        mutex_lock(&stack_sysctl_mutex);

        ret = proc_dointvec(table, write, buffer, lenp, ppos);

        if (ret || !write ||
            (last_stack_tracer_enabled == !!stack_tracer_enabled))
                goto out;

        last_stack_tracer_enabled = !!stack_tracer_enabled;

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);
        else
                unregister_ftrace_function(&trace_ops);

 out:
        mutex_unlock(&stack_sysctl_mutex);
        return ret;
}
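/* Allow the stack tracer to be enabled from the kernel command line. */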
static __init int enable_stacktrace(char *str)
{
        stack_tracer_enabled = 1;
        last_stack_tracer_enabled = 1;
        return 1;
}
__setup("stacktrace", enable_stacktrace);
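/*
 * Create the debugfs control files and, if the tracer was enabled on
 * the command line, register the ftrace callback at boot.
 */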
static __init int stack_trace_init(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();

        trace_create_file("stack_max_size", 0644, d_tracer,
                        &max_stack_size, &stack_max_size_fops);

        trace_create_file("stack_trace", 0444, d_tracer,
                        NULL, &stack_trace_fops);

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);

        return 0;
}

device_initcall(stack_trace_init);