// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"
1b6cced6e   Steven Rostedt   ftrace: stack tra...
19
20
  static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
  	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
21
  unsigned stack_trace_index[STACK_TRACE_ENTRIES];
1b6cced6e   Steven Rostedt   ftrace: stack tra...
22

4df297129   Steven Rostedt (Red Hat)   tracing: Remove m...
23
24
25
26
27
  /*
   * Reserve one entry for the passed in ip. This will allow
   * us to remove most or all of the stack size overhead
   * added by the stack tracer itself.
   */
bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
28
  struct stack_trace stack_trace_max = {
4df297129   Steven Rostedt (Red Hat)   tracing: Remove m...
29
  	.max_entries		= STACK_TRACE_ENTRIES - 1,
72ac426a5   Steven Rostedt (Red Hat)   tracing: Clean up...
30
  	.entries		= &stack_dump_trace[0],
e5a81b629   Steven Rostedt   ftrace: add stack...
31
  };
bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
32
  unsigned long stack_trace_max_size;
d332736df   Steven Rostedt (Red Hat)   tracing: Rename m...
33
  arch_spinlock_t stack_trace_max_lock =
edc35bd72   Thomas Gleixner   locking: Rename _...
34
  	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
e5a81b629   Steven Rostedt   ftrace: add stack...
35

8aaf1ee70   Steven Rostedt (VMware)   tracing: Rename t...
36
  DEFINE_PER_CPU(int, disable_stack_tracer);
f38f1d2aa   Steven Rostedt   trace: add a way ...
37
38
39
40
  static DEFINE_MUTEX(stack_sysctl_mutex);
  
  int stack_tracer_enabled;
  static int last_stack_tracer_enabled;
e5a81b629   Steven Rostedt   ftrace: add stack...
41

bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
42
  void stack_trace_print(void)
e31721819   Minchan Kim   tracing: Print ma...
43
44
45
46
47
48
49
50
  {
  	long i;
  	int size;
  
  	pr_emerg("        Depth    Size   Location    (%d entries)
  "
  			   "        -----    ----   --------
  ",
bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
51
  			   stack_trace_max.nr_entries);
e31721819   Minchan Kim   tracing: Print ma...
52

bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
53
  	for (i = 0; i < stack_trace_max.nr_entries; i++) {
e31721819   Minchan Kim   tracing: Print ma...
54
55
  		if (stack_dump_trace[i] == ULONG_MAX)
  			break;
bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
56
  		if (i+1 == stack_trace_max.nr_entries ||
e31721819   Minchan Kim   tracing: Print ma...
57
  				stack_dump_trace[i+1] == ULONG_MAX)
bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
58
  			size = stack_trace_index[i];
e31721819   Minchan Kim   tracing: Print ma...
59
  		else
bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
60
  			size = stack_trace_index[i] - stack_trace_index[i+1];
e31721819   Minchan Kim   tracing: Print ma...
61

bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
62
63
  		pr_emerg("%3ld) %8d   %5d   %pS
  ", i, stack_trace_index[i],
e31721819   Minchan Kim   tracing: Print ma...
64
65
66
  				size, (void *)stack_dump_trace[i]);
  	}
  }
bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
67
  /*
505d3085d   Masahiro Yamada   scripts/spelling....
68
   * When arch-specific code overrides this function, the following
d332736df   Steven Rostedt (Red Hat)   tracing: Rename m...
69
   * data should be filled up, assuming stack_trace_max_lock is held to
bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
70
71
72
73
74
75
   * prevent concurrent updates.
   *     stack_trace_index[]
   *     stack_trace_max
   *     stack_trace_max_size
   */
  void __weak
d4ecbfc49   Steven Rostedt (Red Hat)   tracing: Fix stac...
76
  check_stack(unsigned long ip, unsigned long *stack)
e5a81b629   Steven Rostedt   ftrace: add stack...
77
  {
e31721819   Minchan Kim   tracing: Print ma...
78
  	unsigned long this_size, flags; unsigned long *p, *top, *start;
4df297129   Steven Rostedt (Red Hat)   tracing: Remove m...
79
80
  	static int tracer_frame;
  	int frame_size = ACCESS_ONCE(tracer_frame);
72ac426a5   Steven Rostedt (Red Hat)   tracing: Clean up...
81
  	int i, x;
e5a81b629   Steven Rostedt   ftrace: add stack...
82

87889501d   Steven Rostedt (Red Hat)   tracing: Use stac...
83
  	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
e5a81b629   Steven Rostedt   ftrace: add stack...
84
  	this_size = THREAD_SIZE - this_size;
4df297129   Steven Rostedt (Red Hat)   tracing: Remove m...
85
86
  	/* Remove the frame of the tracer */
  	this_size -= frame_size;
e5a81b629   Steven Rostedt   ftrace: add stack...
87

bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
88
  	if (this_size <= stack_trace_max_size)
e5a81b629   Steven Rostedt   ftrace: add stack...
89
  		return;
81520a1b0   Steven Rostedt   ftrace: stack tra...
90
  	/* we do not handle interrupt stacks yet */
87889501d   Steven Rostedt (Red Hat)   tracing: Use stac...
91
  	if (!object_is_on_stack(stack))
81520a1b0   Steven Rostedt   ftrace: stack tra...
92
  		return;
1904be1b6   Steven Rostedt (Red Hat)   tracing: Do not a...
93
94
95
  	/* Can't do this from NMI context (can cause deadlocks) */
  	if (in_nmi())
  		return;
a5e25883a   Steven Rostedt   ftrace: replace r...
96
  	local_irq_save(flags);
d332736df   Steven Rostedt (Red Hat)   tracing: Rename m...
97
  	arch_spin_lock(&stack_trace_max_lock);
e5a81b629   Steven Rostedt   ftrace: add stack...
98

4df297129   Steven Rostedt (Red Hat)   tracing: Remove m...
99
100
101
  	/* In case another CPU set the tracer_frame on us */
  	if (unlikely(!frame_size))
  		this_size -= tracer_frame;
e5a81b629   Steven Rostedt   ftrace: add stack...
102
  	/* a race could have already updated it */
bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
103
  	if (this_size <= stack_trace_max_size)
e5a81b629   Steven Rostedt   ftrace: add stack...
104
  		goto out;
bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
105
  	stack_trace_max_size = this_size;
e5a81b629   Steven Rostedt   ftrace: add stack...
106

bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
107
108
  	stack_trace_max.nr_entries = 0;
  	stack_trace_max.skip = 3;
e5a81b629   Steven Rostedt   ftrace: add stack...
109

bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
110
  	save_stack_trace(&stack_trace_max);
e5a81b629   Steven Rostedt   ftrace: add stack...
111

72ac426a5   Steven Rostedt (Red Hat)   tracing: Clean up...
112
  	/* Skip over the overhead of the stack tracer itself */
bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
113
  	for (i = 0; i < stack_trace_max.nr_entries; i++) {
72ac426a5   Steven Rostedt (Red Hat)   tracing: Clean up...
114
115
116
  		if (stack_dump_trace[i] == ip)
  			break;
  	}
d4ecbfc49   Steven Rostedt (Red Hat)   tracing: Fix stac...
117
118
  
  	/*
6ccd83714   Steven Rostedt   tracing/stacktrac...
119
120
121
122
123
124
125
  	 * Some archs may not have the passed in ip in the dump.
  	 * If that happens, we need to show everything.
  	 */
  	if (i == stack_trace_max.nr_entries)
  		i = 0;
  
  	/*
1b6cced6e   Steven Rostedt   ftrace: stack tra...
126
127
  	 * Now find where in the stack these are.
  	 */
72ac426a5   Steven Rostedt (Red Hat)   tracing: Clean up...
128
  	x = 0;
87889501d   Steven Rostedt (Red Hat)   tracing: Use stac...
129
  	start = stack;
1b6cced6e   Steven Rostedt   ftrace: stack tra...
130
131
132
133
134
135
136
137
138
139
  	top = (unsigned long *)
  		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
  
  	/*
  	 * Loop through all the entries. One of the entries may
  	 * for some reason be missed on the stack, so we may
  	 * have to account for them. If they are all there, this
  	 * loop will only happen once. This code only takes place
  	 * on a new max, so it is far from a fast path.
  	 */
bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
140
  	while (i < stack_trace_max.nr_entries) {
0a37119d9   Steven Rostedt   trace: fix output...
141
  		int found = 0;
1b6cced6e   Steven Rostedt   ftrace: stack tra...
142

bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
143
  		stack_trace_index[x] = this_size;
1b6cced6e   Steven Rostedt   ftrace: stack tra...
144
  		p = start;
bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
145
  		for (; p < top && i < stack_trace_max.nr_entries; p++) {
72ac426a5   Steven Rostedt (Red Hat)   tracing: Clean up...
146
147
  			if (stack_dump_trace[i] == ULONG_MAX)
  				break;
6e22c8366   Yang Shi   tracing, kasan: S...
148
149
150
151
152
  			/*
  			 * The READ_ONCE_NOCHECK is used to let KASAN know that
  			 * this is not a stack-out-of-bounds error.
  			 */
  			if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
72ac426a5   Steven Rostedt (Red Hat)   tracing: Clean up...
153
  				stack_dump_trace[x] = stack_dump_trace[i++];
bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
154
  				this_size = stack_trace_index[x++] =
1b6cced6e   Steven Rostedt   ftrace: stack tra...
155
  					(top - p) * sizeof(unsigned long);
0a37119d9   Steven Rostedt   trace: fix output...
156
  				found = 1;
1b6cced6e   Steven Rostedt   ftrace: stack tra...
157
158
  				/* Start the search from here */
  				start = p + 1;
4df297129   Steven Rostedt (Red Hat)   tracing: Remove m...
159
160
161
162
163
164
165
  				/*
  				 * We do not want to show the overhead
  				 * of the stack tracer stack in the
  				 * max stack. If we haven't figured
  				 * out what that is, then figure it out
  				 * now.
  				 */
72ac426a5   Steven Rostedt (Red Hat)   tracing: Clean up...
166
  				if (unlikely(!tracer_frame)) {
4df297129   Steven Rostedt (Red Hat)   tracing: Remove m...
167
168
  					tracer_frame = (p - stack) *
  						sizeof(unsigned long);
bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
169
  					stack_trace_max_size -= tracer_frame;
4df297129   Steven Rostedt (Red Hat)   tracing: Remove m...
170
  				}
1b6cced6e   Steven Rostedt   ftrace: stack tra...
171
172
  			}
  		}
0a37119d9   Steven Rostedt   trace: fix output...
173
174
  		if (!found)
  			i++;
1b6cced6e   Steven Rostedt   ftrace: stack tra...
175
  	}
bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
176
  	stack_trace_max.nr_entries = x;
72ac426a5   Steven Rostedt (Red Hat)   tracing: Clean up...
177
178
  	for (; x < i; x++)
  		stack_dump_trace[x] = ULONG_MAX;
a70857e46   Aaron Tomlin   sched: Add helper...
179
  	if (task_stack_end_corrupted(current)) {
bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
180
  		stack_trace_print();
e31721819   Minchan Kim   tracing: Print ma...
181
182
  		BUG();
  	}
e5a81b629   Steven Rostedt   ftrace: add stack...
183
   out:
d332736df   Steven Rostedt (Red Hat)   tracing: Rename m...
184
  	arch_spin_unlock(&stack_trace_max_lock);
a5e25883a   Steven Rostedt   ftrace: replace r...
185
  	local_irq_restore(flags);
e5a81b629   Steven Rostedt   ftrace: add stack...
186
187
188
  }
  
  static void
a1e2e31d1   Steven Rostedt   ftrace: Return pt...
189
190
  stack_trace_call(unsigned long ip, unsigned long parent_ip,
  		 struct ftrace_ops *op, struct pt_regs *pt_regs)
e5a81b629   Steven Rostedt   ftrace: add stack...
191
  {
87889501d   Steven Rostedt (Red Hat)   tracing: Use stac...
192
  	unsigned long stack;
e5a81b629   Steven Rostedt   ftrace: add stack...
193

5168ae50a   Steven Rostedt   tracing: Remove f...
194
  	preempt_disable_notrace();
e5a81b629   Steven Rostedt   ftrace: add stack...
195

e5a81b629   Steven Rostedt   ftrace: add stack...
196
  	/* no atomic needed, we only modify this variable by this cpu */
8aaf1ee70   Steven Rostedt (VMware)   tracing: Rename t...
197
198
  	__this_cpu_inc(disable_stack_tracer);
  	if (__this_cpu_read(disable_stack_tracer) != 1)
e5a81b629   Steven Rostedt   ftrace: add stack...
199
  		goto out;
72ac426a5   Steven Rostedt (Red Hat)   tracing: Clean up...
200
  	ip += MCOUNT_INSN_SIZE;
4df297129   Steven Rostedt (Red Hat)   tracing: Remove m...
201
202
  
  	check_stack(ip, &stack);
e5a81b629   Steven Rostedt   ftrace: add stack...
203
204
  
   out:
8aaf1ee70   Steven Rostedt (VMware)   tracing: Rename t...
205
  	__this_cpu_dec(disable_stack_tracer);
e5a81b629   Steven Rostedt   ftrace: add stack...
206
  	/* prevent recursion in schedule */
5168ae50a   Steven Rostedt   tracing: Remove f...
207
  	preempt_enable_notrace();
e5a81b629   Steven Rostedt   ftrace: add stack...
208
209
210
211
212
  }
  
  static struct ftrace_ops trace_ops __read_mostly =
  {
  	.func = stack_trace_call,
4740974a6   Steven Rostedt   ftrace: Add defau...
213
  	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
e5a81b629   Steven Rostedt   ftrace: add stack...
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
  };
  
  static ssize_t
  stack_max_size_read(struct file *filp, char __user *ubuf,
  		    size_t count, loff_t *ppos)
  {
  	unsigned long *ptr = filp->private_data;
  	char buf[64];
  	int r;
  
  	r = snprintf(buf, sizeof(buf), "%ld
  ", *ptr);
  	if (r > sizeof(buf))
  		r = sizeof(buf);
  	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
  }
  
  static ssize_t
  stack_max_size_write(struct file *filp, const char __user *ubuf,
  		     size_t count, loff_t *ppos)
  {
  	long *ptr = filp->private_data;
  	unsigned long val, flags;
e5a81b629   Steven Rostedt   ftrace: add stack...
237
  	int ret;
22fe9b54d   Peter Huewe   tracing: Convert ...
238
239
  	ret = kstrtoul_from_user(ubuf, count, 10, &val);
  	if (ret)
e5a81b629   Steven Rostedt   ftrace: add stack...
240
  		return ret;
a5e25883a   Steven Rostedt   ftrace: replace r...
241
  	local_irq_save(flags);
4f48f8b7f   Lai Jiangshan   tracing: Fix circ...
242
243
244
245
  
  	/*
  	 * In case we trace inside arch_spin_lock() or after (NMI),
  	 * we will cause circular lock, so we also need to increase
8aaf1ee70   Steven Rostedt (VMware)   tracing: Rename t...
246
  	 * the percpu disable_stack_tracer here.
4f48f8b7f   Lai Jiangshan   tracing: Fix circ...
247
  	 */
8aaf1ee70   Steven Rostedt (VMware)   tracing: Rename t...
248
  	__this_cpu_inc(disable_stack_tracer);
4f48f8b7f   Lai Jiangshan   tracing: Fix circ...
249

d332736df   Steven Rostedt (Red Hat)   tracing: Rename m...
250
  	arch_spin_lock(&stack_trace_max_lock);
e5a81b629   Steven Rostedt   ftrace: add stack...
251
  	*ptr = val;
d332736df   Steven Rostedt (Red Hat)   tracing: Rename m...
252
  	arch_spin_unlock(&stack_trace_max_lock);
4f48f8b7f   Lai Jiangshan   tracing: Fix circ...
253

8aaf1ee70   Steven Rostedt (VMware)   tracing: Rename t...
254
  	__this_cpu_dec(disable_stack_tracer);
a5e25883a   Steven Rostedt   ftrace: replace r...
255
  	local_irq_restore(flags);
e5a81b629   Steven Rostedt   ftrace: add stack...
256
257
258
  
  	return count;
  }
f38f1d2aa   Steven Rostedt   trace: add a way ...
259
  static const struct file_operations stack_max_size_fops = {
e5a81b629   Steven Rostedt   ftrace: add stack...
260
261
262
  	.open		= tracing_open_generic,
  	.read		= stack_max_size_read,
  	.write		= stack_max_size_write,
6038f373a   Arnd Bergmann   llseek: automatic...
263
  	.llseek		= default_llseek,
e5a81b629   Steven Rostedt   ftrace: add stack...
264
265
266
  };
  
  static void *
2fc5f0cff   Li Zefan   trace_stack: Simp...
267
  __next(struct seq_file *m, loff_t *pos)
e5a81b629   Steven Rostedt   ftrace: add stack...
268
  {
2fc5f0cff   Li Zefan   trace_stack: Simp...
269
  	long n = *pos - 1;
e5a81b629   Steven Rostedt   ftrace: add stack...
270

bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
271
  	if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
e5a81b629   Steven Rostedt   ftrace: add stack...
272
  		return NULL;
2fc5f0cff   Li Zefan   trace_stack: Simp...
273
  	m->private = (void *)n;
1b6cced6e   Steven Rostedt   ftrace: stack tra...
274
  	return &m->private;
e5a81b629   Steven Rostedt   ftrace: add stack...
275
  }
2fc5f0cff   Li Zefan   trace_stack: Simp...
276
277
  static void *
  t_next(struct seq_file *m, void *v, loff_t *pos)
e5a81b629   Steven Rostedt   ftrace: add stack...
278
  {
2fc5f0cff   Li Zefan   trace_stack: Simp...
279
280
281
  	(*pos)++;
  	return __next(m, pos);
  }
e5a81b629   Steven Rostedt   ftrace: add stack...
282

2fc5f0cff   Li Zefan   trace_stack: Simp...
283
284
  static void *t_start(struct seq_file *m, loff_t *pos)
  {
e5a81b629   Steven Rostedt   ftrace: add stack...
285
  	local_irq_disable();
4f48f8b7f   Lai Jiangshan   tracing: Fix circ...
286

8aaf1ee70   Steven Rostedt (VMware)   tracing: Rename t...
287
  	__this_cpu_inc(disable_stack_tracer);
4f48f8b7f   Lai Jiangshan   tracing: Fix circ...
288

d332736df   Steven Rostedt (Red Hat)   tracing: Rename m...
289
  	arch_spin_lock(&stack_trace_max_lock);
e5a81b629   Steven Rostedt   ftrace: add stack...
290

522a110b4   Liming Wang   function tracing:...
291
292
  	if (*pos == 0)
  		return SEQ_START_TOKEN;
2fc5f0cff   Li Zefan   trace_stack: Simp...
293
  	return __next(m, pos);
e5a81b629   Steven Rostedt   ftrace: add stack...
294
295
296
297
  }
  
  static void t_stop(struct seq_file *m, void *p)
  {
d332736df   Steven Rostedt (Red Hat)   tracing: Rename m...
298
  	arch_spin_unlock(&stack_trace_max_lock);
4f48f8b7f   Lai Jiangshan   tracing: Fix circ...
299

8aaf1ee70   Steven Rostedt (VMware)   tracing: Rename t...
300
  	__this_cpu_dec(disable_stack_tracer);
4f48f8b7f   Lai Jiangshan   tracing: Fix circ...
301

e5a81b629   Steven Rostedt   ftrace: add stack...
302
303
  	local_irq_enable();
  }
962e3707d   Joe Perches   tracing: remove u...
304
  static void trace_lookup_stack(struct seq_file *m, long i)
e5a81b629   Steven Rostedt   ftrace: add stack...
305
  {
1b6cced6e   Steven Rostedt   ftrace: stack tra...
306
  	unsigned long addr = stack_dump_trace[i];
e5a81b629   Steven Rostedt   ftrace: add stack...
307

962e3707d   Joe Perches   tracing: remove u...
308
309
  	seq_printf(m, "%pS
  ", (void *)addr);
e5a81b629   Steven Rostedt   ftrace: add stack...
310
  }
/* Explain to the reader how to turn the stack tracer on */
static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}
e5a81b629   Steven Rostedt   ftrace: add stack...
328
329
  static int t_show(struct seq_file *m, void *v)
  {
522a110b4   Liming Wang   function tracing:...
330
  	long i;
1b6cced6e   Steven Rostedt   ftrace: stack tra...
331
  	int size;
522a110b4   Liming Wang   function tracing:...
332
  	if (v == SEQ_START_TOKEN) {
eb1871f34   Steven Rostedt   tracing: left ali...
333
  		seq_printf(m, "        Depth    Size   Location"
1b6cced6e   Steven Rostedt   ftrace: stack tra...
334
335
  			   "    (%d entries)
  "
eb1871f34   Steven Rostedt   tracing: left ali...
336
337
  			   "        -----    ----   --------
  ",
bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
338
  			   stack_trace_max.nr_entries);
e447e1df2   Steven Rostedt   tracing: explain ...
339

bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
340
  		if (!stack_tracer_enabled && !stack_trace_max_size)
e447e1df2   Steven Rostedt   tracing: explain ...
341
  			print_disabled(m);
1b6cced6e   Steven Rostedt   ftrace: stack tra...
342
343
  		return 0;
  	}
e5a81b629   Steven Rostedt   ftrace: add stack...
344

522a110b4   Liming Wang   function tracing:...
345
  	i = *(long *)v;
bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
346
  	if (i >= stack_trace_max.nr_entries ||
1b6cced6e   Steven Rostedt   ftrace: stack tra...
347
  	    stack_dump_trace[i] == ULONG_MAX)
e5a81b629   Steven Rostedt   ftrace: add stack...
348
  		return 0;
bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
349
  	if (i+1 == stack_trace_max.nr_entries ||
1b6cced6e   Steven Rostedt   ftrace: stack tra...
350
  	    stack_dump_trace[i+1] == ULONG_MAX)
bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
351
  		size = stack_trace_index[i];
1b6cced6e   Steven Rostedt   ftrace: stack tra...
352
  	else
bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
353
  		size = stack_trace_index[i] - stack_trace_index[i+1];
1b6cced6e   Steven Rostedt   ftrace: stack tra...
354

bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
355
  	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);
1b6cced6e   Steven Rostedt   ftrace: stack tra...
356
357
  
  	trace_lookup_stack(m, i);
e5a81b629   Steven Rostedt   ftrace: add stack...
358
359
360
  
  	return 0;
  }
f38f1d2aa   Steven Rostedt   trace: add a way ...
361
  static const struct seq_operations stack_trace_seq_ops = {
e5a81b629   Steven Rostedt   ftrace: add stack...
362
363
364
365
366
367
368
369
  	.start		= t_start,
  	.next		= t_next,
  	.stop		= t_stop,
  	.show		= t_show,
  };
  
  static int stack_trace_open(struct inode *inode, struct file *file)
  {
d8cc1ab79   Li Zefan   trace_stack: Fix ...
370
  	return seq_open(file, &stack_trace_seq_ops);
e5a81b629   Steven Rostedt   ftrace: add stack...
371
  }
f38f1d2aa   Steven Rostedt   trace: add a way ...
372
  static const struct file_operations stack_trace_fops = {
e5a81b629   Steven Rostedt   ftrace: add stack...
373
374
375
  	.open		= stack_trace_open,
  	.read		= seq_read,
  	.llseek		= seq_lseek,
d8cc1ab79   Li Zefan   trace_stack: Fix ...
376
  	.release	= seq_release,
e5a81b629   Steven Rostedt   ftrace: add stack...
377
  };
#ifdef CONFIG_DYNAMIC_FTRACE

/* Open handler for stack_trace_filter; wires trace_ops into ftrace regex */
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

#endif /* CONFIG_DYNAMIC_FTRACE */
f38f1d2aa   Steven Rostedt   trace: add a way ...
396
397
  int
  stack_trace_sysctl(struct ctl_table *table, int write,
8d65af789   Alexey Dobriyan   sysctl: remove "s...
398
  		   void __user *buffer, size_t *lenp,
f38f1d2aa   Steven Rostedt   trace: add a way ...
399
400
401
402
403
  		   loff_t *ppos)
  {
  	int ret;
  
  	mutex_lock(&stack_sysctl_mutex);
8d65af789   Alexey Dobriyan   sysctl: remove "s...
404
  	ret = proc_dointvec(table, write, buffer, lenp, ppos);
f38f1d2aa   Steven Rostedt   trace: add a way ...
405
406
  
  	if (ret || !write ||
a32c7765e   Li Zefan   tracing: Fix stac...
407
  	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
f38f1d2aa   Steven Rostedt   trace: add a way ...
408
  		goto out;
a32c7765e   Li Zefan   tracing: Fix stac...
409
  	last_stack_tracer_enabled = !!stack_tracer_enabled;
f38f1d2aa   Steven Rostedt   trace: add a way ...
410
411
412
413
414
415
416
417
418
419
  
  	if (stack_tracer_enabled)
  		register_ftrace_function(&trace_ops);
  	else
  		unregister_ftrace_function(&trace_ops);
  
   out:
  	mutex_unlock(&stack_sysctl_mutex);
  	return ret;
  }
762e12078   Steven Rostedt   tracing: Have sta...
420
  static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;
f38f1d2aa   Steven Rostedt   trace: add a way ...
421
422
  static __init int enable_stacktrace(char *str)
  {
762e12078   Steven Rostedt   tracing: Have sta...
423
424
  	if (strncmp(str, "_filter=", 8) == 0)
  		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);
e05a43b74   Steven Rostedt   trace: better use...
425
426
  	stack_tracer_enabled = 1;
  	last_stack_tracer_enabled = 1;
f38f1d2aa   Steven Rostedt   trace: add a way ...
427
428
429
  	return 1;
  }
  __setup("stacktrace", enable_stacktrace);
e5a81b629   Steven Rostedt   ftrace: add stack...
430
431
432
  static __init int stack_trace_init(void)
  {
  	struct dentry *d_tracer;
e5a81b629   Steven Rostedt   ftrace: add stack...
433
434
  
  	d_tracer = tracing_init_dentry();
14a5ae40f   Steven Rostedt (Red Hat)   tracing: Use IS_E...
435
  	if (IS_ERR(d_tracer))
ed6f1c996   Namhyung Kim   tracing: Check re...
436
  		return 0;
e5a81b629   Steven Rostedt   ftrace: add stack...
437

5452af664   Frederic Weisbecker   tracing/ftrace: f...
438
  	trace_create_file("stack_max_size", 0644, d_tracer,
bb99d8cce   AKASHI Takahiro   tracing: Allow ar...
439
  			&stack_trace_max_size, &stack_max_size_fops);
e5a81b629   Steven Rostedt   ftrace: add stack...
440

5452af664   Frederic Weisbecker   tracing/ftrace: f...
441
442
  	trace_create_file("stack_trace", 0444, d_tracer,
  			NULL, &stack_trace_fops);
e5a81b629   Steven Rostedt   ftrace: add stack...
443

bbd1d27d8   Steven Rostedt (VMware)   tracing: Do note ...
444
  #ifdef CONFIG_DYNAMIC_FTRACE
d2d45c7a0   Steven Rostedt   tracing: Have sta...
445
  	trace_create_file("stack_trace_filter", 0444, d_tracer,
0f1797656   Steven Rostedt (VMware)   ftrace: Fix regre...
446
  			  &trace_ops, &stack_trace_filter_fops);
bbd1d27d8   Steven Rostedt (VMware)   tracing: Do note ...
447
  #endif
d2d45c7a0   Steven Rostedt   tracing: Have sta...
448

762e12078   Steven Rostedt   tracing: Have sta...
449
450
  	if (stack_trace_filter_buf[0])
  		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);
e05a43b74   Steven Rostedt   trace: better use...
451
  	if (stack_tracer_enabled)
f38f1d2aa   Steven Rostedt   trace: add a way ...
452
  		register_ftrace_function(&trace_ops);
e5a81b629   Steven Rostedt   ftrace: add stack...
453
454
455
456
457
  
  	return 0;
  }
  
  device_initcall(stack_trace_init);