Blame view

kernel/trace/trace_stack.c 10 KB
e5a81b629   Steven Rostedt   ftrace: add stack...
1
2
3
4
5
6
7
8
9
10
11
12
  /*
   * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
   *
   */
  #include <linux/stacktrace.h>
  #include <linux/kallsyms.h>
  #include <linux/seq_file.h>
  #include <linux/spinlock.h>
  #include <linux/uaccess.h>
  #include <linux/debugfs.h>
  #include <linux/ftrace.h>
  #include <linux/module.h>
f38f1d2aa   Steven Rostedt   trace: add a way ...
13
  #include <linux/sysctl.h>
e5a81b629   Steven Rostedt   ftrace: add stack...
14
15
  #include <linux/init.h>
  #include <linux/fs.h>
762e12078   Steven Rostedt   tracing: Have sta...
16
17
  
  #include <asm/setup.h>
e5a81b629   Steven Rostedt   ftrace: add stack...
18
19
20
  #include "trace.h"
  
/* Maximum number of stack entries recorded for the deepest stack seen. */
#define STACK_TRACE_ENTRIES 500

/*
 * With -mfentry the tracer hook runs before the traced function has set
 * up its stack frame; stack_trace_call() uses this flag to decide how
 * to derive the address it records.
 */
#ifdef CC_USING_FENTRY
# define fentry		1
#else
# define fentry		0
#endif

/*
 * Snapshot of the deepest stack observed so far.  Unused slots hold
 * ULONG_MAX, which also acts as a terminator for the readers below.
 */
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
/* Remaining stack size (in bytes) at the point of each entry above. */
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[1],
};

/* Largest stack usage seen so far, in bytes. */
static unsigned long max_stack_size;
/* Protects max_stack_size, max_stack_trace and the dump arrays. */
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

/* Per-cpu recursion guard for the tracer callback and its readers. */
static DEFINE_PER_CPU(int, trace_active);
/* Serializes enable/disable via the sysctl handler. */
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
/* Last value written through the sysctl; avoids double (un)register. */
static int last_stack_tracer_enabled;
e5a81b629   Steven Rostedt   ftrace: add stack...
48

87889501d   Steven Rostedt (Red Hat)   tracing: Use stac...
49
/*
 * Measure the current kernel-stack usage and, if it is a new maximum,
 * record the backtrace and per-entry sizes under max_stack_lock.
 *
 * @ip:    address recorded as entry 0 of the trace; searching for it on
 *         the stack lets the walk below skip the stack tracer's own
 *         overhead (see the comment before save_stack_trace()).
 * @stack: address of a local variable in the caller's frame, used both
 *         to compute the depth within THREAD_SIZE and as the starting
 *         point of the stack scan.
 *
 * Called from stack_trace_call() with preemption disabled and the
 * per-cpu trace_active recursion guard held.
 */
static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	/* Size of the tracer's own frame, measured once on first new max. */
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i;

	/* Bytes in use: distance from @stack to the top of the thread stack. */
	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	/* Fast path: not a new maximum, nothing to record. */
	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Add the passed in ip from the function tracer.
	 * Searching for this on the stack will skip over
	 * most of the overhead from the stack tracer itself.
	 */
	stack_dump_trace[0] = ip;
	max_stack_trace.nr_entries++;

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;

				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame) && i == 1) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					max_stack_size -= tracer_frame;
				}
			}
		}

		/* Entry not found on the stack; skip it and keep going. */
		if (!found)
			i++;
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}
  
/*
 * Function-tracer callback (installed as trace_ops.func).  Guards
 * against per-cpu recursion with trace_active, adjusts the recorded ip
 * for the fentry/mcount difference, and hands the address of a local
 * variable to check_stack() as a probe of the current stack depth.
 */
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;	/* address used to measure stack usage */
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	/*
	 * When fentry is used, the traced function does not get
	 * its stack frame set up, and we lose the parent.
	 * The ip is pretty useless because the function tracer
	 * was called before that function set up its stack frame.
	 * In this case, we use the parent ip.
	 *
	 * By adding the return address of either the parent ip
	 * or the current ip we can disregard most of the stack usage
	 * caused by the stack tracer itself.
	 *
	 * The function tracer always reports the address of where the
	 * mcount call was, but the stack will hold the return address.
	 */
	if (fentry)
		ip = parent_ip;
	else
		ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}
  
/* ftrace ops registered/unregistered as the stack tracer is toggled. */
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	/* The callback handles its own recursion via trace_active. */
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
  
  static ssize_t
  stack_max_size_read(struct file *filp, char __user *ubuf,
  		    size_t count, loff_t *ppos)
  {
  	unsigned long *ptr = filp->private_data;
  	char buf[64];
  	int r;
  
  	r = snprintf(buf, sizeof(buf), "%ld
  ", *ptr);
  	if (r > sizeof(buf))
  		r = sizeof(buf);
  	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
  }
  
/*
 * debugfs write handler for "stack_max_size": parse a decimal value
 * from user space and store it (e.g. 0 to reset the recorded maximum).
 * Returns @count on success or a negative errno from the parse.
 */
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}
f38f1d2aa   Steven Rostedt   trace: add a way ...
233
/* File operations for the "stack_max_size" debugfs file. */
static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};
  
  static void *
2fc5f0cff   Li Zefan   trace_stack: Simp...
241
  __next(struct seq_file *m, loff_t *pos)
e5a81b629   Steven Rostedt   ftrace: add stack...
242
  {
2fc5f0cff   Li Zefan   trace_stack: Simp...
243
  	long n = *pos - 1;
e5a81b629   Steven Rostedt   ftrace: add stack...
244

2fc5f0cff   Li Zefan   trace_stack: Simp...
245
  	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
e5a81b629   Steven Rostedt   ftrace: add stack...
246
  		return NULL;
2fc5f0cff   Li Zefan   trace_stack: Simp...
247
  	m->private = (void *)n;
1b6cced6e   Steven Rostedt   ftrace: stack tra...
248
  	return &m->private;
e5a81b629   Steven Rostedt   ftrace: add stack...
249
  }
2fc5f0cff   Li Zefan   trace_stack: Simp...
250
251
/* seq_file .next: advance the position and resolve the new entry. */
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}
e5a81b629   Steven Rostedt   ftrace: add stack...
256

2fc5f0cff   Li Zefan   trace_stack: Simp...
257
258
/*
 * seq_file .start: take max_stack_lock (with irqs off and the per-cpu
 * trace_active guard raised so the tracer cannot recurse into this
 * lock) and return the header token or the first requested entry.
 * The lock and counters are released in t_stop().
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}
  
/*
 * seq_file .stop: undo t_start() — drop max_stack_lock, lower the
 * per-cpu trace_active guard and re-enable interrupts, in that order.
 */
static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}
1b6cced6e   Steven Rostedt   ftrace: stack tra...
280
/* Print entry @i of the recorded trace as a symbol+offset via %pS. */
static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}
e447e1df2   Steven Rostedt   tracing: explain ...
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
  static void print_disabled(struct seq_file *m)
  {
  	seq_puts(m, "#
  "
  		 "#  Stack tracer disabled
  "
  		 "#
  "
  		 "# To enable the stack tracer, either add 'stacktrace' to the
  "
  		 "# kernel command line
  "
  		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'
  "
  		 "#
  ");
  }
e5a81b629   Steven Rostedt   ftrace: add stack...
304
305
/*
 * seq_file .show: print either the table header (for SEQ_START_TOKEN)
 * or one trace entry with its depth and the size consumed by that
 * frame (difference between adjacent stack_dump_index values).
 */
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		/* nr_entries - 1: entry 0 is the reserved ip slot. */
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	/* Frame size: this depth minus the next entry's depth. */
	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}
f38f1d2aa   Steven Rostedt   trace: add a way ...
337
/* seq_file iterator for the "stack_trace" debugfs file. */
static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
  
/* Open handler for "stack_trace": attach the seq_file iterator. */
static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}
f38f1d2aa   Steven Rostedt   trace: add a way ...
348
/* File operations for the "stack_trace" debugfs file. */
static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
d2d45c7a0   Steven Rostedt   tracing: Have sta...
354
355
356
357
358
359
360
361
362
363
364
/* Open handler for "stack_trace_filter": reuse the ftrace filter UI. */
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}
  
/* File operations for the "stack_trace_filter" debugfs file. */
static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_filter_lseek,
	.release = ftrace_regex_release,
};
f38f1d2aa   Steven Rostedt   trace: add a way ...
368
369
/*
 * sysctl handler for kernel.stack_tracer_enabled.  On a write that
 * actually changes the (normalized) value, register or unregister the
 * tracer callback accordingly.  Serialized by stack_sysctl_mutex.
 */
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	/* Nothing to do on read, error, or an unchanged value. */
	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}
762e12078   Steven Rostedt   tracing: Have sta...
392
/*
 * Saved "stacktrace_filter=" argument from the command line; consumed
 * once by stack_trace_init().  +1 and static storage keep the strncpy
 * below NUL-terminated.
 */
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

/*
 * Early param handler for "stacktrace" (and "stacktrace_filter=...",
 * which arrives here as str == "_filter=..."): enable the stack tracer
 * at boot and stash any filter string for later.
 */
static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);
e5a81b629   Steven Rostedt   ftrace: add stack...
402
403
404
/*
 * Create the stack tracer's debugfs files, apply any boot-time filter,
 * and register the tracer callback if it was enabled on the command
 * line.  Runs as a device initcall.
 */
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	/* Filter saved by enable_stacktrace() from "stacktrace_filter=". */
	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);