kernel/trace/trace_functions.c
  /*
   * ring buffer based function tracer
   *
   * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
   * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
   *
   * Based on code from the latency_tracer, that is:
   *
   *  Copyright (C) 2004-2006 Ingo Molnar
   *  Copyright (C) 2004 Nadia Yvette Chambers
   */
  #include <linux/ring_buffer.h>
  #include <linux/debugfs.h>
  #include <linux/uaccess.h>
  #include <linux/ftrace.h>
  #include <linux/slab.h>
  #include <linux/fs.h>
  
  #include "trace.h"
  static void tracing_start_function_trace(struct trace_array *tr);
  static void tracing_stop_function_trace(struct trace_array *tr);
  static void
  function_trace_call(unsigned long ip, unsigned long parent_ip,
  		    struct ftrace_ops *op, struct pt_regs *pt_regs);
  static void
  function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
  			  struct ftrace_ops *op, struct pt_regs *pt_regs);
  static struct tracer_flags func_flags;
  
  /* Our option */
  enum {
  	TRACE_FUNC_OPT_STACK	= 0x1,
  };
  
  static int allocate_ftrace_ops(struct trace_array *tr)
  {
  	struct ftrace_ops *ops;

  	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
  	if (!ops)
  		return -ENOMEM;

	/* Currently only the non stack version is supported */
  	ops->func = function_trace_call;
  	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;
  
  	tr->ops = ops;
  	ops->private = tr;
  	return 0;
  }

  
  int ftrace_create_function_files(struct trace_array *tr,
  				 struct dentry *parent)
  {
  	int ret;
  	/*
  	 * The top level array uses the "global_ops", and the files are
  	 * created on boot up.
  	 */
  	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
  		return 0;
  
  	ret = allocate_ftrace_ops(tr);
  	if (ret)
  		return ret;
  
  	ftrace_create_filter_files(tr->ops, parent);
  
  	return 0;
  }
  
  void ftrace_destroy_function_files(struct trace_array *tr)
  {
  	ftrace_destroy_filter_files(tr->ops);
  	kfree(tr->ops);
  	tr->ops = NULL;
  }
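
/*
 * Example usage, assuming tracefs is mounted at /sys/kernel/tracing
 * ("foo" is an arbitrary instance name):
 *
 *	mkdir /sys/kernel/tracing/instances/foo
 *	echo function > /sys/kernel/tracing/instances/foo/current_tracer
 *
 * The new instance gets its own trace_array, the ftrace_ops allocated
 * above, and its own set_ftrace_filter/set_ftrace_notrace files from
 * ftrace_create_filter_files(), independent of the top level tracer.
 */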
  static int function_trace_init(struct trace_array *tr)
  {
  	ftrace_func_t func;

  	/*
  	 * Instance trace_arrays get their ops allocated
  	 * at instance creation. Unless it failed
  	 * the allocation.
  	 */
  	if (!tr->ops)
  		return -ENOMEM;
  
  	/* Currently only the global instance can do stack tracing */
  	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
  	    func_flags.val & TRACE_FUNC_OPT_STACK)
  		func = function_stack_trace_call;
  	else
  		func = function_trace_call;
  
  	ftrace_init_array_ops(tr, func);

  	tr->trace_buffer.cpu = get_cpu();
  	put_cpu();
  	tracing_start_cmdline_record();
  	tracing_start_function_trace(tr);
  	return 0;
  }
  static void function_trace_reset(struct trace_array *tr)
  {
  	tracing_stop_function_trace(tr);
  	tracing_stop_cmdline_record();
  	ftrace_reset_array_ops(tr);
  }
  static void function_trace_start(struct trace_array *tr)
  {
  	tracing_reset_online_cpus(&tr->trace_buffer);
  }
  static void
  function_trace_call(unsigned long ip, unsigned long parent_ip,
  		    struct ftrace_ops *op, struct pt_regs *pt_regs)
  {
  	struct trace_array *tr = op->private;
  	struct trace_array_cpu *data;
  	unsigned long flags;
  	int bit;
  	int cpu;
  	int pc;
  	if (unlikely(!tr->function_enabled))
  		return;
  	pc = preempt_count();
  	preempt_disable_notrace();

  	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
  	if (bit < 0)
  		goto out;
  
  	cpu = smp_processor_id();
  	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
  	if (!atomic_read(&data->disabled)) {
  		local_save_flags(flags);
  		trace_function(tr, ip, parent_ip, flags, pc);
  	}
  	trace_clear_recursion(bit);

   out:
  	preempt_enable_notrace();
  }
  
  static void
  function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
  			  struct ftrace_ops *op, struct pt_regs *pt_regs)
  {
  	struct trace_array *tr = op->private;
  	struct trace_array_cpu *data;
  	unsigned long flags;
  	long disabled;
  	int cpu;
  	int pc;
  	if (unlikely(!tr->function_enabled))
  		return;
  
  	/*
  	 * Need to use raw, since this must be called before the
  	 * recursive protection is performed.
  	 */
  	local_irq_save(flags);
  	cpu = raw_smp_processor_id();
  	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
  	disabled = atomic_inc_return(&data->disabled);
  
  	if (likely(disabled == 1)) {
  		pc = preempt_count();
  		trace_function(tr, ip, parent_ip, flags, pc);
  		/*
  		 * skip over 5 funcs:
  		 *    __ftrace_trace_stack,
  		 *    __trace_stack,
  		 *    function_stack_trace_call
  		 *    ftrace_list_func
  		 *    ftrace_call
  		 */
  		__trace_stack(tr, flags, 5, pc);
  	}
  
  	atomic_dec(&data->disabled);
  	local_irq_restore(flags);
  }
  static struct tracer_opt func_opts[] = {
  #ifdef CONFIG_STACKTRACE
  	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
  #endif
  	{ } /* Always set a last empty entry */
  };
  
  static struct tracer_flags func_flags = {
  	.val = 0, /* By default: all flags disabled */
  	.opts = func_opts
  };
  static void tracing_start_function_trace(struct trace_array *tr)
  {
  	tr->function_enabled = 0;
  	register_ftrace_function(tr->ops);
  	tr->function_enabled = 1;
  }
  static void tracing_stop_function_trace(struct trace_array *tr)
  {
  	tr->function_enabled = 0;
  	unregister_ftrace_function(tr->ops);
  }
  static struct tracer function_trace;
  static int
  func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
  {
  	switch (bit) {
  	case TRACE_FUNC_OPT_STACK:
  		/* do nothing if already set */
  		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
  			break;

  		/* We can change this flag when not running. */
  		if (tr->current_trace != &function_trace)
  			break;
  		unregister_ftrace_function(tr->ops);
  		if (set) {
  			tr->ops->func = function_stack_trace_call;
  			register_ftrace_function(tr->ops);
  		} else {
  			tr->ops->func = function_trace_call;
  			register_ftrace_function(tr->ops);
  		}

  		break;
  	default:
  		return -EINVAL;
  	}
  	return 0;
  }
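
/*
 * The TRACE_FUNC_OPT_STACK bit handled above is exposed as the
 * "func_stack_trace" option, so with the function tracer active it can
 * typically be toggled at run time, e.g. (assuming tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *	echo 1 > /sys/kernel/tracing/options/func_stack_trace
 */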
  static struct tracer function_trace __tracer_data =
  {
  	.name		= "function",
  	.init		= function_trace_init,
  	.reset		= function_trace_reset,
  	.start		= function_trace_start,
  	.flags		= &func_flags,
  	.set_flag	= func_set_flag,
  	.allow_instances = true,
  #ifdef CONFIG_FTRACE_SELFTEST
  	.selftest	= trace_selftest_startup_function,
  #endif
  };
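
/*
 * The tracer defined above is registered by init_function_trace() at the
 * bottom of this file and is selected the usual way, e.g. (assuming
 * tracefs is mounted at /sys/kernel/tracing):
 *
 *	echo function > /sys/kernel/tracing/current_tracer
 *	cat /sys/kernel/tracing/trace
 */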
  #ifdef CONFIG_DYNAMIC_FTRACE
  static void update_traceon_count(void **data, bool on)
  {
  	long *count = (long *)data;
  	long old_count = *count;

  	/*
  	 * Tracing gets disabled (or enabled) once per count.
  	 * This function can be called at the same time on multiple CPUs.
  	 * It is fine if both disable (or enable) tracing, as disabling
  	 * (or enabling) the second time doesn't do anything as the
  	 * state of the tracer is already disabled (or enabled).
  	 * What needs to be synchronized in this case is that the count
  	 * only gets decremented once, even if the tracer is disabled
  	 * (or enabled) twice, as the second one is really a nop.
  	 *
  	 * The memory barriers guarantee that we only decrement the
  	 * counter once. First the count is read to a local variable
  	 * and a read barrier is used to make sure that it is loaded
  	 * before checking if the tracer is in the state we want.
  	 * If the tracer is not in the state we want, then the count
  	 * is guaranteed to be the old count.
  	 *
  	 * Next the tracer is set to the state we want (disabled or enabled)
  	 * then a write memory barrier is used to make sure that
  	 * the new state is visible before changing the counter by
  	 * one minus the old counter. This guarantees that another CPU
  	 * executing this code will see the new state before seeing
  	 * the new counter value, and would not do anything if the new
  	 * counter is seen.
  	 *
  	 * Note, there is no synchronization between this and a user
  	 * setting the tracing_on file. But we currently don't care
  	 * about that.
  	 */
  	if (!old_count)
  		return;

  	/* Make sure we see count before checking tracing state */
  	smp_rmb();

  	if (on == !!tracing_is_on())
  		return;
  
  	if (on)
  		tracing_on();
  	else
  		tracing_off();
  
  	/* unlimited? */
  	if (old_count == -1)
  		return;
  
  	/* Make sure tracing state is visible before updating count */
  	smp_wmb();
  
  	*count = old_count - 1;
  }
  
  static void
  ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
  {
  	update_traceon_count(data, 1);
  }

  static void
  ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
  {
  	update_traceon_count(data, 0);
  }
  static void
  ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
  {
  	if (tracing_is_on())
  		return;
  
  	tracing_on();
  }
  
  static void
  ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
  {
  	if (!tracing_is_on())
  		return;
  
  	tracing_off();
  }
  /*
   * Skip 4:
   *   ftrace_stacktrace()
   *   function_trace_probe_call()
   *   ftrace_ops_list_func()
   *   ftrace_call()
   */
  #define STACK_SKIP 4
  
  static void
  ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
  {
  	trace_dump_stack(STACK_SKIP);
  }
  
  static void
  ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
  {
  	long *count = (long *)data;
  	long old_count;
  	long new_count;
  
  	/*
  	 * Stack traces should only execute the number of times the
  	 * user specified in the counter.
  	 */
  	do {
  
  		if (!tracing_is_on())
  			return;

  		old_count = *count;
  
  		if (!old_count)
  			return;
  
  		/* unlimited? */
  		if (old_count == -1) {
  			trace_dump_stack(STACK_SKIP);
  			return;
  		}
  
  		new_count = old_count - 1;
  		new_count = cmpxchg(count, old_count, new_count);
  		if (new_count == old_count)
  			trace_dump_stack(STACK_SKIP);
  
  	} while (new_count != old_count);
  }
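
/*
 * The cmpxchg() loop above keeps the countdown exact when several CPUs
 * hit the probe at once: only the CPU whose exchange succeeds dumps the
 * stack.  Typical use via set_ftrace_filter, assuming tracefs is mounted
 * at /sys/kernel/tracing (the function name and count are only examples):
 *
 *	echo 'kfree:stacktrace:3' > /sys/kernel/tracing/set_ftrace_filter
 */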
  
  static int update_count(void **data)
  {
  	unsigned long *count = (long *)data;
  
  	if (!*count)
  		return 0;
  
  	if (*count != -1)
  		(*count)--;
  
  	return 1;
  }
  static void
  ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
  {
  	if (update_count(data))
  		ftrace_dump(DUMP_ALL);
  }
  /* Only dump the current CPU buffer. */
  static void
  ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
  {
  	if (update_count(data))
  		ftrace_dump(DUMP_ORIG);
  }
  static int
  ftrace_probe_print(const char *name, struct seq_file *m,
  		   unsigned long ip, void *data)
  {
  	long count = (long)data;
  
  	seq_printf(m, "%ps:%s", (void *)ip, name);
  
  	if (count == -1)
		seq_puts(m, ":unlimited\n");
  	else
		seq_printf(m, ":count=%ld\n", count);
  
  	return 0;
  }
  
  static int
  ftrace_traceon_print(struct seq_file *m, unsigned long ip,
  			 struct ftrace_probe_ops *ops, void *data)
  {
  	return ftrace_probe_print("traceon", m, ip, data);
  }
  
  static int
  ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
  			 struct ftrace_probe_ops *ops, void *data)
  {
  	return ftrace_probe_print("traceoff", m, ip, data);
  }
  
  static int
  ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
  			struct ftrace_probe_ops *ops, void *data)
  {
  	return ftrace_probe_print("stacktrace", m, ip, data);
  }

  static int
  ftrace_dump_print(struct seq_file *m, unsigned long ip,
  			struct ftrace_probe_ops *ops, void *data)
  {
  	return ftrace_probe_print("dump", m, ip, data);
  }
  static int
  ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
  			struct ftrace_probe_ops *ops, void *data)
  {
  	return ftrace_probe_print("cpudump", m, ip, data);
  }
  static struct ftrace_probe_ops traceon_count_probe_ops = {
  	.func			= ftrace_traceon_count,
  	.print			= ftrace_traceon_print,
  };
  
  static struct ftrace_probe_ops traceoff_count_probe_ops = {
  	.func			= ftrace_traceoff_count,
  	.print			= ftrace_traceoff_print,
  };
  
  static struct ftrace_probe_ops stacktrace_count_probe_ops = {
  	.func			= ftrace_stacktrace_count,
  	.print			= ftrace_stacktrace_print,
  };
  static struct ftrace_probe_ops dump_probe_ops = {
  	.func			= ftrace_dump_probe,
  	.print			= ftrace_dump_print,
  };
  static struct ftrace_probe_ops cpudump_probe_ops = {
  	.func			= ftrace_cpudump_probe,
  	.print			= ftrace_cpudump_print,
  };
  static struct ftrace_probe_ops traceon_probe_ops = {
  	.func			= ftrace_traceon,
  	.print			= ftrace_traceon_print,
  };
  static struct ftrace_probe_ops traceoff_probe_ops = {
  	.func			= ftrace_traceoff,
  	.print			= ftrace_traceoff_print,
  };
  static struct ftrace_probe_ops stacktrace_probe_ops = {
  	.func			= ftrace_stacktrace,
  	.print			= ftrace_stacktrace_print,
  };
  
  static int
  ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
  			    struct ftrace_hash *hash, char *glob,
  			    char *cmd, char *param, int enable)
  {
  	void *count = (void *)-1;
  	char *number;
  	int ret;
  
  	/* hash funcs only work with set_ftrace_filter */
  	if (!enable)
  		return -EINVAL;
  	if (glob[0] == '!') {
  		unregister_ftrace_function_probe_func(glob+1, ops);
  		return 0;
  	}
  	if (!param)
  		goto out_reg;
  
  	number = strsep(&param, ":");
  
  	if (!strlen(number))
  		goto out_reg;
  
  	/*
  	 * We use the callback data field (which is a pointer)
  	 * as our counter.
  	 */
  	ret = kstrtoul(number, 0, (unsigned long *)&count);
  	if (ret)
  		return ret;
  
   out_reg:
  	ret = register_ftrace_function_probe(glob, ops, count);

  	return ret < 0 ? ret : 0;
  }
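
/*
 * The command callbacks below all funnel into this helper.  They are
 * driven through set_ftrace_filter with the "<function>:<command>[:<count>]"
 * syntax, e.g. (function name and count are only examples; tracefs assumed
 * mounted at /sys/kernel/tracing):
 *
 *	echo 'schedule:traceoff:5' > /sys/kernel/tracing/set_ftrace_filter
 *	echo '!schedule:traceoff' > /sys/kernel/tracing/set_ftrace_filter
 *
 * The first arms the probe with a count of 5; the second (leading '!')
 * removes it again, matching the glob handling above.
 */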
  static int
  ftrace_trace_onoff_callback(struct ftrace_hash *hash,
  			    char *glob, char *cmd, char *param, int enable)
  {
  	struct ftrace_probe_ops *ops;
  
  	/* we register both traceon and traceoff to this callback */
  	if (strcmp(cmd, "traceon") == 0)
  		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
  	else
  		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
  
  	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
  					   param, enable);
  }
  
  static int
  ftrace_stacktrace_callback(struct ftrace_hash *hash,
  			   char *glob, char *cmd, char *param, int enable)
  {
  	struct ftrace_probe_ops *ops;
  
  	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
  
  	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
  					   param, enable);
  }
  static int
  ftrace_dump_callback(struct ftrace_hash *hash,
  			   char *glob, char *cmd, char *param, int enable)
  {
  	struct ftrace_probe_ops *ops;
  
  	ops = &dump_probe_ops;
  
  	/* Only dump once. */
  	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
  					   "1", enable);
  }
  static int
  ftrace_cpudump_callback(struct ftrace_hash *hash,
  			   char *glob, char *cmd, char *param, int enable)
  {
  	struct ftrace_probe_ops *ops;
  
  	ops = &cpudump_probe_ops;
  
  	/* Only dump once. */
  	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
  					   "1", enable);
  }
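
/*
 * The dump and cpudump commands are one-shot by construction (the
 * callbacks above pass a count of "1").  For example, with tracefs
 * mounted at /sys/kernel/tracing and <func> standing in for a traceable
 * function:
 *
 *	echo '<func>:dump' > /sys/kernel/tracing/set_ftrace_filter
 *
 * dumps the whole ftrace ring buffer the first time <func> is hit; the
 * cpudump variant dumps only the current CPU's buffer.
 */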
  static struct ftrace_func_command ftrace_traceon_cmd = {
  	.name			= "traceon",
  	.func			= ftrace_trace_onoff_callback,
  };
  
  static struct ftrace_func_command ftrace_traceoff_cmd = {
  	.name			= "traceoff",
  	.func			= ftrace_trace_onoff_callback,
  };
  static struct ftrace_func_command ftrace_stacktrace_cmd = {
  	.name			= "stacktrace",
  	.func			= ftrace_stacktrace_callback,
  };
  static struct ftrace_func_command ftrace_dump_cmd = {
  	.name			= "dump",
  	.func			= ftrace_dump_callback,
  };
  static struct ftrace_func_command ftrace_cpudump_cmd = {
  	.name			= "cpudump",
  	.func			= ftrace_cpudump_callback,
  };
  static int __init init_func_cmd_traceon(void)
  {
  	int ret;
  
  	ret = register_ftrace_command(&ftrace_traceoff_cmd);
  	if (ret)
  		return ret;
  
  	ret = register_ftrace_command(&ftrace_traceon_cmd);
  	if (ret)
  		goto out_free_traceoff;
  
  	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
  	if (ret)
  		goto out_free_traceon;
  
  	ret = register_ftrace_command(&ftrace_dump_cmd);
  	if (ret)
  		goto out_free_stacktrace;
  	ret = register_ftrace_command(&ftrace_cpudump_cmd);
  	if (ret)
  		goto out_free_dump;
  	return 0;
   out_free_dump:
  	unregister_ftrace_command(&ftrace_dump_cmd);
   out_free_stacktrace:
  	unregister_ftrace_command(&ftrace_stacktrace_cmd);
   out_free_traceon:
  	unregister_ftrace_command(&ftrace_traceon_cmd);
   out_free_traceoff:
  	unregister_ftrace_command(&ftrace_traceoff_cmd);
  	return ret;
  }
  #else
  static inline int init_func_cmd_traceon(void)
  {
  	return 0;
  }
  #endif /* CONFIG_DYNAMIC_FTRACE */
  static __init int init_function_trace(void)
  {
  	init_func_cmd_traceon();
  	return register_tracer(&function_trace);
  }
  core_initcall(init_function_trace);