kernel/trace/trace.c
  /*
   * ring buffer based function tracer
   *
   * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
   * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
   *
   * Originally taken from the RT patch by:
   *    Arnaldo Carvalho de Melo <acme@redhat.com>
   *
   * Based on code from the latency_tracer, that is:
   *  Copyright (C) 2004-2006 Ingo Molnar
   *  Copyright (C) 2004 William Lee Irwin III
   */
  #include <linux/ring_buffer.h>
  #include <generated/utsrelease.h>
  #include <linux/stacktrace.h>
  #include <linux/writeback.h>
  #include <linux/kallsyms.h>
  #include <linux/seq_file.h>
  #include <linux/notifier.h>
  #include <linux/irqflags.h>
  #include <linux/debugfs.h>
  #include <linux/pagemap.h>
  #include <linux/hardirq.h>
  #include <linux/linkage.h>
  #include <linux/uaccess.h>
  #include <linux/kprobes.h>
  #include <linux/ftrace.h>
  #include <linux/module.h>
  #include <linux/percpu.h>
  #include <linux/splice.h>
  #include <linux/kdebug.h>
  #include <linux/string.h>
  #include <linux/rwsem.h>
  #include <linux/slab.h>
  #include <linux/ctype.h>
  #include <linux/init.h>
  #include <linux/poll.h>
  #include <linux/fs.h>

  #include "trace.h"
  #include "trace_output.h"

  /*
   * On boot up, the ring buffer is set to the minimum size, so that
   * we do not waste memory on systems that are not using tracing.
   */
  int ring_buffer_expanded;
  
  /*
   * We need to change this state when a selftest is running.
   * A selftest will lurk into the ring-buffer to count the
   * entries inserted during the selftest although some concurrent
   * insertions into the ring-buffer such as trace_printk could occur
   * at the same time, giving false positive or negative results.
   */
  static bool __read_mostly tracing_selftest_running;

  /*
   * If a tracer is running, we do not want to run SELFTEST.
   */
  bool __read_mostly tracing_selftest_disabled;

  /* For tracers that don't implement custom flags */
  static struct tracer_opt dummy_tracer_opt[] = {
  	{ }
  };
  
  static struct tracer_flags dummy_tracer_flags = {
  	.val = 0,
  	.opts = dummy_tracer_opt
  };
  
  static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  {
  	return 0;
  }
  
  /*
   * Kill all tracing for good (never come back).
   * It is initialized to 1 but will turn to zero if the initialization
   * of the tracer is successful. But that is the only place that sets
   * this back to zero.
   */
  static int tracing_disabled = 1;

  DEFINE_PER_CPU(int, ftrace_cpu_disabled);
  
  static inline void ftrace_disable_cpu(void)
  {
  	preempt_disable();
  	__this_cpu_inc(ftrace_cpu_disabled);
  }
  
  static inline void ftrace_enable_cpu(void)
  {
  	__this_cpu_dec(ftrace_cpu_disabled);
  	preempt_enable();
  }
  cpumask_var_t __read_mostly	tracing_buffer_mask;

  /*
   * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
   *
   * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
   * is set, then ftrace_dump is called. This will output the contents
   * of the ftrace buffers to the console.  This is very useful for
   * capturing traces that lead to crashes and outputting them to a
   * serial console.
   *
   * It is off by default; you can enable it either by specifying
   * "ftrace_dump_on_oops" on the kernel command line, or by setting
   * /proc/sys/kernel/ftrace_dump_on_oops
   * Set 1 if you want to dump buffers of all CPUs
   * Set 2 if you want to dump the buffer of the CPU that triggered oops
   */
  
  enum ftrace_dump_mode ftrace_dump_on_oops;

  static int tracing_set_tracer(const char *buf);
  #define MAX_TRACER_SIZE		100
  static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
  static char *default_bootup_tracer;

  static int __init set_cmdline_ftrace(char *str)
  {
  	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
  	default_bootup_tracer = bootup_tracer_buf;
  	/* We are using ftrace early, expand it */
  	ring_buffer_expanded = 1;
  	return 1;
  }
  __setup("ftrace=", set_cmdline_ftrace);

  static int __init set_ftrace_dump_on_oops(char *str)
  {
  	if (*str++ != '=' || !*str) {
  		ftrace_dump_on_oops = DUMP_ALL;
  		return 1;
  	}
  
  	if (!strcmp("orig_cpu", str)) {
  		ftrace_dump_on_oops = DUMP_ORIG;
  		return 1;
  	}
  
  	return 0;
  }
  __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

  unsigned long long ns2usecs(cycle_t nsec)
  {
  	nsec += 500;
  	do_div(nsec, 1000);
  	return nsec;
  }
  /*
   * The global_trace is the descriptor that holds the tracing
   * buffers for the live tracing. For each CPU, it contains
   * a linked list of pages that will store trace entries. The
   * page descriptor of the pages in memory is used to hold
   * the linked list by linking the lru item in the page descriptor
   * to each of the pages in the buffer per CPU.
   *
   * For each active CPU there is a data field that holds the
   * pages for the buffer for that CPU. Each CPU has the same number
   * of pages allocated for its buffer.
   */
  static struct trace_array	global_trace;
  
  static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
  int filter_current_check_discard(struct ring_buffer *buffer,
  				 struct ftrace_event_call *call, void *rec,
  				 struct ring_buffer_event *event)
  {
  	return filter_check_discard(call, rec, buffer, event);
  }
  EXPORT_SYMBOL_GPL(filter_current_check_discard);

  cycle_t ftrace_now(int cpu)
  {
  	u64 ts;
  
  	/* Early boot up does not have a buffer yet */
  	if (!global_trace.buffer)
  		return trace_clock_local();
  
  	ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
  	ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);
  
  	return ts;
  }

  /*
   * The max_tr is used to snapshot the global_trace when a maximum
   * latency is reached. Some tracers will use this to store a maximum
   * trace while it continues examining live traces.
   *
   * The buffers for the max_tr are set up the same as the global_trace.
   * When a snapshot is taken, the linked list of the max_tr is swapped
   * with the linked list of the global_trace and the buffers are reset for
   * the global_trace so the tracing can continue.
   */
  static struct trace_array	max_tr;
  static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);

  /* tracer_enabled is used to toggle activation of a tracer */
  static int			tracer_enabled = 1;

  /**
   * tracing_is_enabled - return tracer_enabled status
   *
   * This function is used by other tracers to know the status
   * of the tracer_enabled flag.  Tracers may use this function
   * to know if they should enable their features when starting
   * up. See irqsoff tracer for an example (start_irqsoff_tracer).
   */
  int tracing_is_enabled(void)
  {
  	return tracer_enabled;
  }
  /*
   * trace_buf_size is the size in bytes that is allocated
   * for a buffer. Note, the number of bytes is always rounded
   * to page size.
   *
   * This number is purposely set to a low number of 16384.
   * If a dump on oops happens, it is much appreciated not to
   * have to wait for all that output. In any case, this is
   * configurable at both boot time and run time.
   */
  #define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

  static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

  /* trace_types holds a linked list of available tracers. */
  static struct tracer		*trace_types __read_mostly;
  
  /* current_trace points to the tracer that is currently active */
  static struct tracer		*current_trace __read_mostly;
  
  /*
   * trace_types_lock is used to protect the trace_types list.
   */
  static DEFINE_MUTEX(trace_types_lock);

  /*
   * serialize the access of the ring buffer
   *
   * The ring buffer serializes readers, but that is only low level
   * protection. The validity of the events (returned by
   * ring_buffer_peek() etc.) is not protected by the ring buffer.
   *
   * The content of events may become garbage if we allow other processes
   * to consume these events concurrently:
   *   A) the page of the consumed events may become a normal page
   *      (not a reader page) in the ring buffer, and this page will be
   *      rewritten by the events producer.
   *   B) The page of the consumed events may become a page for
   *      splice_read, and this page will be returned to the system.
   *
   * These primitives allow multiple processes to access different
   * per-cpu ring buffers concurrently.
   *
   * These primitives don't distinguish read-only and read-consume access.
   * Multiple read-only accesses are also serialized.
   */
  
  #ifdef CONFIG_SMP
  static DECLARE_RWSEM(all_cpu_access_lock);
  static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
  
  static inline void trace_access_lock(int cpu)
  {
  	if (cpu == TRACE_PIPE_ALL_CPU) {
  		/* gain it for accessing the whole ring buffer. */
  		down_write(&all_cpu_access_lock);
  	} else {
  		/* gain it for accessing a cpu ring buffer. */
  
  		/* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */
  		down_read(&all_cpu_access_lock);
  
  		/* Secondly block other access to this @cpu ring buffer. */
  		mutex_lock(&per_cpu(cpu_access_lock, cpu));
  	}
  }
  
  static inline void trace_access_unlock(int cpu)
  {
  	if (cpu == TRACE_PIPE_ALL_CPU) {
  		up_write(&all_cpu_access_lock);
  	} else {
  		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
  		up_read(&all_cpu_access_lock);
  	}
  }
  
  static inline void trace_access_lock_init(void)
  {
  	int cpu;
  
  	for_each_possible_cpu(cpu)
  		mutex_init(&per_cpu(cpu_access_lock, cpu));
  }
  
  #else
  
  static DEFINE_MUTEX(access_lock);
  
  static inline void trace_access_lock(int cpu)
  {
  	(void)cpu;
  	mutex_lock(&access_lock);
  }
  
  static inline void trace_access_unlock(int cpu)
  {
  	(void)cpu;
  	mutex_unlock(&access_lock);
  }
  
  static inline void trace_access_lock_init(void)
  {
  }
  
  #endif
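  
  /*
   * A minimal sketch of the intended calling pattern (illustrative only;
   * the real callers live in the trace_pipe read and splice paths):
   *
   *	trace_access_lock(cpu);
   *	... consume events from that cpu's buffer, or pass
   *	... TRACE_PIPE_ALL_CPU to cover the whole ring buffer ...
   *	trace_access_unlock(cpu);
   */
  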
  /* trace_wait is a waitqueue for tasks blocked on trace_poll */
  static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
  /* trace_flags holds trace_options default values */
  unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
  	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
  	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
  	TRACE_ITER_IRQ_INFO;

  static int trace_stop_count;
  static DEFINE_RAW_SPINLOCK(tracing_start_lock);

  static void wakeup_work_handler(struct work_struct *work)
  {
  	wake_up(&trace_wait);
  }
  
  static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
  /**
   * trace_wake_up - wake up tasks waiting for trace input
   *
   * Schedules a delayed work to wake up any task that is blocked on the
   * trace_wait queue. This is used with trace_poll for tasks polling the
   * trace.
   */
  void trace_wake_up(void)
  {
  	const unsigned long delay = msecs_to_jiffies(2);
  
  	if (trace_flags & TRACE_ITER_BLOCK)
  		return;
  	schedule_delayed_work(&wakeup_work, delay);
  }

  static int __init set_buf_size(char *str)
  {
  	unsigned long buf_size;

  	if (!str)
  		return 0;
  	buf_size = memparse(str, &str);
  	/* nr_entries can not be zero */
  	if (buf_size == 0)
  		return 0;
  	trace_buf_size = buf_size;
  	return 1;
  }
  __setup("trace_buf_size=", set_buf_size);

  static int __init set_tracing_thresh(char *str)
  {
  	unsigned long threshold;
  	int ret;
  
  	if (!str)
  		return 0;
  	ret = strict_strtoul(str, 0, &threshold);
  	if (ret < 0)
  		return 0;
  	tracing_thresh = threshold * 1000;
  	return 1;
  }
  __setup("tracing_thresh=", set_tracing_thresh);
  unsigned long nsecs_to_usecs(unsigned long nsecs)
  {
  	return nsecs / 1000;
  }
  /* These must match the bit positions in trace_iterator_flags */
  static const char *trace_options[] = {
  	"print-parent",
  	"sym-offset",
  	"sym-addr",
  	"verbose",
  	"raw",
  	"hex",
  	"bin",
  	"block",
  	"stacktrace",
  	"trace_printk",
  	"ftrace_preempt",
  	"branch",
  	"annotate",
  	"userstacktrace",
  	"sym-userobj",
  	"printk-msg-only",
  	"context-info",
  	"latency-format",
  	"sleep-time",
  	"graph-time",
  	"record-cmd",
  	"overwrite",
  	"disable_on_free",
  	"irq-info",
  	NULL
  };
  static struct {
  	u64 (*func)(void);
  	const char *name;
  } trace_clocks[] = {
  	{ trace_clock_local,	"local" },
  	{ trace_clock_global,	"global" },
  	{ trace_clock_counter,	"counter" },
  };
  
  int trace_clock_id;
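  
  /*
   * Illustrative only: the clock names above are what user space selects
   * through the trace_clock file, e.g. "echo global > trace_clock".
   */
  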
  /*
   * trace_parser_get_init - gets the buffer for trace parser
   */
  int trace_parser_get_init(struct trace_parser *parser, int size)
  {
  	memset(parser, 0, sizeof(*parser));
  
  	parser->buffer = kmalloc(size, GFP_KERNEL);
  	if (!parser->buffer)
  		return 1;
  
  	parser->size = size;
  	return 0;
  }
  
  /*
   * trace_parser_put - frees the buffer for trace parser
   */
  void trace_parser_put(struct trace_parser *parser)
  {
  	kfree(parser->buffer);
  }
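  
  /*
   * Sketch of the expected pairing (hypothetical caller):
   *
   *	struct trace_parser parser;
   *
   *	if (trace_parser_get_init(&parser, PAGE_SIZE))
   *		return -ENOMEM;
   *	...
   *	trace_parser_put(&parser);
   */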
  
  /*
   * trace_get_user - reads the user input string separated by space
   * (matched by isspace(ch))
   *
   * For each string found the 'struct trace_parser' is updated,
   * and the function returns.
   *
   * Returns number of bytes read.
   *
   * See kernel/trace/trace.h for 'struct trace_parser' details.
   */
  int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
  	size_t cnt, loff_t *ppos)
  {
  	char ch;
  	size_t read = 0;
  	ssize_t ret;
  
  	if (!*ppos)
  		trace_parser_clear(parser);
  
  	ret = get_user(ch, ubuf++);
  	if (ret)
  		goto out;
  
  	read++;
  	cnt--;
  
  	/*
  	 * The parser is not finished with the last write,
  	 * continue reading the user input without skipping spaces.
  	 */
  	if (!parser->cont) {
  		/* skip white space */
  		while (cnt && isspace(ch)) {
  			ret = get_user(ch, ubuf++);
  			if (ret)
  				goto out;
  			read++;
  			cnt--;
  		}
  
  		/* only spaces were written */
  		if (isspace(ch)) {
  			*ppos += read;
  			ret = read;
  			goto out;
  		}
  
  		parser->idx = 0;
  	}
  
  	/* read the non-space input */
  	while (cnt && !isspace(ch)) {
  		if (parser->idx < parser->size - 1)
  			parser->buffer[parser->idx++] = ch;
  		else {
  			ret = -EINVAL;
  			goto out;
  		}
  		ret = get_user(ch, ubuf++);
  		if (ret)
  			goto out;
  		read++;
  		cnt--;
  	}
  
  	/* We either got finished input or we have to wait for another call. */
  	if (isspace(ch)) {
  		parser->buffer[parser->idx] = 0;
  		parser->cont = false;
  	} else {
  		parser->cont = true;
  		parser->buffer[parser->idx++] = ch;
  	}
  
  	*ppos += read;
  	ret = read;
  
  out:
  	return ret;
  }
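  
  /*
   * Illustrative behavior: given the user input "  foo bar", successive
   * calls deliver "foo" and then "bar" through parser->buffer, with
   * parser->cont marking a token that was split across writes.
   */
  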
  ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
  {
  	int len;
  	int ret;
  	if (!cnt)
  		return 0;
  	if (s->len <= s->readpos)
  		return -EBUSY;
  
  	len = s->len - s->readpos;
  	if (cnt > len)
  		cnt = len;
  	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
  	if (ret == cnt)
  		return -EFAULT;
  	cnt -= ret;
  	s->readpos += cnt;
  	return cnt;
  }
  static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
  {
  	int len;
  	void *ret;
  
  	if (s->len <= s->readpos)
  		return -EBUSY;
  
  	len = s->len - s->readpos;
  	if (cnt > len)
  		cnt = len;
  	ret = memcpy(buf, s->buffer + s->readpos, cnt);
  	if (!ret)
  		return -EFAULT;
  	s->readpos += cnt;
  	return cnt;
  }
  /*
   * ftrace_max_lock is used to protect the swapping of buffers
   * when taking a max snapshot. The buffers themselves are
   * protected by per_cpu spinlocks. But the action of the swap
   * needs its own lock.
   *
   * This is defined as an arch_spinlock_t in order to help
   * with performance when lockdep debugging is enabled.
   *
   * It is also used in other places outside the update_max_tr
   * so it needs to be defined outside of the
   * CONFIG_TRACER_MAX_TRACE.
   */
  static arch_spinlock_t ftrace_max_lock =
  	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

  unsigned long __read_mostly	tracing_thresh;
  #ifdef CONFIG_TRACER_MAX_TRACE
  unsigned long __read_mostly	tracing_max_latency;
  
  /*
   * Copy the new maximum trace into the separate maximum-trace
   * structure. (this way the maximum trace is permanently saved,
   * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
   */
  static void
  __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
  {
  	struct trace_array_cpu *data = tr->data[cpu];
  	struct trace_array_cpu *max_data;
  
  	max_tr.cpu = cpu;
  	max_tr.time_start = data->preempt_timestamp;
  	max_data = max_tr.data[cpu];
  	max_data->saved_latency = tracing_max_latency;
  	max_data->critical_start = data->critical_start;
  	max_data->critical_end = data->critical_end;

  	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
  	max_data->pid = tsk->pid;
  	max_data->uid = task_uid(tsk);
  	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
  	max_data->policy = tsk->policy;
  	max_data->rt_priority = tsk->rt_priority;
  
  	/* record this task's comm */
  	tracing_record_cmdline(tsk);
  }
  /**
   * update_max_tr - snapshot all trace buffers from global_trace to max_tr
   * @tr: tracer
   * @tsk: the task with the latency
   * @cpu: The cpu that initiated the trace.
   *
   * Flip the buffers between the @tr and the max_tr and record information
   * about which task was the cause of this latency.
   */
  void
  update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
  {
  	struct ring_buffer *buf = tr->buffer;

  	if (trace_stop_count)
  		return;
  	WARN_ON_ONCE(!irqs_disabled());
  	if (!current_trace->use_max_tr) {
  		WARN_ON_ONCE(1);
  		return;
  	}
  	arch_spin_lock(&ftrace_max_lock);
  
  	tr->buffer = max_tr.buffer;
  	max_tr.buffer = buf;
  	__update_max_tr(tr, tsk, cpu);
  	arch_spin_unlock(&ftrace_max_lock);
  }
  
  /**
   * update_max_tr_single - only copy one trace over, and reset the rest
   * @tr: tracer
   * @tsk: task with the latency
   * @cpu: the cpu of the buffer to copy.
4fcdae83c   Steven Rostedt   ftrace: comment code
655
656
   *
   * Flip the trace of a single CPU buffer between the @tr and the max_tr.
   */
  void
  update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
  {
  	int ret;

  	if (trace_stop_count)
  		return;
  	WARN_ON_ONCE(!irqs_disabled());
  	if (!current_trace->use_max_tr) {
  		WARN_ON_ONCE(1);
  		return;
  	}
  	arch_spin_lock(&ftrace_max_lock);

  	ftrace_disable_cpu();
  	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
  	if (ret == -EBUSY) {
  		/*
  		 * We failed to swap the buffer due to a commit taking
  		 * place on this CPU. We fail to record, but we reset
  		 * the max trace buffer (no one writes directly to it)
  		 * and flag that it failed.
  		 */
  		trace_array_printk(&max_tr, _THIS_IP_,
  			"Failed to swap buffers due to commit in progress
  ");
  	}
  	ftrace_enable_cpu();
  	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
  
  	__update_max_tr(tr, tsk, cpu);
  	arch_spin_unlock(&ftrace_max_lock);
  }
  #endif /* CONFIG_TRACER_MAX_TRACE */

  /**
   * register_tracer - register a tracer with the ftrace system.
   * @type: the plugin for the tracer
   *
   * Register a new plugin tracer.
   */
  int register_tracer(struct tracer *type)
  __releases(kernel_lock)
  __acquires(kernel_lock)
  {
  	struct tracer *t;
  	int ret = 0;
  
  	if (!type->name) {
  		pr_info("Tracer must have a name
  ");
  		return -1;
  	}
  	if (strlen(type->name) >= MAX_TRACER_SIZE) {
  		pr_info("Tracer has a name longer than %d
  ", MAX_TRACER_SIZE);
  		return -1;
  	}
  	mutex_lock(&trace_types_lock);

  	tracing_selftest_running = true;
  	for (t = trace_types; t; t = t->next) {
  		if (strcmp(type->name, t->name) == 0) {
  			/* already found */
  			pr_info("Tracer %s already registered
  ",
  				type->name);
  			ret = -1;
  			goto out;
  		}
  	}
  	if (!type->set_flag)
  		type->set_flag = &dummy_set_flag;
  	if (!type->flags)
  		type->flags = &dummy_tracer_flags;
  	else
  		if (!type->flags->opts)
  			type->flags->opts = dummy_tracer_opt;
  	if (!type->wait_pipe)
  		type->wait_pipe = default_wait_pipe;

  #ifdef CONFIG_FTRACE_STARTUP_TEST
  	if (type->selftest && !tracing_selftest_disabled) {
  		struct tracer *saved_tracer = current_trace;
  		struct trace_array *tr = &global_trace;

  		/*
  		 * Run a selftest on this tracer.
  		 * Here we reset the trace buffer, and set the current
  		 * tracer to be this tracer. The tracer can then run some
  		 * internal tracing to verify that everything is in order.
  		 * If we fail, we do not register this tracer.
  		 */
  		tracing_reset_online_cpus(tr);

  		current_trace = type;
  
  		/* If we expanded the buffers, make sure the max is expanded too */
  		if (ring_buffer_expanded && type->use_max_tr)
  			ring_buffer_resize(max_tr.buffer, trace_buf_size);
  		/* the test is responsible for initializing and enabling */
  		pr_info("Testing tracer %s: ", type->name);
  		ret = type->selftest(type, tr);
  		/* the test is responsible for resetting too */
  		current_trace = saved_tracer;
  		if (ret) {
  			printk(KERN_CONT "FAILED!
  ");
  			goto out;
  		}
  		/* Only reset on passing, to avoid touching corrupted buffers */
  		tracing_reset_online_cpus(tr);

  		/* Shrink the max buffer again */
  		if (ring_buffer_expanded && type->use_max_tr)
  			ring_buffer_resize(max_tr.buffer, 1);
  		printk(KERN_CONT "PASSED
  ");
  	}
  #endif
  	type->next = trace_types;
  	trace_types = type;

   out:
  	tracing_selftest_running = false;
  	mutex_unlock(&trace_types_lock);
  	if (ret || !default_bootup_tracer)
  		goto out_unlock;
  	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
  		goto out_unlock;
  
  	printk(KERN_INFO "Starting tracer '%s'
  ", type->name);
  	/* Do we want this tracer to start on bootup? */
  	tracing_set_tracer(type->name);
  	default_bootup_tracer = NULL;
  	/* disable other selftests, since this will break it. */
  	tracing_selftest_disabled = 1;
  #ifdef CONFIG_FTRACE_STARTUP_TEST
  	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'
  ",
  	       type->name);
  #endif

   out_unlock:
  	return ret;
  }
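  
  /*
   * A minimal registration sketch (hypothetical tracer, illustrative only):
   *
   *	static struct tracer example_tracer __read_mostly = {
   *		.name	= "example",
   *		.init	= example_tracer_init,
   *		.reset	= example_tracer_reset,
   *	};
   *
   *	static int __init init_example_tracer(void)
   *	{
   *		return register_tracer(&example_tracer);
   *	}
   *	device_initcall(init_example_tracer);
   */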
  
  void unregister_tracer(struct tracer *type)
  {
  	struct tracer **t;
  
  	mutex_lock(&trace_types_lock);
  	for (t = &trace_types; *t; t = &(*t)->next) {
  		if (*t == type)
  			goto found;
  	}
  	pr_info("Tracer %s not registered
  ", type->name);
  	goto out;
  
   found:
  	*t = (*t)->next;
  
  	if (type == current_trace && tracer_enabled) {
  		tracer_enabled = 0;
  		tracing_stop();
  		if (current_trace->stop)
  			current_trace->stop(&global_trace);
  		current_trace = &nop_trace;
  	}
  out:
  	mutex_unlock(&trace_types_lock);
  }
  static void __tracing_reset(struct ring_buffer *buffer, int cpu)
  {
  	ftrace_disable_cpu();
  	ring_buffer_reset_cpu(buffer, cpu);
  	ftrace_enable_cpu();
  }
  void tracing_reset(struct trace_array *tr, int cpu)
  {
  	struct ring_buffer *buffer = tr->buffer;
  
  	ring_buffer_record_disable(buffer);
  
  	/* Make sure all commits have finished */
  	synchronize_sched();
  	__tracing_reset(buffer, cpu);
  
  	ring_buffer_record_enable(buffer);
  }
  void tracing_reset_online_cpus(struct trace_array *tr)
  {
  	struct ring_buffer *buffer = tr->buffer;
  	int cpu;
  	ring_buffer_record_disable(buffer);
  
  	/* Make sure all commits have finished */
  	synchronize_sched();
  	tr->time_start = ftrace_now(tr->cpu);
  
  	for_each_online_cpu(cpu)
  		__tracing_reset(buffer, cpu);
  
  	ring_buffer_record_enable(buffer);
  }
  void tracing_reset_current(int cpu)
  {
  	tracing_reset(&global_trace, cpu);
  }
  
  void tracing_reset_current_online_cpus(void)
  {
  	tracing_reset_online_cpus(&global_trace);
  }
  #define SAVED_CMDLINES 128
  #define NO_CMDLINE_MAP UINT_MAX
  static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
  static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
  static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
  static int cmdline_idx;
  static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

  /* temporarily disable recording */
  static atomic_t trace_record_cmdline_disabled __read_mostly;
  
  static void trace_init_cmdlines(void)
  {
  	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
  	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
  	cmdline_idx = 0;
  }
  int is_tracing_stopped(void)
  {
  	return trace_stop_count;
  }
  /**
   * ftrace_off_permanent - disable all ftrace code permanently
   *
   * This should only be called when a serious anomaly has
   * been detected.  This will turn off the function tracing,
   * ring buffers, and other tracing utilities. It takes no
   * locks and can be called from any context.
   */
  void ftrace_off_permanent(void)
  {
  	tracing_disabled = 1;
  	ftrace_stop();
  	tracing_off_permanent();
  }
  
  /**
   * tracing_start - quick start of the tracer
   *
   * If tracing is enabled but was stopped by tracing_stop,
   * this will start the tracer back up.
   */
  void tracing_start(void)
  {
  	struct ring_buffer *buffer;
  	unsigned long flags;
  
  	if (tracing_disabled)
  		return;
  	raw_spin_lock_irqsave(&tracing_start_lock, flags);
  	if (--trace_stop_count) {
  		if (trace_stop_count < 0) {
  			/* Someone screwed up their debugging */
  			WARN_ON_ONCE(1);
  			trace_stop_count = 0;
  		}
  		goto out;
  	}
  	/* Prevent the buffers from switching */
  	arch_spin_lock(&ftrace_max_lock);
  
  	buffer = global_trace.buffer;
  	if (buffer)
  		ring_buffer_record_enable(buffer);
  
  	buffer = max_tr.buffer;
  	if (buffer)
  		ring_buffer_record_enable(buffer);
  	arch_spin_unlock(&ftrace_max_lock);
  	ftrace_start();
   out:
  	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
  }
  
  /**
   * tracing_stop - quick stop of the tracer
   *
   * Lightweight way to stop tracing. Use in conjunction with
   * tracing_start.
   */
  void tracing_stop(void)
  {
  	struct ring_buffer *buffer;
  	unsigned long flags;
  
  	ftrace_stop();
  	raw_spin_lock_irqsave(&tracing_start_lock, flags);
  	if (trace_stop_count++)
  		goto out;
  	/* Prevent the buffers from switching */
  	arch_spin_lock(&ftrace_max_lock);
  	buffer = global_trace.buffer;
  	if (buffer)
  		ring_buffer_record_disable(buffer);
  
  	buffer = max_tr.buffer;
  	if (buffer)
  		ring_buffer_record_disable(buffer);
  	arch_spin_unlock(&ftrace_max_lock);
   out:
  	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
  }
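  
  /*
   * Typical pairing (sketch): a caller that needs the buffers quiescent
   * brackets its critical section with the two calls above:
   *
   *	tracing_stop();
   *	... inspect or resize the buffers ...
   *	tracing_start();
   */
  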
  void trace_stop_cmdline_recording(void);

  static void trace_save_cmdline(struct task_struct *tsk)
  {
  	unsigned pid, idx;
  
  	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
  		return;
  
  	/*
  	 * It's not the end of the world if we don't get
  	 * the lock, but we also don't want to spin
  	 * nor do we want to disable interrupts,
  	 * so if we miss here, then better luck next time.
  	 */
  	if (!arch_spin_trylock(&trace_cmdline_lock))
  		return;
  
  	idx = map_pid_to_cmdline[tsk->pid];
  	if (idx == NO_CMDLINE_MAP) {
  		idx = (cmdline_idx + 1) % SAVED_CMDLINES;
  		/*
  		 * Check whether the cmdline buffer at idx has a pid
  		 * mapped. We are going to overwrite that entry so we
  		 * need to clear the map_pid_to_cmdline. Otherwise we
  		 * would read the new comm for the old pid.
  		 */
  		pid = map_cmdline_to_pid[idx];
  		if (pid != NO_CMDLINE_MAP)
  			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

  		map_cmdline_to_pid[idx] = tsk->pid;
  		map_pid_to_cmdline[tsk->pid] = idx;
  
  		cmdline_idx = idx;
  	}
  
  	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
  	arch_spin_unlock(&trace_cmdline_lock);
  }
  void trace_find_cmdline(int pid, char comm[])
  {
  	unsigned map;
  	if (!pid) {
  		strcpy(comm, "<idle>");
  		return;
  	}

  	if (WARN_ON_ONCE(pid < 0)) {
  		strcpy(comm, "<XXX>");
  		return;
  	}
  	if (pid > PID_MAX_DEFAULT) {
  		strcpy(comm, "<...>");
  		return;
  	}

  	preempt_disable();
  	arch_spin_lock(&trace_cmdline_lock);
  	map = map_pid_to_cmdline[pid];
  	if (map != NO_CMDLINE_MAP)
  		strcpy(comm, saved_cmdlines[map]);
  	else
  		strcpy(comm, "<...>");

  	arch_spin_unlock(&trace_cmdline_lock);
  	preempt_enable();
  }
  void tracing_record_cmdline(struct task_struct *tsk)
  {
  	if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
  	    !tracing_is_on())
  		return;
  
  	trace_save_cmdline(tsk);
  }
  void
  tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
  			     int pc)
  {
  	struct task_struct *tsk = current;

  	entry->preempt_count		= pc & 0xff;
  	entry->pid			= (tsk) ? tsk->pid : 0;
  	entry->padding			= 0;
  	entry->flags =
  #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
  		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
  #else
  		TRACE_FLAG_IRQS_NOSUPPORT |
  #endif
  		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
  		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
  		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
  }
  EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

  struct ring_buffer_event *
  trace_buffer_lock_reserve(struct ring_buffer *buffer,
  			  int type,
  			  unsigned long len,
  			  unsigned long flags, int pc)
  {
  	struct ring_buffer_event *event;
  	event = ring_buffer_lock_reserve(buffer, len);
  	if (event != NULL) {
  		struct trace_entry *ent = ring_buffer_event_data(event);
  
  		tracing_generic_entry_update(ent, flags, pc);
  		ent->type = type;
  	}
  
  	return event;
  }
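  
  /*
   * The reserve/fill/commit pattern as a sketch (trace_function() below
   * is a real caller):
   *
   *	event = trace_buffer_lock_reserve(buffer, TRACE_FN,
   *					  sizeof(*entry), flags, pc);
   *	if (!event)
   *		return;
   *	entry = ring_buffer_event_data(event);
   *	... fill in *entry ...
   *	ring_buffer_unlock_commit(buffer, event);
   */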

  static inline void
  __trace_buffer_unlock_commit(struct ring_buffer *buffer,
  			     struct ring_buffer_event *event,
  			     unsigned long flags, int pc,
  			     int wake)
  {
  	ring_buffer_unlock_commit(buffer, event);

  	ftrace_trace_stack(buffer, flags, 6, pc);
  	ftrace_trace_userstack(buffer, flags, pc);
  
  	if (wake)
  		trace_wake_up();
  }
  void trace_buffer_unlock_commit(struct ring_buffer *buffer,
  				struct ring_buffer_event *event,
  				unsigned long flags, int pc)
  {
  	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
  }
  struct ring_buffer_event *
  trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
  				  int type, unsigned long len,
  				  unsigned long flags, int pc)
  {
  	*current_rb = global_trace.buffer;
  	return trace_buffer_lock_reserve(*current_rb,
  					 type, len, flags, pc);
  }
  EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

  void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
  					struct ring_buffer_event *event,
  					unsigned long flags, int pc)
  {
  	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
  }
  EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

  void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
  				       struct ring_buffer_event *event,
  				       unsigned long flags, int pc)
  {
  	__trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
  }
  EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);

  void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
  					    struct ring_buffer_event *event,
  					    unsigned long flags, int pc,
  					    struct pt_regs *regs)
  {
  	ring_buffer_unlock_commit(buffer, event);
  
  	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
  	ftrace_trace_userstack(buffer, flags, pc);
  }
  EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
e77405ad8   Steven Rostedt   tracing: pass aro...
1149
1150
  void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
  					 struct ring_buffer_event *event)
77d9f465d   Steven Rostedt   tracing/filters: ...
1151
  {
e77405ad8   Steven Rostedt   tracing: pass aro...
1152
  	ring_buffer_discard_commit(buffer, event);
ef5580d0f   Steven Rostedt   tracing: add inte...
1153
  }
12acd473d   Steven Rostedt   tracing: add EXPO...
1154
  EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->parent_ip		= parent_ip;

	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);
}
void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags,
       int pc)
{
	if (likely(!atomic_read(&data->disabled)))
		trace_function(tr, ip, parent_ip, flags, pc);
}
#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
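/*
 * Sizing note (assuming 4K pages and 8-byte longs): FTRACE_STACK_MAX_ENTRIES
 * is 4096 / 8 = 512 saved return addresses, i.e. each CPU donates one page
 * of scratch space for recording deep stacks outside the event itself.
 */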
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries	= 0;
	trace.skip		= skip;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = ++__get_cpu_var(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		trace.entries		= &__get_cpu_var(ftrace_stack).calls[0];
		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		trace.max_entries	= FTRACE_STACK_ENTRIES;
		trace.entries		= entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__get_cpu_var(ftrace_stack_reserve)--;
	preempt_enable_notrace();
}
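/*
 * Informal picture of the ftrace_stack_reserve scheme used above:
 *
 *	task context:  ++reserve -> 1  may use the per-cpu ftrace_stack
 *	NMI arrives:   ++reserve -> 2  falls back to the event's inline
 *	                               FTRACE_STACK_ENTRIES array
 *	NMI returns:   --reserve -> 1
 *	task context:  --reserve -> 0
 *
 * Only the outermost reserver on a CPU gets the large scratch buffer;
 * nested contexts take the small, bounded path.
 */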
void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}

void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
}
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL);
}
/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 */
void trace_dump_stack(void)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/* skipping 3 traces, seems to get us at the caller of this function */
	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count(), NULL);
}
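/*
 * Usage sketch (hypothetical call site): drop trace_dump_stack() into a
 * path under investigation and the back trace appears inline in the
 * trace output:
 *
 *	if (suspect_condition)		(hypothetical predicate)
 *		trace_dump_stack();
 *
 * The skip of 3 drops trace_dump_stack() and its helpers so the
 * recorded trace starts at the caller.
 */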
static DEFINE_PER_CPU(int, user_stack_count);
void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * The save user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry	= ring_buffer_event_data(event);

	entry->tgid		= current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.skip		= 0;
	trace.entries		= entry->caller;

	save_stack_trace_user(&trace);
	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}
#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr->buffer, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */
/**
 * trace_vbprintk - write binary msg to tracing buffer
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	static arch_spinlock_t trace_buf_lock =
		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	static u32 trace_buf[TRACE_BUF_SIZE];
	struct ftrace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	struct bprint_entry *entry;
	unsigned long flags;
	int disable;
	int cpu, len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	disable = atomic_inc_return(&data->disabled);
	if (unlikely(disable != 1))
		goto out;

	/* Lockdep uses trace_printk for lock tracing */
	local_irq_save(flags);
	arch_spin_lock(&trace_buf_lock);
	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);

	if (len > TRACE_BUF_SIZE || len < 0)
		goto out_unlock;

	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out_unlock;
	entry = ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->fmt			= fmt;

	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
	if (!filter_check_discard(call, entry, buffer, event)) {
		ring_buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}

out_unlock:
	arch_spin_unlock(&trace_buf_lock);
	local_irq_restore(flags);

out:
	atomic_dec_return(&data->disabled);
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
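/*
 * Note on the bprint ("binary printk") format used above: vbin_printf()
 * copies only the raw varargs into trace_buf and the event stores a
 * pointer to the format string itself (entry->fmt).  The costly string
 * formatting is deferred until the trace is read back, which is what
 * keeps this path cheap enough for hot code.
 */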
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_array_vprintk(tr, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
	static char trace_buf[TRACE_BUF_SIZE];
	struct ftrace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array_cpu *data;
	int cpu, len = 0, size, pc;
	struct print_entry *entry;
	unsigned long irq_flags;
	int disable;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	disable = atomic_inc_return(&data->disabled);
	if (unlikely(disable != 1))
		goto out;

	pause_graph_tracing();
	raw_local_irq_save(irq_flags);
	arch_spin_lock(&trace_buf_lock);
	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);

	size = sizeof(*entry) + len + 1;
	buffer = tr->buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, pc);
	if (!event)
		goto out_unlock;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, trace_buf, len);
	entry->buf[len] = '\0';
	if (!filter_check_discard(call, entry, buffer, event)) {
		ring_buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, irq_flags, 6, pc);
	}

 out_unlock:
	arch_spin_unlock(&trace_buf_lock);
	raw_local_irq_restore(irq_flags);
	unpause_graph_tracing();
 out:
	atomic_dec_return(&data->disabled);
	preempt_enable_notrace();

	return len;
}

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
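/*
 * Usage sketch (hypothetical caller holding a struct trace_array *tr):
 *
 *	trace_array_printk(tr, _THIS_IP_, "got %d from %s\n", err, name);
 *
 * Unlike the bprint path, the text is formatted immediately with
 * vsnprintf() and stored as a plain string event, so it also works
 * with format strings that are not compile-time constants.
 */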
static void trace_iterator_increment(struct trace_iterator *iter)
{
	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();

	iter->idx++;
	if (iter->buffer_iter[iter->cpu])
		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);

	ftrace_enable_cpu();
}
static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];

	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
					 lost_events);

	ftrace_enable_cpu();

	if (event) {
		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);
	}
	iter->ent_size = 0;
	return NULL;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->tr->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother by iterating over
	 * all cpu and peek directly.
	 */
	if (cpu_file > TRACE_PIPE_ALL_CPU) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
		}
	}

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}
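/*
 * The loop in __find_next_entry() is effectively a k-way merge: each
 * per-cpu buffer is already time ordered, so the globally next event
 * is simply the per-cpu head with the smallest timestamp.  E.g. with
 * heads cpu0: ts=105, cpu1: ts=103, cpu2: ts=110, the cpu1 entry is
 * returned and only cpu1's iterator advances on the following call.
 */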
/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu,
				      &iter->lost_events, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();
	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
	ftrace_enable_cpu();
}
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}
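/*
 * s_next() follows the seq_file iterator contract: *pos is a record
 * index, not a byte offset.  Ring buffer iterators cannot seek
 * backwards, so s_start() below replays from the beginning whenever
 * the requested position is behind the iterator's current one.
 */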
void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct trace_array *tr = iter->tr;
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	tr->data[cpu]->skipped_entries = 0;

	if (!iter->buffer_iter[cpu])
		return;

	buf_iter = iter->buffer_iter[cpu];
	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->tr->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	tr->data[cpu]->skipped_entries = entries;
}
/*
 * The current tracer is copied to avoid a global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	static struct tracer *old_tracer;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(old_tracer != current_trace && current_trace)) {
		old_tracer = current_trace;
		*iter->trace = *current_trace;
	}
	mutex_unlock(&trace_types_lock);

	atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		ftrace_disable_cpu();

		if (cpu_file == TRACE_PIPE_ALL_CPU) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		ftrace_enable_cpu();

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

	atomic_dec(&trace_record_cmdline_disabled);
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}
static void
get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *entries)
{
	unsigned long count;
	int cpu;

	*total = 0;
	*entries = 0;

	for_each_tracing_cpu(cpu) {
		count = ring_buffer_entries_cpu(tr->buffer, cpu);
		/*
		 * If this buffer has skipped entries, then we hold all
		 * entries for the trace and we need to ignore the
		 * ones before the time stamp.
		 */
		if (tr->data[cpu]->skipped_entries) {
			count -= tr->data[cpu]->skipped_entries;
			/* total is the same as the entries */
			*total += count;
		} else
			*total += count +
				ring_buffer_overrun_cpu(tr->buffer, cpu);
		*entries += count;
	}
}
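/*
 * In other words: "entries" counts what can still be read, while
 * "total" also includes events lost to overruns.  E.g. if 1000 events
 * were written but only the most recent 600 still fit in the buffer,
 * this reports entries=600, total=1000 (absent skipped entries).
 */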
static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n");
	seq_puts(m, "#                 / _-----=> irqs-off        \n");
	seq_puts(m, "#                | / _----=> need-resched    \n");
	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#                |||| /     delay             \n");
	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
}
static void print_event_info(struct trace_array *tr, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(tr, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}

static void print_func_help_header(struct trace_array *tr, struct seq_file *m)
{
	print_event_info(tr, m);
	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |          |         |\n");
}
static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m)
{
	print_event_info(tr, m);
	seq_puts(m, "#                              _-----=> irqs-off\n");
	seq_puts(m, "#                             / _----=> need-resched\n");
	seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
	seq_puts(m, "#                            || / _--=> preempt-depth\n");
	seq_puts(m, "#                            ||| /     delay\n");
	seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |   ||||       |         |\n");
}

void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_array *tr = iter->tr;
	struct trace_array_cpu *data = tr->data[tr->cpu];
	struct tracer *type = current_trace;
	unsigned long entries;
	unsigned long total;
	const char *name = "preemption";

	if (type)
		name = type->name;

	get_total_entries(tr, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid, data->uid, data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}
static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!(trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (iter->tr->data[iter->cpu]->skipped_entries)
		return;

	cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				iter->cpu);
}
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			if (!trace_print_lat_context(iter))
				goto partial;
		} else {
			if (!trace_print_context(iter))
				goto partial;
		}
	}

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (!trace_seq_printf(s, "%d %d %llu ",
				      entry->pid, iter->cpu, iter->ts))
			goto partial;
	}

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	if (!trace_seq_printf(s, "%d ?\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
		SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
		SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD_RET(s, newline);

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD_RET(s, entry->pid);
		SEQ_PUT_FIELD_RET(s, iter->cpu);
		SEQ_PUT_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}
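/*
 * The four print_*_fmt() helpers above back the "bin", "hex" and "raw"
 * trace options plus the default human-readable output; see the
 * dispatch in print_trace_line() below.  Each one defers the per-event
 * body to the trace_event's registered callbacks (trace/raw/hex/binary).
 */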
int trace_empty(struct trace_iterator *iter)
{
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
		cpu = iter->cpu_file;
		if (iter->buffer_iter[cpu]) {
			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu]) {
			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
				return 0;
		}
	}

	return 1;
}
/*  Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	if (iter->lost_events &&
	    !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->cpu, iter->lost_events))
		return TRACE_TYPE_PARTIAL_LINE;

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}
void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}
void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->tr, m);
			else
				print_func_help_header(iter->tr, m);
		}
	}
}
static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
	seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
}
static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);
	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;
	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 *  ret is 0 if seq_file write succeeded.
		 *        -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}
static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};
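/*
 * tracer_seq_ops plugs the iterator into the generic seq_file engine:
 * the core calls ->start, then ->show/->next per record, and ->stop at
 * the end of each read() chunk, re-entering at the saved *pos later.
 */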
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file)
{
	long cpu_file = (long) inode->i_private;
	void *fail_ret = ERR_PTR(-ENOMEM);
	struct trace_iterator *iter;
	struct seq_file *m;
	int cpu, ret;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return ERR_PTR(-ENOMEM);

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	if (current_trace)
		*iter->trace = *current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	if (current_trace && current_trace->print_max)
		iter->tr = &max_tr;
	else
		iter->tr = &global_trace;
	iter->pos = -1;
	mutex_init(&iter->mutex);
	iter->cpu_file = cpu_file;

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->tr->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* stop the trace while dumping */
	tracing_stop();

	if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->tr->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
72c9ddfd4   David Miller   ring-buffer: Make...
2231
2232
2233
  			ring_buffer_read_prepare(iter->tr->buffer, cpu);
  		ring_buffer_read_prepare_sync();
  		ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd54   Steven Rostedt   tracing: use time...
2234
  		tracing_iter_reset(iter, cpu);
3928a8a2d   Steven Rostedt   ftrace: make work...
2235
  	}
85a2f9b46   Steven Rostedt   tracing: use poin...
2236
2237
2238
  	ret = seq_open(file, &tracer_seq_ops);
  	if (ret < 0) {
  		fail_ret = ERR_PTR(ret);
3928a8a2d   Steven Rostedt   ftrace: make work...
2239
  		goto fail_buffer;
85a2f9b46   Steven Rostedt   tracing: use poin...
2240
  	}
bc0c38d13   Steven Rostedt   ftrace: latency t...
2241

3928a8a2d   Steven Rostedt   ftrace: make work...
2242
2243
  	m = file->private_data;
  	m->private = iter;
bc0c38d13   Steven Rostedt   ftrace: latency t...
2244

bc0c38d13   Steven Rostedt   ftrace: latency t...
2245
  	mutex_unlock(&trace_types_lock);
bc0c38d13   Steven Rostedt   ftrace: latency t...
2246
  	return iter;
3928a8a2d   Steven Rostedt   ftrace: make work...
2247
2248
2249
2250
2251
2252
  
   fail_buffer:
  	for_each_tracing_cpu(cpu) {
  		if (iter->buffer_iter[cpu])
  			ring_buffer_read_finish(iter->buffer_iter[cpu]);
  	}
b0dfa978c   Frederic Weisbecker   tracing/ftrace: a...
2253
  	free_cpumask_var(iter->started);
2f26ebd54   Steven Rostedt   tracing: use time...
2254
  	tracing_start();
d7350c3f4   Frederic Weisbecker   tracing/core: mak...
2255
   fail:
3928a8a2d   Steven Rostedt   ftrace: make work...
2256
  	mutex_unlock(&trace_types_lock);
d7350c3f4   Frederic Weisbecker   tracing/core: mak...
2257
  	kfree(iter->trace);
0bb943c7a   Julia Lawall   tracing: kernel/t...
2258
  	kfree(iter);
3928a8a2d   Steven Rostedt   ftrace: make work...
2259

85a2f9b46   Steven Rostedt   tracing: use poin...
2260
  	return fail_ret;
bc0c38d13   Steven Rostedt   ftrace: latency t...
2261
2262
2263
2264
  }
  
  int tracing_open_generic(struct inode *inode, struct file *filp)
  {
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
  	return 0;
  }
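
/*
 * Release for the "trace" file: tear down the per-cpu ring buffer
 * iterators, give the tracer a chance to clean up, and restart
 * tracing.
 */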
  static int tracing_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ))
		return 0;

	iter = m->private;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	/* reenable tracing if it was previously enabled */
	tracing_start();
	mutex_unlock(&trace_types_lock);

	seq_release(inode, file);
	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
  	kfree(iter);
  	return 0;
  }
  
  static int tracing_open(struct inode *inode, struct file *file)
  {
	struct trace_iterator *iter;
	int ret = 0;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		long cpu = (long) inode->i_private;

		if (cpu == TRACE_PIPE_ALL_CPU)
			tracing_reset_online_cpus(&global_trace);
		else
			tracing_reset(&global_trace, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	return ret;
  }

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = t->next;

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_printf(m, "%s", t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');
  
  	return 0;
  }

static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
  };
  
  static int show_traces_open(struct inode *inode, struct file *file)
  {
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(file, &show_traces_seq_ops);
  }
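
/*
 * Illustrative note: show_traces_fops (below) is hooked up elsewhere in
 * this file as the "available_tracers" debugfs file, so the registered
 * tracers can be listed from userspace with e.g.
 *   # cat /sys/kernel/debug/tracing/available_tracers
 */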
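
/*
 * Writes to the "trace" file are accepted but ignored; opening the file
 * with O_TRUNC (handled in tracing_open() above) is what actually
 * clears the buffer.
 */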
  static ssize_t
  tracing_write_stub(struct file *filp, const char __user *ubuf,
  		   size_t count, loff_t *ppos)
  {
  	return count;
  }

static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
  {
  	if (file->f_mode & FMODE_READ)
  		return seq_lseek(file, offset, origin);
  	else
  		return 0;
  }

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_seek,
	.release	= tracing_release,
  };

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
  };

/*
 * Only trace on a CPU if the bitmask is set:
 */
static cpumask_var_t tracing_cpumask;
  
  /*
   * The tracer itself will not take this lock, but still we want
   * to provide a consistent cpumask to user-space:
   */
  static DEFINE_MUTEX(tracing_cpumask_update_lock);
  
  /*
   * Temporary storage for the character representation of the
   * CPU bitmask (and one more byte for the newline):
   */
  static char mask_str[NR_CPUS + 1];

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
	if (count - len < 2) {
		count = -EINVAL;
		goto out_err;
	}
	len += sprintf(mask_str + len, "\n");
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
  	mutex_unlock(&tracing_cpumask_update_lock);
  
  	return count;
  }
  
  static ssize_t
  tracing_cpumask_write(struct file *filp, const char __user *ubuf,
  		      size_t count, loff_t *ppos)
  {
	int err, cpu;
	cpumask_var_t tracing_cpumask_new;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&ftrace_max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&global_trace.data[cpu]->disabled);
		}
		if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&global_trace.data[cpu]->disabled);
		}
	}
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_enable();

	cpumask_copy(tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
  }
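
/*
 * Illustrative example: the mask is parsed as hex, so restricting
 * tracing to CPUs 0 and 1 would be
 *   # echo 3 > /sys/kernel/debug/tracing/tracing_cpumask
 */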

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.llseek		= generic_file_llseek,
  };

static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = current_trace->flags->val;
	trace_opts = current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}
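
/*
 * Apply one tracer-specific option: the tracer validates the change
 * through its set_flag() callback, and on success the bit is mirrored
 * in tracer_flags->val.
 */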
  static int __set_tracer_option(struct tracer *trace,
  			       struct tracer_flags *tracer_flags,
  			       struct tracer_opt *opts, int neg)
  {
  	int ret;

	ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
  }

/* Try to assign a tracer specific option */
static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
{
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(trace, trace->flags,
						   opts, neg);
	}

	return -EINVAL;
  }

static void set_tracer_flags(unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(trace_flags & mask) == !!enabled)
		return;

	if (enabled)
		trace_flags |= mask;
	else
		trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_OVERWRITE)
		ring_buffer_change_overwrite(global_trace.buffer, enabled);
  }

static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int neg = 0;
	int ret;
	int i;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			set_tracer_flags(1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i]) {
		mutex_lock(&trace_types_lock);
		ret = set_tracer_option(current_trace, cmp, neg);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;
  
  	return cnt;
  }

static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	if (tracing_disabled)
		return -ENODEV;
	return single_open(file, tracing_trace_options_show, NULL);
}

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= tracing_trace_options_write,
  };

static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# mount -t debugfs nodev /sys/kernel/debug\n\n"
	"# cat /sys/kernel/debug/tracing/available_tracers\n"
	"wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n"
	"# cat /sys/kernel/debug/tracing/current_tracer\n"
	"nop\n"
	"# echo sched_switch > /sys/kernel/debug/tracing/current_tracer\n"
	"# cat /sys/kernel/debug/tracing/current_tracer\n"
	"sched_switch\n"
	"# cat /sys/kernel/debug/tracing/trace_options\n"
	"noprint-parent nosym-offset nosym-addr noverbose\n"
	"# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
	"# echo 1 > /sys/kernel/debug/tracing/tracing_on\n"
	"# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
	"# echo 0 > /sys/kernel/debug/tracing/tracing_on\n"
;
  
  static ssize_t
  tracing_readme_read(struct file *filp, char __user *ubuf,
  		       size_t cnt, loff_t *ppos)
  {
  	return simple_read_from_buffer(ubuf, cnt, ppos,
  					readme_msg, strlen(readme_msg));
  }

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
  };
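
/*
 * Dump the saved pid -> comm mappings through the "saved_cmdlines"
 * file, one "<pid> <comm>" pair per line.
 */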
  static ssize_t
  tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
  				size_t cnt, loff_t *ppos)
  {
  	char *buf_comm;
  	char *file_buf;
  	char *buf;
  	int len = 0;
  	int pid;
  	int i;
  
  	file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
  	if (!file_buf)
  		return -ENOMEM;
  
  	buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
  	if (!buf_comm) {
  		kfree(file_buf);
  		return -ENOMEM;
  	}
  
  	buf = file_buf;
  
  	for (i = 0; i < SAVED_CMDLINES; i++) {
  		int r;
  
  		pid = map_cmdline_to_pid[i];
  		if (pid == -1 || pid == NO_CMDLINE_MAP)
  			continue;
  
  		trace_find_cmdline(pid, buf_comm);
		r = sprintf(buf, "%d %s\n", pid, buf_comm);
  		buf += r;
  		len += r;
  	}
  
  	len = simple_read_from_buffer(ubuf, cnt, ppos,
  				      file_buf, len);
  
  	kfree(file_buf);
  	kfree(buf_comm);
  
  	return len;
  }
  
  static const struct file_operations tracing_saved_cmdlines_fops = {
      .open       = tracing_open_generic,
      .read       = tracing_saved_cmdlines_read,
    .llseek	= generic_file_llseek,
  };
  
  static ssize_t
  tracing_ctrl_read(struct file *filp, char __user *ubuf,
  		  size_t cnt, loff_t *ppos)
  {
	char buf[64];
	int r;

	r = sprintf(buf, "%u\n", tracer_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
  }
  
  static ssize_t
  tracing_ctrl_write(struct file *filp, const char __user *ubuf,
  		   size_t cnt, loff_t *ppos)
  {
  	struct trace_array *tr = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&trace_types_lock);
	if (tracer_enabled ^ val) {

		/* Only need to warn if this is used to change the state */
		WARN_ONCE(1, "tracing_enabled is deprecated. Use tracing_on");

		if (val) {
			tracer_enabled = 1;
			if (current_trace->start)
				current_trace->start(tr);
			tracing_start();
		} else {
			tracer_enabled = 0;
			tracing_stop();
			if (current_trace->stop)
				current_trace->stop(tr);
		}
	}
	mutex_unlock(&trace_types_lock);

	*ppos += cnt;
  
  	return cnt;
  }
  
  static ssize_t
  tracing_set_trace_read(struct file *filp, char __user *ubuf,
  		       size_t cnt, loff_t *ppos)
  {
  	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	if (current_trace)
		r = sprintf(buf, "%s\n", current_trace->name);
	else
		r = sprintf(buf, "\n");
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
  }

int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
	return t->init(tr);
}

static int __tracing_resize_ring_buffer(unsigned long size)
  {
  	int ret;
  
  	/*
  	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = 1;

	ret = ring_buffer_resize(global_trace.buffer, size);
	if (ret < 0)
		return ret;

	if (!current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(max_tr.buffer, size);
	if (ret < 0) {
		int r;

		r = ring_buffer_resize(global_trace.buffer,
				       global_trace.entries);
		if (r < 0) {
			/*
			 * AARGH! We are left with different
			 * size max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded in
			 * updating the size of the main buffer, but failed
			 * to update the size of the max buffer. But when we
			 * tried to reset the main buffer to the original
			 * size, we failed there too. This is very unlikely
			 * to happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	max_tr.entries = size;
 out:
	global_trace.entries = size;

	return ret;
  }
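
/*
 * Resize with tracing stopped and every per-cpu buffer disabled, so
 * that no events can be written while the buffer pages are
 * reallocated.
 */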
  static ssize_t tracing_resize_ring_buffer(unsigned long size)
  {
  	int cpu, ret = size;
  
  	mutex_lock(&trace_types_lock);
  
  	tracing_stop();
  
  	/* disable all cpu buffers */
  	for_each_tracing_cpu(cpu) {
  		if (global_trace.data[cpu])
  			atomic_inc(&global_trace.data[cpu]->disabled);
  		if (max_tr.data[cpu])
  			atomic_inc(&max_tr.data[cpu]->disabled);
  	}
  
  	if (size != global_trace.entries)
  		ret = __tracing_resize_ring_buffer(size);
  
  	if (ret < 0)
  		ret = -ENOMEM;
  
  	for_each_tracing_cpu(cpu) {
  		if (global_trace.data[cpu])
  			atomic_dec(&global_trace.data[cpu]->disabled);
  		if (max_tr.data[cpu])
  			atomic_dec(&max_tr.data[cpu]->disabled);
  	}
  
  	tracing_start();
  	mutex_unlock(&trace_types_lock);
  
  	return ret;
  }

  /**
   * tracing_update_buffers - used by tracing facility to expand ring buffers
   *
 * To save memory when tracing is never used on a system that has it
 * configured in, the ring buffers are initially set to a minimum size.
 * Once a user starts to use the tracing facility, they need to grow
 * to their default size.
   *
   * This function is to be called when a tracer is about to be used.
   */
  int tracing_update_buffers(void)
  {
  	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(trace_buf_size);
	mutex_unlock(&trace_types_lock);
  
  	return ret;
  }

struct trace_option_dentry;
  
  static struct trace_option_dentry *
  create_trace_option_files(struct tracer *tracer);
  
  static void
  destroy_trace_option_files(struct trace_option_dentry *topts);
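
/*
 * Switch to the tracer named by @buf: reset the old tracer, recreate
 * the per-tracer option files, resize max_tr if the new tracer needs
 * it, and run the new tracer's init callback.
 */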
  static int tracing_set_tracer(const char *buf)
{
	static struct trace_option_dentry *topts;
	struct trace_array *tr = &global_trace;
	struct tracer *t;
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(trace_buf_size);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == current_trace)
		goto out;

	trace_branch_disable();
	if (current_trace && current_trace->reset)
		current_trace->reset(tr);
	if (current_trace && current_trace->use_max_tr) {
		/*
		 * We don't free the ring buffer; instead, we resize it,
		 * because the max_tr ring buffer has some state
		 * (e.g. ring->clock) that we want to preserve.
		 */
		ring_buffer_resize(max_tr.buffer, 1);
		max_tr.entries = 1;
	}
	destroy_trace_option_files(topts);

	current_trace = t;

	topts = create_trace_option_files(current_trace);

	if (current_trace->use_max_tr) {
		ret = ring_buffer_resize(max_tr.buffer, global_trace.entries);
		if (ret < 0)
			goto out;
		max_tr.entries = global_trace.entries;
	}

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
  }
  
  static ssize_t
  tracing_set_trace_write(struct file *filp, const char __user *ubuf,
  			size_t cnt, loff_t *ppos)
  {
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
  }
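
/*
 * "tracing_max_latency" is exposed in microseconds, while the stored
 * value is in nanoseconds; hence the conversions below.
 */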
  
  static ssize_t
  tracing_max_lat_read(struct file *filp, char __user *ubuf,
  		     size_t cnt, loff_t *ppos)
  {
  	unsigned long *ptr = filp->private_data;
  	char buf[64];
  	int r;
	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
  }
  
  static ssize_t
  tracing_max_lat_write(struct file *filp, const char __user *ubuf,
  		      size_t cnt, loff_t *ppos)
  {
	unsigned long *ptr = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;
  
  	*ptr = val * 1000;
  
  	return cnt;
  }
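
/*
 * "trace_pipe" is a consuming reader: entries are removed from the ring
 * buffer as they are read, and readers block until data is available
 * (see tracing_wait_pipe() below).
 */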
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	long cpu_file = (long) inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (tracing_disabled)
		return -ENODEV;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace) {
		ret = -ENOMEM;
		goto fail;
	}
	if (current_trace)
		*iter->trace = *current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	iter->cpu_file = cpu_file;
	iter->tr = &global_trace;
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);
out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	kfree(iter->trace);
	kfree(iter);
	mutex_unlock(&trace_types_lock);
	return ret;
  }
  
  static int tracing_release_pipe(struct inode *inode, struct file *file)
  {
  	struct trace_iterator *iter = file->private_data;

	mutex_lock(&trace_types_lock);

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter->trace);
	kfree(iter);
  
  	return 0;
  }

static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	if (trace_flags & TRACE_ITER_BLOCK) {
		/*
		 * Always select as readable when in blocking mode
		 */
		return POLLIN | POLLRDNORM;
	} else {
  		if (!trace_empty(iter))
  			return POLLIN | POLLRDNORM;
  		poll_wait(filp, &trace_wait, poll_table);
  		if (!trace_empty(iter))
  			return POLLIN | POLLRDNORM;
  
  		return 0;
  	}
  }
  
  void default_wait_pipe(struct trace_iterator *iter)
  {
  	DEFINE_WAIT(wait);
  
  	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
  
  	if (trace_empty(iter))
  		schedule();
  
  	finish_wait(&trace_wait, &wait);
  }
  
  /*
   * This is a make-shift waitqueue.
   * A tracer might use this callback on some rare cases:
   *
   *  1) the current tracer might hold the runqueue lock when it wakes up
   *     a reader, hence a deadlock (sched, function, and function graph tracers)
 *  2) the function tracers trace all functions, and we don't want
   *     the overhead of calling wake_up and friends
   *     (and tracing them too)
   *
   *     Anyway, this is really very primitive wakeup.
   */
  void poll_wait_pipe(struct trace_iterator *iter)
  {
  	set_current_state(TASK_INTERRUPTIBLE);
  	/* sleep for 100 msecs, and try again. */
  	schedule_timeout(HZ / 10);
  }

/* Must be called with trace_types_lock mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		mutex_unlock(&iter->mutex);

		iter->trace->wait_pipe(iter);

		mutex_lock(&iter->mutex);

		if (signal_pending(current))
			return -EINTR;

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracer_enabled && iter->pos)
			break;
	}

	return 1;
  }
  
  /*
   * Consumer reader.
   */
  static ssize_t
  tracing_read_pipe(struct file *filp, char __user *ubuf,
  		  size_t cnt, loff_t *ppos)
  {
  	struct trace_iterator *iter = filp->private_data;
	static struct tracer *old_tracer;
	ssize_t sret;

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		return sret;

	trace_seq_init(&iter->seq);

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(old_tracer != current_trace && current_trace)) {
		old_tracer = current_trace;
		*iter->trace = *current_trace;
	}
	mutex_unlock(&trace_types_lock);

	/*
	 * Avoid more than one consumer on a single file descriptor.
	 * This is just a matter of traces coherency: the ring buffer itself
	 * is protected.
	 */
	mutex_lock(&iter->mutex);
	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int len = iter->seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.len = len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (iter->seq.len >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.readpos >= iter->seq.len)
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
  }

static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
  				     struct pipe_buffer *buf)
  {
  	__free_page(buf->page);
  }
  
  static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
  				     unsigned int idx)
  {
  	__free_page(spd->pages[idx]);
  }

static const struct pipe_buf_operations tracing_pipe_buf_ops = {
  	.can_merge		= 0,
  	.map			= generic_pipe_buf_map,
  	.unmap			= generic_pipe_buf_unmap,
  	.confirm		= generic_pipe_buf_confirm,
  	.release		= tracing_pipe_buf_release,
  	.steal			= generic_pipe_buf_steal,
  	.get			= generic_pipe_buf_get,
  };

static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
  {
  	size_t count;
  	int ret;
  
  	/* Seq buffer is page-sized, exactly what we need. */
  	for (;;) {
  		count = iter->seq.len;
  		ret = print_trace_line(iter);
  		count = iter->seq.len - count;
  		if (rem < count) {
  			rem = 0;
  			iter->seq.len -= count;
  			break;
  		}
  		if (ret == TRACE_TYPE_PARTIAL_LINE) {
  			iter->seq.len -= count;
  			break;
  		}
74e7ff8c5   Lai Jiangshan   tracing: Fix miss...
3331
3332
  		if (ret != TRACE_TYPE_NO_CONSUME)
  			trace_consume(iter);
34cd4998d   Steven Rostedt   tracing: clean up...
3333
  		rem -= count;
955b61e59   Jason Wessel   ftrace,kdb: Exten...
3334
  		if (!trace_find_next_entry_inc(iter))	{
34cd4998d   Steven Rostedt   tracing: clean up...
3335
3336
3337
3338
3339
3340
3341
3342
  			rem = 0;
  			iter->ent = NULL;
  			break;
  		}
  	}
  
  	return rem;
  }

static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.flags		= flags,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	static struct tracer *old_tracer;
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(old_tracer != current_trace && current_trace)) {
		old_tracer = current_trace;
		*iter->trace = *current_trace;
	}
	mutex_unlock(&trace_types_lock);

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  iter->seq.len);
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = iter->seq.len;

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(pipe, &spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}

static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[96];
	int r;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		r = sprintf(buf, "%lu (expanded: %lu)\n",
			    tr->entries >> 10,
			    trace_buf_size >> 10);
	else
		r = sprintf(buf, "%lu\n", tr->entries >> 10);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;

	ret = tracing_resize_ring_buffer(val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
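
/*
 * Illustrative usage (not part of this file): buffer_size_kb accepts a
 * per-cpu size in kilobytes, which tracing_entries_write() above shifts
 * into bytes before resizing. A minimal userspace sketch, assuming debugfs
 * is mounted at /sys/kernel/debug; the value is only an example:
 */
#if 0	/* userspace sketch, kept out of the kernel build */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/tracing/buffer_size_kb", O_WRONLY);

	if (fd < 0)
		return 1;
	/* Request 1408 KB per CPU; reading back shows the current size. */
	if (write(fd, "1408", 4) != 4)
		return 1;
	close(fd);
	return 0;
}
#endif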

static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
				size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += tr->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written, this function
	 * is just to make sure that there is no error when "echo" is used
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	/* disable tracing ? */
	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracing_off();
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(0);

	return 0;
}
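
/*
 * Illustrative usage (not part of this file): the work happens on release,
 * not on write. Closing free_buffer shrinks the ring buffer to zero, and
 * with the stop-on-free option set it also calls tracing_off(). A minimal
 * userspace sketch, assuming debugfs is mounted at /sys/kernel/debug:
 */
#if 0	/* userspace sketch, kept out of the kernel build */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/tracing/free_buffer", O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, "1", 1);	/* accepted but ignored, so "echo" works */
	close(fd);		/* triggers tracing_free_buffer_release() */
	return 0;
}
#endif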

static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
					size_t cnt, loff_t *fpos)
{
	unsigned long addr = (unsigned long)ubuf;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	struct page *pages[2];
	int nr_pages = 1;
	ssize_t written;
	void *page1;
	void *page2;
	int offset;
	int size;
	int len;
	int ret;

	if (tracing_disabled)
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	/*
	 * Userspace is injecting traces into the kernel trace buffer.
	 * We want to be as non-intrusive as possible.
	 * To do so, we do not want to allocate any special buffers
	 * or take any locks, but instead write the userspace data
	 * straight into the ring buffer.
	 *
	 * First we need to pin the userspace buffer into memory.
	 * It most likely is already resident, because the caller just
	 * referenced it, but there's no guarantee that it is. By using
	 * get_user_pages_fast() and kmap_atomic/kunmap_atomic() we can
	 * get access to the pages directly. We then write the data
	 * directly into the ring buffer.
	 */
	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	/* check if we cross pages */
	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
		nr_pages = 2;

	offset = addr & (PAGE_SIZE - 1);
	addr &= PAGE_MASK;

	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
	if (ret < nr_pages) {
		while (--ret >= 0)
			put_page(pages[ret]);
		written = -EFAULT;
		goto out;
	}

	page1 = kmap_atomic(pages[0]);
	if (nr_pages == 2)
		page2 = kmap_atomic(pages[1]);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* possible '\n' added */
	buffer = global_trace.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, preempt_count());
	if (!event) {
		/* Ring buffer disabled, return as if not open for write */
		written = -EBADF;
		goto out_unlock;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	if (nr_pages == 2) {
		len = PAGE_SIZE - offset;
		memcpy(&entry->buf, page1 + offset, len);
		memcpy(&entry->buf[len], page2, cnt - len);
	} else
		memcpy(&entry->buf, page1 + offset, cnt);

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	ring_buffer_unlock_commit(buffer, event);

	written = cnt;

	*fpos += written;

 out_unlock:
	if (nr_pages == 2)
		kunmap_atomic(page2);
	kunmap_atomic(page1);
	while (nr_pages > 0)
		put_page(pages[--nr_pages]);
 out:
	return written;
}
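
/*
 * Illustrative usage (not part of this file): each write to trace_marker
 * becomes one TRACE_PRINT entry, injected by tracing_mark_write() above
 * without copying through an intermediate buffer. A minimal userspace
 * sketch, assuming debugfs is mounted at /sys/kernel/debug:
 */
#if 0	/* userspace sketch, kept out of the kernel build */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *msg = "hello from userspace";
	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);

	if (fd < 0)
		return 1;
	/* The entry shows up in the trace; a '\n' is appended if missing. */
	write(fd, msg, strlen(msg));
	close(fd);
	return 0;
}
#endif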

static int tracing_clock_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == trace_clock_id ? "[" : "", trace_clocks[i].name,
			i == trace_clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	char buf[64];
	const char *clockstr;
	int i;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	trace_clock_id = i;

	mutex_lock(&trace_types_lock);

	ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func);
	if (max_tr.buffer)
		ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);

	mutex_unlock(&trace_types_lock);

	*fpos += cnt;

	return cnt;
}

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	if (tracing_disabled)
		return -ENODEV;
	return single_open(file, tracing_clock_show, NULL);
}
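
/*
 * Illustrative usage (not part of this file): reading trace_clock lists the
 * available clocks with the current one in brackets, e.g. "[local] global";
 * writing a name switches both the global and max buffers. A minimal
 * userspace sketch, assuming debugfs is mounted at /sys/kernel/debug and
 * that a clock named "global" is present in trace_clocks:
 */
#if 0	/* userspace sketch, kept out of the kernel build */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/tracing/trace_clock", O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, "global", 6);	/* tracing_clock_write() strips whitespace */
	close(fd);
	return 0;
}
#endif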

static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_ctrl_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_ctrl_read,
	.write		= tracing_ctrl_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_free_buffer_fops = {
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= tracing_clock_write,
};

struct ftrace_buffer_info {
	struct trace_array	*tr;
	void			*spare;
	int			cpu;
	unsigned int		read;
};

static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	int cpu = (int)(long)inode->i_private;
	struct ftrace_buffer_info *info;

	if (tracing_disabled)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->tr	= &global_trace;
	info->cpu	= cpu;
	info->spare	= NULL;
	/* Force reading ring buffer for first read */
	info->read	= (unsigned int)-1;

	filp->private_data = info;

	return nonseekable_open(inode, filp);
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	ssize_t ret;
	size_t size;

	if (!count)
		return 0;

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(info->tr->buffer, info->cpu);
	if (!info->spare)
		return -ENOMEM;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

	trace_access_lock(info->cpu);
	ret = ring_buffer_read_page(info->tr->buffer,
				    &info->spare,
				    count,
				    info->cpu, 0);
	trace_access_unlock(info->cpu);
	if (ret < 0)
		return 0;

	info->read = 0;

read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)
		return -EFAULT;
	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;

	if (info->spare)
		ring_buffer_free_read_page(info->tr->buffer, info->spare);
	kfree(info);

	return 0;
}

struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe,
				 struct pipe_buffer *buf)
{
	return 1;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.map			= generic_pipe_buf_map,
	.unmap			= generic_pipe_buf_unmap,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= buffer_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	size_t ret;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	if (*ppos & (PAGE_SIZE - 1)) {
		WARN_ONCE(1, "Ftrace: previous read must page-align\n");
		ret = -EINVAL;
		goto out;
	}

	if (len & (PAGE_SIZE - 1)) {
		WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
		if (len < PAGE_SIZE) {
			ret = -EINVAL;
			goto out;
		}
		len &= PAGE_MASK;
	}

	trace_access_lock(info->cpu);
	entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);

	for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->ref = 1;
		ref->buffer = info->tr->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, info->cpu);
		if (!ref->page) {
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, info->cpu, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * zero out any left over data, this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
	}

	trace_access_unlock(info->cpu);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if (flags & SPLICE_F_NONBLOCK)
			ret = -EAGAIN;
		else
			ret = 0;
		/* TODO: block */
		goto out;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(pipe, &spd);
out:
	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};
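
/*
 * Illustrative usage (not part of this file): trace_pipe_raw hands out raw
 * ring-buffer pages, so both the offset and the length must be page
 * aligned, matching the WARN_ONCE() checks above. A minimal userspace
 * sketch that splices one cpu's pages into a file, assuming debugfs is
 * mounted at /sys/kernel/debug and a 4096-byte page size; "cpu0.raw" is
 * just an example destination:
 */
#if 0	/* userspace sketch, kept out of the kernel build */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int pfd[2];
	int in = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
		      O_RDONLY);
	int out = open("cpu0.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (in < 0 || out < 0 || pipe(pfd) < 0)
		return 1;

	/* Move one page at a time until no more data is available. */
	while (splice(in, NULL, pfd[1], NULL, 4096, SPLICE_F_NONBLOCK) > 0)
		splice(pfd[0], NULL, out, NULL, 4096, 0);

	return 0;
}
#endif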

static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	unsigned long cpu = (unsigned long)filp->private_data;
	struct trace_array *tr = &global_trace;
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(tr->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(tr->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
	usec_rem = do_div(t, USEC_PER_SEC);
	trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem);

	t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
	usec_rem = do_div(t, USEC_PER_SEC);
	trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

	kfree(s);

	return count;
}

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
};
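
/*
 * Illustrative usage (not part of this file): the per-cpu stats file
 * renders the counters printed by tracing_stats_read() above, one
 * "name: value" pair per line. A minimal userspace sketch, assuming
 * debugfs is mounted at /sys/kernel/debug:
 */
#if 0	/* userspace sketch, kept out of the kernel build */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[512];
	ssize_t n;
	int fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/stats",
		      O_RDONLY);

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);	/* e.g. "entries: 42\n..." */
	if (n > 0) {
		buf[n] = '\0';
		printf("%s", buf);
	}
	close(fd);
	return 0;
}
#endif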

#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif

static struct dentry *d_tracer;

struct dentry *tracing_init_dentry(void)
{
	static int once;

	if (d_tracer)
		return d_tracer;

	if (!debugfs_initialized())
		return NULL;

	d_tracer = debugfs_create_dir("tracing", NULL);

	if (!d_tracer && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'tracing'\n");
		return NULL;
	}

	return d_tracer;
}

static struct dentry *d_percpu;

struct dentry *tracing_dentry_percpu(void)
{
	static int once;
	struct dentry *d_tracer;

	if (d_percpu)
		return d_percpu;

	d_tracer = tracing_init_dentry();

	if (!d_tracer)
		return NULL;

	d_percpu = debugfs_create_dir("per_cpu", d_tracer);

	if (!d_percpu && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'per_cpu'\n");
		return NULL;
	}

	return d_percpu;
}

static void tracing_init_debugfs_percpu(long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu();
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_file("trace_pipe", 0444, d_cpu,
			(void *) cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_file("trace", 0644, d_cpu,
			(void *) cpu, &tracing_fops);

	trace_create_file("trace_pipe_raw", 0444, d_cpu,
			(void *) cpu, &tracing_buffers_fops);

	trace_create_file("stats", 0444, d_cpu,
			(void *) cpu, &tracing_stats_fops);
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct dentry			*entry;
};

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(current_trace, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;
	set_tracer_flags(1 << index, val);

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};
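
/*
 * Illustrative usage (not part of this file): each core option gets its own
 * 0/1 file under options/, backed by trace_options_core_write() above. A
 * minimal userspace sketch that turns one option on, assuming debugfs is
 * mounted at /sys/kernel/debug; "stacktrace" is just an example option name:
 */
#if 0	/* userspace sketch, kept out of the kernel build */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/tracing/options/stacktrace", O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, "1", 1);	/* anything other than 0 or 1 gets -EINVAL */
	close(fd);
	return 0;
}
#endif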

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}

static struct dentry *trace_options_init_dentry(void)
{
	struct dentry *d_tracer;
	static struct dentry *t_options;

	if (t_options)
		return t_options;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	t_options = debugfs_create_dir("options", d_tracer);
	if (!t_options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return t_options;
}

static void
create_trace_option_file(struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry();
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
				    &trace_options_fops);

}

static struct trace_option_dentry *
create_trace_option_files(struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(&topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++) {
		if (topts[cnt].entry)
			debugfs_remove(topts[cnt].entry);
	}

	kfree(topts);
}

static struct dentry *
create_trace_option_core_file(const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry();
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				    &trace_options_core_fops);
}

static __init void create_trace_options_dir(void)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry();
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(trace_options[i], i);
}

static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;
	int cpu;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();

	trace_create_file("tracing_enabled", 0644, d_tracer,
			&global_trace, &tracing_ctrl_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			NULL, &tracing_iter_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			NULL, &tracing_cpumask_fops);

	trace_create_file("trace", 0644, d_tracer,
			(void *) TRACE_PIPE_ALL_CPU, &tracing_fops);

	trace_create_file("available_tracers", 0444, d_tracer,
			&global_trace, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			&global_trace, &set_tracer_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			&tracing_max_latency, &tracing_max_lat_fops);
#endif

	trace_create_file("tracing_thresh", 0644, d_tracer,
			&tracing_thresh, &tracing_max_lat_fops);

	trace_create_file("README", 0444, d_tracer,
			NULL, &tracing_readme_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			(void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			&global_trace, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			&global_trace, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0644, d_tracer,
			&global_trace, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			NULL, &tracing_mark_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("trace_clock", 0644, d_tracer, NULL,
			  &trace_clock_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_options_dir();

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(cpu);

	return 0;
}
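
/*
 * Illustrative usage (not part of this file): the files created above form
 * the top-level tracing directory. Selecting a tracer is a plain write to
 * current_tracer, handled by set_tracer_fops. A minimal userspace sketch,
 * assuming debugfs is mounted at /sys/kernel/debug ("nop" is always
 * registered):
 */
#if 0	/* userspace sketch, kept out of the kernel build */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/tracing/current_tracer", O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, "nop", 3);	/* see available_tracers for valid names */
	close(fd);
	return 0;
}
#endif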

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call  = trace_panic_handler,
	.next           = NULL,
	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= 1000)
		s->len = 1000;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = current_trace;
	iter->cpu_file = TRACE_PIPE_ALL_CPU;
}
cecbca96d   Frederic Weisbecker   tracing: Dump eit...
4501
4502
  static void
  __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
3f5a54e37   Steven Rostedt   ftrace: dump out ...
4503
  {
445c89514   Thomas Gleixner   locking: Convert ...
4504
  	static arch_spinlock_t ftrace_dump_lock =
edc35bd72   Thomas Gleixner   locking: Rename _...
4505
  		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
3f5a54e37   Steven Rostedt   ftrace: dump out ...
4506
4507
  	/* use static because iter can be a bit big for the stack */
  	static struct trace_iterator iter;
cf586b61f   Frederic Weisbecker   tracing/function-...
4508
  	unsigned int old_userobj;
3f5a54e37   Steven Rostedt   ftrace: dump out ...
4509
  	static int dump_ran;
d769041f8   Steven Rostedt   ring_buffer: impl...
4510
4511
  	unsigned long flags;
  	int cnt = 0, cpu;
3f5a54e37   Steven Rostedt   ftrace: dump out ...
4512
4513
  
  	/* only one dump */
cd891ae03   Steven Rostedt   tracing: convert ...
4514
  	local_irq_save(flags);
0199c4e68   Thomas Gleixner   locking: Convert ...
4515
  	arch_spin_lock(&ftrace_dump_lock);
3f5a54e37   Steven Rostedt   ftrace: dump out ...
4516
4517
4518
4519
  	if (dump_ran)
  		goto out;
  
  	dump_ran = 1;
0ee6b6cf5   Steven Rostedt   trace: stop all r...
4520
  	tracing_off();
cf586b61f   Frederic Weisbecker   tracing/function-...
4521

e0a413f61   Steven Rostedt   tracing: Warn on ...
4522
4523
4524
4525
4526
4527
4528
  	/* Did function tracer already get disabled? */
  	if (ftrace_is_dead()) {
  		printk("# WARNING: FUNCTION TRACING IS CORRUPTED
  ");
  		printk("#          MAY BE MISSING FUNCTION EVENTS
  ");
  	}
cf586b61f   Frederic Weisbecker   tracing/function-...
4529
4530
  	if (disable_tracing)
  		ftrace_kill();
3f5a54e37   Steven Rostedt   ftrace: dump out ...
4531

955b61e59   Jason Wessel   ftrace,kdb: Exten...
4532
  	trace_init_global_iter(&iter);
d769041f8   Steven Rostedt   ring_buffer: impl...
4533
  	for_each_tracing_cpu(cpu) {
955b61e59   Jason Wessel   ftrace,kdb: Exten...
4534
  		atomic_inc(&iter.tr->data[cpu]->disabled);
d769041f8   Steven Rostedt   ring_buffer: impl...
4535
  	}
cf586b61f   Frederic Weisbecker   tracing/function-...
4536
  	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
b54d3de9f   Török Edwin   tracing: identify...
4537
4538
  	/* don't look at user memory in panic mode */
  	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
e543ad769   Steven Rostedt   tracing: add cpu_...
4539
  	/* Simulate the iterator */
3f5a54e37   Steven Rostedt   ftrace: dump out ...
4540
4541
  	iter.tr = &global_trace;
  	iter.trace = current_trace;
cecbca96d   Frederic Weisbecker   tracing: Dump eit...
4542
4543
4544
4545
4546
4547
4548
4549
4550
4551
4552
4553
4554
4555
4556
4557
4558
4559
  
  	switch (oops_dump_mode) {
  	case DUMP_ALL:
  		iter.cpu_file = TRACE_PIPE_ALL_CPU;
  		break;
  	case DUMP_ORIG:
  		iter.cpu_file = raw_smp_processor_id();
  		break;
  	case DUMP_NONE:
  		goto out_enable;
  	default:
  		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump
  ");
  		iter.cpu_file = TRACE_PIPE_ALL_CPU;
  	}
  
  	printk(KERN_TRACE "Dumping ftrace buffer:
  ");
3f5a54e37   Steven Rostedt   ftrace: dump out ...
4560
4561
4562
4563
4564
4565
4566
  
  	/*
  	 * We need to stop all tracing on all CPUS to read the
  	 * the next buffer. This is a bit expensive, but is
  	 * not done often. We fill all what we can read,
  	 * and then release the locks again.
  	 */
	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;
		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	/* Re-enable tracing if requested */
	if (!disable_tracing) {
		trace_flags |= old_userobj;

		for_each_tracing_cpu(cpu) {
			atomic_dec(&iter.tr->data[cpu]->disabled);
		}
		tracing_on();
	}

 out:
	arch_spin_unlock(&ftrace_dump_lock);
	local_irq_restore(flags);
}

/* By default: disable tracing after the dump */
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	__ftrace_dump(true, oops_dump_mode);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
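
/*
 * Illustrative sketch only (not part of this file): how a caller
 * elsewhere in the kernel might use ftrace_dump() to get a
 * post-mortem trace of a fatal condition.  The function name below
 * is hypothetical; tracing_off() freezes the ring buffer, and
 * ftrace_dump() itself disables tracing once the dump completes.
 */
#if 0
static void example_dump_on_fatal_error(void)
{
	tracing_off();		/* stop recording, keep existing entries */
	ftrace_dump(DUMP_ALL);	/* print every CPU's buffer to the console */
}
#endif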

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	enum ring_buffer_flags rb_flags;
	int i;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(tracing_cpumask, cpu_all_mask);

	/* TODO: make the number of buffers hot pluggable with CPUS */
	global_trace.buffer = ring_buffer_alloc(ring_buf_size, rb_flags);
	if (!global_trace.buffer) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_cpumask;
	}
	global_trace.entries = ring_buffer_size(global_trace.buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	max_tr.buffer = ring_buffer_alloc(1, rb_flags);
	if (!max_tr.buffer) {
		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
		WARN_ON(1);
		ring_buffer_free(global_trace.buffer);
		goto out_free_cpumask;
	}
	max_tr.entries = 1;
#endif
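
	/*
	 * Note that max_tr above is allocated with a single entry to
	 * save memory; it is only resized to match the main buffer when
	 * a tracer that actually uses the max-latency snapshot is
	 * enabled.
	 */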

	/* Allocate the first page for all buffers */
	for_each_tracing_cpu(i) {
		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
		max_tr.data[i] = &per_cpu(max_tr_data, i);
	}

	trace_init_cmdlines();

	register_tracer(&nop_trace);
	current_trace = &nop_trace;
	/* All seems OK, enable tracing */
	tracing_disabled = 0;
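
	/*
	 * Register on the panic and die notifier chains so that a crash
	 * can dump the ftrace buffer via ftrace_dump() (typically gated
	 * by the ftrace_dump_on_oops option).
	 */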
	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	return 0;

out_free_cpumask:
	free_cpumask_var(tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer lives in an init section and will
	 * be freed when boot finishes.  This function runs as a late
	 * initcall: if the boot tracer never got registered, clear the
	 * pointer so that a later registration cannot touch the buffer
	 * that is about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);
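
/*
 * Initcall ordering matters here: tracer_alloc_buffers() runs early
 * so the ring buffer exists before any tracer registers,
 * tracer_init_debugfs() waits for the filesystem infrastructure
 * (fs_initcall) to create the debugfs control files, and
 * clear_boot_tracer() runs last, shortly before the init sections it
 * guards against are freed.
 */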