Commit e2b8b2808538a91444e78c7db5a30519cadd09b2

Authored by Ingo Molnar

Merge branch 'tip/tracing/ftrace' of git://git.kernel.org/pub/scm/linux/kernel/g…

…it/rostedt/linux-2.6-trace into tracing/ftrace

Showing 14 changed files Side-by-side Diff

include/linux/tracepoint.h
... ... @@ -157,7 +157,110 @@
157 157 #define TRACE_FORMAT(name, proto, args, fmt) \
158 158 DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
159 159  
160   -#define TRACE_EVENT(name, proto, args, struct, print, assign) \
  160 +
  161 +/*
  162 + * For use with the TRACE_EVENT macro:
  163 + *
  164 + * We define a tracepoint, its arguments, its printk format
  165 + * and its 'fast binary record' layout.
  166 + *
  167 + * Firstly, name your tracepoint via TRACE_EVENT(name, ...): the
  168 + * 'subsystem_event' notation is fine.
  169 + *
  170 + * Think about this whole construct as the
  171 + * 'trace_sched_switch() function' from now on.
  172 + *
  173 + *
  174 + * TRACE_EVENT(sched_switch,
  175 + *
  176 + * *
  177 + * * A function has a regular function-arguments
  178 + * * prototype, declare it via TP_PROTO():
  179 + * *
  180 + *
  181 + * TP_PROTO(struct rq *rq, struct task_struct *prev,
  182 + * struct task_struct *next),
  183 + *
  184 + * *
  185 + * * Define the call signature of the 'function'.
  186 + * * (Design sidenote: we use this instead of a
  187 + * * TP_PROTO1/TP_PROTO2/TP_PROTO3 ugliness.)
  188 + * *
  189 + *
  190 + * TP_ARGS(rq, prev, next),
  191 + *
  192 + * *
  193 + * * Fast binary tracing: define the trace record via
  194 + * * TP_STRUCT__entry(). You can think about it like a
  195 + * * regular C structure local variable definition.
  196 + * *
  197 + * * This is how the trace record is structured and will
  198 + * * be saved into the ring buffer. These are the fields
  199 + * * that will be exposed to user-space in
  200 + * * /debug/tracing/events/<*>/format.
  201 + * *
  202 + * * The declared 'local variable' is called '__entry'
  203 + * *
  204 + * * __field(pid_t, prev_pid) is equivalent to a standard declaration:
  205 + * *
  206 + * * pid_t prev_pid;
  207 + * *
  208 + * * __array(char, prev_comm, TASK_COMM_LEN) is equivalent to:
  209 + * *
  210 + * * char prev_comm[TASK_COMM_LEN];
  211 + * *
  212 + *
  213 + * TP_STRUCT__entry(
  214 + * __array( char, prev_comm, TASK_COMM_LEN )
  215 + * __field( pid_t, prev_pid )
  216 + * __field( int, prev_prio )
  217 + * __array( char, next_comm, TASK_COMM_LEN )
  218 + * __field( pid_t, next_pid )
  219 + * __field( int, next_prio )
  220 + * ),
  221 + *
  222 + * *
  223 + * * Assign the entry into the trace record, by embedding
  224 + * * a full C statement block into TP_fast_assign(). You
  225 + * * can refer to the trace record as '__entry' -
  226 + * * otherwise you can put arbitrary C code in here.
  227 + * *
  228 + * * Note: this C code will execute every time a trace event
  229 + * * happens, on an active tracepoint.
  230 + * *
  231 + *
  232 + * TP_fast_assign(
  233 + * memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
  234 + * __entry->prev_pid = prev->pid;
  235 + * __entry->prev_prio = prev->prio;
  236 + * memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
  237 + * __entry->next_pid = next->pid;
  238 + * __entry->next_prio = next->prio;
  239 + * )
  240 + *
  241 + * *
  242 + * * Formatted output of a trace record via TP_printk().
  243 + * * This is how the tracepoint will appear under ftrace
  244 + * * plugins that make use of this tracepoint.
  245 + * *
  246 + * * (raw-binary tracing won't actually perform this step.)
  247 + * *
  248 + *
  249 + * TP_printk("task %s:%d [%d] ==> %s:%d [%d]",
  250 + * __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
  251 + * __entry->next_comm, __entry->next_pid, __entry->next_prio),
  252 + *
  253 + * );
  254 + *
  255 + * This macro construct is thus used for the regular printk format
  256 + * tracing setup, it is used to construct a function pointer based
  257 + * tracepoint callback (this is used by programmatic plugins and
  258 + * can also be used by generic instrumentation like SystemTap), and
  259 + * it is also used to expose a structured trace record in
  260 + * /debug/tracing/events/.
  261 + */
  262 +
  263 +#define TRACE_EVENT(name, proto, args, struct, assign, print) \
161 264 DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
162 265  
163 266 #endif
include/trace/irq_event_types.h
... ... @@ -31,13 +31,13 @@
31 31 __field( int, ret )
32 32 ),
33 33  
34   - TP_printk("irq=%d return=%s",
35   - __entry->irq, __entry->ret ? "handled" : "unhandled"),
36   -
37 34 TP_fast_assign(
38 35 __entry->irq = irq;
39 36 __entry->ret = ret;
40   - )
  37 + ),
  38 +
  39 + TP_printk("irq=%d return=%s",
  40 + __entry->irq, __entry->ret ? "handled" : "unhandled")
41 41 );
42 42  
43 43 #undef TRACE_SYSTEM
include/trace/sched_event_types.h
... ... @@ -22,12 +22,12 @@
22 22 __field( pid_t, pid )
23 23 ),
24 24  
25   - TP_printk("task %s:%d", __entry->comm, __entry->pid),
26   -
27 25 TP_fast_assign(
28 26 memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
29 27 __entry->pid = t->pid;
30   - )
  28 + ),
  29 +
  30 + TP_printk("task %s:%d", __entry->comm, __entry->pid)
31 31 );
32 32  
33 33 /*
34 34  
... ... @@ -43,11 +43,11 @@
43 43 __field( int, ret )
44 44 ),
45 45  
46   - TP_printk("ret %d", __entry->ret),
47   -
48 46 TP_fast_assign(
49 47 __entry->ret = ret;
50   - )
  48 + ),
  49 +
  50 + TP_printk("ret %d", __entry->ret)
51 51 );
52 52  
53 53 /*
54 54  
... ... @@ -68,14 +68,14 @@
68 68 __field( int, prio )
69 69 ),
70 70  
71   - TP_printk("task %s:%d [%d]",
72   - __entry->comm, __entry->pid, __entry->prio),
73   -
74 71 TP_fast_assign(
75 72 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
76 73 __entry->pid = p->pid;
77 74 __entry->prio = p->prio;
78   - )
  75 + ),
  76 +
  77 + TP_printk("task %s:%d [%d]",
  78 + __entry->comm, __entry->pid, __entry->prio)
79 79 );
80 80  
81 81 /*
82 82  
... ... @@ -97,16 +97,16 @@
97 97 __field( int, success )
98 98 ),
99 99  
100   - TP_printk("task %s:%d [%d] success=%d",
101   - __entry->comm, __entry->pid, __entry->prio,
102   - __entry->success),
103   -
104 100 TP_fast_assign(
105 101 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
106 102 __entry->pid = p->pid;
107 103 __entry->prio = p->prio;
108 104 __entry->success = success;
109   - )
  105 + ),
  106 +
  107 + TP_printk("task %s:%d [%d] success=%d",
  108 + __entry->comm, __entry->pid, __entry->prio,
  109 + __entry->success)
110 110 );
111 111  
112 112 /*
113 113  
... ... @@ -128,16 +128,16 @@
128 128 __field( int, success )
129 129 ),
130 130  
131   - TP_printk("task %s:%d [%d] success=%d",
132   - __entry->comm, __entry->pid, __entry->prio,
133   - __entry->success),
134   -
135 131 TP_fast_assign(
136 132 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
137 133 __entry->pid = p->pid;
138 134 __entry->prio = p->prio;
139 135 __entry->success = success;
140   - )
  136 + ),
  137 +
  138 + TP_printk("task %s:%d [%d] success=%d",
  139 + __entry->comm, __entry->pid, __entry->prio,
  140 + __entry->success)
141 141 );
142 142  
143 143 /*
... ... @@ -162,10 +162,6 @@
162 162 __field( int, next_prio )
163 163 ),
164 164  
165   - TP_printk("task %s:%d [%d] ==> %s:%d [%d]",
166   - __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
167   - __entry->next_comm, __entry->next_pid, __entry->next_prio),
168   -
169 165 TP_fast_assign(
170 166 memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
171 167 __entry->prev_pid = prev->pid;
... ... @@ -173,7 +169,11 @@
173 169 memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
174 170 __entry->next_pid = next->pid;
175 171 __entry->next_prio = next->prio;
176   - )
  172 + ),
  173 +
  174 + TP_printk("task %s:%d [%d] ==> %s:%d [%d]",
  175 + __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
  176 + __entry->next_comm, __entry->next_pid, __entry->next_prio)
177 177 );
178 178  
179 179 /*
180 180  
... ... @@ -193,17 +193,17 @@
193 193 __field( int, dest_cpu )
194 194 ),
195 195  
196   - TP_printk("task %s:%d [%d] from: %d to: %d",
197   - __entry->comm, __entry->pid, __entry->prio,
198   - __entry->orig_cpu, __entry->dest_cpu),
199   -
200 196 TP_fast_assign(
201 197 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
202 198 __entry->pid = p->pid;
203 199 __entry->prio = p->prio;
204 200 __entry->orig_cpu = orig_cpu;
205 201 __entry->dest_cpu = dest_cpu;
206   - )
  202 + ),
  203 +
  204 + TP_printk("task %s:%d [%d] from: %d to: %d",
  205 + __entry->comm, __entry->pid, __entry->prio,
  206 + __entry->orig_cpu, __entry->dest_cpu)
207 207 );
208 208  
209 209 /*
210 210  
... ... @@ -221,14 +221,14 @@
221 221 __field( int, prio )
222 222 ),
223 223  
224   - TP_printk("task %s:%d [%d]",
225   - __entry->comm, __entry->pid, __entry->prio),
226   -
227 224 TP_fast_assign(
228 225 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
229 226 __entry->pid = p->pid;
230 227 __entry->prio = p->prio;
231   - )
  228 + ),
  229 +
  230 + TP_printk("task %s:%d [%d]",
  231 + __entry->comm, __entry->pid, __entry->prio)
232 232 );
233 233  
234 234 /*
235 235  
... ... @@ -246,14 +246,14 @@
246 246 __field( int, prio )
247 247 ),
248 248  
249   - TP_printk("task %s:%d [%d]",
250   - __entry->comm, __entry->pid, __entry->prio),
251   -
252 249 TP_fast_assign(
253 250 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
254 251 __entry->pid = p->pid;
255 252 __entry->prio = p->prio;
256   - )
  253 + ),
  254 +
  255 + TP_printk("task %s:%d [%d]",
  256 + __entry->comm, __entry->pid, __entry->prio)
257 257 );
258 258  
259 259 /*
260 260  
... ... @@ -271,14 +271,14 @@
271 271 __field( int, prio )
272 272 ),
273 273  
274   - TP_printk("task %s:%d [%d]",
275   - __entry->comm, __entry->pid, __entry->prio),
276   -
277 274 TP_fast_assign(
278 275 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
279 276 __entry->pid = pid_nr(pid);
280 277 __entry->prio = current->prio;
281   - )
  278 + ),
  279 +
  280 + TP_printk("task %s:%d [%d]",
  281 + __entry->comm, __entry->pid, __entry->prio)
282 282 );
283 283  
284 284 /*
285 285  
... ... @@ -297,16 +297,16 @@
297 297 __field( pid_t, child_pid )
298 298 ),
299 299  
300   - TP_printk("parent %s:%d child %s:%d",
301   - __entry->parent_comm, __entry->parent_pid,
302   - __entry->child_comm, __entry->child_pid),
303   -
304 300 TP_fast_assign(
305 301 memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
306 302 __entry->parent_pid = parent->pid;
307 303 memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
308 304 __entry->child_pid = child->pid;
309   - )
  305 + ),
  306 +
  307 + TP_printk("parent %s:%d child %s:%d",
  308 + __entry->parent_comm, __entry->parent_pid,
  309 + __entry->child_comm, __entry->child_pid)
310 310 );
311 311  
312 312 /*
313 313  
... ... @@ -324,14 +324,14 @@
324 324 __field( pid_t, pid )
325 325 ),
326 326  
327   - TP_printk("sig: %d task %s:%d",
328   - __entry->sig, __entry->comm, __entry->pid),
329   -
330 327 TP_fast_assign(
331 328 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
332 329 __entry->pid = p->pid;
333 330 __entry->sig = sig;
334   - )
  331 + ),
  332 +
  333 + TP_printk("sig: %d task %s:%d",
  334 + __entry->sig, __entry->comm, __entry->pid)
335 335 );
336 336  
337 337 #undef TRACE_SYSTEM
kernel/trace/blktrace.c
... ... @@ -33,7 +33,7 @@
33 33 static int __read_mostly blk_tracer_enabled;
34 34  
35 35 /* Select an alternative, minimalistic output than the original one */
36   -#define TRACE_BLK_OPT_CLASSIC 0x1
  36 +#define TRACE_BLK_OPT_CLASSIC 0x1
37 37  
38 38 static struct tracer_opt blk_tracer_opts[] = {
39 39 /* Default disable the minimalistic output */
... ... @@ -564,7 +564,7 @@
564 564 /**
565 565 * blk_trace_ioctl: - handle the ioctls associated with tracing
566 566 * @bdev: the block device
567   - * @cmd: the ioctl cmd
  567 + * @cmd: the ioctl cmd
568 568 * @arg: the argument data, if any
569 569 *
570 570 **/
571 571  
... ... @@ -1128,9 +1128,9 @@
1128 1128  
1129 1129 static struct {
1130 1130 const char *act[2];
1131   - int (*print)(struct trace_seq *s, const struct trace_entry *ent);
  1131 + int (*print)(struct trace_seq *s, const struct trace_entry *ent);
1132 1132 } what2act[] __read_mostly = {
1133   - [__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic },
  1133 + [__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic },
1134 1134 [__BLK_TA_BACKMERGE] = {{ "M", "backmerge" }, blk_log_generic },
1135 1135 [__BLK_TA_FRONTMERGE] = {{ "F", "frontmerge" }, blk_log_generic },
1136 1136 [__BLK_TA_GETRQ] = {{ "G", "getrq" }, blk_log_generic },
... ... @@ -1229,7 +1229,7 @@
1229 1229 };
1230 1230  
1231 1231 static struct trace_event trace_blk_event = {
1232   - .type = TRACE_BLK,
  1232 + .type = TRACE_BLK,
1233 1233 .trace = blk_trace_event_print,
1234 1234 .binary = blk_trace_event_print_binary,
1235 1235 };
kernel/trace/trace.c
... ... @@ -799,7 +799,7 @@
799 799  
800 800 entry->preempt_count = pc & 0xff;
801 801 entry->pid = (tsk) ? tsk->pid : 0;
802   - entry->tgid = (tsk) ? tsk->tgid : 0;
  802 + entry->tgid = (tsk) ? tsk->tgid : 0;
803 803 entry->flags =
804 804 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
805 805 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
kernel/trace/trace_branch.c
... ... @@ -157,7 +157,7 @@
157 157  
158 158  
159 159 static struct trace_event trace_branch_event = {
160   - .type = TRACE_BRANCH,
  160 + .type = TRACE_BRANCH,
161 161 .trace = trace_branch_print,
162 162 };
163 163  
kernel/trace/trace_events.c
... ... @@ -102,7 +102,7 @@
102 102 mutex_lock(&event_mutex);
103 103 events_for_each(call) {
104 104  
105   - if (!call->name)
  105 + if (!call->name || !call->regfunc)
106 106 continue;
107 107  
108 108 if (match &&
109 109  
... ... @@ -207,9 +207,21 @@
207 207  
208 208 (*pos)++;
209 209  
210   - if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
211   - return NULL;
  210 + for (;;) {
  211 + if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
  212 + return NULL;
212 213  
  214 + /*
  215 + * The ftrace subsystem is for showing formats only.
  216 + * They can not be enabled or disabled via the event files.
  217 + */
  218 + if (call->regfunc)
  219 + break;
  220 +
  221 + call++;
  222 + next = call;
  223 + }
  224 +
213 225 m->private = ++next;
214 226  
215 227 return call;
... ... @@ -338,8 +350,7 @@
338 350  
339 351 #undef FIELD
340 352 #define FIELD(type, name) \
341   - #type, #name, (unsigned int)offsetof(typeof(field), name), \
342   - (unsigned int)sizeof(field.name)
  353 + #type, #name, offsetof(typeof(field), name), sizeof(field.name)
343 354  
344 355 static int trace_write_header(struct trace_seq *s)
345 356 {
... ... @@ -347,11 +358,11 @@
347 358  
348 359 /* struct trace_entry */
349 360 return trace_seq_printf(s,
350   - "\tfield:%s %s;\toffset:%u;\tsize:%u;\n"
351   - "\tfield:%s %s;\toffset:%u;\tsize:%u;\n"
352   - "\tfield:%s %s;\toffset:%u;\tsize:%u;\n"
353   - "\tfield:%s %s;\toffset:%u;\tsize:%u;\n"
354   - "\tfield:%s %s;\toffset:%u;\tsize:%u;\n"
  361 + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
  362 + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
  363 + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
  364 + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
  365 + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
355 366 "\n",
356 367 FIELD(unsigned char, type),
357 368 FIELD(unsigned char, flags),
... ... @@ -417,6 +428,13 @@
417 428 .stop = t_stop,
418 429 };
419 430  
  431 +static const struct file_operations ftrace_avail_fops = {
  432 + .open = ftrace_event_seq_open,
  433 + .read = seq_read,
  434 + .llseek = seq_lseek,
  435 + .release = seq_release,
  436 +};
  437 +
420 438 static const struct file_operations ftrace_set_event_fops = {
421 439 .open = ftrace_event_seq_open,
422 440 .read = seq_read,
... ... @@ -557,6 +575,13 @@
557 575 d_tracer = tracing_init_dentry();
558 576 if (!d_tracer)
559 577 return 0;
  578 +
  579 + entry = debugfs_create_file("available_events", 0444, d_tracer,
  580 + (void *)&show_event_seq_ops,
  581 + &ftrace_avail_fops);
  582 + if (!entry)
  583 + pr_warning("Could not create debugfs "
  584 + "'available_events' entry\n");
560 585  
561 586 entry = debugfs_create_file("set_event", 0644, d_tracer,
562 587 (void *)&show_set_event_seq_ops,
kernel/trace/trace_events_stage_1.h
... ... @@ -6,11 +6,13 @@
6 6 * struct ftrace_raw_<call> {
7 7 * struct trace_entry ent;
8 8 * <type> <item>;
  9 + * <type2> <item2>[<len>];
9 10 * [...]
10 11 * };
11 12 *
12   - * The <type> <item> is created by the TRACE_FIELD(type, item, assign)
13   - * macro. We simply do "type item;", and that will create the fields
  13 + * The <type> <item> is created by the __field(type, item) macro or
  14 + * the __array(type2, item2, len) macro.
  15 + * We simply do "type item;", and that will create the fields
14 16 * in the structure.
15 17 */
16 18  
... ... @@ -27,7 +29,7 @@
27 29 #define TP_STRUCT__entry(args...) args
28 30  
29 31 #undef TRACE_EVENT
30   -#define TRACE_EVENT(name, proto, args, tstruct, print, assign) \
  32 +#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
31 33 struct ftrace_raw_##name { \
32 34 struct trace_entry ent; \
33 35 tstruct \
kernel/trace/trace_events_stage_2.h
... ... @@ -20,7 +20,7 @@
20 20 *
21 21 * field = (typeof(field))entry;
22 22 *
23   - * ret = trace_seq_printf(s, <TP_RAW_FMT> "%s", <ARGS> "\n");
  23 + * ret = trace_seq_printf(s, <TP_printk> "\n");
24 24 * if (!ret)
25 25 * return TRACE_TYPE_PARTIAL_LINE;
26 26 *
... ... @@ -39,7 +39,7 @@
39 39 #define TP_printk(fmt, args...) fmt "\n", args
40 40  
41 41 #undef TRACE_EVENT
42   -#define TRACE_EVENT(call, proto, args, tstruct, print, assign) \
  42 +#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
43 43 enum print_line_t \
44 44 ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
45 45 { \
... ... @@ -76,10 +76,9 @@
76 76 * int ret;
77 77 *
78 78 * ret = trace_seq_printf(s, #type " " #item ";"
79   - * " size:%d; offset:%d;\n",
80   - * sizeof(field.type),
81   - * offsetof(struct ftrace_raw_##call,
82   - * item));
  79 + * " offset:%u; size:%u;\n",
  80 + * offsetof(struct ftrace_raw_##call, item),
  81 + * sizeof(field.type));
83 82 *
84 83 * }
85 84 */
... ... @@ -115,7 +114,7 @@
115 114 #define TP_fast_assign(args...) args
116 115  
117 116 #undef TRACE_EVENT
118   -#define TRACE_EVENT(call, proto, args, tstruct, print, func) \
  117 +#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
119 118 static int \
120 119 ftrace_format_##call(struct trace_seq *s) \
121 120 { \
kernel/trace/trace_events_stage_3.h
... ... @@ -5,23 +5,23 @@
5 5 *
6 6 * static void ftrace_event_<call>(proto)
7 7 * {
8   - * event_trace_printk(_RET_IP_, "<call>: " <fmt>);
  8 + * event_trace_printk(_RET_IP_, "<call>: " <fmt>);
9 9 * }
10 10 *
11 11 * static int ftrace_reg_event_<call>(void)
12 12 * {
13   - * int ret;
  13 + * int ret;
14 14 *
15   - * ret = register_trace_<call>(ftrace_event_<call>);
16   - * if (!ret)
17   - * pr_info("event trace: Could not activate trace point "
18   - * "probe to <call>");
19   - * return ret;
  15 + * ret = register_trace_<call>(ftrace_event_<call>);
  16 + * if (!ret)
  17 + * pr_info("event trace: Could not activate trace point "
  18 + * "probe to <call>");
  19 + * return ret;
20 20 * }
21 21 *
22 22 * static void ftrace_unreg_event_<call>(void)
23 23 * {
24   - * unregister_trace_<call>(ftrace_event_<call>);
  24 + * unregister_trace_<call>(ftrace_event_<call>);
25 25 * }
26 26 *
27 27 * For those macros defined with TRACE_FORMAT:
... ... @@ -29,9 +29,9 @@
29 29 * static struct ftrace_event_call __used
30 30 * __attribute__((__aligned__(4)))
31 31 * __attribute__((section("_ftrace_events"))) event_<call> = {
32   - * .name = "<call>",
33   - * .regfunc = ftrace_reg_event_<call>,
34   - * .unregfunc = ftrace_unreg_event_<call>,
  32 + * .name = "<call>",
  33 + * .regfunc = ftrace_reg_event_<call>,
  34 + * .unregfunc = ftrace_unreg_event_<call>,
35 35 * }
36 36 *
37 37 *
38 38  
39 39  
40 40  
41 41  
42 42  
43 43  
44 44  
45 45  
46 46  
47 47  
48 48  
... ... @@ -41,66 +41,66 @@
41 41 *
42 42 * static void ftrace_raw_event_<call>(proto)
43 43 * {
44   - * struct ring_buffer_event *event;
45   - * struct ftrace_raw_<call> *entry; <-- defined in stage 1
46   - * unsigned long irq_flags;
47   - * int pc;
  44 + * struct ring_buffer_event *event;
  45 + * struct ftrace_raw_<call> *entry; <-- defined in stage 1
  46 + * unsigned long irq_flags;
  47 + * int pc;
48 48 *
49   - * local_save_flags(irq_flags);
50   - * pc = preempt_count();
  49 + * local_save_flags(irq_flags);
  50 + * pc = preempt_count();
51 51 *
52   - * event = trace_current_buffer_lock_reserve(event_<call>.id,
53   - * sizeof(struct ftrace_raw_<call>),
54   - * irq_flags, pc);
55   - * if (!event)
56   - * return;
57   - * entry = ring_buffer_event_data(event);
  52 + * event = trace_current_buffer_lock_reserve(event_<call>.id,
  53 + * sizeof(struct ftrace_raw_<call>),
  54 + * irq_flags, pc);
  55 + * if (!event)
  56 + * return;
  57 + * entry = ring_buffer_event_data(event);
58 58 *
59   - * <tstruct>; <-- Here we assign the entries by the TRACE_FIELD.
  59 + * <assign>; <-- Here we assign the entries by the __field and
  60 + * __array macros.
60 61 *
61   - * trace_current_buffer_unlock_commit(event, irq_flags, pc);
  62 + * trace_current_buffer_unlock_commit(event, irq_flags, pc);
62 63 * }
63 64 *
64 65 * static int ftrace_raw_reg_event_<call>(void)
65 66 * {
66   - * int ret;
  67 + * int ret;
67 68 *
68   - * ret = register_trace_<call>(ftrace_raw_event_<call>);
69   - * if (!ret)
70   - * pr_info("event trace: Could not activate trace point "
71   - * "probe to <call>");
72   - * return ret;
  69 + * ret = register_trace_<call>(ftrace_raw_event_<call>);
  70 + * if (!ret)
  71 + * pr_info("event trace: Could not activate trace point "
  72 + * "probe to <call>");
  73 + * return ret;
73 74 * }
74 75 *
75 76 * static void ftrace_unreg_event_<call>(void)
76 77 * {
77   - * unregister_trace_<call>(ftrace_raw_event_<call>);
  78 + * unregister_trace_<call>(ftrace_raw_event_<call>);
78 79 * }
79 80 *
80 81 * static struct trace_event ftrace_event_type_<call> = {
81   - * .trace = ftrace_raw_output_<call>, <-- stage 2
  82 + * .trace = ftrace_raw_output_<call>, <-- stage 2
82 83 * };
83 84 *
84 85 * static int ftrace_raw_init_event_<call>(void)
85 86 * {
86   - * int id;
  87 + * int id;
87 88 *
88   - * id = register_ftrace_event(&ftrace_event_type_<call>);
89   - * if (!id)
90   - * return -ENODEV;
91   - * event_<call>.id = id;
92   - * return 0;
  89 + * id = register_ftrace_event(&ftrace_event_type_<call>);
  90 + * if (!id)
  91 + * return -ENODEV;
  92 + * event_<call>.id = id;
  93 + * return 0;
93 94 * }
94 95 *
95 96 * static struct ftrace_event_call __used
96 97 * __attribute__((__aligned__(4)))
97 98 * __attribute__((section("_ftrace_events"))) event_<call> = {
98   - * .name = "<call>",
99   - * .regfunc = ftrace_reg_event_<call>,
100   - * .unregfunc = ftrace_unreg_event_<call>,
101   - * .raw_init = ftrace_raw_init_event_<call>,
102   - * .raw_reg = ftrace_raw_reg_event_<call>,
103   - * .raw_unreg = ftrace_raw_unreg_event_<call>,
  99 + * .name = "<call>",
  100 + * .system = "<system>",
  101 + * .raw_init = ftrace_raw_init_event_<call>,
  102 + * .regfunc = ftrace_reg_event_<call>,
  103 + * .unregfunc = ftrace_unreg_event_<call>,
104 104 * .show_format = ftrace_format_<call>,
105 105 * }
106 106 *
... ... @@ -138,7 +138,7 @@
138 138 static struct ftrace_event_call __used \
139 139 __attribute__((__aligned__(4))) \
140 140 __attribute__((section("_ftrace_events"))) event_##call = { \
141   - .name = #call, \
  141 + .name = #call, \
142 142 .system = __stringify(TRACE_SYSTEM), \
143 143 .regfunc = ftrace_reg_event_##call, \
144 144 .unregfunc = ftrace_unreg_event_##call, \
... ... @@ -148,7 +148,7 @@
148 148 #define __entry entry
149 149  
150 150 #undef TRACE_EVENT
151   -#define TRACE_EVENT(call, proto, args, tstruct, print, assign) \
  151 +#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
152 152 \
153 153 static struct ftrace_event_call event_##call; \
154 154 \
... ... @@ -163,7 +163,7 @@
163 163 pc = preempt_count(); \
164 164 \
165 165 event = trace_current_buffer_lock_reserve(event_##call.id, \
166   - sizeof(struct ftrace_raw_##call), \
  166 + sizeof(struct ftrace_raw_##call), \
167 167 irq_flags, pc); \
168 168 if (!event) \
169 169 return; \
... ... @@ -208,7 +208,7 @@
208 208 static struct ftrace_event_call __used \
209 209 __attribute__((__aligned__(4))) \
210 210 __attribute__((section("_ftrace_events"))) event_##call = { \
211   - .name = #call, \
  211 + .name = #call, \
212 212 .system = __stringify(TRACE_SYSTEM), \
213 213 .raw_init = ftrace_raw_init_event_##call, \
214 214 .regfunc = ftrace_raw_reg_event_##call, \
kernel/trace/trace_export.c
... ... @@ -94,7 +94,7 @@
94 94 static struct ftrace_event_call __used \
95 95 __attribute__((__aligned__(4))) \
96 96 __attribute__((section("_ftrace_events"))) event_##call = { \
97   - .name = #call, \
  97 + .name = #call, \
98 98 .id = proto, \
99 99 .system = __stringify(TRACE_SYSTEM), \
100 100 .show_format = ftrace_format_##call, \
kernel/trace/trace_functions_graph.c
... ... @@ -841,12 +841,12 @@
841 841 }
842 842  
843 843 static struct tracer graph_trace __read_mostly = {
844   - .name = "function_graph",
  844 + .name = "function_graph",
845 845 .open = graph_trace_open,
846 846 .close = graph_trace_close,
847 847 .wait_pipe = poll_wait_pipe,
848   - .init = graph_trace_init,
849   - .reset = graph_trace_reset,
  848 + .init = graph_trace_init,
  849 + .reset = graph_trace_reset,
850 850 .print_line = print_graph_function,
851 851 .print_header = print_graph_headers,
852 852 .flags = &tracer_flags,
kernel/trace/trace_output.c
... ... @@ -565,7 +565,7 @@
565 565 }
566 566  
567 567 static struct trace_event trace_fn_event = {
568   - .type = TRACE_FN,
  568 + .type = TRACE_FN,
569 569 .trace = trace_fn_trace,
570 570 .raw = trace_fn_raw,
571 571 .hex = trace_fn_hex,
... ... @@ -696,7 +696,7 @@
696 696 }
697 697  
698 698 static struct trace_event trace_ctx_event = {
699   - .type = TRACE_CTX,
  699 + .type = TRACE_CTX,
700 700 .trace = trace_ctx_print,
701 701 .raw = trace_ctx_raw,
702 702 .hex = trace_ctx_hex,
... ... @@ -704,7 +704,7 @@
704 704 };
705 705  
706 706 static struct trace_event trace_wake_event = {
707   - .type = TRACE_WAKE,
  707 + .type = TRACE_WAKE,
708 708 .trace = trace_wake_print,
709 709 .raw = trace_wake_raw,
710 710 .hex = trace_wake_hex,
... ... @@ -759,7 +759,7 @@
759 759 }
760 760  
761 761 static struct trace_event trace_special_event = {
762   - .type = TRACE_SPECIAL,
  762 + .type = TRACE_SPECIAL,
763 763 .trace = trace_special_print,
764 764 .raw = trace_special_print,
765 765 .hex = trace_special_hex,
... ... @@ -796,7 +796,7 @@
796 796 }
797 797  
798 798 static struct trace_event trace_stack_event = {
799   - .type = TRACE_STACK,
  799 + .type = TRACE_STACK,
800 800 .trace = trace_stack_print,
801 801 .raw = trace_special_print,
802 802 .hex = trace_special_hex,
... ... @@ -825,7 +825,7 @@
825 825 }
826 826  
827 827 static struct trace_event trace_user_stack_event = {
828   - .type = TRACE_USER_STACK,
  828 + .type = TRACE_USER_STACK,
829 829 .trace = trace_user_stack_print,
830 830 .raw = trace_special_print,
831 831 .hex = trace_special_hex,
... ... @@ -879,7 +879,7 @@
879 879  
880 880  
881 881 static struct trace_event trace_print_event = {
882   - .type = TRACE_PRINT,
  882 + .type = TRACE_PRINT,
883 883 .trace = trace_print_print,
884 884 .raw = trace_print_raw,
885 885 };
kernel/trace/trace_workqueue.c
... ... @@ -19,14 +19,14 @@
19 19 /* Useful to know if we print the cpu headers */
20 20 bool first_entry;
21 21 int cpu;
22   - pid_t pid;
  22 + pid_t pid;
23 23 /* Can be inserted from interrupt or user context, need to be atomic */
24   - atomic_t inserted;
  24 + atomic_t inserted;
25 25 /*
26 26 * Don't need to be atomic, works are serialized in a single workqueue thread
27 27 * on a single CPU.
28 28 */
29   - unsigned int executed;
  29 + unsigned int executed;
30 30 };
31 31  
32 32 /* List of workqueue threads on one cpu */