Commit f42c85e74faa422cf0bc747ed808681145448f88
Committed by
Steven Rostedt
1 parent
97f2025153
Exists in
master
and in
7 other branches
tracing/events: move the ftrace event tracing code to core
This patch moves the ftrace creation into include/trace/ftrace.h and simplifies the work of developers in adding new tracepoints. Just the act of creating the trace points in include/trace and including define_trace.h will create the events in the debugfs/tracing/events directory. This patch removes the need for include/trace/trace_events.h. Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Showing 8 changed files with 496 additions and 511 deletions Side-by-side Diff
include/trace/define_trace.h
include/trace/ftrace.h
1 | +/* | |
2 | + * Stage 1 of the trace events. | |
3 | + * | |
4 | + * Override the macros in <trace/trace_events.h> to include the following: | |
5 | + * | |
6 | + * struct ftrace_raw_<call> { | |
7 | + * struct trace_entry ent; | |
8 | + * <type> <item>; | |
9 | + * <type2> <item2>[<len>]; | |
10 | + * [...] | |
11 | + * }; | |
12 | + * | |
13 | + * The <type> <item> is created by the __field(type, item) macro or | |
14 | + * the __array(type2, item2, len) macro. | |
15 | + * We simply do "type item;", and that will create the fields | |
16 | + * in the structure. | |
17 | + */ | |
18 | + | |
19 | +#include <linux/ftrace_event.h> | |
20 | + | |
21 | +#undef TRACE_FORMAT | |
22 | +#define TRACE_FORMAT(call, proto, args, fmt) | |
23 | + | |
24 | +#undef __array | |
25 | +#define __array(type, item, len) type item[len]; | |
26 | + | |
27 | +#undef __field | |
28 | +#define __field(type, item) type item; | |
29 | + | |
30 | +#undef TP_STRUCT__entry | |
31 | +#define TP_STRUCT__entry(args...) args | |
32 | + | |
33 | +#undef TRACE_EVENT | |
34 | +#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ | |
35 | + struct ftrace_raw_##name { \ | |
36 | + struct trace_entry ent; \ | |
37 | + tstruct \ | |
38 | + }; \ | |
39 | + static struct ftrace_event_call event_##name | |
40 | + | |
41 | +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
42 | + | |
43 | +/* | |
44 | + * Stage 2 of the trace events. | |
45 | + * | |
46 | + * Override the macros in <trace/trace_events.h> to include the following: | |
47 | + * | |
48 | + * enum print_line_t | |
49 | + * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags) | |
50 | + * { | |
51 | + * struct trace_seq *s = &iter->seq; | |
52 | + * struct ftrace_raw_<call> *field; <-- defined in stage 1 | |
53 | + * struct trace_entry *entry; | |
54 | + * int ret; | |
55 | + * | |
56 | + * entry = iter->ent; | |
57 | + * | |
58 | + * if (entry->type != event_<call>.id) { | |
59 | + * WARN_ON_ONCE(1); | |
60 | + * return TRACE_TYPE_UNHANDLED; | |
61 | + * } | |
62 | + * | |
63 | + * field = (typeof(field))entry; | |
64 | + * | |
65 | + * ret = trace_seq_printf(s, <TP_printk> "\n"); | |
66 | + * if (!ret) | |
67 | + * return TRACE_TYPE_PARTIAL_LINE; | |
68 | + * | |
69 | + * return TRACE_TYPE_HANDLED; | |
70 | + * } | |
71 | + * | |
72 | + * This is the method used to print the raw event to the trace | |
73 | + * output format. Note, this is not needed if the data is read | |
74 | + * in binary. | |
75 | + */ | |
76 | + | |
77 | +#undef __entry | |
78 | +#define __entry field | |
79 | + | |
80 | +#undef TP_printk | |
81 | +#define TP_printk(fmt, args...) fmt "\n", args | |
82 | + | |
83 | +#undef TRACE_EVENT | |
84 | +#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ | |
85 | +enum print_line_t \ | |
86 | +ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ | |
87 | +{ \ | |
88 | + struct trace_seq *s = &iter->seq; \ | |
89 | + struct ftrace_raw_##call *field; \ | |
90 | + struct trace_entry *entry; \ | |
91 | + int ret; \ | |
92 | + \ | |
93 | + entry = iter->ent; \ | |
94 | + \ | |
95 | + if (entry->type != event_##call.id) { \ | |
96 | + WARN_ON_ONCE(1); \ | |
97 | + return TRACE_TYPE_UNHANDLED; \ | |
98 | + } \ | |
99 | + \ | |
100 | + field = (typeof(field))entry; \ | |
101 | + \ | |
102 | + ret = trace_seq_printf(s, #call ": " print); \ | |
103 | + if (!ret) \ | |
104 | + return TRACE_TYPE_PARTIAL_LINE; \ | |
105 | + \ | |
106 | + return TRACE_TYPE_HANDLED; \ | |
107 | +} | |
108 | + | |
109 | +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
110 | + | |
111 | +/* | |
112 | + * Setup the showing format of trace point. | |
113 | + * | |
114 | + * int | |
115 | + * ftrace_format_##call(struct trace_seq *s) | |
116 | + * { | |
117 | + * struct ftrace_raw_##call field; | |
118 | + * int ret; | |
119 | + * | |
120 | + * ret = trace_seq_printf(s, #type " " #item ";" | |
121 | + * " offset:%u; size:%u;\n", | |
122 | + * offsetof(struct ftrace_raw_##call, item), | |
123 | + * sizeof(field.type)); | |
124 | + * | |
125 | + * } | |
126 | + */ | |
127 | + | |
128 | +#undef TP_STRUCT__entry | |
129 | +#define TP_STRUCT__entry(args...) args | |
130 | + | |
131 | +#undef __field | |
132 | +#define __field(type, item) \ | |
133 | + ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ | |
134 | + "offset:%u;\tsize:%u;\n", \ | |
135 | + (unsigned int)offsetof(typeof(field), item), \ | |
136 | + (unsigned int)sizeof(field.item)); \ | |
137 | + if (!ret) \ | |
138 | + return 0; | |
139 | + | |
140 | +#undef __array | |
141 | +#define __array(type, item, len) \ | |
142 | + ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ | |
143 | + "offset:%u;\tsize:%u;\n", \ | |
144 | + (unsigned int)offsetof(typeof(field), item), \ | |
145 | + (unsigned int)sizeof(field.item)); \ | |
146 | + if (!ret) \ | |
147 | + return 0; | |
148 | + | |
149 | +#undef __entry | |
150 | +#define __entry REC | |
151 | + | |
152 | +#undef TP_printk | |
153 | +#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args) | |
154 | + | |
155 | +#undef TP_fast_assign | |
156 | +#define TP_fast_assign(args...) args | |
157 | + | |
158 | +#undef TRACE_EVENT | |
159 | +#define TRACE_EVENT(call, proto, args, tstruct, func, print) \ | |
160 | +static int \ | |
161 | +ftrace_format_##call(struct trace_seq *s) \ | |
162 | +{ \ | |
163 | + struct ftrace_raw_##call field; \ | |
164 | + int ret; \ | |
165 | + \ | |
166 | + tstruct; \ | |
167 | + \ | |
168 | + trace_seq_printf(s, "\nprint fmt: " print); \ | |
169 | + \ | |
170 | + return ret; \ | |
171 | +} | |
172 | + | |
173 | +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
174 | + | |
175 | +#undef __field | |
176 | +#define __field(type, item) \ | |
177 | + ret = trace_define_field(event_call, #type, #item, \ | |
178 | + offsetof(typeof(field), item), \ | |
179 | + sizeof(field.item)); \ | |
180 | + if (ret) \ | |
181 | + return ret; | |
182 | + | |
183 | +#undef __array | |
184 | +#define __array(type, item, len) \ | |
185 | + BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ | |
186 | + ret = trace_define_field(event_call, #type "[" #len "]", #item, \ | |
187 | + offsetof(typeof(field), item), \ | |
188 | + sizeof(field.item)); \ | |
189 | + if (ret) \ | |
190 | + return ret; | |
191 | + | |
192 | +#undef TRACE_EVENT | |
193 | +#define TRACE_EVENT(call, proto, args, tstruct, func, print) \ | |
194 | +int \ | |
195 | +ftrace_define_fields_##call(void) \ | |
196 | +{ \ | |
197 | + struct ftrace_raw_##call field; \ | |
198 | + struct ftrace_event_call *event_call = &event_##call; \ | |
199 | + int ret; \ | |
200 | + \ | |
201 | + __common_field(unsigned char, type); \ | |
202 | + __common_field(unsigned char, flags); \ | |
203 | + __common_field(unsigned char, preempt_count); \ | |
204 | + __common_field(int, pid); \ | |
205 | + __common_field(int, tgid); \ | |
206 | + \ | |
207 | + tstruct; \ | |
208 | + \ | |
209 | + return ret; \ | |
210 | +} | |
211 | + | |
212 | +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
213 | + | |
214 | +/* | |
215 | + * Stage 3 of the trace events. | |
216 | + * | |
217 | + * Override the macros in <trace/trace_events.h> to include the following: | |
218 | + * | |
219 | + * static void ftrace_event_<call>(proto) | |
220 | + * { | |
221 | + * event_trace_printk(_RET_IP_, "<call>: " <fmt>); | |
222 | + * } | |
223 | + * | |
224 | + * static int ftrace_reg_event_<call>(void) | |
225 | + * { | |
226 | + * int ret; | |
227 | + * | |
228 | + * ret = register_trace_<call>(ftrace_event_<call>); | |
 229 | + * if (ret) | |
230 | + * pr_info("event trace: Could not activate trace point " | |
231 | + * "probe to <call>"); | |
232 | + * return ret; | |
233 | + * } | |
234 | + * | |
235 | + * static void ftrace_unreg_event_<call>(void) | |
236 | + * { | |
237 | + * unregister_trace_<call>(ftrace_event_<call>); | |
238 | + * } | |
239 | + * | |
240 | + * For those macros defined with TRACE_FORMAT: | |
241 | + * | |
242 | + * static struct ftrace_event_call __used | |
243 | + * __attribute__((__aligned__(4))) | |
244 | + * __attribute__((section("_ftrace_events"))) event_<call> = { | |
245 | + * .name = "<call>", | |
246 | + * .regfunc = ftrace_reg_event_<call>, | |
247 | + * .unregfunc = ftrace_unreg_event_<call>, | |
248 | + * } | |
249 | + * | |
250 | + * | |
251 | + * For those macros defined with TRACE_EVENT: | |
252 | + * | |
253 | + * static struct ftrace_event_call event_<call>; | |
254 | + * | |
255 | + * static void ftrace_raw_event_<call>(proto) | |
256 | + * { | |
257 | + * struct ring_buffer_event *event; | |
258 | + * struct ftrace_raw_<call> *entry; <-- defined in stage 1 | |
259 | + * unsigned long irq_flags; | |
260 | + * int pc; | |
261 | + * | |
262 | + * local_save_flags(irq_flags); | |
263 | + * pc = preempt_count(); | |
264 | + * | |
265 | + * event = trace_current_buffer_lock_reserve(event_<call>.id, | |
266 | + * sizeof(struct ftrace_raw_<call>), | |
267 | + * irq_flags, pc); | |
268 | + * if (!event) | |
269 | + * return; | |
270 | + * entry = ring_buffer_event_data(event); | |
271 | + * | |
272 | + * <assign>; <-- Here we assign the entries by the __field and | |
273 | + * __array macros. | |
274 | + * | |
275 | + * trace_current_buffer_unlock_commit(event, irq_flags, pc); | |
276 | + * } | |
277 | + * | |
278 | + * static int ftrace_raw_reg_event_<call>(void) | |
279 | + * { | |
280 | + * int ret; | |
281 | + * | |
282 | + * ret = register_trace_<call>(ftrace_raw_event_<call>); | |
 283 | + * if (ret) | |
284 | + * pr_info("event trace: Could not activate trace point " | |
285 | + * "probe to <call>"); | |
286 | + * return ret; | |
287 | + * } | |
288 | + * | |
289 | + * static void ftrace_unreg_event_<call>(void) | |
290 | + * { | |
291 | + * unregister_trace_<call>(ftrace_raw_event_<call>); | |
292 | + * } | |
293 | + * | |
294 | + * static struct trace_event ftrace_event_type_<call> = { | |
295 | + * .trace = ftrace_raw_output_<call>, <-- stage 2 | |
296 | + * }; | |
297 | + * | |
298 | + * static int ftrace_raw_init_event_<call>(void) | |
299 | + * { | |
300 | + * int id; | |
301 | + * | |
302 | + * id = register_ftrace_event(&ftrace_event_type_<call>); | |
303 | + * if (!id) | |
304 | + * return -ENODEV; | |
305 | + * event_<call>.id = id; | |
306 | + * return 0; | |
307 | + * } | |
308 | + * | |
309 | + * static struct ftrace_event_call __used | |
310 | + * __attribute__((__aligned__(4))) | |
311 | + * __attribute__((section("_ftrace_events"))) event_<call> = { | |
312 | + * .name = "<call>", | |
313 | + * .system = "<system>", | |
314 | + * .raw_init = ftrace_raw_init_event_<call>, | |
315 | + * .regfunc = ftrace_reg_event_<call>, | |
316 | + * .unregfunc = ftrace_unreg_event_<call>, | |
317 | + * .show_format = ftrace_format_<call>, | |
318 | + * } | |
319 | + * | |
320 | + */ | |
321 | + | |
322 | +#undef TP_FMT | |
323 | +#define TP_FMT(fmt, args...) fmt "\n", ##args | |
324 | + | |
325 | +#ifdef CONFIG_EVENT_PROFILE | |
326 | +#define _TRACE_PROFILE(call, proto, args) \ | |
327 | +static void ftrace_profile_##call(proto) \ | |
328 | +{ \ | |
329 | + extern void perf_tpcounter_event(int); \ | |
330 | + perf_tpcounter_event(event_##call.id); \ | |
331 | +} \ | |
332 | + \ | |
333 | +static int ftrace_profile_enable_##call(struct ftrace_event_call *call) \ | |
334 | +{ \ | |
335 | + int ret = 0; \ | |
336 | + \ | |
337 | + if (!atomic_inc_return(&call->profile_count)) \ | |
338 | + ret = register_trace_##call(ftrace_profile_##call); \ | |
339 | + \ | |
340 | + return ret; \ | |
341 | +} \ | |
342 | + \ | |
343 | +static void ftrace_profile_disable_##call(struct ftrace_event_call *call) \ | |
344 | +{ \ | |
345 | + if (atomic_add_negative(-1, &call->profile_count)) \ | |
346 | + unregister_trace_##call(ftrace_profile_##call); \ | |
347 | +} | |
348 | + | |
349 | +#define _TRACE_PROFILE_INIT(call) \ | |
350 | + .profile_count = ATOMIC_INIT(-1), \ | |
351 | + .profile_enable = ftrace_profile_enable_##call, \ | |
352 | + .profile_disable = ftrace_profile_disable_##call, | |
353 | + | |
354 | +#else | |
355 | +#define _TRACE_PROFILE(call, proto, args) | |
356 | +#define _TRACE_PROFILE_INIT(call) | |
357 | +#endif | |
358 | + | |
359 | +#define _TRACE_FORMAT(call, proto, args, fmt) \ | |
360 | +static void ftrace_event_##call(proto) \ | |
361 | +{ \ | |
362 | + event_trace_printk(_RET_IP_, #call ": " fmt); \ | |
363 | +} \ | |
364 | + \ | |
365 | +static int ftrace_reg_event_##call(void) \ | |
366 | +{ \ | |
367 | + int ret; \ | |
368 | + \ | |
369 | + ret = register_trace_##call(ftrace_event_##call); \ | |
370 | + if (ret) \ | |
371 | + pr_info("event trace: Could not activate trace point " \ | |
372 | + "probe to " #call "\n"); \ | |
373 | + return ret; \ | |
374 | +} \ | |
375 | + \ | |
376 | +static void ftrace_unreg_event_##call(void) \ | |
377 | +{ \ | |
378 | + unregister_trace_##call(ftrace_event_##call); \ | |
379 | +} \ | |
380 | + \ | |
381 | +static struct ftrace_event_call event_##call; \ | |
382 | + \ | |
383 | +static int ftrace_init_event_##call(void) \ | |
384 | +{ \ | |
385 | + int id; \ | |
386 | + \ | |
387 | + id = register_ftrace_event(NULL); \ | |
388 | + if (!id) \ | |
389 | + return -ENODEV; \ | |
390 | + event_##call.id = id; \ | |
391 | + return 0; \ | |
392 | +} | |
393 | + | |
394 | +#undef TRACE_FORMAT | |
395 | +#define TRACE_FORMAT(call, proto, args, fmt) \ | |
396 | +_TRACE_FORMAT(call, PARAMS(proto), PARAMS(args), PARAMS(fmt)) \ | |
397 | +_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \ | |
398 | +static struct ftrace_event_call __used \ | |
399 | +__attribute__((__aligned__(4))) \ | |
400 | +__attribute__((section("_ftrace_events"))) event_##call = { \ | |
401 | + .name = #call, \ | |
402 | + .system = __stringify(TRACE_SYSTEM), \ | |
403 | + .raw_init = ftrace_init_event_##call, \ | |
404 | + .regfunc = ftrace_reg_event_##call, \ | |
405 | + .unregfunc = ftrace_unreg_event_##call, \ | |
406 | + _TRACE_PROFILE_INIT(call) \ | |
407 | +} | |
408 | + | |
409 | +#undef __entry | |
410 | +#define __entry entry | |
411 | + | |
412 | +#undef TRACE_EVENT | |
413 | +#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ | |
414 | +_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \ | |
415 | + \ | |
416 | +static struct ftrace_event_call event_##call; \ | |
417 | + \ | |
418 | +static void ftrace_raw_event_##call(proto) \ | |
419 | +{ \ | |
420 | + struct ftrace_event_call *call = &event_##call; \ | |
421 | + struct ring_buffer_event *event; \ | |
422 | + struct ftrace_raw_##call *entry; \ | |
423 | + unsigned long irq_flags; \ | |
424 | + int pc; \ | |
425 | + \ | |
426 | + local_save_flags(irq_flags); \ | |
427 | + pc = preempt_count(); \ | |
428 | + \ | |
429 | + event = trace_current_buffer_lock_reserve(event_##call.id, \ | |
430 | + sizeof(struct ftrace_raw_##call), \ | |
431 | + irq_flags, pc); \ | |
432 | + if (!event) \ | |
433 | + return; \ | |
434 | + entry = ring_buffer_event_data(event); \ | |
435 | + \ | |
436 | + assign; \ | |
437 | + \ | |
438 | + if (!filter_current_check_discard(call, entry, event)) \ | |
439 | + trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \ | |
440 | +} \ | |
441 | + \ | |
442 | +static int ftrace_raw_reg_event_##call(void) \ | |
443 | +{ \ | |
444 | + int ret; \ | |
445 | + \ | |
446 | + ret = register_trace_##call(ftrace_raw_event_##call); \ | |
447 | + if (ret) \ | |
448 | + pr_info("event trace: Could not activate trace point " \ | |
449 | + "probe to " #call "\n"); \ | |
450 | + return ret; \ | |
451 | +} \ | |
452 | + \ | |
453 | +static void ftrace_raw_unreg_event_##call(void) \ | |
454 | +{ \ | |
455 | + unregister_trace_##call(ftrace_raw_event_##call); \ | |
456 | +} \ | |
457 | + \ | |
458 | +static struct trace_event ftrace_event_type_##call = { \ | |
459 | + .trace = ftrace_raw_output_##call, \ | |
460 | +}; \ | |
461 | + \ | |
462 | +static int ftrace_raw_init_event_##call(void) \ | |
463 | +{ \ | |
464 | + int id; \ | |
465 | + \ | |
466 | + id = register_ftrace_event(&ftrace_event_type_##call); \ | |
467 | + if (!id) \ | |
468 | + return -ENODEV; \ | |
469 | + event_##call.id = id; \ | |
470 | + INIT_LIST_HEAD(&event_##call.fields); \ | |
471 | + init_preds(&event_##call); \ | |
472 | + return 0; \ | |
473 | +} \ | |
474 | + \ | |
475 | +static struct ftrace_event_call __used \ | |
476 | +__attribute__((__aligned__(4))) \ | |
477 | +__attribute__((section("_ftrace_events"))) event_##call = { \ | |
478 | + .name = #call, \ | |
479 | + .system = __stringify(TRACE_SYSTEM), \ | |
480 | + .raw_init = ftrace_raw_init_event_##call, \ | |
481 | + .regfunc = ftrace_raw_reg_event_##call, \ | |
482 | + .unregfunc = ftrace_raw_unreg_event_##call, \ | |
483 | + .show_format = ftrace_format_##call, \ | |
484 | + .define_fields = ftrace_define_fields_##call, \ | |
485 | + _TRACE_PROFILE_INIT(call) \ | |
486 | +} | |
487 | + | |
488 | +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
489 | + | |
490 | +#undef _TRACE_PROFILE | |
491 | +#undef _TRACE_PROFILE_INIT |
include/trace/trace_events.h
kernel/trace/Makefile
... | ... | @@ -41,7 +41,6 @@ |
41 | 41 | obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o |
42 | 42 | obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o |
43 | 43 | obj-$(CONFIG_EVENT_TRACING) += trace_events.o |
44 | -obj-$(CONFIG_EVENT_TRACER) += events.o | |
45 | 44 | obj-$(CONFIG_EVENT_TRACING) += trace_export.o |
46 | 45 | obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o |
47 | 46 | obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o |
kernel/trace/events.c
1 | -/* | |
2 | - * This is the place to register all trace points as events. | |
3 | - */ | |
4 | - | |
5 | -#include <linux/stringify.h> | |
6 | - | |
7 | -#include <trace/trace_events.h> | |
8 | - | |
9 | -#include "trace_output.h" | |
10 | - | |
11 | -#define TRACE_HEADER_MULTI_READ | |
12 | -#include "trace_events_stage_1.h" | |
13 | -#include "trace_events_stage_2.h" | |
14 | -#include "trace_events_stage_3.h" |
kernel/trace/trace_events_stage_1.h
1 | -/* | |
2 | - * Stage 1 of the trace events. | |
3 | - * | |
4 | - * Override the macros in <trace/trace_events.h> to include the following: | |
5 | - * | |
6 | - * struct ftrace_raw_<call> { | |
7 | - * struct trace_entry ent; | |
8 | - * <type> <item>; | |
9 | - * <type2> <item2>[<len>]; | |
10 | - * [...] | |
11 | - * }; | |
12 | - * | |
13 | - * The <type> <item> is created by the __field(type, item) macro or | |
14 | - * the __array(type2, item2, len) macro. | |
15 | - * We simply do "type item;", and that will create the fields | |
16 | - * in the structure. | |
17 | - */ | |
18 | - | |
19 | -#undef TRACE_FORMAT | |
20 | -#define TRACE_FORMAT(call, proto, args, fmt) | |
21 | - | |
22 | -#undef __array | |
23 | -#define __array(type, item, len) type item[len]; | |
24 | - | |
25 | -#undef __field | |
26 | -#define __field(type, item) type item; | |
27 | - | |
28 | -#undef TP_STRUCT__entry | |
29 | -#define TP_STRUCT__entry(args...) args | |
30 | - | |
31 | -#undef TRACE_EVENT | |
32 | -#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ | |
33 | - struct ftrace_raw_##name { \ | |
34 | - struct trace_entry ent; \ | |
35 | - tstruct \ | |
36 | - }; \ | |
37 | - static struct ftrace_event_call event_##name | |
38 | - | |
39 | -#include <trace/trace_events.h> |
kernel/trace/trace_events_stage_2.h
1 | -/* | |
2 | - * Stage 2 of the trace events. | |
3 | - * | |
4 | - * Override the macros in <trace/trace_events.h> to include the following: | |
5 | - * | |
6 | - * enum print_line_t | |
7 | - * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags) | |
8 | - * { | |
9 | - * struct trace_seq *s = &iter->seq; | |
10 | - * struct ftrace_raw_<call> *field; <-- defined in stage 1 | |
11 | - * struct trace_entry *entry; | |
12 | - * int ret; | |
13 | - * | |
14 | - * entry = iter->ent; | |
15 | - * | |
16 | - * if (entry->type != event_<call>.id) { | |
17 | - * WARN_ON_ONCE(1); | |
18 | - * return TRACE_TYPE_UNHANDLED; | |
19 | - * } | |
20 | - * | |
21 | - * field = (typeof(field))entry; | |
22 | - * | |
23 | - * ret = trace_seq_printf(s, <TP_printk> "\n"); | |
24 | - * if (!ret) | |
25 | - * return TRACE_TYPE_PARTIAL_LINE; | |
26 | - * | |
27 | - * return TRACE_TYPE_HANDLED; | |
28 | - * } | |
29 | - * | |
30 | - * This is the method used to print the raw event to the trace | |
31 | - * output format. Note, this is not needed if the data is read | |
32 | - * in binary. | |
33 | - */ | |
34 | - | |
35 | -#undef __entry | |
36 | -#define __entry field | |
37 | - | |
38 | -#undef TP_printk | |
39 | -#define TP_printk(fmt, args...) fmt "\n", args | |
40 | - | |
41 | -#undef TRACE_EVENT | |
42 | -#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ | |
43 | -enum print_line_t \ | |
44 | -ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ | |
45 | -{ \ | |
46 | - struct trace_seq *s = &iter->seq; \ | |
47 | - struct ftrace_raw_##call *field; \ | |
48 | - struct trace_entry *entry; \ | |
49 | - int ret; \ | |
50 | - \ | |
51 | - entry = iter->ent; \ | |
52 | - \ | |
53 | - if (entry->type != event_##call.id) { \ | |
54 | - WARN_ON_ONCE(1); \ | |
55 | - return TRACE_TYPE_UNHANDLED; \ | |
56 | - } \ | |
57 | - \ | |
58 | - field = (typeof(field))entry; \ | |
59 | - \ | |
60 | - ret = trace_seq_printf(s, #call ": " print); \ | |
61 | - if (!ret) \ | |
62 | - return TRACE_TYPE_PARTIAL_LINE; \ | |
63 | - \ | |
64 | - return TRACE_TYPE_HANDLED; \ | |
65 | -} | |
66 | - | |
67 | -#include <trace/trace_events.h> | |
68 | - | |
69 | -/* | |
70 | - * Setup the showing format of trace point. | |
71 | - * | |
72 | - * int | |
73 | - * ftrace_format_##call(struct trace_seq *s) | |
74 | - * { | |
75 | - * struct ftrace_raw_##call field; | |
76 | - * int ret; | |
77 | - * | |
78 | - * ret = trace_seq_printf(s, #type " " #item ";" | |
79 | - * " offset:%u; size:%u;\n", | |
80 | - * offsetof(struct ftrace_raw_##call, item), | |
81 | - * sizeof(field.type)); | |
82 | - * | |
83 | - * } | |
84 | - */ | |
85 | - | |
86 | -#undef TP_STRUCT__entry | |
87 | -#define TP_STRUCT__entry(args...) args | |
88 | - | |
89 | -#undef __field | |
90 | -#define __field(type, item) \ | |
91 | - ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ | |
92 | - "offset:%u;\tsize:%u;\n", \ | |
93 | - (unsigned int)offsetof(typeof(field), item), \ | |
94 | - (unsigned int)sizeof(field.item)); \ | |
95 | - if (!ret) \ | |
96 | - return 0; | |
97 | - | |
98 | -#undef __array | |
99 | -#define __array(type, item, len) \ | |
100 | - ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ | |
101 | - "offset:%u;\tsize:%u;\n", \ | |
102 | - (unsigned int)offsetof(typeof(field), item), \ | |
103 | - (unsigned int)sizeof(field.item)); \ | |
104 | - if (!ret) \ | |
105 | - return 0; | |
106 | - | |
107 | -#undef __entry | |
108 | -#define __entry REC | |
109 | - | |
110 | -#undef TP_printk | |
111 | -#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args) | |
112 | - | |
113 | -#undef TP_fast_assign | |
114 | -#define TP_fast_assign(args...) args | |
115 | - | |
116 | -#undef TRACE_EVENT | |
117 | -#define TRACE_EVENT(call, proto, args, tstruct, func, print) \ | |
118 | -static int \ | |
119 | -ftrace_format_##call(struct trace_seq *s) \ | |
120 | -{ \ | |
121 | - struct ftrace_raw_##call field; \ | |
122 | - int ret; \ | |
123 | - \ | |
124 | - tstruct; \ | |
125 | - \ | |
126 | - trace_seq_printf(s, "\nprint fmt: " print); \ | |
127 | - \ | |
128 | - return ret; \ | |
129 | -} | |
130 | - | |
131 | -#include <trace/trace_events.h> | |
132 | - | |
133 | -#undef __field | |
134 | -#define __field(type, item) \ | |
135 | - ret = trace_define_field(event_call, #type, #item, \ | |
136 | - offsetof(typeof(field), item), \ | |
137 | - sizeof(field.item)); \ | |
138 | - if (ret) \ | |
139 | - return ret; | |
140 | - | |
141 | -#undef __array | |
142 | -#define __array(type, item, len) \ | |
143 | - BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ | |
144 | - ret = trace_define_field(event_call, #type "[" #len "]", #item, \ | |
145 | - offsetof(typeof(field), item), \ | |
146 | - sizeof(field.item)); \ | |
147 | - if (ret) \ | |
148 | - return ret; | |
149 | - | |
150 | -#undef TRACE_EVENT | |
151 | -#define TRACE_EVENT(call, proto, args, tstruct, func, print) \ | |
152 | -int \ | |
153 | -ftrace_define_fields_##call(void) \ | |
154 | -{ \ | |
155 | - struct ftrace_raw_##call field; \ | |
156 | - struct ftrace_event_call *event_call = &event_##call; \ | |
157 | - int ret; \ | |
158 | - \ | |
159 | - __common_field(unsigned char, type); \ | |
160 | - __common_field(unsigned char, flags); \ | |
161 | - __common_field(unsigned char, preempt_count); \ | |
162 | - __common_field(int, pid); \ | |
163 | - __common_field(int, tgid); \ | |
164 | - \ | |
165 | - tstruct; \ | |
166 | - \ | |
167 | - return ret; \ | |
168 | -} | |
169 | - | |
170 | -#include <trace/trace_events.h> |
kernel/trace/trace_events_stage_3.h
1 | -/* | |
2 | - * Stage 3 of the trace events. | |
3 | - * | |
4 | - * Override the macros in <trace/trace_events.h> to include the following: | |
5 | - * | |
6 | - * static void ftrace_event_<call>(proto) | |
7 | - * { | |
8 | - * event_trace_printk(_RET_IP_, "<call>: " <fmt>); | |
9 | - * } | |
10 | - * | |
11 | - * static int ftrace_reg_event_<call>(void) | |
12 | - * { | |
13 | - * int ret; | |
14 | - * | |
15 | - * ret = register_trace_<call>(ftrace_event_<call>); | |
16 | - * if (!ret) | |
17 | - * pr_info("event trace: Could not activate trace point " | |
18 | - * "probe to <call>"); | |
19 | - * return ret; | |
20 | - * } | |
21 | - * | |
22 | - * static void ftrace_unreg_event_<call>(void) | |
23 | - * { | |
24 | - * unregister_trace_<call>(ftrace_event_<call>); | |
25 | - * } | |
26 | - * | |
27 | - * For those macros defined with TRACE_FORMAT: | |
28 | - * | |
29 | - * static struct ftrace_event_call __used | |
30 | - * __attribute__((__aligned__(4))) | |
31 | - * __attribute__((section("_ftrace_events"))) event_<call> = { | |
32 | - * .name = "<call>", | |
33 | - * .regfunc = ftrace_reg_event_<call>, | |
34 | - * .unregfunc = ftrace_unreg_event_<call>, | |
35 | - * } | |
36 | - * | |
37 | - * | |
38 | - * For those macros defined with TRACE_EVENT: | |
39 | - * | |
40 | - * static struct ftrace_event_call event_<call>; | |
41 | - * | |
42 | - * static void ftrace_raw_event_<call>(proto) | |
43 | - * { | |
44 | - * struct ring_buffer_event *event; | |
45 | - * struct ftrace_raw_<call> *entry; <-- defined in stage 1 | |
46 | - * unsigned long irq_flags; | |
47 | - * int pc; | |
48 | - * | |
49 | - * local_save_flags(irq_flags); | |
50 | - * pc = preempt_count(); | |
51 | - * | |
52 | - * event = trace_current_buffer_lock_reserve(event_<call>.id, | |
53 | - * sizeof(struct ftrace_raw_<call>), | |
54 | - * irq_flags, pc); | |
55 | - * if (!event) | |
56 | - * return; | |
57 | - * entry = ring_buffer_event_data(event); | |
58 | - * | |
59 | - * <assign>; <-- Here we assign the entries by the __field and | |
60 | - * __array macros. | |
61 | - * | |
62 | - * trace_current_buffer_unlock_commit(event, irq_flags, pc); | |
63 | - * } | |
64 | - * | |
65 | - * static int ftrace_raw_reg_event_<call>(void) | |
66 | - * { | |
67 | - * int ret; | |
68 | - * | |
69 | - * ret = register_trace_<call>(ftrace_raw_event_<call>); | |
70 | - * if (!ret) | |
71 | - * pr_info("event trace: Could not activate trace point " | |
72 | - * "probe to <call>"); | |
73 | - * return ret; | |
74 | - * } | |
75 | - * | |
76 | - * static void ftrace_unreg_event_<call>(void) | |
77 | - * { | |
78 | - * unregister_trace_<call>(ftrace_raw_event_<call>); | |
79 | - * } | |
80 | - * | |
81 | - * static struct trace_event ftrace_event_type_<call> = { | |
82 | - * .trace = ftrace_raw_output_<call>, <-- stage 2 | |
83 | - * }; | |
84 | - * | |
85 | - * static int ftrace_raw_init_event_<call>(void) | |
86 | - * { | |
87 | - * int id; | |
88 | - * | |
89 | - * id = register_ftrace_event(&ftrace_event_type_<call>); | |
90 | - * if (!id) | |
91 | - * return -ENODEV; | |
92 | - * event_<call>.id = id; | |
93 | - * return 0; | |
94 | - * } | |
95 | - * | |
96 | - * static struct ftrace_event_call __used | |
97 | - * __attribute__((__aligned__(4))) | |
98 | - * __attribute__((section("_ftrace_events"))) event_<call> = { | |
99 | - * .name = "<call>", | |
100 | - * .system = "<system>", | |
101 | - * .raw_init = ftrace_raw_init_event_<call>, | |
102 | - * .regfunc = ftrace_reg_event_<call>, | |
103 | - * .unregfunc = ftrace_unreg_event_<call>, | |
104 | - * .show_format = ftrace_format_<call>, | |
105 | - * } | |
106 | - * | |
107 | - */ | |
108 | - | |
109 | -#undef TP_FMT | |
110 | -#define TP_FMT(fmt, args...) fmt "\n", ##args | |
111 | - | |
112 | -#ifdef CONFIG_EVENT_PROFILE | |
113 | -#define _TRACE_PROFILE(call, proto, args) \ | |
114 | -static void ftrace_profile_##call(proto) \ | |
115 | -{ \ | |
116 | - extern void perf_tpcounter_event(int); \ | |
117 | - perf_tpcounter_event(event_##call.id); \ | |
118 | -} \ | |
119 | - \ | |
120 | -static int ftrace_profile_enable_##call(struct ftrace_event_call *call) \ | |
121 | -{ \ | |
122 | - int ret = 0; \ | |
123 | - \ | |
124 | - if (!atomic_inc_return(&call->profile_count)) \ | |
125 | - ret = register_trace_##call(ftrace_profile_##call); \ | |
126 | - \ | |
127 | - return ret; \ | |
128 | -} \ | |
129 | - \ | |
130 | -static void ftrace_profile_disable_##call(struct ftrace_event_call *call) \ | |
131 | -{ \ | |
132 | - if (atomic_add_negative(-1, &call->profile_count)) \ | |
133 | - unregister_trace_##call(ftrace_profile_##call); \ | |
134 | -} | |
135 | - | |
136 | -#define _TRACE_PROFILE_INIT(call) \ | |
137 | - .profile_count = ATOMIC_INIT(-1), \ | |
138 | - .profile_enable = ftrace_profile_enable_##call, \ | |
139 | - .profile_disable = ftrace_profile_disable_##call, | |
140 | - | |
141 | -#else | |
142 | -#define _TRACE_PROFILE(call, proto, args) | |
143 | -#define _TRACE_PROFILE_INIT(call) | |
144 | -#endif | |
145 | - | |
146 | -#define _TRACE_FORMAT(call, proto, args, fmt) \ | |
147 | -static void ftrace_event_##call(proto) \ | |
148 | -{ \ | |
149 | - event_trace_printk(_RET_IP_, #call ": " fmt); \ | |
150 | -} \ | |
151 | - \ | |
152 | -static int ftrace_reg_event_##call(void) \ | |
153 | -{ \ | |
154 | - int ret; \ | |
155 | - \ | |
156 | - ret = register_trace_##call(ftrace_event_##call); \ | |
157 | - if (ret) \ | |
158 | - pr_info("event trace: Could not activate trace point " \ | |
159 | - "probe to " #call "\n"); \ | |
160 | - return ret; \ | |
161 | -} \ | |
162 | - \ | |
163 | -static void ftrace_unreg_event_##call(void) \ | |
164 | -{ \ | |
165 | - unregister_trace_##call(ftrace_event_##call); \ | |
166 | -} \ | |
167 | - \ | |
168 | -static struct ftrace_event_call event_##call; \ | |
169 | - \ | |
170 | -static int ftrace_init_event_##call(void) \ | |
171 | -{ \ | |
172 | - int id; \ | |
173 | - \ | |
174 | - id = register_ftrace_event(NULL); \ | |
175 | - if (!id) \ | |
176 | - return -ENODEV; \ | |
177 | - event_##call.id = id; \ | |
178 | - return 0; \ | |
179 | -} | |
180 | - | |
181 | -#undef TRACE_FORMAT | |
182 | -#define TRACE_FORMAT(call, proto, args, fmt) \ | |
183 | -_TRACE_FORMAT(call, PARAMS(proto), PARAMS(args), PARAMS(fmt)) \ | |
184 | -_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \ | |
185 | -static struct ftrace_event_call __used \ | |
186 | -__attribute__((__aligned__(4))) \ | |
187 | -__attribute__((section("_ftrace_events"))) event_##call = { \ | |
188 | - .name = #call, \ | |
189 | - .system = __stringify(TRACE_SYSTEM), \ | |
190 | - .raw_init = ftrace_init_event_##call, \ | |
191 | - .regfunc = ftrace_reg_event_##call, \ | |
192 | - .unregfunc = ftrace_unreg_event_##call, \ | |
193 | - _TRACE_PROFILE_INIT(call) \ | |
194 | -} | |
195 | - | |
196 | -#undef __entry | |
197 | -#define __entry entry | |
198 | - | |
199 | -#undef TRACE_EVENT | |
200 | -#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ | |
201 | -_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \ | |
202 | - \ | |
203 | -static struct ftrace_event_call event_##call; \ | |
204 | - \ | |
205 | -static void ftrace_raw_event_##call(proto) \ | |
206 | -{ \ | |
207 | - struct ftrace_event_call *call = &event_##call; \ | |
208 | - struct ring_buffer_event *event; \ | |
209 | - struct ftrace_raw_##call *entry; \ | |
210 | - unsigned long irq_flags; \ | |
211 | - int pc; \ | |
212 | - \ | |
213 | - local_save_flags(irq_flags); \ | |
214 | - pc = preempt_count(); \ | |
215 | - \ | |
216 | - event = trace_current_buffer_lock_reserve(event_##call.id, \ | |
217 | - sizeof(struct ftrace_raw_##call), \ | |
218 | - irq_flags, pc); \ | |
219 | - if (!event) \ | |
220 | - return; \ | |
221 | - entry = ring_buffer_event_data(event); \ | |
222 | - \ | |
223 | - assign; \ | |
224 | - \ | |
225 | - if (!filter_current_check_discard(call, entry, event)) \ | |
226 | - trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \ | |
227 | -} \ | |
228 | - \ | |
229 | -static int ftrace_raw_reg_event_##call(void) \ | |
230 | -{ \ | |
231 | - int ret; \ | |
232 | - \ | |
233 | - ret = register_trace_##call(ftrace_raw_event_##call); \ | |
234 | - if (ret) \ | |
235 | - pr_info("event trace: Could not activate trace point " \ | |
236 | - "probe to " #call "\n"); \ | |
237 | - return ret; \ | |
238 | -} \ | |
239 | - \ | |
240 | -static void ftrace_raw_unreg_event_##call(void) \ | |
241 | -{ \ | |
242 | - unregister_trace_##call(ftrace_raw_event_##call); \ | |
243 | -} \ | |
244 | - \ | |
245 | -static struct trace_event ftrace_event_type_##call = { \ | |
246 | - .trace = ftrace_raw_output_##call, \ | |
247 | -}; \ | |
248 | - \ | |
249 | -static int ftrace_raw_init_event_##call(void) \ | |
250 | -{ \ | |
251 | - int id; \ | |
252 | - \ | |
253 | - id = register_ftrace_event(&ftrace_event_type_##call); \ | |
254 | - if (!id) \ | |
255 | - return -ENODEV; \ | |
256 | - event_##call.id = id; \ | |
257 | - INIT_LIST_HEAD(&event_##call.fields); \ | |
258 | - init_preds(&event_##call); \ | |
259 | - return 0; \ | |
260 | -} \ | |
261 | - \ | |
262 | -static struct ftrace_event_call __used \ | |
263 | -__attribute__((__aligned__(4))) \ | |
264 | -__attribute__((section("_ftrace_events"))) event_##call = { \ | |
265 | - .name = #call, \ | |
266 | - .system = __stringify(TRACE_SYSTEM), \ | |
267 | - .raw_init = ftrace_raw_init_event_##call, \ | |
268 | - .regfunc = ftrace_raw_reg_event_##call, \ | |
269 | - .unregfunc = ftrace_raw_unreg_event_##call, \ | |
270 | - .show_format = ftrace_format_##call, \ | |
271 | - .define_fields = ftrace_define_fields_##call, \ | |
272 | - _TRACE_PROFILE_INIT(call) \ | |
273 | -} | |
274 | - | |
275 | -#include <trace/trace_events.h> | |
276 | - | |
277 | -#undef _TRACE_PROFILE | |
278 | -#undef _TRACE_PROFILE_INIT |