Blame view
kernel/trace/trace_events.c
61.2 KB
b77e38aa2
|
1 2 3 4 5 |
/* * event tracer * * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com> * |
981d081ec
|
6 7 8 |
* - Added format output of fields of the trace point. * This was based off of work by Tom Zanussi <tzanussi@gmail.com>. * |
b77e38aa2
|
9 |
*/ |
3448bac32
|
10 |
#define pr_fmt(fmt) fmt |
e6187007d
|
11 12 13 |
#include <linux/workqueue.h> #include <linux/spinlock.h> #include <linux/kthread.h> |
b77e38aa2
|
14 15 16 17 |
#include <linux/debugfs.h> #include <linux/uaccess.h> #include <linux/module.h> #include <linux/ctype.h> |
5a0e3ad6a
|
18 |
#include <linux/slab.h> |
e6187007d
|
19 |
#include <linux/delay.h> |
b77e38aa2
|
20 |
|
020e5f85c
|
21 |
#include <asm/setup.h> |
91729ef96
|
22 |
#include "trace_output.h" |
b77e38aa2
|
23 |
|
4e5292ea1
|
24 |
#undef TRACE_SYSTEM |
b628b3e62
|
25 |
#define TRACE_SYSTEM "TRACE_SYSTEM" |
20c8928ab
|
26 |
DEFINE_MUTEX(event_mutex); |
11a241a33
|
27 |
|
a59fd6027
|
28 |
LIST_HEAD(ftrace_events); |
b3a8c6fd7
|
29 |
static LIST_HEAD(ftrace_common_fields); |
a59fd6027
|
30 |
|
d1a291437
|
31 32 33 34 |
#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO) static struct kmem_cache *field_cachep; static struct kmem_cache *file_cachep; |
6e94a7803
|
35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 |
#define SYSTEM_FL_FREE_NAME (1 << 31) static inline int system_refcount(struct event_subsystem *system) { return system->ref_count & ~SYSTEM_FL_FREE_NAME; } static int system_refcount_inc(struct event_subsystem *system) { return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME; } static int system_refcount_dec(struct event_subsystem *system) { return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME; } |
ae63b31e4
|
51 52 53 54 55 56 57 58 59 60 61 62 |
/*
 * Iterate every event file of every trace instance.  These expand to
 * a nested pair of list walks, so `break` only exits the inner loop;
 * use goto to leave both.  Always close with while_for_each_event_file().
 */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct ftrace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}
b3a8c6fd7
|
63 |
static struct list_head * |
2e33af029
|
64 65 66 67 68 69 |
trace_get_fields(struct ftrace_event_call *event_call) { if (!event_call->class->get_fields) return &event_call->class->fields; return event_call->class->get_fields(event_call); } |
b3a8c6fd7
|
70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 |
/* Linear search of a field list by name; NULL when not found. */
static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (strcmp(field->name, name) == 0)
			return field;
	}

	return NULL;
}

/*
 * Look up @name first among the common fields shared by all events,
 * then among the fields specific to @call.
 */
struct ftrace_event_field *
trace_find_event_field(struct ftrace_event_call *call, char *name)
{
	struct ftrace_event_field *field;

	field = __find_event_field(&ftrace_common_fields, name);
	if (!field)
		field = __find_event_field(trace_get_fields(call), name);

	return field;
}
8728fe501
|
96 97 98 |
static int __trace_define_field(struct list_head *head, const char *type, const char *name, int offset, int size, int is_signed, int filter_type) |
cf027f645
|
99 100 |
{ struct ftrace_event_field *field; |
d1a291437
|
101 |
field = kmem_cache_alloc(field_cachep, GFP_TRACE); |
cf027f645
|
102 |
if (!field) |
aaf6ac0f0
|
103 |
return -ENOMEM; |
fe9f57f25
|
104 |
|
92edca073
|
105 106 |
field->name = name; field->type = type; |
fe9f57f25
|
107 |
|
43b51ead3
|
108 109 110 111 |
if (filter_type == FILTER_OTHER) field->filter_type = filter_assign_type(type); else field->filter_type = filter_type; |
cf027f645
|
112 113 |
field->offset = offset; field->size = size; |
a118e4d14
|
114 |
field->is_signed = is_signed; |
aa38e9fc3
|
115 |
|
2e33af029
|
116 |
list_add(&field->link, head); |
cf027f645
|
117 118 |
return 0; |
cf027f645
|
119 |
} |
8728fe501
|
120 121 122 123 124 125 126 127 128 129 130 131 132 133 |
/*
 * Public wrapper: define a field on @call's own field list.
 * Returns 0 on a class-less call (after a WARN) rather than failing,
 * so broken callers don't cascade errors.
 */
int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);

	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);
cf027f645
|
135 |
|
e647d6b31
|
136 |
#define __common_field(type, item) \ |
8728fe501
|
137 138 139 140 141 |
ret = __trace_define_field(&ftrace_common_fields, #type, \ "common_" #item, \ offsetof(typeof(ent), item), \ sizeof(ent.item), \ is_signed_type(type), FILTER_OTHER); \ |
e647d6b31
|
142 143 |
if (ret) \ return ret; |
8728fe501
|
144 |
static int trace_define_common_fields(void) |
e647d6b31
|
145 146 147 148 149 150 151 152 |
{ int ret; struct trace_entry ent; __common_field(unsigned short, type); __common_field(unsigned char, flags); __common_field(unsigned char, preempt_count); __common_field(int, pid); |
e647d6b31
|
153 154 155 |
return ret; } |
ad7067ceb
|
156 |
static void trace_destroy_fields(struct ftrace_event_call *call) |
2df75e415
|
157 158 |
{ struct ftrace_event_field *field, *next; |
2e33af029
|
159 |
struct list_head *head; |
2df75e415
|
160 |
|
2e33af029
|
161 162 |
head = trace_get_fields(call); list_for_each_entry_safe(field, next, head, link) { |
2df75e415
|
163 |
list_del(&field->link); |
d1a291437
|
164 |
kmem_cache_free(field_cachep, field); |
2df75e415
|
165 166 |
} } |
87d9b4e1c
|
167 168 169 |
int trace_event_raw_init(struct ftrace_event_call *call) { int id; |
80decc70a
|
170 |
id = register_ftrace_event(&call->event); |
87d9b4e1c
|
171 172 |
if (!id) return -ENODEV; |
87d9b4e1c
|
173 174 175 176 |
return 0; } EXPORT_SYMBOL_GPL(trace_event_raw_init); |
3fd40d1ee
|
177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 |
void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer, struct ftrace_event_file *ftrace_file, unsigned long len) { struct ftrace_event_call *event_call = ftrace_file->event_call; local_save_flags(fbuffer->flags); fbuffer->pc = preempt_count(); fbuffer->ftrace_file = ftrace_file; fbuffer->event = trace_event_buffer_lock_reserve(&fbuffer->buffer, ftrace_file, event_call->event.type, len, fbuffer->flags, fbuffer->pc); if (!fbuffer->event) return NULL; fbuffer->entry = ring_buffer_event_data(fbuffer->event); return fbuffer->entry; } EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve); |
0daa23029
|
198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 |
static DEFINE_SPINLOCK(tracepoint_iter_lock); static void output_printk(struct ftrace_event_buffer *fbuffer) { struct ftrace_event_call *event_call; struct trace_event *event; unsigned long flags; struct trace_iterator *iter = tracepoint_print_iter; if (!iter) return; event_call = fbuffer->ftrace_file->event_call; if (!event_call || !event_call->event.funcs || !event_call->event.funcs->trace) return; event = &fbuffer->ftrace_file->event_call->event; spin_lock_irqsave(&tracepoint_iter_lock, flags); trace_seq_init(&iter->seq); iter->ent = fbuffer->entry; event_call->event.funcs->trace(iter, 0, event); trace_seq_putc(&iter->seq, 0); printk("%s", iter->seq.buffer); spin_unlock_irqrestore(&tracepoint_iter_lock, flags); } |
3fd40d1ee
|
226 227 |
void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer) { |
0daa23029
|
228 229 |
if (tracepoint_printk) output_printk(fbuffer); |
3fd40d1ee
|
230 231 232 233 234 |
event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer, fbuffer->event, fbuffer->entry, fbuffer->flags, fbuffer->pc); } EXPORT_SYMBOL_GPL(ftrace_event_buffer_commit); |
ceec0b6fc
|
235 236 |
int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type, void *data) |
a1d0ce821
|
237 |
{ |
ae63b31e4
|
238 |
struct ftrace_event_file *file = data; |
de7b29739
|
239 |
WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT)); |
a1d0ce821
|
240 241 |
switch (type) { case TRACE_REG_REGISTER: |
de7b29739
|
242 |
return tracepoint_probe_register(call->tp, |
a1d0ce821
|
243 |
call->class->probe, |
ae63b31e4
|
244 |
file); |
a1d0ce821
|
245 |
case TRACE_REG_UNREGISTER: |
de7b29739
|
246 |
tracepoint_probe_unregister(call->tp, |
a1d0ce821
|
247 |
call->class->probe, |
ae63b31e4
|
248 |
file); |
a1d0ce821
|
249 250 251 252 |
return 0; #ifdef CONFIG_PERF_EVENTS case TRACE_REG_PERF_REGISTER: |
de7b29739
|
253 |
return tracepoint_probe_register(call->tp, |
a1d0ce821
|
254 255 256 |
call->class->perf_probe, call); case TRACE_REG_PERF_UNREGISTER: |
de7b29739
|
257 |
tracepoint_probe_unregister(call->tp, |
a1d0ce821
|
258 259 260 |
call->class->perf_probe, call); return 0; |
ceec0b6fc
|
261 262 |
case TRACE_REG_PERF_OPEN: case TRACE_REG_PERF_CLOSE: |
489c75c3b
|
263 264 |
case TRACE_REG_PERF_ADD: case TRACE_REG_PERF_DEL: |
ceec0b6fc
|
265 |
return 0; |
a1d0ce821
|
266 267 268 269 270 |
#endif } return 0; } EXPORT_SYMBOL_GPL(ftrace_event_reg); |
e870e9a12
|
271 272 |
/*
 * Globally turn comm (cmdline) recording on or off for every event
 * file that is currently enabled, keeping the per-file RECORDED_CMD
 * bit in sync with the global cmdline-record refcount.
 */
void trace_event_enable_cmd_record(bool enable)
{
	struct ftrace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {

		if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);
}
417944c4c
|
292 293 |
static int __ftrace_event_enable_disable(struct ftrace_event_file *file, int enable, int soft_disable) |
fd9949898
|
294 |
{ |
ae63b31e4
|
295 |
struct ftrace_event_call *call = file->event_call; |
3b8e42738
|
296 |
int ret = 0; |
417944c4c
|
297 |
int disable; |
3b8e42738
|
298 |
|
fd9949898
|
299 300 |
switch (enable) { case 0: |
417944c4c
|
301 |
/* |
1cf4c0732
|
302 303 |
* When soft_disable is set and enable is cleared, the sm_ref * reference counter is decremented. If it reaches 0, we want |
417944c4c
|
304 305 306 307 308 309 310 311 312 313 314 |
* to clear the SOFT_DISABLED flag but leave the event in the * state that it was. That is, if the event was enabled and * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED * is set we do not want the event to be enabled before we * clear the bit. * * When soft_disable is not set but the SOFT_MODE flag is, * we do nothing. Do not disable the tracepoint, otherwise * "soft enable"s (clearing the SOFT_DISABLED bit) wont work. */ if (soft_disable) { |
1cf4c0732
|
315 316 |
if (atomic_dec_return(&file->sm_ref) > 0) break; |
417944c4c
|
317 318 319 320 321 322 323 |
disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED; clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags); } else disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE); if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) { clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags); |
ae63b31e4
|
324 |
if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) { |
e870e9a12
|
325 |
tracing_stop_cmdline_record(); |
417944c4c
|
326 |
clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags); |
e870e9a12
|
327 |
} |
ae63b31e4
|
328 |
call->class->reg(call, TRACE_REG_UNREGISTER, file); |
fd9949898
|
329 |
} |
3baa5e4cf
|
330 |
/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */ |
417944c4c
|
331 332 |
if (file->flags & FTRACE_EVENT_FL_SOFT_MODE) set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); |
3baa5e4cf
|
333 334 |
else clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); |
fd9949898
|
335 336 |
break; case 1: |
417944c4c
|
337 338 339 340 341 342 343 344 345 346 |
/* * When soft_disable is set and enable is set, we want to * register the tracepoint for the event, but leave the event * as is. That means, if the event was already enabled, we do * nothing (but set SOFT_MODE). If the event is disabled, we * set SOFT_DISABLED before enabling the event tracepoint, so * it still seems to be disabled. */ if (!soft_disable) clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); |
1cf4c0732
|
347 348 349 |
else { if (atomic_inc_return(&file->sm_ref) > 1) break; |
417944c4c
|
350 |
set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags); |
1cf4c0732
|
351 |
} |
417944c4c
|
352 |
|
ae63b31e4
|
353 |
if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) { |
417944c4c
|
354 355 356 357 |
/* Keep the event disabled, when going to SOFT_MODE. */ if (soft_disable) set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); |
e870e9a12
|
358 359 |
if (trace_flags & TRACE_ITER_RECORD_CMD) { tracing_start_cmdline_record(); |
417944c4c
|
360 |
set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags); |
e870e9a12
|
361 |
} |
ae63b31e4
|
362 |
ret = call->class->reg(call, TRACE_REG_REGISTER, file); |
3b8e42738
|
363 364 365 |
if (ret) { tracing_stop_cmdline_record(); pr_info("event trace: Could not enable event " |
de7b29739
|
366 367 |
"%s ", ftrace_event_name(call)); |
3b8e42738
|
368 369 |
break; } |
417944c4c
|
370 |
set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags); |
575380da8
|
371 372 373 |
/* WAS_ENABLED gets set but never cleared. */ call->flags |= TRACE_EVENT_FL_WAS_ENABLED; |
fd9949898
|
374 |
} |
fd9949898
|
375 376 |
break; } |
3b8e42738
|
377 378 |
return ret; |
fd9949898
|
379 |
} |
85f2b0826
|
380 381 382 383 384 |
/* Public wrapper around the core enable/disable state machine. */
int trace_event_enable_disable(struct ftrace_event_file *file,
			       int enable, int soft_disable)
{
	return __ftrace_event_enable_disable(file, enable, soft_disable);
}

/* Local helper for plain (non-soft) enable/disable requests. */
static int ftrace_event_enable_disable(struct ftrace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}
ae63b31e4
|
390 |
static void ftrace_clear_events(struct trace_array *tr) |
0e907c993
|
391 |
{ |
ae63b31e4
|
392 |
struct ftrace_event_file *file; |
0e907c993
|
393 394 |
mutex_lock(&event_mutex); |
ae63b31e4
|
395 396 |
list_for_each_entry(file, &tr->events, list) { ftrace_event_enable_disable(file, 0); |
0e907c993
|
397 398 399 |
} mutex_unlock(&event_mutex); } |
e9dbfae53
|
400 401 402 |
static void __put_system(struct event_subsystem *system) { struct event_filter *filter = system->filter; |
6e94a7803
|
403 404 |
WARN_ON_ONCE(system_refcount(system) == 0); if (system_refcount_dec(system)) |
e9dbfae53
|
405 |
return; |
ae63b31e4
|
406 |
list_del(&system->list); |
e9dbfae53
|
407 408 409 410 |
if (filter) { kfree(filter->filter_string); kfree(filter); } |
6e94a7803
|
411 412 |
if (system->ref_count & SYSTEM_FL_FREE_NAME) kfree(system->name); |
e9dbfae53
|
413 414 415 416 417 |
kfree(system); } static void __get_system(struct event_subsystem *system) { |
6e94a7803
|
418 419 |
WARN_ON_ONCE(system_refcount(system) == 0); system_refcount_inc(system); |
e9dbfae53
|
420 |
} |
ae63b31e4
|
421 422 423 424 425 426 427 428 429 430 431 |
static void __get_system_dir(struct ftrace_subsystem_dir *dir) { WARN_ON_ONCE(dir->ref_count == 0); dir->ref_count++; __get_system(dir->subsystem); } static void __put_system_dir(struct ftrace_subsystem_dir *dir) { WARN_ON_ONCE(dir->ref_count == 0); /* If the subsystem is about to be freed, the dir must be too */ |
6e94a7803
|
432 |
WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1); |
ae63b31e4
|
433 434 435 436 437 438 439 |
__put_system(dir->subsystem); if (!--dir->ref_count) kfree(dir); } static void put_system(struct ftrace_subsystem_dir *dir) |
e9dbfae53
|
440 441 |
{ mutex_lock(&event_mutex); |
ae63b31e4
|
442 |
__put_system_dir(dir); |
e9dbfae53
|
443 444 |
mutex_unlock(&event_mutex); } |
f6a84bdc7
|
445 446 447 448 449 450 451 452 453 454 455 |
/*
 * Drop one event from a subsystem directory; when the last event is
 * gone, tear down the debugfs directory and release the dir reference.
 */
static void remove_subsystem(struct ftrace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		debugfs_remove_recursive(dir->entry);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}
f6a84bdc7
|
456 457 |
static void remove_event_file_dir(struct ftrace_event_file *file) { |
bf682c315
|
458 459 460 461 462 |
struct dentry *dir = file->dir; struct dentry *child; if (dir) { spin_lock(&dir->d_lock); /* probably unneeded */ |
946e51f2b
|
463 |
list_for_each_entry(child, &dir->d_subdirs, d_child) { |
bf682c315
|
464 465 466 467 468 469 470 |
if (child->d_inode) /* probably unneeded */ child->d_inode->i_private = NULL; } spin_unlock(&dir->d_lock); debugfs_remove_recursive(dir); } |
f6a84bdc7
|
471 |
list_del(&file->list); |
f6a84bdc7
|
472 |
remove_subsystem(file->system); |
2448e3493
|
473 |
free_event_filter(file->filter); |
f6a84bdc7
|
474 475 |
kmem_cache_free(file_cachep, file); } |
8f31bfe53
|
476 477 478 |
/* * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events. */ |
2a6c24afa
|
479 480 481 |
static int __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match, const char *sub, const char *event, int set) |
b77e38aa2
|
482 |
{ |
ae63b31e4
|
483 |
struct ftrace_event_file *file; |
a59fd6027
|
484 |
struct ftrace_event_call *call; |
de7b29739
|
485 |
const char *name; |
29f93943d
|
486 |
int ret = -EINVAL; |
8f31bfe53
|
487 |
|
ae63b31e4
|
488 489 490 |
list_for_each_entry(file, &tr->events, list) { call = file->event_call; |
de7b29739
|
491 |
name = ftrace_event_name(call); |
8f31bfe53
|
492 |
|
de7b29739
|
493 |
if (!name || !call->class || !call->class->reg) |
8f31bfe53
|
494 |
continue; |
9b63776fa
|
495 496 |
if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) continue; |
8f31bfe53
|
497 |
if (match && |
de7b29739
|
498 |
strcmp(match, name) != 0 && |
8f0820183
|
499 |
strcmp(match, call->class->system) != 0) |
8f31bfe53
|
500 |
continue; |
8f0820183
|
501 |
if (sub && strcmp(sub, call->class->system) != 0) |
8f31bfe53
|
502 |
continue; |
de7b29739
|
503 |
if (event && strcmp(event, name) != 0) |
8f31bfe53
|
504 |
continue; |
ae63b31e4
|
505 |
ftrace_event_enable_disable(file, set); |
8f31bfe53
|
506 507 508 |
ret = 0; } |
2a6c24afa
|
509 510 511 512 513 514 515 516 517 518 519 |
return ret; } static int __ftrace_set_clr_event(struct trace_array *tr, const char *match, const char *sub, const char *event, int set) { int ret; mutex_lock(&event_mutex); ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set); |
8f31bfe53
|
520 521 522 523 |
mutex_unlock(&event_mutex); return ret; } |
ae63b31e4
|
524 |
static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) |
8f31bfe53
|
525 |
{ |
b628b3e62
|
526 |
char *event = NULL, *sub = NULL, *match; |
b628b3e62
|
527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 |
/* * The buf format can be <subsystem>:<event-name> * *:<event-name> means any event by that name. * :<event-name> is the same. * * <subsystem>:* means all events in that subsystem * <subsystem>: means the same. * * <name> (no ':') means all events in a subsystem with * the name <name> or any event that matches <name> */ match = strsep(&buf, ":"); if (buf) { sub = match; event = buf; match = NULL; if (!strlen(sub) || strcmp(sub, "*") == 0) sub = NULL; if (!strlen(event) || strcmp(event, "*") == 0) event = NULL; } |
b77e38aa2
|
551 |
|
ae63b31e4
|
552 |
return __ftrace_set_clr_event(tr, match, sub, event, set); |
b77e38aa2
|
553 |
} |
4671c7940
|
554 555 556 557 558 559 560 561 562 563 564 565 566 567 |
/** * trace_set_clr_event - enable or disable an event * @system: system name to match (NULL for any system) * @event: event name to match (NULL for all events, within system) * @set: 1 to enable, 0 to disable * * This is a way for other parts of the kernel to enable or disable * event recording. * * Returns 0 on success, -EINVAL if the parameters do not match any * registered events. */ int trace_set_clr_event(const char *system, const char *event, int set) { |
ae63b31e4
|
568 |
struct trace_array *tr = top_trace_array(); |
dc81e5e3a
|
569 570 |
if (!tr) return -ENODEV; |
ae63b31e4
|
571 |
return __ftrace_set_clr_event(tr, NULL, system, event, set); |
4671c7940
|
572 |
} |
56355b83e
|
573 |
EXPORT_SYMBOL_GPL(trace_set_clr_event); |
4671c7940
|
574 |
|
b77e38aa2
|
575 576 577 578 579 580 581 |
/* 128 should be much more than enough */ #define EVENT_BUF_SIZE 127 static ssize_t ftrace_event_write(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) { |
489663644
|
582 |
struct trace_parser parser; |
ae63b31e4
|
583 584 |
struct seq_file *m = file->private_data; struct trace_array *tr = m->private; |
4ba7978e9
|
585 |
ssize_t read, ret; |
b77e38aa2
|
586 |
|
4ba7978e9
|
587 |
if (!cnt) |
b77e38aa2
|
588 |
return 0; |
1852fcce1
|
589 590 591 |
ret = tracing_update_buffers(); if (ret < 0) return ret; |
489663644
|
592 |
if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1)) |
b77e38aa2
|
593 |
return -ENOMEM; |
489663644
|
594 |
read = trace_get_user(&parser, ubuf, cnt, ppos); |
4ba7978e9
|
595 |
if (read >= 0 && trace_parser_loaded((&parser))) { |
489663644
|
596 |
int set = 1; |
b77e38aa2
|
597 |
|
489663644
|
598 |
if (*parser.buffer == '!') |
b77e38aa2
|
599 |
set = 0; |
b77e38aa2
|
600 |
|
489663644
|
601 |
parser.buffer[parser.idx] = 0; |
ae63b31e4
|
602 |
ret = ftrace_set_clr_event(tr, parser.buffer + !set, set); |
b77e38aa2
|
603 |
if (ret) |
489663644
|
604 |
goto out_put; |
b77e38aa2
|
605 |
} |
b77e38aa2
|
606 607 |
ret = read; |
489663644
|
608 609 |
out_put: trace_parser_put(&parser); |
b77e38aa2
|
610 611 612 613 614 615 616 |
return ret; } static void * t_next(struct seq_file *m, void *v, loff_t *pos) { |
ae63b31e4
|
617 618 619 |
struct ftrace_event_file *file = v; struct ftrace_event_call *call; struct trace_array *tr = m->private; |
b77e38aa2
|
620 621 |
(*pos)++; |
ae63b31e4
|
622 623 |
list_for_each_entry_continue(file, &tr->events, list) { call = file->event_call; |
40e26815f
|
624 625 626 627 |
/* * The ftrace subsystem is for showing formats only. * They can not be enabled or disabled via the event files. */ |
a1d0ce821
|
628 |
if (call->class && call->class->reg) |
ae63b31e4
|
629 |
return file; |
40e26815f
|
630 |
} |
b77e38aa2
|
631 |
|
30bd39cd6
|
632 |
return NULL; |
b77e38aa2
|
633 634 635 636 |
} static void *t_start(struct seq_file *m, loff_t *pos) { |
ae63b31e4
|
637 638 |
struct ftrace_event_file *file; struct trace_array *tr = m->private; |
e1c7e2a6e
|
639 |
loff_t l; |
20c8928ab
|
640 |
mutex_lock(&event_mutex); |
e1c7e2a6e
|
641 |
|
ae63b31e4
|
642 |
file = list_entry(&tr->events, struct ftrace_event_file, list); |
e1c7e2a6e
|
643 |
for (l = 0; l <= *pos; ) { |
ae63b31e4
|
644 645 |
file = t_next(m, file, &l); if (!file) |
e1c7e2a6e
|
646 647 |
break; } |
ae63b31e4
|
648 |
return file; |
b77e38aa2
|
649 650 651 652 653 |
} static void * s_next(struct seq_file *m, void *v, loff_t *pos) { |
ae63b31e4
|
654 655 |
struct ftrace_event_file *file = v; struct trace_array *tr = m->private; |
b77e38aa2
|
656 657 |
(*pos)++; |
ae63b31e4
|
658 659 660 |
list_for_each_entry_continue(file, &tr->events, list) { if (file->flags & FTRACE_EVENT_FL_ENABLED) return file; |
b77e38aa2
|
661 |
} |
30bd39cd6
|
662 |
return NULL; |
b77e38aa2
|
663 664 665 666 |
} static void *s_start(struct seq_file *m, loff_t *pos) { |
ae63b31e4
|
667 668 |
struct ftrace_event_file *file; struct trace_array *tr = m->private; |
e1c7e2a6e
|
669 |
loff_t l; |
20c8928ab
|
670 |
mutex_lock(&event_mutex); |
e1c7e2a6e
|
671 |
|
ae63b31e4
|
672 |
file = list_entry(&tr->events, struct ftrace_event_file, list); |
e1c7e2a6e
|
673 |
for (l = 0; l <= *pos; ) { |
ae63b31e4
|
674 675 |
file = s_next(m, file, &l); if (!file) |
e1c7e2a6e
|
676 677 |
break; } |
ae63b31e4
|
678 |
return file; |
b77e38aa2
|
679 680 681 682 |
} static int t_show(struct seq_file *m, void *v) { |
ae63b31e4
|
683 684 |
struct ftrace_event_file *file = v; struct ftrace_event_call *call = file->event_call; |
b77e38aa2
|
685 |
|
8f0820183
|
686 687 |
if (strcmp(call->class->system, TRACE_SYSTEM) != 0) seq_printf(m, "%s:", call->class->system); |
de7b29739
|
688 689 |
seq_printf(m, "%s ", ftrace_event_name(call)); |
b77e38aa2
|
690 691 692 693 694 695 |
return 0; } static void t_stop(struct seq_file *m, void *p) { |
20c8928ab
|
696 |
mutex_unlock(&event_mutex); |
b77e38aa2
|
697 |
} |
1473e4417
|
698 699 700 701 |
/*
 * Read handler for an event's "enable" file.  Prints "0" or "1",
 * with a trailing '*' when the event is in soft mode / soft disabled.
 *
 * Fix over the extracted source: the trailing strcat() had lost its
 * "\n" escape in extraction; restored so each read ends in a newline.
 */
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_file *file;
	unsigned long flags;
	char buf[4] = "0";

	/* Snapshot flags under the mutex; the file may be going away. */
	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (likely(file))
		flags = file->flags;
	mutex_unlock(&event_mutex);

	if (!file)
		return -ENODEV;

	if (flags & FTRACE_EVENT_FL_ENABLED &&
	    !(flags & FTRACE_EVENT_FL_SOFT_DISABLED))
		strcpy(buf, "1");

	if (flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
	    flags & FTRACE_EVENT_FL_SOFT_MODE)
		strcat(buf, "*");

	strcat(buf, "\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}

/*
 * Write handler for an event's "enable" file: accepts "0" or "1".
 */
static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_file *file;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		ret = -ENODEV;
		mutex_lock(&event_mutex);
		file = event_file_data(filp);
		if (likely(file))
			ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}
8ae79a138
|
758 759 760 761 |
/*
 * Read handler for a subsystem's "enable" file.  Prints '0' (all
 * off), '1' (all on), or 'X' (mixed) for the matching events.
 *
 * Fix over the extracted source: buf[1] had lost its '\n' escape in
 * extraction; restored so the output is newline terminated.
 */
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct ftrace_event_call *call;
	struct ftrace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if (!ftrace_event_name(call) || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events or cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

/*
 * Write handler for a subsystem's "enable" file: "0" or "1" applies
 * to every event in the subsystem (or all events for the top file).
 */
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}
2a37a3df5
|
837 838 |
enum { FORMAT_HEADER = 1, |
86397dc3c
|
839 840 |
FORMAT_FIELD_SEPERATOR = 2, FORMAT_PRINTFMT = 3, |
2a37a3df5
|
841 842 843 |
}; static void *f_next(struct seq_file *m, void *v, loff_t *pos) |
981d081ec
|
844 |
{ |
c5a44a120
|
845 |
struct ftrace_event_call *call = event_file_data(m->private); |
86397dc3c
|
846 847 |
struct list_head *common_head = &ftrace_common_fields; struct list_head *head = trace_get_fields(call); |
7710b6399
|
848 |
struct list_head *node = v; |
981d081ec
|
849 |
|
2a37a3df5
|
850 |
(*pos)++; |
5a65e9562
|
851 |
|
2a37a3df5
|
852 853 |
switch ((unsigned long)v) { case FORMAT_HEADER: |
7710b6399
|
854 855 |
node = common_head; break; |
5a65e9562
|
856 |
|
86397dc3c
|
857 |
case FORMAT_FIELD_SEPERATOR: |
7710b6399
|
858 859 |
node = head; break; |
5a65e9562
|
860 |
|
2a37a3df5
|
861 862 863 |
case FORMAT_PRINTFMT: /* all done */ return NULL; |
5a65e9562
|
864 |
} |
7710b6399
|
865 866 |
node = node->prev; if (node == common_head) |
86397dc3c
|
867 |
return (void *)FORMAT_FIELD_SEPERATOR; |
7710b6399
|
868 |
else if (node == head) |
2a37a3df5
|
869 |
return (void *)FORMAT_PRINTFMT; |
7710b6399
|
870 871 |
else return node; |
2a37a3df5
|
872 873 874 875 |
} static int f_show(struct seq_file *m, void *v) { |
c5a44a120
|
876 |
struct ftrace_event_call *call = event_file_data(m->private); |
2a37a3df5
|
877 878 879 880 881 |
struct ftrace_event_field *field; const char *array_descriptor; switch ((unsigned long)v) { case FORMAT_HEADER: |
de7b29739
|
882 883 |
seq_printf(m, "name: %s ", ftrace_event_name(call)); |
2a37a3df5
|
884 885 |
seq_printf(m, "ID: %d ", call->event.type); |
fa6f0cc75
|
886 887 |
seq_puts(m, "format: "); |
8728fe501
|
888 |
return 0; |
5a65e9562
|
889 |
|
86397dc3c
|
890 891 892 893 |
case FORMAT_FIELD_SEPERATOR: seq_putc(m, ' '); return 0; |
2a37a3df5
|
894 895 896 897 898 899 |
case FORMAT_PRINTFMT: seq_printf(m, " print fmt: %s ", call->print_fmt); return 0; |
981d081ec
|
900 |
} |
8728fe501
|
901 |
|
7710b6399
|
902 |
field = list_entry(v, struct ftrace_event_field, link); |
2a37a3df5
|
903 904 905 906 907 908 909 910 |
/* * Smartly shows the array type(except dynamic array). * Normal: * field:TYPE VAR * If TYPE := TYPE[LEN], it is shown: * field:TYPE VAR[LEN] */ array_descriptor = strchr(field->type, '['); |
8728fe501
|
911 |
|
2a37a3df5
|
912 913 |
if (!strncmp(field->type, "__data_loc", 10)) array_descriptor = NULL; |
8728fe501
|
914 |
|
2a37a3df5
|
915 916 917 918 919 920 921 922 923 924 925 926 |
if (!array_descriptor) seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d; ", field->type, field->name, field->offset, field->size, !!field->is_signed); else seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d; ", (int)(array_descriptor - field->type), field->type, field->name, array_descriptor, field->offset, field->size, !!field->is_signed); |
8728fe501
|
927 |
|
2a37a3df5
|
928 929 |
return 0; } |
5a65e9562
|
930 |
|
7710b6399
|
931 932 933 934 |
static void *f_start(struct seq_file *m, loff_t *pos) { void *p = (void *)FORMAT_HEADER; loff_t l = 0; |
c5a44a120
|
935 936 937 938 |
/* ->stop() is called even if ->start() fails */ mutex_lock(&event_mutex); if (!event_file_data(m->private)) return ERR_PTR(-ENODEV); |
7710b6399
|
939 940 941 942 943 |
while (l < *pos && p) p = f_next(m, p, &l); return p; } |
2a37a3df5
|
944 945 |
static void f_stop(struct seq_file *m, void *p) { |
c5a44a120
|
946 |
mutex_unlock(&event_mutex); |
2a37a3df5
|
947 |
} |
981d081ec
|
948 |
|
2a37a3df5
|
949 950 951 952 953 954 955 956 957 |
static const struct seq_operations trace_format_seq_ops = { .start = f_start, .next = f_next, .stop = f_stop, .show = f_show, }; static int trace_format_open(struct inode *inode, struct file *file) { |
2a37a3df5
|
958 959 960 961 962 963 964 965 |
struct seq_file *m; int ret; ret = seq_open(file, &trace_format_seq_ops); if (ret < 0) return ret; m = file->private_data; |
c5a44a120
|
966 |
m->private = file; |
2a37a3df5
|
967 968 |
return 0; |
981d081ec
|
969 |
} |
23725aeea
|
970 971 972 |
static ssize_t event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { |
1a11126bc
|
973 |
int id = (long)event_file_data(filp); |
cd458ba9d
|
974 975 |
char buf[32]; int len; |
23725aeea
|
976 977 978 |
if (*ppos) return 0; |
1a11126bc
|
979 980 981 982 983 |
if (unlikely(!id)) return -ENODEV; len = sprintf(buf, "%d ", id); |
cd458ba9d
|
984 |
return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); |
23725aeea
|
985 |
} |
7ce7e4249
|
986 987 988 989 |
static ssize_t event_filter_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { |
f306cc82a
|
990 |
struct ftrace_event_file *file; |
7ce7e4249
|
991 |
struct trace_seq *s; |
e2912b091
|
992 |
int r = -ENODEV; |
7ce7e4249
|
993 994 995 996 997 |
if (*ppos) return 0; s = kmalloc(sizeof(*s), GFP_KERNEL); |
e2912b091
|
998 |
|
7ce7e4249
|
999 1000 1001 1002 |
if (!s) return -ENOMEM; trace_seq_init(s); |
e2912b091
|
1003 |
mutex_lock(&event_mutex); |
f306cc82a
|
1004 1005 1006 |
file = event_file_data(filp); if (file) print_event_filter(file, s); |
e2912b091
|
1007 |
mutex_unlock(&event_mutex); |
f306cc82a
|
1008 |
if (file) |
5ac483784
|
1009 1010 |
r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, trace_seq_used(s)); |
7ce7e4249
|
1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 |
kfree(s); return r; } static ssize_t event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { |
f306cc82a
|
1021 |
struct ftrace_event_file *file; |
8b3725621
|
1022 |
char *buf; |
e2912b091
|
1023 |
int err = -ENODEV; |
7ce7e4249
|
1024 |
|
8b3725621
|
1025 |
if (cnt >= PAGE_SIZE) |
7ce7e4249
|
1026 |
return -EINVAL; |
8b3725621
|
1027 1028 |
buf = (char *)__get_free_page(GFP_TEMPORARY); if (!buf) |
7ce7e4249
|
1029 |
return -ENOMEM; |
8b3725621
|
1030 1031 1032 |
if (copy_from_user(buf, ubuf, cnt)) { free_page((unsigned long) buf); return -EFAULT; |
7ce7e4249
|
1033 |
} |
8b3725621
|
1034 |
buf[cnt] = '\0'; |
7ce7e4249
|
1035 |
|
e2912b091
|
1036 |
mutex_lock(&event_mutex); |
f306cc82a
|
1037 1038 1039 |
file = event_file_data(filp); if (file) err = apply_event_filter(file, buf); |
e2912b091
|
1040 |
mutex_unlock(&event_mutex); |
8b3725621
|
1041 1042 |
free_page((unsigned long) buf); if (err < 0) |
44e9c8b7a
|
1043 |
return err; |
0a19e53c1
|
1044 |
|
7ce7e4249
|
1045 1046 1047 1048 |
*ppos += cnt; return cnt; } |
e9dbfae53
|
1049 1050 1051 1052 1053 |
static LIST_HEAD(event_subsystems); static int subsystem_open(struct inode *inode, struct file *filp) { struct event_subsystem *system = NULL; |
ae63b31e4
|
1054 1055 |
struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */ struct trace_array *tr; |
e9dbfae53
|
1056 |
int ret; |
d6d3523ca
|
1057 1058 |
if (tracing_is_disabled()) return -ENODEV; |
e9dbfae53
|
1059 |
/* Make sure the system still exists */ |
a82274151
|
1060 |
mutex_lock(&trace_types_lock); |
e9dbfae53
|
1061 |
mutex_lock(&event_mutex); |
ae63b31e4
|
1062 1063 1064 1065 1066 1067 1068 1069 1070 |
list_for_each_entry(tr, &ftrace_trace_arrays, list) { list_for_each_entry(dir, &tr->systems, list) { if (dir == inode->i_private) { /* Don't open systems with no events */ if (dir->nr_events) { __get_system_dir(dir); system = dir->subsystem; } goto exit_loop; |
e9dbfae53
|
1071 |
} |
e9dbfae53
|
1072 1073 |
} } |
ae63b31e4
|
1074 |
exit_loop: |
e9dbfae53
|
1075 |
mutex_unlock(&event_mutex); |
a82274151
|
1076 |
mutex_unlock(&trace_types_lock); |
e9dbfae53
|
1077 |
|
ae63b31e4
|
1078 |
if (!system) |
e9dbfae53
|
1079 |
return -ENODEV; |
ae63b31e4
|
1080 1081 |
/* Some versions of gcc think dir can be uninitialized here */ WARN_ON(!dir); |
8e2e2fa47
|
1082 1083 1084 1085 1086 |
/* Still need to increment the ref count of the system */ if (trace_array_get(tr) < 0) { put_system(dir); return -ENODEV; } |
e9dbfae53
|
1087 |
ret = tracing_open_generic(inode, filp); |
8e2e2fa47
|
1088 1089 |
if (ret < 0) { trace_array_put(tr); |
ae63b31e4
|
1090 |
put_system(dir); |
8e2e2fa47
|
1091 |
} |
ae63b31e4
|
1092 1093 1094 1095 1096 1097 1098 1099 1100 |
return ret; } static int system_tr_open(struct inode *inode, struct file *filp) { struct ftrace_subsystem_dir *dir; struct trace_array *tr = inode->i_private; int ret; |
d6d3523ca
|
1101 1102 |
if (tracing_is_disabled()) return -ENODEV; |
8e2e2fa47
|
1103 1104 |
if (trace_array_get(tr) < 0) return -ENODEV; |
ae63b31e4
|
1105 1106 |
/* Make a temporary dir that has no system but points to tr */ dir = kzalloc(sizeof(*dir), GFP_KERNEL); |
8e2e2fa47
|
1107 1108 |
if (!dir) { trace_array_put(tr); |
ae63b31e4
|
1109 |
return -ENOMEM; |
8e2e2fa47
|
1110 |
} |
ae63b31e4
|
1111 1112 1113 1114 |
dir->tr = tr; ret = tracing_open_generic(inode, filp); |
8e2e2fa47
|
1115 1116 |
if (ret < 0) { trace_array_put(tr); |
ae63b31e4
|
1117 |
kfree(dir); |
d6d3523ca
|
1118 |
return ret; |
8e2e2fa47
|
1119 |
} |
ae63b31e4
|
1120 1121 |
filp->private_data = dir; |
e9dbfae53
|
1122 |
|
d6d3523ca
|
1123 |
return 0; |
e9dbfae53
|
1124 1125 1126 1127 |
} static int subsystem_release(struct inode *inode, struct file *file) { |
ae63b31e4
|
1128 |
struct ftrace_subsystem_dir *dir = file->private_data; |
e9dbfae53
|
1129 |
|
8e2e2fa47
|
1130 |
trace_array_put(dir->tr); |
ae63b31e4
|
1131 1132 1133 1134 1135 1136 1137 1138 1139 |
/* * If dir->subsystem is NULL, then this is a temporary * descriptor that was made for a trace_array to enable * all subsystems. */ if (dir->subsystem) put_system(dir); else kfree(dir); |
e9dbfae53
|
1140 1141 1142 |
return 0; } |
cfb180f3e
|
1143 1144 1145 1146 |
static ssize_t subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { |
ae63b31e4
|
1147 1148 |
struct ftrace_subsystem_dir *dir = filp->private_data; struct event_subsystem *system = dir->subsystem; |
cfb180f3e
|
1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 |
struct trace_seq *s; int r; if (*ppos) return 0; s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; trace_seq_init(s); |
8b3725621
|
1160 |
print_subsystem_event_filter(system, s); |
5ac483784
|
1161 1162 |
r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, trace_seq_used(s)); |
cfb180f3e
|
1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 |
kfree(s); return r; } static ssize_t subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { |
ae63b31e4
|
1173 |
struct ftrace_subsystem_dir *dir = filp->private_data; |
8b3725621
|
1174 |
char *buf; |
cfb180f3e
|
1175 |
int err; |
8b3725621
|
1176 |
if (cnt >= PAGE_SIZE) |
cfb180f3e
|
1177 |
return -EINVAL; |
8b3725621
|
1178 1179 |
buf = (char *)__get_free_page(GFP_TEMPORARY); if (!buf) |
cfb180f3e
|
1180 |
return -ENOMEM; |
8b3725621
|
1181 1182 1183 |
if (copy_from_user(buf, ubuf, cnt)) { free_page((unsigned long) buf); return -EFAULT; |
cfb180f3e
|
1184 |
} |
8b3725621
|
1185 |
buf[cnt] = '\0'; |
cfb180f3e
|
1186 |
|
ae63b31e4
|
1187 |
err = apply_subsystem_event_filter(dir, buf); |
8b3725621
|
1188 1189 |
free_page((unsigned long) buf); if (err < 0) |
44e9c8b7a
|
1190 |
return err; |
cfb180f3e
|
1191 1192 1193 1194 1195 |
*ppos += cnt; return cnt; } |
d1b182a8d
|
1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 |
static ssize_t show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { int (*func)(struct trace_seq *s) = filp->private_data; struct trace_seq *s; int r; if (*ppos) return 0; s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; trace_seq_init(s); func(s); |
5ac483784
|
1213 1214 |
r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, trace_seq_used(s)); |
d1b182a8d
|
1215 1216 1217 1218 1219 |
kfree(s); return r; } |
15075cac4
|
1220 1221 |
static int ftrace_event_avail_open(struct inode *inode, struct file *file); static int ftrace_event_set_open(struct inode *inode, struct file *file); |
f77d09a38
|
1222 |
static int ftrace_event_release(struct inode *inode, struct file *file); |
15075cac4
|
1223 |
|
b77e38aa2
|
1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 |
static const struct seq_operations show_event_seq_ops = { .start = t_start, .next = t_next, .show = t_show, .stop = t_stop, }; static const struct seq_operations show_set_event_seq_ops = { .start = s_start, .next = s_next, .show = t_show, .stop = t_stop, }; |
2314c4ae1
|
1237 |
static const struct file_operations ftrace_avail_fops = { |
15075cac4
|
1238 |
.open = ftrace_event_avail_open, |
2314c4ae1
|
1239 1240 1241 1242 |
.read = seq_read, .llseek = seq_lseek, .release = seq_release, }; |
b77e38aa2
|
1243 |
static const struct file_operations ftrace_set_event_fops = { |
15075cac4
|
1244 |
.open = ftrace_event_set_open, |
b77e38aa2
|
1245 1246 1247 |
.read = seq_read, .write = ftrace_event_write, .llseek = seq_lseek, |
f77d09a38
|
1248 |
.release = ftrace_event_release, |
b77e38aa2
|
1249 |
}; |
1473e4417
|
1250 |
static const struct file_operations ftrace_enable_fops = { |
bf682c315
|
1251 |
.open = tracing_open_generic, |
1473e4417
|
1252 1253 |
.read = event_enable_read, .write = event_enable_write, |
6038f373a
|
1254 |
.llseek = default_llseek, |
1473e4417
|
1255 |
}; |
981d081ec
|
1256 |
static const struct file_operations ftrace_event_format_fops = { |
2a37a3df5
|
1257 1258 1259 1260 |
.open = trace_format_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, |
981d081ec
|
1261 |
}; |
23725aeea
|
1262 |
static const struct file_operations ftrace_event_id_fops = { |
23725aeea
|
1263 |
.read = event_id_read, |
6038f373a
|
1264 |
.llseek = default_llseek, |
23725aeea
|
1265 |
}; |
7ce7e4249
|
1266 1267 1268 1269 |
static const struct file_operations ftrace_event_filter_fops = { .open = tracing_open_generic, .read = event_filter_read, .write = event_filter_write, |
6038f373a
|
1270 |
.llseek = default_llseek, |
7ce7e4249
|
1271 |
}; |
cfb180f3e
|
1272 |
static const struct file_operations ftrace_subsystem_filter_fops = { |
e9dbfae53
|
1273 |
.open = subsystem_open, |
cfb180f3e
|
1274 1275 |
.read = subsystem_filter_read, .write = subsystem_filter_write, |
6038f373a
|
1276 |
.llseek = default_llseek, |
e9dbfae53
|
1277 |
.release = subsystem_release, |
cfb180f3e
|
1278 |
}; |
8ae79a138
|
1279 |
static const struct file_operations ftrace_system_enable_fops = { |
40ee4dfff
|
1280 |
.open = subsystem_open, |
8ae79a138
|
1281 1282 |
.read = system_enable_read, .write = system_enable_write, |
6038f373a
|
1283 |
.llseek = default_llseek, |
40ee4dfff
|
1284 |
.release = subsystem_release, |
8ae79a138
|
1285 |
}; |
ae63b31e4
|
1286 1287 1288 1289 1290 1291 1292 |
static const struct file_operations ftrace_tr_enable_fops = { .open = system_tr_open, .read = system_enable_read, .write = system_enable_write, .llseek = default_llseek, .release = subsystem_release, }; |
d1b182a8d
|
1293 1294 1295 |
static const struct file_operations ftrace_show_header_fops = { .open = tracing_open_generic, .read = show_header, |
6038f373a
|
1296 |
.llseek = default_llseek, |
d1b182a8d
|
1297 |
}; |
ae63b31e4
|
1298 1299 1300 |
static int ftrace_event_open(struct inode *inode, struct file *file, const struct seq_operations *seq_ops) |
1473e4417
|
1301 |
{ |
ae63b31e4
|
1302 1303 |
struct seq_file *m; int ret; |
1473e4417
|
1304 |
|
ae63b31e4
|
1305 1306 1307 1308 1309 1310 |
ret = seq_open(file, seq_ops); if (ret < 0) return ret; m = file->private_data; /* copy tr over to seq ops */ m->private = inode->i_private; |
1473e4417
|
1311 |
|
ae63b31e4
|
1312 |
return ret; |
1473e4417
|
1313 |
} |
f77d09a38
|
1314 1315 1316 1317 1318 1319 1320 1321 |
static int ftrace_event_release(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; trace_array_put(tr); return seq_release(inode, file); } |
15075cac4
|
1322 1323 1324 1325 |
static int ftrace_event_avail_open(struct inode *inode, struct file *file) { const struct seq_operations *seq_ops = &show_event_seq_ops; |
ae63b31e4
|
1326 |
return ftrace_event_open(inode, file, seq_ops); |
15075cac4
|
1327 1328 1329 1330 1331 1332 |
} static int ftrace_event_set_open(struct inode *inode, struct file *file) { const struct seq_operations *seq_ops = &show_set_event_seq_ops; |
ae63b31e4
|
1333 |
struct trace_array *tr = inode->i_private; |
f77d09a38
|
1334 1335 1336 1337 |
int ret; if (trace_array_get(tr) < 0) return -ENODEV; |
15075cac4
|
1338 1339 1340 |
if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) |
ae63b31e4
|
1341 |
ftrace_clear_events(tr); |
15075cac4
|
1342 |
|
f77d09a38
|
1343 1344 1345 1346 |
ret = ftrace_event_open(inode, file, seq_ops); if (ret < 0) trace_array_put(tr); return ret; |
ae63b31e4
|
1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 |
} static struct event_subsystem * create_new_subsystem(const char *name) { struct event_subsystem *system; /* need to create new entry */ system = kmalloc(sizeof(*system), GFP_KERNEL); if (!system) return NULL; system->ref_count = 1; |
6e94a7803
|
1360 1361 1362 1363 1364 1365 1366 1367 1368 |
/* Only allocate if dynamic (kprobes and modules) */ if (!core_kernel_data((unsigned long)name)) { system->ref_count |= SYSTEM_FL_FREE_NAME; system->name = kstrdup(name, GFP_KERNEL); if (!system->name) goto out_free; } else system->name = name; |
ae63b31e4
|
1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 |
system->filter = NULL; system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL); if (!system->filter) goto out_free; list_add(&system->list, &event_subsystems); return system; out_free: |
6e94a7803
|
1381 1382 |
if (system->ref_count & SYSTEM_FL_FREE_NAME) kfree(system->name); |
ae63b31e4
|
1383 1384 |
kfree(system); return NULL; |
15075cac4
|
1385 |
} |
6ecc2d1ca
|
1386 |
static struct dentry * |
ae63b31e4
|
1387 1388 |
event_subsystem_dir(struct trace_array *tr, const char *name, struct ftrace_event_file *file, struct dentry *parent) |
6ecc2d1ca
|
1389 |
{ |
ae63b31e4
|
1390 |
struct ftrace_subsystem_dir *dir; |
6ecc2d1ca
|
1391 |
struct event_subsystem *system; |
e1112b4d9
|
1392 |
struct dentry *entry; |
6ecc2d1ca
|
1393 1394 |
/* First see if we did not already create this dir */ |
ae63b31e4
|
1395 1396 |
list_for_each_entry(dir, &tr->systems, list) { system = dir->subsystem; |
dc82ec98a
|
1397 |
if (strcmp(system->name, name) == 0) { |
ae63b31e4
|
1398 1399 1400 |
dir->nr_events++; file->system = dir; return dir->entry; |
dc82ec98a
|
1401 |
} |
6ecc2d1ca
|
1402 |
} |
ae63b31e4
|
1403 1404 1405 1406 |
/* Now see if the system itself exists. */ list_for_each_entry(system, &event_subsystems, list) { if (strcmp(system->name, name) == 0) break; |
6ecc2d1ca
|
1407 |
} |
ae63b31e4
|
1408 1409 1410 |
/* Reset system variable when not found */ if (&system->list == &event_subsystems) system = NULL; |
6ecc2d1ca
|
1411 |
|
ae63b31e4
|
1412 1413 1414 |
dir = kmalloc(sizeof(*dir), GFP_KERNEL); if (!dir) goto out_fail; |
6ecc2d1ca
|
1415 |
|
ae63b31e4
|
1416 1417 1418 1419 1420 1421 1422 1423 1424 |
if (!system) { system = create_new_subsystem(name); if (!system) goto out_free; } else __get_system(system); dir->entry = debugfs_create_dir(name, parent); if (!dir->entry) { |
3448bac32
|
1425 1426 |
pr_warn("Failed to create system directory %s ", name); |
ae63b31e4
|
1427 1428 |
__put_system(system); goto out_free; |
6d723736e
|
1429 |
} |
ae63b31e4
|
1430 1431 1432 1433 1434 |
dir->tr = tr; dir->ref_count = 1; dir->nr_events = 1; dir->subsystem = system; file->system = dir; |
8b3725621
|
1435 |
|
ae63b31e4
|
1436 |
entry = debugfs_create_file("filter", 0644, dir->entry, dir, |
e1112b4d9
|
1437 |
&ftrace_subsystem_filter_fops); |
8b3725621
|
1438 1439 1440 |
if (!entry) { kfree(system->filter); system->filter = NULL; |
3448bac32
|
1441 1442 |
pr_warn("Could not create debugfs '%s/filter' entry ", name); |
8b3725621
|
1443 |
} |
e1112b4d9
|
1444 |
|
ae63b31e4
|
1445 |
trace_create_file("enable", 0644, dir->entry, dir, |
f3f3f0092
|
1446 |
&ftrace_system_enable_fops); |
8ae79a138
|
1447 |
|
ae63b31e4
|
1448 1449 1450 1451 1452 1453 1454 1455 1456 |
list_add(&dir->list, &tr->systems); return dir->entry; out_free: kfree(dir); out_fail: /* Only print this message if failed on memory allocation */ if (!dir || !system) |
3448bac32
|
1457 1458 |
pr_warn("No memory to create event subsystem %s ", name); |
ae63b31e4
|
1459 |
return NULL; |
6ecc2d1ca
|
1460 |
} |
1473e4417
|
1461 |
static int |
620a30e97
|
1462 |
event_create_dir(struct dentry *parent, struct ftrace_event_file *file) |
1473e4417
|
1463 |
{ |
ae63b31e4
|
1464 1465 |
struct ftrace_event_call *call = file->event_call; struct trace_array *tr = file->tr; |
2e33af029
|
1466 |
struct list_head *head; |
ae63b31e4
|
1467 |
struct dentry *d_events; |
de7b29739
|
1468 |
const char *name; |
fd9949898
|
1469 |
int ret; |
1473e4417
|
1470 |
|
6ecc2d1ca
|
1471 1472 1473 1474 |
/* * If the trace point header did not define TRACE_SYSTEM * then the system would be called "TRACE_SYSTEM". */ |
ae63b31e4
|
1475 1476 1477 1478 1479 1480 |
if (strcmp(call->class->system, TRACE_SYSTEM) != 0) { d_events = event_subsystem_dir(tr, call->class->system, file, parent); if (!d_events) return -ENOMEM; } else d_events = parent; |
de7b29739
|
1481 1482 |
name = ftrace_event_name(call); file->dir = debugfs_create_dir(name, d_events); |
ae63b31e4
|
1483 |
if (!file->dir) { |
3448bac32
|
1484 1485 |
pr_warn("Could not create debugfs '%s' directory ", name); |
1473e4417
|
1486 1487 |
return -1; } |
9b63776fa
|
1488 |
if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) |
ae63b31e4
|
1489 |
trace_create_file("enable", 0644, file->dir, file, |
620a30e97
|
1490 |
&ftrace_enable_fops); |
1473e4417
|
1491 |
|
2239291ae
|
1492 |
#ifdef CONFIG_PERF_EVENTS |
a1d0ce821
|
1493 |
if (call->event.type && call->class->reg) |
1a11126bc
|
1494 |
trace_create_file("id", 0444, file->dir, |
620a30e97
|
1495 1496 |
(void *)(long)call->event.type, &ftrace_event_id_fops); |
2239291ae
|
1497 |
#endif |
23725aeea
|
1498 |
|
c9d932cf8
|
1499 1500 1501 1502 1503 1504 1505 1506 |
/* * Other events may have the same class. Only update * the fields if they are not already defined. */ head = trace_get_fields(call); if (list_empty(head)) { ret = call->class->define_fields(call); if (ret < 0) { |
3448bac32
|
1507 1508 1509 |
pr_warn("Could not initialize trace point events/%s ", name); |
ae63b31e4
|
1510 |
return -1; |
cf027f645
|
1511 1512 |
} } |
f306cc82a
|
1513 |
trace_create_file("filter", 0644, file->dir, file, |
620a30e97
|
1514 |
&ftrace_event_filter_fops); |
cf027f645
|
1515 |
|
85f2b0826
|
1516 1517 |
trace_create_file("trigger", 0644, file->dir, file, &event_trigger_fops); |
ae63b31e4
|
1518 |
trace_create_file("format", 0444, file->dir, call, |
620a30e97
|
1519 |
&ftrace_event_format_fops); |
6d723736e
|
1520 1521 1522 |
return 0; } |
ae63b31e4
|
1523 1524 1525 1526 1527 1528 |
static void remove_event_from_tracers(struct ftrace_event_call *call) { struct ftrace_event_file *file; struct trace_array *tr; do_for_each_event_file_safe(tr, file) { |
ae63b31e4
|
1529 1530 |
if (file->event_call != call) continue; |
f6a84bdc7
|
1531 |
remove_event_file_dir(file); |
ae63b31e4
|
1532 1533 1534 1535 1536 1537 1538 1539 1540 |
/* * The do_for_each_event_file_safe() is * a double loop. After finding the call for this * trace_array, we use break to jump to the next * trace_array. */ break; } while_for_each_event_file(); } |
8781915ad
|
1541 1542 |
static void event_remove(struct ftrace_event_call *call) { |
ae63b31e4
|
1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 |
struct trace_array *tr; struct ftrace_event_file *file; do_for_each_event_file(tr, file) { if (file->event_call != call) continue; ftrace_event_enable_disable(file, 0); /* * The do_for_each_event_file() is * a double loop. After finding the call for this * trace_array, we use break to jump to the next * trace_array. */ break; } while_for_each_event_file(); |
8781915ad
|
1558 1559 |
if (call->event.funcs) __unregister_ftrace_event(&call->event); |
ae63b31e4
|
1560 |
remove_event_from_tracers(call); |
8781915ad
|
1561 1562 1563 1564 1565 1566 |
list_del(&call->list); } static int event_init(struct ftrace_event_call *call) { int ret = 0; |
de7b29739
|
1567 |
const char *name; |
8781915ad
|
1568 |
|
de7b29739
|
1569 1570 |
name = ftrace_event_name(call); if (WARN_ON(!name)) |
8781915ad
|
1571 1572 1573 1574 1575 |
return -EINVAL; if (call->class->raw_init) { ret = call->class->raw_init(call); if (ret < 0 && ret != -ENOSYS) |
3448bac32
|
1576 1577 |
pr_warn("Could not initialize trace events/%s ", name); |
8781915ad
|
1578 1579 1580 1581 |
} return ret; } |
67ead0a6c
|
1582 |
static int |
ae63b31e4
|
1583 |
__register_event(struct ftrace_event_call *call, struct module *mod) |
bd1a5c849
|
1584 |
{ |
bd1a5c849
|
1585 |
int ret; |
6d723736e
|
1586 |
|
8781915ad
|
1587 1588 1589 |
ret = event_init(call); if (ret < 0) return ret; |
701970b3a
|
1590 |
|
ae63b31e4
|
1591 |
list_add(&call->list, &ftrace_events); |
67ead0a6c
|
1592 |
call->mod = mod; |
88f70d759
|
1593 |
|
ae63b31e4
|
1594 |
return 0; |
bd1a5c849
|
1595 |
} |
da511bf33
|
1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 |
static struct ftrace_event_file * trace_create_new_event(struct ftrace_event_call *call, struct trace_array *tr) { struct ftrace_event_file *file; file = kmem_cache_alloc(file_cachep, GFP_TRACE); if (!file) return NULL; file->event_call = call; file->tr = tr; atomic_set(&file->sm_ref, 0); |
85f2b0826
|
1609 1610 |
atomic_set(&file->tm_ref, 0); INIT_LIST_HEAD(&file->triggers); |
da511bf33
|
1611 1612 1613 1614 |
list_add(&file->list, &tr->events); return file; } |
ae63b31e4
|
1615 1616 |
/* Add an event to a trace directory */ static int |
620a30e97
|
1617 |
__trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr) |
ae63b31e4
|
1618 1619 |
{ struct ftrace_event_file *file; |
da511bf33
|
1620 |
file = trace_create_new_event(call, tr); |
ae63b31e4
|
1621 1622 |
if (!file) return -ENOMEM; |
620a30e97
|
1623 |
return event_create_dir(tr->event_dir, file); |
ae63b31e4
|
1624 |
} |
772482216
|
1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 |
/* * Just create a decriptor for early init. A descriptor is required * for enabling events at boot. We want to enable events before * the filesystem is initialized. */ static __init int __trace_early_add_new_event(struct ftrace_event_call *call, struct trace_array *tr) { struct ftrace_event_file *file; |
da511bf33
|
1635 |
file = trace_create_new_event(call, tr); |
772482216
|
1636 1637 |
if (!file) return -ENOMEM; |
772482216
|
1638 1639 |
return 0; } |
ae63b31e4
|
1640 |
struct ftrace_module_file_ops; |
779c5e379
|
1641 |
static void __add_event_to_tracers(struct ftrace_event_call *call); |
ae63b31e4
|
1642 |
|
bd1a5c849
|
1643 1644 1645 1646 |
/* Add an additional event_call dynamically */ int trace_add_event_call(struct ftrace_event_call *call) { int ret; |
a82274151
|
1647 |
mutex_lock(&trace_types_lock); |
bd1a5c849
|
1648 |
mutex_lock(&event_mutex); |
701970b3a
|
1649 |
|
ae63b31e4
|
1650 1651 |
ret = __register_event(call, NULL); if (ret >= 0) |
779c5e379
|
1652 |
__add_event_to_tracers(call); |
a2ca5e03b
|
1653 |
|
ae63b31e4
|
1654 |
mutex_unlock(&event_mutex); |
a82274151
|
1655 |
mutex_unlock(&trace_types_lock); |
ae63b31e4
|
1656 |
return ret; |
a2ca5e03b
|
1657 |
} |
4fead8e46
|
1658 |
/* |
a82274151
|
1659 1660 |
* Must be called under locking of trace_types_lock, event_mutex and * trace_event_sem. |
4fead8e46
|
1661 |
*/ |
bd1a5c849
|
1662 1663 |
static void __trace_remove_event_call(struct ftrace_event_call *call) { |
8781915ad
|
1664 |
event_remove(call); |
bd1a5c849
|
1665 |
trace_destroy_fields(call); |
57375747b
|
1666 1667 |
free_event_filter(call->filter); call->filter = NULL; |
bd1a5c849
|
1668 |
} |
2816c551c
|
1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 |
static int probe_remove_event_call(struct ftrace_event_call *call) { struct trace_array *tr; struct ftrace_event_file *file; #ifdef CONFIG_PERF_EVENTS if (call->perf_refcount) return -EBUSY; #endif do_for_each_event_file(tr, file) { if (file->event_call != call) continue; /* * We can't rely on ftrace_event_enable_disable(enable => 0) * we are going to do, FTRACE_EVENT_FL_SOFT_MODE can suppress * TRACE_REG_UNREGISTER. */ if (file->flags & FTRACE_EVENT_FL_ENABLED) return -EBUSY; |
2ba64035d
|
1688 1689 1690 1691 1692 1693 |
/* * The do_for_each_event_file_safe() is * a double loop. After finding the call for this * trace_array, we use break to jump to the next * trace_array. */ |
2816c551c
|
1694 1695 1696 1697 1698 1699 1700 |
break; } while_for_each_event_file(); __trace_remove_event_call(call); return 0; } |
bd1a5c849
|
1701 |
/* Remove an event_call */ |
2816c551c
|
1702 |
int trace_remove_event_call(struct ftrace_event_call *call) |
bd1a5c849
|
1703 |
{ |
2816c551c
|
1704 |
int ret; |
a82274151
|
1705 |
mutex_lock(&trace_types_lock); |
bd1a5c849
|
1706 |
mutex_lock(&event_mutex); |
52f6ad6dc
|
1707 |
down_write(&trace_event_sem); |
2816c551c
|
1708 |
ret = probe_remove_event_call(call); |
52f6ad6dc
|
1709 |
up_write(&trace_event_sem); |
bd1a5c849
|
1710 |
mutex_unlock(&event_mutex); |
a82274151
|
1711 |
mutex_unlock(&trace_types_lock); |
2816c551c
|
1712 1713 |
return ret; |
bd1a5c849
|
1714 1715 1716 1717 1718 1719 1720 1721 |
} #define for_each_event(event, start, end) \ for (event = start; \ (unsigned long)event < (unsigned long)end; \ event++) #ifdef CONFIG_MODULES |
6d723736e
|
1722 1723 |
static void trace_module_add_events(struct module *mod) { |
e4a9ea5ee
|
1724 |
struct ftrace_event_call **call, **start, **end; |
6d723736e
|
1725 |
|
45ab2813d
|
1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 |
if (!mod->num_trace_events) return; /* Don't add infrastructure for mods without tracepoints */ if (trace_module_has_bad_taint(mod)) { pr_err("%s: module has bad taint, not creating trace events ", mod->name); return; } |
6d723736e
|
1736 1737 |
start = mod->trace_events; end = mod->trace_events + mod->num_trace_events; |
6d723736e
|
1738 |
for_each_event(call, start, end) { |
ae63b31e4
|
1739 |
__register_event(*call, mod); |
779c5e379
|
1740 |
__add_event_to_tracers(*call); |
6d723736e
|
1741 1742 1743 1744 1745 1746 |
} } static void trace_module_remove_events(struct module *mod) { struct ftrace_event_call *call, *p; |
575380da8
|
1747 |
bool clear_trace = false; |
6d723736e
|
1748 |
|
52f6ad6dc
|
1749 |
down_write(&trace_event_sem); |
6d723736e
|
1750 1751 |
list_for_each_entry_safe(call, p, &ftrace_events, list) { if (call->mod == mod) { |
575380da8
|
1752 1753 |
if (call->flags & TRACE_EVENT_FL_WAS_ENABLED) clear_trace = true; |
bd1a5c849
|
1754 |
__trace_remove_event_call(call); |
6d723736e
|
1755 1756 |
} } |
52f6ad6dc
|
1757 |
up_write(&trace_event_sem); |
9456f0fa6
|
1758 1759 1760 |
/* * It is safest to reset the ring buffer if the module being unloaded |
873c642f5
|
1761 1762 1763 1764 1765 |
* registered any events that were used. The only worry is if * a new module gets loaded, and takes on the same id as the events * of this module. When printing out the buffer, traced events left * over from this module may be passed to the new module events and * unexpected results may occur. |
9456f0fa6
|
1766 |
*/ |
575380da8
|
1767 |
if (clear_trace) |
873c642f5
|
1768 |
tracing_reset_all_online_cpus(); |
6d723736e
|
1769 |
} |
61f919a12
|
1770 1771 |
static int trace_module_notify(struct notifier_block *self, unsigned long val, void *data) |
6d723736e
|
1772 1773 |
{ struct module *mod = data; |
a82274151
|
1774 |
mutex_lock(&trace_types_lock); |
6d723736e
|
1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 |
mutex_lock(&event_mutex); switch (val) { case MODULE_STATE_COMING: trace_module_add_events(mod); break; case MODULE_STATE_GOING: trace_module_remove_events(mod); break; } mutex_unlock(&event_mutex); |
a82274151
|
1785 |
mutex_unlock(&trace_types_lock); |
fd9949898
|
1786 |
|
1473e4417
|
1787 1788 |
return 0; } |
315326c16
|
1789 |
|
836d481ed
|
1790 1791 1792 1793 |
static struct notifier_block trace_module_nb = { .notifier_call = trace_module_notify, .priority = 0, }; |
61f919a12
|
1794 |
#endif /* CONFIG_MODULES */ |
1473e4417
|
1795 |
|
ae63b31e4
|
1796 1797 1798 1799 |
/* Create a new event directory structure for a trace directory. */ static void __trace_add_event_dirs(struct trace_array *tr) { |
ae63b31e4
|
1800 1801 1802 1803 |
struct ftrace_event_call *call; int ret; list_for_each_entry(call, &ftrace_events, list) { |
620a30e97
|
1804 |
ret = __trace_add_new_event(call, tr); |
ae63b31e4
|
1805 |
if (ret < 0) |
3448bac32
|
1806 1807 1808 |
pr_warn("Could not create directory for event %s ", ftrace_event_name(call)); |
ae63b31e4
|
1809 1810 |
} } |
7862ad184
|
1811 |
struct ftrace_event_file * |
3cd715de2
|
1812 1813 1814 1815 |
find_event_file(struct trace_array *tr, const char *system, const char *event) { struct ftrace_event_file *file; struct ftrace_event_call *call; |
de7b29739
|
1816 |
const char *name; |
3cd715de2
|
1817 1818 1819 1820 |
list_for_each_entry(file, &tr->events, list) { call = file->event_call; |
de7b29739
|
1821 |
name = ftrace_event_name(call); |
3cd715de2
|
1822 |
|
de7b29739
|
1823 |
if (!name || !call->class || !call->class->reg) |
3cd715de2
|
1824 1825 1826 1827 |
continue; if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) continue; |
de7b29739
|
1828 |
if (strcmp(event, name) == 0 && |
3cd715de2
|
1829 1830 1831 1832 1833 |
strcmp(system, call->class->system) == 0) return file; } return NULL; } |
2875a08b2
|
1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 |
#ifdef CONFIG_DYNAMIC_FTRACE /* Avoid typos */ #define ENABLE_EVENT_STR "enable_event" #define DISABLE_EVENT_STR "disable_event" struct event_probe_data { struct ftrace_event_file *file; unsigned long count; int ref; bool enable; }; |
3cd715de2
|
1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 |
static void event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data) { struct event_probe_data **pdata = (struct event_probe_data **)_data; struct event_probe_data *data = *pdata; if (!data) return; if (data->enable) clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags); else set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags); } static void event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data) { struct event_probe_data **pdata = (struct event_probe_data **)_data; struct event_probe_data *data = *pdata; if (!data) return; if (!data->count) return; /* Skip if the event is in a state we want to switch to */ if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)) return; if (data->count != -1) (data->count)--; event_enable_probe(ip, parent_ip, _data); } static int event_enable_print(struct seq_file *m, unsigned long ip, struct ftrace_probe_ops *ops, void *_data) { struct event_probe_data *data = _data; seq_printf(m, "%ps:", (void *)ip); seq_printf(m, "%s:%s:%s", data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, data->file->event_call->class->system, |
de7b29739
|
1894 |
ftrace_event_name(data->file->event_call)); |
3cd715de2
|
1895 1896 |
if (data->count == -1) |
fa6f0cc75
|
1897 1898 |
seq_puts(m, ":unlimited "); |
3cd715de2
|
1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 |
else seq_printf(m, ":count=%ld ", data->count); return 0; } static int event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip, void **_data) { struct event_probe_data **pdata = (struct event_probe_data **)_data; struct event_probe_data *data = *pdata; data->ref++; return 0; } static void event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip, void **_data) { struct event_probe_data **pdata = (struct event_probe_data **)_data; struct event_probe_data *data = *pdata; if (WARN_ON_ONCE(data->ref <= 0)) return; data->ref--; if (!data->ref) { /* Remove the SOFT_MODE flag */ __ftrace_event_enable_disable(data->file, 0, 1); module_put(data->file->event_call->mod); kfree(data); } *pdata = NULL; } static struct ftrace_probe_ops event_enable_probe_ops = { .func = event_enable_probe, .print = event_enable_print, .init = event_enable_init, .free = event_enable_free, }; static struct ftrace_probe_ops event_enable_count_probe_ops = { .func = event_enable_count_probe, .print = event_enable_print, .init = event_enable_init, .free = event_enable_free, }; static struct ftrace_probe_ops event_disable_probe_ops = { .func = event_enable_probe, .print = event_enable_print, .init = event_enable_init, .free = event_enable_free, }; static struct ftrace_probe_ops event_disable_count_probe_ops = { .func = event_enable_count_probe, .print = event_enable_print, .init = event_enable_init, .free = event_enable_free, }; static int event_enable_func(struct ftrace_hash *hash, char *glob, char *cmd, char *param, int enabled) { struct trace_array *tr = top_trace_array(); struct ftrace_event_file *file; struct ftrace_probe_ops *ops; struct event_probe_data *data; const char *system; const char *event; char *number; bool enable; int ret; |
dc81e5e3a
|
1978 1979 |
if (!tr) return -ENODEV; |
3cd715de2
|
1980 |
/* hash funcs only work with set_ftrace_filter */ |
8092e808a
|
1981 |
if (!enabled || !param) |
3cd715de2
|
1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 |
return -EINVAL; system = strsep(¶m, ":"); if (!param) return -EINVAL; event = strsep(¶m, ":"); mutex_lock(&event_mutex); ret = -EINVAL; file = find_event_file(tr, system, event); if (!file) goto out; enable = strcmp(cmd, ENABLE_EVENT_STR) == 0; if (enable) ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops; else ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops; if (glob[0] == '!') { unregister_ftrace_function_probe_func(glob+1, ops); ret = 0; goto out; } ret = -ENOMEM; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) goto out; data->enable = enable; data->count = -1; data->file = file; if (!param) goto out_reg; number = strsep(¶m, ":"); ret = -EINVAL; if (!strlen(number)) goto out_free; /* * We use the callback data field (which is a pointer) * as our counter. */ ret = kstrtoul(number, 0, &data->count); if (ret) goto out_free; out_reg: /* Don't let event modules unload while probe registered */ ret = try_module_get(file->event_call->mod); |
6ed010666
|
2039 2040 |
if (!ret) { ret = -EBUSY; |
3cd715de2
|
2041 |
goto out_free; |
6ed010666
|
2042 |
} |
3cd715de2
|
2043 2044 2045 2046 2047 |
ret = __ftrace_event_enable_disable(file, 1, 1); if (ret < 0) goto out_put; ret = register_ftrace_function_probe(glob, ops, data); |
ff305ded9
|
2048 2049 2050 2051 2052 |
/* * The above returns on success the # of functions enabled, * but if it didn't find any functions it returns zero. * Consider no functions a failure too. */ |
a5b85bd15
|
2053 2054 |
if (!ret) { ret = -ENOENT; |
3cd715de2
|
2055 |
goto out_disable; |
ff305ded9
|
2056 2057 2058 2059 |
} else if (ret < 0) goto out_disable; /* Just return zero, not the number of enabled functions */ ret = 0; |
3cd715de2
|
2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 |
out: mutex_unlock(&event_mutex); return ret; out_disable: __ftrace_event_enable_disable(file, 0, 1); out_put: module_put(file->event_call->mod); out_free: kfree(data); goto out; } static struct ftrace_func_command event_enable_cmd = { .name = ENABLE_EVENT_STR, .func = event_enable_func, }; static struct ftrace_func_command event_disable_cmd = { .name = DISABLE_EVENT_STR, .func = event_enable_func, }; static __init int register_event_cmds(void) { int ret; ret = register_ftrace_command(&event_enable_cmd); if (WARN_ON(ret < 0)) return ret; ret = register_ftrace_command(&event_disable_cmd); if (WARN_ON(ret < 0)) unregister_ftrace_command(&event_enable_cmd); return ret; } #else static inline int register_event_cmds(void) { return 0; } #endif /* CONFIG_DYNAMIC_FTRACE */ |
772482216
|
2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 |
/* * The top level array has already had its ftrace_event_file * descriptors created in order to allow for early events to * be recorded. This function is called after the debugfs has been * initialized, and we now have to create the files associated * to the events. */ static __init void __trace_early_add_event_dirs(struct trace_array *tr) { struct ftrace_event_file *file; int ret; list_for_each_entry(file, &tr->events, list) { |
620a30e97
|
2113 |
ret = event_create_dir(tr->event_dir, file); |
772482216
|
2114 |
if (ret < 0) |
3448bac32
|
2115 2116 2117 |
pr_warn("Could not create directory for event %s ", ftrace_event_name(file->event_call)); |
772482216
|
2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 |
} } /* * For early boot up, the top trace array requires to have * a list of events that can be enabled. This must be done before * the filesystem is set up in order to allow events to be traced * early. */ static __init void __trace_early_add_events(struct trace_array *tr) { struct ftrace_event_call *call; int ret; list_for_each_entry(call, &ftrace_events, list) { /* Early boot up should not have any modules loaded */ if (WARN_ON_ONCE(call->mod)) continue; ret = __trace_early_add_new_event(call, tr); if (ret < 0) |
3448bac32
|
2140 2141 2142 |
pr_warn("Could not create early event %s ", ftrace_event_name(call)); |
772482216
|
2143 2144 |
} } |
0c8916c34
|
2145 2146 2147 2148 2149 |
/* Remove the event directory structure for a trace directory. */ static void __trace_remove_event_dirs(struct trace_array *tr) { struct ftrace_event_file *file, *next; |
f6a84bdc7
|
2150 2151 |
list_for_each_entry_safe(file, next, &tr->events, list) remove_event_file_dir(file); |
0c8916c34
|
2152 |
} |
779c5e379
|
2153 |
static void __add_event_to_tracers(struct ftrace_event_call *call) |
ae63b31e4
|
2154 2155 |
{ struct trace_array *tr; |
620a30e97
|
2156 2157 |
list_for_each_entry(tr, &ftrace_trace_arrays, list) __trace_add_new_event(call, tr); |
ae63b31e4
|
2158 |
} |
e4a9ea5ee
|
2159 2160 |
extern struct ftrace_event_call *__start_ftrace_events[]; extern struct ftrace_event_call *__stop_ftrace_events[]; |
a59fd6027
|
2161 |
|
020e5f85c
|
2162 2163 2164 2165 2166 |
static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata; static __init int setup_trace_event(char *str) { strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE); |
55034cd6e
|
2167 2168 |
ring_buffer_expanded = true; tracing_selftest_disabled = true; |
020e5f85c
|
2169 2170 2171 2172 |
return 1; } __setup("trace_event=", setup_trace_event); |
772482216
|
2173 2174 2175 |
/* Expects to have event_mutex held when called */ static int create_event_toplevel_files(struct dentry *parent, struct trace_array *tr) |
ae63b31e4
|
2176 2177 2178 2179 2180 2181 2182 |
{ struct dentry *d_events; struct dentry *entry; entry = debugfs_create_file("set_event", 0644, parent, tr, &ftrace_set_event_fops); if (!entry) { |
3448bac32
|
2183 2184 |
pr_warn("Could not create debugfs 'set_event' entry "); |
ae63b31e4
|
2185 2186 2187 2188 |
return -ENOMEM; } d_events = debugfs_create_dir("events", parent); |
277ba0446
|
2189 |
if (!d_events) { |
3448bac32
|
2190 2191 |
pr_warn("Could not create debugfs 'events' directory "); |
277ba0446
|
2192 2193 |
return -ENOMEM; } |
ae63b31e4
|
2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 |
/* ring buffer internal formats */ trace_create_file("header_page", 0444, d_events, ring_buffer_print_page_header, &ftrace_show_header_fops); trace_create_file("header_event", 0444, d_events, ring_buffer_print_entry_header, &ftrace_show_header_fops); trace_create_file("enable", 0644, d_events, tr, &ftrace_tr_enable_fops); tr->event_dir = d_events; |
772482216
|
2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 |
return 0; } /** * event_trace_add_tracer - add a instance of a trace_array to events * @parent: The parent dentry to place the files/directories for events in * @tr: The trace array associated with these events * * When a new instance is created, it needs to set up its events * directory, as well as other files associated with events. It also * creates the event hierachry in the @parent/events directory. * * Returns 0 on success. */ int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr) { int ret; mutex_lock(&event_mutex); ret = create_event_toplevel_files(parent, tr); if (ret) goto out_unlock; |
52f6ad6dc
|
2232 |
down_write(&trace_event_sem); |
ae63b31e4
|
2233 |
__trace_add_event_dirs(tr); |
52f6ad6dc
|
2234 |
up_write(&trace_event_sem); |
277ba0446
|
2235 |
|
772482216
|
2236 |
out_unlock: |
277ba0446
|
2237 |
mutex_unlock(&event_mutex); |
ae63b31e4
|
2238 |
|
772482216
|
2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 |
return ret; } /* * The top trace array already had its file descriptors created. * Now the files themselves need to be created. */ static __init int early_event_add_tracer(struct dentry *parent, struct trace_array *tr) { int ret; mutex_lock(&event_mutex); ret = create_event_toplevel_files(parent, tr); if (ret) goto out_unlock; |
52f6ad6dc
|
2256 |
down_write(&trace_event_sem); |
772482216
|
2257 |
__trace_early_add_event_dirs(tr); |
52f6ad6dc
|
2258 |
up_write(&trace_event_sem); |
772482216
|
2259 2260 2261 2262 2263 |
out_unlock: mutex_unlock(&event_mutex); return ret; |
ae63b31e4
|
2264 |
} |
0c8916c34
|
2265 2266 |
int event_trace_del_tracer(struct trace_array *tr) { |
0c8916c34
|
2267 |
mutex_lock(&event_mutex); |
85f2b0826
|
2268 2269 |
/* Disable any event triggers and associated soft-disabled events */ clear_event_triggers(tr); |
2a6c24afa
|
2270 2271 |
/* Disable any running events */ __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0); |
3ccb01239
|
2272 2273 |
/* Access to events are within rcu_read_lock_sched() */ synchronize_sched(); |
52f6ad6dc
|
2274 |
down_write(&trace_event_sem); |
0c8916c34
|
2275 2276 |
__trace_remove_event_dirs(tr); debugfs_remove_recursive(tr->event_dir); |
52f6ad6dc
|
2277 |
up_write(&trace_event_sem); |
0c8916c34
|
2278 2279 2280 2281 2282 2283 2284 |
tr->event_dir = NULL; mutex_unlock(&event_mutex); return 0; } |
d1a291437
|
2285 2286 2287 2288 2289 2290 |
static __init int event_trace_memsetup(void) { field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC); file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC); return 0; } |
8781915ad
|
2291 2292 |
static __init int event_trace_enable(void) { |
ae63b31e4
|
2293 |
struct trace_array *tr = top_trace_array(); |
8781915ad
|
2294 2295 2296 2297 |
struct ftrace_event_call **iter, *call; char *buf = bootup_event_buf; char *token; int ret; |
dc81e5e3a
|
2298 2299 |
if (!tr) return -ENODEV; |
8781915ad
|
2300 2301 2302 2303 2304 2305 2306 |
for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) { call = *iter; ret = event_init(call); if (!ret) list_add(&call->list, &ftrace_events); } |
772482216
|
2307 2308 2309 2310 2311 2312 2313 |
/* * We need the top trace array to have a working set of trace * points at early init, before the debug files and directories * are created. Create the file entries now, and attach them * to the actual file dentries later. */ __trace_early_add_events(tr); |
8781915ad
|
2314 2315 2316 2317 2318 2319 2320 |
while (true) { token = strsep(&buf, ","); if (!token) break; if (!*token) continue; |
ae63b31e4
|
2321 |
ret = ftrace_set_clr_event(tr, token, 1); |
8781915ad
|
2322 2323 2324 2325 |
if (ret) pr_warn("Failed to enable trace event: %s ", token); } |
81698831b
|
2326 2327 |
trace_printk_start_comm(); |
3cd715de2
|
2328 |
register_event_cmds(); |
85f2b0826
|
2329 |
register_trigger_cmds(); |
8781915ad
|
2330 2331 |
return 0; } |
b77e38aa2
|
2332 2333 |
static __init int event_trace_init(void) { |
ae63b31e4
|
2334 |
struct trace_array *tr; |
b77e38aa2
|
2335 2336 |
struct dentry *d_tracer; struct dentry *entry; |
6d723736e
|
2337 |
int ret; |
b77e38aa2
|
2338 |
|
ae63b31e4
|
2339 |
tr = top_trace_array(); |
dc81e5e3a
|
2340 2341 |
if (!tr) return -ENODEV; |
ae63b31e4
|
2342 |
|
b77e38aa2
|
2343 2344 2345 |
d_tracer = tracing_init_dentry(); if (!d_tracer) return 0; |
2314c4ae1
|
2346 |
entry = debugfs_create_file("available_events", 0444, d_tracer, |
ae63b31e4
|
2347 |
tr, &ftrace_avail_fops); |
2314c4ae1
|
2348 |
if (!entry) |
3448bac32
|
2349 2350 |
pr_warn("Could not create debugfs 'available_events' entry "); |
2314c4ae1
|
2351 |
|
8728fe501
|
2352 |
if (trace_define_common_fields()) |
3448bac32
|
2353 |
pr_warn("tracing: Failed to allocate common fields"); |
8728fe501
|
2354 |
|
772482216
|
2355 |
ret = early_event_add_tracer(d_tracer, tr); |
ae63b31e4
|
2356 2357 |
if (ret) return ret; |
020e5f85c
|
2358 |
|
836d481ed
|
2359 |
#ifdef CONFIG_MODULES |
6d723736e
|
2360 |
ret = register_module_notifier(&trace_module_nb); |
553793769
|
2361 |
if (ret) |
3448bac32
|
2362 2363 |
pr_warn("Failed to register trace events module notifier "); |
836d481ed
|
2364 |
#endif |
b77e38aa2
|
2365 2366 |
return 0; } |
5f893b263
|
2367 2368 2369 2370 2371 2372 2373 |
void __init trace_event_init(void) { event_trace_memsetup(); init_ftrace_syscalls(); event_trace_enable(); } |
b77e38aa2
|
2374 |
fs_initcall(event_trace_init); |
#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

/* Exercise lock/irq/sleep paths so their trace events fire. */
static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

/* Kthread body: allocate, run work on each CPU, then park until stopped. */
static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
9ea21c1ec
|
2434 |
static __init void event_trace_self_tests(void) |
e6187007d
|
2435 |
{ |
ae63b31e4
|
2436 2437 |
struct ftrace_subsystem_dir *dir; struct ftrace_event_file *file; |
e6187007d
|
2438 2439 |
struct ftrace_event_call *call; struct event_subsystem *system; |
ae63b31e4
|
2440 |
struct trace_array *tr; |
e6187007d
|
2441 |
int ret; |
ae63b31e4
|
2442 |
tr = top_trace_array(); |
dc81e5e3a
|
2443 2444 |
if (!tr) return; |
ae63b31e4
|
2445 |
|
e6187007d
|
2446 2447 |
pr_info("Running tests on trace events: "); |
ae63b31e4
|
2448 2449 2450 |
list_for_each_entry(file, &tr->events, list) { call = file->event_call; |
e6187007d
|
2451 |
|
2239291ae
|
2452 2453 |
/* Only test those that have a probe */ if (!call->class || !call->class->probe) |
e6187007d
|
2454 |
continue; |
1f5a6b454
|
2455 2456 2457 2458 2459 2460 2461 |
/* * Testing syscall events here is pretty useless, but * we still do it if configured. But this is time consuming. * What we really need is a user thread to perform the * syscalls as we test. */ #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS |
8f0820183
|
2462 2463 |
if (call->class->system && strcmp(call->class->system, "syscalls") == 0) |
1f5a6b454
|
2464 2465 |
continue; #endif |
de7b29739
|
2466 |
pr_info("Testing event %s: ", ftrace_event_name(call)); |
e6187007d
|
2467 2468 2469 2470 2471 |
/* * If an event is already enabled, someone is using * it and the self test should not be on. */ |
ae63b31e4
|
2472 |
if (file->flags & FTRACE_EVENT_FL_ENABLED) { |
3448bac32
|
2473 2474 |
pr_warn("Enabled event during self test! "); |
e6187007d
|
2475 2476 2477 |
WARN_ON_ONCE(1); continue; } |
ae63b31e4
|
2478 |
ftrace_event_enable_disable(file, 1); |
e6187007d
|
2479 |
event_test_stuff(); |
ae63b31e4
|
2480 |
ftrace_event_enable_disable(file, 0); |
e6187007d
|
2481 2482 2483 2484 2485 2486 2487 2488 2489 |
pr_cont("OK "); } /* Now test at the sub system level */ pr_info("Running tests on trace event systems: "); |
ae63b31e4
|
2490 2491 2492 |
list_for_each_entry(dir, &tr->systems, list) { system = dir->subsystem; |
e6187007d
|
2493 2494 2495 2496 2497 2498 |
/* the ftrace system is special, skip it */ if (strcmp(system->name, "ftrace") == 0) continue; pr_info("Testing event system %s: ", system->name); |
ae63b31e4
|
2499 |
ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1); |
e6187007d
|
2500 |
if (WARN_ON_ONCE(ret)) { |
3448bac32
|
2501 2502 2503 |
pr_warn("error enabling system %s ", system->name); |
e6187007d
|
2504 2505 2506 2507 |
continue; } event_test_stuff(); |
ae63b31e4
|
2508 |
ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0); |
76bab1b78
|
2509 |
if (WARN_ON_ONCE(ret)) { |
3448bac32
|
2510 2511 2512 |
pr_warn("error disabling system %s ", system->name); |
76bab1b78
|
2513 2514 |
continue; } |
e6187007d
|
2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 |
pr_cont("OK "); } /* Test with all events enabled */ pr_info("Running tests on all trace events: "); pr_info("Testing all events: "); |
ae63b31e4
|
2525 |
ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1); |
e6187007d
|
2526 |
if (WARN_ON_ONCE(ret)) { |
3448bac32
|
2527 2528 |
pr_warn("error enabling all events "); |
9ea21c1ec
|
2529 |
return; |
e6187007d
|
2530 2531 2532 2533 2534 |
} event_test_stuff(); /* reset sysname */ |
ae63b31e4
|
2535 |
ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0); |
e6187007d
|
2536 |
if (WARN_ON_ONCE(ret)) { |
3448bac32
|
2537 2538 |
pr_warn("error disabling all events "); |
9ea21c1ec
|
2539 |
return; |
e6187007d
|
2540 2541 2542 2543 |
} pr_cont("OK "); |
9ea21c1ec
|
2544 2545 2546 |
} #ifdef CONFIG_FUNCTION_TRACER |
245b2e70e
|
2547 |
static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable); |
9ea21c1ec
|
2548 2549 |
static void |
2f5f6ad93
|
2550 |
function_test_events_call(unsigned long ip, unsigned long parent_ip, |
a1e2e31d1
|
2551 |
struct ftrace_ops *op, struct pt_regs *pt_regs) |
9ea21c1ec
|
2552 2553 |
{ struct ring_buffer_event *event; |
e77405ad8
|
2554 |
struct ring_buffer *buffer; |
9ea21c1ec
|
2555 2556 2557 |
struct ftrace_entry *entry; unsigned long flags; long disabled; |
9ea21c1ec
|
2558 2559 2560 2561 |
int cpu; int pc; pc = preempt_count(); |
5168ae50a
|
2562 |
preempt_disable_notrace(); |
9ea21c1ec
|
2563 |
cpu = raw_smp_processor_id(); |
245b2e70e
|
2564 |
disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu)); |
9ea21c1ec
|
2565 2566 2567 2568 2569 |
if (disabled != 1) goto out; local_save_flags(flags); |
e77405ad8
|
2570 2571 |
event = trace_current_buffer_lock_reserve(&buffer, TRACE_FN, sizeof(*entry), |
9ea21c1ec
|
2572 2573 2574 2575 2576 2577 |
flags, pc); if (!event) goto out; entry = ring_buffer_event_data(event); entry->ip = ip; entry->parent_ip = parent_ip; |
0d5c6e1c1
|
2578 |
trace_buffer_unlock_commit(buffer, event, flags, pc); |
9ea21c1ec
|
2579 2580 |
out: |
245b2e70e
|
2581 |
atomic_dec(&per_cpu(ftrace_test_event_disable, cpu)); |
5168ae50a
|
2582 |
preempt_enable_notrace(); |
9ea21c1ec
|
2583 2584 2585 2586 2587 |
} static struct ftrace_ops trace_ops __initdata = { .func = function_test_events_call, |
4740974a6
|
2588 |
.flags = FTRACE_OPS_FL_RECURSION_SAFE, |
9ea21c1ec
|
2589 2590 2591 2592 |
}; static __init void event_trace_self_test_with_function(void) { |
17bb615ad
|
2593 2594 2595 2596 2597 2598 2599 |
int ret; ret = register_ftrace_function(&trace_ops); if (WARN_ON(ret < 0)) { pr_info("Failed to enable function tracer for event tests "); return; } |
9ea21c1ec
|
2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 |
pr_info("Running tests again, along with the function tracer "); event_trace_self_tests(); unregister_ftrace_function(&trace_ops); } #else static __init void event_trace_self_test_with_function(void) { } #endif static __init int event_trace_self_tests_init(void) { |
020e5f85c
|
2613 2614 2615 2616 |
if (!tracing_selftest_disabled) { event_trace_self_tests(); event_trace_self_test_with_function(); } |
e6187007d
|
2617 2618 2619 |
return 0; } |
28d20e2d6
|
2620 |
late_initcall(event_trace_self_tests_init); |
e6187007d
|
2621 2622 |
#endif |