Commit d562aff93bfb530b0992141500a402d17081189d
Committed by: Steven Rostedt
Parent: 38de93abec
tracing: Add support for SOFT_DISABLE to syscall events

The original SOFT_DISABLE patches didn't add support for soft disable
of syscall events; this adds it.

Add an array of ftrace_event_file pointers indexed by syscall number
to the trace array and remove the existing enabled bitmaps, which as a
result are now redundant. The ftrace_event_file structs in turn
contain the soft disable flags we need for per-syscall soft disable
accounting.

Adding ftrace_event_files also means we can remove the USE_CALL_FILTER
bit, thus enabling multibuffer filter support for syscall events.

Link: http://lkml.kernel.org/r/6e72b566e85d8df8042f133efbc6c30e21fb017e.1382620672.git.tom.zanussi@linux.intel.com

Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
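For orientation, below is a minimal standalone model of the three per-instance
states this change gives each syscall event: unregistered (NULL slot), enabled,
and soft-disabled. This is a userspace sketch, not kernel code: C11 atomics
stand in for rcu_assign_pointer()/rcu_dereference_sched(), a plain flags word
stands in for the ftrace_event_file flag bits, and every name in it is
illustrative rather than kernel API.

/*
 * Standalone model (illustrative only) of the per-syscall soft-disable
 * fast path introduced by this patch.  The kernel primitives
 * rcu_dereference_sched()/rcu_assign_pointer() and test_bit() are
 * stood in for by C11 atomics and a plain flags word.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NR_SYSCALLS      8
#define FL_SOFT_DISABLED (1UL << 0)  /* models FTRACE_EVENT_FL_SOFT_DISABLED_BIT */

struct event_file {
	unsigned long flags;
};

/* models tr->enter_syscall_files[]: a non-NULL slot means "enabled" */
static _Atomic(struct event_file *) enter_files[NR_SYSCALLS];

static void syscall_enter_handler(int syscall_nr)
{
	struct event_file *file;

	if (syscall_nr < 0 || syscall_nr >= NR_SYSCALLS)
		return;

	/* one acquire load answers both "registered?" and "which file?" */
	file = atomic_load_explicit(&enter_files[syscall_nr],
				    memory_order_acquire);
	if (!file)
		return;				/* not enabled for this instance */

	if (file->flags & FL_SOFT_DISABLED)
		return;				/* registered, output suppressed */

	printf("record syscall %d\n", syscall_nr);
}

int main(void)
{
	static struct event_file f = { .flags = 0 };

	syscall_enter_handler(3);		/* slot is NULL: nothing happens */
	atomic_store(&enter_files[3], &f);	/* models rcu_assign_pointer()   */
	syscall_enter_handler(3);		/* enabled: records the event    */
	f.flags |= FL_SOFT_DISABLED;		/* models setting the soft bit   */
	syscall_enter_handler(3);		/* soft-disabled: suppressed     */
	return 0;
}

The point of the pointer encoding is that the fast path learns both "enabled at
all" and "soft-disabled" from a single dereference, without consulting a
separate bitmap, which is also why the bitmaps in trace.h below become
redundant.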
Showing 3 changed files with 36 additions and 14 deletions
include/linux/syscalls.h

@@ -120,7 +120,7 @@
 	.class			= &event_class_syscall_enter,	\
 	.event.funcs		= &enter_syscall_print_funcs,	\
 	.data			= (void *)&__syscall_meta_##sname,\
-	.flags			= TRACE_EVENT_FL_CAP_ANY | TRACE_EVENT_FL_USE_CALL_FILTER,\
+	.flags			= TRACE_EVENT_FL_CAP_ANY,	\
 };								\
 static struct ftrace_event_call __used			\
   __attribute__((section("_ftrace_events")))		\

@@ -134,7 +134,7 @@
 	.class			= &event_class_syscall_exit,	\
 	.event.funcs		= &exit_syscall_print_funcs,	\
 	.data			= (void *)&__syscall_meta_##sname,\
-	.flags			= TRACE_EVENT_FL_CAP_ANY | TRACE_EVENT_FL_USE_CALL_FILTER,\
+	.flags			= TRACE_EVENT_FL_CAP_ANY,	\
 };								\
 static struct ftrace_event_call __used			\
   __attribute__((section("_ftrace_events")))		\
kernel/trace/trace.h

@@ -192,8 +192,8 @@
 #ifdef CONFIG_FTRACE_SYSCALLS
 	int			sys_refcount_enter;
 	int			sys_refcount_exit;
-	DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
-	DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
+	struct ftrace_event_file *enter_syscall_files[NR_syscalls];
+	struct ftrace_event_file *exit_syscall_files[NR_syscalls];
 #endif
 	int			stop_count;
 	int			clock_id;
kernel/trace/trace_syscalls.c

@@ -302,6 +302,7 @@
 static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 {
 	struct trace_array *tr = data;
+	struct ftrace_event_file *ftrace_file;
 	struct syscall_trace_enter *entry;
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;

@@ -314,9 +315,15 @@
 	syscall_nr = trace_get_syscall_nr(current, regs);
 	if (syscall_nr < 0)
 		return;
-	if (!test_bit(syscall_nr, tr->enabled_enter_syscalls))
+
+	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
+	ftrace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
+	if (!ftrace_file)
 		return;

+	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
+		return;
+
 	sys_data = syscall_nr_to_meta(syscall_nr);
 	if (!sys_data)
 		return;

@@ -336,8 +343,7 @@
 	entry->nr = syscall_nr;
 	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

-	if (!call_filter_check_discard(sys_data->enter_event, entry,
-				       buffer, event))
+	if (!filter_check_discard(ftrace_file, entry, buffer, event))
 		trace_current_buffer_unlock_commit(buffer, event,
 						   irq_flags, pc);
 }

@@ -345,6 +351,7 @@
 static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 {
 	struct trace_array *tr = data;
+	struct ftrace_event_file *ftrace_file;
 	struct syscall_trace_exit *entry;
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;

@@ -356,9 +363,15 @@
 	syscall_nr = trace_get_syscall_nr(current, regs);
 	if (syscall_nr < 0)
 		return;
-	if (!test_bit(syscall_nr, tr->enabled_exit_syscalls))
+
+	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
+	ftrace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
+	if (!ftrace_file)
 		return;

+	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
+		return;
+
 	sys_data = syscall_nr_to_meta(syscall_nr);
 	if (!sys_data)
 		return;

@@ -377,8 +390,7 @@
 	entry->nr = syscall_nr;
 	entry->ret = syscall_get_return_value(current, regs);

-	if (!call_filter_check_discard(sys_data->exit_event, entry,
-				       buffer, event))
+	if (!filter_check_discard(ftrace_file, entry, buffer, event))
 		trace_current_buffer_unlock_commit(buffer, event,
 						   irq_flags, pc);
 }

@@ -397,7 +409,7 @@
 	if (!tr->sys_refcount_enter)
 		ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
 	if (!ret) {
-		set_bit(num, tr->enabled_enter_syscalls);
+		rcu_assign_pointer(tr->enter_syscall_files[num], file);
 		tr->sys_refcount_enter++;
 	}
 	mutex_unlock(&syscall_trace_lock);

@@ -415,10 +427,15 @@
 		return;
 	mutex_lock(&syscall_trace_lock);
 	tr->sys_refcount_enter--;
-	clear_bit(num, tr->enabled_enter_syscalls);
+	rcu_assign_pointer(tr->enter_syscall_files[num], NULL);
 	if (!tr->sys_refcount_enter)
 		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
 	mutex_unlock(&syscall_trace_lock);
+	/*
+	 * Callers expect the event to be completely disabled on
+	 * return, so wait for current handlers to finish.
+	 */
+	synchronize_sched();
 }

 static int reg_event_syscall_exit(struct ftrace_event_file *file,

@@ -435,7 +452,7 @@
 	if (!tr->sys_refcount_exit)
 		ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
 	if (!ret) {
-		set_bit(num, tr->enabled_exit_syscalls);
+		rcu_assign_pointer(tr->exit_syscall_files[num], file);
 		tr->sys_refcount_exit++;
 	}
 	mutex_unlock(&syscall_trace_lock);

@@ -453,10 +470,15 @@
 		return;
 	mutex_lock(&syscall_trace_lock);
 	tr->sys_refcount_exit--;
-	clear_bit(num, tr->enabled_exit_syscalls);
+	rcu_assign_pointer(tr->exit_syscall_files[num], NULL);
 	if (!tr->sys_refcount_exit)
 		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
 	mutex_unlock(&syscall_trace_lock);
+	/*
+	 * Callers expect the event to be completely disabled on
+	 * return, so wait for current handlers to finish.
+	 */
+	synchronize_sched();
 }

 static int __init init_syscall_trace(struct ftrace_event_call *call)
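One subtlety worth calling out in the unregister paths: clearing the slot is
not by itself enough to disable the event, because a handler on another CPU
may already hold the pointer it read before the clear. Condensed to its
essentials, the publish/retract lifecycle the patch implements looks like the
sketch below. Kernel context is assumed, and "slot", "enable", "disable" and
"handler" are illustrative stand-ins for tr->enter_syscall_files[num] and the
functions in the diff above, not kernel API.

/*
 * Condensed sketch of the publish/retract lifecycle in this patch.
 * Kernel context assumed (<linux/rcupdate.h>); names are illustrative.
 */
static struct ftrace_event_file __rcu *slot;

static void enable(struct ftrace_event_file *file)
{
	rcu_assign_pointer(slot, file);	/* publish: handlers may now see it */
}

static void disable(void)
{
	rcu_assign_pointer(slot, NULL);	/* new handler invocations see NULL */
	synchronize_sched();		/* wait for handlers already past the
					 * NULL check to finish before the
					 * caller tears the event down */
}

/* runs inside the tracepoint's rcu_read_lock_sched (__DO_TRACE) */
static void handler(void)
{
	struct ftrace_event_file *file = rcu_dereference_sched(slot);

	if (!file)
		return;
	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags))
		return;
	/* ... record the event ... */
}

Writers serialize on syscall_trace_lock; readers never take it. What makes
teardown safe is the combination of the NULL check in the handler and the
synchronize_sched() grace period, which covers the rcu_read_lock_sched
section that __DO_TRACE wraps around every handler invocation.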