Commit b0fc494fae96a7089f3651cb451f461c7291244c
Committed by
Thomas Gleixner
1 parent
3d0833953e
Exists in
master
and in
4 other branches
ftrace: add ftrace_enabled sysctl to disable mcount function
This patch adds back the sysctl ftrace_enabled. This time it defaults to on if DYNAMIC_FTRACE is configured. When ftrace_enabled is disabled, the ftrace function is set to the stub return. If DYNAMIC_FTRACE is also configured, then on ftrace_enabled = 0 the registered ftrace functions will all be set to jmps, and no new calls to the ftrace recording function (used to find the ftrace call sites) will be made. Signed-off-by: Steven Rostedt <srostedt@redhat.com> Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Showing 3 changed files with 124 additions and 18 deletions Side-by-side Diff
include/linux/ftrace.h
... | ... | @@ -5,6 +5,12 @@ |
5 | 5 | |
6 | 6 | #include <linux/linkage.h> |
7 | 7 | |
8 | +extern int ftrace_enabled; | |
9 | +extern int | |
10 | +ftrace_enable_sysctl(struct ctl_table *table, int write, | |
11 | + struct file *filp, void __user *buffer, size_t *lenp, | |
12 | + loff_t *ppos); | |
13 | + | |
8 | 14 | typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip); |
9 | 15 | |
10 | 16 | struct ftrace_ops { |
kernel/sysctl.c
... | ... | @@ -46,6 +46,7 @@ |
46 | 46 | #include <linux/nfs_fs.h> |
47 | 47 | #include <linux/acpi.h> |
48 | 48 | #include <linux/reboot.h> |
49 | +#include <linux/ftrace.h> | |
49 | 50 | |
50 | 51 | #include <asm/uaccess.h> |
51 | 52 | #include <asm/processor.h> |
... | ... | @@ -455,6 +456,16 @@ |
455 | 456 | .mode = 0644, |
456 | 457 | .proc_handler = &proc_dointvec, |
457 | 458 | }, |
459 | +#ifdef CONFIG_FTRACE | |
460 | + { | |
461 | + .ctl_name = CTL_UNNUMBERED, | |
462 | + .procname = "ftrace_enabled", | |
463 | + .data = &ftrace_enabled, | |
464 | + .maxlen = sizeof(int), | |
465 | + .mode = 0644, | |
466 | + .proc_handler = &ftrace_enable_sysctl, | |
467 | + }, | |
468 | +#endif | |
458 | 469 | #ifdef CONFIG_KMOD |
459 | 470 | { |
460 | 471 | .ctl_name = KERN_MODPROBE, |
kernel/trace/ftrace.c
... | ... | @@ -20,12 +20,24 @@ |
20 | 20 | #include <linux/hardirq.h> |
21 | 21 | #include <linux/ftrace.h> |
22 | 22 | #include <linux/module.h> |
23 | +#include <linux/sysctl.h> | |
23 | 24 | #include <linux/hash.h> |
24 | 25 | #include <linux/list.h> |
25 | 26 | |
26 | 27 | #include "trace.h" |
27 | 28 | |
29 | +#ifdef CONFIG_DYNAMIC_FTRACE | |
30 | +# define FTRACE_ENABLED_INIT 1 | |
31 | +#else | |
32 | +# define FTRACE_ENABLED_INIT 0 | |
33 | +#endif | |
34 | + | |
35 | +int ftrace_enabled = FTRACE_ENABLED_INIT; | |
36 | +static int last_ftrace_enabled = FTRACE_ENABLED_INIT; | |
37 | + | |
28 | 38 | static DEFINE_SPINLOCK(ftrace_lock); |
39 | +static DEFINE_MUTEX(ftrace_sysctl_lock); | |
40 | + | |
29 | 41 | static struct ftrace_ops ftrace_list_end __read_mostly = |
30 | 42 | { |
31 | 43 | .func = ftrace_stub, |
... | ... | @@ -78,14 +90,16 @@ |
78 | 90 | smp_wmb(); |
79 | 91 | ftrace_list = ops; |
80 | 92 | |
81 | - /* | |
82 | - * For one func, simply call it directly. | |
83 | - * For more than one func, call the chain. | |
84 | - */ | |
85 | - if (ops->next == &ftrace_list_end) | |
86 | - ftrace_trace_function = ops->func; | |
87 | - else | |
88 | - ftrace_trace_function = ftrace_list_func; | |
93 | + if (ftrace_enabled) { | |
94 | + /* | |
95 | + * For one func, simply call it directly. | |
96 | + * For more than one func, call the chain. | |
97 | + */ | |
98 | + if (ops->next == &ftrace_list_end) | |
99 | + ftrace_trace_function = ops->func; | |
100 | + else | |
101 | + ftrace_trace_function = ftrace_list_func; | |
102 | + } | |
89 | 103 | |
90 | 104 | spin_unlock(&ftrace_lock); |
91 | 105 | |
... | ... | @@ -120,10 +134,12 @@ |
120 | 134 | |
121 | 135 | *p = (*p)->next; |
122 | 136 | |
123 | - /* If we only have one func left, then call that directly */ | |
124 | - if (ftrace_list == &ftrace_list_end || | |
125 | - ftrace_list->next == &ftrace_list_end) | |
126 | - ftrace_trace_function = ftrace_list->func; | |
137 | + if (ftrace_enabled) { | |
138 | + /* If we only have one func left, then call that directly */ | |
139 | + if (ftrace_list == &ftrace_list_end || | |
140 | + ftrace_list->next == &ftrace_list_end) | |
141 | + ftrace_trace_function = ftrace_list->func; | |
142 | + } | |
127 | 143 | |
128 | 144 | out: |
129 | 145 | spin_unlock(&ftrace_lock); |
... | ... | @@ -263,7 +279,8 @@ |
263 | 279 | goto out; |
264 | 280 | __unregister_ftrace_function(&ftrace_shutdown_ops); |
265 | 281 | |
266 | - ftrace_run_startup_code(); | |
282 | + if (ftrace_enabled) | |
283 | + ftrace_run_startup_code(); | |
267 | 284 | out: |
268 | 285 | mutex_unlock(&ftraced_lock); |
269 | 286 | } |
270 | 287 | |
... | ... | @@ -275,13 +292,32 @@ |
275 | 292 | if (ftraced_suspend) |
276 | 293 | goto out; |
277 | 294 | |
278 | - ftrace_run_shutdown_code(); | |
295 | + if (ftrace_enabled) | |
296 | + ftrace_run_shutdown_code(); | |
279 | 297 | |
280 | 298 | __register_ftrace_function(&ftrace_shutdown_ops); |
281 | 299 | out: |
282 | 300 | mutex_unlock(&ftraced_lock); |
283 | 301 | } |
284 | 302 | |
303 | +static void notrace ftrace_startup_sysctl(void) | |
304 | +{ | |
305 | + mutex_lock(&ftraced_lock); | |
306 | + /* ftraced_suspend is true if we want ftrace running */ | |
307 | + if (ftraced_suspend) | |
308 | + ftrace_run_startup_code(); | |
309 | + mutex_unlock(&ftraced_lock); | |
310 | +} | |
311 | + | |
312 | +static void notrace ftrace_shutdown_sysctl(void) | |
313 | +{ | |
314 | + mutex_lock(&ftraced_lock); | |
315 | + /* ftraced_suspend is true if ftrace is running */ | |
316 | + if (ftraced_suspend) | |
317 | + ftrace_run_shutdown_code(); | |
318 | + mutex_unlock(&ftraced_lock); | |
319 | +} | |
320 | + | |
285 | 321 | static cycle_t ftrace_update_time; |
286 | 322 | static unsigned long ftrace_update_cnt; |
287 | 323 | unsigned long ftrace_update_tot_cnt; |
288 | 324 | |
... | ... | @@ -341,8 +377,9 @@ |
341 | 377 | /* check once a second */ |
342 | 378 | schedule_timeout(HZ); |
343 | 379 | |
380 | + mutex_lock(&ftrace_sysctl_lock); | |
344 | 381 | mutex_lock(&ftraced_lock); |
345 | - if (ftraced_trigger && !ftraced_suspend) { | |
382 | + if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) { | |
346 | 383 | ftrace_record_suspend++; |
347 | 384 | ftrace_update_code(); |
348 | 385 | usecs = nsecs_to_usecs(ftrace_update_time); |
... | ... | @@ -360,6 +397,7 @@ |
360 | 397 | ftrace_record_suspend--; |
361 | 398 | } |
362 | 399 | mutex_unlock(&ftraced_lock); |
400 | + mutex_unlock(&ftrace_sysctl_lock); | |
363 | 401 | |
364 | 402 | ftrace_shutdown_replenish(); |
365 | 403 | |
... | ... | @@ -389,8 +427,10 @@ |
389 | 427 | |
390 | 428 | core_initcall(ftrace_shutdown_init); |
391 | 429 | #else |
392 | -# define ftrace_startup() do { } while (0) | |
393 | -# define ftrace_shutdown() do { } while (0) | |
430 | +# define ftrace_startup() do { } while (0) | |
431 | +# define ftrace_shutdown() do { } while (0) | |
432 | +# define ftrace_startup_sysctl() do { } while (0) | |
433 | +# define ftrace_shutdown_sysctl() do { } while (0) | |
394 | 434 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
395 | 435 | |
396 | 436 | /** |
397 | 437 | |
... | ... | @@ -406,9 +446,15 @@ |
406 | 446 | */ |
407 | 447 | int register_ftrace_function(struct ftrace_ops *ops) |
408 | 448 | { |
449 | + int ret; | |
450 | + | |
451 | + mutex_lock(&ftrace_sysctl_lock); | |
409 | 452 | ftrace_startup(); |
410 | 453 | |
411 | - return __register_ftrace_function(ops); | |
454 | + ret = __register_ftrace_function(ops); | |
455 | + mutex_unlock(&ftrace_sysctl_lock); | |
456 | + | |
457 | + return ret; | |
412 | 458 | } |
413 | 459 | |
414 | 460 | /** |
415 | 461 | |
... | ... | @@ -421,11 +467,54 @@ |
421 | 467 | { |
422 | 468 | int ret; |
423 | 469 | |
470 | + mutex_lock(&ftrace_sysctl_lock); | |
424 | 471 | ret = __unregister_ftrace_function(ops); |
425 | 472 | |
426 | 473 | if (ftrace_list == &ftrace_list_end) |
427 | 474 | ftrace_shutdown(); |
428 | 475 | |
476 | + mutex_unlock(&ftrace_sysctl_lock); | |
477 | + | |
478 | + return ret; | |
479 | +} | |
480 | + | |
481 | +notrace int | |
482 | +ftrace_enable_sysctl(struct ctl_table *table, int write, | |
483 | + struct file *filp, void __user *buffer, size_t *lenp, | |
484 | + loff_t *ppos) | |
485 | +{ | |
486 | + int ret; | |
487 | + | |
488 | + mutex_lock(&ftrace_sysctl_lock); | |
489 | + | |
490 | + ret = proc_dointvec(table, write, filp, buffer, lenp, ppos); | |
491 | + | |
492 | + if (ret || !write || (last_ftrace_enabled == ftrace_enabled)) | |
493 | + goto out; | |
494 | + | |
495 | + last_ftrace_enabled = ftrace_enabled; | |
496 | + | |
497 | + if (ftrace_enabled) { | |
498 | + | |
499 | + ftrace_startup_sysctl(); | |
500 | + | |
501 | + /* we are starting ftrace again */ | |
502 | + if (ftrace_list != &ftrace_list_end) { | |
503 | + if (ftrace_list->next == &ftrace_list_end) | |
504 | + ftrace_trace_function = ftrace_list->func; | |
505 | + else | |
506 | + ftrace_trace_function = ftrace_list_func; | |
507 | + } | |
508 | + | |
509 | + } else { | |
510 | + /* stopping ftrace calls (just send to ftrace_stub) */ | |
511 | + ftrace_trace_function = ftrace_stub; | |
512 | + | |
513 | + ftrace_shutdown_sysctl(); | |
514 | + } | |
515 | + | |
516 | + out: | |
517 | + mutex_unlock(&ftrace_sysctl_lock); | |
429 | 518 | return ret; |
430 | 519 | } |