Commit 6712e299b7dc78aa4971b85e803435ee6d49a9dd
Exists in master and in 39 other branches

Merge branch 'tracing/ftrace' into auto-ftrace-next

Showing 8 changed files
include/linux/ftrace.h
kernel/Makefile
@@ -11,7 +11,7 @@
             hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
             notifier.o ksysfs.o pm_qos_params.o sched_clock.o
 
-CFLAGS_REMOVE_sched.o = -pg -mno-spe
+CFLAGS_REMOVE_sched.o = -mno-spe
 
 ifdef CONFIG_FTRACE
 # Do not trace debug files and internal ftrace files
kernel/trace/ftrace.c
@@ -1602,6 +1602,23 @@
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 /**
+ * ftrace_kill_atomic - kill ftrace from critical sections
+ *
+ * This function should be used by panic code. It stops ftrace
+ * but in a not so nice way. If you need to simply kill ftrace
+ * from a non-atomic section, use ftrace_kill.
+ */
+void ftrace_kill_atomic(void)
+{
+        ftrace_disabled = 1;
+        ftrace_enabled = 0;
+#ifdef CONFIG_DYNAMIC_FTRACE
+        ftraced_suspend = -1;
+#endif
+        clear_ftrace_function();
+}
+
+/**
  * ftrace_kill - totally shutdown ftrace
  *
  * This is a safety measure. If something was detected that seems
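
The new ftrace_kill_atomic() only flips flags and clears the traced-function pointer, so unlike ftrace_kill() it takes no locks and may be called from atomic context. A minimal sketch of a caller, assuming the prototype is exported through include/linux/ftrace.h (that hunk is not shown above); the panic-notifier names and init hook below are invented for illustration and are not part of this commit:

/* Illustration only: shut ftrace down from the panic path,
 * where sleeping is not allowed. */
#include <linux/kernel.h>       /* panic_notifier_list */
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/ftrace.h>       /* assumed to declare ftrace_kill_atomic() */

static int stop_ftrace_on_panic(struct notifier_block *nb,
                                unsigned long event, void *unused)
{
        ftrace_kill_atomic();   /* no mutexes taken, safe in this context */
        return NOTIFY_DONE;
}

static struct notifier_block ftrace_panic_nb = {
        .notifier_call = stop_ftrace_on_panic,
};

static int __init ftrace_panic_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &ftrace_panic_nb);
        return 0;
}
late_initcall(ftrace_panic_init);
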
kernel/trace/trace.c
@@ -96,6 +96,9 @@
 /* tracer_enabled is used to toggle activation of a tracer */
 static int tracer_enabled = 1;
 
+/* function tracing enabled */
+int ftrace_function_enabled;
+
 /*
  * trace_nr_entries is the number of entries that is allocated
  * for a buffer. Note, the number of entries is always rounded
@@ -134,6 +137,7 @@
 {
         int cpu;
 
+        ftrace_function_enabled = 0;
         if (tr->ctrl)
                 for_each_online_cpu(cpu)
                         tracing_reset(tr->data[cpu]);
@@ -1027,7 +1031,7 @@
         long disabled;
         int cpu;
 
-        if (unlikely(!tracer_enabled))
+        if (unlikely(!ftrace_function_enabled))
                 return;
 
         if (skip_trace(ip))
@@ -1052,11 +1056,15 @@
 
 void tracing_start_function_trace(void)
 {
+        ftrace_function_enabled = 0;
         register_ftrace_function(&trace_ops);
+        if (tracer_enabled)
+                ftrace_function_enabled = 1;
 }
 
 void tracing_stop_function_trace(void)
 {
+        ftrace_function_enabled = 0;
         unregister_ftrace_function(&trace_ops);
 }
 #endif
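
In the two hunks above, function tracing gets its own gate: the function-trace callback (the hunk at -1027) returns early unless ftrace_function_enabled is set, and the flag is raised only after register_ftrace_function() has installed the callback (and only if the tracer itself is enabled), then dropped again before the callback is torn down. A simplified, self-contained model of that ordering, with invented names and plain userspace C standing in for the kernel machinery:

#include <stdio.h>

static int function_enabled;                /* plays the role of ftrace_function_enabled */
static void (*active_hook)(unsigned long);  /* plays the role of the registered trace_ops */

static void trace_hook(unsigned long ip)
{
        if (!function_enabled)              /* mirrors the early exit in the callback */
                return;
        printf("traced ip=%#lx\n", ip);
}

static void start_function_trace(int tracer_enabled)
{
        function_enabled = 0;               /* keep the hook quiet while it is installed */
        active_hook = trace_hook;           /* stands in for register_ftrace_function() */
        if (tracer_enabled)
                function_enabled = 1;       /* open the gate only once everything is in place */
}

static void stop_function_trace(void)
{
        function_enabled = 0;               /* close the gate before tearing the hook down */
        active_hook = NULL;                 /* stands in for unregister_ftrace_function() */
}

int main(void)
{
        start_function_trace(1);
        if (active_hook)
                active_hook(0xc0ffee);      /* prints one trace line */
        stop_function_trace();
        return 0;
}
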
@@ -1383,7 +1391,7 @@
                 "server",
 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
                 "desktop",
-#elif defined(CONFIG_PREEMPT_DESKTOP)
+#elif defined(CONFIG_PREEMPT)
                 "preempt",
 #else
                 "unknown",
@@ -1892,8 +1900,10 @@
         m->private = iter;
 
         /* stop the trace while dumping */
-        if (iter->tr->ctrl)
+        if (iter->tr->ctrl) {
                 tracer_enabled = 0;
+                ftrace_function_enabled = 0;
+        }
 
         if (iter->trace && iter->trace->open)
                 iter->trace->open(iter);
@@ -1926,8 +1936,14 @@
                 iter->trace->close(iter);
 
         /* reenable tracing if it was previously enabled */
-        if (iter->tr->ctrl)
+        if (iter->tr->ctrl) {
                 tracer_enabled = 1;
+                /*
+                 * It is safe to enable function tracing even if it
+                 * isn't used
+                 */
+                ftrace_function_enabled = 1;
+        }
         mutex_unlock(&trace_types_lock);
 
         seq_release(inode, file);
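
The open/release hunks bracket the dump: both flags are cleared while the seq_file iterator walks the buffer, and the in-code comment notes that setting ftrace_function_enabled back to 1 on release is harmless even when no function tracer is registered, since the callback only runs once register_ftrace_function() has installed it. A compact userspace analogy of that pause/resume bracket (illustrative only, names invented):

#include <stdio.h>

static int tracer_on = 1;
static int function_on = 1;

static void dump_begin(int ctrl)
{
        if (ctrl) {                 /* only pause if tracing was actually running */
                tracer_on = 0;
                function_on = 0;
        }
}

static void dump_end(int ctrl)
{
        if (ctrl) {
                tracer_on = 1;
                function_on = 1;    /* harmless even if nothing consults this flag */
        }
}

int main(void)
{
        dump_begin(1);
        puts("reading buffer with producers paused");
        dump_end(1);
        printf("tracer_on=%d function_on=%d\n", tracer_on, function_on);
        return 0;
}
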
kernel/trace/trace.h
@@ -223,8 +223,6 @@
                               unsigned long parent_ip,
                               unsigned long flags);
 
-void tracing_start_function_trace(void);
-void tracing_stop_function_trace(void);
 void tracing_start_cmdline_record(void);
 void tracing_stop_cmdline_record(void);
 int register_tracer(struct tracer *type);
@@ -240,6 +238,14 @@
                                struct task_struct *tsk, int cpu);
 
 extern cycle_t ftrace_now(int cpu);
+
+#ifdef CONFIG_FTRACE
+void tracing_start_function_trace(void);
+void tracing_stop_function_trace(void);
+#else
+# define tracing_start_function_trace() do { } while (0)
+# define tracing_stop_function_trace() do { } while (0)
+#endif
 
 #ifdef CONFIG_CONTEXT_SWITCH_TRACER
 typedef void
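
The trace.h change moves the two function-trace prototypes behind CONFIG_FTRACE and adds empty do { } while (0) stand-ins for the non-FTRACE build, so call sites compile unchanged either way. A self-contained illustration of the same idiom (names invented; this is not the kernel header):

#include <stdio.h>

#define CONFIG_MYTRACE 1        /* comment out to exercise the stub path */

#ifdef CONFIG_MYTRACE
static void mytrace_start(void) { puts("tracing started"); }
static void mytrace_stop(void)  { puts("tracing stopped"); }
#else
/* Expands to an empty statement, so callers need no #ifdef. */
# define mytrace_start() do { } while (0)
# define mytrace_stop()  do { } while (0)
#endif

int main(void)
{
        mytrace_start();        /* compiles either way */
        mytrace_stop();
        return 0;
}
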
kernel/trace/trace_functions.c
kernel/trace/trace_sched_switch.c
@@ -227,14 +227,14 @@
 static void start_sched_trace(struct trace_array *tr)
 {
         sched_switch_reset(tr);
-        tracer_enabled = 1;
         tracing_start_cmdline_record();
+        tracer_enabled = 1;
 }
 
 static void stop_sched_trace(struct trace_array *tr)
 {
-        tracing_stop_cmdline_record();
         tracer_enabled = 0;
+        tracing_stop_cmdline_record();
 }
 
 static void sched_switch_trace_init(struct trace_array *tr)
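
The reorder above makes tracer_enabled the last thing raised on start and the first thing dropped on stop, so that a probe which consults the flag never runs ahead of the cmdline recording it relies on. A toy model of that invariant (not kernel code; names invented):

#include <assert.h>
#include <stdio.h>

static int enabled;            /* stands in for tracer_enabled */
static int recorder_ready;     /* stands in for the cmdline recorder */

static void probe(void)
{
        if (!enabled)
                return;
        assert(recorder_ready); /* the old ordering left a window where this could fail */
        puts("recorded context switch");
}

static void start_trace(void)
{
        recorder_ready = 1;     /* tracing_start_cmdline_record() */
        enabled = 1;            /* only now may the probe do work */
}

static void stop_trace(void)
{
        enabled = 0;            /* probe goes quiet first */
        recorder_ready = 0;     /* tracing_stop_cmdline_record() */
}

int main(void)
{
        start_trace();
        probe();
        stop_trace();
        probe();                /* silently ignored once disabled */
        return 0;
}
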