Commit 915a0b575fdb2376135ed9334b3ccb1eb51db622
Exists in
master
and in
7 other branches
Merge branch 'tip/tracing/core' of git://git.kernel.org/pub/scm/linux/kernel/git…
…/rostedt/linux-2.6-trace into tracing/urgent
Showing 4 changed files Inline Diff
kernel/trace/ftrace.c
1 | /* | 1 | /* |
2 | * Infrastructure for profiling code inserted by 'gcc -pg'. | 2 | * Infrastructure for profiling code inserted by 'gcc -pg'. |
3 | * | 3 | * |
4 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> | 4 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> |
5 | * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com> | 5 | * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com> |
6 | * | 6 | * |
7 | * Originally ported from the -rt patch by: | 7 | * Originally ported from the -rt patch by: |
8 | * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com> | 8 | * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com> |
9 | * | 9 | * |
10 | * Based on code in the latency_tracer, that is: | 10 | * Based on code in the latency_tracer, that is: |
11 | * | 11 | * |
12 | * Copyright (C) 2004-2006 Ingo Molnar | 12 | * Copyright (C) 2004-2006 Ingo Molnar |
13 | * Copyright (C) 2004 William Lee Irwin III | 13 | * Copyright (C) 2004 William Lee Irwin III |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/stop_machine.h> | 16 | #include <linux/stop_machine.h> |
17 | #include <linux/clocksource.h> | 17 | #include <linux/clocksource.h> |
18 | #include <linux/kallsyms.h> | 18 | #include <linux/kallsyms.h> |
19 | #include <linux/seq_file.h> | 19 | #include <linux/seq_file.h> |
20 | #include <linux/suspend.h> | 20 | #include <linux/suspend.h> |
21 | #include <linux/debugfs.h> | 21 | #include <linux/debugfs.h> |
22 | #include <linux/hardirq.h> | 22 | #include <linux/hardirq.h> |
23 | #include <linux/kthread.h> | 23 | #include <linux/kthread.h> |
24 | #include <linux/uaccess.h> | 24 | #include <linux/uaccess.h> |
25 | #include <linux/ftrace.h> | 25 | #include <linux/ftrace.h> |
26 | #include <linux/sysctl.h> | 26 | #include <linux/sysctl.h> |
27 | #include <linux/ctype.h> | 27 | #include <linux/ctype.h> |
28 | #include <linux/list.h> | 28 | #include <linux/list.h> |
29 | #include <linux/hash.h> | 29 | #include <linux/hash.h> |
30 | 30 | ||
31 | #include <trace/events/sched.h> | 31 | #include <trace/events/sched.h> |
32 | 32 | ||
33 | #include <asm/ftrace.h> | 33 | #include <asm/ftrace.h> |
34 | #include <asm/setup.h> | 34 | #include <asm/setup.h> |
35 | 35 | ||
36 | #include "trace_output.h" | 36 | #include "trace_output.h" |
37 | #include "trace_stat.h" | 37 | #include "trace_stat.h" |
38 | 38 | ||
39 | #define FTRACE_WARN_ON(cond) \ | 39 | #define FTRACE_WARN_ON(cond) \ |
40 | do { \ | 40 | do { \ |
41 | if (WARN_ON(cond)) \ | 41 | if (WARN_ON(cond)) \ |
42 | ftrace_kill(); \ | 42 | ftrace_kill(); \ |
43 | } while (0) | 43 | } while (0) |
44 | 44 | ||
45 | #define FTRACE_WARN_ON_ONCE(cond) \ | 45 | #define FTRACE_WARN_ON_ONCE(cond) \ |
46 | do { \ | 46 | do { \ |
47 | if (WARN_ON_ONCE(cond)) \ | 47 | if (WARN_ON_ONCE(cond)) \ |
48 | ftrace_kill(); \ | 48 | ftrace_kill(); \ |
49 | } while (0) | 49 | } while (0) |
50 | 50 | ||
51 | /* hash bits for specific function selection */ | 51 | /* hash bits for specific function selection */ |
52 | #define FTRACE_HASH_BITS 7 | 52 | #define FTRACE_HASH_BITS 7 |
53 | #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS) | 53 | #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS) |
54 | 54 | ||
55 | /* ftrace_enabled is a method to turn ftrace on or off */ | 55 | /* ftrace_enabled is a method to turn ftrace on or off */ |
56 | int ftrace_enabled __read_mostly; | 56 | int ftrace_enabled __read_mostly; |
57 | static int last_ftrace_enabled; | 57 | static int last_ftrace_enabled; |
58 | 58 | ||
59 | /* Quick disabling of function tracer. */ | 59 | /* Quick disabling of function tracer. */ |
60 | int function_trace_stop; | 60 | int function_trace_stop; |
61 | 61 | ||
62 | /* List for set_ftrace_pid's pids. */ | 62 | /* List for set_ftrace_pid's pids. */ |
63 | LIST_HEAD(ftrace_pids); | 63 | LIST_HEAD(ftrace_pids); |
64 | struct ftrace_pid { | 64 | struct ftrace_pid { |
65 | struct list_head list; | 65 | struct list_head list; |
66 | struct pid *pid; | 66 | struct pid *pid; |
67 | }; | 67 | }; |
68 | 68 | ||
69 | /* | 69 | /* |
70 | * ftrace_disabled is set when an anomaly is discovered. | 70 | * ftrace_disabled is set when an anomaly is discovered. |
71 | * ftrace_disabled is much stronger than ftrace_enabled. | 71 | * ftrace_disabled is much stronger than ftrace_enabled. |
72 | */ | 72 | */ |
73 | static int ftrace_disabled __read_mostly; | 73 | static int ftrace_disabled __read_mostly; |
74 | 74 | ||
75 | static DEFINE_MUTEX(ftrace_lock); | 75 | static DEFINE_MUTEX(ftrace_lock); |
76 | 76 | ||
77 | static struct ftrace_ops ftrace_list_end __read_mostly = | 77 | static struct ftrace_ops ftrace_list_end __read_mostly = |
78 | { | 78 | { |
79 | .func = ftrace_stub, | 79 | .func = ftrace_stub, |
80 | }; | 80 | }; |
81 | 81 | ||
82 | static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; | 82 | static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; |
83 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; | 83 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; |
84 | ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; | 84 | ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; |
85 | ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; | 85 | ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; |
86 | 86 | ||
87 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
88 | static int ftrace_set_func(unsigned long *array, int *idx, char *buffer); | ||
89 | #endif | ||
90 | |||
91 | static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) | 87 | static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) |
92 | { | 88 | { |
93 | struct ftrace_ops *op = ftrace_list; | 89 | struct ftrace_ops *op = ftrace_list; |
94 | 90 | ||
95 | /* in case someone actually ports this to alpha! */ | 91 | /* in case someone actually ports this to alpha! */ |
96 | read_barrier_depends(); | 92 | read_barrier_depends(); |
97 | 93 | ||
98 | while (op != &ftrace_list_end) { | 94 | while (op != &ftrace_list_end) { |
99 | /* silly alpha */ | 95 | /* silly alpha */ |
100 | read_barrier_depends(); | 96 | read_barrier_depends(); |
101 | op->func(ip, parent_ip); | 97 | op->func(ip, parent_ip); |
102 | op = op->next; | 98 | op = op->next; |
103 | }; | 99 | }; |
104 | } | 100 | } |
105 | 101 | ||
106 | static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip) | 102 | static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip) |
107 | { | 103 | { |
108 | if (!test_tsk_trace_trace(current)) | 104 | if (!test_tsk_trace_trace(current)) |
109 | return; | 105 | return; |
110 | 106 | ||
111 | ftrace_pid_function(ip, parent_ip); | 107 | ftrace_pid_function(ip, parent_ip); |
112 | } | 108 | } |
113 | 109 | ||
114 | static void set_ftrace_pid_function(ftrace_func_t func) | 110 | static void set_ftrace_pid_function(ftrace_func_t func) |
115 | { | 111 | { |
116 | /* do not set ftrace_pid_function to itself! */ | 112 | /* do not set ftrace_pid_function to itself! */ |
117 | if (func != ftrace_pid_func) | 113 | if (func != ftrace_pid_func) |
118 | ftrace_pid_function = func; | 114 | ftrace_pid_function = func; |
119 | } | 115 | } |
120 | 116 | ||
121 | /** | 117 | /** |
122 | * clear_ftrace_function - reset the ftrace function | 118 | * clear_ftrace_function - reset the ftrace function |
123 | * | 119 | * |
124 | * This NULLs the ftrace function and in essence stops | 120 | * This NULLs the ftrace function and in essence stops |
125 | * tracing. There may be lag | 121 | * tracing. There may be lag |
126 | */ | 122 | */ |
127 | void clear_ftrace_function(void) | 123 | void clear_ftrace_function(void) |
128 | { | 124 | { |
129 | ftrace_trace_function = ftrace_stub; | 125 | ftrace_trace_function = ftrace_stub; |
130 | __ftrace_trace_function = ftrace_stub; | 126 | __ftrace_trace_function = ftrace_stub; |
131 | ftrace_pid_function = ftrace_stub; | 127 | ftrace_pid_function = ftrace_stub; |
132 | } | 128 | } |
133 | 129 | ||
134 | #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | 130 | #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST |
135 | /* | 131 | /* |
136 | * For those archs that do not test ftrace_trace_stop in their | 132 | * For those archs that do not test ftrace_trace_stop in their |
137 | * mcount call site, we need to do it from C. | 133 | * mcount call site, we need to do it from C. |
138 | */ | 134 | */ |
139 | static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip) | 135 | static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip) |
140 | { | 136 | { |
141 | if (function_trace_stop) | 137 | if (function_trace_stop) |
142 | return; | 138 | return; |
143 | 139 | ||
144 | __ftrace_trace_function(ip, parent_ip); | 140 | __ftrace_trace_function(ip, parent_ip); |
145 | } | 141 | } |
146 | #endif | 142 | #endif |
147 | 143 | ||
148 | static int __register_ftrace_function(struct ftrace_ops *ops) | 144 | static int __register_ftrace_function(struct ftrace_ops *ops) |
149 | { | 145 | { |
150 | ops->next = ftrace_list; | 146 | ops->next = ftrace_list; |
151 | /* | 147 | /* |
152 | * We are entering ops into the ftrace_list but another | 148 | * We are entering ops into the ftrace_list but another |
153 | * CPU might be walking that list. We need to make sure | 149 | * CPU might be walking that list. We need to make sure |
154 | * the ops->next pointer is valid before another CPU sees | 150 | * the ops->next pointer is valid before another CPU sees |
155 | * the ops pointer included into the ftrace_list. | 151 | * the ops pointer included into the ftrace_list. |
156 | */ | 152 | */ |
157 | smp_wmb(); | 153 | smp_wmb(); |
158 | ftrace_list = ops; | 154 | ftrace_list = ops; |
159 | 155 | ||
160 | if (ftrace_enabled) { | 156 | if (ftrace_enabled) { |
161 | ftrace_func_t func; | 157 | ftrace_func_t func; |
162 | 158 | ||
163 | if (ops->next == &ftrace_list_end) | 159 | if (ops->next == &ftrace_list_end) |
164 | func = ops->func; | 160 | func = ops->func; |
165 | else | 161 | else |
166 | func = ftrace_list_func; | 162 | func = ftrace_list_func; |
167 | 163 | ||
168 | if (!list_empty(&ftrace_pids)) { | 164 | if (!list_empty(&ftrace_pids)) { |
169 | set_ftrace_pid_function(func); | 165 | set_ftrace_pid_function(func); |
170 | func = ftrace_pid_func; | 166 | func = ftrace_pid_func; |
171 | } | 167 | } |
172 | 168 | ||
173 | /* | 169 | /* |
174 | * For one func, simply call it directly. | 170 | * For one func, simply call it directly. |
175 | * For more than one func, call the chain. | 171 | * For more than one func, call the chain. |
176 | */ | 172 | */ |
177 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | 173 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST |
178 | ftrace_trace_function = func; | 174 | ftrace_trace_function = func; |
179 | #else | 175 | #else |
180 | __ftrace_trace_function = func; | 176 | __ftrace_trace_function = func; |
181 | ftrace_trace_function = ftrace_test_stop_func; | 177 | ftrace_trace_function = ftrace_test_stop_func; |
182 | #endif | 178 | #endif |
183 | } | 179 | } |
184 | 180 | ||
185 | return 0; | 181 | return 0; |
186 | } | 182 | } |
187 | 183 | ||
188 | static int __unregister_ftrace_function(struct ftrace_ops *ops) | 184 | static int __unregister_ftrace_function(struct ftrace_ops *ops) |
189 | { | 185 | { |
190 | struct ftrace_ops **p; | 186 | struct ftrace_ops **p; |
191 | 187 | ||
192 | /* | 188 | /* |
193 | * If we are removing the last function, then simply point | 189 | * If we are removing the last function, then simply point |
194 | * to the ftrace_stub. | 190 | * to the ftrace_stub. |
195 | */ | 191 | */ |
196 | if (ftrace_list == ops && ops->next == &ftrace_list_end) { | 192 | if (ftrace_list == ops && ops->next == &ftrace_list_end) { |
197 | ftrace_trace_function = ftrace_stub; | 193 | ftrace_trace_function = ftrace_stub; |
198 | ftrace_list = &ftrace_list_end; | 194 | ftrace_list = &ftrace_list_end; |
199 | return 0; | 195 | return 0; |
200 | } | 196 | } |
201 | 197 | ||
202 | for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next) | 198 | for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next) |
203 | if (*p == ops) | 199 | if (*p == ops) |
204 | break; | 200 | break; |
205 | 201 | ||
206 | if (*p != ops) | 202 | if (*p != ops) |
207 | return -1; | 203 | return -1; |
208 | 204 | ||
209 | *p = (*p)->next; | 205 | *p = (*p)->next; |
210 | 206 | ||
211 | if (ftrace_enabled) { | 207 | if (ftrace_enabled) { |
212 | /* If we only have one func left, then call that directly */ | 208 | /* If we only have one func left, then call that directly */ |
213 | if (ftrace_list->next == &ftrace_list_end) { | 209 | if (ftrace_list->next == &ftrace_list_end) { |
214 | ftrace_func_t func = ftrace_list->func; | 210 | ftrace_func_t func = ftrace_list->func; |
215 | 211 | ||
216 | if (!list_empty(&ftrace_pids)) { | 212 | if (!list_empty(&ftrace_pids)) { |
217 | set_ftrace_pid_function(func); | 213 | set_ftrace_pid_function(func); |
218 | func = ftrace_pid_func; | 214 | func = ftrace_pid_func; |
219 | } | 215 | } |
220 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | 216 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST |
221 | ftrace_trace_function = func; | 217 | ftrace_trace_function = func; |
222 | #else | 218 | #else |
223 | __ftrace_trace_function = func; | 219 | __ftrace_trace_function = func; |
224 | #endif | 220 | #endif |
225 | } | 221 | } |
226 | } | 222 | } |
227 | 223 | ||
228 | return 0; | 224 | return 0; |
229 | } | 225 | } |
230 | 226 | ||
231 | static void ftrace_update_pid_func(void) | 227 | static void ftrace_update_pid_func(void) |
232 | { | 228 | { |
233 | ftrace_func_t func; | 229 | ftrace_func_t func; |
234 | 230 | ||
235 | if (ftrace_trace_function == ftrace_stub) | 231 | if (ftrace_trace_function == ftrace_stub) |
236 | return; | 232 | return; |
237 | 233 | ||
238 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | 234 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST |
239 | func = ftrace_trace_function; | 235 | func = ftrace_trace_function; |
240 | #else | 236 | #else |
241 | func = __ftrace_trace_function; | 237 | func = __ftrace_trace_function; |
242 | #endif | 238 | #endif |
243 | 239 | ||
244 | if (!list_empty(&ftrace_pids)) { | 240 | if (!list_empty(&ftrace_pids)) { |
245 | set_ftrace_pid_function(func); | 241 | set_ftrace_pid_function(func); |
246 | func = ftrace_pid_func; | 242 | func = ftrace_pid_func; |
247 | } else { | 243 | } else { |
248 | if (func == ftrace_pid_func) | 244 | if (func == ftrace_pid_func) |
249 | func = ftrace_pid_function; | 245 | func = ftrace_pid_function; |
250 | } | 246 | } |
251 | 247 | ||
252 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | 248 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST |
253 | ftrace_trace_function = func; | 249 | ftrace_trace_function = func; |
254 | #else | 250 | #else |
255 | __ftrace_trace_function = func; | 251 | __ftrace_trace_function = func; |
256 | #endif | 252 | #endif |
257 | } | 253 | } |
258 | 254 | ||
259 | #ifdef CONFIG_FUNCTION_PROFILER | 255 | #ifdef CONFIG_FUNCTION_PROFILER |
260 | struct ftrace_profile { | 256 | struct ftrace_profile { |
261 | struct hlist_node node; | 257 | struct hlist_node node; |
262 | unsigned long ip; | 258 | unsigned long ip; |
263 | unsigned long counter; | 259 | unsigned long counter; |
264 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 260 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
265 | unsigned long long time; | 261 | unsigned long long time; |
266 | #endif | 262 | #endif |
267 | }; | 263 | }; |
268 | 264 | ||
269 | struct ftrace_profile_page { | 265 | struct ftrace_profile_page { |
270 | struct ftrace_profile_page *next; | 266 | struct ftrace_profile_page *next; |
271 | unsigned long index; | 267 | unsigned long index; |
272 | struct ftrace_profile records[]; | 268 | struct ftrace_profile records[]; |
273 | }; | 269 | }; |
274 | 270 | ||
275 | struct ftrace_profile_stat { | 271 | struct ftrace_profile_stat { |
276 | atomic_t disabled; | 272 | atomic_t disabled; |
277 | struct hlist_head *hash; | 273 | struct hlist_head *hash; |
278 | struct ftrace_profile_page *pages; | 274 | struct ftrace_profile_page *pages; |
279 | struct ftrace_profile_page *start; | 275 | struct ftrace_profile_page *start; |
280 | struct tracer_stat stat; | 276 | struct tracer_stat stat; |
281 | }; | 277 | }; |
282 | 278 | ||
283 | #define PROFILE_RECORDS_SIZE \ | 279 | #define PROFILE_RECORDS_SIZE \ |
284 | (PAGE_SIZE - offsetof(struct ftrace_profile_page, records)) | 280 | (PAGE_SIZE - offsetof(struct ftrace_profile_page, records)) |
285 | 281 | ||
286 | #define PROFILES_PER_PAGE \ | 282 | #define PROFILES_PER_PAGE \ |
287 | (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile)) | 283 | (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile)) |
288 | 284 | ||
289 | static int ftrace_profile_bits __read_mostly; | 285 | static int ftrace_profile_bits __read_mostly; |
290 | static int ftrace_profile_enabled __read_mostly; | 286 | static int ftrace_profile_enabled __read_mostly; |
291 | 287 | ||
292 | /* ftrace_profile_lock - synchronize the enable and disable of the profiler */ | 288 | /* ftrace_profile_lock - synchronize the enable and disable of the profiler */ |
293 | static DEFINE_MUTEX(ftrace_profile_lock); | 289 | static DEFINE_MUTEX(ftrace_profile_lock); |
294 | 290 | ||
295 | static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats); | 291 | static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats); |
296 | 292 | ||
297 | #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */ | 293 | #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */ |
298 | 294 | ||
299 | static void * | 295 | static void * |
300 | function_stat_next(void *v, int idx) | 296 | function_stat_next(void *v, int idx) |
301 | { | 297 | { |
302 | struct ftrace_profile *rec = v; | 298 | struct ftrace_profile *rec = v; |
303 | struct ftrace_profile_page *pg; | 299 | struct ftrace_profile_page *pg; |
304 | 300 | ||
305 | pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK); | 301 | pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK); |
306 | 302 | ||
307 | again: | 303 | again: |
308 | if (idx != 0) | 304 | if (idx != 0) |
309 | rec++; | 305 | rec++; |
310 | 306 | ||
311 | if ((void *)rec >= (void *)&pg->records[pg->index]) { | 307 | if ((void *)rec >= (void *)&pg->records[pg->index]) { |
312 | pg = pg->next; | 308 | pg = pg->next; |
313 | if (!pg) | 309 | if (!pg) |
314 | return NULL; | 310 | return NULL; |
315 | rec = &pg->records[0]; | 311 | rec = &pg->records[0]; |
316 | if (!rec->counter) | 312 | if (!rec->counter) |
317 | goto again; | 313 | goto again; |
318 | } | 314 | } |
319 | 315 | ||
320 | return rec; | 316 | return rec; |
321 | } | 317 | } |
322 | 318 | ||
323 | static void *function_stat_start(struct tracer_stat *trace) | 319 | static void *function_stat_start(struct tracer_stat *trace) |
324 | { | 320 | { |
325 | struct ftrace_profile_stat *stat = | 321 | struct ftrace_profile_stat *stat = |
326 | container_of(trace, struct ftrace_profile_stat, stat); | 322 | container_of(trace, struct ftrace_profile_stat, stat); |
327 | 323 | ||
328 | if (!stat || !stat->start) | 324 | if (!stat || !stat->start) |
329 | return NULL; | 325 | return NULL; |
330 | 326 | ||
331 | return function_stat_next(&stat->start->records[0], 0); | 327 | return function_stat_next(&stat->start->records[0], 0); |
332 | } | 328 | } |
333 | 329 | ||
334 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 330 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
335 | /* function graph compares on total time */ | 331 | /* function graph compares on total time */ |
336 | static int function_stat_cmp(void *p1, void *p2) | 332 | static int function_stat_cmp(void *p1, void *p2) |
337 | { | 333 | { |
338 | struct ftrace_profile *a = p1; | 334 | struct ftrace_profile *a = p1; |
339 | struct ftrace_profile *b = p2; | 335 | struct ftrace_profile *b = p2; |
340 | 336 | ||
341 | if (a->time < b->time) | 337 | if (a->time < b->time) |
342 | return -1; | 338 | return -1; |
343 | if (a->time > b->time) | 339 | if (a->time > b->time) |
344 | return 1; | 340 | return 1; |
345 | else | 341 | else |
346 | return 0; | 342 | return 0; |
347 | } | 343 | } |
348 | #else | 344 | #else |
349 | /* not function graph compares against hits */ | 345 | /* not function graph compares against hits */ |
350 | static int function_stat_cmp(void *p1, void *p2) | 346 | static int function_stat_cmp(void *p1, void *p2) |
351 | { | 347 | { |
352 | struct ftrace_profile *a = p1; | 348 | struct ftrace_profile *a = p1; |
353 | struct ftrace_profile *b = p2; | 349 | struct ftrace_profile *b = p2; |
354 | 350 | ||
355 | if (a->counter < b->counter) | 351 | if (a->counter < b->counter) |
356 | return -1; | 352 | return -1; |
357 | if (a->counter > b->counter) | 353 | if (a->counter > b->counter) |
358 | return 1; | 354 | return 1; |
359 | else | 355 | else |
360 | return 0; | 356 | return 0; |
361 | } | 357 | } |
362 | #endif | 358 | #endif |
363 | 359 | ||
364 | static int function_stat_headers(struct seq_file *m) | 360 | static int function_stat_headers(struct seq_file *m) |
365 | { | 361 | { |
366 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 362 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
367 | seq_printf(m, " Function " | 363 | seq_printf(m, " Function " |
368 | "Hit Time Avg\n" | 364 | "Hit Time Avg\n" |
369 | " -------- " | 365 | " -------- " |
370 | "--- ---- ---\n"); | 366 | "--- ---- ---\n"); |
371 | #else | 367 | #else |
372 | seq_printf(m, " Function Hit\n" | 368 | seq_printf(m, " Function Hit\n" |
373 | " -------- ---\n"); | 369 | " -------- ---\n"); |
374 | #endif | 370 | #endif |
375 | return 0; | 371 | return 0; |
376 | } | 372 | } |
377 | 373 | ||
378 | static int function_stat_show(struct seq_file *m, void *v) | 374 | static int function_stat_show(struct seq_file *m, void *v) |
379 | { | 375 | { |
380 | struct ftrace_profile *rec = v; | 376 | struct ftrace_profile *rec = v; |
381 | char str[KSYM_SYMBOL_LEN]; | 377 | char str[KSYM_SYMBOL_LEN]; |
382 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 378 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
383 | static DEFINE_MUTEX(mutex); | 379 | static DEFINE_MUTEX(mutex); |
384 | static struct trace_seq s; | 380 | static struct trace_seq s; |
385 | unsigned long long avg; | 381 | unsigned long long avg; |
386 | #endif | 382 | #endif |
387 | 383 | ||
388 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | 384 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); |
389 | seq_printf(m, " %-30.30s %10lu", str, rec->counter); | 385 | seq_printf(m, " %-30.30s %10lu", str, rec->counter); |
390 | 386 | ||
391 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 387 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
392 | seq_printf(m, " "); | 388 | seq_printf(m, " "); |
393 | avg = rec->time; | 389 | avg = rec->time; |
394 | do_div(avg, rec->counter); | 390 | do_div(avg, rec->counter); |
395 | 391 | ||
396 | mutex_lock(&mutex); | 392 | mutex_lock(&mutex); |
397 | trace_seq_init(&s); | 393 | trace_seq_init(&s); |
398 | trace_print_graph_duration(rec->time, &s); | 394 | trace_print_graph_duration(rec->time, &s); |
399 | trace_seq_puts(&s, " "); | 395 | trace_seq_puts(&s, " "); |
400 | trace_print_graph_duration(avg, &s); | 396 | trace_print_graph_duration(avg, &s); |
401 | trace_print_seq(m, &s); | 397 | trace_print_seq(m, &s); |
402 | mutex_unlock(&mutex); | 398 | mutex_unlock(&mutex); |
403 | #endif | 399 | #endif |
404 | seq_putc(m, '\n'); | 400 | seq_putc(m, '\n'); |
405 | 401 | ||
406 | return 0; | 402 | return 0; |
407 | } | 403 | } |
408 | 404 | ||
409 | static void ftrace_profile_reset(struct ftrace_profile_stat *stat) | 405 | static void ftrace_profile_reset(struct ftrace_profile_stat *stat) |
410 | { | 406 | { |
411 | struct ftrace_profile_page *pg; | 407 | struct ftrace_profile_page *pg; |
412 | 408 | ||
413 | pg = stat->pages = stat->start; | 409 | pg = stat->pages = stat->start; |
414 | 410 | ||
415 | while (pg) { | 411 | while (pg) { |
416 | memset(pg->records, 0, PROFILE_RECORDS_SIZE); | 412 | memset(pg->records, 0, PROFILE_RECORDS_SIZE); |
417 | pg->index = 0; | 413 | pg->index = 0; |
418 | pg = pg->next; | 414 | pg = pg->next; |
419 | } | 415 | } |
420 | 416 | ||
421 | memset(stat->hash, 0, | 417 | memset(stat->hash, 0, |
422 | FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head)); | 418 | FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head)); |
423 | } | 419 | } |
424 | 420 | ||
425 | int ftrace_profile_pages_init(struct ftrace_profile_stat *stat) | 421 | int ftrace_profile_pages_init(struct ftrace_profile_stat *stat) |
426 | { | 422 | { |
427 | struct ftrace_profile_page *pg; | 423 | struct ftrace_profile_page *pg; |
428 | int functions; | 424 | int functions; |
429 | int pages; | 425 | int pages; |
430 | int i; | 426 | int i; |
431 | 427 | ||
432 | /* If we already allocated, do nothing */ | 428 | /* If we already allocated, do nothing */ |
433 | if (stat->pages) | 429 | if (stat->pages) |
434 | return 0; | 430 | return 0; |
435 | 431 | ||
436 | stat->pages = (void *)get_zeroed_page(GFP_KERNEL); | 432 | stat->pages = (void *)get_zeroed_page(GFP_KERNEL); |
437 | if (!stat->pages) | 433 | if (!stat->pages) |
438 | return -ENOMEM; | 434 | return -ENOMEM; |
439 | 435 | ||
440 | #ifdef CONFIG_DYNAMIC_FTRACE | 436 | #ifdef CONFIG_DYNAMIC_FTRACE |
441 | functions = ftrace_update_tot_cnt; | 437 | functions = ftrace_update_tot_cnt; |
442 | #else | 438 | #else |
443 | /* | 439 | /* |
444 | * We do not know the number of functions that exist because | 440 | * We do not know the number of functions that exist because |
445 | * dynamic tracing is what counts them. With past experience | 441 | * dynamic tracing is what counts them. With past experience |
446 | * we have around 20K functions. That should be more than enough. | 442 | * we have around 20K functions. That should be more than enough. |
447 | * It is highly unlikely we will execute every function in | 443 | * It is highly unlikely we will execute every function in |
448 | * the kernel. | 444 | * the kernel. |
449 | */ | 445 | */ |
450 | functions = 20000; | 446 | functions = 20000; |
451 | #endif | 447 | #endif |
452 | 448 | ||
453 | pg = stat->start = stat->pages; | 449 | pg = stat->start = stat->pages; |
454 | 450 | ||
455 | pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE); | 451 | pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE); |
456 | 452 | ||
457 | for (i = 0; i < pages; i++) { | 453 | for (i = 0; i < pages; i++) { |
458 | pg->next = (void *)get_zeroed_page(GFP_KERNEL); | 454 | pg->next = (void *)get_zeroed_page(GFP_KERNEL); |
459 | if (!pg->next) | 455 | if (!pg->next) |
460 | goto out_free; | 456 | goto out_free; |
461 | pg = pg->next; | 457 | pg = pg->next; |
462 | } | 458 | } |
463 | 459 | ||
464 | return 0; | 460 | return 0; |
465 | 461 | ||
466 | out_free: | 462 | out_free: |
467 | pg = stat->start; | 463 | pg = stat->start; |
468 | while (pg) { | 464 | while (pg) { |
469 | unsigned long tmp = (unsigned long)pg; | 465 | unsigned long tmp = (unsigned long)pg; |
470 | 466 | ||
471 | pg = pg->next; | 467 | pg = pg->next; |
472 | free_page(tmp); | 468 | free_page(tmp); |
473 | } | 469 | } |
474 | 470 | ||
475 | free_page((unsigned long)stat->pages); | 471 | free_page((unsigned long)stat->pages); |
476 | stat->pages = NULL; | 472 | stat->pages = NULL; |
477 | stat->start = NULL; | 473 | stat->start = NULL; |
478 | 474 | ||
479 | return -ENOMEM; | 475 | return -ENOMEM; |
480 | } | 476 | } |
481 | 477 | ||
482 | static int ftrace_profile_init_cpu(int cpu) | 478 | static int ftrace_profile_init_cpu(int cpu) |
483 | { | 479 | { |
484 | struct ftrace_profile_stat *stat; | 480 | struct ftrace_profile_stat *stat; |
485 | int size; | 481 | int size; |
486 | 482 | ||
487 | stat = &per_cpu(ftrace_profile_stats, cpu); | 483 | stat = &per_cpu(ftrace_profile_stats, cpu); |
488 | 484 | ||
489 | if (stat->hash) { | 485 | if (stat->hash) { |
490 | /* If the profile is already created, simply reset it */ | 486 | /* If the profile is already created, simply reset it */ |
491 | ftrace_profile_reset(stat); | 487 | ftrace_profile_reset(stat); |
492 | return 0; | 488 | return 0; |
493 | } | 489 | } |
494 | 490 | ||
495 | /* | 491 | /* |
496 | * We are profiling all functions, but usually only a few thousand | 492 | * We are profiling all functions, but usually only a few thousand |
497 | * functions are hit. We'll make a hash of 1024 items. | 493 | * functions are hit. We'll make a hash of 1024 items. |
498 | */ | 494 | */ |
499 | size = FTRACE_PROFILE_HASH_SIZE; | 495 | size = FTRACE_PROFILE_HASH_SIZE; |
500 | 496 | ||
501 | stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL); | 497 | stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL); |
502 | 498 | ||
503 | if (!stat->hash) | 499 | if (!stat->hash) |
504 | return -ENOMEM; | 500 | return -ENOMEM; |
505 | 501 | ||
506 | if (!ftrace_profile_bits) { | 502 | if (!ftrace_profile_bits) { |
507 | size--; | 503 | size--; |
508 | 504 | ||
509 | for (; size; size >>= 1) | 505 | for (; size; size >>= 1) |
510 | ftrace_profile_bits++; | 506 | ftrace_profile_bits++; |
511 | } | 507 | } |
512 | 508 | ||
513 | /* Preallocate the function profiling pages */ | 509 | /* Preallocate the function profiling pages */ |
514 | if (ftrace_profile_pages_init(stat) < 0) { | 510 | if (ftrace_profile_pages_init(stat) < 0) { |
515 | kfree(stat->hash); | 511 | kfree(stat->hash); |
516 | stat->hash = NULL; | 512 | stat->hash = NULL; |
517 | return -ENOMEM; | 513 | return -ENOMEM; |
518 | } | 514 | } |
519 | 515 | ||
520 | return 0; | 516 | return 0; |
521 | } | 517 | } |
522 | 518 | ||
523 | static int ftrace_profile_init(void) | 519 | static int ftrace_profile_init(void) |
524 | { | 520 | { |
525 | int cpu; | 521 | int cpu; |
526 | int ret = 0; | 522 | int ret = 0; |
527 | 523 | ||
528 | for_each_online_cpu(cpu) { | 524 | for_each_online_cpu(cpu) { |
529 | ret = ftrace_profile_init_cpu(cpu); | 525 | ret = ftrace_profile_init_cpu(cpu); |
530 | if (ret) | 526 | if (ret) |
531 | break; | 527 | break; |
532 | } | 528 | } |
533 | 529 | ||
534 | return ret; | 530 | return ret; |
535 | } | 531 | } |
536 | 532 | ||
537 | /* interrupts must be disabled */ | 533 | /* interrupts must be disabled */ |
538 | static struct ftrace_profile * | 534 | static struct ftrace_profile * |
539 | ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip) | 535 | ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip) |
540 | { | 536 | { |
541 | struct ftrace_profile *rec; | 537 | struct ftrace_profile *rec; |
542 | struct hlist_head *hhd; | 538 | struct hlist_head *hhd; |
543 | struct hlist_node *n; | 539 | struct hlist_node *n; |
544 | unsigned long key; | 540 | unsigned long key; |
545 | 541 | ||
546 | key = hash_long(ip, ftrace_profile_bits); | 542 | key = hash_long(ip, ftrace_profile_bits); |
547 | hhd = &stat->hash[key]; | 543 | hhd = &stat->hash[key]; |
548 | 544 | ||
549 | if (hlist_empty(hhd)) | 545 | if (hlist_empty(hhd)) |
550 | return NULL; | 546 | return NULL; |
551 | 547 | ||
552 | hlist_for_each_entry_rcu(rec, n, hhd, node) { | 548 | hlist_for_each_entry_rcu(rec, n, hhd, node) { |
553 | if (rec->ip == ip) | 549 | if (rec->ip == ip) |
554 | return rec; | 550 | return rec; |
555 | } | 551 | } |
556 | 552 | ||
557 | return NULL; | 553 | return NULL; |
558 | } | 554 | } |
559 | 555 | ||
560 | static void ftrace_add_profile(struct ftrace_profile_stat *stat, | 556 | static void ftrace_add_profile(struct ftrace_profile_stat *stat, |
561 | struct ftrace_profile *rec) | 557 | struct ftrace_profile *rec) |
562 | { | 558 | { |
563 | unsigned long key; | 559 | unsigned long key; |
564 | 560 | ||
565 | key = hash_long(rec->ip, ftrace_profile_bits); | 561 | key = hash_long(rec->ip, ftrace_profile_bits); |
566 | hlist_add_head_rcu(&rec->node, &stat->hash[key]); | 562 | hlist_add_head_rcu(&rec->node, &stat->hash[key]); |
567 | } | 563 | } |
568 | 564 | ||
569 | /* | 565 | /* |
570 | * The memory is already allocated, this simply finds a new record to use. | 566 | * The memory is already allocated, this simply finds a new record to use. |
571 | */ | 567 | */ |
572 | static struct ftrace_profile * | 568 | static struct ftrace_profile * |
573 | ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip) | 569 | ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip) |
574 | { | 570 | { |
575 | struct ftrace_profile *rec = NULL; | 571 | struct ftrace_profile *rec = NULL; |
576 | 572 | ||
577 | /* prevent recursion (from NMIs) */ | 573 | /* prevent recursion (from NMIs) */ |
578 | if (atomic_inc_return(&stat->disabled) != 1) | 574 | if (atomic_inc_return(&stat->disabled) != 1) |
579 | goto out; | 575 | goto out; |
580 | 576 | ||
581 | /* | 577 | /* |
582 | * Try to find the function again since an NMI | 578 | * Try to find the function again since an NMI |
583 | * could have added it | 579 | * could have added it |
584 | */ | 580 | */ |
585 | rec = ftrace_find_profiled_func(stat, ip); | 581 | rec = ftrace_find_profiled_func(stat, ip); |
586 | if (rec) | 582 | if (rec) |
587 | goto out; | 583 | goto out; |
588 | 584 | ||
589 | if (stat->pages->index == PROFILES_PER_PAGE) { | 585 | if (stat->pages->index == PROFILES_PER_PAGE) { |
590 | if (!stat->pages->next) | 586 | if (!stat->pages->next) |
591 | goto out; | 587 | goto out; |
592 | stat->pages = stat->pages->next; | 588 | stat->pages = stat->pages->next; |
593 | } | 589 | } |
594 | 590 | ||
595 | rec = &stat->pages->records[stat->pages->index++]; | 591 | rec = &stat->pages->records[stat->pages->index++]; |
596 | rec->ip = ip; | 592 | rec->ip = ip; |
597 | ftrace_add_profile(stat, rec); | 593 | ftrace_add_profile(stat, rec); |
598 | 594 | ||
599 | out: | 595 | out: |
600 | atomic_dec(&stat->disabled); | 596 | atomic_dec(&stat->disabled); |
601 | 597 | ||
602 | return rec; | 598 | return rec; |
603 | } | 599 | } |
604 | 600 | ||
/*
 * Function-trace callback: count one hit for @ip in this CPU's profile
 * hash, allocating a record the first time the function is seen.
 * @parent_ip is ignored.
 */
static void
function_profile_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		goto out;

	/* irq-off keeps us pinned to this CPU's stat structure */
	local_irq_save(flags);

	stat = &__get_cpu_var(ftrace_profile_stats);
	/* re-check under irq-off: profiling may have just been disabled */
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}
632 | 628 | ||
633 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 629 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
634 | static int profile_graph_entry(struct ftrace_graph_ent *trace) | 630 | static int profile_graph_entry(struct ftrace_graph_ent *trace) |
635 | { | 631 | { |
636 | function_profile_call(trace->func, 0); | 632 | function_profile_call(trace->func, 0); |
637 | return 1; | 633 | return 1; |
638 | } | 634 | } |
639 | 635 | ||
/*
 * Graph-tracer return hook: charge the elapsed time of this call to its
 * profile record.  When TRACE_ITER_GRAPH_TIME is clear, time spent in
 * child functions (tracked in ret_stack[].subtime) is subtracted so only
 * the function's own time is accounted.
 */
static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		/* Subtract child time; clamp at zero to avoid underflow */
		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec)
		rec->time += calltime;

 out:
	local_irq_restore(flags);
}
676 | 672 | ||
/* With graph tracing available, profile via the graph entry/return hooks */
static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}
682 | 678 | ||
/* Tear down the graph-tracer based profiler */
static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
687 | #else | 683 | #else |
/* No graph tracer: fall back to the plain function-trace callback */
static struct ftrace_ops ftrace_profile_ops __read_mostly =
{
	.func = function_profile_call,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
702 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 698 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
703 | 699 | ||
704 | static ssize_t | 700 | static ssize_t |
705 | ftrace_profile_write(struct file *filp, const char __user *ubuf, | 701 | ftrace_profile_write(struct file *filp, const char __user *ubuf, |
706 | size_t cnt, loff_t *ppos) | 702 | size_t cnt, loff_t *ppos) |
707 | { | 703 | { |
708 | unsigned long val; | 704 | unsigned long val; |
709 | char buf[64]; /* big enough to hold a number */ | 705 | char buf[64]; /* big enough to hold a number */ |
710 | int ret; | 706 | int ret; |
711 | 707 | ||
712 | if (cnt >= sizeof(buf)) | 708 | if (cnt >= sizeof(buf)) |
713 | return -EINVAL; | 709 | return -EINVAL; |
714 | 710 | ||
715 | if (copy_from_user(&buf, ubuf, cnt)) | 711 | if (copy_from_user(&buf, ubuf, cnt)) |
716 | return -EFAULT; | 712 | return -EFAULT; |
717 | 713 | ||
718 | buf[cnt] = 0; | 714 | buf[cnt] = 0; |
719 | 715 | ||
720 | ret = strict_strtoul(buf, 10, &val); | 716 | ret = strict_strtoul(buf, 10, &val); |
721 | if (ret < 0) | 717 | if (ret < 0) |
722 | return ret; | 718 | return ret; |
723 | 719 | ||
724 | val = !!val; | 720 | val = !!val; |
725 | 721 | ||
726 | mutex_lock(&ftrace_profile_lock); | 722 | mutex_lock(&ftrace_profile_lock); |
727 | if (ftrace_profile_enabled ^ val) { | 723 | if (ftrace_profile_enabled ^ val) { |
728 | if (val) { | 724 | if (val) { |
729 | ret = ftrace_profile_init(); | 725 | ret = ftrace_profile_init(); |
730 | if (ret < 0) { | 726 | if (ret < 0) { |
731 | cnt = ret; | 727 | cnt = ret; |
732 | goto out; | 728 | goto out; |
733 | } | 729 | } |
734 | 730 | ||
735 | ret = register_ftrace_profiler(); | 731 | ret = register_ftrace_profiler(); |
736 | if (ret < 0) { | 732 | if (ret < 0) { |
737 | cnt = ret; | 733 | cnt = ret; |
738 | goto out; | 734 | goto out; |
739 | } | 735 | } |
740 | ftrace_profile_enabled = 1; | 736 | ftrace_profile_enabled = 1; |
741 | } else { | 737 | } else { |
742 | ftrace_profile_enabled = 0; | 738 | ftrace_profile_enabled = 0; |
743 | /* | 739 | /* |
744 | * unregister_ftrace_profiler calls stop_machine | 740 | * unregister_ftrace_profiler calls stop_machine |
745 | * so this acts like an synchronize_sched. | 741 | * so this acts like an synchronize_sched. |
746 | */ | 742 | */ |
747 | unregister_ftrace_profiler(); | 743 | unregister_ftrace_profiler(); |
748 | } | 744 | } |
749 | } | 745 | } |
750 | out: | 746 | out: |
751 | mutex_unlock(&ftrace_profile_lock); | 747 | mutex_unlock(&ftrace_profile_lock); |
752 | 748 | ||
753 | *ppos += cnt; | 749 | *ppos += cnt; |
754 | 750 | ||
755 | return cnt; | 751 | return cnt; |
756 | } | 752 | } |
757 | 753 | ||
758 | static ssize_t | 754 | static ssize_t |
759 | ftrace_profile_read(struct file *filp, char __user *ubuf, | 755 | ftrace_profile_read(struct file *filp, char __user *ubuf, |
760 | size_t cnt, loff_t *ppos) | 756 | size_t cnt, loff_t *ppos) |
761 | { | 757 | { |
762 | char buf[64]; /* big enough to hold a number */ | 758 | char buf[64]; /* big enough to hold a number */ |
763 | int r; | 759 | int r; |
764 | 760 | ||
765 | r = sprintf(buf, "%u\n", ftrace_profile_enabled); | 761 | r = sprintf(buf, "%u\n", ftrace_profile_enabled); |
766 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 762 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
767 | } | 763 | } |
768 | 764 | ||
/* file_operations for the "function_profile_enabled" debugfs file */
static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
};
774 | 770 | ||
/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};
784 | 780 | ||
/*
 * Register one "function<cpu>" stat file per possible CPU, plus the
 * "function_profile_enabled" control file, under @d_tracer.  Called
 * once at init; on failure it warns and bails out, deliberately not
 * unwinding already-registered files (they are permanent).
 */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		/* allocate enough for function name + cpu number */
		name = kmalloc(32, GFP_KERNEL);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		/* copy the template, then give this CPU its own name */
		stat->stat = function_stats;
		snprintf(name, 32, "function%d", cpu);
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = debugfs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'function_profile_enabled' entry\n");
}
827 | 823 | ||
828 | #else /* CONFIG_FUNCTION_PROFILER */ | 824 | #else /* CONFIG_FUNCTION_PROFILER */ |
/* Profiler compiled out: nothing to add to debugfs */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
832 | #endif /* CONFIG_FUNCTION_PROFILER */ | 828 | #endif /* CONFIG_FUNCTION_PROFILER */ |
833 | 829 | ||
/* Sentinel pid: &init_struct_pid stands in for the swapper (idle) task */
static struct pid * const ftrace_swapper_pid = &init_struct_pid;
835 | 831 | ||
836 | #ifdef CONFIG_DYNAMIC_FTRACE | 832 | #ifdef CONFIG_DYNAMIC_FTRACE |
837 | 833 | ||
838 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD | 834 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD |
839 | # error Dynamic ftrace depends on MCOUNT_RECORD | 835 | # error Dynamic ftrace depends on MCOUNT_RECORD |
840 | #endif | 836 | #endif |
841 | 837 | ||
/* hash of functions that have probes attached (see ftrace_func_probe) */
static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

/* one instance per instruction pointer a probe is attached to */
struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct rcu_head		rcu;
};

/* command bits handed to __ftrace_modify_code() via stop_machine */
enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

/* nonzero when a function filter is active (FTRACE_FL_FILTER matters) */
static int ftrace_filtered;

/* singly linked list (via rec->newlist) of newly recorded mcount sites */
static struct dyn_ftrace *ftrace_new_addrs;

static DEFINE_MUTEX(ftrace_regex_lock);

/* a page worth of dyn_ftrace records */
struct ftrace_page {
	struct ftrace_page	*next;
	int			index;	/* records used so far on this page */
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

/* list (via rec->freelist) of freed records available for reuse */
static struct dyn_ftrace *ftrace_free_records;

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
899 | 895 | ||
/*
 * Push @rec onto the free list and flag it FREE so record iterators
 * skip it.  Freed records are recycled by ftrace_alloc_dyn_node().
 */
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->freelist = ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
906 | 902 | ||
/*
 * Hand out a dyn_ftrace record for @ip: reuse a freed record when one
 * is available, otherwise take the next slot on the current page,
 * allocating a fresh page when needed.  Returns NULL on allocation
 * failure or freelist corruption.
 */
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		/* a record on the freelist must carry FTRACE_FL_FREE */
		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			/* corruption: drop the whole freelist rather than reuse it */
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = rec->freelist;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
939 | 935 | ||
/*
 * Record a newly discovered mcount call site at @ip and queue it on the
 * ftrace_new_addrs list for later conversion.  Returns the new record,
 * or NULL when ftrace is disabled or allocation fails.
 */
static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;
	rec->newlist = ftrace_new_addrs;
	ftrace_new_addrs = rec;

	return rec;
}
958 | 954 | ||
959 | static void print_ip_ins(const char *fmt, unsigned char *p) | 955 | static void print_ip_ins(const char *fmt, unsigned char *p) |
960 | { | 956 | { |
961 | int i; | 957 | int i; |
962 | 958 | ||
963 | printk(KERN_CONT "%s", fmt); | 959 | printk(KERN_CONT "%s", fmt); |
964 | 960 | ||
965 | for (i = 0; i < MCOUNT_INSN_SIZE; i++) | 961 | for (i = 0; i < MCOUNT_INSN_SIZE; i++) |
966 | printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]); | 962 | printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]); |
967 | } | 963 | } |
968 | 964 | ||
/*
 * Report a code-modification failure at @ip.  @failed is the errno
 * returned by the arch patching code: -EFAULT (read failed), -EINVAL
 * (bytes at the site were not what we expected — also dumps them),
 * -EPERM (write failed), anything else is unexpected.
 */
static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
995 | 991 | ||
996 | 992 | ||
/* Return 1 if the address range is reserved for ftrace */
int ftrace_text_reserved(void *start, void *end)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	/* overlap test: any record whose MCOUNT_INSN_SIZE bytes
	 * intersect [start, end] makes the range reserved */
	do_for_each_ftrace_rec(pg, rec) {
		if (rec->ip <= (unsigned long)end &&
		    rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
			return 1;
	} while_for_each_ftrace_rec();
	return 0;
}
1010 | 1006 | ||
1011 | 1007 | ||
/*
 * Bring one record in line with the requested tracing state: patch the
 * call site to call ftrace (ftrace_make_call) or to a nop
 * (ftrace_make_nop).  Returns 0 when nothing needed to change or the
 * patch succeeded, otherwise the arch error code (see ftrace_bug).
 */
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	unsigned long flag = 0UL;

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	/*
	 * If this record is not to be traced or we want to disable it,
	 * then disable it.
	 *
	 * If we want to enable it and filtering is off, then enable it.
	 *
	 * If we want to enable it and filtering is on, enable it only if
	 * it's filtered
	 */
	if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) {
		if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
			flag = FTRACE_FL_ENABLED;
	}

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return 0;

	if (flag) {
		rec->flags |= FTRACE_FL_ENABLED;
		return ftrace_make_call(rec, ftrace_addr);
	}

	rec->flags &= ~FTRACE_FL_ENABLED;
	return ftrace_make_nop(NULL, rec, ftrace_addr);
}
1046 | 1042 | ||
/*
 * Walk every dyn_ftrace record and enable (@enable != 0) or disable its
 * call site.  On the first patch failure the record is flagged FAILED,
 * the error is reported, and the walk is aborted.
 */
static void ftrace_replace_code(int enable)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int failed;

	do_for_each_ftrace_rec(pg, rec) {
		/*
		 * Skip over free records, records that have
		 * failed and not converted.
		 */
		if (rec->flags & FTRACE_FL_FREE ||
		    rec->flags & FTRACE_FL_FAILED ||
		    !(rec->flags & FTRACE_FL_CONVERTED))
			continue;

		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			rec->flags |= FTRACE_FL_FAILED;
			ftrace_bug(failed, rec->ip);
			/* Stop processing */
			return;
		}
	} while_for_each_ftrace_rec();
}
1072 | 1068 | ||
1073 | static int | 1069 | static int |
1074 | ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) | 1070 | ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) |
1075 | { | 1071 | { |
1076 | unsigned long ip; | 1072 | unsigned long ip; |
1077 | int ret; | 1073 | int ret; |
1078 | 1074 | ||
1079 | ip = rec->ip; | 1075 | ip = rec->ip; |
1080 | 1076 | ||
1081 | ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); | 1077 | ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); |
1082 | if (ret) { | 1078 | if (ret) { |
1083 | ftrace_bug(ret, ip); | 1079 | ftrace_bug(ret, ip); |
1084 | rec->flags |= FTRACE_FL_FAILED; | 1080 | rec->flags |= FTRACE_FL_FAILED; |
1085 | return 0; | 1081 | return 0; |
1086 | } | 1082 | } |
1087 | return 1; | 1083 | return 1; |
1088 | } | 1084 | } |
1089 | 1085 | ||
1090 | /* | 1086 | /* |
1091 | * archs can override this function if they must do something | 1087 | * archs can override this function if they must do something |
1092 | * before the modifying code is performed. | 1088 | * before the modifying code is performed. |
1093 | */ | 1089 | */ |
1094 | int __weak ftrace_arch_code_modify_prepare(void) | 1090 | int __weak ftrace_arch_code_modify_prepare(void) |
1095 | { | 1091 | { |
1096 | return 0; | 1092 | return 0; |
1097 | } | 1093 | } |
1098 | 1094 | ||
1099 | /* | 1095 | /* |
1100 | * archs can override this function if they must do something | 1096 | * archs can override this function if they must do something |
1101 | * after the modifying code is performed. | 1097 | * after the modifying code is performed. |
1102 | */ | 1098 | */ |
1103 | int __weak ftrace_arch_code_modify_post_process(void) | 1099 | int __weak ftrace_arch_code_modify_post_process(void) |
1104 | { | 1100 | { |
1105 | return 0; | 1101 | return 0; |
1106 | } | 1102 | } |
1107 | 1103 | ||
/*
 * Executed inside stop_machine(): carry out the command bits
 * (FTRACE_* enum) while every other CPU is held off, so the kernel
 * text can be patched safely.  @data points to the command mask.
 */
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}
1127 | 1123 | ||
/*
 * Run the arch prepare hook, patch the code under stop_machine, then
 * run the arch post-process hook.  A failing prepare aborts the update
 * entirely (with a warning).
 */
static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	stop_machine(__ftrace_modify_code, &command, NULL);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}
1142 | 1138 | ||
/* Last trace function installed via FTRACE_UPDATE_TRACE_FUNC. */
static ftrace_func_t saved_ftrace_func;
/* Nesting count of ftrace users; non-zero means calls are patched in. */
static int ftrace_start_up;
1145 | 1141 | ||
1146 | static void ftrace_startup_enable(int command) | 1142 | static void ftrace_startup_enable(int command) |
1147 | { | 1143 | { |
1148 | if (saved_ftrace_func != ftrace_trace_function) { | 1144 | if (saved_ftrace_func != ftrace_trace_function) { |
1149 | saved_ftrace_func = ftrace_trace_function; | 1145 | saved_ftrace_func = ftrace_trace_function; |
1150 | command |= FTRACE_UPDATE_TRACE_FUNC; | 1146 | command |= FTRACE_UPDATE_TRACE_FUNC; |
1151 | } | 1147 | } |
1152 | 1148 | ||
1153 | if (!command || !ftrace_enabled) | 1149 | if (!command || !ftrace_enabled) |
1154 | return; | 1150 | return; |
1155 | 1151 | ||
1156 | ftrace_run_update_code(command); | 1152 | ftrace_run_update_code(command); |
1157 | } | 1153 | } |
1158 | 1154 | ||
1159 | static void ftrace_startup(int command) | 1155 | static void ftrace_startup(int command) |
1160 | { | 1156 | { |
1161 | if (unlikely(ftrace_disabled)) | 1157 | if (unlikely(ftrace_disabled)) |
1162 | return; | 1158 | return; |
1163 | 1159 | ||
1164 | ftrace_start_up++; | 1160 | ftrace_start_up++; |
1165 | command |= FTRACE_ENABLE_CALLS; | 1161 | command |= FTRACE_ENABLE_CALLS; |
1166 | 1162 | ||
1167 | ftrace_startup_enable(command); | 1163 | ftrace_startup_enable(command); |
1168 | } | 1164 | } |
1169 | 1165 | ||
1170 | static void ftrace_shutdown(int command) | 1166 | static void ftrace_shutdown(int command) |
1171 | { | 1167 | { |
1172 | if (unlikely(ftrace_disabled)) | 1168 | if (unlikely(ftrace_disabled)) |
1173 | return; | 1169 | return; |
1174 | 1170 | ||
1175 | ftrace_start_up--; | 1171 | ftrace_start_up--; |
1176 | /* | 1172 | /* |
1177 | * Just warn in case of unbalance, no need to kill ftrace, it's not | 1173 | * Just warn in case of unbalance, no need to kill ftrace, it's not |
1178 | * critical but the ftrace_call callers may be never nopped again after | 1174 | * critical but the ftrace_call callers may be never nopped again after |
1179 | * further ftrace uses. | 1175 | * further ftrace uses. |
1180 | */ | 1176 | */ |
1181 | WARN_ON_ONCE(ftrace_start_up < 0); | 1177 | WARN_ON_ONCE(ftrace_start_up < 0); |
1182 | 1178 | ||
1183 | if (!ftrace_start_up) | 1179 | if (!ftrace_start_up) |
1184 | command |= FTRACE_DISABLE_CALLS; | 1180 | command |= FTRACE_DISABLE_CALLS; |
1185 | 1181 | ||
1186 | if (saved_ftrace_func != ftrace_trace_function) { | 1182 | if (saved_ftrace_func != ftrace_trace_function) { |
1187 | saved_ftrace_func = ftrace_trace_function; | 1183 | saved_ftrace_func = ftrace_trace_function; |
1188 | command |= FTRACE_UPDATE_TRACE_FUNC; | 1184 | command |= FTRACE_UPDATE_TRACE_FUNC; |
1189 | } | 1185 | } |
1190 | 1186 | ||
1191 | if (!command || !ftrace_enabled) | 1187 | if (!command || !ftrace_enabled) |
1192 | return; | 1188 | return; |
1193 | 1189 | ||
1194 | ftrace_run_update_code(command); | 1190 | ftrace_run_update_code(command); |
1195 | } | 1191 | } |
1196 | 1192 | ||
/* Re-arm tracing after the ftrace_enabled sysctl is switched on. */
static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
}
1212 | 1208 | ||
/* Disable patched call sites after the ftrace_enabled sysctl goes off. */
static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
}
1226 | 1222 | ||
/* Duration of the last ftrace_update_code() pass. */
static cycle_t ftrace_update_time;
/* Number of records converted in the last pass. */
static unsigned long ftrace_update_cnt;
/* Total records converted since boot (exported to the stats file). */
unsigned long ftrace_update_tot_cnt;
1230 | 1226 | ||
/*
 * Convert every record queued on ftrace_new_addrs from its mcount
 * call to a NOP and, if tracing is already started, enable it right
 * away.  @mod is the module the records belong to (NULL for core
 * kernel).  Returns 0 on success, -1 if ftrace was disabled while
 * converting.
 */
static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	while (ftrace_new_addrs) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		/* Unlink the record from the pending list. */
		p = ftrace_new_addrs;
		ftrace_new_addrs = p->newlist;
		p->flags = 0L;

		/*
		 * Do the initial record conversion from mcount jump
		 * to the NOP instructions.
		 */
		if (!ftrace_code_disable(mod, p)) {
			ftrace_free_rec(p);
			continue;
		}

		p->flags |= FTRACE_FL_CONVERTED;
		ftrace_update_cnt++;

		/*
		 * If the tracing is enabled, go ahead and enable the record.
		 *
		 * The reason not to enable the record immediately is the
		 * inherent check of ftrace_make_nop/ftrace_make_call for
		 * correct previous instructions. Making first the NOP
		 * conversion puts the module to the correct state, thus
		 * passing the ftrace_make_call check.
		 */
		if (ftrace_start_up) {
			int failed = __ftrace_replace_code(p, 1);
			if (failed) {
				ftrace_bug(failed, p->ip);
				ftrace_free_rec(p);
			}
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
1285 | 1281 | ||
/*
 * Boot-time allocation of the dyn_ftrace record pages.
 * @num_to_init: number of mcount call sites counted at build time.
 * Returns 0 on success (even if only the first page could be
 * allocated; more are grabbed lazily), -1 if no page at all.
 */
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
1329 | 1325 | ||
/* Iterator mode flags for the ftrace debugfs seq files (see t_next()). */
enum {
	FTRACE_ITER_FILTER = (1 << 0),	/* show records with FTRACE_FL_FILTER */
	FTRACE_ITER_NOTRACE = (1 << 1),	/* show records with FTRACE_FL_NOTRACE */
	FTRACE_ITER_FAILURES = (1 << 2),	/* show only FTRACE_FL_FAILED records */
	FTRACE_ITER_PRINTALL = (1 << 3),	/* filter empty: print the "all" banner */
	FTRACE_ITER_HASH = (1 << 4),	/* iterating the function probe hash */
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1339 | 1335 | ||
/* seq_file iteration state over the dyn_ftrace pages and probe hash. */
struct ftrace_iterator {
	struct ftrace_page *pg;		/* current record page */
	int hidx;			/* current ftrace_func_hash bucket */
	int idx;			/* next index into pg->records */
	unsigned flags;			/* FTRACE_ITER_* mode flags */
	struct trace_parser parser;	/* buffers partial writes */
};
1347 | 1343 | ||
/*
 * seq_file ->next for the function-probe hash: advance to the next
 * hlist node, skipping empty buckets.  Returns NULL once all
 * FTRACE_FUNC_HASHSIZE buckets are exhausted.
 */
static void *
t_hash_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct hlist_node *hnd = v;
	struct hlist_head *hhd;

	WARN_ON(!(iter->flags & FTRACE_ITER_HASH));

	(*pos)++;

retry:
	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
		return NULL;

	hhd = &ftrace_func_hash[iter->hidx];

	if (hlist_empty(hhd)) {
		/* nothing in this bucket; try the next one */
		iter->hidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			/* bucket exhausted; move on */
			iter->hidx++;
			goto retry;
		}
	}

	return hnd;
}
1383 | 1379 | ||
/*
 * seq_file ->start for the probe-hash portion of the output.  On first
 * entry into hash mode the position is reset, then the iterator is
 * walked forward until it reaches *pos.
 */
static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	if (!(iter->flags & FTRACE_ITER_HASH))
		*pos = 0;

	iter->flags |= FTRACE_ITER_HASH;

	iter->hidx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_hash_next(m, p, &l);
		if (!p)
			break;
	}
	return p;
}
1403 | 1399 | ||
1404 | static int t_hash_show(struct seq_file *m, void *v) | 1400 | static int t_hash_show(struct seq_file *m, void *v) |
1405 | { | 1401 | { |
1406 | struct ftrace_func_probe *rec; | 1402 | struct ftrace_func_probe *rec; |
1407 | struct hlist_node *hnd = v; | 1403 | struct hlist_node *hnd = v; |
1408 | 1404 | ||
1409 | rec = hlist_entry(hnd, struct ftrace_func_probe, node); | 1405 | rec = hlist_entry(hnd, struct ftrace_func_probe, node); |
1410 | 1406 | ||
1411 | if (rec->ops->print) | 1407 | if (rec->ops->print) |
1412 | return rec->ops->print(m, rec->ip, rec->ops, rec->data); | 1408 | return rec->ops->print(m, rec->ip, rec->ops, rec->data); |
1413 | 1409 | ||
1414 | seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func); | 1410 | seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func); |
1415 | 1411 | ||
1416 | if (rec->data) | 1412 | if (rec->data) |
1417 | seq_printf(m, ":%p", rec->data); | 1413 | seq_printf(m, ":%p", rec->data); |
1418 | seq_putc(m, '\n'); | 1414 | seq_putc(m, '\n'); |
1419 | 1415 | ||
1420 | return 0; | 1416 | return 0; |
1421 | } | 1417 | } |
1422 | 1418 | ||
/*
 * seq_file ->next for the record-based files: return the next
 * dyn_ftrace record that matches the iterator's mode flags, or defer
 * to the probe-hash iterator when already in hash mode.
 */
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_next(m, v, pos);

	(*pos)++;

	/* PRINTALL is a single banner line; nothing follows it */
	if (iter->flags & FTRACE_ITER_PRINTALL)
		return NULL;

retry:
	if (iter->idx >= iter->pg->index) {
		/* page exhausted; advance to the next record page */
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		/* skip records that do not match this iterator's mode */
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}

	return rec;
}
1466 | 1462 | ||
/*
 * seq_file ->start: takes ftrace_lock (released in t_stop()) and
 * positions the iterator.  Falls through to the probe hash after the
 * records are exhausted on the filter file.
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	mutex_lock(&ftrace_lock);
	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
		if (*pos > 0)
			return t_hash_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

	/* walk forward from the first page to the requested position */
	iter->pg = ftrace_pages_start;
	iter->idx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_next(m, p, &l);
		if (!p)
			break;
	}

	if (!p && iter->flags & FTRACE_ITER_FILTER)
		return t_hash_start(m, pos);

	return p;
}
1502 | 1498 | ||
/* seq_file ->stop: drop ftrace_lock taken in t_start(). */
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}
1507 | 1503 | ||
1508 | static int t_show(struct seq_file *m, void *v) | 1504 | static int t_show(struct seq_file *m, void *v) |
1509 | { | 1505 | { |
1510 | struct ftrace_iterator *iter = m->private; | 1506 | struct ftrace_iterator *iter = m->private; |
1511 | struct dyn_ftrace *rec = v; | 1507 | struct dyn_ftrace *rec = v; |
1512 | 1508 | ||
1513 | if (iter->flags & FTRACE_ITER_HASH) | 1509 | if (iter->flags & FTRACE_ITER_HASH) |
1514 | return t_hash_show(m, v); | 1510 | return t_hash_show(m, v); |
1515 | 1511 | ||
1516 | if (iter->flags & FTRACE_ITER_PRINTALL) { | 1512 | if (iter->flags & FTRACE_ITER_PRINTALL) { |
1517 | seq_printf(m, "#### all functions enabled ####\n"); | 1513 | seq_printf(m, "#### all functions enabled ####\n"); |
1518 | return 0; | 1514 | return 0; |
1519 | } | 1515 | } |
1520 | 1516 | ||
1521 | if (!rec) | 1517 | if (!rec) |
1522 | return 0; | 1518 | return 0; |
1523 | 1519 | ||
1524 | seq_printf(m, "%ps\n", (void *)rec->ip); | 1520 | seq_printf(m, "%ps\n", (void *)rec->ip); |
1525 | 1521 | ||
1526 | return 0; | 1522 | return 0; |
1527 | } | 1523 | } |
1528 | 1524 | ||
/* seq_file ops shared by the avail/failures/filter/notrace files. */
static const struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
1535 | 1531 | ||
1536 | static int | 1532 | static int |
1537 | ftrace_avail_open(struct inode *inode, struct file *file) | 1533 | ftrace_avail_open(struct inode *inode, struct file *file) |
1538 | { | 1534 | { |
1539 | struct ftrace_iterator *iter; | 1535 | struct ftrace_iterator *iter; |
1540 | int ret; | 1536 | int ret; |
1541 | 1537 | ||
1542 | if (unlikely(ftrace_disabled)) | 1538 | if (unlikely(ftrace_disabled)) |
1543 | return -ENODEV; | 1539 | return -ENODEV; |
1544 | 1540 | ||
1545 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | 1541 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); |
1546 | if (!iter) | 1542 | if (!iter) |
1547 | return -ENOMEM; | 1543 | return -ENOMEM; |
1548 | 1544 | ||
1549 | iter->pg = ftrace_pages_start; | 1545 | iter->pg = ftrace_pages_start; |
1550 | 1546 | ||
1551 | ret = seq_open(file, &show_ftrace_seq_ops); | 1547 | ret = seq_open(file, &show_ftrace_seq_ops); |
1552 | if (!ret) { | 1548 | if (!ret) { |
1553 | struct seq_file *m = file->private_data; | 1549 | struct seq_file *m = file->private_data; |
1554 | 1550 | ||
1555 | m->private = iter; | 1551 | m->private = iter; |
1556 | } else { | 1552 | } else { |
1557 | kfree(iter); | 1553 | kfree(iter); |
1558 | } | 1554 | } |
1559 | 1555 | ||
1560 | return ret; | 1556 | return ret; |
1561 | } | 1557 | } |
1562 | 1558 | ||
1563 | static int | 1559 | static int |
1564 | ftrace_failures_open(struct inode *inode, struct file *file) | 1560 | ftrace_failures_open(struct inode *inode, struct file *file) |
1565 | { | 1561 | { |
1566 | int ret; | 1562 | int ret; |
1567 | struct seq_file *m; | 1563 | struct seq_file *m; |
1568 | struct ftrace_iterator *iter; | 1564 | struct ftrace_iterator *iter; |
1569 | 1565 | ||
1570 | ret = ftrace_avail_open(inode, file); | 1566 | ret = ftrace_avail_open(inode, file); |
1571 | if (!ret) { | 1567 | if (!ret) { |
1572 | m = (struct seq_file *)file->private_data; | 1568 | m = (struct seq_file *)file->private_data; |
1573 | iter = (struct ftrace_iterator *)m->private; | 1569 | iter = (struct ftrace_iterator *)m->private; |
1574 | iter->flags = FTRACE_ITER_FAILURES; | 1570 | iter->flags = FTRACE_ITER_FAILURES; |
1575 | } | 1571 | } |
1576 | 1572 | ||
1577 | return ret; | 1573 | return ret; |
1578 | } | 1574 | } |
1579 | 1575 | ||
1580 | 1576 | ||
1581 | static void ftrace_filter_reset(int enable) | 1577 | static void ftrace_filter_reset(int enable) |
1582 | { | 1578 | { |
1583 | struct ftrace_page *pg; | 1579 | struct ftrace_page *pg; |
1584 | struct dyn_ftrace *rec; | 1580 | struct dyn_ftrace *rec; |
1585 | unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; | 1581 | unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; |
1586 | 1582 | ||
1587 | mutex_lock(&ftrace_lock); | 1583 | mutex_lock(&ftrace_lock); |
1588 | if (enable) | 1584 | if (enable) |
1589 | ftrace_filtered = 0; | 1585 | ftrace_filtered = 0; |
1590 | do_for_each_ftrace_rec(pg, rec) { | 1586 | do_for_each_ftrace_rec(pg, rec) { |
1591 | if (rec->flags & FTRACE_FL_FAILED) | 1587 | if (rec->flags & FTRACE_FL_FAILED) |
1592 | continue; | 1588 | continue; |
1593 | rec->flags &= ~type; | 1589 | rec->flags &= ~type; |
1594 | } while_for_each_ftrace_rec(); | 1590 | } while_for_each_ftrace_rec(); |
1595 | mutex_unlock(&ftrace_lock); | 1591 | mutex_unlock(&ftrace_lock); |
1596 | } | 1592 | } |
1597 | 1593 | ||
/*
 * Common open for set_ftrace_filter (enable=1) and set_ftrace_notrace
 * (enable=0).  Opening with O_TRUNC for writing resets the current
 * set; opening for read attaches the record iterator via seq_open().
 * For write-only opens the iterator (used as a parse buffer) is hung
 * directly off file->private_data.
 */
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
		kfree(iter);
		return -ENOMEM;
	}

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else {
			/* seq_open failed: release the parser and iterator */
			trace_parser_put(&iter->parser);
			kfree(iter);
		}
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
1640 | 1636 | ||
/* Open set_ftrace_filter: writes manipulate FTRACE_FL_FILTER. */
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}
1646 | 1642 | ||
/* Open set_ftrace_notrace: writes manipulate FTRACE_FL_NOTRACE. */
static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
1652 | 1648 | ||
1653 | static loff_t | 1649 | static loff_t |
1654 | ftrace_regex_lseek(struct file *file, loff_t offset, int origin) | 1650 | ftrace_regex_lseek(struct file *file, loff_t offset, int origin) |
1655 | { | 1651 | { |
1656 | loff_t ret; | 1652 | loff_t ret; |
1657 | 1653 | ||
1658 | if (file->f_mode & FMODE_READ) | 1654 | if (file->f_mode & FMODE_READ) |
1659 | ret = seq_lseek(file, offset, origin); | 1655 | ret = seq_lseek(file, offset, origin); |
1660 | else | 1656 | else |
1661 | file->f_pos = ret = 1; | 1657 | file->f_pos = ret = 1; |
1662 | 1658 | ||
1663 | return ret; | 1659 | return ret; |
1664 | } | 1660 | } |
1665 | 1661 | ||
1666 | static int ftrace_match(char *str, char *regex, int len, int type) | 1662 | static int ftrace_match(char *str, char *regex, int len, int type) |
1667 | { | 1663 | { |
1668 | int matched = 0; | 1664 | int matched = 0; |
1669 | int slen; | 1665 | int slen; |
1670 | 1666 | ||
1671 | switch (type) { | 1667 | switch (type) { |
1672 | case MATCH_FULL: | 1668 | case MATCH_FULL: |
1673 | if (strcmp(str, regex) == 0) | 1669 | if (strcmp(str, regex) == 0) |
1674 | matched = 1; | 1670 | matched = 1; |
1675 | break; | 1671 | break; |
1676 | case MATCH_FRONT_ONLY: | 1672 | case MATCH_FRONT_ONLY: |
1677 | if (strncmp(str, regex, len) == 0) | 1673 | if (strncmp(str, regex, len) == 0) |
1678 | matched = 1; | 1674 | matched = 1; |
1679 | break; | 1675 | break; |
1680 | case MATCH_MIDDLE_ONLY: | 1676 | case MATCH_MIDDLE_ONLY: |
1681 | if (strstr(str, regex)) | 1677 | if (strstr(str, regex)) |
1682 | matched = 1; | 1678 | matched = 1; |
1683 | break; | 1679 | break; |
1684 | case MATCH_END_ONLY: | 1680 | case MATCH_END_ONLY: |
1685 | slen = strlen(str); | 1681 | slen = strlen(str); |
1686 | if (slen >= len && memcmp(str + slen - len, regex, len) == 0) | 1682 | if (slen >= len && memcmp(str + slen - len, regex, len) == 0) |
1687 | matched = 1; | 1683 | matched = 1; |
1688 | break; | 1684 | break; |
1689 | } | 1685 | } |
1690 | 1686 | ||
1691 | return matched; | 1687 | return matched; |
1692 | } | 1688 | } |
1693 | 1689 | ||
/* Resolve rec->ip to its symbol name and match it against the regex. */
static int
ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	return ftrace_match(str, regex, len, type);
}
1702 | 1698 | ||
/*
 * Apply glob @buff (length @len) to all records, setting (or, for a
 * '!'-negated pattern, clearing) FTRACE_FL_FILTER when @enable or
 * FTRACE_FL_NOTRACE otherwise.  Returns 1 if any record matched.
 */
static int ftrace_match_records(char *buff, int len, int enable)
{
	unsigned int search_len;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long flag;
	char *search;
	int type;
	int not;
	int found = 0;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	type = filter_parse_regex(buff, len, &search, &not);

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
			found = 1;
		}
		/*
		 * Only enable filtering if we have a function that
		 * is filtered on.
		 */
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);

	return found;
}
1743 | 1739 | ||
1744 | static int | 1740 | static int |
1745 | ftrace_match_module_record(struct dyn_ftrace *rec, char *mod, | 1741 | ftrace_match_module_record(struct dyn_ftrace *rec, char *mod, |
1746 | char *regex, int len, int type) | 1742 | char *regex, int len, int type) |
1747 | { | 1743 | { |
1748 | char str[KSYM_SYMBOL_LEN]; | 1744 | char str[KSYM_SYMBOL_LEN]; |
1749 | char *modname; | 1745 | char *modname; |
1750 | 1746 | ||
1751 | kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); | 1747 | kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); |
1752 | 1748 | ||
1753 | if (!modname || strcmp(modname, mod)) | 1749 | if (!modname || strcmp(modname, mod)) |
1754 | return 0; | 1750 | return 0; |
1755 | 1751 | ||
1756 | /* blank search means to match all funcs in the mod */ | 1752 | /* blank search means to match all funcs in the mod */ |
1757 | if (len) | 1753 | if (len) |
1758 | return ftrace_match(str, regex, len, type); | 1754 | return ftrace_match(str, regex, len, type); |
1759 | else | 1755 | else |
1760 | return 1; | 1756 | return 1; |
1761 | } | 1757 | } |
1762 | 1758 | ||
/*
 * Like ftrace_match_records(), but restrict matching to functions living
 * in module @mod.  @buff may be "" or "*" to select every function in
 * the module, or "!"/"!*" to deselect them all.
 *
 * Returns non-zero if at least one record matched.
 */
static int ftrace_match_module_records(char *buff, char *mod, int enable)
{
	unsigned search_len = 0;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	char *search = buff;
	unsigned long flag;
	int not = 0;
	int found = 0;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	/* blank or '*' mean the same */
	if (strcmp(buff, "*") == 0)
		buff[0] = 0;

	/* handle the case of 'dont filter this module' */
	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
		buff[0] = 0;
		not = 1;
	}

	/* non-empty pattern: parse it like a normal filter glob */
	if (strlen(buff)) {
		type = filter_parse_regex(buff, strlen(buff), &search, &not);
		search_len = strlen(search);
	}

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_module_record(rec, mod,
					search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
			found = 1;
		}
		/* any filtered function means filtering is now active */
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;

	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);

	return found;
}
1813 | 1809 | ||
/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */
1818 | 1814 | ||
1819 | static int | 1815 | static int |
1820 | ftrace_mod_callback(char *func, char *cmd, char *param, int enable) | 1816 | ftrace_mod_callback(char *func, char *cmd, char *param, int enable) |
1821 | { | 1817 | { |
1822 | char *mod; | 1818 | char *mod; |
1823 | 1819 | ||
1824 | /* | 1820 | /* |
1825 | * cmd == 'mod' because we only registered this func | 1821 | * cmd == 'mod' because we only registered this func |
1826 | * for the 'mod' ftrace_func_command. | 1822 | * for the 'mod' ftrace_func_command. |
1827 | * But if you register one func with multiple commands, | 1823 | * But if you register one func with multiple commands, |
1828 | * you can tell which command was used by the cmd | 1824 | * you can tell which command was used by the cmd |
1829 | * parameter. | 1825 | * parameter. |
1830 | */ | 1826 | */ |
1831 | 1827 | ||
1832 | /* we must have a module name */ | 1828 | /* we must have a module name */ |
1833 | if (!param) | 1829 | if (!param) |
1834 | return -EINVAL; | 1830 | return -EINVAL; |
1835 | 1831 | ||
1836 | mod = strsep(¶m, ":"); | 1832 | mod = strsep(¶m, ":"); |
1837 | if (!strlen(mod)) | 1833 | if (!strlen(mod)) |
1838 | return -EINVAL; | 1834 | return -EINVAL; |
1839 | 1835 | ||
1840 | if (ftrace_match_module_records(func, mod, enable)) | 1836 | if (ftrace_match_module_records(func, mod, enable)) |
1841 | return 0; | 1837 | return 0; |
1842 | return -EINVAL; | 1838 | return -EINVAL; |
1843 | } | 1839 | } |
1844 | 1840 | ||
/* The "mod" command: ":mod:<module>[:func]" limits filtering to one module. */
static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

/* Register the "mod" command at boot, once the command list exists. */
static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
device_initcall(ftrace_mod_cmd_init);
1855 | 1851 | ||
/*
 * The ftrace handler installed for function probes.  Called for every
 * traced function; looks @ip up in the probe hash and invokes each probe
 * callback registered on that address.
 */
static void
function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_func_probe *entry;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;
	int resched;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	/* fast path: no probe hashed to this bucket */
	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent a RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
	resched = ftrace_preempt_disable();
	hlist_for_each_entry_rcu(entry, n, hhd, node) {
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
	ftrace_preempt_enable(resched);
}
1884 | 1880 | ||
/* ftrace_ops used to hook functions that have a registered probe. */
static struct ftrace_ops trace_probe_ops __read_mostly =
{
	.func			= function_trace_probe_call,
};

/* Non-zero once trace_probe_ops has been registered with ftrace. */
static int ftrace_probe_registered;
1891 | 1887 | ||
/*
 * Register trace_probe_ops with ftrace once at least one probe entry is
 * present in the hash.  Idempotent; called with ftrace_lock held.
 */
static void __enable_ftrace_function_probe(void)
{
	int i;

	if (ftrace_probe_registered)
		return;

	/* look for any bucket holding a probe entry */
	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			break;
	}
	/* Nothing registered? */
	if (i == FTRACE_FUNC_HASHSIZE)
		return;

	__register_ftrace_function(&trace_probe_ops);
	ftrace_startup(0);
	ftrace_probe_registered = 1;
}
1912 | 1908 | ||
/*
 * Unregister trace_probe_ops once the probe hash is completely empty.
 * Idempotent; called with ftrace_lock held.
 */
static void __disable_ftrace_function_probe(void)
{
	int i;

	if (!ftrace_probe_registered)
		return;

	/* bail if any bucket still holds a probe entry */
	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			return;
	}

	/* no more funcs left */
	__unregister_ftrace_function(&trace_probe_ops);
	ftrace_shutdown(0);
	ftrace_probe_registered = 0;
}
1931 | 1927 | ||
1932 | 1928 | ||
/*
 * RCU callback that finally frees a probe entry, after hash walkers in
 * function_trace_probe_call() can no longer observe it.  Gives the probe
 * owner a chance to release its per-entry data first.
 */
static void ftrace_free_entry_rcu(struct rcu_head *rhp)
{
	struct ftrace_func_probe *entry =
		container_of(rhp, struct ftrace_func_probe, rcu);

	if (entry->ops->free)
		entry->ops->free(&entry->data);
	kfree(entry);
}
1942 | 1938 | ||
1943 | 1939 | ||
/**
 * register_ftrace_function_probe - attach a probe to matching functions
 * @glob: glob selecting which functions to probe ('!' is not supported)
 * @ops:  probe callbacks (func, optional callback/free)
 * @data: opaque data handed to the probe callbacks
 *
 * Returns the number of functions the probe was attached to, or a
 * negative errno if nothing could be attached.  On allocation failure
 * part-way through, already-attached entries are kept and the partial
 * count is returned.
 */
int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			      void *data)
{
	struct ftrace_func_probe *entry;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type, len, not;
	unsigned long key;
	int count = 0;
	char *search;

	type = filter_parse_regex(glob, strlen(glob), &search, &not);
	len = strlen(search);

	/* we do not support '!' for function probes */
	if (WARN_ON(not))
		return -EINVAL;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (!ftrace_match_record(rec, search, len, type))
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			/* If we did not process any, then return error */
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
		if (ops->callback) {
			if (ops->callback(rec->ip, &entry->data) < 0) {
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

		entry->ops = ops;
		entry->ip = rec->ip;

		/* publish; readers walk the hash with preemption disabled */
		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();
	__enable_ftrace_function_probe();

 out_unlock:
	mutex_unlock(&ftrace_lock);

	return count;
}
2011 | 2007 | ||
/*
 * Flags for __unregister_ftrace_function_probe() selecting which fields
 * of a probe entry must match before the entry is removed.
 */
enum {
	PROBE_TEST_FUNC = 1,	/* entry->ops must equal the given ops */
	PROBE_TEST_DATA = 2	/* entry->data must equal the given data */
};
2016 | 2012 | ||
/*
 * Remove probe entries from the hash.  @glob (NULL, "" or "*" meaning
 * "all") narrows removal by function name; @flags selects whether
 * entry->ops and/or entry->data must also match (see PROBE_TEST_*).
 * Freeing is deferred through RCU so concurrent hash walkers stay safe.
 */
static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				  void *data, int flags)
{
	struct ftrace_func_probe *entry;
	struct hlist_node *n, *tmp;
	char str[KSYM_SYMBOL_LEN];
	int type = MATCH_FULL;
	int i, len = 0;
	char *search;

	/* "*" or empty glob means no name restriction at all */
	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
		glob = NULL;
	else if (glob) {
		int not;

		type = filter_parse_regex(glob, strlen(glob), &search, &not);
		len = strlen(search);

		/* we do not support '!' for function probes */
		if (WARN_ON(not))
			return;
	}

	mutex_lock(&ftrace_lock);
	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];

		hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {

			/* break up if statements for readability */
			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
				continue;

			if ((flags & PROBE_TEST_DATA) && entry->data != data)
				continue;

			/* do this last, since it is the most expensive */
			if (glob) {
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, glob, len, type))
					continue;
			}

			hlist_del(&entry->node);
			call_rcu(&entry->rcu, ftrace_free_entry_rcu);
		}
	}
	/* drop trace_probe_ops if that removed the last probe */
	__disable_ftrace_function_probe();
	mutex_unlock(&ftrace_lock);
}
2069 | 2065 | ||
2070 | void | 2066 | void |
2071 | unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | 2067 | unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, |
2072 | void *data) | 2068 | void *data) |
2073 | { | 2069 | { |
2074 | __unregister_ftrace_function_probe(glob, ops, data, | 2070 | __unregister_ftrace_function_probe(glob, ops, data, |
2075 | PROBE_TEST_FUNC | PROBE_TEST_DATA); | 2071 | PROBE_TEST_FUNC | PROBE_TEST_DATA); |
2076 | } | 2072 | } |
2077 | 2073 | ||
2078 | void | 2074 | void |
2079 | unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops) | 2075 | unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops) |
2080 | { | 2076 | { |
2081 | __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC); | 2077 | __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC); |
2082 | } | 2078 | } |
2083 | 2079 | ||
2084 | void unregister_ftrace_function_probe_all(char *glob) | 2080 | void unregister_ftrace_function_probe_all(char *glob) |
2085 | { | 2081 | { |
2086 | __unregister_ftrace_function_probe(glob, NULL, NULL, 0); | 2082 | __unregister_ftrace_function_probe(glob, NULL, NULL, 0); |
2087 | } | 2083 | } |
2088 | 2084 | ||
/* Registered filter commands (e.g. "mod"), protected by ftrace_cmd_mutex. */
static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);
2091 | 2087 | ||
2092 | int register_ftrace_command(struct ftrace_func_command *cmd) | 2088 | int register_ftrace_command(struct ftrace_func_command *cmd) |
2093 | { | 2089 | { |
2094 | struct ftrace_func_command *p; | 2090 | struct ftrace_func_command *p; |
2095 | int ret = 0; | 2091 | int ret = 0; |
2096 | 2092 | ||
2097 | mutex_lock(&ftrace_cmd_mutex); | 2093 | mutex_lock(&ftrace_cmd_mutex); |
2098 | list_for_each_entry(p, &ftrace_commands, list) { | 2094 | list_for_each_entry(p, &ftrace_commands, list) { |
2099 | if (strcmp(cmd->name, p->name) == 0) { | 2095 | if (strcmp(cmd->name, p->name) == 0) { |
2100 | ret = -EBUSY; | 2096 | ret = -EBUSY; |
2101 | goto out_unlock; | 2097 | goto out_unlock; |
2102 | } | 2098 | } |
2103 | } | 2099 | } |
2104 | list_add(&cmd->list, &ftrace_commands); | 2100 | list_add(&cmd->list, &ftrace_commands); |
2105 | out_unlock: | 2101 | out_unlock: |
2106 | mutex_unlock(&ftrace_cmd_mutex); | 2102 | mutex_unlock(&ftrace_cmd_mutex); |
2107 | 2103 | ||
2108 | return ret; | 2104 | return ret; |
2109 | } | 2105 | } |
2110 | 2106 | ||
2111 | int unregister_ftrace_command(struct ftrace_func_command *cmd) | 2107 | int unregister_ftrace_command(struct ftrace_func_command *cmd) |
2112 | { | 2108 | { |
2113 | struct ftrace_func_command *p, *n; | 2109 | struct ftrace_func_command *p, *n; |
2114 | int ret = -ENODEV; | 2110 | int ret = -ENODEV; |
2115 | 2111 | ||
2116 | mutex_lock(&ftrace_cmd_mutex); | 2112 | mutex_lock(&ftrace_cmd_mutex); |
2117 | list_for_each_entry_safe(p, n, &ftrace_commands, list) { | 2113 | list_for_each_entry_safe(p, n, &ftrace_commands, list) { |
2118 | if (strcmp(cmd->name, p->name) == 0) { | 2114 | if (strcmp(cmd->name, p->name) == 0) { |
2119 | ret = 0; | 2115 | ret = 0; |
2120 | list_del_init(&p->list); | 2116 | list_del_init(&p->list); |
2121 | goto out_unlock; | 2117 | goto out_unlock; |
2122 | } | 2118 | } |
2123 | } | 2119 | } |
2124 | out_unlock: | 2120 | out_unlock: |
2125 | mutex_unlock(&ftrace_cmd_mutex); | 2121 | mutex_unlock(&ftrace_cmd_mutex); |
2126 | 2122 | ||
2127 | return ret; | 2123 | return ret; |
2128 | } | 2124 | } |
2129 | 2125 | ||
2130 | static int ftrace_process_regex(char *buff, int len, int enable) | 2126 | static int ftrace_process_regex(char *buff, int len, int enable) |
2131 | { | 2127 | { |
2132 | char *func, *command, *next = buff; | 2128 | char *func, *command, *next = buff; |
2133 | struct ftrace_func_command *p; | 2129 | struct ftrace_func_command *p; |
2134 | int ret = -EINVAL; | 2130 | int ret = -EINVAL; |
2135 | 2131 | ||
2136 | func = strsep(&next, ":"); | 2132 | func = strsep(&next, ":"); |
2137 | 2133 | ||
2138 | if (!next) { | 2134 | if (!next) { |
2139 | if (ftrace_match_records(func, len, enable)) | 2135 | if (ftrace_match_records(func, len, enable)) |
2140 | return 0; | 2136 | return 0; |
2141 | return ret; | 2137 | return ret; |
2142 | } | 2138 | } |
2143 | 2139 | ||
2144 | /* command found */ | 2140 | /* command found */ |
2145 | 2141 | ||
2146 | command = strsep(&next, ":"); | 2142 | command = strsep(&next, ":"); |
2147 | 2143 | ||
2148 | mutex_lock(&ftrace_cmd_mutex); | 2144 | mutex_lock(&ftrace_cmd_mutex); |
2149 | list_for_each_entry(p, &ftrace_commands, list) { | 2145 | list_for_each_entry(p, &ftrace_commands, list) { |
2150 | if (strcmp(p->name, command) == 0) { | 2146 | if (strcmp(p->name, command) == 0) { |
2151 | ret = p->func(func, command, next, enable); | 2147 | ret = p->func(func, command, next, enable); |
2152 | goto out_unlock; | 2148 | goto out_unlock; |
2153 | } | 2149 | } |
2154 | } | 2150 | } |
2155 | out_unlock: | 2151 | out_unlock: |
2156 | mutex_unlock(&ftrace_cmd_mutex); | 2152 | mutex_unlock(&ftrace_cmd_mutex); |
2157 | 2153 | ||
2158 | return ret; | 2154 | return ret; |
2159 | } | 2155 | } |
2160 | 2156 | ||
/*
 * Common write handler for set_ftrace_filter / set_ftrace_notrace.
 * User input is accumulated in the iterator's trace_parser; each fully
 * buffered token is applied as one filter expression.
 */
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	struct trace_parser *parser;
	ssize_t ret, read;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	/* when opened for read too, the iterator hangs off the seq_file */
	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	parser = &iter->parser;
	read = trace_get_user(parser, ubuf, cnt, ppos);

	/* only act once a complete (non-continued) token is buffered */
	if (read >= 0 && trace_parser_loaded(parser) &&
	    !trace_parser_cont(parser)) {
		ret = ftrace_process_regex(parser->buffer,
					   parser->idx, enable);
		trace_parser_clear(parser);
		if (ret)
			goto out_unlock;
	}

	ret = read;
out_unlock:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
2198 | 2194 | ||
2199 | static ssize_t | 2195 | static ssize_t |
2200 | ftrace_filter_write(struct file *file, const char __user *ubuf, | 2196 | ftrace_filter_write(struct file *file, const char __user *ubuf, |
2201 | size_t cnt, loff_t *ppos) | 2197 | size_t cnt, loff_t *ppos) |
2202 | { | 2198 | { |
2203 | return ftrace_regex_write(file, ubuf, cnt, ppos, 1); | 2199 | return ftrace_regex_write(file, ubuf, cnt, ppos, 1); |
2204 | } | 2200 | } |
2205 | 2201 | ||
2206 | static ssize_t | 2202 | static ssize_t |
2207 | ftrace_notrace_write(struct file *file, const char __user *ubuf, | 2203 | ftrace_notrace_write(struct file *file, const char __user *ubuf, |
2208 | size_t cnt, loff_t *ppos) | 2204 | size_t cnt, loff_t *ppos) |
2209 | { | 2205 | { |
2210 | return ftrace_regex_write(file, ubuf, cnt, ppos, 0); | 2206 | return ftrace_regex_write(file, ubuf, cnt, ppos, 0); |
2211 | } | 2207 | } |
2212 | 2208 | ||
/*
 * Common helper for ftrace_set_filter()/ftrace_set_notrace(): optionally
 * reset the current filter set, then apply @buf as a new pattern.
 * No-op once ftrace has been disabled due to an earlier failure.
 */
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match_records(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}
2226 | 2222 | ||
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	int enable = 1;

	ftrace_set_regex(buf, len, reset, enable);
}
2240 | 2236 | ||
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	int enable = 0;

	ftrace_set_regex(buf, len, reset, enable);
}
2255 | 2251 | ||
/*
 * command line interface to allow users to set filters on boot up.
 */
#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
/* boot-time filter strings captured by __setup(); discarded after init */
static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
2262 | 2258 | ||
2263 | static int __init set_ftrace_notrace(char *str) | 2259 | static int __init set_ftrace_notrace(char *str) |
2264 | { | 2260 | { |
2265 | strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); | 2261 | strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); |
2266 | return 1; | 2262 | return 1; |
2267 | } | 2263 | } |
2268 | __setup("ftrace_notrace=", set_ftrace_notrace); | 2264 | __setup("ftrace_notrace=", set_ftrace_notrace); |
2269 | 2265 | ||
2270 | static int __init set_ftrace_filter(char *str) | 2266 | static int __init set_ftrace_filter(char *str) |
2271 | { | 2267 | { |
2272 | strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); | 2268 | strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); |
2273 | return 1; | 2269 | return 1; |
2274 | } | 2270 | } |
2275 | __setup("ftrace_filter=", set_ftrace_filter); | 2271 | __setup("ftrace_filter=", set_ftrace_filter); |
2276 | 2272 | ||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* boot-time "ftrace_graph_filter=" string; discarded after init */
static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
/* forward declaration: defined further down in this file */
static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);

static int __init set_graph_function(char *str)
{
	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_graph_filter=", set_graph_function);
2285 | 2283 | ||
2286 | static void __init set_ftrace_early_graph(char *buf) | 2284 | static void __init set_ftrace_early_graph(char *buf) |
2287 | { | 2285 | { |
2288 | int ret; | 2286 | int ret; |
2289 | char *func; | 2287 | char *func; |
2290 | 2288 | ||
2291 | while (buf) { | 2289 | while (buf) { |
2292 | func = strsep(&buf, ","); | 2290 | func = strsep(&buf, ","); |
2293 | /* we allow only one expression at a time */ | 2291 | /* we allow only one expression at a time */ |
2294 | ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count, | 2292 | ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count, |
2295 | func); | 2293 | func); |
2296 | if (ret) | 2294 | if (ret) |
2297 | printk(KERN_DEBUG "ftrace: function %s not " | 2295 | printk(KERN_DEBUG "ftrace: function %s not " |
2298 | "traceable\n", func); | 2296 | "traceable\n", func); |
2299 | } | 2297 | } |
2300 | } | 2298 | } |
2301 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 2299 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
2302 | 2300 | ||
2303 | static void __init set_ftrace_early_filter(char *buf, int enable) | 2301 | static void __init set_ftrace_early_filter(char *buf, int enable) |
2304 | { | 2302 | { |
2305 | char *func; | 2303 | char *func; |
2306 | 2304 | ||
2307 | while (buf) { | 2305 | while (buf) { |
2308 | func = strsep(&buf, ","); | 2306 | func = strsep(&buf, ","); |
2309 | ftrace_set_regex(func, strlen(func), 0, enable); | 2307 | ftrace_set_regex(func, strlen(func), 0, enable); |
2310 | } | 2308 | } |
2311 | } | 2309 | } |
2312 | 2310 | ||
/* Apply any filters that were collected from the kernel command line. */
static void __init set_ftrace_early_filters(void)
{
	if (ftrace_filter_buf[0])
		set_ftrace_early_filter(ftrace_filter_buf, 1);
	if (ftrace_notrace_buf[0])
		set_ftrace_early_filter(ftrace_notrace_buf, 0);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_buf[0])
		set_ftrace_early_graph(ftrace_graph_buf);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
}
2324 | 2322 | ||
/*
 * Common ->release for set_ftrace_filter/set_ftrace_notrace: flush any
 * pending (not yet newline-terminated) filter text, then re-enable the
 * call sites if tracing is active. @enable selects which list the text
 * applies to (1 = filter, 0 = notrace).
 */
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;
	struct trace_parser *parser;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		/* opened via seq_open(): the iterator hangs off the seq_file */
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	parser = &iter->parser;
	if (trace_parser_loaded(parser)) {
		/* apply whatever expression was left in the parser buffer */
		parser->buffer[parser->idx] = 0;
		ftrace_match_records(parser->buffer, parser->idx, enable);
	}

	mutex_lock(&ftrace_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_lock);

	trace_parser_put(parser);
	kfree(iter);

	mutex_unlock(&ftrace_regex_lock);
	return 0;
}
2357 | 2355 | ||
/* ->release for set_ftrace_filter: flush pending text into the filter list */
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

/* ->release for set_ftrace_notrace: flush pending text into the notrace list */
static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
2369 | 2367 | ||
/* available_filter_functions: read-only list of traceable functions */
static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

/* failures: read-only list of functions that failed to be converted */
static const struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

/* set_ftrace_filter: read/write the list of functions to trace */
static const struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

/* set_ftrace_notrace: read/write the list of functions to skip */
static const struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = seq_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
2399 | 2397 | ||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* protects the set_graph_function state below */
static DEFINE_MUTEX(graph_lock);

/* number of valid entries in ftrace_graph_funcs[] */
int ftrace_graph_count;
/* non-zero once at least one graph filter has been installed */
int ftrace_graph_filter_enabled;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
2407 | 2405 | ||
2408 | static void * | 2406 | static void * |
2409 | __g_next(struct seq_file *m, loff_t *pos) | 2407 | __g_next(struct seq_file *m, loff_t *pos) |
2410 | { | 2408 | { |
2411 | if (*pos >= ftrace_graph_count) | 2409 | if (*pos >= ftrace_graph_count) |
2412 | return NULL; | 2410 | return NULL; |
2413 | return &ftrace_graph_funcs[*pos]; | 2411 | return &ftrace_graph_funcs[*pos]; |
2414 | } | 2412 | } |
2415 | 2413 | ||
2416 | static void * | 2414 | static void * |
2417 | g_next(struct seq_file *m, void *v, loff_t *pos) | 2415 | g_next(struct seq_file *m, void *v, loff_t *pos) |
2418 | { | 2416 | { |
2419 | (*pos)++; | 2417 | (*pos)++; |
2420 | return __g_next(m, pos); | 2418 | return __g_next(m, pos); |
2421 | } | 2419 | } |
2422 | 2420 | ||
/* seq_file ->start: takes graph_lock, released in g_stop() */
static void *g_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&graph_lock);

	/* Nothing, tell g_show to print all functions are enabled */
	if (!ftrace_graph_filter_enabled && !*pos)
		return (void *)1;	/* sentinel understood by g_show() */

	return __g_next(m, pos);
}
2433 | 2431 | ||
/* seq_file ->stop: drops the lock taken in g_start() */
static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}
2438 | 2436 | ||
/*
 * seq_file ->show: print one filter entry, or the "all enabled" banner
 * when g_start() handed us its (void *)1 sentinel.
 */
static int g_show(struct seq_file *m, void *v)
{
	unsigned long *entry = v;

	if (!entry)
		return 0;

	if (entry == (unsigned long *)1)
		seq_printf(m, "#### all functions enabled ####\n");
	else
		seq_printf(m, "%ps\n", (void *)*entry);

	return 0;
}
2455 | 2453 | ||
/* seq_file iterator for reading set_graph_function */
static const struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};
2462 | 2460 | ||
/*
 * ->open for set_graph_function. An O_TRUNC write open clears the whole
 * filter; a read open attaches the seq_file iterator above.
 */
static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		/* truncating write: start over with an empty filter */
		ftrace_graph_filter_enabled = 0;
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}
	mutex_unlock(&graph_lock);

	if (file->f_mode & FMODE_READ)
		ret = seq_open(file, &ftrace_graph_seq_ops);

	return ret;
}
2485 | 2483 | ||
static int
ftrace_graph_release(struct inode *inode, struct file *file)
{
	/* only read opens allocated seq_file state in ftrace_graph_open() */
	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);
	return 0;
}
2493 | 2491 | ||
/*
 * Add every function matching @buffer to @array (or remove it, for a
 * '!'-negated pattern). *@idx is the current entry count and is updated
 * in place; the array holds at most FTRACE_GRAPH_MAX_FUNCS entries.
 *
 * Returns 0 if at least one record matched, -EINVAL if none did,
 * -EBUSY if the array is already full, -ENODEV if ftrace is disabled.
 */
static int
ftrace_set_func(unsigned long *array, int *idx, char *buffer)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int search_len;
	int fail = 1;
	int type, not;
	char *search;
	bool exists;
	int i;

	if (ftrace_disabled)
		return -ENODEV;

	/* decode regex */
	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
	if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
		return -EBUSY;

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			/* if it is in the array */
			exists = false;
			for (i = 0; i < *idx; i++) {
				if (array[i] == rec->ip) {
					exists = true;
					break;
				}
			}

			if (!not) {
				/* positive pattern: append if not present */
				fail = 0;
				if (!exists) {
					array[(*idx)++] = rec->ip;
					if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
						goto out;
				}
			} else {
				/* negated pattern: swap-remove entry i */
				if (exists) {
					array[i] = array[--(*idx)];
					array[*idx] = 0;
					fail = 0;
				}
			}
		}
	} while_for_each_ftrace_rec();
out:
	mutex_unlock(&ftrace_lock);

	if (fail)
		return -EINVAL;

	ftrace_graph_filter_enabled = 1;
	return 0;
}
2557 | 2555 | ||
/*
 * ->write for set_graph_function: parse user text and install it as a
 * graph filter. Returns the number of bytes consumed, or a negative
 * errno (-ENOMEM on parser allocation failure, ftrace_set_func() errors).
 */
static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	mutex_lock(&graph_lock);

	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		/* NUL-terminate what trace_get_user() accumulated */
		parser.buffer[parser.idx] = 0;

		/* we allow only one expression at a time */
		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
				      parser.buffer);
		if (ret)
			goto out_free;
	}

	ret = read;

out_free:
	trace_parser_put(&parser);
out_unlock:
	mutex_unlock(&graph_lock);

	return ret;
}
2596 | 2594 | ||
/* set_graph_function: read/write the function-graph filter */
static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = seq_read,
	.write = ftrace_graph_write,
	.release = ftrace_graph_release,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2604 | 2602 | ||
2605 | static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) | 2603 | static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) |
2606 | { | 2604 | { |
2607 | 2605 | ||
2608 | trace_create_file("available_filter_functions", 0444, | 2606 | trace_create_file("available_filter_functions", 0444, |
2609 | d_tracer, NULL, &ftrace_avail_fops); | 2607 | d_tracer, NULL, &ftrace_avail_fops); |
2610 | 2608 | ||
2611 | trace_create_file("failures", 0444, | 2609 | trace_create_file("failures", 0444, |
2612 | d_tracer, NULL, &ftrace_failures_fops); | 2610 | d_tracer, NULL, &ftrace_failures_fops); |
2613 | 2611 | ||
2614 | trace_create_file("set_ftrace_filter", 0644, d_tracer, | 2612 | trace_create_file("set_ftrace_filter", 0644, d_tracer, |
2615 | NULL, &ftrace_filter_fops); | 2613 | NULL, &ftrace_filter_fops); |
2616 | 2614 | ||
2617 | trace_create_file("set_ftrace_notrace", 0644, d_tracer, | 2615 | trace_create_file("set_ftrace_notrace", 0644, d_tracer, |
2618 | NULL, &ftrace_notrace_fops); | 2616 | NULL, &ftrace_notrace_fops); |
2619 | 2617 | ||
2620 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 2618 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
2621 | trace_create_file("set_graph_function", 0444, d_tracer, | 2619 | trace_create_file("set_graph_function", 0444, d_tracer, |
2622 | NULL, | 2620 | NULL, |
2623 | &ftrace_graph_fops); | 2621 | &ftrace_graph_fops); |
2624 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 2622 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
2625 | 2623 | ||
2626 | return 0; | 2624 | return 0; |
2627 | } | 2625 | } |
2628 | 2626 | ||
/*
 * Record every mcount call site in [@start, @end) (a linker-built table
 * of addresses) and convert them to nops. @mod is NULL for the core
 * kernel, or the module owning the table. Always returns 0.
 */
static int ftrace_process_locs(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_lock);

	return 0;
}
2660 | 2658 | ||
#ifdef CONFIG_MODULES
/* Free every ftrace record whose call site lives in @mod's core text. */
void ftrace_release_mod(struct module *mod)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (ftrace_disabled)
		return;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {
		if (within_module_core(rec->ip, mod)) {
			/*
			 * rec->ip is changed in ftrace_free_rec()
			 * It should not between s and e if record was freed.
			 */
			FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
			ftrace_free_rec(rec);
		}
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}
2683 | 2681 | ||
2684 | static void ftrace_init_module(struct module *mod, | 2682 | static void ftrace_init_module(struct module *mod, |
2685 | unsigned long *start, unsigned long *end) | 2683 | unsigned long *start, unsigned long *end) |
2686 | { | 2684 | { |
2687 | if (ftrace_disabled || start == end) | 2685 | if (ftrace_disabled || start == end) |
2688 | return; | 2686 | return; |
2689 | ftrace_process_locs(mod, start, end); | 2687 | ftrace_process_locs(mod, start, end); |
2690 | } | 2688 | } |
2691 | 2689 | ||
/*
 * Module notifier: record a module's mcount call sites when it comes up
 * and release them when it goes away. Always returns 0 (NOTIFY_DONE).
 */
static int ftrace_module_notify(struct notifier_block *self,
				unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		ftrace_init_module(mod, mod->ftrace_callsites,
				   mod->ftrace_callsites +
				   mod->num_ftrace_callsites);
		break;
	case MODULE_STATE_GOING:
		ftrace_release_mod(mod);
		break;
	}

	return 0;
}
#else
/* !CONFIG_MODULES: nothing to do on module events */
static int ftrace_module_notify(struct notifier_block *self,
				unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block ftrace_module_nb = {
	.notifier_call = ftrace_module_notify,
	.priority = 0,
};

/* mcount call-site table emitted by the linker for the core kernel */
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];
2725 | 2723 | ||
/*
 * Boot-time initialization of dynamic ftrace: let the architecture set
 * up, allocate the record table, convert all core-kernel call sites,
 * register the module notifier, and apply command-line filters. On any
 * failure, ftrace is disabled entirely.
 */
void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	/*
	 * NOTE(review): this return value is overwritten below without
	 * being checked; ftrace_process_locs() currently always returns
	 * 0, so nothing is lost — confirm if that ever changes.
	 */
	ret = ftrace_process_locs(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	ret = register_module_notifier(&ftrace_module_nb);
	if (ret)
		pr_warning("Failed to register trace ftrace module notifier\n");

	set_ftrace_early_filters();

	return;
 failed:
	ftrace_disabled = 1;
}
2764 | 2762 | ||
#else

/* !CONFIG_DYNAMIC_FTRACE: tracing is simply always enabled */
static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
2782 | 2780 | ||
2783 | static void clear_ftrace_swapper(void) | 2781 | static void clear_ftrace_swapper(void) |
2784 | { | 2782 | { |
2785 | struct task_struct *p; | 2783 | struct task_struct *p; |
2786 | int cpu; | 2784 | int cpu; |
2787 | 2785 | ||
2788 | get_online_cpus(); | 2786 | get_online_cpus(); |
2789 | for_each_online_cpu(cpu) { | 2787 | for_each_online_cpu(cpu) { |
2790 | p = idle_task(cpu); | 2788 | p = idle_task(cpu); |
2791 | clear_tsk_trace_trace(p); | 2789 | clear_tsk_trace_trace(p); |
2792 | } | 2790 | } |
2793 | put_online_cpus(); | 2791 | put_online_cpus(); |
2794 | } | 2792 | } |
2795 | 2793 | ||
/* Set the trace flag on every online CPU's idle (swapper) task. */
static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	/* Hold the hotplug lock so the set of online CPUs is stable. */
	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}
2808 | 2806 | ||
/*
 * Clear the trace flag on every task attached to @pid, then drop the
 * reference that was taken when the pid was added to the filter list.
 * Note the asymmetry with set_ftrace_pid(), which does not put_pid().
 */
static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	/* RCU protects the pid -> task traversal. */
	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}
2821 | 2819 | ||
/*
 * Set the trace flag on every task attached to @pid.  The pid
 * reference is kept; it is released later by clear_ftrace_pid().
 */
static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	/* RCU protects the pid -> task traversal. */
	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}
2832 | 2830 | ||
2833 | static void clear_ftrace_pid_task(struct pid *pid) | 2831 | static void clear_ftrace_pid_task(struct pid *pid) |
2834 | { | 2832 | { |
2835 | if (pid == ftrace_swapper_pid) | 2833 | if (pid == ftrace_swapper_pid) |
2836 | clear_ftrace_swapper(); | 2834 | clear_ftrace_swapper(); |
2837 | else | 2835 | else |
2838 | clear_ftrace_pid(pid); | 2836 | clear_ftrace_pid(pid); |
2839 | } | 2837 | } |
2840 | 2838 | ||
2841 | static void set_ftrace_pid_task(struct pid *pid) | 2839 | static void set_ftrace_pid_task(struct pid *pid) |
2842 | { | 2840 | { |
2843 | if (pid == ftrace_swapper_pid) | 2841 | if (pid == ftrace_swapper_pid) |
2844 | set_ftrace_swapper(); | 2842 | set_ftrace_swapper(); |
2845 | else | 2843 | else |
2846 | set_ftrace_pid(pid); | 2844 | set_ftrace_pid(pid); |
2847 | } | 2845 | } |
2848 | 2846 | ||
/*
 * Add pid @p to the set of traced pids.  p == 0 is special and selects
 * the per-cpu swapper (idle) tasks.  Returns 0 on success (including
 * when the pid is already in the list), -EINVAL if the pid does not
 * exist, -ENOMEM on allocation failure.
 */
static int ftrace_pid_add(int p)
{
	struct pid *pid;
	struct ftrace_pid *fpid;
	int ret = -EINVAL;

	mutex_lock(&ftrace_lock);

	if (!p)
		pid = ftrace_swapper_pid;
	else
		pid = find_get_pid(p);	/* takes a pid reference on success */

	if (!pid)
		goto out;

	ret = 0;

	/* Already present?  Drop the extra reference and report success. */
	list_for_each_entry(fpid, &ftrace_pids, list)
		if (fpid->pid == pid)
			goto out_put;

	ret = -ENOMEM;

	fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
	if (!fpid)
		goto out_put;

	list_add(&fpid->list, &ftrace_pids);
	fpid->pid = pid;

	/* Flag every task reachable from this pid for tracing. */
	set_ftrace_pid_task(pid);

	ftrace_update_pid_func();
	ftrace_startup_enable(0);

	mutex_unlock(&ftrace_lock);
	return 0;

out_put:
	/* The swapper sentinel is never refcounted. */
	if (pid != ftrace_swapper_pid)
		put_pid(pid);

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
2896 | 2894 | ||
/* Empty the traced-pid list, clearing the trace flag on affected tasks. */
static void ftrace_pid_reset(void)
{
	struct ftrace_pid *fpid, *safe;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
		struct pid *pid = fpid->pid;

		/* Also drops the pid reference (unless it is the swapper). */
		clear_ftrace_pid_task(pid);

		list_del(&fpid->list);
		kfree(fpid);
	}

	ftrace_update_pid_func();
	ftrace_startup_enable(0);

	mutex_unlock(&ftrace_lock);
}
2916 | 2914 | ||
/*
 * seq_file start: takes ftrace_lock (released in fpid_stop).  The
 * cookie (void *)1 stands for "list is empty" so that fpid_show()
 * can emit the "no pid" record.
 */
static void *fpid_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&ftrace_lock);

	if (list_empty(&ftrace_pids) && (!*pos))
		return (void *) 1;

	return seq_list_start(&ftrace_pids, *pos);
}
2926 | 2924 | ||
2927 | static void *fpid_next(struct seq_file *m, void *v, loff_t *pos) | 2925 | static void *fpid_next(struct seq_file *m, void *v, loff_t *pos) |
2928 | { | 2926 | { |
2929 | if (v == (void *)1) | 2927 | if (v == (void *)1) |
2930 | return NULL; | 2928 | return NULL; |
2931 | 2929 | ||
2932 | return seq_list_next(v, &ftrace_pids, pos); | 2930 | return seq_list_next(v, &ftrace_pids, pos); |
2933 | } | 2931 | } |
2934 | 2932 | ||
/* seq_file stop: releases the lock taken in fpid_start(). */
static void fpid_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}
2939 | 2937 | ||
2940 | static int fpid_show(struct seq_file *m, void *v) | 2938 | static int fpid_show(struct seq_file *m, void *v) |
2941 | { | 2939 | { |
2942 | const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list); | 2940 | const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list); |
2943 | 2941 | ||
2944 | if (v == (void *)1) { | 2942 | if (v == (void *)1) { |
2945 | seq_printf(m, "no pid\n"); | 2943 | seq_printf(m, "no pid\n"); |
2946 | return 0; | 2944 | return 0; |
2947 | } | 2945 | } |
2948 | 2946 | ||
2949 | if (fpid->pid == ftrace_swapper_pid) | 2947 | if (fpid->pid == ftrace_swapper_pid) |
2950 | seq_printf(m, "swapper tasks\n"); | 2948 | seq_printf(m, "swapper tasks\n"); |
2951 | else | 2949 | else |
2952 | seq_printf(m, "%u\n", pid_vnr(fpid->pid)); | 2950 | seq_printf(m, "%u\n", pid_vnr(fpid->pid)); |
2953 | 2951 | ||
2954 | return 0; | 2952 | return 0; |
2955 | } | 2953 | } |
2956 | 2954 | ||
/* seq_file iterator for reading the set_ftrace_pid debugfs file. */
static const struct seq_operations ftrace_pid_sops = {
	.start = fpid_start,
	.next = fpid_next,
	.stop = fpid_stop,
	.show = fpid_show,
};
2963 | 2961 | ||
2964 | static int | 2962 | static int |
2965 | ftrace_pid_open(struct inode *inode, struct file *file) | 2963 | ftrace_pid_open(struct inode *inode, struct file *file) |
2966 | { | 2964 | { |
2967 | int ret = 0; | 2965 | int ret = 0; |
2968 | 2966 | ||
2969 | if ((file->f_mode & FMODE_WRITE) && | 2967 | if ((file->f_mode & FMODE_WRITE) && |
2970 | (file->f_flags & O_TRUNC)) | 2968 | (file->f_flags & O_TRUNC)) |
2971 | ftrace_pid_reset(); | 2969 | ftrace_pid_reset(); |
2972 | 2970 | ||
2973 | if (file->f_mode & FMODE_READ) | 2971 | if (file->f_mode & FMODE_READ) |
2974 | ret = seq_open(file, &ftrace_pid_sops); | 2972 | ret = seq_open(file, &ftrace_pid_sops); |
2975 | 2973 | ||
2976 | return ret; | 2974 | return ret; |
2977 | } | 2975 | } |
2978 | 2976 | ||
/*
 * Write handler for set_ftrace_pid: parse one decimal pid from user
 * space and add it to the filter.  An all-whitespace write is accepted
 * quietly (a truncating open has already cleared the list).
 */
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char buf[64], *tmp;
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;	/* NUL-terminate; cnt < sizeof(buf) checked above */

	/*
	 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
	 * to clean the filter quietly.
	 */
	tmp = strstrip(buf);
	if (strlen(tmp) == 0)
		return 1;	/* NOTE(review): returns 1, not cnt — confirm intended */

	ret = strict_strtol(tmp, 10, &val);
	if (ret < 0)
		return ret;

	ret = ftrace_pid_add(val);

	return ret ? ret : cnt;
}
3011 | 3009 | ||
3012 | static int | 3010 | static int |
3013 | ftrace_pid_release(struct inode *inode, struct file *file) | 3011 | ftrace_pid_release(struct inode *inode, struct file *file) |
3014 | { | 3012 | { |
3015 | if (file->f_mode & FMODE_READ) | 3013 | if (file->f_mode & FMODE_READ) |
3016 | seq_release(inode, file); | 3014 | seq_release(inode, file); |
3017 | 3015 | ||
3018 | return 0; | 3016 | return 0; |
3019 | } | 3017 | } |
3020 | 3018 | ||
/* File operations for the set_ftrace_pid debugfs file. */
static const struct file_operations ftrace_pid_fops = {
	.open = ftrace_pid_open,
	.write = ftrace_pid_write,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_pid_release,
};
3028 | 3026 | ||
/* Create the ftrace debugfs control files at boot. */
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	/* No-op stub unless CONFIG_DYNAMIC_FTRACE is enabled. */
	ftrace_init_dyn_debugfs(d_tracer);

	trace_create_file("set_ftrace_pid", 0644, d_tracer,
			    NULL, &ftrace_pid_fops);

	ftrace_profile_debugfs(d_tracer);

	return 0;
}
fs_initcall(ftrace_init_debugfs);
3047 | 3045 | ||
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill(void)
{
	/* No locking: this must be callable from panic context. */
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
3061 | 3059 | ||
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_lock);

	ret = __register_ftrace_function(ops);
	/* NOTE(review): startup runs even if registration failed — confirm. */
	ftrace_startup(0);

	mutex_unlock(&ftrace_lock);
	return ret;
}
3088 | 3086 | ||
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_lock);

	return ret;
}
3106 | 3104 | ||
/*
 * Handler for the ftrace_enabled sysctl.  On a 0 <-> 1 transition it
 * attaches or detaches the global function-trace callback.
 */
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_lock);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	/* Nothing to do on read, on error, or when the value is unchanged. */
	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
		goto out;

	last_ftrace_enabled = !!ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			/* A single registered callback can be called directly. */
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
3149 | 3147 | ||
3150 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 3148 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
3151 | 3149 | ||
static int ftrace_graph_active;			/* number of registered graph tracers (0 or 1) */
static struct notifier_block ftrace_suspend_notifier;

/* Default entry callback: record nothing. */
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
3164 | 3162 | ||
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	/* Allocate one batch of stacks up front, outside tasklist_lock. */
	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					    * sizeof(struct ftrace_ret_stack),
					    GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		/* Batch exhausted; the caller retries with a fresh batch. */
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	/* Free whatever was not handed off to a task. */
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
3210 | 3208 | ||
/*
 * sched_switch tracepoint probe: advance the calltime of every frame
 * on @next's return stack by the time it spent scheduled out, so that
 * graph durations exclude sleep time (unless the user asked to count
 * it via TRACE_ITER_SLEEP_TIME).
 */
static void
ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
				struct task_struct *next)
{
	unsigned long long timestamp;
	int index;

	/*
	 * Does the user want to count the time a function was asleep.
	 * If so, do not update the time stamps.
	 */
	if (trace_flags & TRACE_ITER_SLEEP_TIME)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}
3242 | 3240 | ||
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	/* Scratch array of stack pointers handed out in batches. */
	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_task(idle_task(cpu));
	}

	/* -EAGAIN means the batch ran out before all tasks were covered. */
	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}
3276 | 3274 | ||
3277 | /* | 3275 | /* |
3278 | * Hibernation protection. | 3276 | * Hibernation protection. |
3279 | * The state of the current task is too much unstable during | 3277 | * The state of the current task is too much unstable during |
3280 | * suspend/restore to disk. We want to protect against that. | 3278 | * suspend/restore to disk. We want to protect against that. |
3281 | */ | 3279 | */ |
3282 | static int | 3280 | static int |
3283 | ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state, | 3281 | ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state, |
3284 | void *unused) | 3282 | void *unused) |
3285 | { | 3283 | { |
3286 | switch (state) { | 3284 | switch (state) { |
3287 | case PM_HIBERNATION_PREPARE: | 3285 | case PM_HIBERNATION_PREPARE: |
3288 | pause_graph_tracing(); | 3286 | pause_graph_tracing(); |
3289 | break; | 3287 | break; |
3290 | 3288 | ||
3291 | case PM_POST_HIBERNATION: | 3289 | case PM_POST_HIBERNATION: |
3292 | unpause_graph_tracing(); | 3290 | unpause_graph_tracing(); |
3293 | break; | 3291 | break; |
3294 | } | 3292 | } |
3295 | return NOTIFY_DONE; | 3293 | return NOTIFY_DONE; |
3296 | } | 3294 | } |
3297 | 3295 | ||
/*
 * Install the function-graph entry/return hooks and start tracing.
 * Returns -EBUSY if a graph tracer is already registered, or the
 * error from start_graph_tracing() (with the active count rolled back).
 */
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	/* we currently allow only one tracer registered at a time */
	if (ftrace_graph_active) {
		ret = -EBUSY;
		goto out;
	}

	/* Pause graph tracing across hibernation (task state is unstable). */
	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
	register_pm_notifier(&ftrace_suspend_notifier);

	ftrace_graph_active++;
	ret = start_graph_tracing();
	if (ret) {
		ftrace_graph_active--;
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
3330 | 3328 | ||
/* Tear down the function-graph tracer; a no-op if none is registered. */
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	ftrace_graph_active--;
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
	/* Restore the stub callbacks before shutting the arch hooks down. */
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);

out:
	mutex_unlock(&ftrace_lock);
}
3348 | 3346 | ||
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!ret_stack)
			return;	/* leave ret_stack NULL on allocation failure */
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
		t->ftrace_timestamp = 0;
		/* make curr_ret_stack visable before we add the ret_stack */
		smp_wmb();
		t->ret_stack = ret_stack;
	}
}
3372 | 3370 | ||
/*
 * Free a task's graph-tracer return stack on task exit.
 * The pointer is cleared before the free so IRQ-context code
 * that checks t->ret_stack never sees a stale, freed stack.
 */
void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}
3383 | 3381 | ||
/* Hard-stop all function tracing; thin wrapper around ftrace_stop() */
void ftrace_graph_stop(void)
{
	ftrace_stop();
}
kernel/trace/trace.c
1 | /* | 1 | /* |
2 | * ring buffer based function tracer | 2 | * ring buffer based function tracer |
3 | * | 3 | * |
4 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> | 4 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> |
5 | * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> | 5 | * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> |
6 | * | 6 | * |
7 | * Originally taken from the RT patch by: | 7 | * Originally taken from the RT patch by: |
8 | * Arnaldo Carvalho de Melo <acme@redhat.com> | 8 | * Arnaldo Carvalho de Melo <acme@redhat.com> |
9 | * | 9 | * |
10 | * Based on code from the latency_tracer, that is: | 10 | * Based on code from the latency_tracer, that is: |
11 | * Copyright (C) 2004-2006 Ingo Molnar | 11 | * Copyright (C) 2004-2006 Ingo Molnar |
12 | * Copyright (C) 2004 William Lee Irwin III | 12 | * Copyright (C) 2004 William Lee Irwin III |
13 | */ | 13 | */ |
14 | #include <linux/ring_buffer.h> | 14 | #include <linux/ring_buffer.h> |
15 | #include <generated/utsrelease.h> | 15 | #include <generated/utsrelease.h> |
16 | #include <linux/stacktrace.h> | 16 | #include <linux/stacktrace.h> |
17 | #include <linux/writeback.h> | 17 | #include <linux/writeback.h> |
18 | #include <linux/kallsyms.h> | 18 | #include <linux/kallsyms.h> |
19 | #include <linux/seq_file.h> | 19 | #include <linux/seq_file.h> |
20 | #include <linux/smp_lock.h> | 20 | #include <linux/smp_lock.h> |
21 | #include <linux/notifier.h> | 21 | #include <linux/notifier.h> |
22 | #include <linux/irqflags.h> | 22 | #include <linux/irqflags.h> |
23 | #include <linux/debugfs.h> | 23 | #include <linux/debugfs.h> |
24 | #include <linux/pagemap.h> | 24 | #include <linux/pagemap.h> |
25 | #include <linux/hardirq.h> | 25 | #include <linux/hardirq.h> |
26 | #include <linux/linkage.h> | 26 | #include <linux/linkage.h> |
27 | #include <linux/uaccess.h> | 27 | #include <linux/uaccess.h> |
28 | #include <linux/kprobes.h> | 28 | #include <linux/kprobes.h> |
29 | #include <linux/ftrace.h> | 29 | #include <linux/ftrace.h> |
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/percpu.h> | 31 | #include <linux/percpu.h> |
32 | #include <linux/splice.h> | 32 | #include <linux/splice.h> |
33 | #include <linux/kdebug.h> | 33 | #include <linux/kdebug.h> |
34 | #include <linux/string.h> | 34 | #include <linux/string.h> |
35 | #include <linux/rwsem.h> | 35 | #include <linux/rwsem.h> |
36 | #include <linux/ctype.h> | 36 | #include <linux/ctype.h> |
37 | #include <linux/init.h> | 37 | #include <linux/init.h> |
38 | #include <linux/poll.h> | 38 | #include <linux/poll.h> |
39 | #include <linux/gfp.h> | 39 | #include <linux/gfp.h> |
40 | #include <linux/fs.h> | 40 | #include <linux/fs.h> |
41 | 41 | ||
42 | #include "trace.h" | 42 | #include "trace.h" |
43 | #include "trace_output.h" | 43 | #include "trace_output.h" |
44 | 44 | ||
45 | #define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE) | 45 | #define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE) |
46 | 46 | ||
47 | /* | 47 | /* |
48 | * On boot up, the ring buffer is set to the minimum size, so that | 48 | * On boot up, the ring buffer is set to the minimum size, so that |
49 | * we do not waste memory on systems that are not using tracing. | 49 | * we do not waste memory on systems that are not using tracing. |
50 | */ | 50 | */ |
51 | int ring_buffer_expanded; | 51 | int ring_buffer_expanded; |
52 | 52 | ||
53 | /* | 53 | /* |
54 | * We need to change this state when a selftest is running. | 54 | * We need to change this state when a selftest is running. |
55 | * A selftest will lurk into the ring-buffer to count the | 55 | * A selftest will lurk into the ring-buffer to count the |
56 | * entries inserted during the selftest although some concurrent | 56 | * entries inserted during the selftest although some concurrent |
57 | * insertions into the ring-buffer such as trace_printk could occurred | 57 | * insertions into the ring-buffer such as trace_printk could occurred |
58 | * at the same time, giving false positive or negative results. | 58 | * at the same time, giving false positive or negative results. |
59 | */ | 59 | */ |
60 | static bool __read_mostly tracing_selftest_running; | 60 | static bool __read_mostly tracing_selftest_running; |
61 | 61 | ||
62 | /* | 62 | /* |
63 | * If a tracer is running, we do not want to run SELFTEST. | 63 | * If a tracer is running, we do not want to run SELFTEST. |
64 | */ | 64 | */ |
65 | bool __read_mostly tracing_selftest_disabled; | 65 | bool __read_mostly tracing_selftest_disabled; |
66 | 66 | ||
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ } /* empty, NULL-terminated option list */
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

/* Flag-setting stub for tracers without options; always succeeds */
static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}
81 | 81 | ||
82 | /* | 82 | /* |
83 | * Kill all tracing for good (never come back). | 83 | * Kill all tracing for good (never come back). |
84 | * It is initialized to 1 but will turn to zero if the initialization | 84 | * It is initialized to 1 but will turn to zero if the initialization |
85 | * of the tracer is successful. But that is the only place that sets | 85 | * of the tracer is successful. But that is the only place that sets |
86 | * this back to zero. | 86 | * this back to zero. |
87 | */ | 87 | */ |
88 | static int tracing_disabled = 1; | 88 | static int tracing_disabled = 1; |
89 | 89 | ||
90 | DEFINE_PER_CPU(int, ftrace_cpu_disabled); | 90 | DEFINE_PER_CPU(int, ftrace_cpu_disabled); |
91 | 91 | ||
/*
 * Disable recording into this CPU's trace buffer. Preemption is
 * disabled first so the increment and the code it protects stay on
 * the same CPU; calls nest, since the per-cpu flag is a counter.
 */
static inline void ftrace_disable_cpu(void)
{
	preempt_disable();
	__this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
}

/* Re-enable recording on this CPU; pairs with ftrace_disable_cpu() */
static inline void ftrace_enable_cpu(void)
{
	__this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
	preempt_enable();
}
103 | 103 | ||
104 | static cpumask_var_t __read_mostly tracing_buffer_mask; | 104 | static cpumask_var_t __read_mostly tracing_buffer_mask; |
105 | 105 | ||
106 | #define for_each_tracing_cpu(cpu) \ | 106 | #define for_each_tracing_cpu(cpu) \ |
107 | for_each_cpu(cpu, tracing_buffer_mask) | 107 | for_each_cpu(cpu, tracing_buffer_mask) |
108 | 108 | ||
109 | /* | 109 | /* |
110 | * ftrace_dump_on_oops - variable to dump ftrace buffer on oops | 110 | * ftrace_dump_on_oops - variable to dump ftrace buffer on oops |
111 | * | 111 | * |
112 | * If there is an oops (or kernel panic) and the ftrace_dump_on_oops | 112 | * If there is an oops (or kernel panic) and the ftrace_dump_on_oops |
113 | * is set, then ftrace_dump is called. This will output the contents | 113 | * is set, then ftrace_dump is called. This will output the contents |
114 | * of the ftrace buffers to the console. This is very useful for | 114 | * of the ftrace buffers to the console. This is very useful for |
115 | * capturing traces that lead to crashes and outputing it to a | 115 | * capturing traces that lead to crashes and outputing it to a |
116 | * serial console. | 116 | * serial console. |
117 | * | 117 | * |
118 | * It is default off, but you can enable it with either specifying | 118 | * It is default off, but you can enable it with either specifying |
119 | * "ftrace_dump_on_oops" in the kernel command line, or setting | 119 | * "ftrace_dump_on_oops" in the kernel command line, or setting |
120 | * /proc/sys/kernel/ftrace_dump_on_oops to true. | 120 | * /proc/sys/kernel/ftrace_dump_on_oops to true. |
121 | */ | 121 | */ |
122 | int ftrace_dump_on_oops; | 122 | int ftrace_dump_on_oops; |
123 | 123 | ||
124 | static int tracing_set_tracer(const char *buf); | 124 | static int tracing_set_tracer(const char *buf); |
125 | 125 | ||
126 | #define MAX_TRACER_SIZE 100 | 126 | #define MAX_TRACER_SIZE 100 |
127 | static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; | 127 | static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; |
128 | static char *default_bootup_tracer; | 128 | static char *default_bootup_tracer; |
129 | 129 | ||
130 | static int __init set_cmdline_ftrace(char *str) | 130 | static int __init set_cmdline_ftrace(char *str) |
131 | { | 131 | { |
132 | strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); | 132 | strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); |
133 | default_bootup_tracer = bootup_tracer_buf; | 133 | default_bootup_tracer = bootup_tracer_buf; |
134 | /* We are using ftrace early, expand it */ | 134 | /* We are using ftrace early, expand it */ |
135 | ring_buffer_expanded = 1; | 135 | ring_buffer_expanded = 1; |
136 | return 1; | 136 | return 1; |
137 | } | 137 | } |
138 | __setup("ftrace=", set_cmdline_ftrace); | 138 | __setup("ftrace=", set_cmdline_ftrace); |
139 | 139 | ||
/*
 * "ftrace_dump_on_oops" boot parameter: dump the ftrace ring buffer
 * to the console when an oops or panic occurs.
 */
static int __init set_ftrace_dump_on_oops(char *str)
{
	ftrace_dump_on_oops = 1;
	return 1;
}
145 | __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); | 145 | __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); |
146 | 146 | ||
/*
 * Convert nanoseconds to microseconds, rounding to nearest
 * (+500 before dividing). do_div() is used because native 64-bit
 * division is not available on all 32-bit architectures.
 */
unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
153 | 153 | ||
154 | /* | 154 | /* |
155 | * The global_trace is the descriptor that holds the tracing | 155 | * The global_trace is the descriptor that holds the tracing |
156 | * buffers for the live tracing. For each CPU, it contains | 156 | * buffers for the live tracing. For each CPU, it contains |
157 | * a link list of pages that will store trace entries. The | 157 | * a link list of pages that will store trace entries. The |
158 | * page descriptor of the pages in the memory is used to hold | 158 | * page descriptor of the pages in the memory is used to hold |
159 | * the link list by linking the lru item in the page descriptor | 159 | * the link list by linking the lru item in the page descriptor |
160 | * to each of the pages in the buffer per CPU. | 160 | * to each of the pages in the buffer per CPU. |
161 | * | 161 | * |
162 | * For each active CPU there is a data field that holds the | 162 | * For each active CPU there is a data field that holds the |
163 | * pages for the buffer for that CPU. Each CPU has the same number | 163 | * pages for the buffer for that CPU. Each CPU has the same number |
164 | * of pages allocated for its buffer. | 164 | * of pages allocated for its buffer. |
165 | */ | 165 | */ |
166 | static struct trace_array global_trace; | 166 | static struct trace_array global_trace; |
167 | 167 | ||
168 | static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu); | 168 | static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu); |
169 | 169 | ||
/*
 * Apply @call's event filter to a just-written record and discard
 * the ring-buffer @event when it does not match. Exported thin
 * wrapper around filter_check_discard().
 */
int filter_current_check_discard(struct ring_buffer *buffer,
				 struct ftrace_event_call *call, void *rec,
				 struct ring_buffer_event *event)
{
	return filter_check_discard(call, rec, buffer, event);
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);
177 | 177 | ||
/*
 * Return the current, normalized trace timestamp for @cpu.
 * Falls back to trace_clock_local() during early boot, before
 * the global trace buffer has been allocated.
 */
cycle_t ftrace_now(int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!global_trace.buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
	ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);

	return ts;
}
191 | 191 | ||
192 | /* | 192 | /* |
193 | * The max_tr is used to snapshot the global_trace when a maximum | 193 | * The max_tr is used to snapshot the global_trace when a maximum |
194 | * latency is reached. Some tracers will use this to store a maximum | 194 | * latency is reached. Some tracers will use this to store a maximum |
195 | * trace while it continues examining live traces. | 195 | * trace while it continues examining live traces. |
196 | * | 196 | * |
197 | * The buffers for the max_tr are set up the same as the global_trace. | 197 | * The buffers for the max_tr are set up the same as the global_trace. |
198 | * When a snapshot is taken, the link list of the max_tr is swapped | 198 | * When a snapshot is taken, the link list of the max_tr is swapped |
199 | * with the link list of the global_trace and the buffers are reset for | 199 | * with the link list of the global_trace and the buffers are reset for |
200 | * the global_trace so the tracing can continue. | 200 | * the global_trace so the tracing can continue. |
201 | */ | 201 | */ |
202 | static struct trace_array max_tr; | 202 | static struct trace_array max_tr; |
203 | 203 | ||
204 | static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data); | 204 | static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data); |
205 | 205 | ||
206 | /* tracer_enabled is used to toggle activation of a tracer */ | 206 | /* tracer_enabled is used to toggle activation of a tracer */ |
207 | static int tracer_enabled = 1; | 207 | static int tracer_enabled = 1; |
208 | 208 | ||
/**
 * tracing_is_enabled - return tracer_enabled status
 *
 * This function is used by other tracers to know the status
 * of the tracer_enabled flag. Tracers may use this function
 * to know if it should enable their features when starting
 * up. See irqsoff tracer for an example (start_irqsoff_tracer).
 */
int tracing_is_enabled(void)
{
	return tracer_enabled;
}
221 | 221 | ||
222 | /* | 222 | /* |
223 | * trace_buf_size is the size in bytes that is allocated | 223 | * trace_buf_size is the size in bytes that is allocated |
224 | * for a buffer. Note, the number of bytes is always rounded | 224 | * for a buffer. Note, the number of bytes is always rounded |
225 | * to page size. | 225 | * to page size. |
226 | * | 226 | * |
227 | * This number is purposely set to a low number of 16384. | 227 | * This number is purposely set to a low number of 16384. |
228 | * If the dump on oops happens, it will be much appreciated | 228 | * If the dump on oops happens, it will be much appreciated |
229 | * to not have to wait for all that output. Anyway this can be | 229 | * to not have to wait for all that output. Anyway this can be |
230 | * boot time and run time configurable. | 230 | * boot time and run time configurable. |
231 | */ | 231 | */ |
232 | #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */ | 232 | #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */ |
233 | 233 | ||
234 | static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT; | 234 | static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT; |
235 | 235 | ||
236 | /* trace_types holds a link list of available tracers. */ | 236 | /* trace_types holds a link list of available tracers. */ |
237 | static struct tracer *trace_types __read_mostly; | 237 | static struct tracer *trace_types __read_mostly; |
238 | 238 | ||
239 | /* current_trace points to the tracer that is currently active */ | 239 | /* current_trace points to the tracer that is currently active */ |
240 | static struct tracer *current_trace __read_mostly; | 240 | static struct tracer *current_trace __read_mostly; |
241 | 241 | ||
242 | /* | 242 | /* |
243 | * trace_types_lock is used to protect the trace_types list. | 243 | * trace_types_lock is used to protect the trace_types list. |
244 | */ | 244 | */ |
245 | static DEFINE_MUTEX(trace_types_lock); | 245 | static DEFINE_MUTEX(trace_types_lock); |
246 | 246 | ||
247 | /* | 247 | /* |
248 | * serialize the access of the ring buffer | 248 | * serialize the access of the ring buffer |
249 | * | 249 | * |
250 | * ring buffer serializes readers, but it is low level protection. | 250 | * ring buffer serializes readers, but it is low level protection. |
251 | * The validity of the events (which returns by ring_buffer_peek() ..etc) | 251 | * The validity of the events (which returns by ring_buffer_peek() ..etc) |
252 | * are not protected by ring buffer. | 252 | * are not protected by ring buffer. |
253 | * | 253 | * |
254 | * The content of events may become garbage if we allow other process consumes | 254 | * The content of events may become garbage if we allow other process consumes |
255 | * these events concurrently: | 255 | * these events concurrently: |
256 | * A) the page of the consumed events may become a normal page | 256 | * A) the page of the consumed events may become a normal page |
257 | * (not reader page) in ring buffer, and this page will be rewrited | 257 | * (not reader page) in ring buffer, and this page will be rewrited |
258 | * by events producer. | 258 | * by events producer. |
259 | * B) The page of the consumed events may become a page for splice_read, | 259 | * B) The page of the consumed events may become a page for splice_read, |
260 | * and this page will be returned to system. | 260 | * and this page will be returned to system. |
261 | * | 261 | * |
262 | * These primitives allow multi process access to different cpu ring buffer | 262 | * These primitives allow multi process access to different cpu ring buffer |
263 | * concurrently. | 263 | * concurrently. |
264 | * | 264 | * |
265 | * These primitives don't distinguish read-only and read-consume access. | 265 | * These primitives don't distinguish read-only and read-consume access. |
266 | * Multi read-only access are also serialized. | 266 | * Multi read-only access are also serialized. |
267 | */ | 267 | */ |
268 | 268 | ||
269 | #ifdef CONFIG_SMP | 269 | #ifdef CONFIG_SMP |
270 | static DECLARE_RWSEM(all_cpu_access_lock); | 270 | static DECLARE_RWSEM(all_cpu_access_lock); |
271 | static DEFINE_PER_CPU(struct mutex, cpu_access_lock); | 271 | static DEFINE_PER_CPU(struct mutex, cpu_access_lock); |
272 | 272 | ||
/*
 * Take the access lock for @cpu's ring buffer, or for all buffers
 * when @cpu is TRACE_PIPE_ALL_CPU. Lock ordering: the global rwsem
 * is always taken before any per-cpu mutex, so an ALL_CPU holder
 * (write) excludes every per-cpu holder (read) and vice versa.
 */
static inline void trace_access_lock(int cpu)
{
	if (cpu == TRACE_PIPE_ALL_CPU) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}
288 | 288 | ||
/* Release what trace_access_lock(@cpu) took, in reverse order */
static inline void trace_access_unlock(int cpu)
{
	if (cpu == TRACE_PIPE_ALL_CPU) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}
298 | 298 | ||
/* Initialize every possible CPU's buffer-access mutex at boot */
static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}
306 | 306 | ||
307 | #else | 307 | #else |
308 | 308 | ||
/*
 * UP (!CONFIG_SMP) variants: a single mutex serializes all ring
 * buffer access; the @cpu argument is ignored.
 */
static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
	/* nothing to set up on UP */
}
326 | 326 | ||
327 | #endif | 327 | #endif |
328 | 328 | ||
329 | /* trace_wait is a waitqueue for tasks blocked on trace_poll */ | 329 | /* trace_wait is a waitqueue for tasks blocked on trace_poll */ |
330 | static DECLARE_WAIT_QUEUE_HEAD(trace_wait); | 330 | static DECLARE_WAIT_QUEUE_HEAD(trace_wait); |
331 | 331 | ||
332 | /* trace_flags holds trace_options default values */ | 332 | /* trace_flags holds trace_options default values */ |
333 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | | 333 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | |
334 | TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME | | 334 | TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME | |
335 | TRACE_ITER_GRAPH_TIME; | 335 | TRACE_ITER_GRAPH_TIME; |
336 | 336 | ||
337 | static int trace_stop_count; | 337 | static int trace_stop_count; |
338 | static DEFINE_SPINLOCK(tracing_start_lock); | 338 | static DEFINE_SPINLOCK(tracing_start_lock); |
339 | 339 | ||
/**
 * trace_wake_up - wake up tasks waiting for trace input
 *
 * Simply wakes up any task that is blocked on the trace_wait
 * queue. This is used with trace_poll for tasks polling the trace.
 */
void trace_wake_up(void)
{
	int cpu;

	if (trace_flags & TRACE_ITER_BLOCK)
		return;
	/*
	 * The runqueue_is_locked() can fail, but this is the best we
	 * have for now:
	 */
	cpu = get_cpu();
	/* NOTE(review): skipping the wakeup while this CPU's runqueue is
	 * locked presumably avoids recursing into the scheduler — confirm */
	if (!runqueue_is_locked(cpu))
		wake_up(&trace_wait);
	put_cpu();
}
361 | 361 | ||
/*
 * "trace_buf_size=" boot parameter: per-cpu ring buffer size in
 * bytes; memparse() accepts K/M/G suffixes.
 */
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
375 | __setup("trace_buf_size=", set_buf_size); | 375 | __setup("trace_buf_size=", set_buf_size); |
376 | 376 | ||
377 | static int __init set_tracing_thresh(char *str) | ||
378 | { | ||
379 | unsigned long threshhold; | ||
380 | int ret; | ||
381 | |||
382 | if (!str) | ||
383 | return 0; | ||
384 | ret = strict_strtoul(str, 0, &threshhold); | ||
385 | if (ret < 0) | ||
386 | return 0; | ||
387 | tracing_thresh = threshhold * 1000; | ||
388 | return 1; | ||
389 | } | ||
390 | __setup("tracing_thresh=", set_tracing_thresh); | ||
391 | |||
/* Convert nanoseconds to microseconds (truncating integer division) */
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	unsigned long usecs = nsecs / 1000;

	return usecs;
}
381 | 396 | ||
/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	NULL /* end of list */
};
406 | 421 | ||
/*
 * Available trace clocks, indexed by trace_clock_id; @name is the
 * string a user selects the clock by.
 */
static struct {
	u64 (*func)(void);
	const char *name;
} trace_clocks[] = {
	{ trace_clock_local, "local" },
	{ trace_clock_global, "global" },
};
414 | 429 | ||
415 | int trace_clock_id; | 430 | int trace_clock_id; |
416 | 431 | ||
417 | /* | 432 | /* |
418 | * trace_parser_get_init - gets the buffer for trace parser | 433 | * trace_parser_get_init - gets the buffer for trace parser |
419 | */ | 434 | */ |
420 | int trace_parser_get_init(struct trace_parser *parser, int size) | 435 | int trace_parser_get_init(struct trace_parser *parser, int size) |
421 | { | 436 | { |
422 | memset(parser, 0, sizeof(*parser)); | 437 | memset(parser, 0, sizeof(*parser)); |
423 | 438 | ||
424 | parser->buffer = kmalloc(size, GFP_KERNEL); | 439 | parser->buffer = kmalloc(size, GFP_KERNEL); |
425 | if (!parser->buffer) | 440 | if (!parser->buffer) |
426 | return 1; | 441 | return 1; |
427 | 442 | ||
428 | parser->size = size; | 443 | parser->size = size; |
429 | return 0; | 444 | return 0; |
430 | } | 445 | } |
431 | 446 | ||
/*
 * trace_parser_put - frees the buffer for trace parser
 * (kfree(NULL) is a no-op, so a zeroed/failed-init parser is safe)
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}
439 | 454 | ||
/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	/* Offset 0 means a fresh write(2): restart token accumulation. */
	if (!*ppos)
		trace_parser_clear(parser);

	/* get_user() returns non-zero on fault; propagate that as ret. */
	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		/* Reserve one byte for the NUL terminator below. */
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			/* token longer than the parser buffer */
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else {
		/* Input ran out mid-token; resume on the next write(2). */
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
522 | 537 | ||
/*
 * trace_seq_to_user - copy the unread part of a trace_seq to user space
 * @s:    sequence buffer being drained (readpos tracks consumption)
 * @ubuf: destination user buffer
 * @cnt:  maximum number of bytes to copy
 *
 * Returns the number of bytes actually copied, 0 when @cnt is zero,
 * -EBUSY when the sequence has no unread data, or -EFAULT when nothing
 * at all could be copied to user space.
 */
ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	/* Nothing unread between readpos and len. */
	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	/* copy_to_user() returns the number of bytes that could NOT be copied. */
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	/* Partial copies are allowed: account only what actually made it. */
	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}
546 | 561 | ||
547 | static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) | 562 | static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) |
548 | { | 563 | { |
549 | int len; | 564 | int len; |
550 | void *ret; | 565 | void *ret; |
551 | 566 | ||
552 | if (s->len <= s->readpos) | 567 | if (s->len <= s->readpos) |
553 | return -EBUSY; | 568 | return -EBUSY; |
554 | 569 | ||
555 | len = s->len - s->readpos; | 570 | len = s->len - s->readpos; |
556 | if (cnt > len) | 571 | if (cnt > len) |
557 | cnt = len; | 572 | cnt = len; |
558 | ret = memcpy(buf, s->buffer + s->readpos, cnt); | 573 | ret = memcpy(buf, s->buffer + s->readpos, cnt); |
559 | if (!ret) | 574 | if (!ret) |
560 | return -EFAULT; | 575 | return -EFAULT; |
561 | 576 | ||
562 | s->readpos += cnt; | 577 | s->readpos += cnt; |
563 | return cnt; | 578 | return cnt; |
564 | } | 579 | } |
565 | 580 | ||
/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as a arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside the update_max_tr
 * so it needs to be defined outside of the
 * CONFIG_TRACER_MAX_TRACE.
 */
static arch_spinlock_t ftrace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

/*
 * Latency threshold used by the latency tracers.  Defined outside of
 * CONFIG_TRACER_MAX_TRACE so it is available even when the max-trace
 * snapshot machinery is compiled out.
 * NOTE(review): units are presumably the same as the latency values it
 * is compared against — confirm against the tracers that read it.
 */
unsigned long __read_mostly tracing_thresh;
#ifdef CONFIG_TRACER_MAX_TRACE
/* Largest latency recorded so far; saved into the max snapshot below. */
unsigned long __read_mostly tracing_max_latency;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];
	struct trace_array_cpu *max_data;

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	max_data = max_tr.data[cpu];
	max_data->saved_latency = tracing_max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	/* Record the offending task's identity in the max snapshot. */
	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	max_data->uid = task_uid(tsk);
	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this tasks comm */
	tracing_record_cmdline(tsk);
}
615 | 631 | ||
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf = tr->buffer;

	/* Don't snapshot while tracing is stopped. */
	if (trace_stop_count)
		return;

	/* Callers must hold off interrupts before taking ftrace_max_lock. */
	WARN_ON_ONCE(!irqs_disabled());
	arch_spin_lock(&ftrace_max_lock);

	/* Swap the live buffer with the max snapshot buffer wholesale. */
	tr->buffer = max_tr.buffer;
	max_tr.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}
642 | 658 | ||
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (trace_stop_count)
		return;

	/* Callers must hold off interrupts before taking ftrace_max_lock. */
	WARN_ON_ONCE(!irqs_disabled());
	arch_spin_lock(&ftrace_max_lock);

	ftrace_disable_cpu();

	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk(&max_tr, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	ftrace_enable_cpu();

	/* -EAGAIN and -EBUSY are expected transient failures; others are bugs. */
	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */
685 | 701 | ||
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 *
 * Returns 0 on success, -1 on invalid name, duplicate registration,
 * or (with CONFIG_FTRACE_STARTUP_TEST) a failed selftest.
 */
int register_tracer(struct tracer *type)
__releases(kernel_lock)
__acquires(kernel_lock)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) > MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	/*
	 * When this gets called we hold the BKL which means that
	 * preemption is disabled. Various trace selftests however
	 * need to disable and enable preemption for successful tests.
	 * So we drop the BKL here and grab it after the tests again.
	 */
	unlock_kernel();
	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	/* Reject duplicate registration by name. */
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	/* Fill in defaults for any callbacks/flags the tracer omitted. */
	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;


#ifdef CONFIG_FTRACE_STARTUP_TEST
	if (type->selftest && !tracing_selftest_disabled) {
		struct tracer *saved_tracer = current_trace;
		struct trace_array *tr = &global_trace;

		/*
		 * Run a selftest on this tracer.
		 * Here we reset the trace buffer, and set the current
		 * tracer to be this tracer. The tracer can then run some
		 * internal tracing to verify that everything is in order.
		 * If we fail, we do not register this tracer.
		 */
		tracing_reset_online_cpus(tr);

		current_trace = type;
		/* the test is responsible for initializing and enabling */
		pr_info("Testing tracer %s: ", type->name);
		ret = type->selftest(type, tr);
		/* the test is responsible for resetting too */
		current_trace = saved_tracer;
		if (ret) {
			printk(KERN_CONT "FAILED!\n");
			goto out;
		}
		/* Only reset on passing, to avoid touching corrupted buffers */
		tracing_reset_online_cpus(tr);

		printk(KERN_CONT "PASSED\n");
	}
#endif

	/* Link the new tracer at the head of the global tracer list. */
	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	/* Was this tracer requested on the kernel command line? */
	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = 1;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	/* Re-take the BKL that our caller expects us to hold on return. */
	lock_kernel();
	return ret;
}
800 | 816 | ||
/*
 * unregister_tracer - remove a tracer from the tracer list
 * @type: the tracer previously passed to register_tracer()
 *
 * If @type is the currently running tracer, tracing is stopped and the
 * current tracer is replaced with the nop tracer.
 */
void unregister_tracer(struct tracer *type)
{
	struct tracer **t;

	mutex_lock(&trace_types_lock);
	/* Walk via pointer-to-pointer so unlinking needs no prev pointer. */
	for (t = &trace_types; *t; t = &(*t)->next) {
		if (*t == type)
			goto found;
	}
	pr_info("Tracer %s not registered\n", type->name);
	goto out;

 found:
	*t = (*t)->next;

	if (type == current_trace && tracer_enabled) {
		tracer_enabled = 0;
		tracing_stop();
		if (current_trace->stop)
			current_trace->stop(&global_trace);
		current_trace = &nop_trace;
	}
out:
	mutex_unlock(&trace_types_lock);
}
826 | 842 | ||
/*
 * Reset one CPU's ring buffer, with function tracing disabled so the
 * reset itself is not traced.
 */
static void __tracing_reset(struct trace_array *tr, int cpu)
{
	ftrace_disable_cpu();
	ring_buffer_reset_cpu(tr->buffer, cpu);
	ftrace_enable_cpu();
}
833 | 849 | ||
/*
 * tracing_reset - clear the ring buffer for one CPU
 * @tr:  trace array owning the buffer
 * @cpu: CPU whose buffer is reset
 *
 * Recording is disabled for the duration so no writer races the reset.
 */
void tracing_reset(struct trace_array *tr, int cpu)
{
	struct ring_buffer *buffer = tr->buffer;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	__tracing_reset(tr, cpu);

	ring_buffer_record_enable(buffer);
}
846 | 862 | ||
/*
 * tracing_reset_online_cpus - clear the ring buffers of all online CPUs
 * @tr: trace array owning the buffers
 *
 * Also restarts the trace's time_start stamp.  Recording is disabled
 * for the duration so no writer races the resets.
 */
void tracing_reset_online_cpus(struct trace_array *tr)
{
	struct ring_buffer *buffer = tr->buffer;
	int cpu;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		__tracing_reset(tr, cpu);

	ring_buffer_record_enable(buffer);
}
864 | 880 | ||
/* Reset one CPU's buffer of the global trace array. */
void tracing_reset_current(int cpu)
{
	tracing_reset(&global_trace, cpu);
}
869 | 885 | ||
/* Reset the global trace array's buffers on all online CPUs. */
void tracing_reset_current_online_cpus(void)
{
	tracing_reset_online_cpus(&global_trace);
}
874 | 890 | ||
/*
 * Bounded cache of pid -> comm mappings so traces can print task names
 * for pids that may have exited.  SAVED_CMDLINES entries are recycled
 * in a ring; map_pid_to_cmdline / map_cmdline_to_pid keep the two
 * directions in sync under trace_cmdline_lock.
 */
#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
885 | 901 | ||
/*
 * Initialize both cmdline maps to NO_CMDLINE_MAP.  memset works here
 * only because NO_CMDLINE_MAP is UINT_MAX (all 0xff bytes), so the
 * byte-wise fill produces the right per-element value.
 */
static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}
892 | 908 | ||
/* Non-zero while tracing_stop() calls outnumber tracing_start() calls. */
int is_tracing_stopped(void)
{
	return trace_stop_count;
}
897 | 913 | ||
/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomally has
 * been detected. This will turn off the function tracing,
 * ring buffers, and other tracing utilites. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
	tracing_disabled = 1;
	ftrace_stop();
	tracing_off_permanent();
}
912 | 928 | ||
913 | /** | 929 | /** |
914 | * tracing_start - quick start of the tracer | 930 | * tracing_start - quick start of the tracer |
915 | * | 931 | * |
916 | * If tracing is enabled but was stopped by tracing_stop, | 932 | * If tracing is enabled but was stopped by tracing_stop, |
917 | * this will start the tracer back up. | 933 | * this will start the tracer back up. |
918 | */ | 934 | */ |
919 | void tracing_start(void) | 935 | void tracing_start(void) |
920 | { | 936 | { |
921 | struct ring_buffer *buffer; | 937 | struct ring_buffer *buffer; |
922 | unsigned long flags; | 938 | unsigned long flags; |
923 | 939 | ||
924 | if (tracing_disabled) | 940 | if (tracing_disabled) |
925 | return; | 941 | return; |
926 | 942 | ||
927 | spin_lock_irqsave(&tracing_start_lock, flags); | 943 | spin_lock_irqsave(&tracing_start_lock, flags); |
928 | if (--trace_stop_count) { | 944 | if (--trace_stop_count) { |
929 | if (trace_stop_count < 0) { | 945 | if (trace_stop_count < 0) { |
930 | /* Someone screwed up their debugging */ | 946 | /* Someone screwed up their debugging */ |
931 | WARN_ON_ONCE(1); | 947 | WARN_ON_ONCE(1); |
932 | trace_stop_count = 0; | 948 | trace_stop_count = 0; |
933 | } | 949 | } |
934 | goto out; | 950 | goto out; |
935 | } | 951 | } |
936 | 952 | ||
937 | 953 | ||
938 | buffer = global_trace.buffer; | 954 | buffer = global_trace.buffer; |
939 | if (buffer) | 955 | if (buffer) |
940 | ring_buffer_record_enable(buffer); | 956 | ring_buffer_record_enable(buffer); |
941 | 957 | ||
942 | buffer = max_tr.buffer; | 958 | buffer = max_tr.buffer; |
943 | if (buffer) | 959 | if (buffer) |
944 | ring_buffer_record_enable(buffer); | 960 | ring_buffer_record_enable(buffer); |
945 | 961 | ||
946 | ftrace_start(); | 962 | ftrace_start(); |
947 | out: | 963 | out: |
948 | spin_unlock_irqrestore(&tracing_start_lock, flags); | 964 | spin_unlock_irqrestore(&tracing_start_lock, flags); |
949 | } | 965 | } |
950 | 966 | ||
/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 *
 * Calls nest: only the first stop (count 0 -> 1) actually disables
 * the ring buffers; later stops just bump the count.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* Stop function tracing before touching the buffers. */
	ftrace_stop();
	spin_lock_irqsave(&tracing_start_lock, flags);
	if (trace_stop_count++)
		goto out;

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	spin_unlock_irqrestore(&tracing_start_lock, flags);
}
978 | 994 | ||
void trace_stop_cmdline_recording(void);

/*
 * Remember @tsk's comm in the saved_cmdlines ring so traces can resolve
 * its pid to a name later.  Best-effort: gives up rather than spin on
 * the cmdline lock.
 */
static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	/* pid 0 (idle) and out-of-range pids are not cached. */
	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		/* Recycle the next slot in the ring. */
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		map_cmdline_to_pid[idx] = tsk->pid;
		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	arch_spin_unlock(&trace_cmdline_lock);
}
1021 | 1037 | ||
1022 | void trace_find_cmdline(int pid, char comm[]) | 1038 | void trace_find_cmdline(int pid, char comm[]) |
1023 | { | 1039 | { |
1024 | unsigned map; | 1040 | unsigned map; |
1025 | 1041 | ||
1026 | if (!pid) { | 1042 | if (!pid) { |
1027 | strcpy(comm, "<idle>"); | 1043 | strcpy(comm, "<idle>"); |
1028 | return; | 1044 | return; |
1029 | } | 1045 | } |
1030 | 1046 | ||
1031 | if (WARN_ON_ONCE(pid < 0)) { | 1047 | if (WARN_ON_ONCE(pid < 0)) { |
1032 | strcpy(comm, "<XXX>"); | 1048 | strcpy(comm, "<XXX>"); |
1033 | return; | 1049 | return; |
1034 | } | 1050 | } |
1035 | 1051 | ||
1036 | if (pid > PID_MAX_DEFAULT) { | 1052 | if (pid > PID_MAX_DEFAULT) { |
1037 | strcpy(comm, "<...>"); | 1053 | strcpy(comm, "<...>"); |
1038 | return; | 1054 | return; |
1039 | } | 1055 | } |
1040 | 1056 | ||
1041 | preempt_disable(); | 1057 | preempt_disable(); |
1042 | arch_spin_lock(&trace_cmdline_lock); | 1058 | arch_spin_lock(&trace_cmdline_lock); |
1043 | map = map_pid_to_cmdline[pid]; | 1059 | map = map_pid_to_cmdline[pid]; |
1044 | if (map != NO_CMDLINE_MAP) | 1060 | if (map != NO_CMDLINE_MAP) |
1045 | strcpy(comm, saved_cmdlines[map]); | 1061 | strcpy(comm, saved_cmdlines[map]); |
1046 | else | 1062 | else |
1047 | strcpy(comm, "<...>"); | 1063 | strcpy(comm, "<...>"); |
1048 | 1064 | ||
1049 | arch_spin_unlock(&trace_cmdline_lock); | 1065 | arch_spin_unlock(&trace_cmdline_lock); |
1050 | preempt_enable(); | 1066 | preempt_enable(); |
1051 | } | 1067 | } |
1052 | 1068 | ||
1053 | void tracing_record_cmdline(struct task_struct *tsk) | 1069 | void tracing_record_cmdline(struct task_struct *tsk) |
1054 | { | 1070 | { |
1055 | if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled || | 1071 | if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled || |
1056 | !tracing_is_on()) | 1072 | !tracing_is_on()) |
1057 | return; | 1073 | return; |
1058 | 1074 | ||
1059 | trace_save_cmdline(tsk); | 1075 | trace_save_cmdline(tsk); |
1060 | } | 1076 | } |
1061 | 1077 | ||
/*
 * tracing_generic_entry_update - fill in the fields common to every
 * trace entry (pid, lock depth, preempt count and irq/softirq state)
 * from the current task, the saved irq @flags and preempt count @pc.
 */
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	/* Only the low byte of the preempt count fits in the entry. */
	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->lock_depth = (tsk) ? tsk->lock_depth : 0;
	/* Collapse the context into TRACE_FLAG_* bits for the output. */
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1082 | 1098 | ||
1083 | struct ring_buffer_event * | 1099 | struct ring_buffer_event * |
1084 | trace_buffer_lock_reserve(struct ring_buffer *buffer, | 1100 | trace_buffer_lock_reserve(struct ring_buffer *buffer, |
1085 | int type, | 1101 | int type, |
1086 | unsigned long len, | 1102 | unsigned long len, |
1087 | unsigned long flags, int pc) | 1103 | unsigned long flags, int pc) |
1088 | { | 1104 | { |
1089 | struct ring_buffer_event *event; | 1105 | struct ring_buffer_event *event; |
1090 | 1106 | ||
1091 | event = ring_buffer_lock_reserve(buffer, len); | 1107 | event = ring_buffer_lock_reserve(buffer, len); |
1092 | if (event != NULL) { | 1108 | if (event != NULL) { |
1093 | struct trace_entry *ent = ring_buffer_event_data(event); | 1109 | struct trace_entry *ent = ring_buffer_event_data(event); |
1094 | 1110 | ||
1095 | tracing_generic_entry_update(ent, flags, pc); | 1111 | tracing_generic_entry_update(ent, flags, pc); |
1096 | ent->type = type; | 1112 | ent->type = type; |
1097 | } | 1113 | } |
1098 | 1114 | ||
1099 | return event; | 1115 | return event; |
1100 | } | 1116 | } |
1101 | 1117 | ||
/*
 * __trace_buffer_unlock_commit - commit a reserved event, then append
 * the optional kernel/user stack trace events and, if @wake is set,
 * wake up readers waiting on the trace buffer.
 */
static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc,
			     int wake)
{
	ring_buffer_unlock_commit(buffer, event);

	/* skip=6 — presumably skips the tracing internals; confirm if changed */
	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);

	if (wake)
		trace_wake_up();
}
1116 | 1132 | ||
/* Commit variant that always wakes up waiting readers. */
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
}
1123 | 1139 | ||
1124 | struct ring_buffer_event * | 1140 | struct ring_buffer_event * |
1125 | trace_current_buffer_lock_reserve(struct ring_buffer **current_rb, | 1141 | trace_current_buffer_lock_reserve(struct ring_buffer **current_rb, |
1126 | int type, unsigned long len, | 1142 | int type, unsigned long len, |
1127 | unsigned long flags, int pc) | 1143 | unsigned long flags, int pc) |
1128 | { | 1144 | { |
1129 | *current_rb = global_trace.buffer; | 1145 | *current_rb = global_trace.buffer; |
1130 | return trace_buffer_lock_reserve(*current_rb, | 1146 | return trace_buffer_lock_reserve(*current_rb, |
1131 | type, len, flags, pc); | 1147 | type, len, flags, pc); |
1132 | } | 1148 | } |
1133 | EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve); | 1149 | EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve); |
1134 | 1150 | ||
/* Commit an event reserved on the current (global) buffer, waking readers. */
void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
1142 | 1158 | ||
/*
 * Commit variant that does NOT wake readers — for contexts where the
 * wakeup itself could recurse or deadlock.
 */
void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
				       struct ring_buffer_event *event,
				       unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
}
EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
1150 | 1166 | ||
/*
 * trace_current_buffer_discard_commit - throw away a reserved event
 * instead of committing it, releasing the ring buffer reservation.
 */
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1157 | 1173 | ||
/*
 * trace_function - record one function-entry event (@ip called from
 * @parent_ip) into @tr's ring buffer, subject to event filtering.
 */
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	/* Drop the event silently if the event filter rejects it. */
	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);
}
1183 | 1199 | ||
1184 | void | 1200 | void |
1185 | ftrace(struct trace_array *tr, struct trace_array_cpu *data, | 1201 | ftrace(struct trace_array *tr, struct trace_array_cpu *data, |
1186 | unsigned long ip, unsigned long parent_ip, unsigned long flags, | 1202 | unsigned long ip, unsigned long parent_ip, unsigned long flags, |
1187 | int pc) | 1203 | int pc) |
1188 | { | 1204 | { |
1189 | if (likely(!atomic_read(&data->disabled))) | 1205 | if (likely(!atomic_read(&data->disabled))) |
1190 | trace_function(tr, ip, parent_ip, flags, pc); | 1206 | trace_function(tr, ip, parent_ip, flags, pc); |
1191 | } | 1207 | } |
1192 | 1208 | ||
1193 | #ifdef CONFIG_STACKTRACE | 1209 | #ifdef CONFIG_STACKTRACE |
/*
 * __ftrace_trace_stack - capture a kernel stack trace into a
 * TRACE_STACK event, skipping @skip frames, subject to filtering.
 */
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc)
{
	struct ftrace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	/* Zero the caller array so short traces don't leak stale entries. */
	memset(&entry->caller, 0, sizeof(entry->caller));

	/* Have save_stack_trace() write directly into the event payload. */
	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = skip;
	trace.entries = entry->caller;

	save_stack_trace(&trace);
	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);
}
1219 | 1235 | ||
1220 | void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, | 1236 | void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, |
1221 | int skip, int pc) | 1237 | int skip, int pc) |
1222 | { | 1238 | { |
1223 | if (!(trace_flags & TRACE_ITER_STACKTRACE)) | 1239 | if (!(trace_flags & TRACE_ITER_STACKTRACE)) |
1224 | return; | 1240 | return; |
1225 | 1241 | ||
1226 | __ftrace_trace_stack(buffer, flags, skip, pc); | 1242 | __ftrace_trace_stack(buffer, flags, skip, pc); |
1227 | } | 1243 | } |
1228 | 1244 | ||
/* Unconditional stack trace into @tr's buffer (no option check). */
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->buffer, flags, skip, pc);
}
1234 | 1250 | ||
/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 *
 * Writes the caller's kernel stack into the global trace buffer.
 * Does nothing while tracing is disabled or a selftest is running.
 */
void trace_dump_stack(void)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/* skipping 3 traces, seems to get us at the caller of this function */
	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
}
1250 | 1266 | ||
/*
 * ftrace_trace_userstack - capture the current task's user-space stack
 * into a TRACE_USER_STACK event, when the "userstacktrace" option is on.
 */
void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);

	entry->tgid = current->tgid;
	/* Zero the caller array so short traces don't leak stale entries. */
	memset(&entry->caller, 0, sizeof(entry->caller));

	/* Have save_stack_trace_user() fill the event payload directly. */
	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = 0;
	trace.entries = entry->caller;

	save_stack_trace_user(&trace);
	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);
}
1280 | 1296 | ||
#ifdef UNUSED
/*
 * __trace_userstack - convenience wrapper recording a user stack trace
 * into @tr's buffer at the current preempt count.
 *
 * Fix: ftrace_trace_userstack() takes a struct ring_buffer *, not a
 * struct trace_array *; passing @tr directly would not compile if
 * UNUSED were ever defined.
 */
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr->buffer, flags, preempt_count());
}
#endif /* UNUSED */
1287 | 1303 | ||
1288 | #endif /* CONFIG_STACKTRACE */ | 1304 | #endif /* CONFIG_STACKTRACE */ |
1289 | 1305 | ||
/*
 * ftrace_trace_special - record a TRACE_SPECIAL event carrying three
 * arbitrary values (@arg1..@arg3) into the trace_array passed as @__tr.
 */
static void
ftrace_trace_special(void *__tr,
		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
		     int pc)
{
	struct ftrace_event_call *call = &event_special;
	struct ring_buffer_event *event;
	struct trace_array *tr = __tr;
	struct ring_buffer *buffer = tr->buffer;
	struct special_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL,
					  sizeof(*entry), 0, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->arg1 = arg1;
	entry->arg2 = arg2;
	entry->arg3 = arg3;

	if (!filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, 0, pc);
}
1313 | 1329 | ||
/* Record a special event at the current preempt count; @__data unused. */
void
__trace_special(void *__tr, void *__data,
		unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count());
}
1320 | 1336 | ||
/*
 * ftrace_special - public entry point to log three values into the
 * global trace buffer, guarded against per-cpu recursion.
 */
void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	if (tracing_disabled)
		return;

	/* Sample the preempt count before irqs are disabled below. */
	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	/* Only trace if this is the first (non-recursive) entry on this cpu. */
	if (likely(atomic_inc_return(&data->disabled) == 1))
		ftrace_trace_special(tr, arg1, arg2, arg3, pc);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
1344 | 1360 | ||
/**
 * trace_vbprintk - write binary msg to tracing buffer
 * @ip: the instruction pointer recorded with the message
 * @fmt: printf-style format string (stored by reference in the event)
 * @args: arguments, serialized in binary form by vbin_printf()
 *
 * Returns the number of u32 words written, or 0 when tracing is off.
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	/* Serializes access to the shared static trace_buf below. */
	static arch_spinlock_t trace_buf_lock =
		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	static u32 trace_buf[TRACE_BUF_SIZE];

	struct ftrace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	struct bprint_entry *entry;
	unsigned long flags;
	int disable;
	int resched;
	int cpu, len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	/* Bail out if we are recursing on this cpu. */
	disable = atomic_inc_return(&data->disabled);
	if (unlikely(disable != 1))
		goto out;

	/* Lockdep uses trace_printk for lock tracing */
	local_irq_save(flags);
	arch_spin_lock(&trace_buf_lock);
	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);

	if (len > TRACE_BUF_SIZE || len < 0)
		goto out_unlock;

	/* Event payload: the bprint header plus len binary words. */
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out_unlock;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
	if (!filter_check_discard(call, entry, buffer, event)) {
		ring_buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}

out_unlock:
	arch_spin_unlock(&trace_buf_lock);
	local_irq_restore(flags);

out:
	atomic_dec_return(&data->disabled);
	ftrace_preempt_enable(resched);
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
1417 | 1433 | ||
1418 | int trace_array_printk(struct trace_array *tr, | 1434 | int trace_array_printk(struct trace_array *tr, |
1419 | unsigned long ip, const char *fmt, ...) | 1435 | unsigned long ip, const char *fmt, ...) |
1420 | { | 1436 | { |
1421 | int ret; | 1437 | int ret; |
1422 | va_list ap; | 1438 | va_list ap; |
1423 | 1439 | ||
1424 | if (!(trace_flags & TRACE_ITER_PRINTK)) | 1440 | if (!(trace_flags & TRACE_ITER_PRINTK)) |
1425 | return 0; | 1441 | return 0; |
1426 | 1442 | ||
1427 | va_start(ap, fmt); | 1443 | va_start(ap, fmt); |
1428 | ret = trace_array_vprintk(tr, ip, fmt, ap); | 1444 | ret = trace_array_vprintk(tr, ip, fmt, ap); |
1429 | va_end(ap); | 1445 | va_end(ap); |
1430 | return ret; | 1446 | return ret; |
1431 | } | 1447 | } |
1432 | 1448 | ||
1433 | int trace_array_vprintk(struct trace_array *tr, | 1449 | int trace_array_vprintk(struct trace_array *tr, |
1434 | unsigned long ip, const char *fmt, va_list args) | 1450 | unsigned long ip, const char *fmt, va_list args) |
1435 | { | 1451 | { |
1436 | static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED; | 1452 | static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED; |
1437 | static char trace_buf[TRACE_BUF_SIZE]; | 1453 | static char trace_buf[TRACE_BUF_SIZE]; |
1438 | 1454 | ||
1439 | struct ftrace_event_call *call = &event_print; | 1455 | struct ftrace_event_call *call = &event_print; |
1440 | struct ring_buffer_event *event; | 1456 | struct ring_buffer_event *event; |
1441 | struct ring_buffer *buffer; | 1457 | struct ring_buffer *buffer; |
1442 | struct trace_array_cpu *data; | 1458 | struct trace_array_cpu *data; |
1443 | int cpu, len = 0, size, pc; | 1459 | int cpu, len = 0, size, pc; |
1444 | struct print_entry *entry; | 1460 | struct print_entry *entry; |
1445 | unsigned long irq_flags; | 1461 | unsigned long irq_flags; |
1446 | int disable; | 1462 | int disable; |
1447 | 1463 | ||
1448 | if (tracing_disabled || tracing_selftest_running) | 1464 | if (tracing_disabled || tracing_selftest_running) |
1449 | return 0; | 1465 | return 0; |
1450 | 1466 | ||
1451 | pc = preempt_count(); | 1467 | pc = preempt_count(); |
1452 | preempt_disable_notrace(); | 1468 | preempt_disable_notrace(); |
1453 | cpu = raw_smp_processor_id(); | 1469 | cpu = raw_smp_processor_id(); |
1454 | data = tr->data[cpu]; | 1470 | data = tr->data[cpu]; |
1455 | 1471 | ||
1456 | disable = atomic_inc_return(&data->disabled); | 1472 | disable = atomic_inc_return(&data->disabled); |
1457 | if (unlikely(disable != 1)) | 1473 | if (unlikely(disable != 1)) |
1458 | goto out; | 1474 | goto out; |
1459 | 1475 | ||
1460 | pause_graph_tracing(); | 1476 | pause_graph_tracing(); |
1461 | raw_local_irq_save(irq_flags); | 1477 | raw_local_irq_save(irq_flags); |
1462 | arch_spin_lock(&trace_buf_lock); | 1478 | arch_spin_lock(&trace_buf_lock); |
1463 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); | 1479 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); |
1464 | 1480 | ||
1465 | size = sizeof(*entry) + len + 1; | 1481 | size = sizeof(*entry) + len + 1; |
1466 | buffer = tr->buffer; | 1482 | buffer = tr->buffer; |
1467 | event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, | 1483 | event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, |
1468 | irq_flags, pc); | 1484 | irq_flags, pc); |
1469 | if (!event) | 1485 | if (!event) |
1470 | goto out_unlock; | 1486 | goto out_unlock; |
1471 | entry = ring_buffer_event_data(event); | 1487 | entry = ring_buffer_event_data(event); |
1472 | entry->ip = ip; | 1488 | entry->ip = ip; |
1473 | 1489 | ||
1474 | memcpy(&entry->buf, trace_buf, len); | 1490 | memcpy(&entry->buf, trace_buf, len); |
1475 | entry->buf[len] = '\0'; | 1491 | entry->buf[len] = '\0'; |
1476 | if (!filter_check_discard(call, entry, buffer, event)) { | 1492 | if (!filter_check_discard(call, entry, buffer, event)) { |
1477 | ring_buffer_unlock_commit(buffer, event); | 1493 | ring_buffer_unlock_commit(buffer, event); |
1478 | ftrace_trace_stack(buffer, irq_flags, 6, pc); | 1494 | ftrace_trace_stack(buffer, irq_flags, 6, pc); |
1479 | } | 1495 | } |
1480 | 1496 | ||
1481 | out_unlock: | 1497 | out_unlock: |
1482 | arch_spin_unlock(&trace_buf_lock); | 1498 | arch_spin_unlock(&trace_buf_lock); |
1483 | raw_local_irq_restore(irq_flags); | 1499 | raw_local_irq_restore(irq_flags); |
1484 | unpause_graph_tracing(); | 1500 | unpause_graph_tracing(); |
1485 | out: | 1501 | out: |
1486 | atomic_dec_return(&data->disabled); | 1502 | atomic_dec_return(&data->disabled); |
1487 | preempt_enable_notrace(); | 1503 | preempt_enable_notrace(); |
1488 | 1504 | ||
1489 | return len; | 1505 | return len; |
1490 | } | 1506 | } |
1491 | 1507 | ||
/* trace_printk() backend: write a formatted message to the global buffer. */
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
1497 | 1513 | ||
/* Bit flags describing how a trace file was opened. */
enum trace_file_type {
	TRACE_FILE_LAT_FMT = 1,		/* presumably latency-format output — confirm at use sites */
	TRACE_FILE_ANNOTATE = 2,	/* presumably annotated output — confirm at use sites */
};
1502 | 1518 | ||
/*
 * trace_iterator_increment - advance @iter by one entry, also stepping
 * the per-cpu ring buffer iterator when one is in use.
 */
static void trace_iterator_increment(struct trace_iterator *iter)
{
	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();

	iter->idx++;
	if (iter->buffer_iter[iter->cpu])
		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);

	ftrace_enable_cpu();
}
1514 | 1530 | ||
1515 | static struct trace_entry * | 1531 | static struct trace_entry * |
1516 | peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts) | 1532 | peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts) |
1517 | { | 1533 | { |
1518 | struct ring_buffer_event *event; | 1534 | struct ring_buffer_event *event; |
1519 | struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu]; | 1535 | struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu]; |
1520 | 1536 | ||
1521 | /* Don't allow ftrace to trace into the ring buffers */ | 1537 | /* Don't allow ftrace to trace into the ring buffers */ |
1522 | ftrace_disable_cpu(); | 1538 | ftrace_disable_cpu(); |
1523 | 1539 | ||
1524 | if (buf_iter) | 1540 | if (buf_iter) |
1525 | event = ring_buffer_iter_peek(buf_iter, ts); | 1541 | event = ring_buffer_iter_peek(buf_iter, ts); |
1526 | else | 1542 | else |
1527 | event = ring_buffer_peek(iter->tr->buffer, cpu, ts); | 1543 | event = ring_buffer_peek(iter->tr->buffer, cpu, ts); |
1528 | 1544 | ||
1529 | ftrace_enable_cpu(); | 1545 | ftrace_enable_cpu(); |
1530 | 1546 | ||
1531 | return event ? ring_buffer_event_data(event) : NULL; | 1547 | return event ? ring_buffer_event_data(event) : NULL; |
1532 | } | 1548 | } |
1533 | 1549 | ||
1534 | static struct trace_entry * | 1550 | static struct trace_entry * |
1535 | __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) | 1551 | __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) |
1536 | { | 1552 | { |
1537 | struct ring_buffer *buffer = iter->tr->buffer; | 1553 | struct ring_buffer *buffer = iter->tr->buffer; |
1538 | struct trace_entry *ent, *next = NULL; | 1554 | struct trace_entry *ent, *next = NULL; |
1539 | int cpu_file = iter->cpu_file; | 1555 | int cpu_file = iter->cpu_file; |
1540 | u64 next_ts = 0, ts; | 1556 | u64 next_ts = 0, ts; |
1541 | int next_cpu = -1; | 1557 | int next_cpu = -1; |
1542 | int cpu; | 1558 | int cpu; |
1543 | 1559 | ||
1544 | /* | 1560 | /* |
1545 | * If we are in a per_cpu trace file, don't bother by iterating over | 1561 | * If we are in a per_cpu trace file, don't bother by iterating over |
1546 | * all cpu and peek directly. | 1562 | * all cpu and peek directly. |
1547 | */ | 1563 | */ |
1548 | if (cpu_file > TRACE_PIPE_ALL_CPU) { | 1564 | if (cpu_file > TRACE_PIPE_ALL_CPU) { |
1549 | if (ring_buffer_empty_cpu(buffer, cpu_file)) | 1565 | if (ring_buffer_empty_cpu(buffer, cpu_file)) |
1550 | return NULL; | 1566 | return NULL; |
1551 | ent = peek_next_entry(iter, cpu_file, ent_ts); | 1567 | ent = peek_next_entry(iter, cpu_file, ent_ts); |
1552 | if (ent_cpu) | 1568 | if (ent_cpu) |
1553 | *ent_cpu = cpu_file; | 1569 | *ent_cpu = cpu_file; |
1554 | 1570 | ||
1555 | return ent; | 1571 | return ent; |
1556 | } | 1572 | } |
1557 | 1573 | ||
1558 | for_each_tracing_cpu(cpu) { | 1574 | for_each_tracing_cpu(cpu) { |
1559 | 1575 | ||
1560 | if (ring_buffer_empty_cpu(buffer, cpu)) | 1576 | if (ring_buffer_empty_cpu(buffer, cpu)) |
1561 | continue; | 1577 | continue; |
1562 | 1578 | ||
1563 | ent = peek_next_entry(iter, cpu, &ts); | 1579 | ent = peek_next_entry(iter, cpu, &ts); |
1564 | 1580 | ||
1565 | /* | 1581 | /* |
1566 | * Pick the entry with the smallest timestamp: | 1582 | * Pick the entry with the smallest timestamp: |
1567 | */ | 1583 | */ |
1568 | if (ent && (!next || ts < next_ts)) { | 1584 | if (ent && (!next || ts < next_ts)) { |
1569 | next = ent; | 1585 | next = ent; |
1570 | next_cpu = cpu; | 1586 | next_cpu = cpu; |
1571 | next_ts = ts; | 1587 | next_ts = ts; |
1572 | } | 1588 | } |
1573 | } | 1589 | } |
1574 | 1590 | ||
1575 | if (ent_cpu) | 1591 | if (ent_cpu) |
1576 | *ent_cpu = next_cpu; | 1592 | *ent_cpu = next_cpu; |
1577 | 1593 | ||
1578 | if (ent_ts) | 1594 | if (ent_ts) |
1579 | *ent_ts = next_ts; | 1595 | *ent_ts = next_ts; |
1580 | 1596 | ||
1581 | return next; | 1597 | return next; |
1582 | } | 1598 | } |
1583 | 1599 | ||
1584 | /* Find the next real entry, without updating the iterator itself */ | 1600 | /* Find the next real entry, without updating the iterator itself */ |
1585 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, | 1601 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, |
1586 | int *ent_cpu, u64 *ent_ts) | 1602 | int *ent_cpu, u64 *ent_ts) |
1587 | { | 1603 | { |
1588 | return __find_next_entry(iter, ent_cpu, ent_ts); | 1604 | return __find_next_entry(iter, ent_cpu, ent_ts); |
1589 | } | 1605 | } |
1590 | 1606 | ||
1591 | /* Find the next real entry, and increment the iterator to the next entry */ | 1607 | /* Find the next real entry, and increment the iterator to the next entry */ |
1592 | static void *find_next_entry_inc(struct trace_iterator *iter) | 1608 | static void *find_next_entry_inc(struct trace_iterator *iter) |
1593 | { | 1609 | { |
1594 | iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts); | 1610 | iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts); |
1595 | 1611 | ||
1596 | if (iter->ent) | 1612 | if (iter->ent) |
1597 | trace_iterator_increment(iter); | 1613 | trace_iterator_increment(iter); |
1598 | 1614 | ||
1599 | return iter->ent ? iter : NULL; | 1615 | return iter->ent ? iter : NULL; |
1600 | } | 1616 | } |
1601 | 1617 | ||
1602 | static void trace_consume(struct trace_iterator *iter) | 1618 | static void trace_consume(struct trace_iterator *iter) |
1603 | { | 1619 | { |
1604 | /* Don't allow ftrace to trace into the ring buffers */ | 1620 | /* Don't allow ftrace to trace into the ring buffers */ |
1605 | ftrace_disable_cpu(); | 1621 | ftrace_disable_cpu(); |
1606 | ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts); | 1622 | ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts); |
1607 | ftrace_enable_cpu(); | 1623 | ftrace_enable_cpu(); |
1608 | } | 1624 | } |
1609 | 1625 | ||
1610 | static void *s_next(struct seq_file *m, void *v, loff_t *pos) | 1626 | static void *s_next(struct seq_file *m, void *v, loff_t *pos) |
1611 | { | 1627 | { |
1612 | struct trace_iterator *iter = m->private; | 1628 | struct trace_iterator *iter = m->private; |
1613 | int i = (int)*pos; | 1629 | int i = (int)*pos; |
1614 | void *ent; | 1630 | void *ent; |
1615 | 1631 | ||
1616 | WARN_ON_ONCE(iter->leftover); | 1632 | WARN_ON_ONCE(iter->leftover); |
1617 | 1633 | ||
1618 | (*pos)++; | 1634 | (*pos)++; |
1619 | 1635 | ||
1620 | /* can't go backwards */ | 1636 | /* can't go backwards */ |
1621 | if (iter->idx > i) | 1637 | if (iter->idx > i) |
1622 | return NULL; | 1638 | return NULL; |
1623 | 1639 | ||
1624 | if (iter->idx < 0) | 1640 | if (iter->idx < 0) |
1625 | ent = find_next_entry_inc(iter); | 1641 | ent = find_next_entry_inc(iter); |
1626 | else | 1642 | else |
1627 | ent = iter; | 1643 | ent = iter; |
1628 | 1644 | ||
1629 | while (ent && iter->idx < i) | 1645 | while (ent && iter->idx < i) |
1630 | ent = find_next_entry_inc(iter); | 1646 | ent = find_next_entry_inc(iter); |
1631 | 1647 | ||
1632 | iter->pos = *pos; | 1648 | iter->pos = *pos; |
1633 | 1649 | ||
1634 | return ent; | 1650 | return ent; |
1635 | } | 1651 | } |
1636 | 1652 | ||
1637 | static void tracing_iter_reset(struct trace_iterator *iter, int cpu) | 1653 | static void tracing_iter_reset(struct trace_iterator *iter, int cpu) |
1638 | { | 1654 | { |
1639 | struct trace_array *tr = iter->tr; | 1655 | struct trace_array *tr = iter->tr; |
1640 | struct ring_buffer_event *event; | 1656 | struct ring_buffer_event *event; |
1641 | struct ring_buffer_iter *buf_iter; | 1657 | struct ring_buffer_iter *buf_iter; |
1642 | unsigned long entries = 0; | 1658 | unsigned long entries = 0; |
1643 | u64 ts; | 1659 | u64 ts; |
1644 | 1660 | ||
1645 | tr->data[cpu]->skipped_entries = 0; | 1661 | tr->data[cpu]->skipped_entries = 0; |
1646 | 1662 | ||
1647 | if (!iter->buffer_iter[cpu]) | 1663 | if (!iter->buffer_iter[cpu]) |
1648 | return; | 1664 | return; |
1649 | 1665 | ||
1650 | buf_iter = iter->buffer_iter[cpu]; | 1666 | buf_iter = iter->buffer_iter[cpu]; |
1651 | ring_buffer_iter_reset(buf_iter); | 1667 | ring_buffer_iter_reset(buf_iter); |
1652 | 1668 | ||
1653 | /* | 1669 | /* |
1654 | * We could have the case with the max latency tracers | 1670 | * We could have the case with the max latency tracers |
1655 | * that a reset never took place on a cpu. This is evident | 1671 | * that a reset never took place on a cpu. This is evident |
1656 | * by the timestamp being before the start of the buffer. | 1672 | * by the timestamp being before the start of the buffer. |
1657 | */ | 1673 | */ |
1658 | while ((event = ring_buffer_iter_peek(buf_iter, &ts))) { | 1674 | while ((event = ring_buffer_iter_peek(buf_iter, &ts))) { |
1659 | if (ts >= iter->tr->time_start) | 1675 | if (ts >= iter->tr->time_start) |
1660 | break; | 1676 | break; |
1661 | entries++; | 1677 | entries++; |
1662 | ring_buffer_read(buf_iter, NULL); | 1678 | ring_buffer_read(buf_iter, NULL); |
1663 | } | 1679 | } |
1664 | 1680 | ||
1665 | tr->data[cpu]->skipped_entries = entries; | 1681 | tr->data[cpu]->skipped_entries = entries; |
1666 | } | 1682 | } |
1667 | 1683 | ||
1668 | /* | 1684 | /* |
1669 | * The current tracer is copied to avoid a global locking | 1685 | * The current tracer is copied to avoid a global locking |
1670 | * all around. | 1686 | * all around. |
1671 | */ | 1687 | */ |
1672 | static void *s_start(struct seq_file *m, loff_t *pos) | 1688 | static void *s_start(struct seq_file *m, loff_t *pos) |
1673 | { | 1689 | { |
1674 | struct trace_iterator *iter = m->private; | 1690 | struct trace_iterator *iter = m->private; |
1675 | static struct tracer *old_tracer; | 1691 | static struct tracer *old_tracer; |
1676 | int cpu_file = iter->cpu_file; | 1692 | int cpu_file = iter->cpu_file; |
1677 | void *p = NULL; | 1693 | void *p = NULL; |
1678 | loff_t l = 0; | 1694 | loff_t l = 0; |
1679 | int cpu; | 1695 | int cpu; |
1680 | 1696 | ||
1681 | /* copy the tracer to avoid using a global lock all around */ | 1697 | /* copy the tracer to avoid using a global lock all around */ |
1682 | mutex_lock(&trace_types_lock); | 1698 | mutex_lock(&trace_types_lock); |
1683 | if (unlikely(old_tracer != current_trace && current_trace)) { | 1699 | if (unlikely(old_tracer != current_trace && current_trace)) { |
1684 | old_tracer = current_trace; | 1700 | old_tracer = current_trace; |
1685 | *iter->trace = *current_trace; | 1701 | *iter->trace = *current_trace; |
1686 | } | 1702 | } |
1687 | mutex_unlock(&trace_types_lock); | 1703 | mutex_unlock(&trace_types_lock); |
1688 | 1704 | ||
1689 | atomic_inc(&trace_record_cmdline_disabled); | 1705 | atomic_inc(&trace_record_cmdline_disabled); |
1690 | 1706 | ||
1691 | if (*pos != iter->pos) { | 1707 | if (*pos != iter->pos) { |
1692 | iter->ent = NULL; | 1708 | iter->ent = NULL; |
1693 | iter->cpu = 0; | 1709 | iter->cpu = 0; |
1694 | iter->idx = -1; | 1710 | iter->idx = -1; |
1695 | 1711 | ||
1696 | ftrace_disable_cpu(); | 1712 | ftrace_disable_cpu(); |
1697 | 1713 | ||
1698 | if (cpu_file == TRACE_PIPE_ALL_CPU) { | 1714 | if (cpu_file == TRACE_PIPE_ALL_CPU) { |
1699 | for_each_tracing_cpu(cpu) | 1715 | for_each_tracing_cpu(cpu) |
1700 | tracing_iter_reset(iter, cpu); | 1716 | tracing_iter_reset(iter, cpu); |
1701 | } else | 1717 | } else |
1702 | tracing_iter_reset(iter, cpu_file); | 1718 | tracing_iter_reset(iter, cpu_file); |
1703 | 1719 | ||
1704 | ftrace_enable_cpu(); | 1720 | ftrace_enable_cpu(); |
1705 | 1721 | ||
1706 | iter->leftover = 0; | 1722 | iter->leftover = 0; |
1707 | for (p = iter; p && l < *pos; p = s_next(m, p, &l)) | 1723 | for (p = iter; p && l < *pos; p = s_next(m, p, &l)) |
1708 | ; | 1724 | ; |
1709 | 1725 | ||
1710 | } else { | 1726 | } else { |
1711 | /* | 1727 | /* |
1712 | * If we overflowed the seq_file before, then we want | 1728 | * If we overflowed the seq_file before, then we want |
1713 | * to just reuse the trace_seq buffer again. | 1729 | * to just reuse the trace_seq buffer again. |
1714 | */ | 1730 | */ |
1715 | if (iter->leftover) | 1731 | if (iter->leftover) |
1716 | p = iter; | 1732 | p = iter; |
1717 | else { | 1733 | else { |
1718 | l = *pos - 1; | 1734 | l = *pos - 1; |
1719 | p = s_next(m, p, &l); | 1735 | p = s_next(m, p, &l); |
1720 | } | 1736 | } |
1721 | } | 1737 | } |
1722 | 1738 | ||
1723 | trace_event_read_lock(); | 1739 | trace_event_read_lock(); |
1724 | trace_access_lock(cpu_file); | 1740 | trace_access_lock(cpu_file); |
1725 | return p; | 1741 | return p; |
1726 | } | 1742 | } |
1727 | 1743 | ||
1728 | static void s_stop(struct seq_file *m, void *p) | 1744 | static void s_stop(struct seq_file *m, void *p) |
1729 | { | 1745 | { |
1730 | struct trace_iterator *iter = m->private; | 1746 | struct trace_iterator *iter = m->private; |
1731 | 1747 | ||
1732 | atomic_dec(&trace_record_cmdline_disabled); | 1748 | atomic_dec(&trace_record_cmdline_disabled); |
1733 | trace_access_unlock(iter->cpu_file); | 1749 | trace_access_unlock(iter->cpu_file); |
1734 | trace_event_read_unlock(); | 1750 | trace_event_read_unlock(); |
1735 | } | 1751 | } |
1736 | 1752 | ||
/* Emit the column-legend header used by the latency trace format. */
static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "# _------=> CPU# \n");
	seq_puts(m, "# / _-----=> irqs-off \n");
	seq_puts(m, "# | / _----=> need-resched \n");
	seq_puts(m, "# || / _---=> hardirq/softirq \n");
	seq_puts(m, "# ||| / _--=> preempt-depth \n");
	seq_puts(m, "# |||| /_--=> lock-depth \n");
	seq_puts(m, "# |||||/ delay \n");
	seq_puts(m, "# cmd pid |||||| time | caller \n");
	seq_puts(m, "# \\ / |||||| \\ | / \n");
}
1749 | 1765 | ||
/* Emit the column header for the default (function) trace format. */
static void print_func_help_header(struct seq_file *m)
{
	seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
	seq_puts(m, "# | | | | |\n");
}
1755 | 1771 | ||
1756 | 1772 | ||
1757 | static void | 1773 | static void |
1758 | print_trace_header(struct seq_file *m, struct trace_iterator *iter) | 1774 | print_trace_header(struct seq_file *m, struct trace_iterator *iter) |
1759 | { | 1775 | { |
1760 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); | 1776 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); |
1761 | struct trace_array *tr = iter->tr; | 1777 | struct trace_array *tr = iter->tr; |
1762 | struct trace_array_cpu *data = tr->data[tr->cpu]; | 1778 | struct trace_array_cpu *data = tr->data[tr->cpu]; |
1763 | struct tracer *type = current_trace; | 1779 | struct tracer *type = current_trace; |
1764 | unsigned long entries = 0; | 1780 | unsigned long entries = 0; |
1765 | unsigned long total = 0; | 1781 | unsigned long total = 0; |
1766 | unsigned long count; | 1782 | unsigned long count; |
1767 | const char *name = "preemption"; | 1783 | const char *name = "preemption"; |
1768 | int cpu; | 1784 | int cpu; |
1769 | 1785 | ||
1770 | if (type) | 1786 | if (type) |
1771 | name = type->name; | 1787 | name = type->name; |
1772 | 1788 | ||
1773 | 1789 | ||
1774 | for_each_tracing_cpu(cpu) { | 1790 | for_each_tracing_cpu(cpu) { |
1775 | count = ring_buffer_entries_cpu(tr->buffer, cpu); | 1791 | count = ring_buffer_entries_cpu(tr->buffer, cpu); |
1776 | /* | 1792 | /* |
1777 | * If this buffer has skipped entries, then we hold all | 1793 | * If this buffer has skipped entries, then we hold all |
1778 | * entries for the trace and we need to ignore the | 1794 | * entries for the trace and we need to ignore the |
1779 | * ones before the time stamp. | 1795 | * ones before the time stamp. |
1780 | */ | 1796 | */ |
1781 | if (tr->data[cpu]->skipped_entries) { | 1797 | if (tr->data[cpu]->skipped_entries) { |
1782 | count -= tr->data[cpu]->skipped_entries; | 1798 | count -= tr->data[cpu]->skipped_entries; |
1783 | /* total is the same as the entries */ | 1799 | /* total is the same as the entries */ |
1784 | total += count; | 1800 | total += count; |
1785 | } else | 1801 | } else |
1786 | total += count + | 1802 | total += count + |
1787 | ring_buffer_overrun_cpu(tr->buffer, cpu); | 1803 | ring_buffer_overrun_cpu(tr->buffer, cpu); |
1788 | entries += count; | 1804 | entries += count; |
1789 | } | 1805 | } |
1790 | 1806 | ||
1791 | seq_printf(m, "# %s latency trace v1.1.5 on %s\n", | 1807 | seq_printf(m, "# %s latency trace v1.1.5 on %s\n", |
1792 | name, UTS_RELEASE); | 1808 | name, UTS_RELEASE); |
1793 | seq_puts(m, "# -----------------------------------" | 1809 | seq_puts(m, "# -----------------------------------" |
1794 | "---------------------------------\n"); | 1810 | "---------------------------------\n"); |
1795 | seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |" | 1811 | seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |" |
1796 | " (M:%s VP:%d, KP:%d, SP:%d HP:%d", | 1812 | " (M:%s VP:%d, KP:%d, SP:%d HP:%d", |
1797 | nsecs_to_usecs(data->saved_latency), | 1813 | nsecs_to_usecs(data->saved_latency), |
1798 | entries, | 1814 | entries, |
1799 | total, | 1815 | total, |
1800 | tr->cpu, | 1816 | tr->cpu, |
1801 | #if defined(CONFIG_PREEMPT_NONE) | 1817 | #if defined(CONFIG_PREEMPT_NONE) |
1802 | "server", | 1818 | "server", |
1803 | #elif defined(CONFIG_PREEMPT_VOLUNTARY) | 1819 | #elif defined(CONFIG_PREEMPT_VOLUNTARY) |
1804 | "desktop", | 1820 | "desktop", |
1805 | #elif defined(CONFIG_PREEMPT) | 1821 | #elif defined(CONFIG_PREEMPT) |
1806 | "preempt", | 1822 | "preempt", |
1807 | #else | 1823 | #else |
1808 | "unknown", | 1824 | "unknown", |
1809 | #endif | 1825 | #endif |
1810 | /* These are reserved for later use */ | 1826 | /* These are reserved for later use */ |
1811 | 0, 0, 0, 0); | 1827 | 0, 0, 0, 0); |
1812 | #ifdef CONFIG_SMP | 1828 | #ifdef CONFIG_SMP |
1813 | seq_printf(m, " #P:%d)\n", num_online_cpus()); | 1829 | seq_printf(m, " #P:%d)\n", num_online_cpus()); |
1814 | #else | 1830 | #else |
1815 | seq_puts(m, ")\n"); | 1831 | seq_puts(m, ")\n"); |
1816 | #endif | 1832 | #endif |
1817 | seq_puts(m, "# -----------------\n"); | 1833 | seq_puts(m, "# -----------------\n"); |
1818 | seq_printf(m, "# | task: %.16s-%d " | 1834 | seq_printf(m, "# | task: %.16s-%d " |
1819 | "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", | 1835 | "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", |
1820 | data->comm, data->pid, data->uid, data->nice, | 1836 | data->comm, data->pid, data->uid, data->nice, |
1821 | data->policy, data->rt_priority); | 1837 | data->policy, data->rt_priority); |
1822 | seq_puts(m, "# -----------------\n"); | 1838 | seq_puts(m, "# -----------------\n"); |
1823 | 1839 | ||
1824 | if (data->critical_start) { | 1840 | if (data->critical_start) { |
1825 | seq_puts(m, "# => started at: "); | 1841 | seq_puts(m, "# => started at: "); |
1826 | seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); | 1842 | seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); |
1827 | trace_print_seq(m, &iter->seq); | 1843 | trace_print_seq(m, &iter->seq); |
1828 | seq_puts(m, "\n# => ended at: "); | 1844 | seq_puts(m, "\n# => ended at: "); |
1829 | seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); | 1845 | seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); |
1830 | trace_print_seq(m, &iter->seq); | 1846 | trace_print_seq(m, &iter->seq); |
1831 | seq_puts(m, "\n#\n"); | 1847 | seq_puts(m, "\n#\n"); |
1832 | } | 1848 | } |
1833 | 1849 | ||
1834 | seq_puts(m, "#\n"); | 1850 | seq_puts(m, "#\n"); |
1835 | } | 1851 | } |
1836 | 1852 | ||
1837 | static void test_cpu_buff_start(struct trace_iterator *iter) | 1853 | static void test_cpu_buff_start(struct trace_iterator *iter) |
1838 | { | 1854 | { |
1839 | struct trace_seq *s = &iter->seq; | 1855 | struct trace_seq *s = &iter->seq; |
1840 | 1856 | ||
1841 | if (!(trace_flags & TRACE_ITER_ANNOTATE)) | 1857 | if (!(trace_flags & TRACE_ITER_ANNOTATE)) |
1842 | return; | 1858 | return; |
1843 | 1859 | ||
1844 | if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) | 1860 | if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) |
1845 | return; | 1861 | return; |
1846 | 1862 | ||
1847 | if (cpumask_test_cpu(iter->cpu, iter->started)) | 1863 | if (cpumask_test_cpu(iter->cpu, iter->started)) |
1848 | return; | 1864 | return; |
1849 | 1865 | ||
1850 | if (iter->tr->data[iter->cpu]->skipped_entries) | 1866 | if (iter->tr->data[iter->cpu]->skipped_entries) |
1851 | return; | 1867 | return; |
1852 | 1868 | ||
1853 | cpumask_set_cpu(iter->cpu, iter->started); | 1869 | cpumask_set_cpu(iter->cpu, iter->started); |
1854 | 1870 | ||
1855 | /* Don't print started cpu buffer for the first entry of the trace */ | 1871 | /* Don't print started cpu buffer for the first entry of the trace */ |
1856 | if (iter->idx > 1) | 1872 | if (iter->idx > 1) |
1857 | trace_seq_printf(s, "##### CPU %u buffer started ####\n", | 1873 | trace_seq_printf(s, "##### CPU %u buffer started ####\n", |
1858 | iter->cpu); | 1874 | iter->cpu); |
1859 | } | 1875 | } |
1860 | 1876 | ||
1861 | static enum print_line_t print_trace_fmt(struct trace_iterator *iter) | 1877 | static enum print_line_t print_trace_fmt(struct trace_iterator *iter) |
1862 | { | 1878 | { |
1863 | struct trace_seq *s = &iter->seq; | 1879 | struct trace_seq *s = &iter->seq; |
1864 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); | 1880 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); |
1865 | struct trace_entry *entry; | 1881 | struct trace_entry *entry; |
1866 | struct trace_event *event; | 1882 | struct trace_event *event; |
1867 | 1883 | ||
1868 | entry = iter->ent; | 1884 | entry = iter->ent; |
1869 | 1885 | ||
1870 | test_cpu_buff_start(iter); | 1886 | test_cpu_buff_start(iter); |
1871 | 1887 | ||
1872 | event = ftrace_find_event(entry->type); | 1888 | event = ftrace_find_event(entry->type); |
1873 | 1889 | ||
1874 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 1890 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { |
1875 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) { | 1891 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) { |
1876 | if (!trace_print_lat_context(iter)) | 1892 | if (!trace_print_lat_context(iter)) |
1877 | goto partial; | 1893 | goto partial; |
1878 | } else { | 1894 | } else { |
1879 | if (!trace_print_context(iter)) | 1895 | if (!trace_print_context(iter)) |
1880 | goto partial; | 1896 | goto partial; |
1881 | } | 1897 | } |
1882 | } | 1898 | } |
1883 | 1899 | ||
1884 | if (event) | 1900 | if (event) |
1885 | return event->trace(iter, sym_flags); | 1901 | return event->trace(iter, sym_flags); |
1886 | 1902 | ||
1887 | if (!trace_seq_printf(s, "Unknown type %d\n", entry->type)) | 1903 | if (!trace_seq_printf(s, "Unknown type %d\n", entry->type)) |
1888 | goto partial; | 1904 | goto partial; |
1889 | 1905 | ||
1890 | return TRACE_TYPE_HANDLED; | 1906 | return TRACE_TYPE_HANDLED; |
1891 | partial: | 1907 | partial: |
1892 | return TRACE_TYPE_PARTIAL_LINE; | 1908 | return TRACE_TYPE_PARTIAL_LINE; |
1893 | } | 1909 | } |
1894 | 1910 | ||
1895 | static enum print_line_t print_raw_fmt(struct trace_iterator *iter) | 1911 | static enum print_line_t print_raw_fmt(struct trace_iterator *iter) |
1896 | { | 1912 | { |
1897 | struct trace_seq *s = &iter->seq; | 1913 | struct trace_seq *s = &iter->seq; |
1898 | struct trace_entry *entry; | 1914 | struct trace_entry *entry; |
1899 | struct trace_event *event; | 1915 | struct trace_event *event; |
1900 | 1916 | ||
1901 | entry = iter->ent; | 1917 | entry = iter->ent; |
1902 | 1918 | ||
1903 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 1919 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { |
1904 | if (!trace_seq_printf(s, "%d %d %llu ", | 1920 | if (!trace_seq_printf(s, "%d %d %llu ", |
1905 | entry->pid, iter->cpu, iter->ts)) | 1921 | entry->pid, iter->cpu, iter->ts)) |
1906 | goto partial; | 1922 | goto partial; |
1907 | } | 1923 | } |
1908 | 1924 | ||
1909 | event = ftrace_find_event(entry->type); | 1925 | event = ftrace_find_event(entry->type); |
1910 | if (event) | 1926 | if (event) |
1911 | return event->raw(iter, 0); | 1927 | return event->raw(iter, 0); |
1912 | 1928 | ||
1913 | if (!trace_seq_printf(s, "%d ?\n", entry->type)) | 1929 | if (!trace_seq_printf(s, "%d ?\n", entry->type)) |
1914 | goto partial; | 1930 | goto partial; |
1915 | 1931 | ||
1916 | return TRACE_TYPE_HANDLED; | 1932 | return TRACE_TYPE_HANDLED; |
1917 | partial: | 1933 | partial: |
1918 | return TRACE_TYPE_PARTIAL_LINE; | 1934 | return TRACE_TYPE_PARTIAL_LINE; |
1919 | } | 1935 | } |
1920 | 1936 | ||
1921 | static enum print_line_t print_hex_fmt(struct trace_iterator *iter) | 1937 | static enum print_line_t print_hex_fmt(struct trace_iterator *iter) |
1922 | { | 1938 | { |
1923 | struct trace_seq *s = &iter->seq; | 1939 | struct trace_seq *s = &iter->seq; |
1924 | unsigned char newline = '\n'; | 1940 | unsigned char newline = '\n'; |
1925 | struct trace_entry *entry; | 1941 | struct trace_entry *entry; |
1926 | struct trace_event *event; | 1942 | struct trace_event *event; |
1927 | 1943 | ||
1928 | entry = iter->ent; | 1944 | entry = iter->ent; |
1929 | 1945 | ||
1930 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 1946 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { |
1931 | SEQ_PUT_HEX_FIELD_RET(s, entry->pid); | 1947 | SEQ_PUT_HEX_FIELD_RET(s, entry->pid); |
1932 | SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); | 1948 | SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); |
1933 | SEQ_PUT_HEX_FIELD_RET(s, iter->ts); | 1949 | SEQ_PUT_HEX_FIELD_RET(s, iter->ts); |
1934 | } | 1950 | } |
1935 | 1951 | ||
1936 | event = ftrace_find_event(entry->type); | 1952 | event = ftrace_find_event(entry->type); |
1937 | if (event) { | 1953 | if (event) { |
1938 | enum print_line_t ret = event->hex(iter, 0); | 1954 | enum print_line_t ret = event->hex(iter, 0); |
1939 | if (ret != TRACE_TYPE_HANDLED) | 1955 | if (ret != TRACE_TYPE_HANDLED) |
1940 | return ret; | 1956 | return ret; |
1941 | } | 1957 | } |
1942 | 1958 | ||
1943 | SEQ_PUT_FIELD_RET(s, newline); | 1959 | SEQ_PUT_FIELD_RET(s, newline); |
1944 | 1960 | ||
1945 | return TRACE_TYPE_HANDLED; | 1961 | return TRACE_TYPE_HANDLED; |
1946 | } | 1962 | } |
1947 | 1963 | ||
1948 | static enum print_line_t print_bin_fmt(struct trace_iterator *iter) | 1964 | static enum print_line_t print_bin_fmt(struct trace_iterator *iter) |
1949 | { | 1965 | { |
1950 | struct trace_seq *s = &iter->seq; | 1966 | struct trace_seq *s = &iter->seq; |
1951 | struct trace_entry *entry; | 1967 | struct trace_entry *entry; |
1952 | struct trace_event *event; | 1968 | struct trace_event *event; |
1953 | 1969 | ||
1954 | entry = iter->ent; | 1970 | entry = iter->ent; |
1955 | 1971 | ||
1956 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 1972 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { |
1957 | SEQ_PUT_FIELD_RET(s, entry->pid); | 1973 | SEQ_PUT_FIELD_RET(s, entry->pid); |
1958 | SEQ_PUT_FIELD_RET(s, iter->cpu); | 1974 | SEQ_PUT_FIELD_RET(s, iter->cpu); |
1959 | SEQ_PUT_FIELD_RET(s, iter->ts); | 1975 | SEQ_PUT_FIELD_RET(s, iter->ts); |
1960 | } | 1976 | } |
1961 | 1977 | ||
1962 | event = ftrace_find_event(entry->type); | 1978 | event = ftrace_find_event(entry->type); |
1963 | return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED; | 1979 | return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED; |
1964 | } | 1980 | } |
1965 | 1981 | ||
1966 | static int trace_empty(struct trace_iterator *iter) | 1982 | static int trace_empty(struct trace_iterator *iter) |
1967 | { | 1983 | { |
1968 | int cpu; | 1984 | int cpu; |
1969 | 1985 | ||
1970 | /* If we are looking at one CPU buffer, only check that one */ | 1986 | /* If we are looking at one CPU buffer, only check that one */ |
1971 | if (iter->cpu_file != TRACE_PIPE_ALL_CPU) { | 1987 | if (iter->cpu_file != TRACE_PIPE_ALL_CPU) { |
1972 | cpu = iter->cpu_file; | 1988 | cpu = iter->cpu_file; |
1973 | if (iter->buffer_iter[cpu]) { | 1989 | if (iter->buffer_iter[cpu]) { |
1974 | if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) | 1990 | if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) |
1975 | return 0; | 1991 | return 0; |
1976 | } else { | 1992 | } else { |
1977 | if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu)) | 1993 | if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu)) |
1978 | return 0; | 1994 | return 0; |
1979 | } | 1995 | } |
1980 | return 1; | 1996 | return 1; |
1981 | } | 1997 | } |
1982 | 1998 | ||
1983 | for_each_tracing_cpu(cpu) { | 1999 | for_each_tracing_cpu(cpu) { |
1984 | if (iter->buffer_iter[cpu]) { | 2000 | if (iter->buffer_iter[cpu]) { |
1985 | if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) | 2001 | if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) |
1986 | return 0; | 2002 | return 0; |
1987 | } else { | 2003 | } else { |
1988 | if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu)) | 2004 | if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu)) |
1989 | return 0; | 2005 | return 0; |
1990 | } | 2006 | } |
1991 | } | 2007 | } |
1992 | 2008 | ||
1993 | return 1; | 2009 | return 1; |
1994 | } | 2010 | } |
1995 | 2011 | ||
1996 | /* Called with trace_event_read_lock() held. */ | 2012 | /* Called with trace_event_read_lock() held. */ |
1997 | static enum print_line_t print_trace_line(struct trace_iterator *iter) | 2013 | static enum print_line_t print_trace_line(struct trace_iterator *iter) |
1998 | { | 2014 | { |
1999 | enum print_line_t ret; | 2015 | enum print_line_t ret; |
2000 | 2016 | ||
2001 | if (iter->trace && iter->trace->print_line) { | 2017 | if (iter->trace && iter->trace->print_line) { |
2002 | ret = iter->trace->print_line(iter); | 2018 | ret = iter->trace->print_line(iter); |
2003 | if (ret != TRACE_TYPE_UNHANDLED) | 2019 | if (ret != TRACE_TYPE_UNHANDLED) |
2004 | return ret; | 2020 | return ret; |
2005 | } | 2021 | } |
2006 | 2022 | ||
2007 | if (iter->ent->type == TRACE_BPRINT && | 2023 | if (iter->ent->type == TRACE_BPRINT && |
2008 | trace_flags & TRACE_ITER_PRINTK && | 2024 | trace_flags & TRACE_ITER_PRINTK && |
2009 | trace_flags & TRACE_ITER_PRINTK_MSGONLY) | 2025 | trace_flags & TRACE_ITER_PRINTK_MSGONLY) |
2010 | return trace_print_bprintk_msg_only(iter); | 2026 | return trace_print_bprintk_msg_only(iter); |
2011 | 2027 | ||
2012 | if (iter->ent->type == TRACE_PRINT && | 2028 | if (iter->ent->type == TRACE_PRINT && |
2013 | trace_flags & TRACE_ITER_PRINTK && | 2029 | trace_flags & TRACE_ITER_PRINTK && |
2014 | trace_flags & TRACE_ITER_PRINTK_MSGONLY) | 2030 | trace_flags & TRACE_ITER_PRINTK_MSGONLY) |
2015 | return trace_print_printk_msg_only(iter); | 2031 | return trace_print_printk_msg_only(iter); |
2016 | 2032 | ||
2017 | if (trace_flags & TRACE_ITER_BIN) | 2033 | if (trace_flags & TRACE_ITER_BIN) |
2018 | return print_bin_fmt(iter); | 2034 | return print_bin_fmt(iter); |
2019 | 2035 | ||
2020 | if (trace_flags & TRACE_ITER_HEX) | 2036 | if (trace_flags & TRACE_ITER_HEX) |
2021 | return print_hex_fmt(iter); | 2037 | return print_hex_fmt(iter); |
2022 | 2038 | ||
2023 | if (trace_flags & TRACE_ITER_RAW) | 2039 | if (trace_flags & TRACE_ITER_RAW) |
2024 | return print_raw_fmt(iter); | 2040 | return print_raw_fmt(iter); |
2025 | 2041 | ||
2026 | return print_trace_fmt(iter); | 2042 | return print_trace_fmt(iter); |
2027 | } | 2043 | } |
2028 | 2044 | ||
/*
 * seq_file ->show callback for the trace file.
 *
 * Three cases: no entry yet (print the headers), leftover text from a
 * previous overflowing call (flush it), or a real entry (format it into
 * iter->seq and hand it to the seq_file layer). Always returns 0; a
 * failed write is remembered in iter->leftover so the seq_file core
 * re-asks for the same data on the next invocation.
 */
static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		/* Start of output: emit the appropriate header block. */
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
		}
		if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			/* print nothing if the buffers are empty */
			if (trace_empty(iter))
				return 0;
			print_trace_header(m, iter);
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_lat_help_header(m);
		} else {
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_func_help_header(m);
		}
	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}
2077 | 2093 | ||
/* seq_file iteration callbacks for the main "trace" file. */
static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};
2084 | 2100 | ||
/*
 * Allocate and initialize a trace_iterator for a read-open of the
 * "trace" file and attach it to the seq_file created by seq_open().
 *
 * Takes a private copy of the current tracer so concurrent tracer
 * switches cannot change it mid-read, stops tracing for the duration
 * of the dump (restarted in tracing_release()), and sets up one ring
 * buffer iterator per CPU (or a single one for a per-cpu file).
 *
 * Returns the iterator on success, ERR_PTR(-errno) on failure.
 * On error everything acquired so far is unwound via the two labels.
 */
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file)
{
	long cpu_file = (long) inode->i_private;
	void *fail_ret = ERR_PTR(-ENOMEM);
	struct trace_iterator *iter;
	struct seq_file *m;
	int cpu, ret;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return ERR_PTR(-ENOMEM);

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	if (current_trace)
		*iter->trace = *current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	/* Tracers that record a max latency are read from the max buffer. */
	if (current_trace && current_trace->print_max)
		iter->tr = &max_tr;
	else
		iter->tr = &global_trace;
	iter->pos = -1;
	mutex_init(&iter->mutex);
	iter->cpu_file = cpu_file;

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->tr->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* stop the trace while dumping */
	tracing_stop();

	if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
		for_each_tracing_cpu(cpu) {

			iter->buffer_iter[cpu] =
				ring_buffer_read_start(iter->tr->buffer, cpu);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_start(iter->tr->buffer, cpu);
		tracing_iter_reset(iter, cpu);
	}

	ret = seq_open(file, &tracer_seq_ops);
	if (ret < 0) {
		fail_ret = ERR_PTR(ret);
		goto fail_buffer;
	}

	m = file->private_data;
	m->private = iter;

	mutex_unlock(&trace_types_lock);

	return iter;

 fail_buffer:
	/* Undo the per-cpu buffer iterators and restart tracing. */
	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}
	free_cpumask_var(iter->started);
	tracing_start();
 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter);

	return fail_ret;
}
2176 | 2192 | ||
/*
 * Generic open for the simple tracing files: stash the inode's private
 * data in the file so read/write handlers can retrieve it. Fails with
 * -ENODEV once tracing has been permanently disabled.
 */
int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}
2185 | 2201 | ||
/*
 * Release for the "trace" file: tear down everything __tracing_open()
 * set up. Per-cpu buffer iterators are finished and tracing restarted
 * under trace_types_lock, then the seq_file and the iterator itself
 * are freed. Write-only opens created no iterator, so bail early.
 */
static int tracing_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ))
		return 0;

	iter = m->private;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	/* reenable tracing if it was previously enabled */
	tracing_start();
	mutex_unlock(&trace_types_lock);

	seq_release(inode, file);
	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter);
	return 0;
}
2217 | 2233 | ||
2218 | static int tracing_open(struct inode *inode, struct file *file) | 2234 | static int tracing_open(struct inode *inode, struct file *file) |
2219 | { | 2235 | { |
2220 | struct trace_iterator *iter; | 2236 | struct trace_iterator *iter; |
2221 | int ret = 0; | 2237 | int ret = 0; |
2222 | 2238 | ||
2223 | /* If this file was open for write, then erase contents */ | 2239 | /* If this file was open for write, then erase contents */ |
2224 | if ((file->f_mode & FMODE_WRITE) && | 2240 | if ((file->f_mode & FMODE_WRITE) && |
2225 | (file->f_flags & O_TRUNC)) { | 2241 | (file->f_flags & O_TRUNC)) { |
2226 | long cpu = (long) inode->i_private; | 2242 | long cpu = (long) inode->i_private; |
2227 | 2243 | ||
2228 | if (cpu == TRACE_PIPE_ALL_CPU) | 2244 | if (cpu == TRACE_PIPE_ALL_CPU) |
2229 | tracing_reset_online_cpus(&global_trace); | 2245 | tracing_reset_online_cpus(&global_trace); |
2230 | else | 2246 | else |
2231 | tracing_reset(&global_trace, cpu); | 2247 | tracing_reset(&global_trace, cpu); |
2232 | } | 2248 | } |
2233 | 2249 | ||
2234 | if (file->f_mode & FMODE_READ) { | 2250 | if (file->f_mode & FMODE_READ) { |
2235 | iter = __tracing_open(inode, file); | 2251 | iter = __tracing_open(inode, file); |
2236 | if (IS_ERR(iter)) | 2252 | if (IS_ERR(iter)) |
2237 | ret = PTR_ERR(iter); | 2253 | ret = PTR_ERR(iter); |
2238 | else if (trace_flags & TRACE_ITER_LATENCY_FMT) | 2254 | else if (trace_flags & TRACE_ITER_LATENCY_FMT) |
2239 | iter->iter_flags |= TRACE_FILE_LAT_FMT; | 2255 | iter->iter_flags |= TRACE_FILE_LAT_FMT; |
2240 | } | 2256 | } |
2241 | return ret; | 2257 | return ret; |
2242 | } | 2258 | } |
2243 | 2259 | ||
2244 | static void * | 2260 | static void * |
2245 | t_next(struct seq_file *m, void *v, loff_t *pos) | 2261 | t_next(struct seq_file *m, void *v, loff_t *pos) |
2246 | { | 2262 | { |
2247 | struct tracer *t = v; | 2263 | struct tracer *t = v; |
2248 | 2264 | ||
2249 | (*pos)++; | 2265 | (*pos)++; |
2250 | 2266 | ||
2251 | if (t) | 2267 | if (t) |
2252 | t = t->next; | 2268 | t = t->next; |
2253 | 2269 | ||
2254 | return t; | 2270 | return t; |
2255 | } | 2271 | } |
2256 | 2272 | ||
2257 | static void *t_start(struct seq_file *m, loff_t *pos) | 2273 | static void *t_start(struct seq_file *m, loff_t *pos) |
2258 | { | 2274 | { |
2259 | struct tracer *t; | 2275 | struct tracer *t; |
2260 | loff_t l = 0; | 2276 | loff_t l = 0; |
2261 | 2277 | ||
2262 | mutex_lock(&trace_types_lock); | 2278 | mutex_lock(&trace_types_lock); |
2263 | for (t = trace_types; t && l < *pos; t = t_next(m, t, &l)) | 2279 | for (t = trace_types; t && l < *pos; t = t_next(m, t, &l)) |
2264 | ; | 2280 | ; |
2265 | 2281 | ||
2266 | return t; | 2282 | return t; |
2267 | } | 2283 | } |
2268 | 2284 | ||
/* seq_file ->stop: drop the lock taken in t_start(). */
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}
2273 | 2289 | ||
2274 | static int t_show(struct seq_file *m, void *v) | 2290 | static int t_show(struct seq_file *m, void *v) |
2275 | { | 2291 | { |
2276 | struct tracer *t = v; | 2292 | struct tracer *t = v; |
2277 | 2293 | ||
2278 | if (!t) | 2294 | if (!t) |
2279 | return 0; | 2295 | return 0; |
2280 | 2296 | ||
2281 | seq_printf(m, "%s", t->name); | 2297 | seq_printf(m, "%s", t->name); |
2282 | if (t->next) | 2298 | if (t->next) |
2283 | seq_putc(m, ' '); | 2299 | seq_putc(m, ' '); |
2284 | else | 2300 | else |
2285 | seq_putc(m, '\n'); | 2301 | seq_putc(m, '\n'); |
2286 | 2302 | ||
2287 | return 0; | 2303 | return 0; |
2288 | } | 2304 | } |
2289 | 2305 | ||
/* seq_file callbacks for the "available_tracers" file. */
static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
2296 | 2312 | ||
/* Open "available_tracers" as a seq_file, unless tracing is disabled. */
static int show_traces_open(struct inode *inode, struct file *file)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(file, &show_traces_seq_ops);
}
2304 | 2320 | ||
/*
 * Accept and discard writes to the "trace" file; writing exists only
 * so that opening with O_TRUNC can clear the buffers (see tracing_open).
 */
static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}
2311 | 2327 | ||
/* File operations for the main "trace" file. */
static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= seq_lseek,
	.release	= tracing_release,
};
2319 | 2335 | ||
/* File operations for "available_tracers". */
static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
};
2325 | 2341 | ||
/*
 * Only trace on a CPU if the bitmask is set:
 */
static cpumask_var_t tracing_cpumask;

/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 * protected by tracing_cpumask_update_lock.
 */
static char mask_str[NR_CPUS + 1];
2342 | 2358 | ||
2343 | static ssize_t | 2359 | static ssize_t |
2344 | tracing_cpumask_read(struct file *filp, char __user *ubuf, | 2360 | tracing_cpumask_read(struct file *filp, char __user *ubuf, |
2345 | size_t count, loff_t *ppos) | 2361 | size_t count, loff_t *ppos) |
2346 | { | 2362 | { |
2347 | int len; | 2363 | int len; |
2348 | 2364 | ||
2349 | mutex_lock(&tracing_cpumask_update_lock); | 2365 | mutex_lock(&tracing_cpumask_update_lock); |
2350 | 2366 | ||
2351 | len = cpumask_scnprintf(mask_str, count, tracing_cpumask); | 2367 | len = cpumask_scnprintf(mask_str, count, tracing_cpumask); |
2352 | if (count - len < 2) { | 2368 | if (count - len < 2) { |
2353 | count = -EINVAL; | 2369 | count = -EINVAL; |
2354 | goto out_err; | 2370 | goto out_err; |
2355 | } | 2371 | } |
2356 | len += sprintf(mask_str + len, "\n"); | 2372 | len += sprintf(mask_str + len, "\n"); |
2357 | count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1); | 2373 | count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1); |
2358 | 2374 | ||
2359 | out_err: | 2375 | out_err: |
2360 | mutex_unlock(&tracing_cpumask_update_lock); | 2376 | mutex_unlock(&tracing_cpumask_update_lock); |
2361 | 2377 | ||
2362 | return count; | 2378 | return count; |
2363 | } | 2379 | } |
2364 | 2380 | ||
/*
 * Write handler for "tracing_cpumask": parse a new cpumask from user
 * space and install it. For every CPU whose bit flips, the per-cpu
 * 'disabled' counter is adjusted so tracing stops/resumes atomically
 * with respect to the tracers; that adjustment runs with IRQs off
 * under ftrace_max_lock so no trace entry can race the transition.
 * Returns @count on success or a negative errno.
 */
static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	int err, cpu;
	cpumask_var_t tracing_cpumask_new;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&ftrace_max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tracing_cpumask) &&
		    !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&global_trace.data[cpu]->disabled);
		}
		if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
		    cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&global_trace.data[cpu]->disabled);
		}
	}
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_enable();

	cpumask_copy(tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}
2412 | 2428 | ||
/* File operations for "tracing_cpumask". */
static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
};
2418 | 2434 | ||
2419 | static int tracing_trace_options_show(struct seq_file *m, void *v) | 2435 | static int tracing_trace_options_show(struct seq_file *m, void *v) |
2420 | { | 2436 | { |
2421 | struct tracer_opt *trace_opts; | 2437 | struct tracer_opt *trace_opts; |
2422 | u32 tracer_flags; | 2438 | u32 tracer_flags; |
2423 | int i; | 2439 | int i; |
2424 | 2440 | ||
2425 | mutex_lock(&trace_types_lock); | 2441 | mutex_lock(&trace_types_lock); |
2426 | tracer_flags = current_trace->flags->val; | 2442 | tracer_flags = current_trace->flags->val; |
2427 | trace_opts = current_trace->flags->opts; | 2443 | trace_opts = current_trace->flags->opts; |
2428 | 2444 | ||
2429 | for (i = 0; trace_options[i]; i++) { | 2445 | for (i = 0; trace_options[i]; i++) { |
2430 | if (trace_flags & (1 << i)) | 2446 | if (trace_flags & (1 << i)) |
2431 | seq_printf(m, "%s\n", trace_options[i]); | 2447 | seq_printf(m, "%s\n", trace_options[i]); |
2432 | else | 2448 | else |
2433 | seq_printf(m, "no%s\n", trace_options[i]); | 2449 | seq_printf(m, "no%s\n", trace_options[i]); |
2434 | } | 2450 | } |
2435 | 2451 | ||
2436 | for (i = 0; trace_opts[i].name; i++) { | 2452 | for (i = 0; trace_opts[i].name; i++) { |
2437 | if (tracer_flags & trace_opts[i].bit) | 2453 | if (tracer_flags & trace_opts[i].bit) |
2438 | seq_printf(m, "%s\n", trace_opts[i].name); | 2454 | seq_printf(m, "%s\n", trace_opts[i].name); |
2439 | else | 2455 | else |
2440 | seq_printf(m, "no%s\n", trace_opts[i].name); | 2456 | seq_printf(m, "no%s\n", trace_opts[i].name); |
2441 | } | 2457 | } |
2442 | mutex_unlock(&trace_types_lock); | 2458 | mutex_unlock(&trace_types_lock); |
2443 | 2459 | ||
2444 | return 0; | 2460 | return 0; |
2445 | } | 2461 | } |
2446 | 2462 | ||
2447 | static int __set_tracer_option(struct tracer *trace, | 2463 | static int __set_tracer_option(struct tracer *trace, |
2448 | struct tracer_flags *tracer_flags, | 2464 | struct tracer_flags *tracer_flags, |
2449 | struct tracer_opt *opts, int neg) | 2465 | struct tracer_opt *opts, int neg) |
2450 | { | 2466 | { |
2451 | int ret; | 2467 | int ret; |
2452 | 2468 | ||
2453 | ret = trace->set_flag(tracer_flags->val, opts->bit, !neg); | 2469 | ret = trace->set_flag(tracer_flags->val, opts->bit, !neg); |
2454 | if (ret) | 2470 | if (ret) |
2455 | return ret; | 2471 | return ret; |
2456 | 2472 | ||
2457 | if (neg) | 2473 | if (neg) |
2458 | tracer_flags->val &= ~opts->bit; | 2474 | tracer_flags->val &= ~opts->bit; |
2459 | else | 2475 | else |
2460 | tracer_flags->val |= opts->bit; | 2476 | tracer_flags->val |= opts->bit; |
2461 | return 0; | 2477 | return 0; |
2462 | } | 2478 | } |
2463 | 2479 | ||
2464 | /* Try to assign a tracer specific option */ | 2480 | /* Try to assign a tracer specific option */ |
2465 | static int set_tracer_option(struct tracer *trace, char *cmp, int neg) | 2481 | static int set_tracer_option(struct tracer *trace, char *cmp, int neg) |
2466 | { | 2482 | { |
2467 | struct tracer_flags *tracer_flags = trace->flags; | 2483 | struct tracer_flags *tracer_flags = trace->flags; |
2468 | struct tracer_opt *opts = NULL; | 2484 | struct tracer_opt *opts = NULL; |
2469 | int i; | 2485 | int i; |
2470 | 2486 | ||
2471 | for (i = 0; tracer_flags->opts[i].name; i++) { | 2487 | for (i = 0; tracer_flags->opts[i].name; i++) { |
2472 | opts = &tracer_flags->opts[i]; | 2488 | opts = &tracer_flags->opts[i]; |
2473 | 2489 | ||
2474 | if (strcmp(cmp, opts->name) == 0) | 2490 | if (strcmp(cmp, opts->name) == 0) |
2475 | return __set_tracer_option(trace, trace->flags, | 2491 | return __set_tracer_option(trace, trace->flags, |
2476 | opts, neg); | 2492 | opts, neg); |
2477 | } | 2493 | } |
2478 | 2494 | ||
2479 | return -EINVAL; | 2495 | return -EINVAL; |
2480 | } | 2496 | } |
2481 | 2497 | ||
/* Set or clear one core trace_flags option bit. */
static void set_tracer_flags(unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(trace_flags & mask) == !!enabled)
		return;

	if (enabled)
		trace_flags |= mask;
	else
		trace_flags &= ~mask;
}
2493 | 2509 | ||
2494 | static ssize_t | 2510 | static ssize_t |
2495 | tracing_trace_options_write(struct file *filp, const char __user *ubuf, | 2511 | tracing_trace_options_write(struct file *filp, const char __user *ubuf, |
2496 | size_t cnt, loff_t *ppos) | 2512 | size_t cnt, loff_t *ppos) |
2497 | { | 2513 | { |
2498 | char buf[64]; | 2514 | char buf[64]; |
2499 | char *cmp; | 2515 | char *cmp; |
2500 | int neg = 0; | 2516 | int neg = 0; |
2501 | int ret; | 2517 | int ret; |
2502 | int i; | 2518 | int i; |
2503 | 2519 | ||
2504 | if (cnt >= sizeof(buf)) | 2520 | if (cnt >= sizeof(buf)) |
2505 | return -EINVAL; | 2521 | return -EINVAL; |
2506 | 2522 | ||
2507 | if (copy_from_user(&buf, ubuf, cnt)) | 2523 | if (copy_from_user(&buf, ubuf, cnt)) |
2508 | return -EFAULT; | 2524 | return -EFAULT; |
2509 | 2525 | ||
2510 | buf[cnt] = 0; | 2526 | buf[cnt] = 0; |
2511 | cmp = strstrip(buf); | 2527 | cmp = strstrip(buf); |
2512 | 2528 | ||
2513 | if (strncmp(cmp, "no", 2) == 0) { | 2529 | if (strncmp(cmp, "no", 2) == 0) { |
2514 | neg = 1; | 2530 | neg = 1; |
2515 | cmp += 2; | 2531 | cmp += 2; |
2516 | } | 2532 | } |
2517 | 2533 | ||
2518 | for (i = 0; trace_options[i]; i++) { | 2534 | for (i = 0; trace_options[i]; i++) { |
2519 | if (strcmp(cmp, trace_options[i]) == 0) { | 2535 | if (strcmp(cmp, trace_options[i]) == 0) { |
2520 | set_tracer_flags(1 << i, !neg); | 2536 | set_tracer_flags(1 << i, !neg); |
2521 | break; | 2537 | break; |
2522 | } | 2538 | } |
2523 | } | 2539 | } |
2524 | 2540 | ||
2525 | /* If no option could be set, test the specific tracer options */ | 2541 | /* If no option could be set, test the specific tracer options */ |
2526 | if (!trace_options[i]) { | 2542 | if (!trace_options[i]) { |
2527 | mutex_lock(&trace_types_lock); | 2543 | mutex_lock(&trace_types_lock); |
2528 | ret = set_tracer_option(current_trace, cmp, neg); | 2544 | ret = set_tracer_option(current_trace, cmp, neg); |
2529 | mutex_unlock(&trace_types_lock); | 2545 | mutex_unlock(&trace_types_lock); |
2530 | if (ret) | 2546 | if (ret) |
2531 | return ret; | 2547 | return ret; |
2532 | } | 2548 | } |
2533 | 2549 | ||
2534 | *ppos += cnt; | 2550 | *ppos += cnt; |
2535 | 2551 | ||
2536 | return cnt; | 2552 | return cnt; |
2537 | } | 2553 | } |
2538 | 2554 | ||
/* Open "trace_options" as a single_open seq_file. */
static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	if (tracing_disabled)
		return -ENODEV;
	return single_open(file, tracing_trace_options_show, NULL);
}
2545 | 2561 | ||
/* File operations for "trace_options". */
static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= tracing_trace_options_write,
};
2553 | 2569 | ||
/* Quick-start text served verbatim by <debugfs>/tracing/README. */
static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# mount -t debugfs nodev /sys/kernel/debug\n\n"
	"# cat /sys/kernel/debug/tracing/available_tracers\n"
	"wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n"
	"# cat /sys/kernel/debug/tracing/current_tracer\n"
	"nop\n"
	"# echo sched_switch > /sys/kernel/debug/tracing/current_tracer\n"
	"# cat /sys/kernel/debug/tracing/current_tracer\n"
	"sched_switch\n"
	"# cat /sys/kernel/debug/tracing/trace_options\n"
	"noprint-parent nosym-offset nosym-addr noverbose\n"
	"# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
	"# echo 1 > /sys/kernel/debug/tracing/tracing_enabled\n"
	"# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
	"# echo 0 > /sys/kernel/debug/tracing/tracing_enabled\n"
;
2571 | 2587 | ||
2572 | static ssize_t | 2588 | static ssize_t |
2573 | tracing_readme_read(struct file *filp, char __user *ubuf, | 2589 | tracing_readme_read(struct file *filp, char __user *ubuf, |
2574 | size_t cnt, loff_t *ppos) | 2590 | size_t cnt, loff_t *ppos) |
2575 | { | 2591 | { |
2576 | return simple_read_from_buffer(ubuf, cnt, ppos, | 2592 | return simple_read_from_buffer(ubuf, cnt, ppos, |
2577 | readme_msg, strlen(readme_msg)); | 2593 | readme_msg, strlen(readme_msg)); |
2578 | } | 2594 | } |
2579 | 2595 | ||
2580 | static const struct file_operations tracing_readme_fops = { | 2596 | static const struct file_operations tracing_readme_fops = { |
2581 | .open = tracing_open_generic, | 2597 | .open = tracing_open_generic, |
2582 | .read = tracing_readme_read, | 2598 | .read = tracing_readme_read, |
2583 | }; | 2599 | }; |
2584 | 2600 | ||
2585 | static ssize_t | 2601 | static ssize_t |
2586 | tracing_saved_cmdlines_read(struct file *file, char __user *ubuf, | 2602 | tracing_saved_cmdlines_read(struct file *file, char __user *ubuf, |
2587 | size_t cnt, loff_t *ppos) | 2603 | size_t cnt, loff_t *ppos) |
2588 | { | 2604 | { |
2589 | char *buf_comm; | 2605 | char *buf_comm; |
2590 | char *file_buf; | 2606 | char *file_buf; |
2591 | char *buf; | 2607 | char *buf; |
2592 | int len = 0; | 2608 | int len = 0; |
2593 | int pid; | 2609 | int pid; |
2594 | int i; | 2610 | int i; |
2595 | 2611 | ||
2596 | file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL); | 2612 | file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL); |
2597 | if (!file_buf) | 2613 | if (!file_buf) |
2598 | return -ENOMEM; | 2614 | return -ENOMEM; |
2599 | 2615 | ||
2600 | buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL); | 2616 | buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL); |
2601 | if (!buf_comm) { | 2617 | if (!buf_comm) { |
2602 | kfree(file_buf); | 2618 | kfree(file_buf); |
2603 | return -ENOMEM; | 2619 | return -ENOMEM; |
2604 | } | 2620 | } |
2605 | 2621 | ||
2606 | buf = file_buf; | 2622 | buf = file_buf; |
2607 | 2623 | ||
2608 | for (i = 0; i < SAVED_CMDLINES; i++) { | 2624 | for (i = 0; i < SAVED_CMDLINES; i++) { |
2609 | int r; | 2625 | int r; |
2610 | 2626 | ||
2611 | pid = map_cmdline_to_pid[i]; | 2627 | pid = map_cmdline_to_pid[i]; |
2612 | if (pid == -1 || pid == NO_CMDLINE_MAP) | 2628 | if (pid == -1 || pid == NO_CMDLINE_MAP) |
2613 | continue; | 2629 | continue; |
2614 | 2630 | ||
2615 | trace_find_cmdline(pid, buf_comm); | 2631 | trace_find_cmdline(pid, buf_comm); |
2616 | r = sprintf(buf, "%d %s\n", pid, buf_comm); | 2632 | r = sprintf(buf, "%d %s\n", pid, buf_comm); |
2617 | buf += r; | 2633 | buf += r; |
2618 | len += r; | 2634 | len += r; |
2619 | } | 2635 | } |
2620 | 2636 | ||
2621 | len = simple_read_from_buffer(ubuf, cnt, ppos, | 2637 | len = simple_read_from_buffer(ubuf, cnt, ppos, |
2622 | file_buf, len); | 2638 | file_buf, len); |
2623 | 2639 | ||
2624 | kfree(file_buf); | 2640 | kfree(file_buf); |
2625 | kfree(buf_comm); | 2641 | kfree(buf_comm); |
2626 | 2642 | ||
2627 | return len; | 2643 | return len; |
2628 | } | 2644 | } |
2629 | 2645 | ||
2630 | static const struct file_operations tracing_saved_cmdlines_fops = { | 2646 | static const struct file_operations tracing_saved_cmdlines_fops = { |
2631 | .open = tracing_open_generic, | 2647 | .open = tracing_open_generic, |
2632 | .read = tracing_saved_cmdlines_read, | 2648 | .read = tracing_saved_cmdlines_read, |
2633 | }; | 2649 | }; |
2634 | 2650 | ||
2635 | static ssize_t | 2651 | static ssize_t |
2636 | tracing_ctrl_read(struct file *filp, char __user *ubuf, | 2652 | tracing_ctrl_read(struct file *filp, char __user *ubuf, |
2637 | size_t cnt, loff_t *ppos) | 2653 | size_t cnt, loff_t *ppos) |
2638 | { | 2654 | { |
2639 | char buf[64]; | 2655 | char buf[64]; |
2640 | int r; | 2656 | int r; |
2641 | 2657 | ||
2642 | r = sprintf(buf, "%u\n", tracer_enabled); | 2658 | r = sprintf(buf, "%u\n", tracer_enabled); |
2643 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 2659 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
2644 | } | 2660 | } |
2645 | 2661 | ||
2646 | static ssize_t | 2662 | static ssize_t |
2647 | tracing_ctrl_write(struct file *filp, const char __user *ubuf, | 2663 | tracing_ctrl_write(struct file *filp, const char __user *ubuf, |
2648 | size_t cnt, loff_t *ppos) | 2664 | size_t cnt, loff_t *ppos) |
2649 | { | 2665 | { |
2650 | struct trace_array *tr = filp->private_data; | 2666 | struct trace_array *tr = filp->private_data; |
2651 | char buf[64]; | 2667 | char buf[64]; |
2652 | unsigned long val; | 2668 | unsigned long val; |
2653 | int ret; | 2669 | int ret; |
2654 | 2670 | ||
2655 | if (cnt >= sizeof(buf)) | 2671 | if (cnt >= sizeof(buf)) |
2656 | return -EINVAL; | 2672 | return -EINVAL; |
2657 | 2673 | ||
2658 | if (copy_from_user(&buf, ubuf, cnt)) | 2674 | if (copy_from_user(&buf, ubuf, cnt)) |
2659 | return -EFAULT; | 2675 | return -EFAULT; |
2660 | 2676 | ||
2661 | buf[cnt] = 0; | 2677 | buf[cnt] = 0; |
2662 | 2678 | ||
2663 | ret = strict_strtoul(buf, 10, &val); | 2679 | ret = strict_strtoul(buf, 10, &val); |
2664 | if (ret < 0) | 2680 | if (ret < 0) |
2665 | return ret; | 2681 | return ret; |
2666 | 2682 | ||
2667 | val = !!val; | 2683 | val = !!val; |
2668 | 2684 | ||
2669 | mutex_lock(&trace_types_lock); | 2685 | mutex_lock(&trace_types_lock); |
2670 | if (tracer_enabled ^ val) { | 2686 | if (tracer_enabled ^ val) { |
2671 | if (val) { | 2687 | if (val) { |
2672 | tracer_enabled = 1; | 2688 | tracer_enabled = 1; |
2673 | if (current_trace->start) | 2689 | if (current_trace->start) |
2674 | current_trace->start(tr); | 2690 | current_trace->start(tr); |
2675 | tracing_start(); | 2691 | tracing_start(); |
2676 | } else { | 2692 | } else { |
2677 | tracer_enabled = 0; | 2693 | tracer_enabled = 0; |
2678 | tracing_stop(); | 2694 | tracing_stop(); |
2679 | if (current_trace->stop) | 2695 | if (current_trace->stop) |
2680 | current_trace->stop(tr); | 2696 | current_trace->stop(tr); |
2681 | } | 2697 | } |
2682 | } | 2698 | } |
2683 | mutex_unlock(&trace_types_lock); | 2699 | mutex_unlock(&trace_types_lock); |
2684 | 2700 | ||
2685 | *ppos += cnt; | 2701 | *ppos += cnt; |
2686 | 2702 | ||
2687 | return cnt; | 2703 | return cnt; |
2688 | } | 2704 | } |
2689 | 2705 | ||
2690 | static ssize_t | 2706 | static ssize_t |
2691 | tracing_set_trace_read(struct file *filp, char __user *ubuf, | 2707 | tracing_set_trace_read(struct file *filp, char __user *ubuf, |
2692 | size_t cnt, loff_t *ppos) | 2708 | size_t cnt, loff_t *ppos) |
2693 | { | 2709 | { |
2694 | char buf[MAX_TRACER_SIZE+2]; | 2710 | char buf[MAX_TRACER_SIZE+2]; |
2695 | int r; | 2711 | int r; |
2696 | 2712 | ||
2697 | mutex_lock(&trace_types_lock); | 2713 | mutex_lock(&trace_types_lock); |
2698 | if (current_trace) | 2714 | if (current_trace) |
2699 | r = sprintf(buf, "%s\n", current_trace->name); | 2715 | r = sprintf(buf, "%s\n", current_trace->name); |
2700 | else | 2716 | else |
2701 | r = sprintf(buf, "\n"); | 2717 | r = sprintf(buf, "\n"); |
2702 | mutex_unlock(&trace_types_lock); | 2718 | mutex_unlock(&trace_types_lock); |
2703 | 2719 | ||
2704 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 2720 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
2705 | } | 2721 | } |
2706 | 2722 | ||
2707 | int tracer_init(struct tracer *t, struct trace_array *tr) | 2723 | int tracer_init(struct tracer *t, struct trace_array *tr) |
2708 | { | 2724 | { |
2709 | tracing_reset_online_cpus(tr); | 2725 | tracing_reset_online_cpus(tr); |
2710 | return t->init(tr); | 2726 | return t->init(tr); |
2711 | } | 2727 | } |
2712 | 2728 | ||
2713 | static int tracing_resize_ring_buffer(unsigned long size) | 2729 | static int tracing_resize_ring_buffer(unsigned long size) |
2714 | { | 2730 | { |
2715 | int ret; | 2731 | int ret; |
2716 | 2732 | ||
2717 | /* | 2733 | /* |
2718 | * If kernel or user changes the size of the ring buffer | 2734 | * If kernel or user changes the size of the ring buffer |
2719 | * we use the size that was given, and we can forget about | 2735 | * we use the size that was given, and we can forget about |
2720 | * expanding it later. | 2736 | * expanding it later. |
2721 | */ | 2737 | */ |
2722 | ring_buffer_expanded = 1; | 2738 | ring_buffer_expanded = 1; |
2723 | 2739 | ||
2724 | ret = ring_buffer_resize(global_trace.buffer, size); | 2740 | ret = ring_buffer_resize(global_trace.buffer, size); |
2725 | if (ret < 0) | 2741 | if (ret < 0) |
2726 | return ret; | 2742 | return ret; |
2727 | 2743 | ||
2728 | ret = ring_buffer_resize(max_tr.buffer, size); | 2744 | ret = ring_buffer_resize(max_tr.buffer, size); |
2729 | if (ret < 0) { | 2745 | if (ret < 0) { |
2730 | int r; | 2746 | int r; |
2731 | 2747 | ||
2732 | r = ring_buffer_resize(global_trace.buffer, | 2748 | r = ring_buffer_resize(global_trace.buffer, |
2733 | global_trace.entries); | 2749 | global_trace.entries); |
2734 | if (r < 0) { | 2750 | if (r < 0) { |
2735 | /* | 2751 | /* |
2736 | * AARGH! We are left with different | 2752 | * AARGH! We are left with different |
2737 | * size max buffer!!!! | 2753 | * size max buffer!!!! |
2738 | * The max buffer is our "snapshot" buffer. | 2754 | * The max buffer is our "snapshot" buffer. |
2739 | * When a tracer needs a snapshot (one of the | 2755 | * When a tracer needs a snapshot (one of the |
2740 | * latency tracers), it swaps the max buffer | 2756 | * latency tracers), it swaps the max buffer |
2741 | * with the saved snap shot. We succeeded to | 2757 | * with the saved snap shot. We succeeded to |
2742 | * update the size of the main buffer, but failed to | 2758 | * update the size of the main buffer, but failed to |
2743 | * update the size of the max buffer. But when we tried | 2759 | * update the size of the max buffer. But when we tried |
2744 | * to reset the main buffer to the original size, we | 2760 | * to reset the main buffer to the original size, we |
2745 | * failed there too. This is very unlikely to | 2761 | * failed there too. This is very unlikely to |
2746 | * happen, but if it does, warn and kill all | 2762 | * happen, but if it does, warn and kill all |
2747 | * tracing. | 2763 | * tracing. |
2748 | */ | 2764 | */ |
2749 | WARN_ON(1); | 2765 | WARN_ON(1); |
2750 | tracing_disabled = 1; | 2766 | tracing_disabled = 1; |
2751 | } | 2767 | } |
2752 | return ret; | 2768 | return ret; |
2753 | } | 2769 | } |
2754 | 2770 | ||
2755 | global_trace.entries = size; | 2771 | global_trace.entries = size; |
2756 | 2772 | ||
2757 | return ret; | 2773 | return ret; |
2758 | } | 2774 | } |
2759 | 2775 | ||
2760 | /** | 2776 | /** |
2761 | * tracing_update_buffers - used by tracing facility to expand ring buffers | 2777 | * tracing_update_buffers - used by tracing facility to expand ring buffers |
2762 | * | 2778 | * |
2763 | * To save on memory when the tracing is never used on a system with it | 2779 | * To save on memory when the tracing is never used on a system with it |
2764 | * configured in. The ring buffers are set to a minimum size. But once | 2780 | * configured in. The ring buffers are set to a minimum size. But once |
2765 | * a user starts to use the tracing facility, then they need to grow | 2781 | * a user starts to use the tracing facility, then they need to grow |
2766 | * to their default size. | 2782 | * to their default size. |
2767 | * | 2783 | * |
2768 | * This function is to be called when a tracer is about to be used. | 2784 | * This function is to be called when a tracer is about to be used. |
2769 | */ | 2785 | */ |
2770 | int tracing_update_buffers(void) | 2786 | int tracing_update_buffers(void) |
2771 | { | 2787 | { |
2772 | int ret = 0; | 2788 | int ret = 0; |
2773 | 2789 | ||
2774 | mutex_lock(&trace_types_lock); | 2790 | mutex_lock(&trace_types_lock); |
2775 | if (!ring_buffer_expanded) | 2791 | if (!ring_buffer_expanded) |
2776 | ret = tracing_resize_ring_buffer(trace_buf_size); | 2792 | ret = tracing_resize_ring_buffer(trace_buf_size); |
2777 | mutex_unlock(&trace_types_lock); | 2793 | mutex_unlock(&trace_types_lock); |
2778 | 2794 | ||
2779 | return ret; | 2795 | return ret; |
2780 | } | 2796 | } |
2781 | 2797 | ||
2782 | struct trace_option_dentry; | 2798 | struct trace_option_dentry; |
2783 | 2799 | ||
2784 | static struct trace_option_dentry * | 2800 | static struct trace_option_dentry * |
2785 | create_trace_option_files(struct tracer *tracer); | 2801 | create_trace_option_files(struct tracer *tracer); |
2786 | 2802 | ||
2787 | static void | 2803 | static void |
2788 | destroy_trace_option_files(struct trace_option_dentry *topts); | 2804 | destroy_trace_option_files(struct trace_option_dentry *topts); |
2789 | 2805 | ||
2790 | static int tracing_set_tracer(const char *buf) | 2806 | static int tracing_set_tracer(const char *buf) |
2791 | { | 2807 | { |
2792 | static struct trace_option_dentry *topts; | 2808 | static struct trace_option_dentry *topts; |
2793 | struct trace_array *tr = &global_trace; | 2809 | struct trace_array *tr = &global_trace; |
2794 | struct tracer *t; | 2810 | struct tracer *t; |
2795 | int ret = 0; | 2811 | int ret = 0; |
2796 | 2812 | ||
2797 | mutex_lock(&trace_types_lock); | 2813 | mutex_lock(&trace_types_lock); |
2798 | 2814 | ||
2799 | if (!ring_buffer_expanded) { | 2815 | if (!ring_buffer_expanded) { |
2800 | ret = tracing_resize_ring_buffer(trace_buf_size); | 2816 | ret = tracing_resize_ring_buffer(trace_buf_size); |
2801 | if (ret < 0) | 2817 | if (ret < 0) |
2802 | goto out; | 2818 | goto out; |
2803 | ret = 0; | 2819 | ret = 0; |
2804 | } | 2820 | } |
2805 | 2821 | ||
2806 | for (t = trace_types; t; t = t->next) { | 2822 | for (t = trace_types; t; t = t->next) { |
2807 | if (strcmp(t->name, buf) == 0) | 2823 | if (strcmp(t->name, buf) == 0) |
2808 | break; | 2824 | break; |
2809 | } | 2825 | } |
2810 | if (!t) { | 2826 | if (!t) { |
2811 | ret = -EINVAL; | 2827 | ret = -EINVAL; |
2812 | goto out; | 2828 | goto out; |
2813 | } | 2829 | } |
2814 | if (t == current_trace) | 2830 | if (t == current_trace) |
2815 | goto out; | 2831 | goto out; |
2816 | 2832 | ||
2817 | trace_branch_disable(); | 2833 | trace_branch_disable(); |
2818 | if (current_trace && current_trace->reset) | 2834 | if (current_trace && current_trace->reset) |
2819 | current_trace->reset(tr); | 2835 | current_trace->reset(tr); |
2820 | 2836 | ||
2821 | destroy_trace_option_files(topts); | 2837 | destroy_trace_option_files(topts); |
2822 | 2838 | ||
2823 | current_trace = t; | 2839 | current_trace = t; |
2824 | 2840 | ||
2825 | topts = create_trace_option_files(current_trace); | 2841 | topts = create_trace_option_files(current_trace); |
2826 | 2842 | ||
2827 | if (t->init) { | 2843 | if (t->init) { |
2828 | ret = tracer_init(t, tr); | 2844 | ret = tracer_init(t, tr); |
2829 | if (ret) | 2845 | if (ret) |
2830 | goto out; | 2846 | goto out; |
2831 | } | 2847 | } |
2832 | 2848 | ||
2833 | trace_branch_enable(tr); | 2849 | trace_branch_enable(tr); |
2834 | out: | 2850 | out: |
2835 | mutex_unlock(&trace_types_lock); | 2851 | mutex_unlock(&trace_types_lock); |
2836 | 2852 | ||
2837 | return ret; | 2853 | return ret; |
2838 | } | 2854 | } |
2839 | 2855 | ||
2840 | static ssize_t | 2856 | static ssize_t |
2841 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, | 2857 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, |
2842 | size_t cnt, loff_t *ppos) | 2858 | size_t cnt, loff_t *ppos) |
2843 | { | 2859 | { |
2844 | char buf[MAX_TRACER_SIZE+1]; | 2860 | char buf[MAX_TRACER_SIZE+1]; |
2845 | int i; | 2861 | int i; |
2846 | size_t ret; | 2862 | size_t ret; |
2847 | int err; | 2863 | int err; |
2848 | 2864 | ||
2849 | ret = cnt; | 2865 | ret = cnt; |
2850 | 2866 | ||
2851 | if (cnt > MAX_TRACER_SIZE) | 2867 | if (cnt > MAX_TRACER_SIZE) |
2852 | cnt = MAX_TRACER_SIZE; | 2868 | cnt = MAX_TRACER_SIZE; |
2853 | 2869 | ||
2854 | if (copy_from_user(&buf, ubuf, cnt)) | 2870 | if (copy_from_user(&buf, ubuf, cnt)) |
2855 | return -EFAULT; | 2871 | return -EFAULT; |
2856 | 2872 | ||
2857 | buf[cnt] = 0; | 2873 | buf[cnt] = 0; |
2858 | 2874 | ||
2859 | /* strip ending whitespace. */ | 2875 | /* strip ending whitespace. */ |
2860 | for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) | 2876 | for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) |
2861 | buf[i] = 0; | 2877 | buf[i] = 0; |
2862 | 2878 | ||
2863 | err = tracing_set_tracer(buf); | 2879 | err = tracing_set_tracer(buf); |
2864 | if (err) | 2880 | if (err) |
2865 | return err; | 2881 | return err; |
2866 | 2882 | ||
2867 | *ppos += ret; | 2883 | *ppos += ret; |
2868 | 2884 | ||
2869 | return ret; | 2885 | return ret; |
2870 | } | 2886 | } |
2871 | 2887 | ||
2872 | static ssize_t | 2888 | static ssize_t |
2873 | tracing_max_lat_read(struct file *filp, char __user *ubuf, | 2889 | tracing_max_lat_read(struct file *filp, char __user *ubuf, |
2874 | size_t cnt, loff_t *ppos) | 2890 | size_t cnt, loff_t *ppos) |
2875 | { | 2891 | { |
2876 | unsigned long *ptr = filp->private_data; | 2892 | unsigned long *ptr = filp->private_data; |
2877 | char buf[64]; | 2893 | char buf[64]; |
2878 | int r; | 2894 | int r; |
2879 | 2895 | ||
2880 | r = snprintf(buf, sizeof(buf), "%ld\n", | 2896 | r = snprintf(buf, sizeof(buf), "%ld\n", |
2881 | *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr)); | 2897 | *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr)); |
2882 | if (r > sizeof(buf)) | 2898 | if (r > sizeof(buf)) |
2883 | r = sizeof(buf); | 2899 | r = sizeof(buf); |
2884 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 2900 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
2885 | } | 2901 | } |
2886 | 2902 | ||
2887 | static ssize_t | 2903 | static ssize_t |
2888 | tracing_max_lat_write(struct file *filp, const char __user *ubuf, | 2904 | tracing_max_lat_write(struct file *filp, const char __user *ubuf, |
2889 | size_t cnt, loff_t *ppos) | 2905 | size_t cnt, loff_t *ppos) |
2890 | { | 2906 | { |
2891 | unsigned long *ptr = filp->private_data; | 2907 | unsigned long *ptr = filp->private_data; |
2892 | char buf[64]; | 2908 | char buf[64]; |
2893 | unsigned long val; | 2909 | unsigned long val; |
2894 | int ret; | 2910 | int ret; |
2895 | 2911 | ||
2896 | if (cnt >= sizeof(buf)) | 2912 | if (cnt >= sizeof(buf)) |
2897 | return -EINVAL; | 2913 | return -EINVAL; |
2898 | 2914 | ||
2899 | if (copy_from_user(&buf, ubuf, cnt)) | 2915 | if (copy_from_user(&buf, ubuf, cnt)) |
2900 | return -EFAULT; | 2916 | return -EFAULT; |
2901 | 2917 | ||
2902 | buf[cnt] = 0; | 2918 | buf[cnt] = 0; |
2903 | 2919 | ||
2904 | ret = strict_strtoul(buf, 10, &val); | 2920 | ret = strict_strtoul(buf, 10, &val); |
2905 | if (ret < 0) | 2921 | if (ret < 0) |
2906 | return ret; | 2922 | return ret; |
2907 | 2923 | ||
2908 | *ptr = val * 1000; | 2924 | *ptr = val * 1000; |
2909 | 2925 | ||
2910 | return cnt; | 2926 | return cnt; |
2911 | } | 2927 | } |
2912 | 2928 | ||
2913 | static int tracing_open_pipe(struct inode *inode, struct file *filp) | 2929 | static int tracing_open_pipe(struct inode *inode, struct file *filp) |
2914 | { | 2930 | { |
2915 | long cpu_file = (long) inode->i_private; | 2931 | long cpu_file = (long) inode->i_private; |
2916 | struct trace_iterator *iter; | 2932 | struct trace_iterator *iter; |
2917 | int ret = 0; | 2933 | int ret = 0; |
2918 | 2934 | ||
2919 | if (tracing_disabled) | 2935 | if (tracing_disabled) |
2920 | return -ENODEV; | 2936 | return -ENODEV; |
2921 | 2937 | ||
2922 | mutex_lock(&trace_types_lock); | 2938 | mutex_lock(&trace_types_lock); |
2923 | 2939 | ||
2924 | /* create a buffer to store the information to pass to userspace */ | 2940 | /* create a buffer to store the information to pass to userspace */ |
2925 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | 2941 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); |
2926 | if (!iter) { | 2942 | if (!iter) { |
2927 | ret = -ENOMEM; | 2943 | ret = -ENOMEM; |
2928 | goto out; | 2944 | goto out; |
2929 | } | 2945 | } |
2930 | 2946 | ||
2931 | /* | 2947 | /* |
2932 | * We make a copy of the current tracer to avoid concurrent | 2948 | * We make a copy of the current tracer to avoid concurrent |
2933 | * changes on it while we are reading. | 2949 | * changes on it while we are reading. |
2934 | */ | 2950 | */ |
2935 | iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL); | 2951 | iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL); |
2936 | if (!iter->trace) { | 2952 | if (!iter->trace) { |
2937 | ret = -ENOMEM; | 2953 | ret = -ENOMEM; |
2938 | goto fail; | 2954 | goto fail; |
2939 | } | 2955 | } |
2940 | if (current_trace) | 2956 | if (current_trace) |
2941 | *iter->trace = *current_trace; | 2957 | *iter->trace = *current_trace; |
2942 | 2958 | ||
2943 | if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { | 2959 | if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { |
2944 | ret = -ENOMEM; | 2960 | ret = -ENOMEM; |
2945 | goto fail; | 2961 | goto fail; |
2946 | } | 2962 | } |
2947 | 2963 | ||
2948 | /* trace pipe does not show start of buffer */ | 2964 | /* trace pipe does not show start of buffer */ |
2949 | cpumask_setall(iter->started); | 2965 | cpumask_setall(iter->started); |
2950 | 2966 | ||
2951 | if (trace_flags & TRACE_ITER_LATENCY_FMT) | 2967 | if (trace_flags & TRACE_ITER_LATENCY_FMT) |
2952 | iter->iter_flags |= TRACE_FILE_LAT_FMT; | 2968 | iter->iter_flags |= TRACE_FILE_LAT_FMT; |
2953 | 2969 | ||
2954 | iter->cpu_file = cpu_file; | 2970 | iter->cpu_file = cpu_file; |
2955 | iter->tr = &global_trace; | 2971 | iter->tr = &global_trace; |
2956 | mutex_init(&iter->mutex); | 2972 | mutex_init(&iter->mutex); |
2957 | filp->private_data = iter; | 2973 | filp->private_data = iter; |
2958 | 2974 | ||
2959 | if (iter->trace->pipe_open) | 2975 | if (iter->trace->pipe_open) |
2960 | iter->trace->pipe_open(iter); | 2976 | iter->trace->pipe_open(iter); |
2961 | 2977 | ||
2962 | out: | 2978 | out: |
2963 | mutex_unlock(&trace_types_lock); | 2979 | mutex_unlock(&trace_types_lock); |
2964 | return ret; | 2980 | return ret; |
2965 | 2981 | ||
2966 | fail: | 2982 | fail: |
2967 | kfree(iter->trace); | 2983 | kfree(iter->trace); |
2968 | kfree(iter); | 2984 | kfree(iter); |
2969 | mutex_unlock(&trace_types_lock); | 2985 | mutex_unlock(&trace_types_lock); |
2970 | return ret; | 2986 | return ret; |
2971 | } | 2987 | } |
2972 | 2988 | ||
2973 | static int tracing_release_pipe(struct inode *inode, struct file *file) | 2989 | static int tracing_release_pipe(struct inode *inode, struct file *file) |
2974 | { | 2990 | { |
2975 | struct trace_iterator *iter = file->private_data; | 2991 | struct trace_iterator *iter = file->private_data; |
2976 | 2992 | ||
2977 | mutex_lock(&trace_types_lock); | 2993 | mutex_lock(&trace_types_lock); |
2978 | 2994 | ||
2979 | if (iter->trace->pipe_close) | 2995 | if (iter->trace->pipe_close) |
2980 | iter->trace->pipe_close(iter); | 2996 | iter->trace->pipe_close(iter); |
2981 | 2997 | ||
2982 | mutex_unlock(&trace_types_lock); | 2998 | mutex_unlock(&trace_types_lock); |
2983 | 2999 | ||
2984 | free_cpumask_var(iter->started); | 3000 | free_cpumask_var(iter->started); |
2985 | mutex_destroy(&iter->mutex); | 3001 | mutex_destroy(&iter->mutex); |
2986 | kfree(iter->trace); | 3002 | kfree(iter->trace); |
2987 | kfree(iter); | 3003 | kfree(iter); |
2988 | 3004 | ||
2989 | return 0; | 3005 | return 0; |
2990 | } | 3006 | } |
2991 | 3007 | ||
2992 | static unsigned int | 3008 | static unsigned int |
2993 | tracing_poll_pipe(struct file *filp, poll_table *poll_table) | 3009 | tracing_poll_pipe(struct file *filp, poll_table *poll_table) |
2994 | { | 3010 | { |
2995 | struct trace_iterator *iter = filp->private_data; | 3011 | struct trace_iterator *iter = filp->private_data; |
2996 | 3012 | ||
2997 | if (trace_flags & TRACE_ITER_BLOCK) { | 3013 | if (trace_flags & TRACE_ITER_BLOCK) { |
2998 | /* | 3014 | /* |
2999 | * Always select as readable when in blocking mode | 3015 | * Always select as readable when in blocking mode |
3000 | */ | 3016 | */ |
3001 | return POLLIN | POLLRDNORM; | 3017 | return POLLIN | POLLRDNORM; |
3002 | } else { | 3018 | } else { |
3003 | if (!trace_empty(iter)) | 3019 | if (!trace_empty(iter)) |
3004 | return POLLIN | POLLRDNORM; | 3020 | return POLLIN | POLLRDNORM; |
3005 | poll_wait(filp, &trace_wait, poll_table); | 3021 | poll_wait(filp, &trace_wait, poll_table); |
3006 | if (!trace_empty(iter)) | 3022 | if (!trace_empty(iter)) |
3007 | return POLLIN | POLLRDNORM; | 3023 | return POLLIN | POLLRDNORM; |
3008 | 3024 | ||
3009 | return 0; | 3025 | return 0; |
3010 | } | 3026 | } |
3011 | } | 3027 | } |
3012 | 3028 | ||
3013 | 3029 | ||
3014 | void default_wait_pipe(struct trace_iterator *iter) | 3030 | void default_wait_pipe(struct trace_iterator *iter) |
3015 | { | 3031 | { |
3016 | DEFINE_WAIT(wait); | 3032 | DEFINE_WAIT(wait); |
3017 | 3033 | ||
3018 | prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE); | 3034 | prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE); |
3019 | 3035 | ||
3020 | if (trace_empty(iter)) | 3036 | if (trace_empty(iter)) |
3021 | schedule(); | 3037 | schedule(); |
3022 | 3038 | ||
3023 | finish_wait(&trace_wait, &wait); | 3039 | finish_wait(&trace_wait, &wait); |
3024 | } | 3040 | } |
3025 | 3041 | ||
3026 | /* | 3042 | /* |
3027 | * This is a make-shift waitqueue. | 3043 | * This is a make-shift waitqueue. |
3028 | * A tracer might use this callback on some rare cases: | 3044 | * A tracer might use this callback on some rare cases: |
3029 | * | 3045 | * |
3030 | * 1) the current tracer might hold the runqueue lock when it wakes up | 3046 | * 1) the current tracer might hold the runqueue lock when it wakes up |
3031 | * a reader, hence a deadlock (sched, function, and function graph tracers) | 3047 | * a reader, hence a deadlock (sched, function, and function graph tracers) |
3032 | * 2) the function tracers, trace all functions, we don't want | 3048 | * 2) the function tracers, trace all functions, we don't want |
3033 | * the overhead of calling wake_up and friends | 3049 | * the overhead of calling wake_up and friends |
3034 | * (and tracing them too) | 3050 | * (and tracing them too) |
3035 | * | 3051 | * |
3036 | * Anyway, this is really very primitive wakeup. | 3052 | * Anyway, this is really very primitive wakeup. |
3037 | */ | 3053 | */ |
3038 | void poll_wait_pipe(struct trace_iterator *iter) | 3054 | void poll_wait_pipe(struct trace_iterator *iter) |
3039 | { | 3055 | { |
3040 | set_current_state(TASK_INTERRUPTIBLE); | 3056 | set_current_state(TASK_INTERRUPTIBLE); |
3041 | /* sleep for 100 msecs, and try again. */ | 3057 | /* sleep for 100 msecs, and try again. */ |
3042 | schedule_timeout(HZ / 10); | 3058 | schedule_timeout(HZ / 10); |
3043 | } | 3059 | } |
3044 | 3060 | ||
3045 | /* Must be called with trace_types_lock mutex held. */ | 3061 | /* Must be called with trace_types_lock mutex held. */ |
3046 | static int tracing_wait_pipe(struct file *filp) | 3062 | static int tracing_wait_pipe(struct file *filp) |
3047 | { | 3063 | { |
3048 | struct trace_iterator *iter = filp->private_data; | 3064 | struct trace_iterator *iter = filp->private_data; |
3049 | 3065 | ||
3050 | while (trace_empty(iter)) { | 3066 | while (trace_empty(iter)) { |
3051 | 3067 | ||
3052 | if ((filp->f_flags & O_NONBLOCK)) { | 3068 | if ((filp->f_flags & O_NONBLOCK)) { |
3053 | return -EAGAIN; | 3069 | return -EAGAIN; |
3054 | } | 3070 | } |
3055 | 3071 | ||
3056 | mutex_unlock(&iter->mutex); | 3072 | mutex_unlock(&iter->mutex); |
3057 | 3073 | ||
3058 | iter->trace->wait_pipe(iter); | 3074 | iter->trace->wait_pipe(iter); |
3059 | 3075 | ||
3060 | mutex_lock(&iter->mutex); | 3076 | mutex_lock(&iter->mutex); |
3061 | 3077 | ||
3062 | if (signal_pending(current)) | 3078 | if (signal_pending(current)) |
3063 | return -EINTR; | 3079 | return -EINTR; |
3064 | 3080 | ||
3065 | /* | 3081 | /* |
3066 | * We block until we read something and tracing is disabled. | 3082 | * We block until we read something and tracing is disabled. |
3067 | * We still block if tracing is disabled, but we have never | 3083 | * We still block if tracing is disabled, but we have never |
3068 | * read anything. This allows a user to cat this file, and | 3084 | * read anything. This allows a user to cat this file, and |
3069 | * then enable tracing. But after we have read something, | 3085 | * then enable tracing. But after we have read something, |
3070 | * we give an EOF when tracing is again disabled. | 3086 | * we give an EOF when tracing is again disabled. |
3071 | * | 3087 | * |
3072 | * iter->pos will be 0 if we haven't read anything. | 3088 | * iter->pos will be 0 if we haven't read anything. |
3073 | */ | 3089 | */ |
3074 | if (!tracer_enabled && iter->pos) | 3090 | if (!tracer_enabled && iter->pos) |
3075 | break; | 3091 | break; |
3076 | } | 3092 | } |
3077 | 3093 | ||
3078 | return 1; | 3094 | return 1; |
3079 | } | 3095 | } |
3080 | 3096 | ||
3081 | /* | 3097 | /* |
3082 | * Consumer reader. | 3098 | * Consumer reader. |
3083 | */ | 3099 | */ |
3084 | static ssize_t | 3100 | static ssize_t |
3085 | tracing_read_pipe(struct file *filp, char __user *ubuf, | 3101 | tracing_read_pipe(struct file *filp, char __user *ubuf, |
3086 | size_t cnt, loff_t *ppos) | 3102 | size_t cnt, loff_t *ppos) |
3087 | { | 3103 | { |
3088 | struct trace_iterator *iter = filp->private_data; | 3104 | struct trace_iterator *iter = filp->private_data; |
3089 | static struct tracer *old_tracer; | 3105 | static struct tracer *old_tracer; |
3090 | ssize_t sret; | 3106 | ssize_t sret; |
3091 | 3107 | ||
3092 | /* return any leftover data */ | 3108 | /* return any leftover data */ |
3093 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); | 3109 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); |
3094 | if (sret != -EBUSY) | 3110 | if (sret != -EBUSY) |
3095 | return sret; | 3111 | return sret; |
3096 | 3112 | ||
3097 | trace_seq_init(&iter->seq); | 3113 | trace_seq_init(&iter->seq); |
3098 | 3114 | ||
3099 | /* copy the tracer to avoid using a global lock all around */ | 3115 | /* copy the tracer to avoid using a global lock all around */ |
3100 | mutex_lock(&trace_types_lock); | 3116 | mutex_lock(&trace_types_lock); |
3101 | if (unlikely(old_tracer != current_trace && current_trace)) { | 3117 | if (unlikely(old_tracer != current_trace && current_trace)) { |
3102 | old_tracer = current_trace; | 3118 | old_tracer = current_trace; |
3103 | *iter->trace = *current_trace; | 3119 | *iter->trace = *current_trace; |
3104 | } | 3120 | } |
3105 | mutex_unlock(&trace_types_lock); | 3121 | mutex_unlock(&trace_types_lock); |
3106 | 3122 | ||
3107 | /* | 3123 | /* |
3108 | * Avoid more than one consumer on a single file descriptor | 3124 | * Avoid more than one consumer on a single file descriptor |
3109 | * This is just a matter of traces coherency, the ring buffer itself | 3125 | * This is just a matter of traces coherency, the ring buffer itself |
3110 | * is protected. | 3126 | * is protected. |
3111 | */ | 3127 | */ |
3112 | mutex_lock(&iter->mutex); | 3128 | mutex_lock(&iter->mutex); |
3113 | if (iter->trace->read) { | 3129 | if (iter->trace->read) { |
3114 | sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); | 3130 | sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); |
3115 | if (sret) | 3131 | if (sret) |
3116 | goto out; | 3132 | goto out; |
3117 | } | 3133 | } |
3118 | 3134 | ||
3119 | waitagain: | 3135 | waitagain: |
3120 | sret = tracing_wait_pipe(filp); | 3136 | sret = tracing_wait_pipe(filp); |
3121 | if (sret <= 0) | 3137 | if (sret <= 0) |
3122 | goto out; | 3138 | goto out; |
3123 | 3139 | ||
3124 | /* stop when tracing is finished */ | 3140 | /* stop when tracing is finished */ |
3125 | if (trace_empty(iter)) { | 3141 | if (trace_empty(iter)) { |
3126 | sret = 0; | 3142 | sret = 0; |
3127 | goto out; | 3143 | goto out; |
3128 | } | 3144 | } |
3129 | 3145 | ||
3130 | if (cnt >= PAGE_SIZE) | 3146 | if (cnt >= PAGE_SIZE) |
3131 | cnt = PAGE_SIZE - 1; | 3147 | cnt = PAGE_SIZE - 1; |
3132 | 3148 | ||
3133 | /* reset all but tr, trace, and overruns */ | 3149 | /* reset all but tr, trace, and overruns */ |
3134 | memset(&iter->seq, 0, | 3150 | memset(&iter->seq, 0, |
3135 | sizeof(struct trace_iterator) - | 3151 | sizeof(struct trace_iterator) - |
3136 | offsetof(struct trace_iterator, seq)); | 3152 | offsetof(struct trace_iterator, seq)); |
3137 | iter->pos = -1; | 3153 | iter->pos = -1; |
3138 | 3154 | ||
3139 | trace_event_read_lock(); | 3155 | trace_event_read_lock(); |
3140 | trace_access_lock(iter->cpu_file); | 3156 | trace_access_lock(iter->cpu_file); |
3141 | while (find_next_entry_inc(iter) != NULL) { | 3157 | while (find_next_entry_inc(iter) != NULL) { |
3142 | enum print_line_t ret; | 3158 | enum print_line_t ret; |
3143 | int len = iter->seq.len; | 3159 | int len = iter->seq.len; |
3144 | 3160 | ||
3145 | ret = print_trace_line(iter); | 3161 | ret = print_trace_line(iter); |
3146 | if (ret == TRACE_TYPE_PARTIAL_LINE) { | 3162 | if (ret == TRACE_TYPE_PARTIAL_LINE) { |
3147 | /* don't print partial lines */ | 3163 | /* don't print partial lines */ |
3148 | iter->seq.len = len; | 3164 | iter->seq.len = len; |
3149 | break; | 3165 | break; |
3150 | } | 3166 | } |
3151 | if (ret != TRACE_TYPE_NO_CONSUME) | 3167 | if (ret != TRACE_TYPE_NO_CONSUME) |
3152 | trace_consume(iter); | 3168 | trace_consume(iter); |
3153 | 3169 | ||
3154 | if (iter->seq.len >= cnt) | 3170 | if (iter->seq.len >= cnt) |
3155 | break; | 3171 | break; |
3156 | } | 3172 | } |
3157 | trace_access_unlock(iter->cpu_file); | 3173 | trace_access_unlock(iter->cpu_file); |
3158 | trace_event_read_unlock(); | 3174 | trace_event_read_unlock(); |
3159 | 3175 | ||
3160 | /* Now copy what we have to the user */ | 3176 | /* Now copy what we have to the user */ |
3161 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); | 3177 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); |
3162 | if (iter->seq.readpos >= iter->seq.len) | 3178 | if (iter->seq.readpos >= iter->seq.len) |
3163 | trace_seq_init(&iter->seq); | 3179 | trace_seq_init(&iter->seq); |
3164 | 3180 | ||
3165 | /* | 3181 | /* |
3166 | * If there was nothing to send to user, inspite of consuming trace | 3182 | * If there was nothing to send to user, inspite of consuming trace |
3167 | * entries, go back to wait for more entries. | 3183 | * entries, go back to wait for more entries. |
3168 | */ | 3184 | */ |
3169 | if (sret == -EBUSY) | 3185 | if (sret == -EBUSY) |
3170 | goto waitagain; | 3186 | goto waitagain; |
3171 | 3187 | ||
3172 | out: | 3188 | out: |
3173 | mutex_unlock(&iter->mutex); | 3189 | mutex_unlock(&iter->mutex); |
3174 | 3190 | ||
3175 | return sret; | 3191 | return sret; |
3176 | } | 3192 | } |
3177 | 3193 | ||
3178 | static void tracing_pipe_buf_release(struct pipe_inode_info *pipe, | 3194 | static void tracing_pipe_buf_release(struct pipe_inode_info *pipe, |
3179 | struct pipe_buffer *buf) | 3195 | struct pipe_buffer *buf) |
3180 | { | 3196 | { |
3181 | __free_page(buf->page); | 3197 | __free_page(buf->page); |
3182 | } | 3198 | } |
3183 | 3199 | ||
3184 | static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, | 3200 | static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, |
3185 | unsigned int idx) | 3201 | unsigned int idx) |
3186 | { | 3202 | { |
3187 | __free_page(spd->pages[idx]); | 3203 | __free_page(spd->pages[idx]); |
3188 | } | 3204 | } |
3189 | 3205 | ||
/*
 * Pipe buffer operations for trace_pipe splice.  All callbacks are the
 * generic ones except release, which must free the page allocated in
 * tracing_splice_read_pipe().
 */
static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = tracing_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};
3199 | 3215 | ||
/*
 * Render trace entries into iter->seq until either @rem bytes have been
 * produced or the trace runs dry.  A line that would overshoot @rem, or
 * that no longer fits in the seq buffer, is rolled back (its bytes are
 * subtracted from seq.len) and left unconsumed for the next page.
 *
 * Returns the number of bytes still wanted after this page; 0 tells the
 * caller to stop filling pages.
 */
static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		count = iter->seq.len;
		ret = print_trace_line(iter);
		/* count is now the number of bytes this line added */
		count = iter->seq.len - count;
		if (rem < count) {
			/* line would overshoot the request: roll it back */
			rem = 0;
			iter->seq.len -= count;
			break;
		}
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* seq buffer full: drop the truncated line */
			iter->seq.len -= count;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!find_next_entry_inc(iter)) {
			/* trace is empty: stop the whole splice */
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}
3233 | 3249 | ||
/*
 * Splice handler for trace_pipe.  Renders trace entries into freshly
 * allocated pages (one iter->seq buffer per page, filled by
 * tracing_fill_pipe_page()) and hands them to the pipe; the pages are
 * freed by tracing_pipe_buf_release()/tracing_spd_release_pipe().
 */
static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages = 0, /* This gets updated below. */
		.flags = flags,
		.ops = &tracing_pipe_buf_ops,
		.spd_release = tracing_spd_release_pipe,
	};
	static struct tracer *old_tracer;
	ssize_t ret;
	size_t rem;
	unsigned int i;

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(old_tracer != current_trace && current_trace)) {
		old_tracer = current_trace;
		*iter->trace = *current_trace;
	}
	mutex_unlock(&trace_types_lock);

	/* iter->mutex serializes consumers on this file descriptor */
	mutex_lock(&iter->mutex);

	/* a tracer may provide its own splice implementation */
	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(pages[i]),
					  iter->seq.len);
		if (ret < 0) {
			__free_page(pages[i]);
			break;
		}
		partial[i].offset = 0;
		partial[i].len = iter->seq.len;

		/* reset the seq buffer for the next page */
		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	return splice_to_pipe(pipe, &spd);

out_err:
	mutex_unlock(&iter->mutex);

	return ret;
}
3320 | 3336 | ||
3321 | static ssize_t | 3337 | static ssize_t |
3322 | tracing_entries_read(struct file *filp, char __user *ubuf, | 3338 | tracing_entries_read(struct file *filp, char __user *ubuf, |
3323 | size_t cnt, loff_t *ppos) | 3339 | size_t cnt, loff_t *ppos) |
3324 | { | 3340 | { |
3325 | struct trace_array *tr = filp->private_data; | 3341 | struct trace_array *tr = filp->private_data; |
3326 | char buf[96]; | 3342 | char buf[96]; |
3327 | int r; | 3343 | int r; |
3328 | 3344 | ||
3329 | mutex_lock(&trace_types_lock); | 3345 | mutex_lock(&trace_types_lock); |
3330 | if (!ring_buffer_expanded) | 3346 | if (!ring_buffer_expanded) |
3331 | r = sprintf(buf, "%lu (expanded: %lu)\n", | 3347 | r = sprintf(buf, "%lu (expanded: %lu)\n", |
3332 | tr->entries >> 10, | 3348 | tr->entries >> 10, |
3333 | trace_buf_size >> 10); | 3349 | trace_buf_size >> 10); |
3334 | else | 3350 | else |
3335 | r = sprintf(buf, "%lu\n", tr->entries >> 10); | 3351 | r = sprintf(buf, "%lu\n", tr->entries >> 10); |
3336 | mutex_unlock(&trace_types_lock); | 3352 | mutex_unlock(&trace_types_lock); |
3337 | 3353 | ||
3338 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 3354 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
3339 | } | 3355 | } |
3340 | 3356 | ||
/*
 * Write handler for the ring buffer size control file: resize the
 * per-cpu ring buffers.  The input is parsed as a size in KB.  Tracing
 * is stopped and every cpu buffer disabled around the resize so no
 * events are recorded into a buffer that is being reallocated.
 */
static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	unsigned long val;
	char buf[64];
	int ret, cpu;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tracing_stop();

	/* disable all cpu buffers */
	for_each_tracing_cpu(cpu) {
		if (global_trace.data[cpu])
			atomic_inc(&global_trace.data[cpu]->disabled);
		if (max_tr.data[cpu])
			atomic_inc(&max_tr.data[cpu]->disabled);
	}

	/* value is in KB */
	val <<= 10;

	if (val != global_trace.entries) {
		ret = tracing_resize_ring_buffer(val);
		if (ret < 0) {
			/* resize failed: report the error as the result */
			cnt = ret;
			goto out;
		}
	}

	*ppos += cnt;

	/* If check pages failed, return ENOMEM */
	if (tracing_disabled)
		cnt = -ENOMEM;
 out:
	/* re-enable the buffers and restart tracing on every path */
	for_each_tracing_cpu(cpu) {
		if (global_trace.data[cpu])
			atomic_dec(&global_trace.data[cpu]->disabled);
		if (max_tr.data[cpu])
			atomic_dec(&max_tr.data[cpu]->disabled);
	}

	tracing_start();
	max_tr.entries = global_trace.entries;
	mutex_unlock(&trace_types_lock);

	return cnt;
}
3407 | 3423 | ||
3408 | static int mark_printk(const char *fmt, ...) | 3424 | static int mark_printk(const char *fmt, ...) |
3409 | { | 3425 | { |
3410 | int ret; | 3426 | int ret; |
3411 | va_list args; | 3427 | va_list args; |
3412 | va_start(args, fmt); | 3428 | va_start(args, fmt); |
3413 | ret = trace_vprintk(0, fmt, args); | 3429 | ret = trace_vprintk(0, fmt, args); |
3414 | va_end(args); | 3430 | va_end(args); |
3415 | return ret; | 3431 | return ret; |
3416 | } | 3432 | } |
3417 | 3433 | ||
3418 | static ssize_t | 3434 | static ssize_t |
3419 | tracing_mark_write(struct file *filp, const char __user *ubuf, | 3435 | tracing_mark_write(struct file *filp, const char __user *ubuf, |
3420 | size_t cnt, loff_t *fpos) | 3436 | size_t cnt, loff_t *fpos) |
3421 | { | 3437 | { |
3422 | char *buf; | 3438 | char *buf; |
3423 | 3439 | ||
3424 | if (tracing_disabled) | 3440 | if (tracing_disabled) |
3425 | return -EINVAL; | 3441 | return -EINVAL; |
3426 | 3442 | ||
3427 | if (cnt > TRACE_BUF_SIZE) | 3443 | if (cnt > TRACE_BUF_SIZE) |
3428 | cnt = TRACE_BUF_SIZE; | 3444 | cnt = TRACE_BUF_SIZE; |
3429 | 3445 | ||
3430 | buf = kmalloc(cnt + 2, GFP_KERNEL); | 3446 | buf = kmalloc(cnt + 2, GFP_KERNEL); |
3431 | if (buf == NULL) | 3447 | if (buf == NULL) |
3432 | return -ENOMEM; | 3448 | return -ENOMEM; |
3433 | 3449 | ||
3434 | if (copy_from_user(buf, ubuf, cnt)) { | 3450 | if (copy_from_user(buf, ubuf, cnt)) { |
3435 | kfree(buf); | 3451 | kfree(buf); |
3436 | return -EFAULT; | 3452 | return -EFAULT; |
3437 | } | 3453 | } |
3438 | if (buf[cnt-1] != '\n') { | 3454 | if (buf[cnt-1] != '\n') { |
3439 | buf[cnt] = '\n'; | 3455 | buf[cnt] = '\n'; |
3440 | buf[cnt+1] = '\0'; | 3456 | buf[cnt+1] = '\0'; |
3441 | } else | 3457 | } else |
3442 | buf[cnt] = '\0'; | 3458 | buf[cnt] = '\0'; |
3443 | 3459 | ||
3444 | cnt = mark_printk("%s", buf); | 3460 | cnt = mark_printk("%s", buf); |
3445 | kfree(buf); | 3461 | kfree(buf); |
3446 | *fpos += cnt; | 3462 | *fpos += cnt; |
3447 | 3463 | ||
3448 | return cnt; | 3464 | return cnt; |
3449 | } | 3465 | } |
3450 | 3466 | ||
3451 | static int tracing_clock_show(struct seq_file *m, void *v) | 3467 | static int tracing_clock_show(struct seq_file *m, void *v) |
3452 | { | 3468 | { |
3453 | int i; | 3469 | int i; |
3454 | 3470 | ||
3455 | for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) | 3471 | for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) |
3456 | seq_printf(m, | 3472 | seq_printf(m, |
3457 | "%s%s%s%s", i ? " " : "", | 3473 | "%s%s%s%s", i ? " " : "", |
3458 | i == trace_clock_id ? "[" : "", trace_clocks[i].name, | 3474 | i == trace_clock_id ? "[" : "", trace_clocks[i].name, |
3459 | i == trace_clock_id ? "]" : ""); | 3475 | i == trace_clock_id ? "]" : ""); |
3460 | seq_putc(m, '\n'); | 3476 | seq_putc(m, '\n'); |
3461 | 3477 | ||
3462 | return 0; | 3478 | return 0; |
3463 | } | 3479 | } |
3464 | 3480 | ||
3465 | static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | 3481 | static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, |
3466 | size_t cnt, loff_t *fpos) | 3482 | size_t cnt, loff_t *fpos) |
3467 | { | 3483 | { |
3468 | char buf[64]; | 3484 | char buf[64]; |
3469 | const char *clockstr; | 3485 | const char *clockstr; |
3470 | int i; | 3486 | int i; |
3471 | 3487 | ||
3472 | if (cnt >= sizeof(buf)) | 3488 | if (cnt >= sizeof(buf)) |
3473 | return -EINVAL; | 3489 | return -EINVAL; |
3474 | 3490 | ||
3475 | if (copy_from_user(&buf, ubuf, cnt)) | 3491 | if (copy_from_user(&buf, ubuf, cnt)) |
3476 | return -EFAULT; | 3492 | return -EFAULT; |
3477 | 3493 | ||
3478 | buf[cnt] = 0; | 3494 | buf[cnt] = 0; |
3479 | 3495 | ||
3480 | clockstr = strstrip(buf); | 3496 | clockstr = strstrip(buf); |
3481 | 3497 | ||
3482 | for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { | 3498 | for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { |
3483 | if (strcmp(trace_clocks[i].name, clockstr) == 0) | 3499 | if (strcmp(trace_clocks[i].name, clockstr) == 0) |
3484 | break; | 3500 | break; |
3485 | } | 3501 | } |
3486 | if (i == ARRAY_SIZE(trace_clocks)) | 3502 | if (i == ARRAY_SIZE(trace_clocks)) |
3487 | return -EINVAL; | 3503 | return -EINVAL; |
3488 | 3504 | ||
3489 | trace_clock_id = i; | 3505 | trace_clock_id = i; |
3490 | 3506 | ||
3491 | mutex_lock(&trace_types_lock); | 3507 | mutex_lock(&trace_types_lock); |
3492 | 3508 | ||
3493 | ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func); | 3509 | ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func); |
3494 | if (max_tr.buffer) | 3510 | if (max_tr.buffer) |
3495 | ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func); | 3511 | ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func); |
3496 | 3512 | ||
3497 | mutex_unlock(&trace_types_lock); | 3513 | mutex_unlock(&trace_types_lock); |
3498 | 3514 | ||
3499 | *fpos += cnt; | 3515 | *fpos += cnt; |
3500 | 3516 | ||
3501 | return cnt; | 3517 | return cnt; |
3502 | } | 3518 | } |
3503 | 3519 | ||
3504 | static int tracing_clock_open(struct inode *inode, struct file *file) | 3520 | static int tracing_clock_open(struct inode *inode, struct file *file) |
3505 | { | 3521 | { |
3506 | if (tracing_disabled) | 3522 | if (tracing_disabled) |
3507 | return -ENODEV; | 3523 | return -ENODEV; |
3508 | return single_open(file, tracing_clock_show, NULL); | 3524 | return single_open(file, tracing_clock_show, NULL); |
3509 | } | 3525 | } |
3510 | 3526 | ||
/* fops for the max-latency control file. */
static const struct file_operations tracing_max_lat_fops = {
	.open = tracing_open_generic,
	.read = tracing_max_lat_read,
	.write = tracing_max_lat_write,
};
3516 | 3532 | ||
/* fops for the tracing on/off control file. */
static const struct file_operations tracing_ctrl_fops = {
	.open = tracing_open_generic,
	.read = tracing_ctrl_read,
	.write = tracing_ctrl_write,
};
3522 | 3538 | ||
/* fops for the current-tracer selection file. */
static const struct file_operations set_tracer_fops = {
	.open = tracing_open_generic,
	.read = tracing_set_trace_read,
	.write = tracing_set_trace_write,
};
3528 | 3544 | ||
/* fops for trace_pipe: a consuming reader with poll and splice support. */
static const struct file_operations tracing_pipe_fops = {
	.open = tracing_open_pipe,
	.poll = tracing_poll_pipe,
	.read = tracing_read_pipe,
	.splice_read = tracing_splice_read_pipe,
	.release = tracing_release_pipe,
};
3536 | 3552 | ||
/* fops for the ring buffer size (KB) control file. */
static const struct file_operations tracing_entries_fops = {
	.open = tracing_open_generic,
	.read = tracing_entries_read,
	.write = tracing_entries_write,
};
3542 | 3558 | ||
/* fops for trace_marker: write-only, injects user strings into the trace. */
static const struct file_operations tracing_mark_fops = {
	.open = tracing_open_generic,
	.write = tracing_mark_write,
};
3547 | 3563 | ||
/* fops for trace_clock: seq_file read of the clock list, write to select. */
static const struct file_operations trace_clock_fops = {
	.open = tracing_clock_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = tracing_clock_write,
};
3555 | 3571 | ||
/* Per-open state for the per-cpu raw buffer read files. */
struct ftrace_buffer_info {
	struct trace_array *tr;		/* trace array being read */
	void *spare;			/* spare page swapped with the ring buffer */
	int cpu;			/* cpu whose buffer this file reads */
	unsigned int read;		/* bytes already consumed from spare */
};
3562 | 3578 | ||
3563 | static int tracing_buffers_open(struct inode *inode, struct file *filp) | 3579 | static int tracing_buffers_open(struct inode *inode, struct file *filp) |
3564 | { | 3580 | { |
3565 | int cpu = (int)(long)inode->i_private; | 3581 | int cpu = (int)(long)inode->i_private; |
3566 | struct ftrace_buffer_info *info; | 3582 | struct ftrace_buffer_info *info; |
3567 | 3583 | ||
3568 | if (tracing_disabled) | 3584 | if (tracing_disabled) |
3569 | return -ENODEV; | 3585 | return -ENODEV; |
3570 | 3586 | ||
3571 | info = kzalloc(sizeof(*info), GFP_KERNEL); | 3587 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
3572 | if (!info) | 3588 | if (!info) |
3573 | return -ENOMEM; | 3589 | return -ENOMEM; |
3574 | 3590 | ||
3575 | info->tr = &global_trace; | 3591 | info->tr = &global_trace; |
3576 | info->cpu = cpu; | 3592 | info->cpu = cpu; |
3577 | info->spare = NULL; | 3593 | info->spare = NULL; |
3578 | /* Force reading ring buffer for first read */ | 3594 | /* Force reading ring buffer for first read */ |
3579 | info->read = (unsigned int)-1; | 3595 | info->read = (unsigned int)-1; |
3580 | 3596 | ||
3581 | filp->private_data = info; | 3597 | filp->private_data = info; |
3582 | 3598 | ||
3583 | return nonseekable_open(inode, filp); | 3599 | return nonseekable_open(inode, filp); |
3584 | } | 3600 | } |
3585 | 3601 | ||
/*
 * Read handler for the raw per-cpu buffer file: copy raw ring buffer
 * pages to user space.  A "spare" page is swapped with a ring buffer
 * page, then drained across one or more reads before the next swap.
 */
static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	unsigned int pos;
	ssize_t ret;
	size_t size;

	if (!count)
		return 0;

	/* lazily allocate the spare page on first read */
	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(info->tr->buffer);
	if (!info->spare)
		return -ENOMEM;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

	info->read = 0;

	trace_access_lock(info->cpu);
	ret = ring_buffer_read_page(info->tr->buffer,
				    &info->spare,
				    count,
				    info->cpu, 0);
	trace_access_unlock(info->cpu);
	/* a negative return means no data; report EOF, not an error */
	if (ret < 0)
		return 0;

	pos = ring_buffer_page_len(info->spare);

	/* zero the unused tail so user space never sees stale data */
	if (pos < PAGE_SIZE)
		memset(info->spare + pos, 0, PAGE_SIZE - pos);

 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	/* copy_to_user() returns the number of bytes NOT copied */
	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)
		return -EFAULT;
	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}
3638 | 3654 | ||
3639 | static int tracing_buffers_release(struct inode *inode, struct file *file) | 3655 | static int tracing_buffers_release(struct inode *inode, struct file *file) |
3640 | { | 3656 | { |
3641 | struct ftrace_buffer_info *info = file->private_data; | 3657 | struct ftrace_buffer_info *info = file->private_data; |
3642 | 3658 | ||
3643 | if (info->spare) | 3659 | if (info->spare) |
3644 | ring_buffer_free_read_page(info->tr->buffer, info->spare); | 3660 | ring_buffer_free_read_page(info->tr->buffer, info->spare); |
3645 | kfree(info); | 3661 | kfree(info); |
3646 | 3662 | ||
3647 | return 0; | 3663 | return 0; |
3648 | } | 3664 | } |
3649 | 3665 | ||
/* Refcounted handle on a ring buffer page handed to a pipe via splice. */
struct buffer_ref {
	struct ring_buffer *buffer;	/* buffer the page must be returned to */
	void *page;			/* the ring buffer page itself */
	int ref;			/* reference count */
};
3655 | 3671 | ||
3656 | static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, | 3672 | static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, |
3657 | struct pipe_buffer *buf) | 3673 | struct pipe_buffer *buf) |
3658 | { | 3674 | { |
3659 | struct buffer_ref *ref = (struct buffer_ref *)buf->private; | 3675 | struct buffer_ref *ref = (struct buffer_ref *)buf->private; |
3660 | 3676 | ||
3661 | if (--ref->ref) | 3677 | if (--ref->ref) |
3662 | return; | 3678 | return; |
3663 | 3679 | ||
3664 | ring_buffer_free_read_page(ref->buffer, ref->page); | 3680 | ring_buffer_free_read_page(ref->buffer, ref->page); |
3665 | kfree(ref); | 3681 | kfree(ref); |
3666 | buf->private = 0; | 3682 | buf->private = 0; |
3667 | } | 3683 | } |
3668 | 3684 | ||
/* Ring buffer pages cannot be stolen from the pipe: always refuse. */
static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe,
				 struct pipe_buffer *buf)
{
	return 1;
}
3674 | 3690 | ||
3675 | static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, | 3691 | static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, |
3676 | struct pipe_buffer *buf) | 3692 | struct pipe_buffer *buf) |
3677 | { | 3693 | { |
3678 | struct buffer_ref *ref = (struct buffer_ref *)buf->private; | 3694 | struct buffer_ref *ref = (struct buffer_ref *)buf->private; |
3679 | 3695 | ||
3680 | ref->ref++; | 3696 | ref->ref++; |
3681 | } | 3697 | } |
3682 | 3698 | ||
/*
 * Pipe buffer operations for a buffer.  get/release manage the
 * buffer_ref refcount; steal always refuses since the pages belong to
 * the ring buffer.
 */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = buffer_pipe_buf_release,
	.steal = buffer_pipe_buf_steal,
	.get = buffer_pipe_buf_get,
};
3693 | 3709 | ||
/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 * Mirrors buffer_pipe_buf_release() but works on the spd entry,
 * since these pages never made it into the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	/* Still referenced elsewhere (a prior splice may hold it). */
	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}
3710 | 3726 | ||
3711 | static ssize_t | 3727 | static ssize_t |
3712 | tracing_buffers_splice_read(struct file *file, loff_t *ppos, | 3728 | tracing_buffers_splice_read(struct file *file, loff_t *ppos, |
3713 | struct pipe_inode_info *pipe, size_t len, | 3729 | struct pipe_inode_info *pipe, size_t len, |
3714 | unsigned int flags) | 3730 | unsigned int flags) |
3715 | { | 3731 | { |
3716 | struct ftrace_buffer_info *info = file->private_data; | 3732 | struct ftrace_buffer_info *info = file->private_data; |
3717 | struct partial_page partial[PIPE_BUFFERS]; | 3733 | struct partial_page partial[PIPE_BUFFERS]; |
3718 | struct page *pages[PIPE_BUFFERS]; | 3734 | struct page *pages[PIPE_BUFFERS]; |
3719 | struct splice_pipe_desc spd = { | 3735 | struct splice_pipe_desc spd = { |
3720 | .pages = pages, | 3736 | .pages = pages, |
3721 | .partial = partial, | 3737 | .partial = partial, |
3722 | .flags = flags, | 3738 | .flags = flags, |
3723 | .ops = &buffer_pipe_buf_ops, | 3739 | .ops = &buffer_pipe_buf_ops, |
3724 | .spd_release = buffer_spd_release, | 3740 | .spd_release = buffer_spd_release, |
3725 | }; | 3741 | }; |
3726 | struct buffer_ref *ref; | 3742 | struct buffer_ref *ref; |
3727 | int entries, size, i; | 3743 | int entries, size, i; |
3728 | size_t ret; | 3744 | size_t ret; |
3729 | 3745 | ||
3730 | if (*ppos & (PAGE_SIZE - 1)) { | 3746 | if (*ppos & (PAGE_SIZE - 1)) { |
3731 | WARN_ONCE(1, "Ftrace: previous read must page-align\n"); | 3747 | WARN_ONCE(1, "Ftrace: previous read must page-align\n"); |
3732 | return -EINVAL; | 3748 | return -EINVAL; |
3733 | } | 3749 | } |
3734 | 3750 | ||
3735 | if (len & (PAGE_SIZE - 1)) { | 3751 | if (len & (PAGE_SIZE - 1)) { |
3736 | WARN_ONCE(1, "Ftrace: splice_read should page-align\n"); | 3752 | WARN_ONCE(1, "Ftrace: splice_read should page-align\n"); |
3737 | if (len < PAGE_SIZE) | 3753 | if (len < PAGE_SIZE) |
3738 | return -EINVAL; | 3754 | return -EINVAL; |
3739 | len &= PAGE_MASK; | 3755 | len &= PAGE_MASK; |
3740 | } | 3756 | } |
3741 | 3757 | ||
3742 | trace_access_lock(info->cpu); | 3758 | trace_access_lock(info->cpu); |
3743 | entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); | 3759 | entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); |
3744 | 3760 | ||
3745 | for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) { | 3761 | for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) { |
3746 | struct page *page; | 3762 | struct page *page; |
3747 | int r; | 3763 | int r; |
3748 | 3764 | ||
3749 | ref = kzalloc(sizeof(*ref), GFP_KERNEL); | 3765 | ref = kzalloc(sizeof(*ref), GFP_KERNEL); |
3750 | if (!ref) | 3766 | if (!ref) |
3751 | break; | 3767 | break; |
3752 | 3768 | ||
3753 | ref->ref = 1; | 3769 | ref->ref = 1; |
3754 | ref->buffer = info->tr->buffer; | 3770 | ref->buffer = info->tr->buffer; |
3755 | ref->page = ring_buffer_alloc_read_page(ref->buffer); | 3771 | ref->page = ring_buffer_alloc_read_page(ref->buffer); |
3756 | if (!ref->page) { | 3772 | if (!ref->page) { |
3757 | kfree(ref); | 3773 | kfree(ref); |
3758 | break; | 3774 | break; |
3759 | } | 3775 | } |
3760 | 3776 | ||
3761 | r = ring_buffer_read_page(ref->buffer, &ref->page, | 3777 | r = ring_buffer_read_page(ref->buffer, &ref->page, |
3762 | len, info->cpu, 1); | 3778 | len, info->cpu, 1); |
3763 | if (r < 0) { | 3779 | if (r < 0) { |
3764 | ring_buffer_free_read_page(ref->buffer, | 3780 | ring_buffer_free_read_page(ref->buffer, |
3765 | ref->page); | 3781 | ref->page); |
3766 | kfree(ref); | 3782 | kfree(ref); |
3767 | break; | 3783 | break; |
3768 | } | 3784 | } |
3769 | 3785 | ||
3770 | /* | 3786 | /* |
3771 | * zero out any left over data, this is going to | 3787 | * zero out any left over data, this is going to |
3772 | * user land. | 3788 | * user land. |
3773 | */ | 3789 | */ |
3774 | size = ring_buffer_page_len(ref->page); | 3790 | size = ring_buffer_page_len(ref->page); |
3775 | if (size < PAGE_SIZE) | 3791 | if (size < PAGE_SIZE) |
3776 | memset(ref->page + size, 0, PAGE_SIZE - size); | 3792 | memset(ref->page + size, 0, PAGE_SIZE - size); |
3777 | 3793 | ||
3778 | page = virt_to_page(ref->page); | 3794 | page = virt_to_page(ref->page); |
3779 | 3795 | ||
3780 | spd.pages[i] = page; | 3796 | spd.pages[i] = page; |
3781 | spd.partial[i].len = PAGE_SIZE; | 3797 | spd.partial[i].len = PAGE_SIZE; |
3782 | spd.partial[i].offset = 0; | 3798 | spd.partial[i].offset = 0; |
3783 | spd.partial[i].private = (unsigned long)ref; | 3799 | spd.partial[i].private = (unsigned long)ref; |
3784 | spd.nr_pages++; | 3800 | spd.nr_pages++; |
3785 | *ppos += PAGE_SIZE; | 3801 | *ppos += PAGE_SIZE; |
3786 | 3802 | ||
3787 | entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); | 3803 | entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); |
3788 | } | 3804 | } |
3789 | 3805 | ||
3790 | trace_access_unlock(info->cpu); | 3806 | trace_access_unlock(info->cpu); |
3791 | spd.nr_pages = i; | 3807 | spd.nr_pages = i; |
3792 | 3808 | ||
3793 | /* did we read anything? */ | 3809 | /* did we read anything? */ |
3794 | if (!spd.nr_pages) { | 3810 | if (!spd.nr_pages) { |
3795 | if (flags & SPLICE_F_NONBLOCK) | 3811 | if (flags & SPLICE_F_NONBLOCK) |
3796 | ret = -EAGAIN; | 3812 | ret = -EAGAIN; |
3797 | else | 3813 | else |
3798 | ret = 0; | 3814 | ret = 0; |
3799 | /* TODO: block */ | 3815 | /* TODO: block */ |
3800 | return ret; | 3816 | return ret; |
3801 | } | 3817 | } |
3802 | 3818 | ||
3803 | ret = splice_to_pipe(pipe, &spd); | 3819 | ret = splice_to_pipe(pipe, &spd); |
3804 | 3820 | ||
3805 | return ret; | 3821 | return ret; |
3806 | } | 3822 | } |
3807 | 3823 | ||
/* File operations for the per-cpu trace_pipe_raw debugfs file. */
static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,	/* stream-like; seeking is meaningless */
};
3815 | 3831 | ||
/*
 * Read handler for the per-cpu "stats" debugfs file.  Formats the
 * entry, overrun and commit-overrun counters for the cpu stored in
 * file->private_data into a temporary trace_seq and copies it out.
 */
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	unsigned long cpu = (unsigned long)filp->private_data;
	struct trace_array *tr = &global_trace;
	struct trace_seq *s;
	unsigned long cnt;

	/* trace_seq is large; allocate rather than burn stack. */
	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(tr->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(tr->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

	kfree(s);

	return count;
}
3846 | 3862 | ||
/* File operations for the per-cpu "stats" debugfs file (read-only). */
static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_stats_read,
};
3851 | 3867 | ||
3852 | #ifdef CONFIG_DYNAMIC_FTRACE | 3868 | #ifdef CONFIG_DYNAMIC_FTRACE |
3853 | 3869 | ||
/*
 * Weak default for architectures that have no extra dynamic-ftrace
 * info to report; arch code may override.  Returns the number of
 * bytes written into buf (0 here).
 */
int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}
3858 | 3874 | ||
/*
 * Read handler for dyn_ftrace_total_info: report the counter pointed
 * to by file->private_data plus any arch-specific details, formatted
 * into a static buffer serialized by dyn_info_mutex.
 */
static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	/* The buffer is static, so concurrent readers must serialize. */
	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	/* size-1 leaves room for the trailing newline appended below. */
	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}
3882 | 3898 | ||
/* File operations for the dynamic-ftrace info debugfs file (read-only). */
static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
};
3887 | #endif | 3903 | #endif |
3888 | 3904 | ||
3889 | static struct dentry *d_tracer; | 3905 | static struct dentry *d_tracer; |
3890 | 3906 | ||
/*
 * Lazily create (and cache) the top-level "tracing" debugfs directory.
 * Returns the cached dentry, or NULL if debugfs is unavailable or the
 * directory could not be created (warned once).
 */
struct dentry *tracing_init_dentry(void)
{
	static int once;	/* limits the failure warning to one print */

	if (d_tracer)
		return d_tracer;

	/* Too early in boot for debugfs; caller must cope with NULL. */
	if (!debugfs_initialized())
		return NULL;

	d_tracer = debugfs_create_dir("tracing", NULL);

	if (!d_tracer && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'tracing'\n");
		return NULL;
	}

	return d_tracer;
}
3911 | 3927 | ||
3912 | static struct dentry *d_percpu; | 3928 | static struct dentry *d_percpu; |
3913 | 3929 | ||
/*
 * Lazily create (and cache) the "per_cpu" directory under the tracing
 * debugfs root.  Returns NULL if the root or the directory itself
 * could not be created (warned once).
 */
struct dentry *tracing_dentry_percpu(void)
{
	static int once;	/* limits the failure warning to one print */
	struct dentry *d_tracer;

	if (d_percpu)
		return d_percpu;

	d_tracer = tracing_init_dentry();

	if (!d_tracer)
		return NULL;

	d_percpu = debugfs_create_dir("per_cpu", d_tracer);

	if (!d_percpu && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'per_cpu'\n");
		return NULL;
	}

	return d_percpu;
}
3937 | 3953 | ||
3938 | static void tracing_init_debugfs_percpu(long cpu) | 3954 | static void tracing_init_debugfs_percpu(long cpu) |
3939 | { | 3955 | { |
3940 | struct dentry *d_percpu = tracing_dentry_percpu(); | 3956 | struct dentry *d_percpu = tracing_dentry_percpu(); |
3941 | struct dentry *d_cpu; | 3957 | struct dentry *d_cpu; |
3942 | /* strlen(cpu) + MAX(log10(cpu)) + '\0' */ | 3958 | /* strlen(cpu) + MAX(log10(cpu)) + '\0' */ |
3943 | char cpu_dir[7]; | 3959 | char cpu_dir[7]; |
3944 | 3960 | ||
3945 | if (cpu > 999 || cpu < 0) | 3961 | if (cpu > 999 || cpu < 0) |
3946 | return; | 3962 | return; |
3947 | 3963 | ||
3948 | sprintf(cpu_dir, "cpu%ld", cpu); | 3964 | sprintf(cpu_dir, "cpu%ld", cpu); |
3949 | d_cpu = debugfs_create_dir(cpu_dir, d_percpu); | 3965 | d_cpu = debugfs_create_dir(cpu_dir, d_percpu); |
3950 | if (!d_cpu) { | 3966 | if (!d_cpu) { |
3951 | pr_warning("Could not create debugfs '%s' entry\n", cpu_dir); | 3967 | pr_warning("Could not create debugfs '%s' entry\n", cpu_dir); |
3952 | return; | 3968 | return; |
3953 | } | 3969 | } |
3954 | 3970 | ||
3955 | /* per cpu trace_pipe */ | 3971 | /* per cpu trace_pipe */ |
3956 | trace_create_file("trace_pipe", 0444, d_cpu, | 3972 | trace_create_file("trace_pipe", 0444, d_cpu, |
3957 | (void *) cpu, &tracing_pipe_fops); | 3973 | (void *) cpu, &tracing_pipe_fops); |
3958 | 3974 | ||
3959 | /* per cpu trace */ | 3975 | /* per cpu trace */ |
3960 | trace_create_file("trace", 0644, d_cpu, | 3976 | trace_create_file("trace", 0644, d_cpu, |
3961 | (void *) cpu, &tracing_fops); | 3977 | (void *) cpu, &tracing_fops); |
3962 | 3978 | ||
3963 | trace_create_file("trace_pipe_raw", 0444, d_cpu, | 3979 | trace_create_file("trace_pipe_raw", 0444, d_cpu, |
3964 | (void *) cpu, &tracing_buffers_fops); | 3980 | (void *) cpu, &tracing_buffers_fops); |
3965 | 3981 | ||
3966 | trace_create_file("stats", 0444, d_cpu, | 3982 | trace_create_file("stats", 0444, d_cpu, |
3967 | (void *) cpu, &tracing_stats_fops); | 3983 | (void *) cpu, &tracing_stats_fops); |
3968 | } | 3984 | } |
3969 | 3985 | ||
3970 | #ifdef CONFIG_FTRACE_SELFTEST | 3986 | #ifdef CONFIG_FTRACE_SELFTEST |
3971 | /* Let selftest have access to static functions in this file */ | 3987 | /* Let selftest have access to static functions in this file */ |
3972 | #include "trace_selftest.c" | 3988 | #include "trace_selftest.c" |
3973 | #endif | 3989 | #endif |
3974 | 3990 | ||
/* Binds one tracer option flag to its debugfs control file. */
struct trace_option_dentry {
	struct tracer_opt		*opt;	/* the option this file toggles */
	struct tracer_flags		*flags;	/* flag word the option lives in */
	struct dentry			*entry;	/* debugfs file, for later removal */
};
3980 | 3996 | ||
3981 | static ssize_t | 3997 | static ssize_t |
3982 | trace_options_read(struct file *filp, char __user *ubuf, size_t cnt, | 3998 | trace_options_read(struct file *filp, char __user *ubuf, size_t cnt, |
3983 | loff_t *ppos) | 3999 | loff_t *ppos) |
3984 | { | 4000 | { |
3985 | struct trace_option_dentry *topt = filp->private_data; | 4001 | struct trace_option_dentry *topt = filp->private_data; |
3986 | char *buf; | 4002 | char *buf; |
3987 | 4003 | ||
3988 | if (topt->flags->val & topt->opt->bit) | 4004 | if (topt->flags->val & topt->opt->bit) |
3989 | buf = "1\n"; | 4005 | buf = "1\n"; |
3990 | else | 4006 | else |
3991 | buf = "0\n"; | 4007 | buf = "0\n"; |
3992 | 4008 | ||
3993 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); | 4009 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); |
3994 | } | 4010 | } |
3995 | 4011 | ||
/*
 * Write handler for a per-tracer option file.  Accepts "0" or "1";
 * when the requested state differs from the current one, flips the
 * option through __set_tracer_option() under trace_types_lock.
 * Returns the byte count consumed or a negative errno.
 */
static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	/* Only boolean values make sense for an option bit. */
	if (val != 0 && val != 1)
		return -EINVAL;

	/* Only bother the tracer when the state actually changes. */
	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(current_trace, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}
4033 | 4049 | ||
4034 | 4050 | ||
/* File operations for per-tracer option files under options/. */
static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
};
4040 | 4056 | ||
4041 | static ssize_t | 4057 | static ssize_t |
4042 | trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt, | 4058 | trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt, |
4043 | loff_t *ppos) | 4059 | loff_t *ppos) |
4044 | { | 4060 | { |
4045 | long index = (long)filp->private_data; | 4061 | long index = (long)filp->private_data; |
4046 | char *buf; | 4062 | char *buf; |
4047 | 4063 | ||
4048 | if (trace_flags & (1 << index)) | 4064 | if (trace_flags & (1 << index)) |
4049 | buf = "1\n"; | 4065 | buf = "1\n"; |
4050 | else | 4066 | else |
4051 | buf = "0\n"; | 4067 | buf = "0\n"; |
4052 | 4068 | ||
4053 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); | 4069 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); |
4054 | } | 4070 | } |
4055 | 4071 | ||
/*
 * Write handler for a core trace option file.  Accepts "0" or "1" and
 * sets/clears bit `index` of trace_flags via set_tracer_flags().
 * Returns the byte count consumed or a negative errno.
 */
static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	long index = (long)filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	/* Only boolean values make sense for a flag bit. */
	if (val != 0 && val != 1)
		return -EINVAL;
	set_tracer_flags(1 << index, val);

	*ppos += cnt;

	return cnt;
}
4085 | 4101 | ||
/* File operations for core trace option files under options/. */
static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
};
4091 | 4107 | ||
4092 | struct dentry *trace_create_file(const char *name, | 4108 | struct dentry *trace_create_file(const char *name, |
4093 | mode_t mode, | 4109 | mode_t mode, |
4094 | struct dentry *parent, | 4110 | struct dentry *parent, |
4095 | void *data, | 4111 | void *data, |
4096 | const struct file_operations *fops) | 4112 | const struct file_operations *fops) |
4097 | { | 4113 | { |
4098 | struct dentry *ret; | 4114 | struct dentry *ret; |
4099 | 4115 | ||
4100 | ret = debugfs_create_file(name, mode, parent, data, fops); | 4116 | ret = debugfs_create_file(name, mode, parent, data, fops); |
4101 | if (!ret) | 4117 | if (!ret) |
4102 | pr_warning("Could not create debugfs '%s' entry\n", name); | 4118 | pr_warning("Could not create debugfs '%s' entry\n", name); |
4103 | 4119 | ||
4104 | return ret; | 4120 | return ret; |
4105 | } | 4121 | } |
4106 | 4122 | ||
4107 | 4123 | ||
/*
 * Lazily create (and cache) the "options" directory under the tracing
 * debugfs root.  Returns NULL if the root or the directory could not
 * be created.
 */
static struct dentry *trace_options_init_dentry(void)
{
	struct dentry *d_tracer;
	static struct dentry *t_options;	/* cached across calls */

	if (t_options)
		return t_options;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	t_options = debugfs_create_dir("options", d_tracer);
	if (!t_options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return t_options;
}
4128 | 4144 | ||
/*
 * Fill in one trace_option_dentry and create its debugfs file under
 * options/.  On failure topt->entry is NULL (trace_create_file warns);
 * destroy_trace_option_files() copes with that.
 */
static void
create_trace_option_file(struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry();
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
				&trace_options_fops);

}
4147 | 4163 | ||
/*
 * Create one debugfs option file per entry in the tracer's flag
 * option table.  Returns a kcalloc'd, NULL-opt-terminated array of
 * trace_option_dentry (freed by destroy_trace_option_files()), or
 * NULL if the tracer has no options or allocation fails.
 */
static struct trace_option_dentry *
create_trace_option_files(struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	/* Count the options; the table is name-NULL terminated. */
	for (cnt = 0; opts[cnt].name; cnt++)
		;

	/* +1 keeps a zeroed terminator entry for the destroy loop. */
	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(&topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}
4179 | 4195 | ||
4180 | static void | 4196 | static void |
4181 | destroy_trace_option_files(struct trace_option_dentry *topts) | 4197 | destroy_trace_option_files(struct trace_option_dentry *topts) |
4182 | { | 4198 | { |
4183 | int cnt; | 4199 | int cnt; |
4184 | 4200 | ||
4185 | if (!topts) | 4201 | if (!topts) |
4186 | return; | 4202 | return; |
4187 | 4203 | ||
4188 | for (cnt = 0; topts[cnt].opt; cnt++) { | 4204 | for (cnt = 0; topts[cnt].opt; cnt++) { |
4189 | if (topts[cnt].entry) | 4205 | if (topts[cnt].entry) |
4190 | debugfs_remove(topts[cnt].entry); | 4206 | debugfs_remove(topts[cnt].entry); |
4191 | } | 4207 | } |
4192 | 4208 | ||
4193 | kfree(topts); | 4209 | kfree(topts); |
4194 | } | 4210 | } |
4195 | 4211 | ||
/*
 * Create the debugfs file for core trace option bit `index` under
 * options/.  The bit index is smuggled through the data pointer.
 * Returns the dentry or NULL on failure.
 */
static struct dentry *
create_trace_option_core_file(const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry();
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				    &trace_options_core_fops);
}
4208 | 4224 | ||
/*
 * Boot-time setup: create one debugfs file per core trace option
 * listed in the trace_options[] name table.
 */
static __init void create_trace_options_dir(void)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry();
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(trace_options[i], i);
}
4221 | 4237 | ||
4222 | static __init int tracer_init_debugfs(void) | 4238 | static __init int tracer_init_debugfs(void) |
4223 | { | 4239 | { |
4224 | struct dentry *d_tracer; | 4240 | struct dentry *d_tracer; |
4225 | int cpu; | 4241 | int cpu; |
4226 | 4242 | ||
4227 | trace_access_lock_init(); | 4243 | trace_access_lock_init(); |
4228 | 4244 | ||
4229 | d_tracer = tracing_init_dentry(); | 4245 | d_tracer = tracing_init_dentry(); |
4230 | 4246 | ||
4231 | trace_create_file("tracing_enabled", 0644, d_tracer, | 4247 | trace_create_file("tracing_enabled", 0644, d_tracer, |
4232 | &global_trace, &tracing_ctrl_fops); | 4248 | &global_trace, &tracing_ctrl_fops); |
4233 | 4249 | ||
4234 | trace_create_file("trace_options", 0644, d_tracer, | 4250 | trace_create_file("trace_options", 0644, d_tracer, |
4235 | NULL, &tracing_iter_fops); | 4251 | NULL, &tracing_iter_fops); |
4236 | 4252 | ||
4237 | trace_create_file("tracing_cpumask", 0644, d_tracer, | 4253 | trace_create_file("tracing_cpumask", 0644, d_tracer, |
4238 | NULL, &tracing_cpumask_fops); | 4254 | NULL, &tracing_cpumask_fops); |
4239 | 4255 | ||
4240 | trace_create_file("trace", 0644, d_tracer, | 4256 | trace_create_file("trace", 0644, d_tracer, |
4241 | (void *) TRACE_PIPE_ALL_CPU, &tracing_fops); | 4257 | (void *) TRACE_PIPE_ALL_CPU, &tracing_fops); |
4242 | 4258 | ||
4243 | trace_create_file("available_tracers", 0444, d_tracer, | 4259 | trace_create_file("available_tracers", 0444, d_tracer, |
4244 | &global_trace, &show_traces_fops); | 4260 | &global_trace, &show_traces_fops); |
4245 | 4261 | ||
4246 | trace_create_file("current_tracer", 0644, d_tracer, | 4262 | trace_create_file("current_tracer", 0644, d_tracer, |
4247 | &global_trace, &set_tracer_fops); | 4263 | &global_trace, &set_tracer_fops); |
4248 | 4264 | ||
4249 | #ifdef CONFIG_TRACER_MAX_TRACE | 4265 | #ifdef CONFIG_TRACER_MAX_TRACE |
4250 | trace_create_file("tracing_max_latency", 0644, d_tracer, | 4266 | trace_create_file("tracing_max_latency", 0644, d_tracer, |
4251 | &tracing_max_latency, &tracing_max_lat_fops); | 4267 | &tracing_max_latency, &tracing_max_lat_fops); |
4268 | #endif | ||
4252 | 4269 | ||
4253 | trace_create_file("tracing_thresh", 0644, d_tracer, | 4270 | trace_create_file("tracing_thresh", 0644, d_tracer, |
4254 | &tracing_thresh, &tracing_max_lat_fops); | 4271 | &tracing_thresh, &tracing_max_lat_fops); |
4255 | #endif | ||
4256 | 4272 | ||
4257 | trace_create_file("README", 0444, d_tracer, | 4273 | trace_create_file("README", 0444, d_tracer, |
4258 | NULL, &tracing_readme_fops); | 4274 | NULL, &tracing_readme_fops); |
4259 | 4275 | ||
4260 | trace_create_file("trace_pipe", 0444, d_tracer, | 4276 | trace_create_file("trace_pipe", 0444, d_tracer, |
4261 | (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops); | 4277 | (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops); |
4262 | 4278 | ||
4263 | trace_create_file("buffer_size_kb", 0644, d_tracer, | 4279 | trace_create_file("buffer_size_kb", 0644, d_tracer, |
4264 | &global_trace, &tracing_entries_fops); | 4280 | &global_trace, &tracing_entries_fops); |
4265 | 4281 | ||
4266 | trace_create_file("trace_marker", 0220, d_tracer, | 4282 | trace_create_file("trace_marker", 0220, d_tracer, |
4267 | NULL, &tracing_mark_fops); | 4283 | NULL, &tracing_mark_fops); |
4268 | 4284 | ||
4269 | trace_create_file("saved_cmdlines", 0444, d_tracer, | 4285 | trace_create_file("saved_cmdlines", 0444, d_tracer, |
4270 | NULL, &tracing_saved_cmdlines_fops); | 4286 | NULL, &tracing_saved_cmdlines_fops); |
4271 | 4287 | ||
4272 | trace_create_file("trace_clock", 0644, d_tracer, NULL, | 4288 | trace_create_file("trace_clock", 0644, d_tracer, NULL, |
4273 | &trace_clock_fops); | 4289 | &trace_clock_fops); |
4274 | 4290 | ||
4275 | #ifdef CONFIG_DYNAMIC_FTRACE | 4291 | #ifdef CONFIG_DYNAMIC_FTRACE |
4276 | trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, | 4292 | trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, |
4277 | &ftrace_update_tot_cnt, &tracing_dyn_info_fops); | 4293 | &ftrace_update_tot_cnt, &tracing_dyn_info_fops); |
4278 | #endif | 4294 | #endif |
4279 | #ifdef CONFIG_SYSPROF_TRACER | 4295 | #ifdef CONFIG_SYSPROF_TRACER |
4280 | init_tracer_sysprof_debugfs(d_tracer); | 4296 | init_tracer_sysprof_debugfs(d_tracer); |
4281 | #endif | 4297 | #endif |
4282 | 4298 | ||
4283 | create_trace_options_dir(); | 4299 | create_trace_options_dir(); |
4284 | 4300 | ||
4285 | for_each_tracing_cpu(cpu) | 4301 | for_each_tracing_cpu(cpu) |
4286 | tracing_init_debugfs_percpu(cpu); | 4302 | tracing_init_debugfs_percpu(cpu); |
4287 | 4303 | ||
4288 | return 0; | 4304 | return 0; |
4289 | } | 4305 | } |
4290 | 4306 | ||
4291 | static int trace_panic_handler(struct notifier_block *this, | 4307 | static int trace_panic_handler(struct notifier_block *this, |
4292 | unsigned long event, void *unused) | 4308 | unsigned long event, void *unused) |
4293 | { | 4309 | { |
4294 | if (ftrace_dump_on_oops) | 4310 | if (ftrace_dump_on_oops) |
4295 | ftrace_dump(); | 4311 | ftrace_dump(); |
4296 | return NOTIFY_OK; | 4312 | return NOTIFY_OK; |
4297 | } | 4313 | } |
4298 | 4314 | ||
/* Registered on the panic notifier chain in tracer_alloc_buffers(). */
static struct notifier_block trace_panic_notifier = {
	.notifier_call  = trace_panic_handler,
	.next           = NULL,
	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
};
4304 | 4320 | ||
4305 | static int trace_die_handler(struct notifier_block *self, | 4321 | static int trace_die_handler(struct notifier_block *self, |
4306 | unsigned long val, | 4322 | unsigned long val, |
4307 | void *data) | 4323 | void *data) |
4308 | { | 4324 | { |
4309 | switch (val) { | 4325 | switch (val) { |
4310 | case DIE_OOPS: | 4326 | case DIE_OOPS: |
4311 | if (ftrace_dump_on_oops) | 4327 | if (ftrace_dump_on_oops) |
4312 | ftrace_dump(); | 4328 | ftrace_dump(); |
4313 | break; | 4329 | break; |
4314 | default: | 4330 | default: |
4315 | break; | 4331 | break; |
4316 | } | 4332 | } |
4317 | return NOTIFY_OK; | 4333 | return NOTIFY_OK; |
4318 | } | 4334 | } |
4319 | 4335 | ||
/* Registered via register_die_notifier() in tracer_alloc_buffers(). */
static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
4324 | 4340 | ||
4325 | /* | 4341 | /* |
4326 | * printk is set to max of 1024, we really don't need it that big. | 4342 | * printk is set to max of 1024, we really don't need it that big. |
4327 | * Nothing should be printing 1000 characters anyway. | 4343 | * Nothing should be printing 1000 characters anyway. |
4328 | */ | 4344 | */ |
4329 | #define TRACE_MAX_PRINT 1000 | 4345 | #define TRACE_MAX_PRINT 1000 |
4330 | 4346 | ||
4331 | /* | 4347 | /* |
4332 | * Define here KERN_TRACE so that we have one place to modify | 4348 | * Define here KERN_TRACE so that we have one place to modify |
4333 | * it if we decide to change what log level the ftrace dump | 4349 | * it if we decide to change what log level the ftrace dump |
4334 | * should be at. | 4350 | * should be at. |
4335 | */ | 4351 | */ |
4336 | #define KERN_TRACE KERN_EMERG | 4352 | #define KERN_TRACE KERN_EMERG |
4337 | 4353 | ||
4338 | static void | 4354 | static void |
4339 | trace_printk_seq(struct trace_seq *s) | 4355 | trace_printk_seq(struct trace_seq *s) |
4340 | { | 4356 | { |
4341 | /* Probably should print a warning here. */ | 4357 | /* Probably should print a warning here. */ |
4342 | if (s->len >= 1000) | 4358 | if (s->len >= 1000) |
4343 | s->len = 1000; | 4359 | s->len = 1000; |
4344 | 4360 | ||
4345 | /* should be zero ended, but we are paranoid. */ | 4361 | /* should be zero ended, but we are paranoid. */ |
4346 | s->buffer[s->len] = 0; | 4362 | s->buffer[s->len] = 0; |
4347 | 4363 | ||
4348 | printk(KERN_TRACE "%s", s->buffer); | 4364 | printk(KERN_TRACE "%s", s->buffer); |
4349 | 4365 | ||
4350 | trace_seq_init(s); | 4366 | trace_seq_init(s); |
4351 | } | 4367 | } |
4352 | 4368 | ||
/*
 * Dump the whole ftrace ring buffer to the console.  Called from the
 * panic/die notifiers above, so it must work with IRQs disabled and
 * without taking sleeping locks (hence the raw arch_spinlock).
 *
 * @disable_tracing: when true, tracing is killed permanently (panic
 *	path); when false, tracing is re-enabled after the dump.
 *
 * Only ever dumps once per boot (dump_ran latch).  The iterator is
 * static because it is too large for the stack in a panic context.
 */
static void __ftrace_dump(bool disable_tracing)
{
	static arch_spinlock_t ftrace_dump_lock =
		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	unsigned int old_userobj;
	static int dump_ran;
	unsigned long flags;
	int cnt = 0, cpu;

	/* only one dump */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_dump_lock);
	if (dump_ran)
		goto out;

	dump_ran = 1;

	tracing_off();

	if (disable_tracing)
		ftrace_kill();

	/* Keep new entries from being recorded while we read the buffers. */
	for_each_tracing_cpu(cpu) {
		atomic_inc(&global_trace.data[cpu]->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Simulate the iterator */
	iter.tr = &global_trace;
	iter.trace = current_trace;
	iter.cpu_file = TRACE_PIPE_ALL_CPU;

	/*
	 * We need to stop all tracing on all CPUS to read the
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all what we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}

		/* Flush the formatted line to the console. */
		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE " (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

	/* Re-enable tracing if requested */
	if (!disable_tracing) {
		trace_flags |= old_userobj;

		for_each_tracing_cpu(cpu) {
			atomic_dec(&global_trace.data[cpu]->disabled);
		}
		tracing_on();
	}

 out:
	arch_spin_unlock(&ftrace_dump_lock);
	local_irq_restore(flags);
}
4444 | 4460 | ||
/* By default: disable tracing after the dump */
void ftrace_dump(void)
{
	__ftrace_dump(true);
}
4450 | 4466 | ||
/*
 * Early-init allocation of the global trace state: cpumasks, the main
 * ring buffer (and the max-latency snapshot buffer when configured),
 * per-cpu data pointers, the saved-cmdline table, and the panic/die
 * notifiers.  Uses goto-based cleanup so partially acquired resources
 * are released on failure.  Returns 0 on success, -ENOMEM on failure.
 */
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int i;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(tracing_cpumask, cpu_all_mask);

	/* TODO: make the number of buffers hot pluggable with CPUS */
	global_trace.buffer = ring_buffer_alloc(ring_buf_size,
						   TRACE_BUFFER_FLAGS);
	if (!global_trace.buffer) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_cpumask;
	}
	global_trace.entries = ring_buffer_size(global_trace.buffer);


#ifdef CONFIG_TRACER_MAX_TRACE
	max_tr.buffer = ring_buffer_alloc(ring_buf_size,
					     TRACE_BUFFER_FLAGS);
	if (!max_tr.buffer) {
		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
		WARN_ON(1);
		/* the main buffer was already allocated; free it too */
		ring_buffer_free(global_trace.buffer);
		goto out_free_cpumask;
	}
	max_tr.entries = ring_buffer_size(max_tr.buffer);
	WARN_ON(max_tr.entries != global_trace.entries);
#endif

	/* Allocate the first page for all buffers */
	for_each_tracing_cpu(i) {
		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
		max_tr.data[i] = &per_cpu(max_tr_data, i);
	}

	trace_init_cmdlines();

	register_tracer(&nop_trace);
	current_trace = &nop_trace;
#ifdef CONFIG_BOOT_TRACER
	register_tracer(&boot_tracer);
#endif
	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	return 0;

out_free_cpumask:
	free_cpumask_var(tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
4526 | 4542 | ||
4527 | __init static int clear_boot_tracer(void) | 4543 | __init static int clear_boot_tracer(void) |
4528 | { | 4544 | { |
4529 | /* | 4545 | /* |
4530 | * The default tracer at boot buffer is an init section. | 4546 | * The default tracer at boot buffer is an init section. |
4531 | * This function is called in lateinit. If we did not | 4547 | * This function is called in lateinit. If we did not |
4532 | * find the boot tracer, then clear it out, to prevent | 4548 | * find the boot tracer, then clear it out, to prevent |
4533 | * later registration from accessing the buffer that is | 4549 | * later registration from accessing the buffer that is |
4534 | * about to be freed. | 4550 | * about to be freed. |
4535 | */ | 4551 | */ |
4536 | if (!default_bootup_tracer) | 4552 | if (!default_bootup_tracer) |
4537 | return 0; | 4553 | return 0; |
4538 | 4554 | ||
4539 | printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n", | 4555 | printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n", |
4540 | default_bootup_tracer); | 4556 | default_bootup_tracer); |
4541 | default_bootup_tracer = NULL; | 4557 | default_bootup_tracer = NULL; |
4542 | 4558 | ||
4543 | return 0; | 4559 | return 0; |
4544 | } | 4560 | } |
4545 | 4561 | ||
/* Buffers must exist very early; the debugfs files once the fs is up. */
early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
kernel/trace/trace.h
1 | #ifndef _LINUX_KERNEL_TRACE_H | 1 | #ifndef _LINUX_KERNEL_TRACE_H |
2 | #define _LINUX_KERNEL_TRACE_H | 2 | #define _LINUX_KERNEL_TRACE_H |
3 | 3 | ||
4 | #include <linux/fs.h> | 4 | #include <linux/fs.h> |
5 | #include <asm/atomic.h> | 5 | #include <asm/atomic.h> |
6 | #include <linux/sched.h> | 6 | #include <linux/sched.h> |
7 | #include <linux/clocksource.h> | 7 | #include <linux/clocksource.h> |
8 | #include <linux/ring_buffer.h> | 8 | #include <linux/ring_buffer.h> |
9 | #include <linux/mmiotrace.h> | 9 | #include <linux/mmiotrace.h> |
10 | #include <linux/tracepoint.h> | 10 | #include <linux/tracepoint.h> |
11 | #include <linux/ftrace.h> | 11 | #include <linux/ftrace.h> |
12 | #include <trace/boot.h> | 12 | #include <trace/boot.h> |
13 | #include <linux/kmemtrace.h> | 13 | #include <linux/kmemtrace.h> |
14 | #include <linux/hw_breakpoint.h> | 14 | #include <linux/hw_breakpoint.h> |
15 | 15 | ||
16 | #include <linux/trace_seq.h> | 16 | #include <linux/trace_seq.h> |
17 | #include <linux/ftrace_event.h> | 17 | #include <linux/ftrace_event.h> |
18 | 18 | ||
/*
 * Entry type identifiers stored in each trace record.  The numeric
 * values are positional (starting at __TRACE_FIRST_TYPE == 0), so new
 * types should be appended before __TRACE_LAST_TYPE; reordering changes
 * the on-buffer ids.
 */
enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_BOOT_CALL,
	TRACE_BOOT_RET,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_HW_BRANCHES,
	TRACE_KMEM_ALLOC,
	TRACE_KMEM_FREE,
	TRACE_BLK,
	TRACE_KSYM,

	__TRACE_LAST_TYPE,
};
45 | 45 | ||
/* Which allocator family a kmemtrace event came from. */
enum kmemtrace_type_id {
	KMEMTRACE_TYPE_KMALLOC = 0,	/* kmalloc() or kfree(). */
	KMEMTRACE_TYPE_CACHE,		/* kmem_cache_*(). */
	KMEMTRACE_TYPE_PAGES,		/* __get_free_pages() and friends. */
};
51 | 51 | ||
52 | extern struct tracer boot_tracer; | 52 | extern struct tracer boot_tracer; |
53 | 53 | ||
/*
 * Field macros consumed by trace_entries.h.  With FTRACE_ENTRY defined
 * as below, including trace_entries.h expands every entry description
 * into a plain C struct (struct struct_name { struct trace_entry ent;
 * ...fields... };).  The *_desc variants expand to nothing here — they
 * describe fields that live inside a nested container and so need no
 * separate member.
 */
#undef __field
#define __field(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)	\
	struct struct_name {					\
		struct trace_entry	ent;			\
		tstruct						\
	}

#undef TP_ARGS
#define TP_ARGS(args...)	args

/* Duplicate entries share a struct with the original; expand to nothing. */
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)

#include "trace_entries.h"
89 | 89 | ||
/*
 * syscalls are special, and need special handling, this is why
 * they are not included in trace_entries.h
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;		/* syscall number */
	unsigned long		args[];		/* syscall arguments (flexible array) */
};
99 | 99 | ||
/* Trace record for syscall exit: the syscall number and its return value. */
struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;	/* syscall number */
	long			ret;	/* syscall return value */
};
105 | 105 | ||
/* Trace record for a kprobe hit: probed address plus captured arguments. */
struct kprobe_trace_entry {
	struct trace_entry	ent;
	unsigned long		ip;	/* probed instruction address */
	int			nargs;	/* number of entries in args[] */
	unsigned long		args[];	/* flexible array of captured values */
};

/* Record size for n captured arguments (header + n words). */
#define SIZEOF_KPROBE_TRACE_ENTRY(n)			\
	(offsetof(struct kprobe_trace_entry, args) +	\
	(sizeof(unsigned long) * (n)))
116 | 116 | ||
/* Trace record for a kretprobe hit: function and return site plus arguments. */
struct kretprobe_trace_entry {
	struct trace_entry	ent;
	unsigned long		func;	/* address of the probed function */
	unsigned long		ret_ip;	/* return address */
	int			nargs;	/* number of entries in args[] */
	unsigned long		args[];	/* flexible array of captured values */
};

/* Record size for n captured arguments (header + n words). */
#define SIZEOF_KRETPROBE_TRACE_ENTRY(n)			\
	(offsetof(struct kretprobe_trace_entry, args) +	\
	(sizeof(unsigned long) * (n)))
128 | 128 | ||
/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 *
 * The values are single-bit flags and may be OR'ed together.
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};
145 | 145 | ||
146 | #define TRACE_BUF_SIZE 1024 | 146 | #define TRACE_BUF_SIZE 1024 |
147 | 147 | ||
/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	atomic_t		disabled;	/* non-zero: recording suppressed on this cpu */
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	cycle_t			preempt_timestamp;
	/* identity of the task that started the trace */
	pid_t			pid;
	uid_t			uid;
	char			comm[TASK_COMM_LEN];
};
170 | 170 | ||
/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct ring_buffer	*buffer;	/* the backing ring buffer */
	unsigned long		entries;	/* buffer size in entries */
	int			cpu;
	cycle_t			time_start;
	struct task_struct	*waiter;
	struct trace_array_cpu	*data[NR_CPUS];	/* per-cpu descriptor data */
};
184 | 184 | ||
/* Compile-time check: is `var` a pointer to `type`? */
#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

/*
 * One arm of trace_assign_type below: if `var` has pointer type
 * `etype *`, assign the entry to it (warning if the runtime type id
 * does not match) and break out of the enclosing do/while.
 */
#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}
195 | 195 | ||
/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 *
 * A `var` whose type matches no arm falls through to
 * __ftrace_bad_type(), which has no definition and so fails at
 * link time.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH);	\
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
		IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,	\
			  TRACE_KMEM_ALLOC);				\
		IF_ASSIGN(var, ent, struct kmemtrace_free_entry,	\
			  TRACE_KMEM_FREE);				\
		IF_ASSIGN(var, ent, struct ksym_trace_entry, TRACE_KSYM);\
		__ftrace_bad_type();					\
	} while (0)
240 | 240 | ||
241 | /* | 241 | /* |
242 | * An option specific to a tracer. This is a boolean value. | 242 | * An option specific to a tracer. This is a boolean value. |
243 | * The bit is the bit index that sets its value on the | 243 | * The bit is the bit index that sets its value on the |
244 | * flags value in struct tracer_flags. | 244 | * flags value in struct tracer_flags. |
245 | */ | 245 | */ |
246 | struct tracer_opt { | 246 | struct tracer_opt { |
247 | const char *name; /* Will appear on the trace_options file */ | 247 | const char *name; /* Will appear on the trace_options file */ |
248 | u32 bit; /* Mask assigned in val field in tracer_flags */ | 248 | u32 bit; /* Mask assigned in val field in tracer_flags */ |
249 | }; | 249 | }; |
250 | 250 | ||
251 | /* | 251 | /* |
252 | * The set of specific options for a tracer. Your tracer | 252 | * The set of specific options for a tracer. Your tracer |
253 | * has to set the initial value of the flags val. | 253 | * has to set the initial value of the flags val. |
254 | */ | 254 | */ |
255 | struct tracer_flags { | 255 | struct tracer_flags { |
256 | u32 val; | 256 | u32 val; |
257 | struct tracer_opt *opts; | 257 | struct tracer_opt *opts; |
258 | }; | 258 | }; |
259 | 259 | ||
260 | /* Makes it easier to define a tracer opt */ | 260 | /* Makes it easier to define a tracer opt */ |
261 | #define TRACER_OPT(s, b) .name = #s, .bit = b | 261 | #define TRACER_OPT(s, b) .name = #s, .bit = b |
262 | 262 | ||
263 | 263 | ||
264 | /** | 264 | /** |
265 | * struct tracer - a specific tracer and its callbacks to interact with debugfs | 265 | * struct tracer - a specific tracer and its callbacks to interact with debugfs |
266 | * @name: the name chosen to select it on the available_tracers file | 266 | * @name: the name chosen to select it on the available_tracers file |
267 | * @init: called when one switches to this tracer (echo name > current_tracer) | 267 | * @init: called when one switches to this tracer (echo name > current_tracer) |
268 | * @reset: called when one switches to another tracer | 268 | * @reset: called when one switches to another tracer |
269 | * @start: called when tracing is unpaused (echo 1 > tracing_enabled) | 269 | * @start: called when tracing is unpaused (echo 1 > tracing_enabled) |
270 | * @stop: called when tracing is paused (echo 0 > tracing_enabled) | 270 | * @stop: called when tracing is paused (echo 0 > tracing_enabled) |
271 | * @open: called when the trace file is opened | 271 | * @open: called when the trace file is opened |
272 | * @pipe_open: called when the trace_pipe file is opened | 272 | * @pipe_open: called when the trace_pipe file is opened |
273 | * @wait_pipe: override how the user waits for traces on trace_pipe | 273 | * @wait_pipe: override how the user waits for traces on trace_pipe |
274 | * @close: called when the trace file is released | 274 | * @close: called when the trace file is released |
275 | * @pipe_close: called when the trace_pipe file is released | 275 | * @pipe_close: called when the trace_pipe file is released |
276 | * @read: override the default read callback on trace_pipe | 276 | * @read: override the default read callback on trace_pipe |
277 | * @splice_read: override the default splice_read callback on trace_pipe | 277 | * @splice_read: override the default splice_read callback on trace_pipe |
278 | * @selftest: selftest to run on boot (see trace_selftest.c) | 278 | * @selftest: selftest to run on boot (see trace_selftest.c) |
279 | * @print_headers: override the first lines that describe your columns | 279 | * @print_headers: override the first lines that describe your columns |
280 | * @print_line: callback that prints a trace | 280 | * @print_line: callback that prints a trace |
281 | * @set_flag: signals one of your private flags changed (trace_options file) | 281 | * @set_flag: signals one of your private flags changed (trace_options file) |
282 | * @flags: your private flags | 282 | * @flags: your private flags |
283 | */ | 283 | */ |
284 | struct tracer { | 284 | struct tracer { |
285 | const char *name; | 285 | const char *name; |
286 | int (*init)(struct trace_array *tr); | 286 | int (*init)(struct trace_array *tr); |
287 | void (*reset)(struct trace_array *tr); | 287 | void (*reset)(struct trace_array *tr); |
288 | void (*start)(struct trace_array *tr); | 288 | void (*start)(struct trace_array *tr); |
289 | void (*stop)(struct trace_array *tr); | 289 | void (*stop)(struct trace_array *tr); |
290 | void (*open)(struct trace_iterator *iter); | 290 | void (*open)(struct trace_iterator *iter); |
291 | void (*pipe_open)(struct trace_iterator *iter); | 291 | void (*pipe_open)(struct trace_iterator *iter); |
292 | void (*wait_pipe)(struct trace_iterator *iter); | 292 | void (*wait_pipe)(struct trace_iterator *iter); |
293 | void (*close)(struct trace_iterator *iter); | 293 | void (*close)(struct trace_iterator *iter); |
294 | void (*pipe_close)(struct trace_iterator *iter); | 294 | void (*pipe_close)(struct trace_iterator *iter); |
295 | ssize_t (*read)(struct trace_iterator *iter, | 295 | ssize_t (*read)(struct trace_iterator *iter, |
296 | struct file *filp, char __user *ubuf, | 296 | struct file *filp, char __user *ubuf, |
297 | size_t cnt, loff_t *ppos); | 297 | size_t cnt, loff_t *ppos); |
298 | ssize_t (*splice_read)(struct trace_iterator *iter, | 298 | ssize_t (*splice_read)(struct trace_iterator *iter, |
299 | struct file *filp, | 299 | struct file *filp, |
300 | loff_t *ppos, | 300 | loff_t *ppos, |
301 | struct pipe_inode_info *pipe, | 301 | struct pipe_inode_info *pipe, |
302 | size_t len, | 302 | size_t len, |
303 | unsigned int flags); | 303 | unsigned int flags); |
304 | #ifdef CONFIG_FTRACE_STARTUP_TEST | 304 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
305 | int (*selftest)(struct tracer *trace, | 305 | int (*selftest)(struct tracer *trace, |
306 | struct trace_array *tr); | 306 | struct trace_array *tr); |
307 | #endif | 307 | #endif |
308 | void (*print_header)(struct seq_file *m); | 308 | void (*print_header)(struct seq_file *m); |
309 | enum print_line_t (*print_line)(struct trace_iterator *iter); | 309 | enum print_line_t (*print_line)(struct trace_iterator *iter); |
310 | /* If you handled the flag setting, return 0 */ | 310 | /* If you handled the flag setting, return 0 */ |
311 | int (*set_flag)(u32 old_flags, u32 bit, int set); | 311 | int (*set_flag)(u32 old_flags, u32 bit, int set); |
312 | struct tracer *next; | 312 | struct tracer *next; |
313 | int print_max; | 313 | int print_max; |
314 | struct tracer_flags *flags; | 314 | struct tracer_flags *flags; |
315 | }; | 315 | }; |
316 | 316 | ||
317 | 317 | ||
318 | #define TRACE_PIPE_ALL_CPU -1 | 318 | #define TRACE_PIPE_ALL_CPU -1 |
319 | 319 | ||
320 | int tracer_init(struct tracer *t, struct trace_array *tr); | 320 | int tracer_init(struct tracer *t, struct trace_array *tr); |
321 | int tracing_is_enabled(void); | 321 | int tracing_is_enabled(void); |
322 | void trace_wake_up(void); | 322 | void trace_wake_up(void); |
323 | void tracing_reset(struct trace_array *tr, int cpu); | 323 | void tracing_reset(struct trace_array *tr, int cpu); |
324 | void tracing_reset_online_cpus(struct trace_array *tr); | 324 | void tracing_reset_online_cpus(struct trace_array *tr); |
325 | void tracing_reset_current(int cpu); | 325 | void tracing_reset_current(int cpu); |
326 | void tracing_reset_current_online_cpus(void); | 326 | void tracing_reset_current_online_cpus(void); |
327 | int tracing_open_generic(struct inode *inode, struct file *filp); | 327 | int tracing_open_generic(struct inode *inode, struct file *filp); |
328 | struct dentry *trace_create_file(const char *name, | 328 | struct dentry *trace_create_file(const char *name, |
329 | mode_t mode, | 329 | mode_t mode, |
330 | struct dentry *parent, | 330 | struct dentry *parent, |
331 | void *data, | 331 | void *data, |
332 | const struct file_operations *fops); | 332 | const struct file_operations *fops); |
333 | 333 | ||
334 | struct dentry *tracing_init_dentry(void); | 334 | struct dentry *tracing_init_dentry(void); |
335 | void init_tracer_sysprof_debugfs(struct dentry *d_tracer); | 335 | void init_tracer_sysprof_debugfs(struct dentry *d_tracer); |
336 | 336 | ||
337 | struct ring_buffer_event; | 337 | struct ring_buffer_event; |
338 | 338 | ||
339 | struct ring_buffer_event * | 339 | struct ring_buffer_event * |
340 | trace_buffer_lock_reserve(struct ring_buffer *buffer, | 340 | trace_buffer_lock_reserve(struct ring_buffer *buffer, |
341 | int type, | 341 | int type, |
342 | unsigned long len, | 342 | unsigned long len, |
343 | unsigned long flags, | 343 | unsigned long flags, |
344 | int pc); | 344 | int pc); |
345 | void trace_buffer_unlock_commit(struct ring_buffer *buffer, | 345 | void trace_buffer_unlock_commit(struct ring_buffer *buffer, |
346 | struct ring_buffer_event *event, | 346 | struct ring_buffer_event *event, |
347 | unsigned long flags, int pc); | 347 | unsigned long flags, int pc); |
348 | 348 | ||
349 | struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, | 349 | struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, |
350 | struct trace_array_cpu *data); | 350 | struct trace_array_cpu *data); |
351 | 351 | ||
352 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, | 352 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, |
353 | int *ent_cpu, u64 *ent_ts); | 353 | int *ent_cpu, u64 *ent_ts); |
354 | 354 | ||
355 | void default_wait_pipe(struct trace_iterator *iter); | 355 | void default_wait_pipe(struct trace_iterator *iter); |
356 | void poll_wait_pipe(struct trace_iterator *iter); | 356 | void poll_wait_pipe(struct trace_iterator *iter); |
357 | 357 | ||
358 | void ftrace(struct trace_array *tr, | 358 | void ftrace(struct trace_array *tr, |
359 | struct trace_array_cpu *data, | 359 | struct trace_array_cpu *data, |
360 | unsigned long ip, | 360 | unsigned long ip, |
361 | unsigned long parent_ip, | 361 | unsigned long parent_ip, |
362 | unsigned long flags, int pc); | 362 | unsigned long flags, int pc); |
363 | void tracing_sched_switch_trace(struct trace_array *tr, | 363 | void tracing_sched_switch_trace(struct trace_array *tr, |
364 | struct task_struct *prev, | 364 | struct task_struct *prev, |
365 | struct task_struct *next, | 365 | struct task_struct *next, |
366 | unsigned long flags, int pc); | 366 | unsigned long flags, int pc); |
367 | 367 | ||
368 | void tracing_sched_wakeup_trace(struct trace_array *tr, | 368 | void tracing_sched_wakeup_trace(struct trace_array *tr, |
369 | struct task_struct *wakee, | 369 | struct task_struct *wakee, |
370 | struct task_struct *cur, | 370 | struct task_struct *cur, |
371 | unsigned long flags, int pc); | 371 | unsigned long flags, int pc); |
372 | void trace_special(struct trace_array *tr, | 372 | void trace_special(struct trace_array *tr, |
373 | struct trace_array_cpu *data, | 373 | struct trace_array_cpu *data, |
374 | unsigned long arg1, | 374 | unsigned long arg1, |
375 | unsigned long arg2, | 375 | unsigned long arg2, |
376 | unsigned long arg3, int pc); | 376 | unsigned long arg3, int pc); |
377 | void trace_function(struct trace_array *tr, | 377 | void trace_function(struct trace_array *tr, |
378 | unsigned long ip, | 378 | unsigned long ip, |
379 | unsigned long parent_ip, | 379 | unsigned long parent_ip, |
380 | unsigned long flags, int pc); | 380 | unsigned long flags, int pc); |
381 | 381 | ||
382 | void trace_graph_return(struct ftrace_graph_ret *trace); | 382 | void trace_graph_return(struct ftrace_graph_ret *trace); |
383 | int trace_graph_entry(struct ftrace_graph_ent *trace); | 383 | int trace_graph_entry(struct ftrace_graph_ent *trace); |
384 | void set_graph_array(struct trace_array *tr); | 384 | void set_graph_array(struct trace_array *tr); |
385 | 385 | ||
386 | void tracing_start_cmdline_record(void); | 386 | void tracing_start_cmdline_record(void); |
387 | void tracing_stop_cmdline_record(void); | 387 | void tracing_stop_cmdline_record(void); |
388 | void tracing_sched_switch_assign_trace(struct trace_array *tr); | 388 | void tracing_sched_switch_assign_trace(struct trace_array *tr); |
389 | void tracing_stop_sched_switch_record(void); | 389 | void tracing_stop_sched_switch_record(void); |
390 | void tracing_start_sched_switch_record(void); | 390 | void tracing_start_sched_switch_record(void); |
391 | int register_tracer(struct tracer *type); | 391 | int register_tracer(struct tracer *type); |
392 | void unregister_tracer(struct tracer *type); | 392 | void unregister_tracer(struct tracer *type); |
393 | int is_tracing_stopped(void); | 393 | int is_tracing_stopped(void); |
394 | 394 | ||
395 | extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr); | 395 | extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr); |
396 | 396 | ||
397 | extern unsigned long nsecs_to_usecs(unsigned long nsecs); | 397 | extern unsigned long nsecs_to_usecs(unsigned long nsecs); |
398 | 398 | ||
399 | extern unsigned long tracing_thresh; | ||
400 | |||
399 | #ifdef CONFIG_TRACER_MAX_TRACE | 401 | #ifdef CONFIG_TRACER_MAX_TRACE |
400 | extern unsigned long tracing_max_latency; | 402 | extern unsigned long tracing_max_latency; |
401 | extern unsigned long tracing_thresh; | ||
402 | 403 | ||
403 | void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu); | 404 | void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu); |
404 | void update_max_tr_single(struct trace_array *tr, | 405 | void update_max_tr_single(struct trace_array *tr, |
405 | struct task_struct *tsk, int cpu); | 406 | struct task_struct *tsk, int cpu); |
406 | #endif /* CONFIG_TRACER_MAX_TRACE */ | 407 | #endif /* CONFIG_TRACER_MAX_TRACE */ |
407 | 408 | ||
408 | #ifdef CONFIG_STACKTRACE | 409 | #ifdef CONFIG_STACKTRACE |
409 | void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, | 410 | void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, |
410 | int skip, int pc); | 411 | int skip, int pc); |
411 | 412 | ||
412 | void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, | 413 | void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, |
413 | int pc); | 414 | int pc); |
414 | 415 | ||
415 | void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, | 416 | void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, |
416 | int pc); | 417 | int pc); |
417 | #else | 418 | #else |
418 | static inline void ftrace_trace_stack(struct trace_array *tr, | 419 | static inline void ftrace_trace_stack(struct trace_array *tr, |
419 | unsigned long flags, int skip, int pc) | 420 | unsigned long flags, int skip, int pc) |
420 | { | 421 | { |
421 | } | 422 | } |
422 | 423 | ||
423 | static inline void ftrace_trace_userstack(struct trace_array *tr, | 424 | static inline void ftrace_trace_userstack(struct trace_array *tr, |
424 | unsigned long flags, int pc) | 425 | unsigned long flags, int pc) |
425 | { | 426 | { |
426 | } | 427 | } |
427 | 428 | ||
428 | static inline void __trace_stack(struct trace_array *tr, unsigned long flags, | 429 | static inline void __trace_stack(struct trace_array *tr, unsigned long flags, |
429 | int skip, int pc) | 430 | int skip, int pc) |
430 | { | 431 | { |
431 | } | 432 | } |
432 | #endif /* CONFIG_STACKTRACE */ | 433 | #endif /* CONFIG_STACKTRACE */ |
433 | 434 | ||
434 | extern cycle_t ftrace_now(int cpu); | 435 | extern cycle_t ftrace_now(int cpu); |
435 | 436 | ||
436 | extern void trace_find_cmdline(int pid, char comm[]); | 437 | extern void trace_find_cmdline(int pid, char comm[]); |
437 | 438 | ||
438 | #ifdef CONFIG_DYNAMIC_FTRACE | 439 | #ifdef CONFIG_DYNAMIC_FTRACE |
439 | extern unsigned long ftrace_update_tot_cnt; | 440 | extern unsigned long ftrace_update_tot_cnt; |
440 | #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func | 441 | #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func |
441 | extern int DYN_FTRACE_TEST_NAME(void); | 442 | extern int DYN_FTRACE_TEST_NAME(void); |
442 | #endif | 443 | #endif |
443 | 444 | ||
444 | extern int ring_buffer_expanded; | 445 | extern int ring_buffer_expanded; |
445 | extern bool tracing_selftest_disabled; | 446 | extern bool tracing_selftest_disabled; |
446 | DECLARE_PER_CPU(int, ftrace_cpu_disabled); | 447 | DECLARE_PER_CPU(int, ftrace_cpu_disabled); |
447 | 448 | ||
448 | #ifdef CONFIG_FTRACE_STARTUP_TEST | 449 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
449 | extern int trace_selftest_startup_function(struct tracer *trace, | 450 | extern int trace_selftest_startup_function(struct tracer *trace, |
450 | struct trace_array *tr); | 451 | struct trace_array *tr); |
451 | extern int trace_selftest_startup_function_graph(struct tracer *trace, | 452 | extern int trace_selftest_startup_function_graph(struct tracer *trace, |
452 | struct trace_array *tr); | 453 | struct trace_array *tr); |
453 | extern int trace_selftest_startup_irqsoff(struct tracer *trace, | 454 | extern int trace_selftest_startup_irqsoff(struct tracer *trace, |
454 | struct trace_array *tr); | 455 | struct trace_array *tr); |
455 | extern int trace_selftest_startup_preemptoff(struct tracer *trace, | 456 | extern int trace_selftest_startup_preemptoff(struct tracer *trace, |
456 | struct trace_array *tr); | 457 | struct trace_array *tr); |
457 | extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace, | 458 | extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace, |
458 | struct trace_array *tr); | 459 | struct trace_array *tr); |
459 | extern int trace_selftest_startup_wakeup(struct tracer *trace, | 460 | extern int trace_selftest_startup_wakeup(struct tracer *trace, |
460 | struct trace_array *tr); | 461 | struct trace_array *tr); |
461 | extern int trace_selftest_startup_nop(struct tracer *trace, | 462 | extern int trace_selftest_startup_nop(struct tracer *trace, |
462 | struct trace_array *tr); | 463 | struct trace_array *tr); |
463 | extern int trace_selftest_startup_sched_switch(struct tracer *trace, | 464 | extern int trace_selftest_startup_sched_switch(struct tracer *trace, |
464 | struct trace_array *tr); | 465 | struct trace_array *tr); |
465 | extern int trace_selftest_startup_sysprof(struct tracer *trace, | 466 | extern int trace_selftest_startup_sysprof(struct tracer *trace, |
466 | struct trace_array *tr); | 467 | struct trace_array *tr); |
467 | extern int trace_selftest_startup_branch(struct tracer *trace, | 468 | extern int trace_selftest_startup_branch(struct tracer *trace, |
468 | struct trace_array *tr); | 469 | struct trace_array *tr); |
469 | extern int trace_selftest_startup_hw_branches(struct tracer *trace, | 470 | extern int trace_selftest_startup_hw_branches(struct tracer *trace, |
470 | struct trace_array *tr); | 471 | struct trace_array *tr); |
471 | extern int trace_selftest_startup_ksym(struct tracer *trace, | 472 | extern int trace_selftest_startup_ksym(struct tracer *trace, |
472 | struct trace_array *tr); | 473 | struct trace_array *tr); |
473 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ | 474 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ |
474 | 475 | ||
475 | extern void *head_page(struct trace_array_cpu *data); | 476 | extern void *head_page(struct trace_array_cpu *data); |
476 | extern unsigned long long ns2usecs(cycle_t nsec); | 477 | extern unsigned long long ns2usecs(cycle_t nsec); |
477 | extern int | 478 | extern int |
478 | trace_vbprintk(unsigned long ip, const char *fmt, va_list args); | 479 | trace_vbprintk(unsigned long ip, const char *fmt, va_list args); |
479 | extern int | 480 | extern int |
480 | trace_vprintk(unsigned long ip, const char *fmt, va_list args); | 481 | trace_vprintk(unsigned long ip, const char *fmt, va_list args); |
481 | extern int | 482 | extern int |
482 | trace_array_vprintk(struct trace_array *tr, | 483 | trace_array_vprintk(struct trace_array *tr, |
483 | unsigned long ip, const char *fmt, va_list args); | 484 | unsigned long ip, const char *fmt, va_list args); |
484 | int trace_array_printk(struct trace_array *tr, | 485 | int trace_array_printk(struct trace_array *tr, |
485 | unsigned long ip, const char *fmt, ...); | 486 | unsigned long ip, const char *fmt, ...); |
486 | 487 | ||
487 | extern unsigned long trace_flags; | 488 | extern unsigned long trace_flags; |
488 | 489 | ||
489 | extern int trace_clock_id; | 490 | extern int trace_clock_id; |
490 | 491 | ||
491 | /* Standard output formatting function used for function return traces */ | 492 | /* Standard output formatting function used for function return traces */ |
492 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 493 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
493 | extern enum print_line_t print_graph_function(struct trace_iterator *iter); | 494 | extern enum print_line_t print_graph_function(struct trace_iterator *iter); |
494 | extern enum print_line_t | 495 | extern enum print_line_t |
495 | trace_print_graph_duration(unsigned long long duration, struct trace_seq *s); | 496 | trace_print_graph_duration(unsigned long long duration, struct trace_seq *s); |
496 | 497 | ||
497 | #ifdef CONFIG_DYNAMIC_FTRACE | 498 | #ifdef CONFIG_DYNAMIC_FTRACE |
498 | /* TODO: make this variable */ | 499 | /* TODO: make this variable */ |
499 | #define FTRACE_GRAPH_MAX_FUNCS 32 | 500 | #define FTRACE_GRAPH_MAX_FUNCS 32 |
500 | extern int ftrace_graph_filter_enabled; | 501 | extern int ftrace_graph_filter_enabled; |
501 | extern int ftrace_graph_count; | 502 | extern int ftrace_graph_count; |
502 | extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS]; | 503 | extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS]; |
503 | 504 | ||
504 | static inline int ftrace_graph_addr(unsigned long addr) | 505 | static inline int ftrace_graph_addr(unsigned long addr) |
505 | { | 506 | { |
506 | int i; | 507 | int i; |
507 | 508 | ||
508 | if (!ftrace_graph_filter_enabled) | 509 | if (!ftrace_graph_filter_enabled) |
509 | return 1; | 510 | return 1; |
510 | 511 | ||
511 | for (i = 0; i < ftrace_graph_count; i++) { | 512 | for (i = 0; i < ftrace_graph_count; i++) { |
512 | if (addr == ftrace_graph_funcs[i]) | 513 | if (addr == ftrace_graph_funcs[i]) |
513 | return 1; | 514 | return 1; |
514 | } | 515 | } |
515 | 516 | ||
516 | return 0; | 517 | return 0; |
517 | } | 518 | } |
518 | #else | 519 | #else |
519 | static inline int ftrace_graph_addr(unsigned long addr) | 520 | static inline int ftrace_graph_addr(unsigned long addr) |
520 | { | 521 | { |
521 | return 1; | 522 | return 1; |
522 | } | 523 | } |
523 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 524 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
524 | #else /* CONFIG_FUNCTION_GRAPH_TRACER */ | 525 | #else /* CONFIG_FUNCTION_GRAPH_TRACER */ |
525 | static inline enum print_line_t | 526 | static inline enum print_line_t |
526 | print_graph_function(struct trace_iterator *iter) | 527 | print_graph_function(struct trace_iterator *iter) |
527 | { | 528 | { |
528 | return TRACE_TYPE_UNHANDLED; | 529 | return TRACE_TYPE_UNHANDLED; |
529 | } | 530 | } |
530 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 531 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
531 | 532 | ||
532 | extern struct list_head ftrace_pids; | 533 | extern struct list_head ftrace_pids; |
533 | 534 | ||
534 | #ifdef CONFIG_FUNCTION_TRACER | 535 | #ifdef CONFIG_FUNCTION_TRACER |
535 | static inline int ftrace_trace_task(struct task_struct *task) | 536 | static inline int ftrace_trace_task(struct task_struct *task) |
536 | { | 537 | { |
537 | if (list_empty(&ftrace_pids)) | 538 | if (list_empty(&ftrace_pids)) |
538 | return 1; | 539 | return 1; |
539 | 540 | ||
540 | return test_tsk_trace_trace(task); | 541 | return test_tsk_trace_trace(task); |
541 | } | 542 | } |
542 | #else | 543 | #else |
543 | static inline int ftrace_trace_task(struct task_struct *task) | 544 | static inline int ftrace_trace_task(struct task_struct *task) |
544 | { | 545 | { |
545 | return 1; | 546 | return 1; |
546 | } | 547 | } |
547 | #endif | 548 | #endif |
548 | 549 | ||
549 | /* | 550 | /* |
550 | * struct trace_parser - serves for reading the user input separated by spaces | 551 | * struct trace_parser - serves for reading the user input separated by spaces |
551 | * @cont: set if the input is not complete - no final space char was found | 552 | * @cont: set if the input is not complete - no final space char was found |
552 | * @buffer: holds the parsed user input | 553 | * @buffer: holds the parsed user input |
553 | * @idx: user input length | 554 | * @idx: user input length |
554 | * @size: buffer size | 555 | * @size: buffer size |
555 | */ | 556 | */ |
556 | struct trace_parser { | 557 | struct trace_parser { |
557 | bool cont; | 558 | bool cont; |
558 | char *buffer; | 559 | char *buffer; |
559 | unsigned idx; | 560 | unsigned idx; |
560 | unsigned size; | 561 | unsigned size; |
561 | }; | 562 | }; |
562 | 563 | ||
563 | static inline bool trace_parser_loaded(struct trace_parser *parser) | 564 | static inline bool trace_parser_loaded(struct trace_parser *parser) |
564 | { | 565 | { |
565 | return (parser->idx != 0); | 566 | return (parser->idx != 0); |
566 | } | 567 | } |
567 | 568 | ||
568 | static inline bool trace_parser_cont(struct trace_parser *parser) | 569 | static inline bool trace_parser_cont(struct trace_parser *parser) |
569 | { | 570 | { |
570 | return parser->cont; | 571 | return parser->cont; |
571 | } | 572 | } |
572 | 573 | ||
573 | static inline void trace_parser_clear(struct trace_parser *parser) | 574 | static inline void trace_parser_clear(struct trace_parser *parser) |
574 | { | 575 | { |
575 | parser->cont = false; | 576 | parser->cont = false; |
576 | parser->idx = 0; | 577 | parser->idx = 0; |
577 | } | 578 | } |
578 | 579 | ||
579 | extern int trace_parser_get_init(struct trace_parser *parser, int size); | 580 | extern int trace_parser_get_init(struct trace_parser *parser, int size); |
580 | extern void trace_parser_put(struct trace_parser *parser); | 581 | extern void trace_parser_put(struct trace_parser *parser); |
581 | extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf, | 582 | extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf, |
582 | size_t cnt, loff_t *ppos); | 583 | size_t cnt, loff_t *ppos); |
583 | 584 | ||
584 | /* | 585 | /* |
585 | * trace_iterator_flags is an enumeration that defines bit | 586 | * trace_iterator_flags is an enumeration that defines bit |
586 | * positions into trace_flags that controls the output. | 587 | * positions into trace_flags that controls the output. |
587 | * | 588 | * |
588 | * NOTE: These bits must match the trace_options array in | 589 | * NOTE: These bits must match the trace_options array in |
589 | * trace.c. | 590 | * trace.c. |
590 | */ | 591 | */ |
591 | enum trace_iterator_flags { | 592 | enum trace_iterator_flags { |
592 | TRACE_ITER_PRINT_PARENT = 0x01, | 593 | TRACE_ITER_PRINT_PARENT = 0x01, |
593 | TRACE_ITER_SYM_OFFSET = 0x02, | 594 | TRACE_ITER_SYM_OFFSET = 0x02, |
594 | TRACE_ITER_SYM_ADDR = 0x04, | 595 | TRACE_ITER_SYM_ADDR = 0x04, |
595 | TRACE_ITER_VERBOSE = 0x08, | 596 | TRACE_ITER_VERBOSE = 0x08, |
596 | TRACE_ITER_RAW = 0x10, | 597 | TRACE_ITER_RAW = 0x10, |
597 | TRACE_ITER_HEX = 0x20, | 598 | TRACE_ITER_HEX = 0x20, |
598 | TRACE_ITER_BIN = 0x40, | 599 | TRACE_ITER_BIN = 0x40, |
599 | TRACE_ITER_BLOCK = 0x80, | 600 | TRACE_ITER_BLOCK = 0x80, |
600 | TRACE_ITER_STACKTRACE = 0x100, | 601 | TRACE_ITER_STACKTRACE = 0x100, |
601 | TRACE_ITER_PRINTK = 0x200, | 602 | TRACE_ITER_PRINTK = 0x200, |
602 | TRACE_ITER_PREEMPTONLY = 0x400, | 603 | TRACE_ITER_PREEMPTONLY = 0x400, |
603 | TRACE_ITER_BRANCH = 0x800, | 604 | TRACE_ITER_BRANCH = 0x800, |
604 | TRACE_ITER_ANNOTATE = 0x1000, | 605 | TRACE_ITER_ANNOTATE = 0x1000, |
605 | TRACE_ITER_USERSTACKTRACE = 0x2000, | 606 | TRACE_ITER_USERSTACKTRACE = 0x2000, |
606 | TRACE_ITER_SYM_USEROBJ = 0x4000, | 607 | TRACE_ITER_SYM_USEROBJ = 0x4000, |
607 | TRACE_ITER_PRINTK_MSGONLY = 0x8000, | 608 | TRACE_ITER_PRINTK_MSGONLY = 0x8000, |
608 | TRACE_ITER_CONTEXT_INFO = 0x10000, /* Print pid/cpu/time */ | 609 | TRACE_ITER_CONTEXT_INFO = 0x10000, /* Print pid/cpu/time */ |
609 | TRACE_ITER_LATENCY_FMT = 0x20000, | 610 | TRACE_ITER_LATENCY_FMT = 0x20000, |
610 | TRACE_ITER_SLEEP_TIME = 0x40000, | 611 | TRACE_ITER_SLEEP_TIME = 0x40000, |
611 | TRACE_ITER_GRAPH_TIME = 0x80000, | 612 | TRACE_ITER_GRAPH_TIME = 0x80000, |
612 | }; | 613 | }; |
613 | 614 | ||
614 | /* | 615 | /* |
615 | * TRACE_ITER_SYM_MASK masks the options in trace_flags that | 616 | * TRACE_ITER_SYM_MASK masks the options in trace_flags that |
616 | * control the output of kernel symbols. | 617 | * control the output of kernel symbols. |
617 | */ | 618 | */ |
618 | #define TRACE_ITER_SYM_MASK \ | 619 | #define TRACE_ITER_SYM_MASK \ |
619 | (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR) | 620 | (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR) |
620 | 621 | ||
621 | extern struct tracer nop_trace; | 622 | extern struct tracer nop_trace; |
622 | 623 | ||
623 | /** | 624 | /** |
624 | * ftrace_preempt_disable - disable preemption scheduler safe | 625 | * ftrace_preempt_disable - disable preemption scheduler safe |
625 | * | 626 | * |
626 | * When tracing can happen inside the scheduler, there exists | 627 | * When tracing can happen inside the scheduler, there exists |
627 | * cases that the tracing might happen before the need_resched | 628 | * cases that the tracing might happen before the need_resched |
628 | * flag is checked. If this happens and the tracer calls | 629 | * flag is checked. If this happens and the tracer calls |
629 | * preempt_enable (after a disable), a schedule might take place | 630 | * preempt_enable (after a disable), a schedule might take place |
630 | * causing an infinite recursion. | 631 | * causing an infinite recursion. |
631 | * | 632 | * |
632 | * To prevent this, we read the need_resched flag before | 633 | * To prevent this, we read the need_resched flag before |
633 | * disabling preemption. When we want to enable preemption we | 634 | * disabling preemption. When we want to enable preemption we |
634 | * check the flag, if it is set, then we call preempt_enable_no_resched. | 635 | * check the flag, if it is set, then we call preempt_enable_no_resched. |
635 | * Otherwise, we call preempt_enable. | 636 | * Otherwise, we call preempt_enable. |
636 | * | 637 | * |
637 | * The rational for doing the above is that if need_resched is set | 638 | * The rational for doing the above is that if need_resched is set |
638 | * and we have yet to reschedule, we are either in an atomic location | 639 | * and we have yet to reschedule, we are either in an atomic location |
639 | * (where we do not need to check for scheduling) or we are inside | 640 | * (where we do not need to check for scheduling) or we are inside |
640 | * the scheduler and do not want to resched. | 641 | * the scheduler and do not want to resched. |
641 | */ | 642 | */ |
static inline int ftrace_preempt_disable(void)
{
	int resched;

	/*
	 * Sample need_resched() *before* disabling preemption (see the
	 * comment block above): the saved value tells the matching
	 * ftrace_preempt_enable() whether a resched was already pending.
	 * The _notrace variant keeps the tracer from recursing into itself.
	 */
	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}
651 | 652 | ||
652 | /** | 653 | /** |
653 | * ftrace_preempt_enable - enable preemption scheduler safe | 654 | * ftrace_preempt_enable - enable preemption scheduler safe |
654 | * @resched: the return value from ftrace_preempt_disable | 655 | * @resched: the return value from ftrace_preempt_disable |
655 | * | 656 | * |
656 | * This is a scheduler safe way to enable preemption and not miss | 657 | * This is a scheduler safe way to enable preemption and not miss |
657 | * any preemption checks. The disabled saved the state of preemption. | 658 | * any preemption checks. The disabled saved the state of preemption. |
658 | * If resched is set, then we are either inside an atomic or | 659 | * If resched is set, then we are either inside an atomic or |
659 | * are inside the scheduler (we would have already scheduled | 660 | * are inside the scheduler (we would have already scheduled |
660 | * otherwise). In this case, we do not want to call normal | 661 | * otherwise). In this case, we do not want to call normal |
661 | * preempt_enable, but preempt_enable_no_resched instead. | 662 | * preempt_enable, but preempt_enable_no_resched instead. |
662 | */ | 663 | */ |
static inline void ftrace_preempt_enable(int resched)
{
	/*
	 * If need_resched was already set when preemption was disabled,
	 * we are in an atomic section or inside the scheduler (see the
	 * comment above), so skip the resched check on enable to avoid
	 * scheduling from within the tracer.
	 */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
670 | 671 | ||
#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
/* Start branch tracing on @tr, but only if the "branch" trace option is set. */
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
/* Unconditional teardown: the option may have changed since enable. */
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
/* !CONFIG_BRANCH_TRACER: branch tracing compiles out to no-ops. */
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */
694 | 695 | ||
695 | /* set ring buffers to default size if not already done so */ | 696 | /* set ring buffers to default size if not already done so */ |
696 | int tracing_update_buffers(void); | 697 | int tracing_update_buffers(void); |
697 | 698 | ||
698 | /* trace event type bit fields, not numeric */ | 699 | /* trace event type bit fields, not numeric */ |
699 | enum { | 700 | enum { |
700 | TRACE_EVENT_TYPE_PRINTF = 1, | 701 | TRACE_EVENT_TYPE_PRINTF = 1, |
701 | TRACE_EVENT_TYPE_RAW = 2, | 702 | TRACE_EVENT_TYPE_RAW = 2, |
702 | }; | 703 | }; |
703 | 704 | ||
/*
 * Description of one field of a trace event record: its name and type
 * string, plus the offset/size used to locate the value inside a raw
 * record (consumed by the event filter code, among others).
 */
struct ftrace_event_field {
	struct list_head link;	/* linkage on the event's field list */
	char *name;
	char *type;
	int filter_type;	/* comparison class, see filter_assign_type() */
	int offset;		/* byte offset of the field in the record */
	int size;		/* field size in bytes */
	int is_signed;		/* non-zero if the field is a signed type */
};
713 | 714 | ||
/*
 * A compiled event filter: the predicate array plus the original
 * filter string, kept around so it can be printed back to user space.
 */
struct event_filter {
	int n_preds;			/* number of entries in preds[] */
	struct filter_pred **preds;
	char *filter_string;		/* source text the filter was built from */
};
719 | 720 | ||
/*
 * Per-subsystem state: groups related trace events under one debugfs
 * directory and an optional subsystem-wide filter.
 */
struct event_subsystem {
	struct list_head list;		/* linkage on the global subsystem list */
	const char *name;
	struct dentry *entry;		/* debugfs directory entry */
	struct event_filter *filter;	/* filter applied to the whole subsystem */
	int nr_events;			/* number of events in this subsystem */
};
727 | 728 | ||
728 | struct filter_pred; | 729 | struct filter_pred; |
729 | struct regex; | 730 | struct regex; |
730 | 731 | ||
731 | typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event, | 732 | typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event, |
732 | int val1, int val2); | 733 | int val1, int val2); |
733 | 734 | ||
734 | typedef int (*regex_match_func)(char *str, struct regex *r, int len); | 735 | typedef int (*regex_match_func)(char *str, struct regex *r, int len); |
735 | 736 | ||
/* How a filter pattern is matched against a string field. */
enum regex_type {
	MATCH_FULL = 0,		/* whole string must match the pattern */
	MATCH_FRONT_ONLY,	/* pattern anchored at the start */
	MATCH_MIDDLE_ONLY,	/* pattern may appear anywhere in the string */
	MATCH_END_ONLY,		/* pattern anchored at the end */
};
742 | 743 | ||
/* A parsed filter pattern and the routine used to match it. */
struct regex {
	char pattern[MAX_FILTER_STR_VAL];
	int len;			/* length of pattern */
	int field_len;
	regex_match_func match;		/* comparison routine, per regex_type */
};
749 | 750 | ||
/* One predicate of a compiled event filter. */
struct filter_pred {
	filter_pred_fn_t fn;	/* comparison callback for this predicate */
	u64 val;		/* numeric value to compare against */
	struct regex regex;	/* string pattern to compare against */
	char *field_name;	/* name of the event field being tested */
	int offset;		/* byte offset of the field in the record */
	int not;		/* non-zero: invert the comparison result */
	int op;			/* operator — see the filter parsing code */
	int pop_n;
};
760 | 761 | ||
761 | extern enum regex_type | 762 | extern enum regex_type |
762 | filter_parse_regex(char *buff, int len, char **search, int *not); | 763 | filter_parse_regex(char *buff, int len, char **search, int *not); |
763 | extern void print_event_filter(struct ftrace_event_call *call, | 764 | extern void print_event_filter(struct ftrace_event_call *call, |
764 | struct trace_seq *s); | 765 | struct trace_seq *s); |
765 | extern int apply_event_filter(struct ftrace_event_call *call, | 766 | extern int apply_event_filter(struct ftrace_event_call *call, |
766 | char *filter_string); | 767 | char *filter_string); |
767 | extern int apply_subsystem_event_filter(struct event_subsystem *system, | 768 | extern int apply_subsystem_event_filter(struct event_subsystem *system, |
768 | char *filter_string); | 769 | char *filter_string); |
769 | extern void print_subsystem_event_filter(struct event_subsystem *system, | 770 | extern void print_subsystem_event_filter(struct event_subsystem *system, |
770 | struct trace_seq *s); | 771 | struct trace_seq *s); |
771 | extern int filter_assign_type(const char *type); | 772 | extern int filter_assign_type(const char *type); |
772 | 773 | ||
773 | static inline int | 774 | static inline int |
774 | filter_check_discard(struct ftrace_event_call *call, void *rec, | 775 | filter_check_discard(struct ftrace_event_call *call, void *rec, |
775 | struct ring_buffer *buffer, | 776 | struct ring_buffer *buffer, |
776 | struct ring_buffer_event *event) | 777 | struct ring_buffer_event *event) |
777 | { | 778 | { |
778 | if (unlikely(call->filter_active) && | 779 | if (unlikely(call->filter_active) && |
779 | !filter_match_preds(call->filter, rec)) { | 780 | !filter_match_preds(call->filter, rec)) { |
780 | ring_buffer_discard_commit(buffer, event); | 781 | ring_buffer_discard_commit(buffer, event); |
781 | return 1; | 782 | return 1; |
782 | } | 783 | } |
783 | 784 | ||
784 | return 0; | 785 | return 0; |
785 | } | 786 | } |
786 | 787 | ||
787 | extern struct mutex event_mutex; | 788 | extern struct mutex event_mutex; |
788 | extern struct list_head ftrace_events; | 789 | extern struct list_head ftrace_events; |
789 | 790 | ||
790 | extern const char *__start___trace_bprintk_fmt[]; | 791 | extern const char *__start___trace_bprintk_fmt[]; |
791 | extern const char *__stop___trace_bprintk_fmt[]; | 792 | extern const char *__stop___trace_bprintk_fmt[]; |
792 | 793 | ||
793 | #undef FTRACE_ENTRY | 794 | #undef FTRACE_ENTRY |
794 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \ | 795 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \ |
795 | extern struct ftrace_event_call \ | 796 | extern struct ftrace_event_call \ |
796 | __attribute__((__aligned__(4))) event_##call; | 797 | __attribute__((__aligned__(4))) event_##call; |
797 | #undef FTRACE_ENTRY_DUP | 798 | #undef FTRACE_ENTRY_DUP |
798 | #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \ | 799 | #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \ |
799 | FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print)) | 800 | FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print)) |
800 | #include "trace_entries.h" | 801 | #include "trace_entries.h" |
801 | 802 | ||
802 | #endif /* _LINUX_KERNEL_TRACE_H */ | 803 | #endif /* _LINUX_KERNEL_TRACE_H */ |
kernel/trace/trace_functions_graph.c
1 | /* | 1 | /* |
2 | * | 2 | * |
3 | * Function graph tracer. | 3 | * Function graph tracer. |
4 | * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com> | 4 | * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com> |
5 | * Mostly borrowed from function tracer which | 5 | * Mostly borrowed from function tracer which |
6 | * is Copyright (c) Steven Rostedt <srostedt@redhat.com> | 6 | * is Copyright (c) Steven Rostedt <srostedt@redhat.com> |
7 | * | 7 | * |
8 | */ | 8 | */ |
9 | #include <linux/debugfs.h> | 9 | #include <linux/debugfs.h> |
10 | #include <linux/uaccess.h> | 10 | #include <linux/uaccess.h> |
11 | #include <linux/ftrace.h> | 11 | #include <linux/ftrace.h> |
12 | #include <linux/fs.h> | 12 | #include <linux/fs.h> |
13 | 13 | ||
14 | #include "trace.h" | 14 | #include "trace.h" |
15 | #include "trace_output.h" | 15 | #include "trace_output.h" |
16 | 16 | ||
/* Per-cpu state kept by the function graph output/rendering code. */
struct fgraph_cpu_data {
	pid_t last_pid;		/* pid last seen on this cpu's output */
	int depth;		/* current call-nesting depth on this cpu */
	int ignore;		/* NOTE(review): appears to suppress output once set — confirm at use sites */
	unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH];
};
23 | 23 | ||
/* Iterator-private data for the function graph tracer output. */
struct fgraph_data {
	struct fgraph_cpu_data *cpu_data;	/* per-cpu array, see above */

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry ent;
	struct ftrace_graph_ret_entry ret;
	int failed;
	int cpu;
};
33 | 33 | ||
34 | #define TRACE_GRAPH_INDENT 2 | 34 | #define TRACE_GRAPH_INDENT 2 |
35 | 35 | ||
36 | /* Flag options */ | 36 | /* Flag options */ |
37 | #define TRACE_GRAPH_PRINT_OVERRUN 0x1 | 37 | #define TRACE_GRAPH_PRINT_OVERRUN 0x1 |
38 | #define TRACE_GRAPH_PRINT_CPU 0x2 | 38 | #define TRACE_GRAPH_PRINT_CPU 0x2 |
39 | #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 | 39 | #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 |
40 | #define TRACE_GRAPH_PRINT_PROC 0x8 | 40 | #define TRACE_GRAPH_PRINT_PROC 0x8 |
41 | #define TRACE_GRAPH_PRINT_DURATION 0x10 | 41 | #define TRACE_GRAPH_PRINT_DURATION 0x10 |
42 | #define TRACE_GRAPH_PRINT_ABS_TIME 0X20 | 42 | #define TRACE_GRAPH_PRINT_ABS_TIME 0X20 |
43 | 43 | ||
44 | static struct tracer_opt trace_opts[] = { | 44 | static struct tracer_opt trace_opts[] = { |
45 | /* Display overruns? (for self-debug purpose) */ | 45 | /* Display overruns? (for self-debug purpose) */ |
46 | { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, | 46 | { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, |
47 | /* Display CPU ? */ | 47 | /* Display CPU ? */ |
48 | { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) }, | 48 | { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) }, |
49 | /* Display Overhead ? */ | 49 | /* Display Overhead ? */ |
50 | { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) }, | 50 | { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) }, |
51 | /* Display proc name/pid */ | 51 | /* Display proc name/pid */ |
52 | { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) }, | 52 | { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) }, |
53 | /* Display duration of execution */ | 53 | /* Display duration of execution */ |
54 | { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) }, | 54 | { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) }, |
55 | /* Display absolute time of an entry */ | 55 | /* Display absolute time of an entry */ |
56 | { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) }, | 56 | { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) }, |
57 | { } /* Empty entry */ | 57 | { } /* Empty entry */ |
58 | }; | 58 | }; |
59 | 59 | ||
60 | static struct tracer_flags tracer_flags = { | 60 | static struct tracer_flags tracer_flags = { |
61 | /* Don't display overruns and proc by default */ | 61 | /* Don't display overruns and proc by default */ |
62 | .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD | | 62 | .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD | |
63 | TRACE_GRAPH_PRINT_DURATION, | 63 | TRACE_GRAPH_PRINT_DURATION, |
64 | .opts = trace_opts | 64 | .opts = trace_opts |
65 | }; | 65 | }; |
66 | 66 | ||
67 | static struct trace_array *graph_array; | 67 | static struct trace_array *graph_array; |
68 | 68 | ||
69 | 69 | ||
/* Add a function return address to the trace stack on thread info.*/
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	/*
	 * Claim the slot by bumping the index first; barrier() keeps the
	 * compiler from reordering the entry stores before the claim.
	 */
	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = index;

	return 0;
}
106 | 106 | ||
/* Retrieve a function return address to the trace stack on thread info.*/
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/* Underflow: the shadow stack is corrupt — stop the tracer. */
	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have no where to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     " from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	/* Copy the saved entry out for the return handler. */
	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}
155 | 155 | ||
/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);
	/*
	 * The barrier keeps the handler's use of the entry ordered
	 * before the slot is released for reuse below.
	 */
	barrier();
	current->curr_ret_stack--;

	/* A zero return address means the shadow stack entry was bogus. */
	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
180 | 180 | ||
/*
 * Record a function-graph "entry" event into @tr's ring buffer.
 * Returns 1 if an event was reserved, 0 if tracing is disabled on this
 * cpu or the reservation failed.
 */
static int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->buffer;
	struct ftrace_graph_ent_entry *entry;

	/* Per-cpu kill switch: bail out if tracing is off on this cpu. */
	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	/* Commit unless an active event filter discards the record. */
	if (!filter_current_check_discard(buffer, call, entry, event))
		ring_buffer_unlock_commit(buffer, event);

	return 1;
}
205 | 205 | ||
206 | int trace_graph_entry(struct ftrace_graph_ent *trace) | 206 | int trace_graph_entry(struct ftrace_graph_ent *trace) |
207 | { | 207 | { |
208 | struct trace_array *tr = graph_array; | 208 | struct trace_array *tr = graph_array; |
209 | struct trace_array_cpu *data; | 209 | struct trace_array_cpu *data; |
210 | unsigned long flags; | 210 | unsigned long flags; |
211 | long disabled; | 211 | long disabled; |
212 | int ret; | 212 | int ret; |
213 | int cpu; | 213 | int cpu; |
214 | int pc; | 214 | int pc; |
215 | 215 | ||
216 | if (!ftrace_trace_task(current)) | 216 | if (!ftrace_trace_task(current)) |
217 | return 0; | 217 | return 0; |
218 | 218 | ||
219 | /* trace it when it is-nested-in or is a function enabled. */ | 219 | /* trace it when it is-nested-in or is a function enabled. */ |
220 | if (!(trace->depth || ftrace_graph_addr(trace->func))) | 220 | if (!(trace->depth || ftrace_graph_addr(trace->func))) |
221 | return 0; | 221 | return 0; |
222 | 222 | ||
223 | local_irq_save(flags); | 223 | local_irq_save(flags); |
224 | cpu = raw_smp_processor_id(); | 224 | cpu = raw_smp_processor_id(); |
225 | data = tr->data[cpu]; | 225 | data = tr->data[cpu]; |
226 | disabled = atomic_inc_return(&data->disabled); | 226 | disabled = atomic_inc_return(&data->disabled); |
227 | if (likely(disabled == 1)) { | 227 | if (likely(disabled == 1)) { |
228 | pc = preempt_count(); | 228 | pc = preempt_count(); |
229 | ret = __trace_graph_entry(tr, trace, flags, pc); | 229 | ret = __trace_graph_entry(tr, trace, flags, pc); |
230 | } else { | 230 | } else { |
231 | ret = 0; | 231 | ret = 0; |
232 | } | 232 | } |
233 | 233 | ||
234 | atomic_dec(&data->disabled); | 234 | atomic_dec(&data->disabled); |
235 | local_irq_restore(flags); | 235 | local_irq_restore(flags); |
236 | 236 | ||
237 | return ret; | 237 | return ret; |
238 | } | 238 | } |
239 | 239 | ||
240 | int trace_graph_thresh_entry(struct ftrace_graph_ent *trace) | ||
241 | { | ||
242 | if (tracing_thresh) | ||
243 | return 1; | ||
244 | else | ||
245 | return trace_graph_entry(trace); | ||
246 | } | ||
247 | |||
/* Record a function-graph "return" event into @tr's ring buffer. */
static void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->buffer;
	struct ftrace_graph_ret_entry *entry;

	/* Per-cpu kill switch: bail out if tracing is off on this cpu. */
	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	/* Commit unless an active event filter discards the record. */
	if (!filter_current_check_discard(buffer, call, entry, event))
		ring_buffer_unlock_commit(buffer, event);
}
262 | 270 | ||
/* Function-graph return hook: record the return event for this cpu. */
void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	/* IRQs off so the per-cpu data cannot change under us. */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	/* Record only if we are the sole user on this cpu (no recursion). */
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
283 | 291 | ||
/* Publish @tr as the trace_array used by the graph entry/return hooks. */
void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}
292 | 300 | ||
301 | void trace_graph_thresh_return(struct ftrace_graph_ret *trace) | ||
302 | { | ||
303 | if (tracing_thresh && | ||
304 | (trace->rettime - trace->calltime < tracing_thresh)) | ||
305 | return; | ||
306 | else | ||
307 | trace_graph_return(trace); | ||
308 | } | ||
309 | |||
293 | static int graph_trace_init(struct trace_array *tr) | 310 | static int graph_trace_init(struct trace_array *tr) |
294 | { | 311 | { |
295 | int ret; | 312 | int ret; |
296 | 313 | ||
297 | set_graph_array(tr); | 314 | set_graph_array(tr); |
298 | ret = register_ftrace_graph(&trace_graph_return, | 315 | if (tracing_thresh) |
299 | &trace_graph_entry); | 316 | ret = register_ftrace_graph(&trace_graph_thresh_return, |
317 | &trace_graph_thresh_entry); | ||
318 | else | ||
319 | ret = register_ftrace_graph(&trace_graph_return, | ||
320 | &trace_graph_entry); | ||
300 | if (ret) | 321 | if (ret) |
301 | return ret; | 322 | return ret; |
302 | tracing_start_cmdline_record(); | 323 | tracing_start_cmdline_record(); |
303 | 324 | ||
304 | return 0; | 325 | return 0; |
305 | } | 326 | } |
306 | 327 | ||
/* Tear down the graph tracer: stop cmdline recording, detach the hooks. */
static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}
312 | 333 | ||
313 | static int max_bytes_for_cpu; | 334 | static int max_bytes_for_cpu; |
314 | 335 | ||
315 | static enum print_line_t | 336 | static enum print_line_t |
316 | print_graph_cpu(struct trace_seq *s, int cpu) | 337 | print_graph_cpu(struct trace_seq *s, int cpu) |
317 | { | 338 | { |
318 | int ret; | 339 | int ret; |
319 | 340 | ||
320 | /* | 341 | /* |
321 | * Start with a space character - to make it stand out | 342 | * Start with a space character - to make it stand out |
322 | * to the right a bit when trace output is pasted into | 343 | * to the right a bit when trace output is pasted into |
323 | * email: | 344 | * email: |
324 | */ | 345 | */ |
325 | ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu); | 346 | ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu); |
326 | if (!ret) | 347 | if (!ret) |
327 | return TRACE_TYPE_PARTIAL_LINE; | 348 | return TRACE_TYPE_PARTIAL_LINE; |
328 | 349 | ||
329 | return TRACE_TYPE_HANDLED; | 350 | return TRACE_TYPE_HANDLED; |
330 | } | 351 | } |
331 | 352 | ||
332 | #define TRACE_GRAPH_PROCINFO_LENGTH 14 | 353 | #define TRACE_GRAPH_PROCINFO_LENGTH 14 |
333 | 354 | ||
334 | static enum print_line_t | 355 | static enum print_line_t |
335 | print_graph_proc(struct trace_seq *s, pid_t pid) | 356 | print_graph_proc(struct trace_seq *s, pid_t pid) |
336 | { | 357 | { |
337 | char comm[TASK_COMM_LEN]; | 358 | char comm[TASK_COMM_LEN]; |
338 | /* sign + log10(MAX_INT) + '\0' */ | 359 | /* sign + log10(MAX_INT) + '\0' */ |
339 | char pid_str[11]; | 360 | char pid_str[11]; |
340 | int spaces = 0; | 361 | int spaces = 0; |
341 | int ret; | 362 | int ret; |
342 | int len; | 363 | int len; |
343 | int i; | 364 | int i; |
344 | 365 | ||
345 | trace_find_cmdline(pid, comm); | 366 | trace_find_cmdline(pid, comm); |
346 | comm[7] = '\0'; | 367 | comm[7] = '\0'; |
347 | sprintf(pid_str, "%d", pid); | 368 | sprintf(pid_str, "%d", pid); |
348 | 369 | ||
349 | /* 1 stands for the "-" character */ | 370 | /* 1 stands for the "-" character */ |
350 | len = strlen(comm) + strlen(pid_str) + 1; | 371 | len = strlen(comm) + strlen(pid_str) + 1; |
351 | 372 | ||
352 | if (len < TRACE_GRAPH_PROCINFO_LENGTH) | 373 | if (len < TRACE_GRAPH_PROCINFO_LENGTH) |
353 | spaces = TRACE_GRAPH_PROCINFO_LENGTH - len; | 374 | spaces = TRACE_GRAPH_PROCINFO_LENGTH - len; |
354 | 375 | ||
355 | /* First spaces to align center */ | 376 | /* First spaces to align center */ |
356 | for (i = 0; i < spaces / 2; i++) { | 377 | for (i = 0; i < spaces / 2; i++) { |
357 | ret = trace_seq_printf(s, " "); | 378 | ret = trace_seq_printf(s, " "); |
358 | if (!ret) | 379 | if (!ret) |
359 | return TRACE_TYPE_PARTIAL_LINE; | 380 | return TRACE_TYPE_PARTIAL_LINE; |
360 | } | 381 | } |
361 | 382 | ||
362 | ret = trace_seq_printf(s, "%s-%s", comm, pid_str); | 383 | ret = trace_seq_printf(s, "%s-%s", comm, pid_str); |
363 | if (!ret) | 384 | if (!ret) |
364 | return TRACE_TYPE_PARTIAL_LINE; | 385 | return TRACE_TYPE_PARTIAL_LINE; |
365 | 386 | ||
366 | /* Last spaces to align center */ | 387 | /* Last spaces to align center */ |
367 | for (i = 0; i < spaces - (spaces / 2); i++) { | 388 | for (i = 0; i < spaces - (spaces / 2); i++) { |
368 | ret = trace_seq_printf(s, " "); | 389 | ret = trace_seq_printf(s, " "); |
369 | if (!ret) | 390 | if (!ret) |
370 | return TRACE_TYPE_PARTIAL_LINE; | 391 | return TRACE_TYPE_PARTIAL_LINE; |
371 | } | 392 | } |
372 | return TRACE_TYPE_HANDLED; | 393 | return TRACE_TYPE_HANDLED; |
373 | } | 394 | } |
374 | 395 | ||
375 | 396 | ||
376 | static enum print_line_t | 397 | static enum print_line_t |
377 | print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry) | 398 | print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry) |
378 | { | 399 | { |
379 | if (!trace_seq_putc(s, ' ')) | 400 | if (!trace_seq_putc(s, ' ')) |
380 | return 0; | 401 | return 0; |
381 | 402 | ||
382 | return trace_print_lat_fmt(s, entry); | 403 | return trace_print_lat_fmt(s, entry); |
383 | } | 404 | } |
384 | 405 | ||
/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	/* No per-cpu state to compare against: nothing to announce */
	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	/* Same task as the previous line on this cpu: no banner */
	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	/* -1 means "no pid recorded yet" for this cpu */
	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1) migration/0--1 => sshd-1755
 ------------------------------------------

 */
	ret = trace_seq_printf(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
442 | 463 | ||
/*
 * Look ahead in the ring buffer: if the very next event is the matching
 * TRACE_GRAPH_RET for @curr (same pid, same function), then @curr is a
 * "leaf" call and can be rendered on a single line.  Returns the
 * matching return entry (and advances past it), or NULL if @curr is
 * not a leaf.
 */
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = iter->buffer_iter[iter->cpu];

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
			event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
						 NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			data->ret = *next;
		}
	}

	/* Next event is not a return: @curr opened a nested scope */
	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	/* The return must belong to the same task and the same function */
	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
504 | 525 | ||
/* Signal a overhead of time execution to the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
	/* If duration disappear, we don't need anything */
	if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
		return 1;

	/*
	 * Non nested entry or return: -1 converts to the all-ones
	 * unsigned sentinel meaning "no duration for this line".
	 */
	if (duration == -1)
		return trace_seq_printf(s, " ");

	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 msecs */
		if (duration > 100000ULL)
			return trace_seq_printf(s, "! ");

		/* Duration exceeded 10 msecs */
		if (duration > 10000ULL)
			return trace_seq_printf(s, "+ ");
	}

	/* No overhead marker: keep the column aligned with blanks */
	return trace_seq_printf(s, " ");
}
529 | 550 | ||
530 | static int print_graph_abs_time(u64 t, struct trace_seq *s) | 551 | static int print_graph_abs_time(u64 t, struct trace_seq *s) |
531 | { | 552 | { |
532 | unsigned long usecs_rem; | 553 | unsigned long usecs_rem; |
533 | 554 | ||
534 | usecs_rem = do_div(t, NSEC_PER_SEC); | 555 | usecs_rem = do_div(t, NSEC_PER_SEC); |
535 | usecs_rem /= 1000; | 556 | usecs_rem /= 1000; |
536 | 557 | ||
537 | return trace_seq_printf(s, "%5lu.%06lu | ", | 558 | return trace_seq_printf(s, "%5lu.%06lu | ", |
538 | (unsigned long)t, usecs_rem); | 559 | (unsigned long)t, usecs_rem); |
539 | } | 560 | } |
540 | 561 | ||
/*
 * Draw the "==========>" / "<==========" markers around hard-irq
 * handler entry and exit.  Only addresses inside the __irqentry_text
 * section qualify; everything else returns TRACE_TYPE_UNHANDLED so the
 * normal output path runs instead.  The optional columns printed here
 * mirror the ones in print_graph_prologue() so the markers line up.
 */
static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	/* Absolute time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Entry marker points right (into the irq), exit points left */
	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_printf(s, "==========>");
	else
		ret = trace_seq_printf(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Don't close the duration column if haven't one */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		trace_seq_printf(s, " |");
	ret = trace_seq_printf(s, "\n");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}
598 | 619 | ||
/*
 * Format @duration (nanoseconds) as "<usecs>.<frac> us" padded to the
 * fixed 7-digit duration column.
 */
enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	/* do_div() leaves the usecs in duration and returns the ns remainder */
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print msecs */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/*
	 * Print nsecs (we don't want to exceed 7 numbers):
	 * the snprintf size (8 - len) truncates the fractional digits so
	 * that msecs digits + fraction digits never exceed 7 in total.
	 */
	if (len < 7) {
		snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_printf(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
639 | 660 | ||
640 | static enum print_line_t | 661 | static enum print_line_t |
641 | print_graph_duration(unsigned long long duration, struct trace_seq *s) | 662 | print_graph_duration(unsigned long long duration, struct trace_seq *s) |
642 | { | 663 | { |
643 | int ret; | 664 | int ret; |
644 | 665 | ||
645 | ret = trace_print_graph_duration(duration, s); | 666 | ret = trace_print_graph_duration(duration, s); |
646 | if (ret != TRACE_TYPE_HANDLED) | 667 | if (ret != TRACE_TYPE_HANDLED) |
647 | return ret; | 668 | return ret; |
648 | 669 | ||
649 | ret = trace_seq_printf(s, "| "); | 670 | ret = trace_seq_printf(s, "| "); |
650 | if (!ret) | 671 | if (!ret) |
651 | return TRACE_TYPE_PARTIAL_LINE; | 672 | return TRACE_TYPE_PARTIAL_LINE; |
652 | 673 | ||
653 | return TRACE_TYPE_HANDLED; | 674 | return TRACE_TYPE_HANDLED; |
654 | } | 675 | } |
655 | 676 | ||
/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	/* time spent inside the function, in nanoseconds */
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead */
	ret = print_graph_overhead(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = print_graph_duration(duration, s);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function: indent by call depth */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* leaf call renders as a single closed "func();" line */
	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
716 | 737 | ||
/*
 * Entry of a non-leaf function: open a "func() {" scope line.  The
 * matching close brace is printed later when the return event arrives.
 */
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function: indent by call depth */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}
768 | 789 | ||
/*
 * Print everything that precedes the function column for one graph
 * line: pid-change banner, irq markers, then the optional absolute
 * time, cpu, proc and latency columns selected by tracer_flags.
 *
 * Returns 0 on success and TRACE_TYPE_PARTIAL_LINE if the seq buffer
 * filled up; callers treat any non-zero value as failure.
 */
static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Absolute time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}
823 | 844 | ||
824 | static enum print_line_t | 845 | static enum print_line_t |
825 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | 846 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, |
826 | struct trace_iterator *iter) | 847 | struct trace_iterator *iter) |
827 | { | 848 | { |
828 | struct fgraph_data *data = iter->private; | 849 | struct fgraph_data *data = iter->private; |
829 | struct ftrace_graph_ent *call = &field->graph_ent; | 850 | struct ftrace_graph_ent *call = &field->graph_ent; |
830 | struct ftrace_graph_ret_entry *leaf_ret; | 851 | struct ftrace_graph_ret_entry *leaf_ret; |
831 | static enum print_line_t ret; | 852 | static enum print_line_t ret; |
832 | int cpu = iter->cpu; | 853 | int cpu = iter->cpu; |
833 | 854 | ||
834 | if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func)) | 855 | if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func)) |
835 | return TRACE_TYPE_PARTIAL_LINE; | 856 | return TRACE_TYPE_PARTIAL_LINE; |
836 | 857 | ||
837 | leaf_ret = get_return_for_leaf(iter, field); | 858 | leaf_ret = get_return_for_leaf(iter, field); |
838 | if (leaf_ret) | 859 | if (leaf_ret) |
839 | ret = print_graph_entry_leaf(iter, field, leaf_ret, s); | 860 | ret = print_graph_entry_leaf(iter, field, leaf_ret, s); |
840 | else | 861 | else |
841 | ret = print_graph_entry_nested(iter, field, s, cpu); | 862 | ret = print_graph_entry_nested(iter, field, s, cpu); |
842 | 863 | ||
843 | if (data) { | 864 | if (data) { |
844 | /* | 865 | /* |
845 | * If we failed to write our output, then we need to make | 866 | * If we failed to write our output, then we need to make |
846 | * note of it. Because we already consumed our entry. | 867 | * note of it. Because we already consumed our entry. |
847 | */ | 868 | */ |
848 | if (s->full) { | 869 | if (s->full) { |
849 | data->failed = 1; | 870 | data->failed = 1; |
850 | data->cpu = cpu; | 871 | data->cpu = cpu; |
851 | } else | 872 | } else |
852 | data->failed = 0; | 873 | data->failed = 0; |
853 | } | 874 | } |
854 | 875 | ||
855 | return ret; | 876 | return ret; |
856 | } | 877 | } |
857 | 878 | ||
858 | static enum print_line_t | 879 | static enum print_line_t |
859 | print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | 880 | print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, |
860 | struct trace_entry *ent, struct trace_iterator *iter) | 881 | struct trace_entry *ent, struct trace_iterator *iter) |
861 | { | 882 | { |
862 | unsigned long long duration = trace->rettime - trace->calltime; | 883 | unsigned long long duration = trace->rettime - trace->calltime; |
863 | struct fgraph_data *data = iter->private; | 884 | struct fgraph_data *data = iter->private; |
864 | pid_t pid = ent->pid; | 885 | pid_t pid = ent->pid; |
865 | int cpu = iter->cpu; | 886 | int cpu = iter->cpu; |
866 | int func_match = 1; | 887 | int func_match = 1; |
867 | int ret; | 888 | int ret; |
868 | int i; | 889 | int i; |
869 | 890 | ||
870 | if (data) { | 891 | if (data) { |
871 | struct fgraph_cpu_data *cpu_data; | 892 | struct fgraph_cpu_data *cpu_data; |
872 | int cpu = iter->cpu; | 893 | int cpu = iter->cpu; |
873 | 894 | ||
874 | cpu_data = per_cpu_ptr(data->cpu_data, cpu); | 895 | cpu_data = per_cpu_ptr(data->cpu_data, cpu); |
875 | 896 | ||
876 | /* | 897 | /* |
877 | * Comments display at + 1 to depth. This is the | 898 | * Comments display at + 1 to depth. This is the |
878 | * return from a function, we now want the comments | 899 | * return from a function, we now want the comments |
879 | * to display at the same level of the bracket. | 900 | * to display at the same level of the bracket. |
880 | */ | 901 | */ |
881 | cpu_data->depth = trace->depth - 1; | 902 | cpu_data->depth = trace->depth - 1; |
882 | 903 | ||
883 | if (trace->depth < FTRACE_RETFUNC_DEPTH) { | 904 | if (trace->depth < FTRACE_RETFUNC_DEPTH) { |
884 | if (cpu_data->enter_funcs[trace->depth] != trace->func) | 905 | if (cpu_data->enter_funcs[trace->depth] != trace->func) |
885 | func_match = 0; | 906 | func_match = 0; |
886 | cpu_data->enter_funcs[trace->depth] = 0; | 907 | cpu_data->enter_funcs[trace->depth] = 0; |
887 | } | 908 | } |
888 | } | 909 | } |
889 | 910 | ||
890 | if (print_graph_prologue(iter, s, 0, 0)) | 911 | if (print_graph_prologue(iter, s, 0, 0)) |
891 | return TRACE_TYPE_PARTIAL_LINE; | 912 | return TRACE_TYPE_PARTIAL_LINE; |
892 | 913 | ||
893 | /* Overhead */ | 914 | /* Overhead */ |
894 | ret = print_graph_overhead(duration, s); | 915 | ret = print_graph_overhead(duration, s); |
895 | if (!ret) | 916 | if (!ret) |
896 | return TRACE_TYPE_PARTIAL_LINE; | 917 | return TRACE_TYPE_PARTIAL_LINE; |
897 | 918 | ||
898 | /* Duration */ | 919 | /* Duration */ |
899 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { | 920 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { |
900 | ret = print_graph_duration(duration, s); | 921 | ret = print_graph_duration(duration, s); |
901 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 922 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
902 | return TRACE_TYPE_PARTIAL_LINE; | 923 | return TRACE_TYPE_PARTIAL_LINE; |
903 | } | 924 | } |
904 | 925 | ||
905 | /* Closing brace */ | 926 | /* Closing brace */ |
906 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { | 927 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { |
907 | ret = trace_seq_printf(s, " "); | 928 | ret = trace_seq_printf(s, " "); |
908 | if (!ret) | 929 | if (!ret) |
909 | return TRACE_TYPE_PARTIAL_LINE; | 930 | return TRACE_TYPE_PARTIAL_LINE; |
910 | } | 931 | } |
911 | 932 | ||
912 | /* | 933 | /* |
913 | * If the return function does not have a matching entry, | 934 | * If the return function does not have a matching entry, |
914 | * then the entry was lost. Instead of just printing | 935 | * then the entry was lost. Instead of just printing |
915 | * the '}' and letting the user guess what function this | 936 | * the '}' and letting the user guess what function this |
916 | * belongs to, write out the function name. | 937 | * belongs to, write out the function name. |
917 | */ | 938 | */ |
918 | if (func_match) { | 939 | if (func_match) { |
919 | ret = trace_seq_printf(s, "}\n"); | 940 | ret = trace_seq_printf(s, "}\n"); |
920 | if (!ret) | 941 | if (!ret) |
921 | return TRACE_TYPE_PARTIAL_LINE; | 942 | return TRACE_TYPE_PARTIAL_LINE; |
922 | } else { | 943 | } else { |
923 | ret = trace_seq_printf(s, "} (%ps)\n", (void *)trace->func); | 944 | ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func); |
924 | if (!ret) | 945 | if (!ret) |
925 | return TRACE_TYPE_PARTIAL_LINE; | 946 | return TRACE_TYPE_PARTIAL_LINE; |
926 | } | 947 | } |
927 | 948 | ||
928 | /* Overrun */ | 949 | /* Overrun */ |
929 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) { | 950 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) { |
930 | ret = trace_seq_printf(s, " (Overruns: %lu)\n", | 951 | ret = trace_seq_printf(s, " (Overruns: %lu)\n", |
931 | trace->overrun); | 952 | trace->overrun); |
932 | if (!ret) | 953 | if (!ret) |
933 | return TRACE_TYPE_PARTIAL_LINE; | 954 | return TRACE_TYPE_PARTIAL_LINE; |
934 | } | 955 | } |
935 | 956 | ||
936 | ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid); | 957 | ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid); |
937 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 958 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
938 | return TRACE_TYPE_PARTIAL_LINE; | 959 | return TRACE_TYPE_PARTIAL_LINE; |
939 | 960 | ||
940 | return TRACE_TYPE_HANDLED; | 961 | return TRACE_TYPE_HANDLED; |
941 | } | 962 | } |
942 | 963 | ||
943 | static enum print_line_t | 964 | static enum print_line_t |
944 | print_graph_comment(struct trace_seq *s, struct trace_entry *ent, | 965 | print_graph_comment(struct trace_seq *s, struct trace_entry *ent, |
945 | struct trace_iterator *iter) | 966 | struct trace_iterator *iter) |
946 | { | 967 | { |
947 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); | 968 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); |
948 | struct fgraph_data *data = iter->private; | 969 | struct fgraph_data *data = iter->private; |
949 | struct trace_event *event; | 970 | struct trace_event *event; |
950 | int depth = 0; | 971 | int depth = 0; |
951 | int ret; | 972 | int ret; |
952 | int i; | 973 | int i; |
953 | 974 | ||
954 | if (data) | 975 | if (data) |
955 | depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth; | 976 | depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth; |
956 | 977 | ||
957 | if (print_graph_prologue(iter, s, 0, 0)) | 978 | if (print_graph_prologue(iter, s, 0, 0)) |
958 | return TRACE_TYPE_PARTIAL_LINE; | 979 | return TRACE_TYPE_PARTIAL_LINE; |
959 | 980 | ||
960 | /* No overhead */ | 981 | /* No overhead */ |
961 | ret = print_graph_overhead(-1, s); | 982 | ret = print_graph_overhead(-1, s); |
962 | if (!ret) | 983 | if (!ret) |
963 | return TRACE_TYPE_PARTIAL_LINE; | 984 | return TRACE_TYPE_PARTIAL_LINE; |
964 | 985 | ||
965 | /* No time */ | 986 | /* No time */ |
966 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { | 987 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { |
967 | ret = trace_seq_printf(s, " | "); | 988 | ret = trace_seq_printf(s, " | "); |
968 | if (!ret) | 989 | if (!ret) |
969 | return TRACE_TYPE_PARTIAL_LINE; | 990 | return TRACE_TYPE_PARTIAL_LINE; |
970 | } | 991 | } |
971 | 992 | ||
972 | /* Indentation */ | 993 | /* Indentation */ |
973 | if (depth > 0) | 994 | if (depth > 0) |
974 | for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) { | 995 | for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) { |
975 | ret = trace_seq_printf(s, " "); | 996 | ret = trace_seq_printf(s, " "); |
976 | if (!ret) | 997 | if (!ret) |
977 | return TRACE_TYPE_PARTIAL_LINE; | 998 | return TRACE_TYPE_PARTIAL_LINE; |
978 | } | 999 | } |
979 | 1000 | ||
980 | /* The comment */ | 1001 | /* The comment */ |
981 | ret = trace_seq_printf(s, "/* "); | 1002 | ret = trace_seq_printf(s, "/* "); |
982 | if (!ret) | 1003 | if (!ret) |
983 | return TRACE_TYPE_PARTIAL_LINE; | 1004 | return TRACE_TYPE_PARTIAL_LINE; |
984 | 1005 | ||
985 | switch (iter->ent->type) { | 1006 | switch (iter->ent->type) { |
986 | case TRACE_BPRINT: | 1007 | case TRACE_BPRINT: |
987 | ret = trace_print_bprintk_msg_only(iter); | 1008 | ret = trace_print_bprintk_msg_only(iter); |
988 | if (ret != TRACE_TYPE_HANDLED) | 1009 | if (ret != TRACE_TYPE_HANDLED) |
989 | return ret; | 1010 | return ret; |
990 | break; | 1011 | break; |
991 | case TRACE_PRINT: | 1012 | case TRACE_PRINT: |
992 | ret = trace_print_printk_msg_only(iter); | 1013 | ret = trace_print_printk_msg_only(iter); |
993 | if (ret != TRACE_TYPE_HANDLED) | 1014 | if (ret != TRACE_TYPE_HANDLED) |
994 | return ret; | 1015 | return ret; |
995 | break; | 1016 | break; |
996 | default: | 1017 | default: |
997 | event = ftrace_find_event(ent->type); | 1018 | event = ftrace_find_event(ent->type); |
998 | if (!event) | 1019 | if (!event) |
999 | return TRACE_TYPE_UNHANDLED; | 1020 | return TRACE_TYPE_UNHANDLED; |
1000 | 1021 | ||
1001 | ret = event->trace(iter, sym_flags); | 1022 | ret = event->trace(iter, sym_flags); |
1002 | if (ret != TRACE_TYPE_HANDLED) | 1023 | if (ret != TRACE_TYPE_HANDLED) |
1003 | return ret; | 1024 | return ret; |
1004 | } | 1025 | } |
1005 | 1026 | ||
1006 | /* Strip ending newline */ | 1027 | /* Strip ending newline */ |
1007 | if (s->buffer[s->len - 1] == '\n') { | 1028 | if (s->buffer[s->len - 1] == '\n') { |
1008 | s->buffer[s->len - 1] = '\0'; | 1029 | s->buffer[s->len - 1] = '\0'; |
1009 | s->len--; | 1030 | s->len--; |
1010 | } | 1031 | } |
1011 | 1032 | ||
1012 | ret = trace_seq_printf(s, " */\n"); | 1033 | ret = trace_seq_printf(s, " */\n"); |
1013 | if (!ret) | 1034 | if (!ret) |
1014 | return TRACE_TYPE_PARTIAL_LINE; | 1035 | return TRACE_TYPE_PARTIAL_LINE; |
1015 | 1036 | ||
1016 | return TRACE_TYPE_HANDLED; | 1037 | return TRACE_TYPE_HANDLED; |
1017 | } | 1038 | } |
1018 | 1039 | ||
1019 | 1040 | ||
/*
 * Top-level print routine for the function graph tracer: dispatch the
 * current event to the entry, return or comment printer.
 *
 * Also handles replaying an entry whose previous output attempt failed
 * (data->failed), in which case it may return TRACE_TYPE_NO_CONSUME so
 * the current event gets printed again on the next call.
 */
enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	/* This event was already emitted as part of a failed-entry replay. */
	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		/* Temporarily switch to the CPU the failed entry came from. */
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			/* Mark the duplicate on that CPU to be skipped later. */
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter);
	}
	default:
		/* Anything else shows up as a comment in the graph. */
		return print_graph_comment(s, entry, iter);
	}

	return TRACE_TYPE_HANDLED;
}
1075 | 1096 | ||
/*
 * Print the latency-format legend above the graph headers. The legend
 * is shifted right by the combined width of whichever optional columns
 * (absolute time, CPU, task/pid) are enabled, using a prefix of blanks.
 *
 * NOTE(review): the exact run lengths inside the format strings were
 * collapsed in this rendering; spacing reconstructed from the per-line
 * width comments and upstream — verify against the original file.
 */
static void print_lat_header(struct seq_file *s)
{
	static const char spaces[] = "                " /* 16 spaces */
		"    " /* 4 spaces */
		"                 "; /* 17 spaces */
	int size = 0;

	/* Each enabled column widens the blank prefix by its field width. */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| / _-=> lock-depth      \n", size, spaces);
	seq_printf(s, "#%.*s|||| /                     \n", size, spaces);
}
1097 | 1118 | ||
/*
 * Print the two-line column header for the graph output, matching
 * whichever optional columns are enabled in tracer_flags. When the
 * latency format is active, the irq/preempt legend is printed first
 * and a "|||||" flags column is inserted.
 *
 * NOTE(review): run lengths inside the header strings were collapsed
 * in this rendering; spacing reconstructed from upstream — verify.
 */
static void print_graph_headers(struct seq_file *s)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s);

	/* 1st line */
	seq_printf(s, "#");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " CPU");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID       ");
	if (lat)
		seq_printf(s, "|||||");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "#");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " |  ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "   |    |        ");
	if (lat)
		seq_printf(s, "|||||");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}
1133 | 1154 | ||
1134 | static void graph_trace_open(struct trace_iterator *iter) | 1155 | static void graph_trace_open(struct trace_iterator *iter) |
1135 | { | 1156 | { |
1136 | /* pid and depth on the last trace processed */ | 1157 | /* pid and depth on the last trace processed */ |
1137 | struct fgraph_data *data; | 1158 | struct fgraph_data *data; |
1138 | int cpu; | 1159 | int cpu; |
1139 | 1160 | ||
1140 | iter->private = NULL; | 1161 | iter->private = NULL; |
1141 | 1162 | ||
1142 | data = kzalloc(sizeof(*data), GFP_KERNEL); | 1163 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
1143 | if (!data) | 1164 | if (!data) |
1144 | goto out_err; | 1165 | goto out_err; |
1145 | 1166 | ||
1146 | data->cpu_data = alloc_percpu(struct fgraph_cpu_data); | 1167 | data->cpu_data = alloc_percpu(struct fgraph_cpu_data); |
1147 | if (!data->cpu_data) | 1168 | if (!data->cpu_data) |
1148 | goto out_err_free; | 1169 | goto out_err_free; |
1149 | 1170 | ||
1150 | for_each_possible_cpu(cpu) { | 1171 | for_each_possible_cpu(cpu) { |
1151 | pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); | 1172 | pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); |
1152 | int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth); | 1173 | int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth); |
1153 | int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore); | 1174 | int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore); |
1154 | *pid = -1; | 1175 | *pid = -1; |
1155 | *depth = 0; | 1176 | *depth = 0; |
1156 | *ignore = 0; | 1177 | *ignore = 0; |
1157 | } | 1178 | } |
1158 | 1179 | ||
1159 | iter->private = data; | 1180 | iter->private = data; |
1160 | 1181 | ||
1161 | return; | 1182 | return; |
1162 | 1183 | ||
1163 | out_err_free: | 1184 | out_err_free: |
1164 | kfree(data); | 1185 | kfree(data); |
1165 | out_err: | 1186 | out_err: |
1166 | pr_warning("function graph tracer: not enough memory\n"); | 1187 | pr_warning("function graph tracer: not enough memory\n"); |
1167 | } | 1188 | } |
1168 | 1189 | ||
1169 | static void graph_trace_close(struct trace_iterator *iter) | 1190 | static void graph_trace_close(struct trace_iterator *iter) |
1170 | { | 1191 | { |
1171 | struct fgraph_data *data = iter->private; | 1192 | struct fgraph_data *data = iter->private; |
1172 | 1193 | ||
1173 | if (data) { | 1194 | if (data) { |
1174 | free_percpu(data->cpu_data); | 1195 | free_percpu(data->cpu_data); |
1175 | kfree(data); | 1196 | kfree(data); |
1176 | } | 1197 | } |
1177 | } | 1198 | } |
1178 | 1199 | ||
/* Tracer registration record for the function graph tracer. */
static struct tracer graph_trace __read_mostly = {
	.name		= "function_graph",
	/* open/close are shared with the pipe paths: both need the per-cpu state */
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.wait_pipe	= poll_wait_pipe,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};
1195 | 1216 | ||
static __init int init_graph_trace(void)
{
	/* Widest decimal width any CPU number can need, for column alignment. */
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);
1204 | 1225 |