Blame view
kernel/trace/trace_sched_wakeup.c
14 KB
352ad25aa ftrace: tracer fo... |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 |
/* * trace task wakeup timings * * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> * * Based on code from the latency_tracer, that is: * * Copyright (C) 2004-2006 Ingo Molnar * Copyright (C) 2004 William Lee Irwin III */ #include <linux/module.h> #include <linux/fs.h> #include <linux/debugfs.h> #include <linux/kallsyms.h> #include <linux/uaccess.h> #include <linux/ftrace.h> |
ad8d75fff tracing/events: m... |
18 |
#include <trace/events/sched.h> |
352ad25aa ftrace: tracer fo... |
19 20 21 22 23 24 25 26 |
#include "trace.h"

/* The trace_array instance this tracer records into. */
static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

/* Task currently being traced from wakeup to schedule-in (holds a ref). */
static struct task_struct	*wakeup_task;
/* CPU the wakeup target was woken on (where the latency trace lives). */
static int			wakeup_cpu;
/* CPU the wakeup target currently resides on (tracks migration). */
static int			wakeup_current_cpu;
/* Priority of the traced task; -1 (UINT_MAX) means "trace anything". */
static unsigned			wakeup_prio = -1;
/* Non-zero when operating as the "wakeup_rt" tracer (RT tasks only). */
static int			wakeup_rt;

/* Serializes wakeup_task/wakeup_cpu/wakeup_prio updates across CPUs. */
static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
static void wakeup_graph_return(struct ftrace_graph_ret *trace);

/* Saved TRACE_ITER_LATENCY_FMT state, restored when the tracer exits. */
static int save_lat_flag;

#define TRACE_DISPLAY_GRAPH	1

static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* display latency trace as call graph */
	{ TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	.val  = 0,
	.opts = trace_opts,
};

#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)
606576ce8 ftrace: rename FT... |
56 |
#ifdef CONFIG_FUNCTION_TRACER

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, and preemption
 * is disabled and data->disabled is incremented.
 * 0 if the trace is to be ignored, and preemption
 * is not disabled and data->disabled is
 * kept the same.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    int *pc)
{
	long disabled;
	int cpu;

	/* Nothing to trace until a wakeup target has been latched. */
	if (likely(!wakeup_task))
		return 0;

	*pc = preempt_count();
	preempt_disable_notrace();

	/* Only trace on the CPU the wakeup target currently lives on. */
	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = tr->data[cpu];
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, flags, pc);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = wakeup_tracer_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};
#endif /* CONFIG_FUNCTION_TRACER */
7495a5bea tracing: Graph su... |
131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 |
/*
 * Register either the plain function tracer or the function-graph
 * tracer for the wakeup latency trace.  Enables tracer_enabled only
 * when registration succeeded and tracing is globally enabled.
 */
static int start_func_tracer(int graph)
{
	int ret;

	if (!graph)
		ret = register_ftrace_function(&trace_ops);
	else
		ret = register_ftrace_graph(&wakeup_graph_return,
					    &wakeup_graph_entry);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

/* Counterpart of start_func_tracer(): disable and unregister. */
static void stop_func_tracer(int graph)
{
	tracer_enabled = 0;

	if (!graph)
		unregister_ftrace_function(&trace_ops);
	else
		unregister_ftrace_graph();
}
7495a5bea tracing: Graph su... |
159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 |
#ifdef CONFIG_FUNCTION_GRAPH_TRACER static int wakeup_set_flag(u32 old_flags, u32 bit, int set) { if (!(bit & TRACE_DISPLAY_GRAPH)) return -EINVAL; if (!(is_graph() ^ set)) return 0; stop_func_tracer(!set); wakeup_reset(wakeup_trace); tracing_max_latency = 0; return start_func_tracer(set); } static int wakeup_graph_entry(struct ftrace_graph_ent *trace) { struct trace_array *tr = wakeup_trace; struct trace_array_cpu *data; unsigned long flags; |
542181d37 tracing: Use one ... |
182 |
int pc, ret = 0; |
7495a5bea tracing: Graph su... |
183 |
|
542181d37 tracing: Use one ... |
184 |
if (!func_prolog_preempt_disable(tr, &data, &pc)) |
7495a5bea tracing: Graph su... |
185 |
return 0; |
7495a5bea tracing: Graph su... |
186 187 |
local_save_flags(flags); ret = __trace_graph_entry(tr, trace, flags, pc); |
7495a5bea tracing: Graph su... |
188 |
atomic_dec(&data->disabled); |
7495a5bea tracing: Graph su... |
189 |
preempt_enable_notrace(); |
542181d37 tracing: Use one ... |
190 |
|
7495a5bea tracing: Graph su... |
191 192 193 194 195 196 197 198 |
return ret; } static void wakeup_graph_return(struct ftrace_graph_ret *trace) { struct trace_array *tr = wakeup_trace; struct trace_array_cpu *data; unsigned long flags; |
542181d37 tracing: Use one ... |
199 |
int pc; |
7495a5bea tracing: Graph su... |
200 |
|
542181d37 tracing: Use one ... |
201 |
if (!func_prolog_preempt_disable(tr, &data, &pc)) |
7495a5bea tracing: Graph su... |
202 |
return; |
7495a5bea tracing: Graph su... |
203 204 |
local_save_flags(flags); __trace_graph_return(tr, trace, flags, pc); |
7495a5bea tracing: Graph su... |
205 |
atomic_dec(&data->disabled); |
7495a5bea tracing: Graph su... |
206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 |
preempt_enable_notrace(); return; } static void wakeup_trace_open(struct trace_iterator *iter) { if (is_graph()) graph_trace_open(iter); } static void wakeup_trace_close(struct trace_iterator *iter) { if (iter->private) graph_trace_close(iter); } |
321e68b09 tracing, function... |
221 222 223 |
#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \ TRACE_GRAPH_PRINT_ABS_TIME | \ TRACE_GRAPH_PRINT_DURATION) |
7495a5bea tracing: Graph su... |
224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 |
static enum print_line_t wakeup_print_line(struct trace_iterator *iter) { /* * In graph mode call the graph tracer output function, * otherwise go with the TRACE_FN event handler */ if (is_graph()) return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS); return TRACE_TYPE_UNHANDLED; } static void wakeup_print_header(struct seq_file *s) { if (is_graph()) print_graph_headers_flags(s, GRAPH_TRACER_FLAGS); else trace_default_header(s); } static void __trace_function(struct trace_array *tr, unsigned long ip, unsigned long parent_ip, unsigned long flags, int pc) { if (is_graph()) trace_graph_function(tr, ip, parent_ip, flags, pc); else trace_function(tr, ip, parent_ip, flags, pc); } #else #define __trace_function trace_function static int wakeup_set_flag(u32 old_flags, u32 bit, int set) { return -EINVAL; } static int wakeup_graph_entry(struct ftrace_graph_ent *trace) { return -1; } static enum print_line_t wakeup_print_line(struct trace_iterator *iter) { return TRACE_TYPE_UNHANDLED; } static void wakeup_graph_return(struct ftrace_graph_ret *trace) { } |
7495a5bea tracing: Graph su... |
274 275 |
static void wakeup_trace_open(struct trace_iterator *iter) { } static void wakeup_trace_close(struct trace_iterator *iter) { } |
7e9a49ef5 tracing/latency: ... |
276 277 278 279 280 281 282 283 284 285 286 287 |
#ifdef CONFIG_FUNCTION_TRACER static void wakeup_print_header(struct seq_file *s) { trace_default_header(s); } #else static void wakeup_print_header(struct seq_file *s) { trace_latency_header(s); } #endif /* CONFIG_FUNCTION_TRACER */ |
7495a5bea tracing: Graph su... |
288 |
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
352ad25aa ftrace: tracer fo... |
289 290 291 |
/* * Should this new latency be reported/recorded? */ |
e309b41dd ftrace: remove no... |
292 |
static int report_latency(cycle_t delta) |
352ad25aa ftrace: tracer fo... |
293 294 295 296 297 298 299 300 301 302 |
{ if (tracing_thresh) { if (delta < tracing_thresh) return 0; } else { if (delta <= tracing_max_latency) return 0; } return 1; } |
38516ab59 tracing: Let trac... |
303 304 |
static void probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu) |
478142c39 tracing: do not g... |
305 306 307 308 309 310 |
{ if (task != wakeup_task) return; wakeup_current_cpu = cpu; } |
5b82a1b08 Port ftrace to ma... |
311 |
static void notrace |
38516ab59 tracing: Let trac... |
312 313 |
probe_wakeup_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next) |
352ad25aa ftrace: tracer fo... |
314 |
{ |
352ad25aa ftrace: tracer fo... |
315 316 317 318 319 |
struct trace_array_cpu *data; cycle_t T0, T1, delta; unsigned long flags; long disabled; int cpu; |
38697053f ftrace: preempt d... |
320 |
int pc; |
352ad25aa ftrace: tracer fo... |
321 |
|
b07c3f193 ftrace: port to t... |
322 |
tracing_record_cmdline(prev); |
352ad25aa ftrace: tracer fo... |
323 324 325 326 327 328 329 330 331 332 333 334 335 336 |
if (unlikely(!tracer_enabled)) return; /* * When we start a new trace, we set wakeup_task to NULL * and then set tracer_enabled = 1. We want to make sure * that another CPU does not see the tracer_enabled = 1 * and the wakeup_task with an older task, that might * actually be the same as next. */ smp_rmb(); if (next != wakeup_task) return; |
38697053f ftrace: preempt d... |
337 |
pc = preempt_count(); |
352ad25aa ftrace: tracer fo... |
338 339 |
/* disable local data, not wakeup_cpu data */ cpu = raw_smp_processor_id(); |
b07c3f193 ftrace: port to t... |
340 |
disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled); |
352ad25aa ftrace: tracer fo... |
341 342 |
if (likely(disabled != 1)) goto out; |
e59494f44 ftrace: fix 4d370... |
343 |
local_irq_save(flags); |
0199c4e68 locking: Convert ... |
344 |
arch_spin_lock(&wakeup_lock); |
352ad25aa ftrace: tracer fo... |
345 346 347 348 |
/* We could race with grabbing wakeup_lock */ if (unlikely(!tracer_enabled || next != wakeup_task)) goto out_unlock; |
9be24414a tracing/wakeup: m... |
349 350 |
/* The task we are waiting for is waking up */ data = wakeup_trace->data[wakeup_cpu]; |
7495a5bea tracing: Graph su... |
351 |
__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc); |
7be421510 trace: Remove unu... |
352 |
tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc); |
352ad25aa ftrace: tracer fo... |
353 |
|
352ad25aa ftrace: tracer fo... |
354 |
T0 = data->preempt_timestamp; |
750ed1a40 ftrace: timestamp... |
355 |
T1 = ftrace_now(cpu); |
352ad25aa ftrace: tracer fo... |
356 357 358 359 |
delta = T1-T0; if (!report_latency(delta)) goto out_unlock; |
b5130b1e7 tracing: do not u... |
360 361 362 363 |
if (likely(!is_tracing_stopped())) { tracing_max_latency = delta; update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu); } |
352ad25aa ftrace: tracer fo... |
364 |
|
352ad25aa ftrace: tracer fo... |
365 |
out_unlock: |
b07c3f193 ftrace: port to t... |
366 |
__wakeup_reset(wakeup_trace); |
0199c4e68 locking: Convert ... |
367 |
arch_spin_unlock(&wakeup_lock); |
e59494f44 ftrace: fix 4d370... |
368 |
local_irq_restore(flags); |
352ad25aa ftrace: tracer fo... |
369 |
out: |
b07c3f193 ftrace: port to t... |
370 |
atomic_dec(&wakeup_trace->data[cpu]->disabled); |
5b82a1b08 Port ftrace to ma... |
371 |
} |
e309b41dd ftrace: remove no... |
372 |
static void __wakeup_reset(struct trace_array *tr) |
352ad25aa ftrace: tracer fo... |
373 |
{ |
352ad25aa ftrace: tracer fo... |
374 375 376 377 378 379 380 381 |
wakeup_cpu = -1; wakeup_prio = -1; if (wakeup_task) put_task_struct(wakeup_task); wakeup_task = NULL; } |
e309b41dd ftrace: remove no... |
382 |
static void wakeup_reset(struct trace_array *tr) |
352ad25aa ftrace: tracer fo... |
383 384 |
{ unsigned long flags; |
2f26ebd54 tracing: use time... |
385 |
tracing_reset_online_cpus(tr); |
e59494f44 ftrace: fix 4d370... |
386 |
local_irq_save(flags); |
0199c4e68 locking: Convert ... |
387 |
arch_spin_lock(&wakeup_lock); |
352ad25aa ftrace: tracer fo... |
388 |
__wakeup_reset(tr); |
0199c4e68 locking: Convert ... |
389 |
arch_spin_unlock(&wakeup_lock); |
e59494f44 ftrace: fix 4d370... |
390 |
local_irq_restore(flags); |
352ad25aa ftrace: tracer fo... |
391 |
} |
e309b41dd ftrace: remove no... |
392 |
static void |
38516ab59 tracing: Let trac... |
393 |
probe_wakeup(void *ignore, struct task_struct *p, int success) |
352ad25aa ftrace: tracer fo... |
394 |
{ |
f8ec1062f wakeup-tracer: sh... |
395 |
struct trace_array_cpu *data; |
352ad25aa ftrace: tracer fo... |
396 397 398 |
int cpu = smp_processor_id(); unsigned long flags; long disabled; |
38697053f ftrace: preempt d... |
399 |
int pc; |
352ad25aa ftrace: tracer fo... |
400 |
|
b07c3f193 ftrace: port to t... |
401 402 403 404 405 |
if (likely(!tracer_enabled)) return; tracing_record_cmdline(p); tracing_record_cmdline(current); |
3244351c3 trace: separate o... |
406 |
if ((wakeup_rt && !rt_task(p)) || |
352ad25aa ftrace: tracer fo... |
407 |
p->prio >= wakeup_prio || |
b07c3f193 ftrace: port to t... |
408 |
p->prio >= current->prio) |
352ad25aa ftrace: tracer fo... |
409 |
return; |
38697053f ftrace: preempt d... |
410 |
pc = preempt_count(); |
b07c3f193 ftrace: port to t... |
411 |
disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled); |
352ad25aa ftrace: tracer fo... |
412 413 414 415 |
if (unlikely(disabled != 1)) goto out; /* interrupts should be off from try_to_wake_up */ |
0199c4e68 locking: Convert ... |
416 |
arch_spin_lock(&wakeup_lock); |
352ad25aa ftrace: tracer fo... |
417 418 419 420 421 422 |
/* check for races. */ if (!tracer_enabled || p->prio >= wakeup_prio) goto out_locked; /* reset the trace */ |
b07c3f193 ftrace: port to t... |
423 |
__wakeup_reset(wakeup_trace); |
352ad25aa ftrace: tracer fo... |
424 425 |
wakeup_cpu = task_cpu(p); |
478142c39 tracing: do not g... |
426 |
wakeup_current_cpu = wakeup_cpu; |
352ad25aa ftrace: tracer fo... |
427 428 429 430 431 432 |
wakeup_prio = p->prio; wakeup_task = p; get_task_struct(wakeup_task); local_save_flags(flags); |
f8ec1062f wakeup-tracer: sh... |
433 434 |
data = wakeup_trace->data[wakeup_cpu]; data->preempt_timestamp = ftrace_now(cpu); |
7be421510 trace: Remove unu... |
435 |
tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc); |
301fd748e tracing: remove C... |
436 437 438 439 440 441 |
/* * We must be careful in using CALLER_ADDR2. But since wake_up * is not called by an assembly function (where as schedule is) * it should be safe to use it here. */ |
7495a5bea tracing: Graph su... |
442 |
__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc); |
352ad25aa ftrace: tracer fo... |
443 444 |
out_locked: |
0199c4e68 locking: Convert ... |
445 |
arch_spin_unlock(&wakeup_lock); |
352ad25aa ftrace: tracer fo... |
446 |
out: |
b07c3f193 ftrace: port to t... |
447 |
atomic_dec(&wakeup_trace->data[cpu]->disabled); |
352ad25aa ftrace: tracer fo... |
448 |
} |
e309b41dd ftrace: remove no... |
449 |
static void start_wakeup_tracer(struct trace_array *tr) |
352ad25aa ftrace: tracer fo... |
450 |
{ |
5b82a1b08 Port ftrace to ma... |
451 |
int ret; |
38516ab59 tracing: Let trac... |
452 |
ret = register_trace_sched_wakeup(probe_wakeup, NULL); |
5b82a1b08 Port ftrace to ma... |
453 |
if (ret) { |
b07c3f193 ftrace: port to t... |
454 |
pr_info("wakeup trace: Couldn't activate tracepoint" |
5b82a1b08 Port ftrace to ma... |
455 456 457 458 |
" probe to kernel_sched_wakeup "); return; } |
38516ab59 tracing: Let trac... |
459 |
ret = register_trace_sched_wakeup_new(probe_wakeup, NULL); |
5b82a1b08 Port ftrace to ma... |
460 |
if (ret) { |
b07c3f193 ftrace: port to t... |
461 |
pr_info("wakeup trace: Couldn't activate tracepoint" |
5b82a1b08 Port ftrace to ma... |
462 463 464 465 |
" probe to kernel_sched_wakeup_new "); goto fail_deprobe; } |
38516ab59 tracing: Let trac... |
466 |
ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL); |
5b82a1b08 Port ftrace to ma... |
467 |
if (ret) { |
b07c3f193 ftrace: port to t... |
468 |
pr_info("sched trace: Couldn't activate tracepoint" |
73d8b8bc4 tracing: fix typi... |
469 470 |
" probe to kernel_sched_switch "); |
5b82a1b08 Port ftrace to ma... |
471 472 |
goto fail_deprobe_wake_new; } |
38516ab59 tracing: Let trac... |
473 |
ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL); |
478142c39 tracing: do not g... |
474 475 476 477 478 479 |
if (ret) { pr_info("wakeup trace: Couldn't activate tracepoint" " probe to kernel_sched_migrate_task "); return; } |
352ad25aa ftrace: tracer fo... |
480 481 482 483 484 485 486 487 488 489 |
wakeup_reset(tr); /* * Don't let the tracer_enabled = 1 show up before * the wakeup_task is reset. This may be overkill since * wakeup_reset does a spin_unlock after setting the * wakeup_task to NULL, but I want to be safe. * This is a slow path anyway. */ smp_wmb(); |
7495a5bea tracing: Graph su... |
490 491 492 |
if (start_func_tracer(is_graph())) printk(KERN_ERR "failed to start wakeup tracer "); |
ad591240c ftrace: start wak... |
493 |
|
352ad25aa ftrace: tracer fo... |
494 |
return; |
5b82a1b08 Port ftrace to ma... |
495 |
fail_deprobe_wake_new: |
38516ab59 tracing: Let trac... |
496 |
unregister_trace_sched_wakeup_new(probe_wakeup, NULL); |
5b82a1b08 Port ftrace to ma... |
497 |
fail_deprobe: |
38516ab59 tracing: Let trac... |
498 |
unregister_trace_sched_wakeup(probe_wakeup, NULL); |
352ad25aa ftrace: tracer fo... |
499 |
} |
e309b41dd ftrace: remove no... |
500 |
static void stop_wakeup_tracer(struct trace_array *tr) |
352ad25aa ftrace: tracer fo... |
501 502 |
{ tracer_enabled = 0; |
7495a5bea tracing: Graph su... |
503 |
stop_func_tracer(is_graph()); |
38516ab59 tracing: Let trac... |
504 505 506 507 |
unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL); unregister_trace_sched_wakeup_new(probe_wakeup, NULL); unregister_trace_sched_wakeup(probe_wakeup, NULL); unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL); |
352ad25aa ftrace: tracer fo... |
508 |
} |
3244351c3 trace: separate o... |
509 |
static int __wakeup_tracer_init(struct trace_array *tr) |
352ad25aa ftrace: tracer fo... |
510 |
{ |
e9d25fe6e tracing: have lat... |
511 512 |
save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT; trace_flags |= TRACE_ITER_LATENCY_FMT; |
745b1626d trace: set max la... |
513 |
tracing_max_latency = 0; |
352ad25aa ftrace: tracer fo... |
514 |
wakeup_trace = tr; |
c76f06945 ftrace: remove tr... |
515 |
start_wakeup_tracer(tr); |
1c80025a4 tracing/ftrace: c... |
516 |
return 0; |
352ad25aa ftrace: tracer fo... |
517 |
} |
3244351c3 trace: separate o... |
518 519 520 521 522 523 524 525 526 527 528 |
/* "wakeup" tracer: trace wakeups of any task. */
static int wakeup_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

/* "wakeup_rt" tracer: restrict tracing to RT tasks. */
static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}
e309b41dd ftrace: remove no... |
529 |
static void wakeup_tracer_reset(struct trace_array *tr) |
352ad25aa ftrace: tracer fo... |
530 |
{ |
c76f06945 ftrace: remove tr... |
531 532 533 |
stop_wakeup_tracer(tr); /* make sure we put back any tasks we are tracing */ wakeup_reset(tr); |
e9d25fe6e tracing: have lat... |
534 535 536 |
if (!save_lat_flag) trace_flags &= ~TRACE_ITER_LATENCY_FMT; |
352ad25aa ftrace: tracer fo... |
537 |
} |
9036990d4 ftrace: restructu... |
538 539 540 541 |
static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= 1,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
	.set_flag	= wakeup_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.use_max_tr	= 1,
};
3244351c3 trace: separate o... |
568 569 570 571 572 573 574 |
static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.wait_pipe	= poll_wait_pipe,
	.print_max	= 1,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
	.set_flag	= wakeup_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.use_max_tr	= 1,
};
352ad25aa ftrace: tracer fo... |
588 589 590 591 592 593 594 |
__init static int init_wakeup_tracer(void) { int ret; ret = register_tracer(&wakeup_tracer); if (ret) return ret; |
3244351c3 trace: separate o... |
595 596 597 |
ret = register_tracer(&wakeup_rt_tracer); if (ret) return ret; |
352ad25aa ftrace: tracer fo... |
598 599 600 |
return 0; } device_initcall(init_wakeup_tracer); |