Blame view
kernel/trace/trace_irqsoff.c
17.6 KB
81d68a96a
|
1 |
/* |
73d8b8bc4
|
2 |
* trace irqs off critical timings |
81d68a96a
|
3 4 5 6 7 8 9 |
* * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> * * From code in the latency_tracer, that is: * * Copyright (C) 2004-2006 Ingo Molnar |
6d49e352a
|
10 |
* Copyright (C) 2004 Nadia Yvette Chambers |
81d68a96a
|
11 12 |
*/ #include <linux/kallsyms.h> |
81d68a96a
|
13 14 15 |
#include <linux/uaccess.h> #include <linux/module.h> #include <linux/ftrace.h> |
81d68a96a
|
16 17 18 19 20 |
#include "trace.h" static struct trace_array *irqsoff_trace __read_mostly; static int tracer_enabled __read_mostly; |
6cd8a4bb2
|
21 |
static DEFINE_PER_CPU(int, tracing_cpu); |
5389f6fad
|
22 |
static DEFINE_RAW_SPINLOCK(max_trace_lock); |
89b2f9781
|
23 |
|
6cd8a4bb2
|
24 25 26 27 28 29 |
enum { TRACER_IRQS_OFF = (1 << 1), TRACER_PREEMPT_OFF = (1 << 2), }; static int trace_type __read_mostly; |
613f04a0f
|
30 |
static int save_flags; |
e9d25fe6e
|
31 |
|
62b915f10
|
32 33 |
static void stop_irqsoff_tracer(struct trace_array *tr, int graph); static int start_irqsoff_tracer(struct trace_array *tr, int graph); |
6cd8a4bb2
|
34 |
#ifdef CONFIG_PREEMPT_TRACER
/*
 * True when the preemptoff side of this tracer is active:
 * the tracer was started with TRACER_PREEMPT_OFF in trace_type
 * and preemption is currently disabled.
 */
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
/*
 * True when the irqsoff side of this tracer is active:
 * the tracer was started with TRACER_IRQS_OFF in trace_type
 * and interrupts are currently disabled.
 */
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) && irqs_disabled());
}
#else
# define irq_trace() (0)
#endif
62b915f10
|
54 |
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
/* Is the display-graph trace option set for this trace array? */
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
/* Without the graph tracer, switching to graph output is not possible. */
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
# define is_graph(tr) false
#endif
62b915f10
|
64 |
|
81d68a96a
|
65 66 67 68 69 |
/* * Sequence count - we record it when starting a measurement and * skip the latency if the sequence has changed - some other section * did a maximum and could disturb our measurement with serial console * printouts, etc. Truly coinciding maximum latencies should be rare |
25985edce
|
70 |
* and what happens together happens separately as well, so this doesn't |
81d68a96a
|
71 72 73 |
* decrease the validity of the maximum found: */ static __cacheline_aligned_in_smp unsigned long max_sequence; |
606576ce8
|
74 |
#ifdef CONFIG_FUNCTION_TRACER |
81d68a96a
|
75 |
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/*
	 * Slight chance to get a false positive on tracing_cpu,
	 * although I'm starting to think there isn't a chance.
	 * Leave this for now just to be paranoid.
	 */
	if (!irqs_disabled_flags(*flags) && !preempt_count())
		return 0;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	/* Guard against recursion: only the first entry may trace. */
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	/* Nested call: undo the increment and skip tracing. */
	atomic_dec(&(*data)->disabled);

	return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 * it records a plain function entry only while a critical section
 * is being timed on this CPU (see func_prolog_dec above).
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	/* Balance the atomic_inc_return() done in func_prolog_dec(). */
	atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */
81d68a96a
|
143 |
|
62b915f10
|
144 |
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Toggle between graph output and plain function output.
 * Requires a full stop/reset/restart of the tracer, since the
 * two modes register different ftrace callbacks.
 */
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	/* Nothing to do if the flag already matches the request. */
	if (!(is_graph(tr) ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	/* Clear any in-progress per-cpu timing state. */
	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

/* Graph-tracer entry callback; records only inside a timed section. */
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}

/* Graph-tracer return callback; mirrors irqsoff_graph_entry(). */
static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);

}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	/* iter->private is only set when graph_trace_open() succeeded. */
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	struct trace_array *tr = irqsoff_trace;

	if (is_graph(tr))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

/*
 * Record a function event in whichever format (graph or plain)
 * is currently selected for this trace array.
 */
static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

#ifdef CONFIG_FUNCTION_TRACER
/* Stub: graph tracing unavailable, reject entry events. */
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}
#endif

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
81d68a96a
|
270 271 272 |
/* * Should this new latency be reported/recorded? */ |
79851821b
|
273 |
static bool report_latency(struct trace_array *tr, cycle_t delta) |
81d68a96a
|
274 275 276 |
{ if (tracing_thresh) { if (delta < tracing_thresh) |
79851821b
|
277 |
return false; |
81d68a96a
|
278 |
} else { |
6d9b3fa5e
|
279 |
if (delta <= tr->max_latency) |
79851821b
|
280 |
return false; |
81d68a96a
|
281 |
} |
79851821b
|
282 |
return true; |
81d68a96a
|
283 |
} |
e309b41dd
|
284 |
/*
 * Close out a critical-section measurement: compute the elapsed time
 * since data->preempt_timestamp and, if it beats the current maximum
 * (or the fixed threshold), record it as the new max latency.
 * Called from stop_critical_timing() with data->disabled held.
 */
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	pc = preempt_count();

	/* Cheap unlocked check first; re-checked under the lock below. */
	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	/*
	 * If another section recorded a maximum in the meantime,
	 * max_sequence moved on and our measurement may be disturbed
	 * (see the comment above max_sequence) -- discard it.
	 */
	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	/* Re-arm the measurement state for the next critical section. */
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
e309b41dd
|
329 |
/*
 * Begin timing a critical section (irqs and/or preemption just went
 * off) on the current CPU. Marks tracing_cpu so nested disables are
 * ignored until the matching stop_critical_timing().
 */
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	/* Already timing a section on this CPU -- nested disable. */
	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	/* Prefer the caller's caller as the section start address. */
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}
e309b41dd
|
361 |
/*
 * End the critical-section timing started by start_critical_timing()
 * and hand the measurement to check_critical_timing().
 */
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	/* Checked after clearing tracing_cpu so state never goes stale. */
	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}
6cd8a4bb2
|
390 |
/* start and stop critical timings used to for stoppage (in idle) */ |
e309b41dd
|
391 |
void start_critical_timings(void) |
81d68a96a
|
392 |
{ |
6cd8a4bb2
|
393 |
if (preempt_trace() || irq_trace()) |
81d68a96a
|
394 395 |
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } |
1fe371044
|
396 |
EXPORT_SYMBOL_GPL(start_critical_timings); |
81d68a96a
|
397 |
|
e309b41dd
|
398 |
void stop_critical_timings(void) |
81d68a96a
|
399 |
{ |
6cd8a4bb2
|
400 |
if (preempt_trace() || irq_trace()) |
81d68a96a
|
401 402 |
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } |
1fe371044
|
403 |
EXPORT_SYMBOL_GPL(stop_critical_timings); |
81d68a96a
|
404 |
|
6cd8a4bb2
|
405 |
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
/*
 * With lockdep enabled, lockdep owns the trace_hardirqs_* symbols
 * and calls these timing hooks instead. Only pure irqs-off tracing
 * is measured here (preempt_trace() excluded -- that mode is handled
 * by the preempt hooks below).
 */
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
6cd8a4bb2
|
463 464 465 |
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
/*
 * Preemption on/off hooks. Only pure preempt-off tracing is timed
 * here; when irqs are also disabled, the hardirq hooks above own
 * the measurement (irq_trace() excluded to avoid double-counting).
 */
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */
81d68a96a
|
477 |
|
8179e8a15
|
478 479 |
#ifdef CONFIG_FUNCTION_TRACER
/* Tracks whether the ftrace function/graph callback is registered. */
static bool function_enabled;

/*
 * Register the function (or function-graph) callback for this tracer.
 * Honors the TRACE_ITER_FUNCTION option: registration is skipped
 * unless function tracing is on or about to be turned on.
 */
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

/* Inverse of register_irqsoff_function(); no-op when not registered. */
static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

/*
 * React to the TRACE_ITER_FUNCTION option flipping.
 * Returns 1 when the flag was handled here, 0 otherwise.
 */
static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));

	return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */
328df4759
|
532 |
|
bf6065b5c
|
533 |
/*
 * Callback for trace-option changes while this tracer is current.
 * Dispatches FUNCTION and DISPLAY_GRAPH flags to their handlers;
 * everything else falls through to trace_keep_overwrite().
 */
static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

/*
 * Arm the tracer: hook the ftrace callback, then enable the
 * critical-timing machinery only if both steps succeeded.
 */
static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

/* Disarm the tracer and unhook the ftrace callback. */
static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}
02f2f7646
|
565 566 567 |
/* Only one irqsoff-family tracer instance may run at a time. */
static bool irqsoff_busy;

/*
 * Common init for the irqsoff/preemptoff/preemptirqsoff tracers.
 * Saves the current trace flags (restored in irqsoff_tracer_reset),
 * forces the flags the latency tracers need, and starts tracing.
 */
static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only toplevel instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}

/*
 * Tear down after __irqsoff_tracer_init(): stop tracing and restore
 * the LATENCY_FMT/OVERWRITE flags to their saved values.
 */
static void irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}
6cd8a4bb2
|
615 |
#ifdef CONFIG_IRQSOFF_TRACER
/* Pure irqs-off latency tracer. */
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header   = irqsoff_print_header,
	.print_line     = irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_irqsoff,
#endif
	.open           = irqsoff_trace_open,
	.close          = irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
/* Pure preempt-off latency tracer. */
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header   = irqsoff_print_header,
	.print_line     = irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

/* Combined irqs-and/or-preempt-off latency tracer. */
static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header   = irqsoff_print_header,
	.print_line     = irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif
81d68a96a
|
708 709 710 |
/*
 * Register whichever of the three tracers were built in
 * (the register_* macros are no-ops when a tracer is configured out).
 */
__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
core_initcall(init_irqsoff_tracer);