Commit da89a7a2536c46e76a1a4351a70a8b8417e5fed1
Committed by: Thomas Gleixner
Parent: 7e18d8e701
Exists in: master and 7 other branches
ftrace: remove printks from irqsoff trace
Printing out new max latencies was fine for the old RT tracer. But for mainline it is a bit messy. We also need to test if the run queue is locked before we can do the print. This means that we may not be printing out latencies if the run queue is locked on another CPU. This produces inconsistencies in the output. This patch simply removes the print altogether.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Cc: pq@iki.fi
Cc: proski@gnu.org
Cc: sandmann@redhat.com
Cc: a.p.zijlstra@chello.nl
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
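The maximum latency itself is still recorded (update_max_tr_single() below runs unconditionally) and remains readable through the tracing debugfs interface, so only the console message is lost. A minimal userspace sketch of reading it back, assuming debugfs is mounted at /sys/kernel/debug (the tracing directory location varies by setup):

/* Sketch: read the recorded maximum latency from the tracing debugfs
 * directory. The path below is an assumption about where debugfs is
 * mounted on this system; adjust as needed. The value is in microseconds.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/tracing/tracing_max_latency";
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("max latency: %lu us\n", strtoul(buf, NULL, 10));
	fclose(f);
	return EXIT_SUCCESS;
}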
Showing 1 changed file with 0 additions and 16 deletions
kernel/trace/trace_irqsoff.c
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -1,503 +1,487 @@
 /*
  * trace irqs off criticall timings
  *
  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
  *
  * From code in the latency_tracer, that is:
  *
  *  Copyright (C) 2004-2006 Ingo Molnar
  *  Copyright (C) 2004 William Lee Irwin III
  */
 #include <linux/kallsyms.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/module.h>
 #include <linux/ftrace.h>
 #include <linux/fs.h>
 
 #include "trace.h"
 
 static struct trace_array *irqsoff_trace __read_mostly;
 static int tracer_enabled __read_mostly;
 
 static DEFINE_PER_CPU(int, tracing_cpu);
 
 static DEFINE_SPINLOCK(max_trace_lock);
 
 enum {
 	TRACER_IRQS_OFF = (1 << 1),
 	TRACER_PREEMPT_OFF = (1 << 2),
 };
 
 static int trace_type __read_mostly;
 
 #ifdef CONFIG_PREEMPT_TRACER
 static inline int
 preempt_trace(void)
 {
 	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
 }
 #else
 # define preempt_trace() (0)
 #endif
 
 #ifdef CONFIG_IRQSOFF_TRACER
 static inline int
 irq_trace(void)
 {
 	return ((trace_type & TRACER_IRQS_OFF) &&
 		irqs_disabled());
 }
 #else
 # define irq_trace() (0)
 #endif
 
 /*
  * Sequence count - we record it when starting a measurement and
  * skip the latency if the sequence has changed - some other section
  * did a maximum and could disturb our measurement with serial console
  * printouts, etc. Truly coinciding maximum latencies should be rare
  * and what happens together happens separately as well, so this doesnt
  * decrease the validity of the maximum found:
  */
 static __cacheline_aligned_in_smp unsigned long max_sequence;
 
 #ifdef CONFIG_FTRACE
 /*
  * irqsoff uses its own tracer function to keep the overhead down:
  */
 static void
 irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
 {
 	struct trace_array *tr = irqsoff_trace;
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	long disabled;
 	int cpu;
 
 	/*
 	 * Does not matter if we preempt. We test the flags
 	 * afterward, to see if irqs are disabled or not.
 	 * If we preempt and get a false positive, the flags
 	 * test will fail.
 	 */
 	cpu = raw_smp_processor_id();
 	if (likely(!per_cpu(tracing_cpu, cpu)))
 		return;
 
 	local_save_flags(flags);
 	/* slight chance to get a false positive on tracing_cpu */
 	if (!irqs_disabled_flags(flags))
 		return;
 
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1))
 		trace_function(tr, data, ip, parent_ip, flags);
 
 	atomic_dec(&data->disabled);
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = irqsoff_tracer_call,
 };
 #endif /* CONFIG_FTRACE */
 
 /*
  * Should this new latency be reported/recorded?
  */
 static int report_latency(cycle_t delta)
 {
 	if (tracing_thresh) {
 		if (delta < tracing_thresh)
 			return 0;
 	} else {
 		if (delta <= tracing_max_latency)
 			return 0;
 	}
 	return 1;
 }
 
 static void
 check_critical_timing(struct trace_array *tr,
 		      struct trace_array_cpu *data,
 		      unsigned long parent_ip,
 		      int cpu)
 {
 	unsigned long latency, t0, t1;
 	cycle_t T0, T1, delta;
 	unsigned long flags;
 
 	/*
 	 * usecs conversion is slow so we try to delay the conversion
 	 * as long as possible:
 	 */
 	T0 = data->preempt_timestamp;
 	T1 = ftrace_now(cpu);
 	delta = T1-T0;
 
 	local_save_flags(flags);
 
 	if (!report_latency(delta))
 		goto out;
 
 	spin_lock_irqsave(&max_trace_lock, flags);
 
 	/* check if we are still the max latency */
 	if (!report_latency(delta))
 		goto out_unlock;
 
 	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
 
 	latency = nsecs_to_usecs(delta);
 
 	if (data->critical_sequence != max_sequence)
 		goto out_unlock;
 
 	tracing_max_latency = delta;
 	t0 = nsecs_to_usecs(T0);
 	t1 = nsecs_to_usecs(T1);
 
 	data->critical_end = parent_ip;
 
 	update_max_tr_single(tr, current, cpu);
 
-	if (!runqueue_is_locked()) {
-		if (tracing_thresh) {
-			printk(KERN_INFO "(%16s-%-5d|#%d): %lu us critical"
-			       " section violates %lu us threshold.\n",
-			       current->comm, current->pid,
-			       raw_smp_processor_id(),
-			       latency, nsecs_to_usecs(tracing_thresh));
-		} else {
-			printk(KERN_INFO "(%16s-%-5d|#%d): new %lu us"
-			       " maximum-latency critical section.\n",
-			       current->comm, current->pid,
-			       raw_smp_processor_id(),
-			       latency);
-		}
-	}
-
 	max_sequence++;
 
 out_unlock:
 	spin_unlock_irqrestore(&max_trace_lock, flags);
 
 out:
 	data->critical_sequence = max_sequence;
 	data->preempt_timestamp = ftrace_now(cpu);
 	tracing_reset(data);
 	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
 }
 
 static inline void
 start_critical_timing(unsigned long ip, unsigned long parent_ip)
 {
 	int cpu;
 	struct trace_array *tr = irqsoff_trace;
 	struct trace_array_cpu *data;
 	unsigned long flags;
 
 	if (likely(!tracer_enabled))
 		return;
 
 	cpu = raw_smp_processor_id();
 
 	if (per_cpu(tracing_cpu, cpu))
 		return;
 
 	data = tr->data[cpu];
 
 	if (unlikely(!data) || atomic_read(&data->disabled))
 		return;
 
 	atomic_inc(&data->disabled);
 
 	data->critical_sequence = max_sequence;
 	data->preempt_timestamp = ftrace_now(cpu);
 	data->critical_start = parent_ip ? : ip;
 	tracing_reset(data);
 
 	local_save_flags(flags);
 
 	trace_function(tr, data, ip, parent_ip, flags);
 
 	per_cpu(tracing_cpu, cpu) = 1;
 
 	atomic_dec(&data->disabled);
 }
 
 static inline void
 stop_critical_timing(unsigned long ip, unsigned long parent_ip)
 {
 	int cpu;
 	struct trace_array *tr = irqsoff_trace;
 	struct trace_array_cpu *data;
 	unsigned long flags;
 
 	cpu = raw_smp_processor_id();
 	/* Always clear the tracing cpu on stopping the trace */
 	if (unlikely(per_cpu(tracing_cpu, cpu)))
 		per_cpu(tracing_cpu, cpu) = 0;
 	else
 		return;
 
 	if (!tracer_enabled)
 		return;
 
 	data = tr->data[cpu];
 
 	if (unlikely(!data) || unlikely(!head_page(data)) ||
 	    !data->critical_start || atomic_read(&data->disabled))
 		return;
 
 	atomic_inc(&data->disabled);
 
 	local_save_flags(flags);
 	trace_function(tr, data, ip, parent_ip, flags);
 	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
 	data->critical_start = 0;
 	atomic_dec(&data->disabled);
 }
 
 /* start and stop critical timings used to for stoppage (in idle) */
 void start_critical_timings(void)
 {
 	if (preempt_trace() || irq_trace())
 		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
 
 void stop_critical_timings(void)
 {
 	if (preempt_trace() || irq_trace())
 		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
 
 #ifdef CONFIG_IRQSOFF_TRACER
 #ifdef CONFIG_PROVE_LOCKING
 void time_hardirqs_on(unsigned long a0, unsigned long a1)
 {
 	if (!preempt_trace() && irq_trace())
 		stop_critical_timing(a0, a1);
 }
 
 void time_hardirqs_off(unsigned long a0, unsigned long a1)
 {
 	if (!preempt_trace() && irq_trace())
 		start_critical_timing(a0, a1);
 }
 
 #else /* !CONFIG_PROVE_LOCKING */
 
 /*
  * Stubs:
  */
 
 void early_boot_irqs_off(void)
 {
 }
 
 void early_boot_irqs_on(void)
 {
 }
 
 void trace_softirqs_on(unsigned long ip)
 {
 }
 
 void trace_softirqs_off(unsigned long ip)
 {
 }
 
 inline void print_irqtrace_events(struct task_struct *curr)
 {
 }
 
 /*
  * We are only interested in hardirq on/off events:
  */
 void trace_hardirqs_on(void)
 {
 	if (!preempt_trace() && irq_trace())
 		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
 EXPORT_SYMBOL(trace_hardirqs_on);
 
 void trace_hardirqs_off(void)
 {
 	if (!preempt_trace() && irq_trace())
 		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
 EXPORT_SYMBOL(trace_hardirqs_off);
 
 void trace_hardirqs_on_caller(unsigned long caller_addr)
 {
 	if (!preempt_trace() && irq_trace())
 		stop_critical_timing(CALLER_ADDR0, caller_addr);
 }
 EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
 void trace_hardirqs_off_caller(unsigned long caller_addr)
 {
 	if (!preempt_trace() && irq_trace())
 		start_critical_timing(CALLER_ADDR0, caller_addr);
 }
 EXPORT_SYMBOL(trace_hardirqs_off_caller);
 
 #endif /* CONFIG_PROVE_LOCKING */
 #endif /* CONFIG_IRQSOFF_TRACER */
 
 #ifdef CONFIG_PREEMPT_TRACER
 void trace_preempt_on(unsigned long a0, unsigned long a1)
 {
 	stop_critical_timing(a0, a1);
 }
 
 void trace_preempt_off(unsigned long a0, unsigned long a1)
 {
 	start_critical_timing(a0, a1);
 }
 #endif /* CONFIG_PREEMPT_TRACER */
 
 static void start_irqsoff_tracer(struct trace_array *tr)
 {
 	register_ftrace_function(&trace_ops);
 	tracer_enabled = 1;
 }
 
 static void stop_irqsoff_tracer(struct trace_array *tr)
 {
 	tracer_enabled = 0;
 	unregister_ftrace_function(&trace_ops);
 }
 
 static void __irqsoff_tracer_init(struct trace_array *tr)
 {
 	irqsoff_trace = tr;
 	/* make sure that the tracer is visible */
 	smp_wmb();
 
 	if (tr->ctrl)
 		start_irqsoff_tracer(tr);
 }
 
 static void irqsoff_tracer_reset(struct trace_array *tr)
 {
 	if (tr->ctrl)
 		stop_irqsoff_tracer(tr);
 }
 
 static void irqsoff_tracer_ctrl_update(struct trace_array *tr)
 {
 	if (tr->ctrl)
 		start_irqsoff_tracer(tr);
 	else
 		stop_irqsoff_tracer(tr);
 }
 
 static void irqsoff_tracer_open(struct trace_iterator *iter)
 {
 	/* stop the trace while dumping */
 	if (iter->tr->ctrl)
 		stop_irqsoff_tracer(iter->tr);
 }
 
 static void irqsoff_tracer_close(struct trace_iterator *iter)
 {
 	if (iter->tr->ctrl)
 		start_irqsoff_tracer(iter->tr);
 }
 
 #ifdef CONFIG_IRQSOFF_TRACER
 static void irqsoff_tracer_init(struct trace_array *tr)
 {
 	trace_type = TRACER_IRQS_OFF;
 
 	__irqsoff_tracer_init(tr);
 }
 static struct tracer irqsoff_tracer __read_mostly =
 {
 	.name = "irqsoff",
 	.init = irqsoff_tracer_init,
 	.reset = irqsoff_tracer_reset,
 	.open = irqsoff_tracer_open,
 	.close = irqsoff_tracer_close,
 	.ctrl_update = irqsoff_tracer_ctrl_update,
 	.print_max = 1,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest = trace_selftest_startup_irqsoff,
 #endif
 };
 # define register_irqsoff(trace) register_tracer(&trace)
 #else
 # define register_irqsoff(trace) do { } while (0)
 #endif
 
 #ifdef CONFIG_PREEMPT_TRACER
 static void preemptoff_tracer_init(struct trace_array *tr)
 {
 	trace_type = TRACER_PREEMPT_OFF;
 
 	__irqsoff_tracer_init(tr);
 }
 
 static struct tracer preemptoff_tracer __read_mostly =
 {
 	.name = "preemptoff",
 	.init = preemptoff_tracer_init,
 	.reset = irqsoff_tracer_reset,
 	.open = irqsoff_tracer_open,
 	.close = irqsoff_tracer_close,
 	.ctrl_update = irqsoff_tracer_ctrl_update,
 	.print_max = 1,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest = trace_selftest_startup_preemptoff,
 #endif
 };
 # define register_preemptoff(trace) register_tracer(&trace)
 #else
 # define register_preemptoff(trace) do { } while (0)
 #endif
 
 #if defined(CONFIG_IRQSOFF_TRACER) && \
 	defined(CONFIG_PREEMPT_TRACER)
 
 static void preemptirqsoff_tracer_init(struct trace_array *tr)
 {
 	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;
 
 	__irqsoff_tracer_init(tr);
 }
 
 static struct tracer preemptirqsoff_tracer __read_mostly =
 {
 	.name = "preemptirqsoff",
 	.init = preemptirqsoff_tracer_init,
 	.reset = irqsoff_tracer_reset,
 	.open = irqsoff_tracer_open,
 	.close = irqsoff_tracer_close,
 	.ctrl_update = irqsoff_tracer_ctrl_update,
 	.print_max = 1,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest = trace_selftest_startup_preemptirqsoff,
 #endif
 };
 
 # define register_preemptirqsoff(trace) register_tracer(&trace)
 #else
 # define register_preemptirqsoff(trace) do { } while (0)
 #endif
 
 __init static int init_irqsoff_tracer(void)
 {
 	register_irqsoff(irqsoff_tracer);
 	register_preemptoff(preemptoff_tracer);
 	register_preemptirqsoff(preemptirqsoff_tracer);
 
 	return 0;
 }
 device_initcall(init_irqsoff_tracer);
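The logic that survives in check_critical_timing() is a common optimistic check-then-lock-then-recheck pattern: report_latency() is tested once without the lock to keep the fast path cheap, max_trace_lock is taken, and the test is repeated because another CPU may have recorded a larger maximum in the meantime. A standalone sketch of that pattern, with illustrative names (new_max() and max_lock here are not kernel symbols; a pthread mutex stands in for the spinlock):

#include <pthread.h>
#include <stdio.h>

static unsigned long long max_latency;
static pthread_mutex_t max_lock = PTHREAD_MUTEX_INITIALIZER;

static int new_max(unsigned long long delta)
{
	return delta > max_latency;	/* racy read is fine: rechecked under the lock */
}

static void record_latency(unsigned long long delta)
{
	if (!new_max(delta))		/* cheap unlocked check: common case bails out */
		return;

	pthread_mutex_lock(&max_lock);
	if (new_max(delta))		/* recheck: another thread may have won the race */
		max_latency = delta;
	pthread_mutex_unlock(&max_lock);
}

int main(void)
{
	record_latency(120);
	record_latency(80);		/* ignored: not a new maximum */
	printf("max: %llu us\n", max_latency);
	return 0;
}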