Commit 781d06248234e221edb560a18461d65808a8a942
Committed by: Steven Rostedt
1 parent: a2546fae01
Exists in: smarc-l5.0.0_1.0.0-ga and 5 other branches
ftrace: Do not test frame pointers if -mfentry is used
The function graph tracer has a test to check whether the frame pointer is
corrupted, which can happen with various gcc options when mcount is used.
This is not an issue with -mfentry, as -mfentry neither needs nor uses
frame pointers for function graph tracing.

Link: http://lkml.kernel.org/r/20120807194059.773895870@goodmis.org

Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: Andi Kleen <andi@firstfloor.org>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
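As background (a rough illustrative sketch, not part of the commit; assumes
x86_64 and gcc's default code generation): the reason -mfentry sidesteps the
frame pointer test is where gcc places the profiling call.

        /*
         * With plain -pg, gcc emits the mcount call after the function
         * prologue, so a frame pointer has already been set up and the
         * tracer can record it at entry and sanity-check it at exit:
         *
         *         push %rbp
         *         mov  %rsp, %rbp
         *         call mcount
         *
         * With -pg -mfentry, the call is the very first instruction of
         * the function, before any prologue runs, so there is no frame
         * pointer for the tracer to record or verify:
         *
         *         call __fentry__
         *         push %rbp
         *         mov  %rsp, %rbp
         */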
Showing 1 changed file with 4 additions and 1 deletion
kernel/trace/trace_functions_graph.c
1 | /* | 1 | /* |
2 | * | 2 | * |
3 | * Function graph tracer. | 3 | * Function graph tracer. |
4 | * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com> | 4 | * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com> |
5 | * Mostly borrowed from function tracer which | 5 | * Mostly borrowed from function tracer which |
6 | * is Copyright (c) Steven Rostedt <srostedt@redhat.com> | 6 | * is Copyright (c) Steven Rostedt <srostedt@redhat.com> |
7 | * | 7 | * |
8 | */ | 8 | */ |
9 | #include <linux/debugfs.h> | 9 | #include <linux/debugfs.h> |
10 | #include <linux/uaccess.h> | 10 | #include <linux/uaccess.h> |
11 | #include <linux/ftrace.h> | 11 | #include <linux/ftrace.h> |
12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
13 | #include <linux/fs.h> | 13 | #include <linux/fs.h> |
14 | 14 | ||
15 | #include "trace.h" | 15 | #include "trace.h" |
16 | #include "trace_output.h" | 16 | #include "trace_output.h" |
17 | 17 | ||
18 | /* When set, irq functions will be ignored */ | 18 | /* When set, irq functions will be ignored */ |
19 | static int ftrace_graph_skip_irqs; | 19 | static int ftrace_graph_skip_irqs; |
20 | 20 | ||
21 | struct fgraph_cpu_data { | 21 | struct fgraph_cpu_data { |
22 | pid_t last_pid; | 22 | pid_t last_pid; |
23 | int depth; | 23 | int depth; |
24 | int depth_irq; | 24 | int depth_irq; |
25 | int ignore; | 25 | int ignore; |
26 | unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH]; | 26 | unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH]; |
27 | }; | 27 | }; |
28 | 28 | ||
29 | struct fgraph_data { | 29 | struct fgraph_data { |
30 | struct fgraph_cpu_data __percpu *cpu_data; | 30 | struct fgraph_cpu_data __percpu *cpu_data; |
31 | 31 | ||
32 | /* Place to preserve last processed entry. */ | 32 | /* Place to preserve last processed entry. */ |
33 | struct ftrace_graph_ent_entry ent; | 33 | struct ftrace_graph_ent_entry ent; |
34 | struct ftrace_graph_ret_entry ret; | 34 | struct ftrace_graph_ret_entry ret; |
35 | int failed; | 35 | int failed; |
36 | int cpu; | 36 | int cpu; |
37 | }; | 37 | }; |
38 | 38 | ||
39 | #define TRACE_GRAPH_INDENT 2 | 39 | #define TRACE_GRAPH_INDENT 2 |
40 | 40 | ||
41 | /* Flag options */ | 41 | /* Flag options */ |
42 | #define TRACE_GRAPH_PRINT_OVERRUN 0x1 | 42 | #define TRACE_GRAPH_PRINT_OVERRUN 0x1 |
43 | #define TRACE_GRAPH_PRINT_CPU 0x2 | 43 | #define TRACE_GRAPH_PRINT_CPU 0x2 |
44 | #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 | 44 | #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 |
45 | #define TRACE_GRAPH_PRINT_PROC 0x8 | 45 | #define TRACE_GRAPH_PRINT_PROC 0x8 |
46 | #define TRACE_GRAPH_PRINT_DURATION 0x10 | 46 | #define TRACE_GRAPH_PRINT_DURATION 0x10 |
47 | #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 | 47 | #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 |
48 | #define TRACE_GRAPH_PRINT_IRQS 0x40 | 48 | #define TRACE_GRAPH_PRINT_IRQS 0x40 |
49 | 49 | ||
50 | static struct tracer_opt trace_opts[] = { | 50 | static struct tracer_opt trace_opts[] = { |
51 | /* Display overruns? (for self-debug purpose) */ | 51 | /* Display overruns? (for self-debug purpose) */ |
52 | { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, | 52 | { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, |
53 | /* Display CPU ? */ | 53 | /* Display CPU ? */ |
54 | { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) }, | 54 | { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) }, |
55 | /* Display Overhead ? */ | 55 | /* Display Overhead ? */ |
56 | { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) }, | 56 | { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) }, |
57 | /* Display proc name/pid */ | 57 | /* Display proc name/pid */ |
58 | { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) }, | 58 | { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) }, |
59 | /* Display duration of execution */ | 59 | /* Display duration of execution */ |
60 | { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) }, | 60 | { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) }, |
61 | /* Display absolute time of an entry */ | 61 | /* Display absolute time of an entry */ |
62 | { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) }, | 62 | { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) }, |
63 | /* Display interrupts */ | 63 | /* Display interrupts */ |
64 | { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) }, | 64 | { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) }, |
65 | { } /* Empty entry */ | 65 | { } /* Empty entry */ |
66 | }; | 66 | }; |
67 | 67 | ||
68 | static struct tracer_flags tracer_flags = { | 68 | static struct tracer_flags tracer_flags = { |
69 | /* Don't display overruns and proc by default */ | 69 | /* Don't display overruns and proc by default */ |
70 | .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD | | 70 | .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD | |
71 | TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS, | 71 | TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS, |
72 | .opts = trace_opts | 72 | .opts = trace_opts |
73 | }; | 73 | }; |
74 | 74 | ||
75 | static struct trace_array *graph_array; | 75 | static struct trace_array *graph_array; |
76 | 76 | ||
77 | /* | 77 | /* |
78 | * DURATION column is being also used to display IRQ signs, | 78 | * DURATION column is being also used to display IRQ signs, |
79 | * following values are used by print_graph_irq and others | 79 | * following values are used by print_graph_irq and others |
80 | * to fill in space into DURATION column. | 80 | * to fill in space into DURATION column. |
81 | */ | 81 | */ |
82 | enum { | 82 | enum { |
83 | DURATION_FILL_FULL = -1, | 83 | DURATION_FILL_FULL = -1, |
84 | DURATION_FILL_START = -2, | 84 | DURATION_FILL_START = -2, |
85 | DURATION_FILL_END = -3, | 85 | DURATION_FILL_END = -3, |
86 | }; | 86 | }; |
87 | 87 | ||
88 | static enum print_line_t | 88 | static enum print_line_t |
89 | print_graph_duration(unsigned long long duration, struct trace_seq *s, | 89 | print_graph_duration(unsigned long long duration, struct trace_seq *s, |
90 | u32 flags); | 90 | u32 flags); |
91 | 91 | ||
92 | /* Add a function return address to the trace stack on thread info.*/ | 92 | /* Add a function return address to the trace stack on thread info.*/ |
93 | int | 93 | int |
94 | ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, | 94 | ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, |
95 | unsigned long frame_pointer) | 95 | unsigned long frame_pointer) |
96 | { | 96 | { |
97 | unsigned long long calltime; | 97 | unsigned long long calltime; |
98 | int index; | 98 | int index; |
99 | 99 | ||
100 | if (!current->ret_stack) | 100 | if (!current->ret_stack) |
101 | return -EBUSY; | 101 | return -EBUSY; |
102 | 102 | ||
103 | /* | 103 | /* |
104 | * We must make sure the ret_stack is tested before we read | 104 | * We must make sure the ret_stack is tested before we read |
105 | * anything else. | 105 | * anything else. |
106 | */ | 106 | */ |
107 | smp_rmb(); | 107 | smp_rmb(); |
108 | 108 | ||
109 | /* The return trace stack is full */ | 109 | /* The return trace stack is full */ |
110 | if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) { | 110 | if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) { |
111 | atomic_inc(¤t->trace_overrun); | 111 | atomic_inc(¤t->trace_overrun); |
112 | return -EBUSY; | 112 | return -EBUSY; |
113 | } | 113 | } |
114 | 114 | ||
115 | calltime = trace_clock_local(); | 115 | calltime = trace_clock_local(); |
116 | 116 | ||
117 | index = ++current->curr_ret_stack; | 117 | index = ++current->curr_ret_stack; |
118 | barrier(); | 118 | barrier(); |
119 | current->ret_stack[index].ret = ret; | 119 | current->ret_stack[index].ret = ret; |
120 | current->ret_stack[index].func = func; | 120 | current->ret_stack[index].func = func; |
121 | current->ret_stack[index].calltime = calltime; | 121 | current->ret_stack[index].calltime = calltime; |
122 | current->ret_stack[index].subtime = 0; | 122 | current->ret_stack[index].subtime = 0; |
123 | current->ret_stack[index].fp = frame_pointer; | 123 | current->ret_stack[index].fp = frame_pointer; |
124 | *depth = index; | 124 | *depth = index; |
125 | 125 | ||
126 | return 0; | 126 | return 0; |
127 | } | 127 | } |
128 | 128 | ||
129 | /* Retrieve a function return address to the trace stack on thread info.*/ | 129 | /* Retrieve a function return address to the trace stack on thread info.*/ |
130 | static void | 130 | static void |
131 | ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret, | 131 | ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret, |
132 | unsigned long frame_pointer) | 132 | unsigned long frame_pointer) |
133 | { | 133 | { |
134 | int index; | 134 | int index; |
135 | 135 | ||
136 | index = current->curr_ret_stack; | 136 | index = current->curr_ret_stack; |
137 | 137 | ||
138 | if (unlikely(index < 0)) { | 138 | if (unlikely(index < 0)) { |
139 | ftrace_graph_stop(); | 139 | ftrace_graph_stop(); |
140 | WARN_ON(1); | 140 | WARN_ON(1); |
141 | /* Might as well panic, otherwise we have no where to go */ | 141 | /* Might as well panic, otherwise we have no where to go */ |
142 | *ret = (unsigned long)panic; | 142 | *ret = (unsigned long)panic; |
143 | return; | 143 | return; |
144 | } | 144 | } |
145 | 145 | ||
146 | #ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST | 146 | #if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY) |
147 | /* | 147 | /* |
148 | * The arch may choose to record the frame pointer used | 148 | * The arch may choose to record the frame pointer used |
149 | * and check it here to make sure that it is what we expect it | 149 | * and check it here to make sure that it is what we expect it |
150 | * to be. If gcc does not set the place holder of the return | 150 | * to be. If gcc does not set the place holder of the return |
151 | * address in the frame pointer, and does a copy instead, then | 151 | * address in the frame pointer, and does a copy instead, then |
152 | * the function graph trace will fail. This test detects this | 152 | * the function graph trace will fail. This test detects this |
153 | * case. | 153 | * case. |
154 | * | 154 | * |
155 | * Currently, x86_32 with optimize for size (-Os) makes the latest | 155 | * Currently, x86_32 with optimize for size (-Os) makes the latest |
156 | * gcc do the above. | 156 | * gcc do the above. |
157 | * | ||
158 | * Note, -mfentry does not use frame pointers, and this test | ||
159 | * is not needed if CC_USING_FENTRY is set. | ||
157 | */ | 160 | */ |
158 | if (unlikely(current->ret_stack[index].fp != frame_pointer)) { | 161 | if (unlikely(current->ret_stack[index].fp != frame_pointer)) { |
159 | ftrace_graph_stop(); | 162 | ftrace_graph_stop(); |
160 | WARN(1, "Bad frame pointer: expected %lx, received %lx\n" | 163 | WARN(1, "Bad frame pointer: expected %lx, received %lx\n" |
161 | " from func %ps return to %lx\n", | 164 | " from func %ps return to %lx\n", |
162 | current->ret_stack[index].fp, | 165 | current->ret_stack[index].fp, |
163 | frame_pointer, | 166 | frame_pointer, |
164 | (void *)current->ret_stack[index].func, | 167 | (void *)current->ret_stack[index].func, |
165 | current->ret_stack[index].ret); | 168 | current->ret_stack[index].ret); |
166 | *ret = (unsigned long)panic; | 169 | *ret = (unsigned long)panic; |
167 | return; | 170 | return; |
168 | } | 171 | } |
169 | #endif | 172 | #endif |
170 | 173 | ||
171 | *ret = current->ret_stack[index].ret; | 174 | *ret = current->ret_stack[index].ret; |
172 | trace->func = current->ret_stack[index].func; | 175 | trace->func = current->ret_stack[index].func; |
173 | trace->calltime = current->ret_stack[index].calltime; | 176 | trace->calltime = current->ret_stack[index].calltime; |
174 | trace->overrun = atomic_read(¤t->trace_overrun); | 177 | trace->overrun = atomic_read(¤t->trace_overrun); |
175 | trace->depth = index; | 178 | trace->depth = index; |
176 | } | 179 | } |
177 | 180 | ||
178 | /* | 181 | /* |
179 | * Send the trace to the ring-buffer. | 182 | * Send the trace to the ring-buffer. |
180 | * @return the original return address. | 183 | * @return the original return address. |
181 | */ | 184 | */ |
182 | unsigned long ftrace_return_to_handler(unsigned long frame_pointer) | 185 | unsigned long ftrace_return_to_handler(unsigned long frame_pointer) |
183 | { | 186 | { |
184 | struct ftrace_graph_ret trace; | 187 | struct ftrace_graph_ret trace; |
185 | unsigned long ret; | 188 | unsigned long ret; |
186 | 189 | ||
187 | ftrace_pop_return_trace(&trace, &ret, frame_pointer); | 190 | ftrace_pop_return_trace(&trace, &ret, frame_pointer); |
188 | trace.rettime = trace_clock_local(); | 191 | trace.rettime = trace_clock_local(); |
189 | ftrace_graph_return(&trace); | 192 | ftrace_graph_return(&trace); |
190 | barrier(); | 193 | barrier(); |
191 | current->curr_ret_stack--; | 194 | current->curr_ret_stack--; |
192 | 195 | ||
193 | if (unlikely(!ret)) { | 196 | if (unlikely(!ret)) { |
194 | ftrace_graph_stop(); | 197 | ftrace_graph_stop(); |
195 | WARN_ON(1); | 198 | WARN_ON(1); |
196 | /* Might as well panic. What else to do? */ | 199 | /* Might as well panic. What else to do? */ |
197 | ret = (unsigned long)panic; | 200 | ret = (unsigned long)panic; |
198 | } | 201 | } |
199 | 202 | ||
200 | return ret; | 203 | return ret; |
201 | } | 204 | } |
202 | 205 | ||
203 | int __trace_graph_entry(struct trace_array *tr, | 206 | int __trace_graph_entry(struct trace_array *tr, |
204 | struct ftrace_graph_ent *trace, | 207 | struct ftrace_graph_ent *trace, |
205 | unsigned long flags, | 208 | unsigned long flags, |
206 | int pc) | 209 | int pc) |
207 | { | 210 | { |
208 | struct ftrace_event_call *call = &event_funcgraph_entry; | 211 | struct ftrace_event_call *call = &event_funcgraph_entry; |
209 | struct ring_buffer_event *event; | 212 | struct ring_buffer_event *event; |
210 | struct ring_buffer *buffer = tr->buffer; | 213 | struct ring_buffer *buffer = tr->buffer; |
211 | struct ftrace_graph_ent_entry *entry; | 214 | struct ftrace_graph_ent_entry *entry; |
212 | 215 | ||
213 | if (unlikely(__this_cpu_read(ftrace_cpu_disabled))) | 216 | if (unlikely(__this_cpu_read(ftrace_cpu_disabled))) |
214 | return 0; | 217 | return 0; |
215 | 218 | ||
216 | event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT, | 219 | event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT, |
217 | sizeof(*entry), flags, pc); | 220 | sizeof(*entry), flags, pc); |
218 | if (!event) | 221 | if (!event) |
219 | return 0; | 222 | return 0; |
220 | entry = ring_buffer_event_data(event); | 223 | entry = ring_buffer_event_data(event); |
221 | entry->graph_ent = *trace; | 224 | entry->graph_ent = *trace; |
222 | if (!filter_current_check_discard(buffer, call, entry, event)) | 225 | if (!filter_current_check_discard(buffer, call, entry, event)) |
223 | ring_buffer_unlock_commit(buffer, event); | 226 | ring_buffer_unlock_commit(buffer, event); |
224 | 227 | ||
225 | return 1; | 228 | return 1; |
226 | } | 229 | } |
227 | 230 | ||
228 | static inline int ftrace_graph_ignore_irqs(void) | 231 | static inline int ftrace_graph_ignore_irqs(void) |
229 | { | 232 | { |
230 | if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT)) | 233 | if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT)) |
231 | return 0; | 234 | return 0; |
232 | 235 | ||
233 | return in_irq(); | 236 | return in_irq(); |
234 | } | 237 | } |
235 | 238 | ||
236 | int trace_graph_entry(struct ftrace_graph_ent *trace) | 239 | int trace_graph_entry(struct ftrace_graph_ent *trace) |
237 | { | 240 | { |
238 | struct trace_array *tr = graph_array; | 241 | struct trace_array *tr = graph_array; |
239 | struct trace_array_cpu *data; | 242 | struct trace_array_cpu *data; |
240 | unsigned long flags; | 243 | unsigned long flags; |
241 | long disabled; | 244 | long disabled; |
242 | int ret; | 245 | int ret; |
243 | int cpu; | 246 | int cpu; |
244 | int pc; | 247 | int pc; |
245 | 248 | ||
246 | if (!ftrace_trace_task(current)) | 249 | if (!ftrace_trace_task(current)) |
247 | return 0; | 250 | return 0; |
248 | 251 | ||
249 | /* trace it when it is-nested-in or is a function enabled. */ | 252 | /* trace it when it is-nested-in or is a function enabled. */ |
250 | if (!(trace->depth || ftrace_graph_addr(trace->func)) || | 253 | if (!(trace->depth || ftrace_graph_addr(trace->func)) || |
251 | ftrace_graph_ignore_irqs()) | 254 | ftrace_graph_ignore_irqs()) |
252 | return 0; | 255 | return 0; |
253 | 256 | ||
254 | local_irq_save(flags); | 257 | local_irq_save(flags); |
255 | cpu = raw_smp_processor_id(); | 258 | cpu = raw_smp_processor_id(); |
256 | data = tr->data[cpu]; | 259 | data = tr->data[cpu]; |
257 | disabled = atomic_inc_return(&data->disabled); | 260 | disabled = atomic_inc_return(&data->disabled); |
258 | if (likely(disabled == 1)) { | 261 | if (likely(disabled == 1)) { |
259 | pc = preempt_count(); | 262 | pc = preempt_count(); |
260 | ret = __trace_graph_entry(tr, trace, flags, pc); | 263 | ret = __trace_graph_entry(tr, trace, flags, pc); |
261 | } else { | 264 | } else { |
262 | ret = 0; | 265 | ret = 0; |
263 | } | 266 | } |
264 | 267 | ||
265 | atomic_dec(&data->disabled); | 268 | atomic_dec(&data->disabled); |
266 | local_irq_restore(flags); | 269 | local_irq_restore(flags); |
267 | 270 | ||
268 | return ret; | 271 | return ret; |
269 | } | 272 | } |
270 | 273 | ||
271 | int trace_graph_thresh_entry(struct ftrace_graph_ent *trace) | 274 | int trace_graph_thresh_entry(struct ftrace_graph_ent *trace) |
272 | { | 275 | { |
273 | if (tracing_thresh) | 276 | if (tracing_thresh) |
274 | return 1; | 277 | return 1; |
275 | else | 278 | else |
276 | return trace_graph_entry(trace); | 279 | return trace_graph_entry(trace); |
277 | } | 280 | } |
278 | 281 | ||
279 | static void | 282 | static void |
280 | __trace_graph_function(struct trace_array *tr, | 283 | __trace_graph_function(struct trace_array *tr, |
281 | unsigned long ip, unsigned long flags, int pc) | 284 | unsigned long ip, unsigned long flags, int pc) |
282 | { | 285 | { |
283 | u64 time = trace_clock_local(); | 286 | u64 time = trace_clock_local(); |
284 | struct ftrace_graph_ent ent = { | 287 | struct ftrace_graph_ent ent = { |
285 | .func = ip, | 288 | .func = ip, |
286 | .depth = 0, | 289 | .depth = 0, |
287 | }; | 290 | }; |
288 | struct ftrace_graph_ret ret = { | 291 | struct ftrace_graph_ret ret = { |
289 | .func = ip, | 292 | .func = ip, |
290 | .depth = 0, | 293 | .depth = 0, |
291 | .calltime = time, | 294 | .calltime = time, |
292 | .rettime = time, | 295 | .rettime = time, |
293 | }; | 296 | }; |
294 | 297 | ||
295 | __trace_graph_entry(tr, &ent, flags, pc); | 298 | __trace_graph_entry(tr, &ent, flags, pc); |
296 | __trace_graph_return(tr, &ret, flags, pc); | 299 | __trace_graph_return(tr, &ret, flags, pc); |
297 | } | 300 | } |
298 | 301 | ||
299 | void | 302 | void |
300 | trace_graph_function(struct trace_array *tr, | 303 | trace_graph_function(struct trace_array *tr, |
301 | unsigned long ip, unsigned long parent_ip, | 304 | unsigned long ip, unsigned long parent_ip, |
302 | unsigned long flags, int pc) | 305 | unsigned long flags, int pc) |
303 | { | 306 | { |
304 | __trace_graph_function(tr, ip, flags, pc); | 307 | __trace_graph_function(tr, ip, flags, pc); |
305 | } | 308 | } |
306 | 309 | ||
307 | void __trace_graph_return(struct trace_array *tr, | 310 | void __trace_graph_return(struct trace_array *tr, |
308 | struct ftrace_graph_ret *trace, | 311 | struct ftrace_graph_ret *trace, |
309 | unsigned long flags, | 312 | unsigned long flags, |
310 | int pc) | 313 | int pc) |
311 | { | 314 | { |
312 | struct ftrace_event_call *call = &event_funcgraph_exit; | 315 | struct ftrace_event_call *call = &event_funcgraph_exit; |
313 | struct ring_buffer_event *event; | 316 | struct ring_buffer_event *event; |
314 | struct ring_buffer *buffer = tr->buffer; | 317 | struct ring_buffer *buffer = tr->buffer; |
315 | struct ftrace_graph_ret_entry *entry; | 318 | struct ftrace_graph_ret_entry *entry; |
316 | 319 | ||
317 | if (unlikely(__this_cpu_read(ftrace_cpu_disabled))) | 320 | if (unlikely(__this_cpu_read(ftrace_cpu_disabled))) |
318 | return; | 321 | return; |
319 | 322 | ||
320 | event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET, | 323 | event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET, |
321 | sizeof(*entry), flags, pc); | 324 | sizeof(*entry), flags, pc); |
322 | if (!event) | 325 | if (!event) |
323 | return; | 326 | return; |
324 | entry = ring_buffer_event_data(event); | 327 | entry = ring_buffer_event_data(event); |
325 | entry->ret = *trace; | 328 | entry->ret = *trace; |
326 | if (!filter_current_check_discard(buffer, call, entry, event)) | 329 | if (!filter_current_check_discard(buffer, call, entry, event)) |
327 | ring_buffer_unlock_commit(buffer, event); | 330 | ring_buffer_unlock_commit(buffer, event); |
328 | } | 331 | } |
329 | 332 | ||
330 | void trace_graph_return(struct ftrace_graph_ret *trace) | 333 | void trace_graph_return(struct ftrace_graph_ret *trace) |
331 | { | 334 | { |
332 | struct trace_array *tr = graph_array; | 335 | struct trace_array *tr = graph_array; |
333 | struct trace_array_cpu *data; | 336 | struct trace_array_cpu *data; |
334 | unsigned long flags; | 337 | unsigned long flags; |
335 | long disabled; | 338 | long disabled; |
336 | int cpu; | 339 | int cpu; |
337 | int pc; | 340 | int pc; |
338 | 341 | ||
339 | local_irq_save(flags); | 342 | local_irq_save(flags); |
340 | cpu = raw_smp_processor_id(); | 343 | cpu = raw_smp_processor_id(); |
341 | data = tr->data[cpu]; | 344 | data = tr->data[cpu]; |
342 | disabled = atomic_inc_return(&data->disabled); | 345 | disabled = atomic_inc_return(&data->disabled); |
343 | if (likely(disabled == 1)) { | 346 | if (likely(disabled == 1)) { |
344 | pc = preempt_count(); | 347 | pc = preempt_count(); |
345 | __trace_graph_return(tr, trace, flags, pc); | 348 | __trace_graph_return(tr, trace, flags, pc); |
346 | } | 349 | } |
347 | atomic_dec(&data->disabled); | 350 | atomic_dec(&data->disabled); |
348 | local_irq_restore(flags); | 351 | local_irq_restore(flags); |
349 | } | 352 | } |
350 | 353 | ||
351 | void set_graph_array(struct trace_array *tr) | 354 | void set_graph_array(struct trace_array *tr) |
352 | { | 355 | { |
353 | graph_array = tr; | 356 | graph_array = tr; |
354 | 357 | ||
355 | /* Make graph_array visible before we start tracing */ | 358 | /* Make graph_array visible before we start tracing */ |
356 | 359 | ||
357 | smp_mb(); | 360 | smp_mb(); |
358 | } | 361 | } |
359 | 362 | ||
360 | void trace_graph_thresh_return(struct ftrace_graph_ret *trace) | 363 | void trace_graph_thresh_return(struct ftrace_graph_ret *trace) |
361 | { | 364 | { |
362 | if (tracing_thresh && | 365 | if (tracing_thresh && |
363 | (trace->rettime - trace->calltime < tracing_thresh)) | 366 | (trace->rettime - trace->calltime < tracing_thresh)) |
364 | return; | 367 | return; |
365 | else | 368 | else |
366 | trace_graph_return(trace); | 369 | trace_graph_return(trace); |
367 | } | 370 | } |
368 | 371 | ||
369 | static int graph_trace_init(struct trace_array *tr) | 372 | static int graph_trace_init(struct trace_array *tr) |
370 | { | 373 | { |
371 | int ret; | 374 | int ret; |
372 | 375 | ||
373 | set_graph_array(tr); | 376 | set_graph_array(tr); |
374 | if (tracing_thresh) | 377 | if (tracing_thresh) |
375 | ret = register_ftrace_graph(&trace_graph_thresh_return, | 378 | ret = register_ftrace_graph(&trace_graph_thresh_return, |
376 | &trace_graph_thresh_entry); | 379 | &trace_graph_thresh_entry); |
377 | else | 380 | else |
378 | ret = register_ftrace_graph(&trace_graph_return, | 381 | ret = register_ftrace_graph(&trace_graph_return, |
379 | &trace_graph_entry); | 382 | &trace_graph_entry); |
380 | if (ret) | 383 | if (ret) |
381 | return ret; | 384 | return ret; |
382 | tracing_start_cmdline_record(); | 385 | tracing_start_cmdline_record(); |
383 | 386 | ||
384 | return 0; | 387 | return 0; |
385 | } | 388 | } |
386 | 389 | ||
387 | static void graph_trace_reset(struct trace_array *tr) | 390 | static void graph_trace_reset(struct trace_array *tr) |
388 | { | 391 | { |
389 | tracing_stop_cmdline_record(); | 392 | tracing_stop_cmdline_record(); |
390 | unregister_ftrace_graph(); | 393 | unregister_ftrace_graph(); |
391 | } | 394 | } |
392 | 395 | ||
393 | static int max_bytes_for_cpu; | 396 | static int max_bytes_for_cpu; |
394 | 397 | ||
395 | static enum print_line_t | 398 | static enum print_line_t |
396 | print_graph_cpu(struct trace_seq *s, int cpu) | 399 | print_graph_cpu(struct trace_seq *s, int cpu) |
397 | { | 400 | { |
398 | int ret; | 401 | int ret; |
399 | 402 | ||
400 | /* | 403 | /* |
401 | * Start with a space character - to make it stand out | 404 | * Start with a space character - to make it stand out |
402 | * to the right a bit when trace output is pasted into | 405 | * to the right a bit when trace output is pasted into |
403 | * email: | 406 | * email: |
404 | */ | 407 | */ |
405 | ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu); | 408 | ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu); |
406 | if (!ret) | 409 | if (!ret) |
407 | return TRACE_TYPE_PARTIAL_LINE; | 410 | return TRACE_TYPE_PARTIAL_LINE; |
408 | 411 | ||
409 | return TRACE_TYPE_HANDLED; | 412 | return TRACE_TYPE_HANDLED; |
410 | } | 413 | } |
411 | 414 | ||
412 | #define TRACE_GRAPH_PROCINFO_LENGTH 14 | 415 | #define TRACE_GRAPH_PROCINFO_LENGTH 14 |
413 | 416 | ||
414 | static enum print_line_t | 417 | static enum print_line_t |
415 | print_graph_proc(struct trace_seq *s, pid_t pid) | 418 | print_graph_proc(struct trace_seq *s, pid_t pid) |
416 | { | 419 | { |
417 | char comm[TASK_COMM_LEN]; | 420 | char comm[TASK_COMM_LEN]; |
418 | /* sign + log10(MAX_INT) + '\0' */ | 421 | /* sign + log10(MAX_INT) + '\0' */ |
419 | char pid_str[11]; | 422 | char pid_str[11]; |
420 | int spaces = 0; | 423 | int spaces = 0; |
421 | int ret; | 424 | int ret; |
422 | int len; | 425 | int len; |
423 | int i; | 426 | int i; |
424 | 427 | ||
425 | trace_find_cmdline(pid, comm); | 428 | trace_find_cmdline(pid, comm); |
426 | comm[7] = '\0'; | 429 | comm[7] = '\0'; |
427 | sprintf(pid_str, "%d", pid); | 430 | sprintf(pid_str, "%d", pid); |
428 | 431 | ||
429 | /* 1 stands for the "-" character */ | 432 | /* 1 stands for the "-" character */ |
430 | len = strlen(comm) + strlen(pid_str) + 1; | 433 | len = strlen(comm) + strlen(pid_str) + 1; |
431 | 434 | ||
432 | if (len < TRACE_GRAPH_PROCINFO_LENGTH) | 435 | if (len < TRACE_GRAPH_PROCINFO_LENGTH) |
433 | spaces = TRACE_GRAPH_PROCINFO_LENGTH - len; | 436 | spaces = TRACE_GRAPH_PROCINFO_LENGTH - len; |
434 | 437 | ||
435 | /* First spaces to align center */ | 438 | /* First spaces to align center */ |
436 | for (i = 0; i < spaces / 2; i++) { | 439 | for (i = 0; i < spaces / 2; i++) { |
437 | ret = trace_seq_printf(s, " "); | 440 | ret = trace_seq_printf(s, " "); |
438 | if (!ret) | 441 | if (!ret) |
439 | return TRACE_TYPE_PARTIAL_LINE; | 442 | return TRACE_TYPE_PARTIAL_LINE; |
440 | } | 443 | } |
441 | 444 | ||
442 | ret = trace_seq_printf(s, "%s-%s", comm, pid_str); | 445 | ret = trace_seq_printf(s, "%s-%s", comm, pid_str); |
443 | if (!ret) | 446 | if (!ret) |
444 | return TRACE_TYPE_PARTIAL_LINE; | 447 | return TRACE_TYPE_PARTIAL_LINE; |
445 | 448 | ||
446 | /* Last spaces to align center */ | 449 | /* Last spaces to align center */ |
447 | for (i = 0; i < spaces - (spaces / 2); i++) { | 450 | for (i = 0; i < spaces - (spaces / 2); i++) { |
448 | ret = trace_seq_printf(s, " "); | 451 | ret = trace_seq_printf(s, " "); |
449 | if (!ret) | 452 | if (!ret) |
450 | return TRACE_TYPE_PARTIAL_LINE; | 453 | return TRACE_TYPE_PARTIAL_LINE; |
451 | } | 454 | } |
452 | return TRACE_TYPE_HANDLED; | 455 | return TRACE_TYPE_HANDLED; |
453 | } | 456 | } |
454 | 457 | ||
455 | 458 | ||
456 | static enum print_line_t | 459 | static enum print_line_t |
457 | print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry) | 460 | print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry) |
458 | { | 461 | { |
459 | if (!trace_seq_putc(s, ' ')) | 462 | if (!trace_seq_putc(s, ' ')) |
460 | return 0; | 463 | return 0; |
461 | 464 | ||
462 | return trace_print_lat_fmt(s, entry); | 465 | return trace_print_lat_fmt(s, entry); |
463 | } | 466 | } |
464 | 467 | ||
465 | /* If the pid changed since the last trace, output this event */ | 468 | /* If the pid changed since the last trace, output this event */ |
466 | static enum print_line_t | 469 | static enum print_line_t |
467 | verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) | 470 | verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) |
468 | { | 471 | { |
469 | pid_t prev_pid; | 472 | pid_t prev_pid; |
470 | pid_t *last_pid; | 473 | pid_t *last_pid; |
471 | int ret; | 474 | int ret; |
472 | 475 | ||
473 | if (!data) | 476 | if (!data) |
474 | return TRACE_TYPE_HANDLED; | 477 | return TRACE_TYPE_HANDLED; |
475 | 478 | ||
476 | last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); | 479 | last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); |
477 | 480 | ||
478 | if (*last_pid == pid) | 481 | if (*last_pid == pid) |
479 | return TRACE_TYPE_HANDLED; | 482 | return TRACE_TYPE_HANDLED; |
480 | 483 | ||
481 | prev_pid = *last_pid; | 484 | prev_pid = *last_pid; |
482 | *last_pid = pid; | 485 | *last_pid = pid; |
483 | 486 | ||
484 | if (prev_pid == -1) | 487 | if (prev_pid == -1) |
485 | return TRACE_TYPE_HANDLED; | 488 | return TRACE_TYPE_HANDLED; |
486 | /* | 489 | /* |
487 | * Context-switch trace line: | 490 | * Context-switch trace line: |
488 | 491 | ||
489 | ------------------------------------------ | 492 | ------------------------------------------ |
490 | | 1) migration/0--1 => sshd-1755 | 493 | | 1) migration/0--1 => sshd-1755 |
491 | ------------------------------------------ | 494 | ------------------------------------------ |
492 | 495 | ||
493 | */ | 496 | */ |
494 | ret = trace_seq_printf(s, | 497 | ret = trace_seq_printf(s, |
495 | " ------------------------------------------\n"); | 498 | " ------------------------------------------\n"); |
496 | if (!ret) | 499 | if (!ret) |
497 | return TRACE_TYPE_PARTIAL_LINE; | 500 | return TRACE_TYPE_PARTIAL_LINE; |
498 | 501 | ||
499 | ret = print_graph_cpu(s, cpu); | 502 | ret = print_graph_cpu(s, cpu); |
500 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 503 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
501 | return TRACE_TYPE_PARTIAL_LINE; | 504 | return TRACE_TYPE_PARTIAL_LINE; |
502 | 505 | ||
503 | ret = print_graph_proc(s, prev_pid); | 506 | ret = print_graph_proc(s, prev_pid); |
504 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 507 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
505 | return TRACE_TYPE_PARTIAL_LINE; | 508 | return TRACE_TYPE_PARTIAL_LINE; |
506 | 509 | ||
507 | ret = trace_seq_printf(s, " => "); | 510 | ret = trace_seq_printf(s, " => "); |
508 | if (!ret) | 511 | if (!ret) |
509 | return TRACE_TYPE_PARTIAL_LINE; | 512 | return TRACE_TYPE_PARTIAL_LINE; |
510 | 513 | ||
511 | ret = print_graph_proc(s, pid); | 514 | ret = print_graph_proc(s, pid); |
512 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 515 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
513 | return TRACE_TYPE_PARTIAL_LINE; | 516 | return TRACE_TYPE_PARTIAL_LINE; |
514 | 517 | ||
515 | ret = trace_seq_printf(s, | 518 | ret = trace_seq_printf(s, |
516 | "\n ------------------------------------------\n\n"); | 519 | "\n ------------------------------------------\n\n"); |
517 | if (!ret) | 520 | if (!ret) |
518 | return TRACE_TYPE_PARTIAL_LINE; | 521 | return TRACE_TYPE_PARTIAL_LINE; |
519 | 522 | ||
520 | return TRACE_TYPE_HANDLED; | 523 | return TRACE_TYPE_HANDLED; |
521 | } | 524 | } |
522 | 525 | ||
523 | static struct ftrace_graph_ret_entry * | 526 | static struct ftrace_graph_ret_entry * |
524 | get_return_for_leaf(struct trace_iterator *iter, | 527 | get_return_for_leaf(struct trace_iterator *iter, |
525 | struct ftrace_graph_ent_entry *curr) | 528 | struct ftrace_graph_ent_entry *curr) |
526 | { | 529 | { |
527 | struct fgraph_data *data = iter->private; | 530 | struct fgraph_data *data = iter->private; |
528 | struct ring_buffer_iter *ring_iter = NULL; | 531 | struct ring_buffer_iter *ring_iter = NULL; |
529 | struct ring_buffer_event *event; | 532 | struct ring_buffer_event *event; |
530 | struct ftrace_graph_ret_entry *next; | 533 | struct ftrace_graph_ret_entry *next; |
531 | 534 | ||
532 | /* | 535 | /* |
533 | * If the previous output failed to write to the seq buffer, | 536 | * If the previous output failed to write to the seq buffer, |
534 | * then we just reuse the data from before. | 537 | * then we just reuse the data from before. |
535 | */ | 538 | */ |
536 | if (data && data->failed) { | 539 | if (data && data->failed) { |
537 | curr = &data->ent; | 540 | curr = &data->ent; |
538 | next = &data->ret; | 541 | next = &data->ret; |
539 | } else { | 542 | } else { |
540 | 543 | ||
541 | ring_iter = trace_buffer_iter(iter, iter->cpu); | 544 | ring_iter = trace_buffer_iter(iter, iter->cpu); |
542 | 545 | ||
543 | /* First peek to compare current entry and the next one */ | 546 | /* First peek to compare current entry and the next one */ |
544 | if (ring_iter) | 547 | if (ring_iter) |
545 | event = ring_buffer_iter_peek(ring_iter, NULL); | 548 | event = ring_buffer_iter_peek(ring_iter, NULL); |
546 | else { | 549 | else { |
547 | /* | 550 | /* |
548 | * We need to consume the current entry to see | 551 | * We need to consume the current entry to see |
549 | * the next one. | 552 | * the next one. |
550 | */ | 553 | */ |
551 | ring_buffer_consume(iter->tr->buffer, iter->cpu, | 554 | ring_buffer_consume(iter->tr->buffer, iter->cpu, |
552 | NULL, NULL); | 555 | NULL, NULL); |
553 | event = ring_buffer_peek(iter->tr->buffer, iter->cpu, | 556 | event = ring_buffer_peek(iter->tr->buffer, iter->cpu, |
554 | NULL, NULL); | 557 | NULL, NULL); |
555 | } | 558 | } |
556 | 559 | ||
557 | if (!event) | 560 | if (!event) |
558 | return NULL; | 561 | return NULL; |
559 | 562 | ||
560 | next = ring_buffer_event_data(event); | 563 | next = ring_buffer_event_data(event); |
561 | 564 | ||
562 | if (data) { | 565 | if (data) { |
563 | /* | 566 | /* |
564 | * Save current and next entries for later reference | 567 | * Save current and next entries for later reference |
565 | * if the output fails. | 568 | * if the output fails. |
566 | */ | 569 | */ |
567 | data->ent = *curr; | 570 | data->ent = *curr; |
568 | /* | 571 | /* |
569 | * If the next event is not a return type, then | 572 | * If the next event is not a return type, then |
570 | * we only care about what type it is. Otherwise we can | 573 | * we only care about what type it is. Otherwise we can |
571 | * safely copy the entire event. | 574 | * safely copy the entire event. |
572 | */ | 575 | */ |
573 | if (next->ent.type == TRACE_GRAPH_RET) | 576 | if (next->ent.type == TRACE_GRAPH_RET) |
574 | data->ret = *next; | 577 | data->ret = *next; |
575 | else | 578 | else |
576 | data->ret.ent.type = next->ent.type; | 579 | data->ret.ent.type = next->ent.type; |
577 | } | 580 | } |
578 | } | 581 | } |
579 | 582 | ||
580 | if (next->ent.type != TRACE_GRAPH_RET) | 583 | if (next->ent.type != TRACE_GRAPH_RET) |
581 | return NULL; | 584 | return NULL; |
582 | 585 | ||
583 | if (curr->ent.pid != next->ent.pid || | 586 | if (curr->ent.pid != next->ent.pid || |
584 | curr->graph_ent.func != next->ret.func) | 587 | curr->graph_ent.func != next->ret.func) |
585 | return NULL; | 588 | return NULL; |
586 | 589 | ||
587 | /* this is a leaf, now advance the iterator */ | 590 | /* this is a leaf, now advance the iterator */ |
588 | if (ring_iter) | 591 | if (ring_iter) |
589 | ring_buffer_read(ring_iter, NULL); | 592 | ring_buffer_read(ring_iter, NULL); |
590 | 593 | ||
591 | return next; | 594 | return next; |
592 | } | 595 | } |
593 | 596 | ||
594 | static int print_graph_abs_time(u64 t, struct trace_seq *s) | 597 | static int print_graph_abs_time(u64 t, struct trace_seq *s) |
595 | { | 598 | { |
596 | unsigned long usecs_rem; | 599 | unsigned long usecs_rem; |
597 | 600 | ||
598 | usecs_rem = do_div(t, NSEC_PER_SEC); | 601 | usecs_rem = do_div(t, NSEC_PER_SEC); |
599 | usecs_rem /= 1000; | 602 | usecs_rem /= 1000; |
600 | 603 | ||
601 | return trace_seq_printf(s, "%5lu.%06lu | ", | 604 | return trace_seq_printf(s, "%5lu.%06lu | ", |
602 | (unsigned long)t, usecs_rem); | 605 | (unsigned long)t, usecs_rem); |
603 | } | 606 | } |
604 | 607 | ||
605 | static enum print_line_t | 608 | static enum print_line_t |
606 | print_graph_irq(struct trace_iterator *iter, unsigned long addr, | 609 | print_graph_irq(struct trace_iterator *iter, unsigned long addr, |
607 | enum trace_type type, int cpu, pid_t pid, u32 flags) | 610 | enum trace_type type, int cpu, pid_t pid, u32 flags) |
608 | { | 611 | { |
609 | int ret; | 612 | int ret; |
610 | struct trace_seq *s = &iter->seq; | 613 | struct trace_seq *s = &iter->seq; |
611 | 614 | ||
612 | if (addr < (unsigned long)__irqentry_text_start || | 615 | if (addr < (unsigned long)__irqentry_text_start || |
613 | addr >= (unsigned long)__irqentry_text_end) | 616 | addr >= (unsigned long)__irqentry_text_end) |
614 | return TRACE_TYPE_UNHANDLED; | 617 | return TRACE_TYPE_UNHANDLED; |
615 | 618 | ||
616 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 619 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { |
617 | /* Absolute time */ | 620 | /* Absolute time */ |
618 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) { | 621 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) { |
619 | ret = print_graph_abs_time(iter->ts, s); | 622 | ret = print_graph_abs_time(iter->ts, s); |
620 | if (!ret) | 623 | if (!ret) |
621 | return TRACE_TYPE_PARTIAL_LINE; | 624 | return TRACE_TYPE_PARTIAL_LINE; |
622 | } | 625 | } |
623 | 626 | ||
624 | /* Cpu */ | 627 | /* Cpu */ |
625 | if (flags & TRACE_GRAPH_PRINT_CPU) { | 628 | if (flags & TRACE_GRAPH_PRINT_CPU) { |
626 | ret = print_graph_cpu(s, cpu); | 629 | ret = print_graph_cpu(s, cpu); |
627 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 630 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
628 | return TRACE_TYPE_PARTIAL_LINE; | 631 | return TRACE_TYPE_PARTIAL_LINE; |
629 | } | 632 | } |
630 | 633 | ||
631 | /* Proc */ | 634 | /* Proc */ |
632 | if (flags & TRACE_GRAPH_PRINT_PROC) { | 635 | if (flags & TRACE_GRAPH_PRINT_PROC) { |
633 | ret = print_graph_proc(s, pid); | 636 | ret = print_graph_proc(s, pid); |
634 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 637 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
635 | return TRACE_TYPE_PARTIAL_LINE; | 638 | return TRACE_TYPE_PARTIAL_LINE; |
636 | ret = trace_seq_printf(s, " | "); | 639 | ret = trace_seq_printf(s, " | "); |
637 | if (!ret) | 640 | if (!ret) |
638 | return TRACE_TYPE_PARTIAL_LINE; | 641 | return TRACE_TYPE_PARTIAL_LINE; |
639 | } | 642 | } |
640 | } | 643 | } |
641 | 644 | ||
642 | /* No overhead */ | 645 | /* No overhead */ |
643 | ret = print_graph_duration(DURATION_FILL_START, s, flags); | 646 | ret = print_graph_duration(DURATION_FILL_START, s, flags); |
644 | if (ret != TRACE_TYPE_HANDLED) | 647 | if (ret != TRACE_TYPE_HANDLED) |
645 | return ret; | 648 | return ret; |
646 | 649 | ||
647 | if (type == TRACE_GRAPH_ENT) | 650 | if (type == TRACE_GRAPH_ENT) |
648 | ret = trace_seq_printf(s, "==========>"); | 651 | ret = trace_seq_printf(s, "==========>"); |
649 | else | 652 | else |
650 | ret = trace_seq_printf(s, "<=========="); | 653 | ret = trace_seq_printf(s, "<=========="); |
651 | 654 | ||
652 | if (!ret) | 655 | if (!ret) |
653 | return TRACE_TYPE_PARTIAL_LINE; | 656 | return TRACE_TYPE_PARTIAL_LINE; |
654 | 657 | ||
655 | ret = print_graph_duration(DURATION_FILL_END, s, flags); | 658 | ret = print_graph_duration(DURATION_FILL_END, s, flags); |
656 | if (ret != TRACE_TYPE_HANDLED) | 659 | if (ret != TRACE_TYPE_HANDLED) |
657 | return ret; | 660 | return ret; |
658 | 661 | ||
659 | ret = trace_seq_printf(s, "\n"); | 662 | ret = trace_seq_printf(s, "\n"); |
660 | 663 | ||
661 | if (!ret) | 664 | if (!ret) |
662 | return TRACE_TYPE_PARTIAL_LINE; | 665 | return TRACE_TYPE_PARTIAL_LINE; |
663 | return TRACE_TYPE_HANDLED; | 666 | return TRACE_TYPE_HANDLED; |
664 | } | 667 | } |
665 | 668 | ||
666 | enum print_line_t | 669 | enum print_line_t |
667 | trace_print_graph_duration(unsigned long long duration, struct trace_seq *s) | 670 | trace_print_graph_duration(unsigned long long duration, struct trace_seq *s) |
668 | { | 671 | { |
669 | unsigned long nsecs_rem = do_div(duration, 1000); | 672 | unsigned long nsecs_rem = do_div(duration, 1000); |
670 | /* log10(ULONG_MAX) + '\0' */ | 673 | /* log10(ULONG_MAX) + '\0' */ |
671 | char msecs_str[21]; | 674 | char msecs_str[21]; |
672 | char nsecs_str[5]; | 675 | char nsecs_str[5]; |
673 | int ret, len; | 676 | int ret, len; |
674 | int i; | 677 | int i; |
675 | 678 | ||
676 | sprintf(msecs_str, "%lu", (unsigned long) duration); | 679 | sprintf(msecs_str, "%lu", (unsigned long) duration); |
677 | 680 | ||
678 | /* Print msecs */ | 681 | /* Print msecs */ |
679 | ret = trace_seq_printf(s, "%s", msecs_str); | 682 | ret = trace_seq_printf(s, "%s", msecs_str); |
680 | if (!ret) | 683 | if (!ret) |
681 | return TRACE_TYPE_PARTIAL_LINE; | 684 | return TRACE_TYPE_PARTIAL_LINE; |
682 | 685 | ||
683 | len = strlen(msecs_str); | 686 | len = strlen(msecs_str); |
684 | 687 | ||
685 | /* Print nsecs (we don't want to exceed 7 numbers) */ | 688 | /* Print nsecs (we don't want to exceed 7 numbers) */ |
686 | if (len < 7) { | 689 | if (len < 7) { |
687 | size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len); | 690 | size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len); |
688 | 691 | ||
689 | snprintf(nsecs_str, slen, "%03lu", nsecs_rem); | 692 | snprintf(nsecs_str, slen, "%03lu", nsecs_rem); |
690 | ret = trace_seq_printf(s, ".%s", nsecs_str); | 693 | ret = trace_seq_printf(s, ".%s", nsecs_str); |
691 | if (!ret) | 694 | if (!ret) |
692 | return TRACE_TYPE_PARTIAL_LINE; | 695 | return TRACE_TYPE_PARTIAL_LINE; |
693 | len += strlen(nsecs_str); | 696 | len += strlen(nsecs_str); |
694 | } | 697 | } |
695 | 698 | ||
696 | ret = trace_seq_printf(s, " us "); | 699 | ret = trace_seq_printf(s, " us "); |
697 | if (!ret) | 700 | if (!ret) |
698 | return TRACE_TYPE_PARTIAL_LINE; | 701 | return TRACE_TYPE_PARTIAL_LINE; |
699 | 702 | ||
700 | /* Print remaining spaces to fit the row's width */ | 703 | /* Print remaining spaces to fit the row's width */ |
701 | for (i = len; i < 7; i++) { | 704 | for (i = len; i < 7; i++) { |
702 | ret = trace_seq_printf(s, " "); | 705 | ret = trace_seq_printf(s, " "); |
703 | if (!ret) | 706 | if (!ret) |
704 | return TRACE_TYPE_PARTIAL_LINE; | 707 | return TRACE_TYPE_PARTIAL_LINE; |
705 | } | 708 | } |
706 | return TRACE_TYPE_HANDLED; | 709 | return TRACE_TYPE_HANDLED; |
707 | } | 710 | } |
708 | 711 | ||
709 | static enum print_line_t | 712 | static enum print_line_t |
710 | print_graph_duration(unsigned long long duration, struct trace_seq *s, | 713 | print_graph_duration(unsigned long long duration, struct trace_seq *s, |
711 | u32 flags) | 714 | u32 flags) |
712 | { | 715 | { |
713 | int ret = -1; | 716 | int ret = -1; |
714 | 717 | ||
715 | if (!(flags & TRACE_GRAPH_PRINT_DURATION) || | 718 | if (!(flags & TRACE_GRAPH_PRINT_DURATION) || |
716 | !(trace_flags & TRACE_ITER_CONTEXT_INFO)) | 719 | !(trace_flags & TRACE_ITER_CONTEXT_INFO)) |
717 | return TRACE_TYPE_HANDLED; | 720 | return TRACE_TYPE_HANDLED; |
718 | 721 | ||
719 | /* No real adata, just filling the column with spaces */ | 722 | /* No real adata, just filling the column with spaces */ |
720 | switch (duration) { | 723 | switch (duration) { |
721 | case DURATION_FILL_FULL: | 724 | case DURATION_FILL_FULL: |
722 | ret = trace_seq_printf(s, " | "); | 725 | ret = trace_seq_printf(s, " | "); |
723 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 726 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; |
724 | case DURATION_FILL_START: | 727 | case DURATION_FILL_START: |
725 | ret = trace_seq_printf(s, " "); | 728 | ret = trace_seq_printf(s, " "); |
726 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 729 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; |
727 | case DURATION_FILL_END: | 730 | case DURATION_FILL_END: |
728 | ret = trace_seq_printf(s, " |"); | 731 | ret = trace_seq_printf(s, " |"); |
729 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 732 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; |
730 | } | 733 | } |
731 | 734 | ||
732 | /* Signal a overhead of time execution to the output */ | 735 | /* Signal a overhead of time execution to the output */ |
733 | if (flags & TRACE_GRAPH_PRINT_OVERHEAD) { | 736 | if (flags & TRACE_GRAPH_PRINT_OVERHEAD) { |
734 | /* Duration exceeded 100 msecs */ | 737 | /* Duration exceeded 100 msecs */ |
735 | if (duration > 100000ULL) | 738 | if (duration > 100000ULL) |
736 | ret = trace_seq_printf(s, "! "); | 739 | ret = trace_seq_printf(s, "! "); |
737 | /* Duration exceeded 10 msecs */ | 740 | /* Duration exceeded 10 msecs */ |
738 | else if (duration > 10000ULL) | 741 | else if (duration > 10000ULL) |
739 | ret = trace_seq_printf(s, "+ "); | 742 | ret = trace_seq_printf(s, "+ "); |
740 | } | 743 | } |
741 | 744 | ||
742 | /* | 745 | /* |
743 | * The -1 means we either did not exceed the duration tresholds | 746 | * The -1 means we either did not exceed the duration tresholds |
744 | * or we dont want to print out the overhead. Either way we need | 747 | * or we dont want to print out the overhead. Either way we need |
745 | * to fill out the space. | 748 | * to fill out the space. |
746 | */ | 749 | */ |
747 | if (ret == -1) | 750 | if (ret == -1) |
748 | ret = trace_seq_printf(s, " "); | 751 | ret = trace_seq_printf(s, " "); |
749 | 752 | ||
750 | /* Catching here any failure happenned above */ | 753 | /* Catching here any failure happenned above */ |
751 | if (!ret) | 754 | if (!ret) |
752 | return TRACE_TYPE_PARTIAL_LINE; | 755 | return TRACE_TYPE_PARTIAL_LINE; |
753 | 756 | ||
754 | ret = trace_print_graph_duration(duration, s); | 757 | ret = trace_print_graph_duration(duration, s); |
755 | if (ret != TRACE_TYPE_HANDLED) | 758 | if (ret != TRACE_TYPE_HANDLED) |
756 | return ret; | 759 | return ret; |
757 | 760 | ||
758 | ret = trace_seq_printf(s, "| "); | 761 | ret = trace_seq_printf(s, "| "); |
759 | if (!ret) | 762 | if (!ret) |
760 | return TRACE_TYPE_PARTIAL_LINE; | 763 | return TRACE_TYPE_PARTIAL_LINE; |
761 | 764 | ||
762 | return TRACE_TYPE_HANDLED; | 765 | return TRACE_TYPE_HANDLED; |
763 | } | 766 | } |
764 | 767 | ||
765 | /* Case of a leaf function on its call entry */ | 768 | /* Case of a leaf function on its call entry */ |
766 | static enum print_line_t | 769 | static enum print_line_t |
767 | print_graph_entry_leaf(struct trace_iterator *iter, | 770 | print_graph_entry_leaf(struct trace_iterator *iter, |
768 | struct ftrace_graph_ent_entry *entry, | 771 | struct ftrace_graph_ent_entry *entry, |
769 | struct ftrace_graph_ret_entry *ret_entry, | 772 | struct ftrace_graph_ret_entry *ret_entry, |
770 | struct trace_seq *s, u32 flags) | 773 | struct trace_seq *s, u32 flags) |
771 | { | 774 | { |
772 | struct fgraph_data *data = iter->private; | 775 | struct fgraph_data *data = iter->private; |
773 | struct ftrace_graph_ret *graph_ret; | 776 | struct ftrace_graph_ret *graph_ret; |
774 | struct ftrace_graph_ent *call; | 777 | struct ftrace_graph_ent *call; |
775 | unsigned long long duration; | 778 | unsigned long long duration; |
776 | int ret; | 779 | int ret; |
777 | int i; | 780 | int i; |
778 | 781 | ||
779 | graph_ret = &ret_entry->ret; | 782 | graph_ret = &ret_entry->ret; |
780 | call = &entry->graph_ent; | 783 | call = &entry->graph_ent; |
781 | duration = graph_ret->rettime - graph_ret->calltime; | 784 | duration = graph_ret->rettime - graph_ret->calltime; |
782 | 785 | ||
783 | if (data) { | 786 | if (data) { |
784 | struct fgraph_cpu_data *cpu_data; | 787 | struct fgraph_cpu_data *cpu_data; |
785 | int cpu = iter->cpu; | 788 | int cpu = iter->cpu; |
786 | 789 | ||
787 | cpu_data = per_cpu_ptr(data->cpu_data, cpu); | 790 | cpu_data = per_cpu_ptr(data->cpu_data, cpu); |
788 | 791 | ||
789 | /* | 792 | /* |
790 | * Comments display at + 1 to depth. Since | 793 | * Comments display at + 1 to depth. Since |
791 | * this is a leaf function, keep the comments | 794 | * this is a leaf function, keep the comments |
792 | * equal to this depth. | 795 | * equal to this depth. |
793 | */ | 796 | */ |
794 | cpu_data->depth = call->depth - 1; | 797 | cpu_data->depth = call->depth - 1; |
795 | 798 | ||
796 | /* No need to keep this function around for this depth */ | 799 | /* No need to keep this function around for this depth */ |
797 | if (call->depth < FTRACE_RETFUNC_DEPTH) | 800 | if (call->depth < FTRACE_RETFUNC_DEPTH) |
798 | cpu_data->enter_funcs[call->depth] = 0; | 801 | cpu_data->enter_funcs[call->depth] = 0; |
799 | } | 802 | } |
800 | 803 | ||
801 | /* Overhead and duration */ | 804 | /* Overhead and duration */ |
802 | ret = print_graph_duration(duration, s, flags); | 805 | ret = print_graph_duration(duration, s, flags); |
803 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 806 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
804 | return TRACE_TYPE_PARTIAL_LINE; | 807 | return TRACE_TYPE_PARTIAL_LINE; |
805 | 808 | ||
806 | /* Function */ | 809 | /* Function */ |
807 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | 810 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { |
808 | ret = trace_seq_printf(s, " "); | 811 | ret = trace_seq_printf(s, " "); |
809 | if (!ret) | 812 | if (!ret) |
810 | return TRACE_TYPE_PARTIAL_LINE; | 813 | return TRACE_TYPE_PARTIAL_LINE; |
811 | } | 814 | } |
812 | 815 | ||
813 | ret = trace_seq_printf(s, "%ps();\n", (void *)call->func); | 816 | ret = trace_seq_printf(s, "%ps();\n", (void *)call->func); |
814 | if (!ret) | 817 | if (!ret) |
815 | return TRACE_TYPE_PARTIAL_LINE; | 818 | return TRACE_TYPE_PARTIAL_LINE; |
816 | 819 | ||
817 | return TRACE_TYPE_HANDLED; | 820 | return TRACE_TYPE_HANDLED; |
818 | } | 821 | } |
819 | 822 | ||
820 | static enum print_line_t | 823 | static enum print_line_t |
821 | print_graph_entry_nested(struct trace_iterator *iter, | 824 | print_graph_entry_nested(struct trace_iterator *iter, |
822 | struct ftrace_graph_ent_entry *entry, | 825 | struct ftrace_graph_ent_entry *entry, |
823 | struct trace_seq *s, int cpu, u32 flags) | 826 | struct trace_seq *s, int cpu, u32 flags) |
824 | { | 827 | { |
825 | struct ftrace_graph_ent *call = &entry->graph_ent; | 828 | struct ftrace_graph_ent *call = &entry->graph_ent; |
826 | struct fgraph_data *data = iter->private; | 829 | struct fgraph_data *data = iter->private; |
827 | int ret; | 830 | int ret; |
828 | int i; | 831 | int i; |
829 | 832 | ||
830 | if (data) { | 833 | if (data) { |
831 | struct fgraph_cpu_data *cpu_data; | 834 | struct fgraph_cpu_data *cpu_data; |
832 | int cpu = iter->cpu; | 835 | int cpu = iter->cpu; |
833 | 836 | ||
834 | cpu_data = per_cpu_ptr(data->cpu_data, cpu); | 837 | cpu_data = per_cpu_ptr(data->cpu_data, cpu); |
835 | cpu_data->depth = call->depth; | 838 | cpu_data->depth = call->depth; |
836 | 839 | ||
837 | /* Save this function pointer to see if the exit matches */ | 840 | /* Save this function pointer to see if the exit matches */ |
838 | if (call->depth < FTRACE_RETFUNC_DEPTH) | 841 | if (call->depth < FTRACE_RETFUNC_DEPTH) |
839 | cpu_data->enter_funcs[call->depth] = call->func; | 842 | cpu_data->enter_funcs[call->depth] = call->func; |
840 | } | 843 | } |
841 | 844 | ||
842 | /* No time */ | 845 | /* No time */ |
843 | ret = print_graph_duration(DURATION_FILL_FULL, s, flags); | 846 | ret = print_graph_duration(DURATION_FILL_FULL, s, flags); |
844 | if (ret != TRACE_TYPE_HANDLED) | 847 | if (ret != TRACE_TYPE_HANDLED) |
845 | return ret; | 848 | return ret; |
846 | 849 | ||
847 | /* Function */ | 850 | /* Function */ |
848 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | 851 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { |
849 | ret = trace_seq_printf(s, " "); | 852 | ret = trace_seq_printf(s, " "); |
850 | if (!ret) | 853 | if (!ret) |
851 | return TRACE_TYPE_PARTIAL_LINE; | 854 | return TRACE_TYPE_PARTIAL_LINE; |
852 | } | 855 | } |
853 | 856 | ||
854 | ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func); | 857 | ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func); |
855 | if (!ret) | 858 | if (!ret) |
856 | return TRACE_TYPE_PARTIAL_LINE; | 859 | return TRACE_TYPE_PARTIAL_LINE; |
857 | 860 | ||
858 | /* | 861 | /* |
859 | * we already consumed the current entry to check the next one | 862 | * we already consumed the current entry to check the next one |
860 | * and see if this is a leaf. | 863 | * and see if this is a leaf. |
861 | */ | 864 | */ |
862 | return TRACE_TYPE_NO_CONSUME; | 865 | return TRACE_TYPE_NO_CONSUME; |
863 | } | 866 | } |
864 | 867 | ||
865 | static enum print_line_t | 868 | static enum print_line_t |
866 | print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, | 869 | print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, |
867 | int type, unsigned long addr, u32 flags) | 870 | int type, unsigned long addr, u32 flags) |
868 | { | 871 | { |
869 | struct fgraph_data *data = iter->private; | 872 | struct fgraph_data *data = iter->private; |
870 | struct trace_entry *ent = iter->ent; | 873 | struct trace_entry *ent = iter->ent; |
871 | int cpu = iter->cpu; | 874 | int cpu = iter->cpu; |
872 | int ret; | 875 | int ret; |
873 | 876 | ||
874 | /* Pid */ | 877 | /* Pid */ |
875 | if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE) | 878 | if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE) |
876 | return TRACE_TYPE_PARTIAL_LINE; | 879 | return TRACE_TYPE_PARTIAL_LINE; |
877 | 880 | ||
878 | if (type) { | 881 | if (type) { |
879 | /* Interrupt */ | 882 | /* Interrupt */ |
880 | ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags); | 883 | ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags); |
881 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 884 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
882 | return TRACE_TYPE_PARTIAL_LINE; | 885 | return TRACE_TYPE_PARTIAL_LINE; |
883 | } | 886 | } |
884 | 887 | ||
885 | if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) | 888 | if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) |
886 | return 0; | 889 | return 0; |
887 | 890 | ||
888 | /* Absolute time */ | 891 | /* Absolute time */ |
889 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) { | 892 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) { |
890 | ret = print_graph_abs_time(iter->ts, s); | 893 | ret = print_graph_abs_time(iter->ts, s); |
891 | if (!ret) | 894 | if (!ret) |
892 | return TRACE_TYPE_PARTIAL_LINE; | 895 | return TRACE_TYPE_PARTIAL_LINE; |
893 | } | 896 | } |
894 | 897 | ||
895 | /* Cpu */ | 898 | /* Cpu */ |
896 | if (flags & TRACE_GRAPH_PRINT_CPU) { | 899 | if (flags & TRACE_GRAPH_PRINT_CPU) { |
897 | ret = print_graph_cpu(s, cpu); | 900 | ret = print_graph_cpu(s, cpu); |
898 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 901 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
899 | return TRACE_TYPE_PARTIAL_LINE; | 902 | return TRACE_TYPE_PARTIAL_LINE; |
900 | } | 903 | } |
901 | 904 | ||
902 | /* Proc */ | 905 | /* Proc */ |
903 | if (flags & TRACE_GRAPH_PRINT_PROC) { | 906 | if (flags & TRACE_GRAPH_PRINT_PROC) { |
904 | ret = print_graph_proc(s, ent->pid); | 907 | ret = print_graph_proc(s, ent->pid); |
905 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 908 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
906 | return TRACE_TYPE_PARTIAL_LINE; | 909 | return TRACE_TYPE_PARTIAL_LINE; |
907 | 910 | ||
908 | ret = trace_seq_printf(s, " | "); | 911 | ret = trace_seq_printf(s, " | "); |
909 | if (!ret) | 912 | if (!ret) |
910 | return TRACE_TYPE_PARTIAL_LINE; | 913 | return TRACE_TYPE_PARTIAL_LINE; |
911 | } | 914 | } |
912 | 915 | ||
913 | /* Latency format */ | 916 | /* Latency format */ |
914 | if (trace_flags & TRACE_ITER_LATENCY_FMT) { | 917 | if (trace_flags & TRACE_ITER_LATENCY_FMT) { |
915 | ret = print_graph_lat_fmt(s, ent); | 918 | ret = print_graph_lat_fmt(s, ent); |
916 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 919 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
917 | return TRACE_TYPE_PARTIAL_LINE; | 920 | return TRACE_TYPE_PARTIAL_LINE; |
918 | } | 921 | } |
919 | 922 | ||
920 | return 0; | 923 | return 0; |
921 | } | 924 | } |
922 | 925 | ||
923 | /* | 926 | /* |
924 | * Entry check for irq code | 927 | * Entry check for irq code |
925 | * | 928 | * |
926 | * returns 1 if | 929 | * returns 1 if |
927 | * - we are inside irq code | 930 | * - we are inside irq code |
928 | * - we just entered irq code | 931 | * - we just entered irq code |
929 | * | 932 | * |
930 | * returns 0 if | 933 | * returns 0 if |
931 | * - funcgraph-interrupts option is set | 934 | * - funcgraph-interrupts option is set |
932 | * - we are not inside irq code | 935 | * - we are not inside irq code |
933 | */ | 936 | */ |
934 | static int | 937 | static int |
935 | check_irq_entry(struct trace_iterator *iter, u32 flags, | 938 | check_irq_entry(struct trace_iterator *iter, u32 flags, |
936 | unsigned long addr, int depth) | 939 | unsigned long addr, int depth) |
937 | { | 940 | { |
938 | int cpu = iter->cpu; | 941 | int cpu = iter->cpu; |
939 | int *depth_irq; | 942 | int *depth_irq; |
940 | struct fgraph_data *data = iter->private; | 943 | struct fgraph_data *data = iter->private; |
941 | 944 | ||
942 | /* | 945 | /* |
943 | * If we are either displaying irqs, or we got called as | 946 | * If we are either displaying irqs, or we got called as |
944 | * a graph event and private data does not exist, | 947 | * a graph event and private data does not exist, |
945 | * then we bypass the irq check. | 948 | * then we bypass the irq check. |
946 | */ | 949 | */ |
947 | if ((flags & TRACE_GRAPH_PRINT_IRQS) || | 950 | if ((flags & TRACE_GRAPH_PRINT_IRQS) || |
948 | (!data)) | 951 | (!data)) |
949 | return 0; | 952 | return 0; |
950 | 953 | ||
951 | depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); | 954 | depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); |
952 | 955 | ||
953 | /* | 956 | /* |
954 | * We are inside the irq code | 957 | * We are inside the irq code |
955 | */ | 958 | */ |
956 | if (*depth_irq >= 0) | 959 | if (*depth_irq >= 0) |
957 | return 1; | 960 | return 1; |
958 | 961 | ||
959 | if ((addr < (unsigned long)__irqentry_text_start) || | 962 | if ((addr < (unsigned long)__irqentry_text_start) || |
960 | (addr >= (unsigned long)__irqentry_text_end)) | 963 | (addr >= (unsigned long)__irqentry_text_end)) |
961 | return 0; | 964 | return 0; |
962 | 965 | ||
963 | /* | 966 | /* |
964 | * We are entering irq code. | 967 | * We are entering irq code. |
965 | */ | 968 | */ |
966 | *depth_irq = depth; | 969 | *depth_irq = depth; |
967 | return 1; | 970 | return 1; |
968 | } | 971 | } |
969 | 972 | ||
970 | /* | 973 | /* |
971 | * Return check for irq code | 974 | * Return check for irq code |
972 | * | 975 | * |
973 | * returns 1 if | 976 | * returns 1 if |
974 | * - we are inside irq code | 977 | * - we are inside irq code |
975 | * - we just left irq code | 978 | * - we just left irq code |
976 | * | 979 | * |
977 | * returns 0 if | 980 | * returns 0 if |
978 | * - funcgraph-interrupts option is set | 981 | * - funcgraph-interrupts option is set |
979 | * - we are not inside irq code | 982 | * - we are not inside irq code |
980 | */ | 983 | */ |
981 | static int | 984 | static int |
982 | check_irq_return(struct trace_iterator *iter, u32 flags, int depth) | 985 | check_irq_return(struct trace_iterator *iter, u32 flags, int depth) |
983 | { | 986 | { |
984 | int cpu = iter->cpu; | 987 | int cpu = iter->cpu; |
985 | int *depth_irq; | 988 | int *depth_irq; |
986 | struct fgraph_data *data = iter->private; | 989 | struct fgraph_data *data = iter->private; |
987 | 990 | ||
988 | /* | 991 | /* |
989 | * If we are either displaying irqs, or we got called as | 992 | * If we are either displaying irqs, or we got called as |
990 | * a graph event and private data does not exist, | 993 | * a graph event and private data does not exist, |
991 | * then we bypass the irq check. | 994 | * then we bypass the irq check. |
992 | */ | 995 | */ |
993 | if ((flags & TRACE_GRAPH_PRINT_IRQS) || | 996 | if ((flags & TRACE_GRAPH_PRINT_IRQS) || |
994 | (!data)) | 997 | (!data)) |
995 | return 0; | 998 | return 0; |
996 | 999 | ||
997 | depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); | 1000 | depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); |
998 | 1001 | ||
999 | /* | 1002 | /* |
1000 | * We are not inside the irq code. | 1003 | * We are not inside the irq code. |
1001 | */ | 1004 | */ |
1002 | if (*depth_irq == -1) | 1005 | if (*depth_irq == -1) |
1003 | return 0; | 1006 | return 0; |
1004 | 1007 | ||
1005 | /* | 1008 | /* |
1006 | * We are inside the irq code, and this is a return entry. | 1009 | * We are inside the irq code, and this is a return entry. |
1007 | * Let's not trace it and clear the entry depth, since | 1010 | * Let's not trace it and clear the entry depth, since |
1008 | * we are out of irq code. | 1011 | * we are out of irq code. |
1009 | * | 1012 | * |
1010 | * This condition ensures that we 'leave the irq code' once | 1013 | * This condition ensures that we 'leave the irq code' once |
1011 | * we are back out of the entry depth, thus protecting us | 1014 | * we are back out of the entry depth, thus protecting us |
1012 | * from losing the RETURN entry. | 1015 | * from losing the RETURN entry. |
1013 | */ | 1016 | */ |
1014 | if (*depth_irq >= depth) { | 1017 | if (*depth_irq >= depth) { |
1015 | *depth_irq = -1; | 1018 | *depth_irq = -1; |
1016 | return 1; | 1019 | return 1; |
1017 | } | 1020 | } |
1018 | 1021 | ||
1019 | /* | 1022 | /* |
1020 | * We are inside the irq code, and this is not the entry. | 1023 | * We are inside the irq code, and this is not the entry. |
1021 | */ | 1024 | */ |
1022 | return 1; | 1025 | return 1; |
1023 | } | 1026 | } |
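
Taken together, check_irq_entry() and check_irq_return() implement a small per-cpu latch: the depth at which execution entered the __irqentry_text section is recorded in depth_irq, every event is hidden while the latch is set, and the first return at or above the recorded depth clears it. A minimal user-space sketch of that bracketing, with invented names and a plain int standing in for the per-cpu field:

#include <stdio.h>

/* Sketch only: models the depth_irq latch from the code above. */
static int depth_irq = -1;		/* -1 means "not inside irq code" */

/* Returns 1 if the entry event at @depth should be hidden. */
static int model_irq_entry(int is_irq_func, int depth)
{
	if (depth_irq >= 0)
		return 1;		/* already inside irq code */
	if (!is_irq_func)
		return 0;		/* normal function: trace it */
	depth_irq = depth;		/* entering irq code: latch depth */
	return 1;
}

/* Returns 1 if the return event at @depth should be hidden. */
static int model_irq_return(int depth)
{
	if (depth_irq == -1)
		return 0;		/* not inside irq code */
	if (depth_irq >= depth)
		depth_irq = -1;		/* leaving the irq bracket */
	return 1;			/* hide this return either way */
}

int main(void)
{
	printf("%d\n", model_irq_entry(1, 2));	/* 1: irq entry, latch = 2 */
	printf("%d\n", model_irq_entry(0, 3));	/* 1: nested call, hidden */
	printf("%d\n", model_irq_return(3));	/* 1: still inside irq */
	printf("%d\n", model_irq_return(2));	/* 1: hidden, latch cleared */
	printf("%d\n", model_irq_return(1));	/* 0: normal tracing again */
	return 0;
}
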
1024 | 1027 | ||
1025 | static enum print_line_t | 1028 | static enum print_line_t |
1026 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | 1029 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, |
1027 | struct trace_iterator *iter, u32 flags) | 1030 | struct trace_iterator *iter, u32 flags) |
1028 | { | 1031 | { |
1029 | struct fgraph_data *data = iter->private; | 1032 | struct fgraph_data *data = iter->private; |
1030 | struct ftrace_graph_ent *call = &field->graph_ent; | 1033 | struct ftrace_graph_ent *call = &field->graph_ent; |
1031 | struct ftrace_graph_ret_entry *leaf_ret; | 1034 | struct ftrace_graph_ret_entry *leaf_ret; |
1032 | enum print_line_t ret; | 1035 | enum print_line_t ret; |
1033 | int cpu = iter->cpu; | 1036 | int cpu = iter->cpu; |
1034 | 1037 | ||
1035 | if (check_irq_entry(iter, flags, call->func, call->depth)) | 1038 | if (check_irq_entry(iter, flags, call->func, call->depth)) |
1036 | return TRACE_TYPE_HANDLED; | 1039 | return TRACE_TYPE_HANDLED; |
1037 | 1040 | ||
1038 | if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags)) | 1041 | if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags)) |
1039 | return TRACE_TYPE_PARTIAL_LINE; | 1042 | return TRACE_TYPE_PARTIAL_LINE; |
1040 | 1043 | ||
1041 | leaf_ret = get_return_for_leaf(iter, field); | 1044 | leaf_ret = get_return_for_leaf(iter, field); |
1042 | if (leaf_ret) | 1045 | if (leaf_ret) |
1043 | ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags); | 1046 | ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags); |
1044 | else | 1047 | else |
1045 | ret = print_graph_entry_nested(iter, field, s, cpu, flags); | 1048 | ret = print_graph_entry_nested(iter, field, s, cpu, flags); |
1046 | 1049 | ||
1047 | if (data) { | 1050 | if (data) { |
1048 | /* | 1051 | /* |
1049 | * If we failed to write our output, then we need to make | 1052 | * If we failed to write our output, then we need to make |
1050 | * a note of it, because we already consumed our entry. | 1053 | * a note of it, because we already consumed our entry. |
1051 | */ | 1054 | */ |
1052 | if (s->full) { | 1055 | if (s->full) { |
1053 | data->failed = 1; | 1056 | data->failed = 1; |
1054 | data->cpu = cpu; | 1057 | data->cpu = cpu; |
1055 | } else | 1058 | } else |
1056 | data->failed = 0; | 1059 | data->failed = 0; |
1057 | } | 1060 | } |
1058 | 1061 | ||
1059 | return ret; | 1062 | return ret; |
1060 | } | 1063 | } |
1061 | 1064 | ||
1062 | static enum print_line_t | 1065 | static enum print_line_t |
1063 | print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | 1066 | print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, |
1064 | struct trace_entry *ent, struct trace_iterator *iter, | 1067 | struct trace_entry *ent, struct trace_iterator *iter, |
1065 | u32 flags) | 1068 | u32 flags) |
1066 | { | 1069 | { |
1067 | unsigned long long duration = trace->rettime - trace->calltime; | 1070 | unsigned long long duration = trace->rettime - trace->calltime; |
1068 | struct fgraph_data *data = iter->private; | 1071 | struct fgraph_data *data = iter->private; |
1069 | pid_t pid = ent->pid; | 1072 | pid_t pid = ent->pid; |
1070 | int cpu = iter->cpu; | 1073 | int cpu = iter->cpu; |
1071 | int func_match = 1; | 1074 | int func_match = 1; |
1072 | int ret; | 1075 | int ret; |
1073 | int i; | 1076 | int i; |
1074 | 1077 | ||
1075 | if (check_irq_return(iter, flags, trace->depth)) | 1078 | if (check_irq_return(iter, flags, trace->depth)) |
1076 | return TRACE_TYPE_HANDLED; | 1079 | return TRACE_TYPE_HANDLED; |
1077 | 1080 | ||
1078 | if (data) { | 1081 | if (data) { |
1079 | struct fgraph_cpu_data *cpu_data; | 1082 | struct fgraph_cpu_data *cpu_data; |
1080 | int cpu = iter->cpu; | 1083 | int cpu = iter->cpu; |
1081 | 1084 | ||
1082 | cpu_data = per_cpu_ptr(data->cpu_data, cpu); | 1085 | cpu_data = per_cpu_ptr(data->cpu_data, cpu); |
1083 | 1086 | ||
1084 | /* | 1087 | /* |
1085 | * Comments display at depth + 1. This is the | 1088 | * Comments display at depth + 1. This is the |
1086 | * return from a function, so we now want the comments | 1089 | * return from a function, so we now want the comments |
1087 | * to display at the same level as the bracket. | 1090 | * to display at the same level as the bracket. |
1088 | */ | 1091 | */ |
1089 | cpu_data->depth = trace->depth - 1; | 1092 | cpu_data->depth = trace->depth - 1; |
1090 | 1093 | ||
1091 | if (trace->depth < FTRACE_RETFUNC_DEPTH) { | 1094 | if (trace->depth < FTRACE_RETFUNC_DEPTH) { |
1092 | if (cpu_data->enter_funcs[trace->depth] != trace->func) | 1095 | if (cpu_data->enter_funcs[trace->depth] != trace->func) |
1093 | func_match = 0; | 1096 | func_match = 0; |
1094 | cpu_data->enter_funcs[trace->depth] = 0; | 1097 | cpu_data->enter_funcs[trace->depth] = 0; |
1095 | } | 1098 | } |
1096 | } | 1099 | } |
1097 | 1100 | ||
1098 | if (print_graph_prologue(iter, s, 0, 0, flags)) | 1101 | if (print_graph_prologue(iter, s, 0, 0, flags)) |
1099 | return TRACE_TYPE_PARTIAL_LINE; | 1102 | return TRACE_TYPE_PARTIAL_LINE; |
1100 | 1103 | ||
1101 | /* Overhead and duration */ | 1104 | /* Overhead and duration */ |
1102 | ret = print_graph_duration(duration, s, flags); | 1105 | ret = print_graph_duration(duration, s, flags); |
1103 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 1106 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
1104 | return TRACE_TYPE_PARTIAL_LINE; | 1107 | return TRACE_TYPE_PARTIAL_LINE; |
1105 | 1108 | ||
1106 | /* Closing brace */ | 1109 | /* Closing brace */ |
1107 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { | 1110 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { |
1108 | ret = trace_seq_printf(s, " "); | 1111 | ret = trace_seq_printf(s, " "); |
1109 | if (!ret) | 1112 | if (!ret) |
1110 | return TRACE_TYPE_PARTIAL_LINE; | 1113 | return TRACE_TYPE_PARTIAL_LINE; |
1111 | } | 1114 | } |
1112 | 1115 | ||
1113 | /* | 1116 | /* |
1114 | * If the return function does not have a matching entry, | 1117 | * If the return function does not have a matching entry, |
1115 | * then the entry was lost. Instead of just printing | 1118 | * then the entry was lost. Instead of just printing |
1116 | * the '}' and letting the user guess what function this | 1119 | * the '}' and letting the user guess what function this |
1117 | * belongs to, write out the function name. | 1120 | * belongs to, write out the function name. |
1118 | */ | 1121 | */ |
1119 | if (func_match) { | 1122 | if (func_match) { |
1120 | ret = trace_seq_printf(s, "}\n"); | 1123 | ret = trace_seq_printf(s, "}\n"); |
1121 | if (!ret) | 1124 | if (!ret) |
1122 | return TRACE_TYPE_PARTIAL_LINE; | 1125 | return TRACE_TYPE_PARTIAL_LINE; |
1123 | } else { | 1126 | } else { |
1124 | ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func); | 1127 | ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func); |
1125 | if (!ret) | 1128 | if (!ret) |
1126 | return TRACE_TYPE_PARTIAL_LINE; | 1129 | return TRACE_TYPE_PARTIAL_LINE; |
1127 | } | 1130 | } |
1128 | 1131 | ||
1129 | /* Overrun */ | 1132 | /* Overrun */ |
1130 | if (flags & TRACE_GRAPH_PRINT_OVERRUN) { | 1133 | if (flags & TRACE_GRAPH_PRINT_OVERRUN) { |
1131 | ret = trace_seq_printf(s, " (Overruns: %lu)\n", | 1134 | ret = trace_seq_printf(s, " (Overruns: %lu)\n", |
1132 | trace->overrun); | 1135 | trace->overrun); |
1133 | if (!ret) | 1136 | if (!ret) |
1134 | return TRACE_TYPE_PARTIAL_LINE; | 1137 | return TRACE_TYPE_PARTIAL_LINE; |
1135 | } | 1138 | } |
1136 | 1139 | ||
1137 | ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, | 1140 | ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, |
1138 | cpu, pid, flags); | 1141 | cpu, pid, flags); |
1139 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 1142 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
1140 | return TRACE_TYPE_PARTIAL_LINE; | 1143 | return TRACE_TYPE_PARTIAL_LINE; |
1141 | 1144 | ||
1142 | return TRACE_TYPE_HANDLED; | 1145 | return TRACE_TYPE_HANDLED; |
1143 | } | 1146 | } |
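
The func_match bookkeeping above is purely about readability: if the matching entry event was lost (ring-buffer overrun, or tracing enabled mid-call), a bare closing brace would leave the reader guessing, so the function name is appended as a comment. Illustratively (hand-written sample, not captured output), the two branches render like:

 1)   1.837 us    |      }
 1) + 22.512 us   |      } /* kmem_cache_free */

The second form names the call that the otherwise-unmatched brace closes.
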
1144 | 1147 | ||
1145 | static enum print_line_t | 1148 | static enum print_line_t |
1146 | print_graph_comment(struct trace_seq *s, struct trace_entry *ent, | 1149 | print_graph_comment(struct trace_seq *s, struct trace_entry *ent, |
1147 | struct trace_iterator *iter, u32 flags) | 1150 | struct trace_iterator *iter, u32 flags) |
1148 | { | 1151 | { |
1149 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); | 1152 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); |
1150 | struct fgraph_data *data = iter->private; | 1153 | struct fgraph_data *data = iter->private; |
1151 | struct trace_event *event; | 1154 | struct trace_event *event; |
1152 | int depth = 0; | 1155 | int depth = 0; |
1153 | int ret; | 1156 | int ret; |
1154 | int i; | 1157 | int i; |
1155 | 1158 | ||
1156 | if (data) | 1159 | if (data) |
1157 | depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth; | 1160 | depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth; |
1158 | 1161 | ||
1159 | if (print_graph_prologue(iter, s, 0, 0, flags)) | 1162 | if (print_graph_prologue(iter, s, 0, 0, flags)) |
1160 | return TRACE_TYPE_PARTIAL_LINE; | 1163 | return TRACE_TYPE_PARTIAL_LINE; |
1161 | 1164 | ||
1162 | /* No time */ | 1165 | /* No time */ |
1163 | ret = print_graph_duration(DURATION_FILL_FULL, s, flags); | 1166 | ret = print_graph_duration(DURATION_FILL_FULL, s, flags); |
1164 | if (ret != TRACE_TYPE_HANDLED) | 1167 | if (ret != TRACE_TYPE_HANDLED) |
1165 | return ret; | 1168 | return ret; |
1166 | 1169 | ||
1167 | /* Indentation */ | 1170 | /* Indentation */ |
1168 | if (depth > 0) | 1171 | if (depth > 0) |
1169 | for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) { | 1172 | for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) { |
1170 | ret = trace_seq_printf(s, " "); | 1173 | ret = trace_seq_printf(s, " "); |
1171 | if (!ret) | 1174 | if (!ret) |
1172 | return TRACE_TYPE_PARTIAL_LINE; | 1175 | return TRACE_TYPE_PARTIAL_LINE; |
1173 | } | 1176 | } |
1174 | 1177 | ||
1175 | /* The comment */ | 1178 | /* The comment */ |
1176 | ret = trace_seq_printf(s, "/* "); | 1179 | ret = trace_seq_printf(s, "/* "); |
1177 | if (!ret) | 1180 | if (!ret) |
1178 | return TRACE_TYPE_PARTIAL_LINE; | 1181 | return TRACE_TYPE_PARTIAL_LINE; |
1179 | 1182 | ||
1180 | switch (iter->ent->type) { | 1183 | switch (iter->ent->type) { |
1181 | case TRACE_BPRINT: | 1184 | case TRACE_BPRINT: |
1182 | ret = trace_print_bprintk_msg_only(iter); | 1185 | ret = trace_print_bprintk_msg_only(iter); |
1183 | if (ret != TRACE_TYPE_HANDLED) | 1186 | if (ret != TRACE_TYPE_HANDLED) |
1184 | return ret; | 1187 | return ret; |
1185 | break; | 1188 | break; |
1186 | case TRACE_PRINT: | 1189 | case TRACE_PRINT: |
1187 | ret = trace_print_printk_msg_only(iter); | 1190 | ret = trace_print_printk_msg_only(iter); |
1188 | if (ret != TRACE_TYPE_HANDLED) | 1191 | if (ret != TRACE_TYPE_HANDLED) |
1189 | return ret; | 1192 | return ret; |
1190 | break; | 1193 | break; |
1191 | default: | 1194 | default: |
1192 | event = ftrace_find_event(ent->type); | 1195 | event = ftrace_find_event(ent->type); |
1193 | if (!event) | 1196 | if (!event) |
1194 | return TRACE_TYPE_UNHANDLED; | 1197 | return TRACE_TYPE_UNHANDLED; |
1195 | 1198 | ||
1196 | ret = event->funcs->trace(iter, sym_flags, event); | 1199 | ret = event->funcs->trace(iter, sym_flags, event); |
1197 | if (ret != TRACE_TYPE_HANDLED) | 1200 | if (ret != TRACE_TYPE_HANDLED) |
1198 | return ret; | 1201 | return ret; |
1199 | } | 1202 | } |
1200 | 1203 | ||
1201 | /* Strip ending newline */ | 1204 | /* Strip ending newline */ |
1202 | if (s->buffer[s->len - 1] == '\n') { | 1205 | if (s->buffer[s->len - 1] == '\n') { |
1203 | s->buffer[s->len - 1] = '\0'; | 1206 | s->buffer[s->len - 1] = '\0'; |
1204 | s->len--; | 1207 | s->len--; |
1205 | } | 1208 | } |
1206 | 1209 | ||
1207 | ret = trace_seq_printf(s, " */\n"); | 1210 | ret = trace_seq_printf(s, " */\n"); |
1208 | if (!ret) | 1211 | if (!ret) |
1209 | return TRACE_TYPE_PARTIAL_LINE; | 1212 | return TRACE_TYPE_PARTIAL_LINE; |
1210 | 1213 | ||
1211 | return TRACE_TYPE_HANDLED; | 1214 | return TRACE_TYPE_HANDLED; |
1212 | } | 1215 | } |
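
print_graph_comment() is what lets non-graph events, most visibly trace_printk() messages, show up inline in the call graph: the payload is wrapped in C-comment markers, indented one level past the current depth, and its trailing newline is stripped by the code above. A hedged sketch of a throwaway module exercising that path (module and message names are invented):

#include <linux/module.h>
#include <linux/kernel.h>

/* Illustrative only: emit one trace_printk() event. With the
 * function_graph tracer active, print_graph_comment() renders the
 * message as an inline comment at the current call depth. */
static int __init fgraph_comment_demo_init(void)
{
	trace_printk("hello from fgraph\n");
	return 0;
}

static void __exit fgraph_comment_demo_exit(void)
{
}

module_init(fgraph_comment_demo_init);
module_exit(fgraph_comment_demo_exit);
MODULE_LICENSE("GPL");

In the trace the message then appears as a line of the form "0)  |  /* hello from fgraph */" rather than as a separate printk-style record.
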
1213 | 1216 | ||
1214 | 1217 | ||
1215 | enum print_line_t | 1218 | enum print_line_t |
1216 | print_graph_function_flags(struct trace_iterator *iter, u32 flags) | 1219 | print_graph_function_flags(struct trace_iterator *iter, u32 flags) |
1217 | { | 1220 | { |
1218 | struct ftrace_graph_ent_entry *field; | 1221 | struct ftrace_graph_ent_entry *field; |
1219 | struct fgraph_data *data = iter->private; | 1222 | struct fgraph_data *data = iter->private; |
1220 | struct trace_entry *entry = iter->ent; | 1223 | struct trace_entry *entry = iter->ent; |
1221 | struct trace_seq *s = &iter->seq; | 1224 | struct trace_seq *s = &iter->seq; |
1222 | int cpu = iter->cpu; | 1225 | int cpu = iter->cpu; |
1223 | int ret; | 1226 | int ret; |
1224 | 1227 | ||
1225 | if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) { | 1228 | if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) { |
1226 | per_cpu_ptr(data->cpu_data, cpu)->ignore = 0; | 1229 | per_cpu_ptr(data->cpu_data, cpu)->ignore = 0; |
1227 | return TRACE_TYPE_HANDLED; | 1230 | return TRACE_TYPE_HANDLED; |
1228 | } | 1231 | } |
1229 | 1232 | ||
1230 | /* | 1233 | /* |
1231 | * If the last output failed, there's a possibility we need | 1234 | * If the last output failed, there's a possibility we need |
1232 | * to print out the missing entry, which would otherwise never go out. | 1235 | * to print out the missing entry, which would otherwise never go out. |
1233 | */ | 1236 | */ |
1234 | if (data && data->failed) { | 1237 | if (data && data->failed) { |
1235 | field = &data->ent; | 1238 | field = &data->ent; |
1236 | iter->cpu = data->cpu; | 1239 | iter->cpu = data->cpu; |
1237 | ret = print_graph_entry(field, s, iter, flags); | 1240 | ret = print_graph_entry(field, s, iter, flags); |
1238 | if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) { | 1241 | if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) { |
1239 | per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1; | 1242 | per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1; |
1240 | ret = TRACE_TYPE_NO_CONSUME; | 1243 | ret = TRACE_TYPE_NO_CONSUME; |
1241 | } | 1244 | } |
1242 | iter->cpu = cpu; | 1245 | iter->cpu = cpu; |
1243 | return ret; | 1246 | return ret; |
1244 | } | 1247 | } |
1245 | 1248 | ||
1246 | switch (entry->type) { | 1249 | switch (entry->type) { |
1247 | case TRACE_GRAPH_ENT: { | 1250 | case TRACE_GRAPH_ENT: { |
1248 | /* | 1251 | /* |
1249 | * print_graph_entry() may consume the current event, | 1252 | * print_graph_entry() may consume the current event, |
1250 | * thus @field may become invalid, so we need to save it. | 1253 | * thus @field may become invalid, so we need to save it. |
1251 | * sizeof(struct ftrace_graph_ent_entry) is very small, | 1254 | * sizeof(struct ftrace_graph_ent_entry) is very small, |
1252 | * so it can be safely saved on the stack. | 1255 | * so it can be safely saved on the stack. |
1253 | */ | 1256 | */ |
1254 | struct ftrace_graph_ent_entry saved; | 1257 | struct ftrace_graph_ent_entry saved; |
1255 | trace_assign_type(field, entry); | 1258 | trace_assign_type(field, entry); |
1256 | saved = *field; | 1259 | saved = *field; |
1257 | return print_graph_entry(&saved, s, iter, flags); | 1260 | return print_graph_entry(&saved, s, iter, flags); |
1258 | } | 1261 | } |
1259 | case TRACE_GRAPH_RET: { | 1262 | case TRACE_GRAPH_RET: { |
1260 | struct ftrace_graph_ret_entry *field; | 1263 | struct ftrace_graph_ret_entry *field; |
1261 | trace_assign_type(field, entry); | 1264 | trace_assign_type(field, entry); |
1262 | return print_graph_return(&field->ret, s, entry, iter, flags); | 1265 | return print_graph_return(&field->ret, s, entry, iter, flags); |
1263 | } | 1266 | } |
1264 | case TRACE_STACK: | 1267 | case TRACE_STACK: |
1265 | case TRACE_FN: | 1268 | case TRACE_FN: |
1266 | /* don't trace stack and functions as comments */ | 1269 | /* don't trace stack and functions as comments */ |
1267 | return TRACE_TYPE_UNHANDLED; | 1270 | return TRACE_TYPE_UNHANDLED; |
1268 | 1271 | ||
1269 | default: | 1272 | default: |
1270 | return print_graph_comment(s, entry, iter, flags); | 1273 | return print_graph_comment(s, entry, iter, flags); |
1271 | } | 1274 | } |
1272 | 1275 | ||
1273 | return TRACE_TYPE_HANDLED; | 1276 | return TRACE_TYPE_HANDLED; |
1274 | } | 1277 | } |
1275 | 1278 | ||
1276 | static enum print_line_t | 1279 | static enum print_line_t |
1277 | print_graph_function(struct trace_iterator *iter) | 1280 | print_graph_function(struct trace_iterator *iter) |
1278 | { | 1281 | { |
1279 | return print_graph_function_flags(iter, tracer_flags.val); | 1282 | return print_graph_function_flags(iter, tracer_flags.val); |
1280 | } | 1283 | } |
1281 | 1284 | ||
1282 | static enum print_line_t | 1285 | static enum print_line_t |
1283 | print_graph_function_event(struct trace_iterator *iter, int flags, | 1286 | print_graph_function_event(struct trace_iterator *iter, int flags, |
1284 | struct trace_event *event) | 1287 | struct trace_event *event) |
1285 | { | 1288 | { |
1286 | return print_graph_function(iter); | 1289 | return print_graph_function(iter); |
1287 | } | 1290 | } |
1288 | 1291 | ||
1289 | static void print_lat_header(struct seq_file *s, u32 flags) | 1292 | static void print_lat_header(struct seq_file *s, u32 flags) |
1290 | { | 1293 | { |
1291 | static const char spaces[] = " " /* 16 spaces */ | 1294 | static const char spaces[] = " " /* 16 spaces */ |
1292 | " " /* 4 spaces */ | 1295 | " " /* 4 spaces */ |
1293 | " "; /* 17 spaces */ | 1296 | " "; /* 17 spaces */ |
1294 | int size = 0; | 1297 | int size = 0; |
1295 | 1298 | ||
1296 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) | 1299 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) |
1297 | size += 16; | 1300 | size += 16; |
1298 | if (flags & TRACE_GRAPH_PRINT_CPU) | 1301 | if (flags & TRACE_GRAPH_PRINT_CPU) |
1299 | size += 4; | 1302 | size += 4; |
1300 | if (flags & TRACE_GRAPH_PRINT_PROC) | 1303 | if (flags & TRACE_GRAPH_PRINT_PROC) |
1301 | size += 17; | 1304 | size += 17; |
1302 | 1305 | ||
1303 | seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces); | 1306 | seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces); |
1304 | seq_printf(s, "#%.*s / _----=> need-resched \n", size, spaces); | 1307 | seq_printf(s, "#%.*s / _----=> need-resched \n", size, spaces); |
1305 | seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces); | 1308 | seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces); |
1306 | seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces); | 1309 | seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces); |
1307 | seq_printf(s, "#%.*s||| / \n", size, spaces); | 1310 | seq_printf(s, "#%.*s||| / \n", size, spaces); |
1308 | } | 1311 | } |
1309 | 1312 | ||
1310 | static void __print_graph_headers_flags(struct seq_file *s, u32 flags) | 1313 | static void __print_graph_headers_flags(struct seq_file *s, u32 flags) |
1311 | { | 1314 | { |
1312 | int lat = trace_flags & TRACE_ITER_LATENCY_FMT; | 1315 | int lat = trace_flags & TRACE_ITER_LATENCY_FMT; |
1313 | 1316 | ||
1314 | if (lat) | 1317 | if (lat) |
1315 | print_lat_header(s, flags); | 1318 | print_lat_header(s, flags); |
1316 | 1319 | ||
1317 | /* 1st line */ | 1320 | /* 1st line */ |
1318 | seq_printf(s, "#"); | 1321 | seq_printf(s, "#"); |
1319 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) | 1322 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) |
1320 | seq_printf(s, " TIME "); | 1323 | seq_printf(s, " TIME "); |
1321 | if (flags & TRACE_GRAPH_PRINT_CPU) | 1324 | if (flags & TRACE_GRAPH_PRINT_CPU) |
1322 | seq_printf(s, " CPU"); | 1325 | seq_printf(s, " CPU"); |
1323 | if (flags & TRACE_GRAPH_PRINT_PROC) | 1326 | if (flags & TRACE_GRAPH_PRINT_PROC) |
1324 | seq_printf(s, " TASK/PID "); | 1327 | seq_printf(s, " TASK/PID "); |
1325 | if (lat) | 1328 | if (lat) |
1326 | seq_printf(s, "||||"); | 1329 | seq_printf(s, "||||"); |
1327 | if (flags & TRACE_GRAPH_PRINT_DURATION) | 1330 | if (flags & TRACE_GRAPH_PRINT_DURATION) |
1328 | seq_printf(s, " DURATION "); | 1331 | seq_printf(s, " DURATION "); |
1329 | seq_printf(s, " FUNCTION CALLS\n"); | 1332 | seq_printf(s, " FUNCTION CALLS\n"); |
1330 | 1333 | ||
1331 | /* 2nd line */ | 1334 | /* 2nd line */ |
1332 | seq_printf(s, "#"); | 1335 | seq_printf(s, "#"); |
1333 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) | 1336 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) |
1334 | seq_printf(s, " | "); | 1337 | seq_printf(s, " | "); |
1335 | if (flags & TRACE_GRAPH_PRINT_CPU) | 1338 | if (flags & TRACE_GRAPH_PRINT_CPU) |
1336 | seq_printf(s, " | "); | 1339 | seq_printf(s, " | "); |
1337 | if (flags & TRACE_GRAPH_PRINT_PROC) | 1340 | if (flags & TRACE_GRAPH_PRINT_PROC) |
1338 | seq_printf(s, " | | "); | 1341 | seq_printf(s, " | | "); |
1339 | if (lat) | 1342 | if (lat) |
1340 | seq_printf(s, "||||"); | 1343 | seq_printf(s, "||||"); |
1341 | if (flags & TRACE_GRAPH_PRINT_DURATION) | 1344 | if (flags & TRACE_GRAPH_PRINT_DURATION) |
1342 | seq_printf(s, " | | "); | 1345 | seq_printf(s, " | | "); |
1343 | seq_printf(s, " | | | |\n"); | 1346 | seq_printf(s, " | | | |\n"); |
1344 | } | 1347 | } |
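
For the default flag set (CPU and DURATION enabled; ABS_TIME, PROC, and the latency columns off), the two passes above print the familiar function_graph header, roughly:

# CPU  DURATION                  FUNCTION CALLS
# |     |   |                     |   |   |   |

Each optional column contributes a caption to the first line and an alignment marker to the second, which is why both passes test the same flags in the same order.
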
1345 | 1348 | ||
1346 | void print_graph_headers(struct seq_file *s) | 1349 | void print_graph_headers(struct seq_file *s) |
1347 | { | 1350 | { |
1348 | print_graph_headers_flags(s, tracer_flags.val); | 1351 | print_graph_headers_flags(s, tracer_flags.val); |
1349 | } | 1352 | } |
1350 | 1353 | ||
1351 | void print_graph_headers_flags(struct seq_file *s, u32 flags) | 1354 | void print_graph_headers_flags(struct seq_file *s, u32 flags) |
1352 | { | 1355 | { |
1353 | struct trace_iterator *iter = s->private; | 1356 | struct trace_iterator *iter = s->private; |
1354 | 1357 | ||
1355 | if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) | 1358 | if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) |
1356 | return; | 1359 | return; |
1357 | 1360 | ||
1358 | if (trace_flags & TRACE_ITER_LATENCY_FMT) { | 1361 | if (trace_flags & TRACE_ITER_LATENCY_FMT) { |
1359 | /* print nothing if the buffers are empty */ | 1362 | /* print nothing if the buffers are empty */ |
1360 | if (trace_empty(iter)) | 1363 | if (trace_empty(iter)) |
1361 | return; | 1364 | return; |
1362 | 1365 | ||
1363 | print_trace_header(s, iter); | 1366 | print_trace_header(s, iter); |
1364 | } | 1367 | } |
1365 | 1368 | ||
1366 | __print_graph_headers_flags(s, flags); | 1369 | __print_graph_headers_flags(s, flags); |
1367 | } | 1370 | } |
1368 | 1371 | ||
1369 | void graph_trace_open(struct trace_iterator *iter) | 1372 | void graph_trace_open(struct trace_iterator *iter) |
1370 | { | 1373 | { |
1371 | /* pid and depth on the last trace processed */ | 1374 | /* pid and depth on the last trace processed */ |
1372 | struct fgraph_data *data; | 1375 | struct fgraph_data *data; |
1373 | int cpu; | 1376 | int cpu; |
1374 | 1377 | ||
1375 | iter->private = NULL; | 1378 | iter->private = NULL; |
1376 | 1379 | ||
1377 | data = kzalloc(sizeof(*data), GFP_KERNEL); | 1380 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
1378 | if (!data) | 1381 | if (!data) |
1379 | goto out_err; | 1382 | goto out_err; |
1380 | 1383 | ||
1381 | data->cpu_data = alloc_percpu(struct fgraph_cpu_data); | 1384 | data->cpu_data = alloc_percpu(struct fgraph_cpu_data); |
1382 | if (!data->cpu_data) | 1385 | if (!data->cpu_data) |
1383 | goto out_err_free; | 1386 | goto out_err_free; |
1384 | 1387 | ||
1385 | for_each_possible_cpu(cpu) { | 1388 | for_each_possible_cpu(cpu) { |
1386 | pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); | 1389 | pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); |
1387 | int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth); | 1390 | int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth); |
1388 | int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore); | 1391 | int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore); |
1389 | int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); | 1392 | int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); |
1390 | 1393 | ||
1391 | *pid = -1; | 1394 | *pid = -1; |
1392 | *depth = 0; | 1395 | *depth = 0; |
1393 | *ignore = 0; | 1396 | *ignore = 0; |
1394 | *depth_irq = -1; | 1397 | *depth_irq = -1; |
1395 | } | 1398 | } |
1396 | 1399 | ||
1397 | iter->private = data; | 1400 | iter->private = data; |
1398 | 1401 | ||
1399 | return; | 1402 | return; |
1400 | 1403 | ||
1401 | out_err_free: | 1404 | out_err_free: |
1402 | kfree(data); | 1405 | kfree(data); |
1403 | out_err: | 1406 | out_err: |
1404 | pr_warning("function graph tracer: not enough memory\n"); | 1407 | pr_warning("function graph tracer: not enough memory\n"); |
1405 | } | 1408 | } |
1406 | 1409 | ||
1407 | void graph_trace_close(struct trace_iterator *iter) | 1410 | void graph_trace_close(struct trace_iterator *iter) |
1408 | { | 1411 | { |
1409 | struct fgraph_data *data = iter->private; | 1412 | struct fgraph_data *data = iter->private; |
1410 | 1413 | ||
1411 | if (data) { | 1414 | if (data) { |
1412 | free_percpu(data->cpu_data); | 1415 | free_percpu(data->cpu_data); |
1413 | kfree(data); | 1416 | kfree(data); |
1414 | } | 1417 | } |
1415 | } | 1418 | } |
1416 | 1419 | ||
1417 | static int func_graph_set_flag(u32 old_flags, u32 bit, int set) | 1420 | static int func_graph_set_flag(u32 old_flags, u32 bit, int set) |
1418 | { | 1421 | { |
1419 | if (bit == TRACE_GRAPH_PRINT_IRQS) | 1422 | if (bit == TRACE_GRAPH_PRINT_IRQS) |
1420 | ftrace_graph_skip_irqs = !set; | 1423 | ftrace_graph_skip_irqs = !set; |
1421 | 1424 | ||
1422 | return 0; | 1425 | return 0; |
1423 | } | 1426 | } |
1424 | 1427 | ||
1425 | static struct trace_event_functions graph_functions = { | 1428 | static struct trace_event_functions graph_functions = { |
1426 | .trace = print_graph_function_event, | 1429 | .trace = print_graph_function_event, |
1427 | }; | 1430 | }; |
1428 | 1431 | ||
1429 | static struct trace_event graph_trace_entry_event = { | 1432 | static struct trace_event graph_trace_entry_event = { |
1430 | .type = TRACE_GRAPH_ENT, | 1433 | .type = TRACE_GRAPH_ENT, |
1431 | .funcs = &graph_functions, | 1434 | .funcs = &graph_functions, |
1432 | }; | 1435 | }; |
1433 | 1436 | ||
1434 | static struct trace_event graph_trace_ret_event = { | 1437 | static struct trace_event graph_trace_ret_event = { |
1435 | .type = TRACE_GRAPH_RET, | 1438 | .type = TRACE_GRAPH_RET, |
1436 | .funcs = &graph_functions | 1439 | .funcs = &graph_functions |
1437 | }; | 1440 | }; |
1438 | 1441 | ||
1439 | static struct tracer graph_trace __read_mostly = { | 1442 | static struct tracer graph_trace __read_mostly = { |
1440 | .name = "function_graph", | 1443 | .name = "function_graph", |
1441 | .open = graph_trace_open, | 1444 | .open = graph_trace_open, |
1442 | .pipe_open = graph_trace_open, | 1445 | .pipe_open = graph_trace_open, |
1443 | .close = graph_trace_close, | 1446 | .close = graph_trace_close, |
1444 | .pipe_close = graph_trace_close, | 1447 | .pipe_close = graph_trace_close, |
1445 | .wait_pipe = poll_wait_pipe, | 1448 | .wait_pipe = poll_wait_pipe, |
1446 | .init = graph_trace_init, | 1449 | .init = graph_trace_init, |
1447 | .reset = graph_trace_reset, | 1450 | .reset = graph_trace_reset, |
1448 | .print_line = print_graph_function, | 1451 | .print_line = print_graph_function, |
1449 | .print_header = print_graph_headers, | 1452 | .print_header = print_graph_headers, |
1450 | .flags = &tracer_flags, | 1453 | .flags = &tracer_flags, |
1451 | .set_flag = func_graph_set_flag, | 1454 | .set_flag = func_graph_set_flag, |
1452 | #ifdef CONFIG_FTRACE_SELFTEST | 1455 | #ifdef CONFIG_FTRACE_SELFTEST |
1453 | .selftest = trace_selftest_startup_function_graph, | 1456 | .selftest = trace_selftest_startup_function_graph, |
1454 | #endif | 1457 | #endif |
1455 | }; | 1458 | }; |
1456 | 1459 | ||
1457 | static __init int init_graph_trace(void) | 1460 | static __init int init_graph_trace(void) |
1458 | { | 1461 | { |
1459 | max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1); | 1462 | max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1); |
1460 | 1463 | ||
1461 | if (!register_ftrace_event(&graph_trace_entry_event)) { | 1464 | if (!register_ftrace_event(&graph_trace_entry_event)) { |
1462 | pr_warning("Warning: could not register graph trace events\n"); | 1465 | pr_warning("Warning: could not register graph trace events\n"); |
1463 | return 1; | 1466 | return 1; |
1464 | } | 1467 | } |
1465 | 1468 | ||
1466 | if (!register_ftrace_event(&graph_trace_ret_event)) { | 1469 | if (!register_ftrace_event(&graph_trace_ret_event)) { |
1467 | pr_warning("Warning: could not register graph trace events\n"); | 1470 | pr_warning("Warning: could not register graph trace events\n"); |
1468 | return 1; | 1471 | return 1; |
1469 | } | 1472 | } |
1470 | 1473 | ||
1471 | return register_tracer(&graph_trace); | 1474 | return register_tracer(&graph_trace); |
1472 | } | 1475 | } |
1473 | 1476 | ||
1474 | device_initcall(init_graph_trace); | 1477 | device_initcall(init_graph_trace); |
1475 | 1478 |
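
Since init_graph_trace() registers the tracer under the name "function_graph", everything above is driven from tracefs at runtime. A hedged user-space sketch, assuming debugfs is mounted at /sys/kernel/debug (the usual location for kernels of this era); it also clears the funcgraph-irqs option, which reaches func_graph_set_flag() and in turn arms check_irq_entry()/check_irq_return():

#include <stdio.h>
#include <stdlib.h>

/* Illustrative sketch: select the function_graph tracer and hide irq
 * code in its output. Run as root; paths assume debugfs is mounted
 * at /sys/kernel/debug. */
static void write_file(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(EXIT_FAILURE);
	}
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	write_file("/sys/kernel/debug/tracing/current_tracer",
		   "function_graph");
	/* "0" clears TRACE_GRAPH_PRINT_IRQS via func_graph_set_flag() */
	write_file("/sys/kernel/debug/tracing/options/funcgraph-irqs", "0");
	/* the rendered graph can then be read from .../tracing/trace */
	return 0;
}
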