Commit 749230b06a753a22f6ed96e5dd60815d6ab12865
Committed by Steven Rostedt
1 parent 199abfab40
Exists in master and in 6 other branches
tracing, function_graph: Add context-info support for function_graph tracer
The function_graph tracer does not follow the global context-info option.
Adding a TRACE_ITER_CONTEXT_INFO trace_flags check to enable it.

With the following commands:

    # echo function_graph > ./current_tracer
    # echo 0 > options/context-info
    # cat trace

This is what it looked like before:

# tracer: function_graph
#
#     TIME        CPU  DURATION                  FUNCTION CALLS
#      |          |     |   |                     |   |   |   |
 1)   0.079 us    |          } /* __vma_link_rb */
 1)   0.056 us    |          copy_page_range();
 1)               |          security_vm_enough_memory() {
...

This is what it looks like now:

# tracer: function_graph
#
          } /* update_ts_time_stats */
          timekeeping_max_deferment();
...

Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Link: http://lkml.kernel.org/r/1307113131-10045-6-git-send-email-jolsa@redhat.com
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Showing 1 changed file with 31 additions and 22 deletions
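In short, the patch gates every context-info column on the global flag. Condensed from the hunks below (a paraphrase, not a verbatim excerpt): print_graph_prologue() now bails out right after the interrupt annotation,

    if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
        return 0;

print_graph_irq() wraps its absolute-time, CPU and proc columns in a matching if (trace_flags & TRACE_ITER_CONTEXT_INFO) block, and print_graph_duration() adds !(trace_flags & TRACE_ITER_CONTEXT_INFO) to its early-return condition.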
kernel/trace/trace_functions_graph.c
1 | /* | 1 | /* |
2 | * | 2 | * |
3 | * Function graph tracer. | 3 | * Function graph tracer. |
4 | * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com> | 4 | * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com> |
5 | * Mostly borrowed from function tracer which | 5 | * Mostly borrowed from function tracer which |
6 | * is Copyright (c) Steven Rostedt <srostedt@redhat.com> | 6 | * is Copyright (c) Steven Rostedt <srostedt@redhat.com> |
7 | * | 7 | * |
8 | */ | 8 | */ |
9 | #include <linux/debugfs.h> | 9 | #include <linux/debugfs.h> |
10 | #include <linux/uaccess.h> | 10 | #include <linux/uaccess.h> |
11 | #include <linux/ftrace.h> | 11 | #include <linux/ftrace.h> |
12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
13 | #include <linux/fs.h> | 13 | #include <linux/fs.h> |
14 | 14 | ||
15 | #include "trace.h" | 15 | #include "trace.h" |
16 | #include "trace_output.h" | 16 | #include "trace_output.h" |
17 | 17 | ||
18 | /* When set, irq functions will be ignored */ | 18 | /* When set, irq functions will be ignored */ |
19 | static int ftrace_graph_skip_irqs; | 19 | static int ftrace_graph_skip_irqs; |
20 | 20 | ||
21 | struct fgraph_cpu_data { | 21 | struct fgraph_cpu_data { |
22 | pid_t last_pid; | 22 | pid_t last_pid; |
23 | int depth; | 23 | int depth; |
24 | int depth_irq; | 24 | int depth_irq; |
25 | int ignore; | 25 | int ignore; |
26 | unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH]; | 26 | unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH]; |
27 | }; | 27 | }; |
28 | 28 | ||
29 | struct fgraph_data { | 29 | struct fgraph_data { |
30 | struct fgraph_cpu_data __percpu *cpu_data; | 30 | struct fgraph_cpu_data __percpu *cpu_data; |
31 | 31 | ||
32 | /* Place to preserve last processed entry. */ | 32 | /* Place to preserve last processed entry. */ |
33 | struct ftrace_graph_ent_entry ent; | 33 | struct ftrace_graph_ent_entry ent; |
34 | struct ftrace_graph_ret_entry ret; | 34 | struct ftrace_graph_ret_entry ret; |
35 | int failed; | 35 | int failed; |
36 | int cpu; | 36 | int cpu; |
37 | }; | 37 | }; |
38 | 38 | ||
39 | #define TRACE_GRAPH_INDENT 2 | 39 | #define TRACE_GRAPH_INDENT 2 |
40 | 40 | ||
41 | /* Flag options */ | 41 | /* Flag options */ |
42 | #define TRACE_GRAPH_PRINT_OVERRUN 0x1 | 42 | #define TRACE_GRAPH_PRINT_OVERRUN 0x1 |
43 | #define TRACE_GRAPH_PRINT_CPU 0x2 | 43 | #define TRACE_GRAPH_PRINT_CPU 0x2 |
44 | #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 | 44 | #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 |
45 | #define TRACE_GRAPH_PRINT_PROC 0x8 | 45 | #define TRACE_GRAPH_PRINT_PROC 0x8 |
46 | #define TRACE_GRAPH_PRINT_DURATION 0x10 | 46 | #define TRACE_GRAPH_PRINT_DURATION 0x10 |
47 | #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 | 47 | #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 |
48 | #define TRACE_GRAPH_PRINT_IRQS 0x40 | 48 | #define TRACE_GRAPH_PRINT_IRQS 0x40 |
49 | 49 | ||
50 | static struct tracer_opt trace_opts[] = { | 50 | static struct tracer_opt trace_opts[] = { |
51 | /* Display overruns? (for self-debug purpose) */ | 51 | /* Display overruns? (for self-debug purpose) */ |
52 | { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, | 52 | { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, |
53 | /* Display CPU ? */ | 53 | /* Display CPU ? */ |
54 | { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) }, | 54 | { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) }, |
55 | /* Display Overhead ? */ | 55 | /* Display Overhead ? */ |
56 | { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) }, | 56 | { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) }, |
57 | /* Display proc name/pid */ | 57 | /* Display proc name/pid */ |
58 | { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) }, | 58 | { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) }, |
59 | /* Display duration of execution */ | 59 | /* Display duration of execution */ |
60 | { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) }, | 60 | { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) }, |
61 | /* Display absolute time of an entry */ | 61 | /* Display absolute time of an entry */ |
62 | { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) }, | 62 | { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) }, |
63 | /* Display interrupts */ | 63 | /* Display interrupts */ |
64 | { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) }, | 64 | { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) }, |
65 | { } /* Empty entry */ | 65 | { } /* Empty entry */ |
66 | }; | 66 | }; |
67 | 67 | ||
68 | static struct tracer_flags tracer_flags = { | 68 | static struct tracer_flags tracer_flags = { |
69 | /* Don't display overruns and proc by default */ | 69 | /* Don't display overruns and proc by default */ |
70 | .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD | | 70 | .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD | |
71 | TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS, | 71 | TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS, |
72 | .opts = trace_opts | 72 | .opts = trace_opts |
73 | }; | 73 | }; |
74 | 74 | ||
75 | static struct trace_array *graph_array; | 75 | static struct trace_array *graph_array; |
76 | 76 | ||
77 | /* | 77 | /* |
78 | * DURATION column is being also used to display IRQ signs, | 78 | * DURATION column is being also used to display IRQ signs, |
79 | * following values are used by print_graph_irq and others | 79 | * following values are used by print_graph_irq and others |
80 | * to fill in space into DURATION column. | 80 | * to fill in space into DURATION column. |
81 | */ | 81 | */ |
82 | enum { | 82 | enum { |
83 | DURATION_FILL_FULL = -1, | 83 | DURATION_FILL_FULL = -1, |
84 | DURATION_FILL_START = -2, | 84 | DURATION_FILL_START = -2, |
85 | DURATION_FILL_END = -3, | 85 | DURATION_FILL_END = -3, |
86 | }; | 86 | }; |
87 | 87 | ||
88 | static enum print_line_t | 88 | static enum print_line_t |
89 | print_graph_duration(unsigned long long duration, struct trace_seq *s, | 89 | print_graph_duration(unsigned long long duration, struct trace_seq *s, |
90 | u32 flags); | 90 | u32 flags); |
91 | 91 | ||
92 | /* Add a function return address to the trace stack on thread info.*/ | 92 | /* Add a function return address to the trace stack on thread info.*/ |
93 | int | 93 | int |
94 | ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, | 94 | ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, |
95 | unsigned long frame_pointer) | 95 | unsigned long frame_pointer) |
96 | { | 96 | { |
97 | unsigned long long calltime; | 97 | unsigned long long calltime; |
98 | int index; | 98 | int index; |
99 | 99 | ||
100 | if (!current->ret_stack) | 100 | if (!current->ret_stack) |
101 | return -EBUSY; | 101 | return -EBUSY; |
102 | 102 | ||
103 | /* | 103 | /* |
104 | * We must make sure the ret_stack is tested before we read | 104 | * We must make sure the ret_stack is tested before we read |
105 | * anything else. | 105 | * anything else. |
106 | */ | 106 | */ |
107 | smp_rmb(); | 107 | smp_rmb(); |
108 | 108 | ||
109 | /* The return trace stack is full */ | 109 | /* The return trace stack is full */ |
110 | if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) { | 110 | if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) { |
111 | atomic_inc(&current->trace_overrun); | 111 | atomic_inc(&current->trace_overrun); |
112 | return -EBUSY; | 112 | return -EBUSY; |
113 | } | 113 | } |
114 | 114 | ||
115 | calltime = trace_clock_local(); | 115 | calltime = trace_clock_local(); |
116 | 116 | ||
117 | index = ++current->curr_ret_stack; | 117 | index = ++current->curr_ret_stack; |
118 | barrier(); | 118 | barrier(); |
119 | current->ret_stack[index].ret = ret; | 119 | current->ret_stack[index].ret = ret; |
120 | current->ret_stack[index].func = func; | 120 | current->ret_stack[index].func = func; |
121 | current->ret_stack[index].calltime = calltime; | 121 | current->ret_stack[index].calltime = calltime; |
122 | current->ret_stack[index].subtime = 0; | 122 | current->ret_stack[index].subtime = 0; |
123 | current->ret_stack[index].fp = frame_pointer; | 123 | current->ret_stack[index].fp = frame_pointer; |
124 | *depth = index; | 124 | *depth = index; |
125 | 125 | ||
126 | return 0; | 126 | return 0; |
127 | } | 127 | } |
128 | 128 | ||
129 | /* Retrieve a function return address to the trace stack on thread info.*/ | 129 | /* Retrieve a function return address to the trace stack on thread info.*/ |
130 | static void | 130 | static void |
131 | ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret, | 131 | ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret, |
132 | unsigned long frame_pointer) | 132 | unsigned long frame_pointer) |
133 | { | 133 | { |
134 | int index; | 134 | int index; |
135 | 135 | ||
136 | index = current->curr_ret_stack; | 136 | index = current->curr_ret_stack; |
137 | 137 | ||
138 | if (unlikely(index < 0)) { | 138 | if (unlikely(index < 0)) { |
139 | ftrace_graph_stop(); | 139 | ftrace_graph_stop(); |
140 | WARN_ON(1); | 140 | WARN_ON(1); |
141 | /* Might as well panic, otherwise we have no where to go */ | 141 | /* Might as well panic, otherwise we have no where to go */ |
142 | *ret = (unsigned long)panic; | 142 | *ret = (unsigned long)panic; |
143 | return; | 143 | return; |
144 | } | 144 | } |
145 | 145 | ||
146 | #ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST | 146 | #ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST |
147 | /* | 147 | /* |
148 | * The arch may choose to record the frame pointer used | 148 | * The arch may choose to record the frame pointer used |
149 | * and check it here to make sure that it is what we expect it | 149 | * and check it here to make sure that it is what we expect it |
150 | * to be. If gcc does not set the place holder of the return | 150 | * to be. If gcc does not set the place holder of the return |
151 | * address in the frame pointer, and does a copy instead, then | 151 | * address in the frame pointer, and does a copy instead, then |
152 | * the function graph trace will fail. This test detects this | 152 | * the function graph trace will fail. This test detects this |
153 | * case. | 153 | * case. |
154 | * | 154 | * |
155 | * Currently, x86_32 with optimize for size (-Os) makes the latest | 155 | * Currently, x86_32 with optimize for size (-Os) makes the latest |
156 | * gcc do the above. | 156 | * gcc do the above. |
157 | */ | 157 | */ |
158 | if (unlikely(current->ret_stack[index].fp != frame_pointer)) { | 158 | if (unlikely(current->ret_stack[index].fp != frame_pointer)) { |
159 | ftrace_graph_stop(); | 159 | ftrace_graph_stop(); |
160 | WARN(1, "Bad frame pointer: expected %lx, received %lx\n" | 160 | WARN(1, "Bad frame pointer: expected %lx, received %lx\n" |
161 | " from func %ps return to %lx\n", | 161 | " from func %ps return to %lx\n", |
162 | current->ret_stack[index].fp, | 162 | current->ret_stack[index].fp, |
163 | frame_pointer, | 163 | frame_pointer, |
164 | (void *)current->ret_stack[index].func, | 164 | (void *)current->ret_stack[index].func, |
165 | current->ret_stack[index].ret); | 165 | current->ret_stack[index].ret); |
166 | *ret = (unsigned long)panic; | 166 | *ret = (unsigned long)panic; |
167 | return; | 167 | return; |
168 | } | 168 | } |
169 | #endif | 169 | #endif |
170 | 170 | ||
171 | *ret = current->ret_stack[index].ret; | 171 | *ret = current->ret_stack[index].ret; |
172 | trace->func = current->ret_stack[index].func; | 172 | trace->func = current->ret_stack[index].func; |
173 | trace->calltime = current->ret_stack[index].calltime; | 173 | trace->calltime = current->ret_stack[index].calltime; |
174 | trace->overrun = atomic_read(&current->trace_overrun); | 174 | trace->overrun = atomic_read(&current->trace_overrun); |
175 | trace->depth = index; | 175 | trace->depth = index; |
176 | } | 176 | } |
177 | 177 | ||
178 | /* | 178 | /* |
179 | * Send the trace to the ring-buffer. | 179 | * Send the trace to the ring-buffer. |
180 | * @return the original return address. | 180 | * @return the original return address. |
181 | */ | 181 | */ |
182 | unsigned long ftrace_return_to_handler(unsigned long frame_pointer) | 182 | unsigned long ftrace_return_to_handler(unsigned long frame_pointer) |
183 | { | 183 | { |
184 | struct ftrace_graph_ret trace; | 184 | struct ftrace_graph_ret trace; |
185 | unsigned long ret; | 185 | unsigned long ret; |
186 | 186 | ||
187 | ftrace_pop_return_trace(&trace, &ret, frame_pointer); | 187 | ftrace_pop_return_trace(&trace, &ret, frame_pointer); |
188 | trace.rettime = trace_clock_local(); | 188 | trace.rettime = trace_clock_local(); |
189 | ftrace_graph_return(&trace); | 189 | ftrace_graph_return(&trace); |
190 | barrier(); | 190 | barrier(); |
191 | current->curr_ret_stack--; | 191 | current->curr_ret_stack--; |
192 | 192 | ||
193 | if (unlikely(!ret)) { | 193 | if (unlikely(!ret)) { |
194 | ftrace_graph_stop(); | 194 | ftrace_graph_stop(); |
195 | WARN_ON(1); | 195 | WARN_ON(1); |
196 | /* Might as well panic. What else to do? */ | 196 | /* Might as well panic. What else to do? */ |
197 | ret = (unsigned long)panic; | 197 | ret = (unsigned long)panic; |
198 | } | 198 | } |
199 | 199 | ||
200 | return ret; | 200 | return ret; |
201 | } | 201 | } |
202 | 202 | ||
203 | int __trace_graph_entry(struct trace_array *tr, | 203 | int __trace_graph_entry(struct trace_array *tr, |
204 | struct ftrace_graph_ent *trace, | 204 | struct ftrace_graph_ent *trace, |
205 | unsigned long flags, | 205 | unsigned long flags, |
206 | int pc) | 206 | int pc) |
207 | { | 207 | { |
208 | struct ftrace_event_call *call = &event_funcgraph_entry; | 208 | struct ftrace_event_call *call = &event_funcgraph_entry; |
209 | struct ring_buffer_event *event; | 209 | struct ring_buffer_event *event; |
210 | struct ring_buffer *buffer = tr->buffer; | 210 | struct ring_buffer *buffer = tr->buffer; |
211 | struct ftrace_graph_ent_entry *entry; | 211 | struct ftrace_graph_ent_entry *entry; |
212 | 212 | ||
213 | if (unlikely(__this_cpu_read(ftrace_cpu_disabled))) | 213 | if (unlikely(__this_cpu_read(ftrace_cpu_disabled))) |
214 | return 0; | 214 | return 0; |
215 | 215 | ||
216 | event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT, | 216 | event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT, |
217 | sizeof(*entry), flags, pc); | 217 | sizeof(*entry), flags, pc); |
218 | if (!event) | 218 | if (!event) |
219 | return 0; | 219 | return 0; |
220 | entry = ring_buffer_event_data(event); | 220 | entry = ring_buffer_event_data(event); |
221 | entry->graph_ent = *trace; | 221 | entry->graph_ent = *trace; |
222 | if (!filter_current_check_discard(buffer, call, entry, event)) | 222 | if (!filter_current_check_discard(buffer, call, entry, event)) |
223 | ring_buffer_unlock_commit(buffer, event); | 223 | ring_buffer_unlock_commit(buffer, event); |
224 | 224 | ||
225 | return 1; | 225 | return 1; |
226 | } | 226 | } |
227 | 227 | ||
228 | static inline int ftrace_graph_ignore_irqs(void) | 228 | static inline int ftrace_graph_ignore_irqs(void) |
229 | { | 229 | { |
230 | if (!ftrace_graph_skip_irqs) | 230 | if (!ftrace_graph_skip_irqs) |
231 | return 0; | 231 | return 0; |
232 | 232 | ||
233 | return in_irq(); | 233 | return in_irq(); |
234 | } | 234 | } |
235 | 235 | ||
236 | int trace_graph_entry(struct ftrace_graph_ent *trace) | 236 | int trace_graph_entry(struct ftrace_graph_ent *trace) |
237 | { | 237 | { |
238 | struct trace_array *tr = graph_array; | 238 | struct trace_array *tr = graph_array; |
239 | struct trace_array_cpu *data; | 239 | struct trace_array_cpu *data; |
240 | unsigned long flags; | 240 | unsigned long flags; |
241 | long disabled; | 241 | long disabled; |
242 | int ret; | 242 | int ret; |
243 | int cpu; | 243 | int cpu; |
244 | int pc; | 244 | int pc; |
245 | 245 | ||
246 | if (!ftrace_trace_task(current)) | 246 | if (!ftrace_trace_task(current)) |
247 | return 0; | 247 | return 0; |
248 | 248 | ||
249 | /* trace it when it is-nested-in or is a function enabled. */ | 249 | /* trace it when it is-nested-in or is a function enabled. */ |
250 | if (!(trace->depth || ftrace_graph_addr(trace->func)) || | 250 | if (!(trace->depth || ftrace_graph_addr(trace->func)) || |
251 | ftrace_graph_ignore_irqs()) | 251 | ftrace_graph_ignore_irqs()) |
252 | return 0; | 252 | return 0; |
253 | 253 | ||
254 | local_irq_save(flags); | 254 | local_irq_save(flags); |
255 | cpu = raw_smp_processor_id(); | 255 | cpu = raw_smp_processor_id(); |
256 | data = tr->data[cpu]; | 256 | data = tr->data[cpu]; |
257 | disabled = atomic_inc_return(&data->disabled); | 257 | disabled = atomic_inc_return(&data->disabled); |
258 | if (likely(disabled == 1)) { | 258 | if (likely(disabled == 1)) { |
259 | pc = preempt_count(); | 259 | pc = preempt_count(); |
260 | ret = __trace_graph_entry(tr, trace, flags, pc); | 260 | ret = __trace_graph_entry(tr, trace, flags, pc); |
261 | } else { | 261 | } else { |
262 | ret = 0; | 262 | ret = 0; |
263 | } | 263 | } |
264 | 264 | ||
265 | atomic_dec(&data->disabled); | 265 | atomic_dec(&data->disabled); |
266 | local_irq_restore(flags); | 266 | local_irq_restore(flags); |
267 | 267 | ||
268 | return ret; | 268 | return ret; |
269 | } | 269 | } |
270 | 270 | ||
271 | int trace_graph_thresh_entry(struct ftrace_graph_ent *trace) | 271 | int trace_graph_thresh_entry(struct ftrace_graph_ent *trace) |
272 | { | 272 | { |
273 | if (tracing_thresh) | 273 | if (tracing_thresh) |
274 | return 1; | 274 | return 1; |
275 | else | 275 | else |
276 | return trace_graph_entry(trace); | 276 | return trace_graph_entry(trace); |
277 | } | 277 | } |
278 | 278 | ||
279 | static void | 279 | static void |
280 | __trace_graph_function(struct trace_array *tr, | 280 | __trace_graph_function(struct trace_array *tr, |
281 | unsigned long ip, unsigned long flags, int pc) | 281 | unsigned long ip, unsigned long flags, int pc) |
282 | { | 282 | { |
283 | u64 time = trace_clock_local(); | 283 | u64 time = trace_clock_local(); |
284 | struct ftrace_graph_ent ent = { | 284 | struct ftrace_graph_ent ent = { |
285 | .func = ip, | 285 | .func = ip, |
286 | .depth = 0, | 286 | .depth = 0, |
287 | }; | 287 | }; |
288 | struct ftrace_graph_ret ret = { | 288 | struct ftrace_graph_ret ret = { |
289 | .func = ip, | 289 | .func = ip, |
290 | .depth = 0, | 290 | .depth = 0, |
291 | .calltime = time, | 291 | .calltime = time, |
292 | .rettime = time, | 292 | .rettime = time, |
293 | }; | 293 | }; |
294 | 294 | ||
295 | __trace_graph_entry(tr, &ent, flags, pc); | 295 | __trace_graph_entry(tr, &ent, flags, pc); |
296 | __trace_graph_return(tr, &ret, flags, pc); | 296 | __trace_graph_return(tr, &ret, flags, pc); |
297 | } | 297 | } |
298 | 298 | ||
299 | void | 299 | void |
300 | trace_graph_function(struct trace_array *tr, | 300 | trace_graph_function(struct trace_array *tr, |
301 | unsigned long ip, unsigned long parent_ip, | 301 | unsigned long ip, unsigned long parent_ip, |
302 | unsigned long flags, int pc) | 302 | unsigned long flags, int pc) |
303 | { | 303 | { |
304 | __trace_graph_function(tr, ip, flags, pc); | 304 | __trace_graph_function(tr, ip, flags, pc); |
305 | } | 305 | } |
306 | 306 | ||
307 | void __trace_graph_return(struct trace_array *tr, | 307 | void __trace_graph_return(struct trace_array *tr, |
308 | struct ftrace_graph_ret *trace, | 308 | struct ftrace_graph_ret *trace, |
309 | unsigned long flags, | 309 | unsigned long flags, |
310 | int pc) | 310 | int pc) |
311 | { | 311 | { |
312 | struct ftrace_event_call *call = &event_funcgraph_exit; | 312 | struct ftrace_event_call *call = &event_funcgraph_exit; |
313 | struct ring_buffer_event *event; | 313 | struct ring_buffer_event *event; |
314 | struct ring_buffer *buffer = tr->buffer; | 314 | struct ring_buffer *buffer = tr->buffer; |
315 | struct ftrace_graph_ret_entry *entry; | 315 | struct ftrace_graph_ret_entry *entry; |
316 | 316 | ||
317 | if (unlikely(__this_cpu_read(ftrace_cpu_disabled))) | 317 | if (unlikely(__this_cpu_read(ftrace_cpu_disabled))) |
318 | return; | 318 | return; |
319 | 319 | ||
320 | event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET, | 320 | event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET, |
321 | sizeof(*entry), flags, pc); | 321 | sizeof(*entry), flags, pc); |
322 | if (!event) | 322 | if (!event) |
323 | return; | 323 | return; |
324 | entry = ring_buffer_event_data(event); | 324 | entry = ring_buffer_event_data(event); |
325 | entry->ret = *trace; | 325 | entry->ret = *trace; |
326 | if (!filter_current_check_discard(buffer, call, entry, event)) | 326 | if (!filter_current_check_discard(buffer, call, entry, event)) |
327 | ring_buffer_unlock_commit(buffer, event); | 327 | ring_buffer_unlock_commit(buffer, event); |
328 | } | 328 | } |
329 | 329 | ||
330 | void trace_graph_return(struct ftrace_graph_ret *trace) | 330 | void trace_graph_return(struct ftrace_graph_ret *trace) |
331 | { | 331 | { |
332 | struct trace_array *tr = graph_array; | 332 | struct trace_array *tr = graph_array; |
333 | struct trace_array_cpu *data; | 333 | struct trace_array_cpu *data; |
334 | unsigned long flags; | 334 | unsigned long flags; |
335 | long disabled; | 335 | long disabled; |
336 | int cpu; | 336 | int cpu; |
337 | int pc; | 337 | int pc; |
338 | 338 | ||
339 | local_irq_save(flags); | 339 | local_irq_save(flags); |
340 | cpu = raw_smp_processor_id(); | 340 | cpu = raw_smp_processor_id(); |
341 | data = tr->data[cpu]; | 341 | data = tr->data[cpu]; |
342 | disabled = atomic_inc_return(&data->disabled); | 342 | disabled = atomic_inc_return(&data->disabled); |
343 | if (likely(disabled == 1)) { | 343 | if (likely(disabled == 1)) { |
344 | pc = preempt_count(); | 344 | pc = preempt_count(); |
345 | __trace_graph_return(tr, trace, flags, pc); | 345 | __trace_graph_return(tr, trace, flags, pc); |
346 | } | 346 | } |
347 | atomic_dec(&data->disabled); | 347 | atomic_dec(&data->disabled); |
348 | local_irq_restore(flags); | 348 | local_irq_restore(flags); |
349 | } | 349 | } |
350 | 350 | ||
351 | void set_graph_array(struct trace_array *tr) | 351 | void set_graph_array(struct trace_array *tr) |
352 | { | 352 | { |
353 | graph_array = tr; | 353 | graph_array = tr; |
354 | 354 | ||
355 | /* Make graph_array visible before we start tracing */ | 355 | /* Make graph_array visible before we start tracing */ |
356 | 356 | ||
357 | smp_mb(); | 357 | smp_mb(); |
358 | } | 358 | } |
359 | 359 | ||
360 | void trace_graph_thresh_return(struct ftrace_graph_ret *trace) | 360 | void trace_graph_thresh_return(struct ftrace_graph_ret *trace) |
361 | { | 361 | { |
362 | if (tracing_thresh && | 362 | if (tracing_thresh && |
363 | (trace->rettime - trace->calltime < tracing_thresh)) | 363 | (trace->rettime - trace->calltime < tracing_thresh)) |
364 | return; | 364 | return; |
365 | else | 365 | else |
366 | trace_graph_return(trace); | 366 | trace_graph_return(trace); |
367 | } | 367 | } |
368 | 368 | ||
369 | static int graph_trace_init(struct trace_array *tr) | 369 | static int graph_trace_init(struct trace_array *tr) |
370 | { | 370 | { |
371 | int ret; | 371 | int ret; |
372 | 372 | ||
373 | set_graph_array(tr); | 373 | set_graph_array(tr); |
374 | if (tracing_thresh) | 374 | if (tracing_thresh) |
375 | ret = register_ftrace_graph(&trace_graph_thresh_return, | 375 | ret = register_ftrace_graph(&trace_graph_thresh_return, |
376 | &trace_graph_thresh_entry); | 376 | &trace_graph_thresh_entry); |
377 | else | 377 | else |
378 | ret = register_ftrace_graph(&trace_graph_return, | 378 | ret = register_ftrace_graph(&trace_graph_return, |
379 | &trace_graph_entry); | 379 | &trace_graph_entry); |
380 | if (ret) | 380 | if (ret) |
381 | return ret; | 381 | return ret; |
382 | tracing_start_cmdline_record(); | 382 | tracing_start_cmdline_record(); |
383 | 383 | ||
384 | return 0; | 384 | return 0; |
385 | } | 385 | } |
386 | 386 | ||
387 | static void graph_trace_reset(struct trace_array *tr) | 387 | static void graph_trace_reset(struct trace_array *tr) |
388 | { | 388 | { |
389 | tracing_stop_cmdline_record(); | 389 | tracing_stop_cmdline_record(); |
390 | unregister_ftrace_graph(); | 390 | unregister_ftrace_graph(); |
391 | } | 391 | } |
392 | 392 | ||
393 | static int max_bytes_for_cpu; | 393 | static int max_bytes_for_cpu; |
394 | 394 | ||
395 | static enum print_line_t | 395 | static enum print_line_t |
396 | print_graph_cpu(struct trace_seq *s, int cpu) | 396 | print_graph_cpu(struct trace_seq *s, int cpu) |
397 | { | 397 | { |
398 | int ret; | 398 | int ret; |
399 | 399 | ||
400 | /* | 400 | /* |
401 | * Start with a space character - to make it stand out | 401 | * Start with a space character - to make it stand out |
402 | * to the right a bit when trace output is pasted into | 402 | * to the right a bit when trace output is pasted into |
403 | * email: | 403 | * email: |
404 | */ | 404 | */ |
405 | ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu); | 405 | ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu); |
406 | if (!ret) | 406 | if (!ret) |
407 | return TRACE_TYPE_PARTIAL_LINE; | 407 | return TRACE_TYPE_PARTIAL_LINE; |
408 | 408 | ||
409 | return TRACE_TYPE_HANDLED; | 409 | return TRACE_TYPE_HANDLED; |
410 | } | 410 | } |
411 | 411 | ||
412 | #define TRACE_GRAPH_PROCINFO_LENGTH 14 | 412 | #define TRACE_GRAPH_PROCINFO_LENGTH 14 |
413 | 413 | ||
414 | static enum print_line_t | 414 | static enum print_line_t |
415 | print_graph_proc(struct trace_seq *s, pid_t pid) | 415 | print_graph_proc(struct trace_seq *s, pid_t pid) |
416 | { | 416 | { |
417 | char comm[TASK_COMM_LEN]; | 417 | char comm[TASK_COMM_LEN]; |
418 | /* sign + log10(MAX_INT) + '\0' */ | 418 | /* sign + log10(MAX_INT) + '\0' */ |
419 | char pid_str[11]; | 419 | char pid_str[11]; |
420 | int spaces = 0; | 420 | int spaces = 0; |
421 | int ret; | 421 | int ret; |
422 | int len; | 422 | int len; |
423 | int i; | 423 | int i; |
424 | 424 | ||
425 | trace_find_cmdline(pid, comm); | 425 | trace_find_cmdline(pid, comm); |
426 | comm[7] = '\0'; | 426 | comm[7] = '\0'; |
427 | sprintf(pid_str, "%d", pid); | 427 | sprintf(pid_str, "%d", pid); |
428 | 428 | ||
429 | /* 1 stands for the "-" character */ | 429 | /* 1 stands for the "-" character */ |
430 | len = strlen(comm) + strlen(pid_str) + 1; | 430 | len = strlen(comm) + strlen(pid_str) + 1; |
431 | 431 | ||
432 | if (len < TRACE_GRAPH_PROCINFO_LENGTH) | 432 | if (len < TRACE_GRAPH_PROCINFO_LENGTH) |
433 | spaces = TRACE_GRAPH_PROCINFO_LENGTH - len; | 433 | spaces = TRACE_GRAPH_PROCINFO_LENGTH - len; |
434 | 434 | ||
435 | /* First spaces to align center */ | 435 | /* First spaces to align center */ |
436 | for (i = 0; i < spaces / 2; i++) { | 436 | for (i = 0; i < spaces / 2; i++) { |
437 | ret = trace_seq_printf(s, " "); | 437 | ret = trace_seq_printf(s, " "); |
438 | if (!ret) | 438 | if (!ret) |
439 | return TRACE_TYPE_PARTIAL_LINE; | 439 | return TRACE_TYPE_PARTIAL_LINE; |
440 | } | 440 | } |
441 | 441 | ||
442 | ret = trace_seq_printf(s, "%s-%s", comm, pid_str); | 442 | ret = trace_seq_printf(s, "%s-%s", comm, pid_str); |
443 | if (!ret) | 443 | if (!ret) |
444 | return TRACE_TYPE_PARTIAL_LINE; | 444 | return TRACE_TYPE_PARTIAL_LINE; |
445 | 445 | ||
446 | /* Last spaces to align center */ | 446 | /* Last spaces to align center */ |
447 | for (i = 0; i < spaces - (spaces / 2); i++) { | 447 | for (i = 0; i < spaces - (spaces / 2); i++) { |
448 | ret = trace_seq_printf(s, " "); | 448 | ret = trace_seq_printf(s, " "); |
449 | if (!ret) | 449 | if (!ret) |
450 | return TRACE_TYPE_PARTIAL_LINE; | 450 | return TRACE_TYPE_PARTIAL_LINE; |
451 | } | 451 | } |
452 | return TRACE_TYPE_HANDLED; | 452 | return TRACE_TYPE_HANDLED; |
453 | } | 453 | } |
454 | 454 | ||
455 | 455 | ||
456 | static enum print_line_t | 456 | static enum print_line_t |
457 | print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry) | 457 | print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry) |
458 | { | 458 | { |
459 | if (!trace_seq_putc(s, ' ')) | 459 | if (!trace_seq_putc(s, ' ')) |
460 | return 0; | 460 | return 0; |
461 | 461 | ||
462 | return trace_print_lat_fmt(s, entry); | 462 | return trace_print_lat_fmt(s, entry); |
463 | } | 463 | } |
464 | 464 | ||
465 | /* If the pid changed since the last trace, output this event */ | 465 | /* If the pid changed since the last trace, output this event */ |
466 | static enum print_line_t | 466 | static enum print_line_t |
467 | verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) | 467 | verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) |
468 | { | 468 | { |
469 | pid_t prev_pid; | 469 | pid_t prev_pid; |
470 | pid_t *last_pid; | 470 | pid_t *last_pid; |
471 | int ret; | 471 | int ret; |
472 | 472 | ||
473 | if (!data) | 473 | if (!data) |
474 | return TRACE_TYPE_HANDLED; | 474 | return TRACE_TYPE_HANDLED; |
475 | 475 | ||
476 | last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); | 476 | last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); |
477 | 477 | ||
478 | if (*last_pid == pid) | 478 | if (*last_pid == pid) |
479 | return TRACE_TYPE_HANDLED; | 479 | return TRACE_TYPE_HANDLED; |
480 | 480 | ||
481 | prev_pid = *last_pid; | 481 | prev_pid = *last_pid; |
482 | *last_pid = pid; | 482 | *last_pid = pid; |
483 | 483 | ||
484 | if (prev_pid == -1) | 484 | if (prev_pid == -1) |
485 | return TRACE_TYPE_HANDLED; | 485 | return TRACE_TYPE_HANDLED; |
486 | /* | 486 | /* |
487 | * Context-switch trace line: | 487 | * Context-switch trace line: |
488 | 488 | ||
489 | ------------------------------------------ | 489 | ------------------------------------------ |
490 | | 1) migration/0--1 => sshd-1755 | 490 | | 1) migration/0--1 => sshd-1755 |
491 | ------------------------------------------ | 491 | ------------------------------------------ |
492 | 492 | ||
493 | */ | 493 | */ |
494 | ret = trace_seq_printf(s, | 494 | ret = trace_seq_printf(s, |
495 | " ------------------------------------------\n"); | 495 | " ------------------------------------------\n"); |
496 | if (!ret) | 496 | if (!ret) |
497 | return TRACE_TYPE_PARTIAL_LINE; | 497 | return TRACE_TYPE_PARTIAL_LINE; |
498 | 498 | ||
499 | ret = print_graph_cpu(s, cpu); | 499 | ret = print_graph_cpu(s, cpu); |
500 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 500 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
501 | return TRACE_TYPE_PARTIAL_LINE; | 501 | return TRACE_TYPE_PARTIAL_LINE; |
502 | 502 | ||
503 | ret = print_graph_proc(s, prev_pid); | 503 | ret = print_graph_proc(s, prev_pid); |
504 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 504 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
505 | return TRACE_TYPE_PARTIAL_LINE; | 505 | return TRACE_TYPE_PARTIAL_LINE; |
506 | 506 | ||
507 | ret = trace_seq_printf(s, " => "); | 507 | ret = trace_seq_printf(s, " => "); |
508 | if (!ret) | 508 | if (!ret) |
509 | return TRACE_TYPE_PARTIAL_LINE; | 509 | return TRACE_TYPE_PARTIAL_LINE; |
510 | 510 | ||
511 | ret = print_graph_proc(s, pid); | 511 | ret = print_graph_proc(s, pid); |
512 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 512 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
513 | return TRACE_TYPE_PARTIAL_LINE; | 513 | return TRACE_TYPE_PARTIAL_LINE; |
514 | 514 | ||
515 | ret = trace_seq_printf(s, | 515 | ret = trace_seq_printf(s, |
516 | "\n ------------------------------------------\n\n"); | 516 | "\n ------------------------------------------\n\n"); |
517 | if (!ret) | 517 | if (!ret) |
518 | return TRACE_TYPE_PARTIAL_LINE; | 518 | return TRACE_TYPE_PARTIAL_LINE; |
519 | 519 | ||
520 | return TRACE_TYPE_HANDLED; | 520 | return TRACE_TYPE_HANDLED; |
521 | } | 521 | } |
522 | 522 | ||
523 | static struct ftrace_graph_ret_entry * | 523 | static struct ftrace_graph_ret_entry * |
524 | get_return_for_leaf(struct trace_iterator *iter, | 524 | get_return_for_leaf(struct trace_iterator *iter, |
525 | struct ftrace_graph_ent_entry *curr) | 525 | struct ftrace_graph_ent_entry *curr) |
526 | { | 526 | { |
527 | struct fgraph_data *data = iter->private; | 527 | struct fgraph_data *data = iter->private; |
528 | struct ring_buffer_iter *ring_iter = NULL; | 528 | struct ring_buffer_iter *ring_iter = NULL; |
529 | struct ring_buffer_event *event; | 529 | struct ring_buffer_event *event; |
530 | struct ftrace_graph_ret_entry *next; | 530 | struct ftrace_graph_ret_entry *next; |
531 | 531 | ||
532 | /* | 532 | /* |
533 | * If the previous output failed to write to the seq buffer, | 533 | * If the previous output failed to write to the seq buffer, |
534 | * then we just reuse the data from before. | 534 | * then we just reuse the data from before. |
535 | */ | 535 | */ |
536 | if (data && data->failed) { | 536 | if (data && data->failed) { |
537 | curr = &data->ent; | 537 | curr = &data->ent; |
538 | next = &data->ret; | 538 | next = &data->ret; |
539 | } else { | 539 | } else { |
540 | 540 | ||
541 | ring_iter = iter->buffer_iter[iter->cpu]; | 541 | ring_iter = iter->buffer_iter[iter->cpu]; |
542 | 542 | ||
543 | /* First peek to compare current entry and the next one */ | 543 | /* First peek to compare current entry and the next one */ |
544 | if (ring_iter) | 544 | if (ring_iter) |
545 | event = ring_buffer_iter_peek(ring_iter, NULL); | 545 | event = ring_buffer_iter_peek(ring_iter, NULL); |
546 | else { | 546 | else { |
547 | /* | 547 | /* |
548 | * We need to consume the current entry to see | 548 | * We need to consume the current entry to see |
549 | * the next one. | 549 | * the next one. |
550 | */ | 550 | */ |
551 | ring_buffer_consume(iter->tr->buffer, iter->cpu, | 551 | ring_buffer_consume(iter->tr->buffer, iter->cpu, |
552 | NULL, NULL); | 552 | NULL, NULL); |
553 | event = ring_buffer_peek(iter->tr->buffer, iter->cpu, | 553 | event = ring_buffer_peek(iter->tr->buffer, iter->cpu, |
554 | NULL, NULL); | 554 | NULL, NULL); |
555 | } | 555 | } |
556 | 556 | ||
557 | if (!event) | 557 | if (!event) |
558 | return NULL; | 558 | return NULL; |
559 | 559 | ||
560 | next = ring_buffer_event_data(event); | 560 | next = ring_buffer_event_data(event); |
561 | 561 | ||
562 | if (data) { | 562 | if (data) { |
563 | /* | 563 | /* |
564 | * Save current and next entries for later reference | 564 | * Save current and next entries for later reference |
565 | * if the output fails. | 565 | * if the output fails. |
566 | */ | 566 | */ |
567 | data->ent = *curr; | 567 | data->ent = *curr; |
568 | /* | 568 | /* |
569 | * If the next event is not a return type, then | 569 | * If the next event is not a return type, then |
570 | * we only care about what type it is. Otherwise we can | 570 | * we only care about what type it is. Otherwise we can |
571 | * safely copy the entire event. | 571 | * safely copy the entire event. |
572 | */ | 572 | */ |
573 | if (next->ent.type == TRACE_GRAPH_RET) | 573 | if (next->ent.type == TRACE_GRAPH_RET) |
574 | data->ret = *next; | 574 | data->ret = *next; |
575 | else | 575 | else |
576 | data->ret.ent.type = next->ent.type; | 576 | data->ret.ent.type = next->ent.type; |
577 | } | 577 | } |
578 | } | 578 | } |
579 | 579 | ||
580 | if (next->ent.type != TRACE_GRAPH_RET) | 580 | if (next->ent.type != TRACE_GRAPH_RET) |
581 | return NULL; | 581 | return NULL; |
582 | 582 | ||
583 | if (curr->ent.pid != next->ent.pid || | 583 | if (curr->ent.pid != next->ent.pid || |
584 | curr->graph_ent.func != next->ret.func) | 584 | curr->graph_ent.func != next->ret.func) |
585 | return NULL; | 585 | return NULL; |
586 | 586 | ||
587 | /* this is a leaf, now advance the iterator */ | 587 | /* this is a leaf, now advance the iterator */ |
588 | if (ring_iter) | 588 | if (ring_iter) |
589 | ring_buffer_read(ring_iter, NULL); | 589 | ring_buffer_read(ring_iter, NULL); |
590 | 590 | ||
591 | return next; | 591 | return next; |
592 | } | 592 | } |
593 | 593 | ||
594 | static int print_graph_abs_time(u64 t, struct trace_seq *s) | 594 | static int print_graph_abs_time(u64 t, struct trace_seq *s) |
595 | { | 595 | { |
596 | unsigned long usecs_rem; | 596 | unsigned long usecs_rem; |
597 | 597 | ||
598 | usecs_rem = do_div(t, NSEC_PER_SEC); | 598 | usecs_rem = do_div(t, NSEC_PER_SEC); |
599 | usecs_rem /= 1000; | 599 | usecs_rem /= 1000; |
600 | 600 | ||
601 | return trace_seq_printf(s, "%5lu.%06lu | ", | 601 | return trace_seq_printf(s, "%5lu.%06lu | ", |
602 | (unsigned long)t, usecs_rem); | 602 | (unsigned long)t, usecs_rem); |
603 | } | 603 | } |
604 | 604 | ||
605 | static enum print_line_t | 605 | static enum print_line_t |
606 | print_graph_irq(struct trace_iterator *iter, unsigned long addr, | 606 | print_graph_irq(struct trace_iterator *iter, unsigned long addr, |
607 | enum trace_type type, int cpu, pid_t pid, u32 flags) | 607 | enum trace_type type, int cpu, pid_t pid, u32 flags) |
608 | { | 608 | { |
609 | int ret; | 609 | int ret; |
610 | struct trace_seq *s = &iter->seq; | 610 | struct trace_seq *s = &iter->seq; |
611 | 611 | ||
612 | if (addr < (unsigned long)__irqentry_text_start || | 612 | if (addr < (unsigned long)__irqentry_text_start || |
613 | addr >= (unsigned long)__irqentry_text_end) | 613 | addr >= (unsigned long)__irqentry_text_end) |
614 | return TRACE_TYPE_UNHANDLED; | 614 | return TRACE_TYPE_UNHANDLED; |
615 | 615 | ||
616 | /* Absolute time */ | 616 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { |
617 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) { | 617 | /* Absolute time */ |
618 | ret = print_graph_abs_time(iter->ts, s); | 618 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) { |
619 | if (!ret) | 619 | ret = print_graph_abs_time(iter->ts, s); |
620 | return TRACE_TYPE_PARTIAL_LINE; | 620 | if (!ret) |
621 | } | 621 | return TRACE_TYPE_PARTIAL_LINE; |
622 | } | ||
622 | 623 | ||
623 | /* Cpu */ | 624 | /* Cpu */ |
624 | if (flags & TRACE_GRAPH_PRINT_CPU) { | 625 | if (flags & TRACE_GRAPH_PRINT_CPU) { |
625 | ret = print_graph_cpu(s, cpu); | 626 | ret = print_graph_cpu(s, cpu); |
626 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 627 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
627 | return TRACE_TYPE_PARTIAL_LINE; | 628 | return TRACE_TYPE_PARTIAL_LINE; |
628 | } | 629 | } |
629 | 630 | ||
630 | /* Proc */ | 631 | /* Proc */ |
631 | if (flags & TRACE_GRAPH_PRINT_PROC) { | 632 | if (flags & TRACE_GRAPH_PRINT_PROC) { |
632 | ret = print_graph_proc(s, pid); | 633 | ret = print_graph_proc(s, pid); |
633 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 634 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
634 | return TRACE_TYPE_PARTIAL_LINE; | 635 | return TRACE_TYPE_PARTIAL_LINE; |
635 | ret = trace_seq_printf(s, " | "); | 636 | ret = trace_seq_printf(s, " | "); |
636 | if (!ret) | 637 | if (!ret) |
637 | return TRACE_TYPE_PARTIAL_LINE; | 638 | return TRACE_TYPE_PARTIAL_LINE; |
639 | } | ||
638 | } | 640 | } |
639 | 641 | ||
640 | /* No overhead */ | 642 | /* No overhead */ |
641 | ret = print_graph_duration(DURATION_FILL_START, s, flags); | 643 | ret = print_graph_duration(DURATION_FILL_START, s, flags); |
642 | if (ret != TRACE_TYPE_HANDLED) | 644 | if (ret != TRACE_TYPE_HANDLED) |
643 | return ret; | 645 | return ret; |
644 | 646 | ||
645 | if (type == TRACE_GRAPH_ENT) | 647 | if (type == TRACE_GRAPH_ENT) |
646 | ret = trace_seq_printf(s, "==========>"); | 648 | ret = trace_seq_printf(s, "==========>"); |
647 | else | 649 | else |
648 | ret = trace_seq_printf(s, "<=========="); | 650 | ret = trace_seq_printf(s, "<=========="); |
649 | 651 | ||
650 | if (!ret) | 652 | if (!ret) |
651 | return TRACE_TYPE_PARTIAL_LINE; | 653 | return TRACE_TYPE_PARTIAL_LINE; |
652 | 654 | ||
653 | ret = print_graph_duration(DURATION_FILL_END, s, flags); | 655 | ret = print_graph_duration(DURATION_FILL_END, s, flags); |
654 | if (ret != TRACE_TYPE_HANDLED) | 656 | if (ret != TRACE_TYPE_HANDLED) |
655 | return ret; | 657 | return ret; |
656 | 658 | ||
657 | ret = trace_seq_printf(s, "\n"); | 659 | ret = trace_seq_printf(s, "\n"); |
658 | 660 | ||
659 | if (!ret) | 661 | if (!ret) |
660 | return TRACE_TYPE_PARTIAL_LINE; | 662 | return TRACE_TYPE_PARTIAL_LINE; |
661 | return TRACE_TYPE_HANDLED; | 663 | return TRACE_TYPE_HANDLED; |
662 | } | 664 | } |
663 | 665 | ||
664 | enum print_line_t | 666 | enum print_line_t |
665 | trace_print_graph_duration(unsigned long long duration, struct trace_seq *s) | 667 | trace_print_graph_duration(unsigned long long duration, struct trace_seq *s) |
666 | { | 668 | { |
667 | unsigned long nsecs_rem = do_div(duration, 1000); | 669 | unsigned long nsecs_rem = do_div(duration, 1000); |
668 | /* log10(ULONG_MAX) + '\0' */ | 670 | /* log10(ULONG_MAX) + '\0' */ |
669 | char msecs_str[21]; | 671 | char msecs_str[21]; |
670 | char nsecs_str[5]; | 672 | char nsecs_str[5]; |
671 | int ret, len; | 673 | int ret, len; |
672 | int i; | 674 | int i; |
673 | 675 | ||
674 | sprintf(msecs_str, "%lu", (unsigned long) duration); | 676 | sprintf(msecs_str, "%lu", (unsigned long) duration); |
675 | 677 | ||
676 | /* Print msecs */ | 678 | /* Print msecs */ |
677 | ret = trace_seq_printf(s, "%s", msecs_str); | 679 | ret = trace_seq_printf(s, "%s", msecs_str); |
678 | if (!ret) | 680 | if (!ret) |
679 | return TRACE_TYPE_PARTIAL_LINE; | 681 | return TRACE_TYPE_PARTIAL_LINE; |
680 | 682 | ||
681 | len = strlen(msecs_str); | 683 | len = strlen(msecs_str); |
682 | 684 | ||
683 | /* Print nsecs (we don't want to exceed 7 numbers) */ | 685 | /* Print nsecs (we don't want to exceed 7 numbers) */ |
684 | if (len < 7) { | 686 | if (len < 7) { |
685 | size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len); | 687 | size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len); |
686 | 688 | ||
687 | snprintf(nsecs_str, slen, "%03lu", nsecs_rem); | 689 | snprintf(nsecs_str, slen, "%03lu", nsecs_rem); |
688 | ret = trace_seq_printf(s, ".%s", nsecs_str); | 690 | ret = trace_seq_printf(s, ".%s", nsecs_str); |
689 | if (!ret) | 691 | if (!ret) |
690 | return TRACE_TYPE_PARTIAL_LINE; | 692 | return TRACE_TYPE_PARTIAL_LINE; |
691 | len += strlen(nsecs_str); | 693 | len += strlen(nsecs_str); |
692 | } | 694 | } |
693 | 695 | ||
694 | ret = trace_seq_printf(s, " us "); | 696 | ret = trace_seq_printf(s, " us "); |
695 | if (!ret) | 697 | if (!ret) |
696 | return TRACE_TYPE_PARTIAL_LINE; | 698 | return TRACE_TYPE_PARTIAL_LINE; |
697 | 699 | ||
698 | /* Print remaining spaces to fit the row's width */ | 700 | /* Print remaining spaces to fit the row's width */ |
699 | for (i = len; i < 7; i++) { | 701 | for (i = len; i < 7; i++) { |
700 | ret = trace_seq_printf(s, " "); | 702 | ret = trace_seq_printf(s, " "); |
701 | if (!ret) | 703 | if (!ret) |
702 | return TRACE_TYPE_PARTIAL_LINE; | 704 | return TRACE_TYPE_PARTIAL_LINE; |
703 | } | 705 | } |
704 | return TRACE_TYPE_HANDLED; | 706 | return TRACE_TYPE_HANDLED; |
705 | } | 707 | } |
706 | 708 | ||
707 | static enum print_line_t | 709 | static enum print_line_t |
708 | print_graph_duration(unsigned long long duration, struct trace_seq *s, | 710 | print_graph_duration(unsigned long long duration, struct trace_seq *s, |
709 | u32 flags) | 711 | u32 flags) |
710 | { | 712 | { |
711 | int ret = -1; | 713 | int ret = -1; |
712 | 714 | ||
713 | if (!(flags & TRACE_GRAPH_PRINT_DURATION)) | 715 | if (!(flags & TRACE_GRAPH_PRINT_DURATION) || |
714 | return TRACE_TYPE_HANDLED; | 716 | !(trace_flags & TRACE_ITER_CONTEXT_INFO)) |
717 | return TRACE_TYPE_HANDLED; | ||
715 | 718 | ||
716 | /* No real adata, just filling the column with spaces */ | 719 | /* No real adata, just filling the column with spaces */ |
717 | switch (duration) { | 720 | switch (duration) { |
718 | case DURATION_FILL_FULL: | 721 | case DURATION_FILL_FULL: |
719 | ret = trace_seq_printf(s, " | "); | 722 | ret = trace_seq_printf(s, " | "); |
720 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 723 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; |
721 | case DURATION_FILL_START: | 724 | case DURATION_FILL_START: |
722 | ret = trace_seq_printf(s, " "); | 725 | ret = trace_seq_printf(s, " "); |
723 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 726 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; |
724 | case DURATION_FILL_END: | 727 | case DURATION_FILL_END: |
725 | ret = trace_seq_printf(s, " |"); | 728 | ret = trace_seq_printf(s, " |"); |
726 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 729 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; |
727 | } | 730 | } |
728 | 731 | ||
729 | /* Signal a overhead of time execution to the output */ | 732 | /* Signal a overhead of time execution to the output */ |
730 | if (flags & TRACE_GRAPH_PRINT_OVERHEAD) { | 733 | if (flags & TRACE_GRAPH_PRINT_OVERHEAD) { |
731 | /* Duration exceeded 100 msecs */ | 734 | /* Duration exceeded 100 msecs */ |
732 | if (duration > 100000ULL) | 735 | if (duration > 100000ULL) |
733 | ret = trace_seq_printf(s, "! "); | 736 | ret = trace_seq_printf(s, "! "); |
734 | /* Duration exceeded 10 msecs */ | 737 | /* Duration exceeded 10 msecs */ |
735 | else if (duration > 10000ULL) | 738 | else if (duration > 10000ULL) |
736 | ret = trace_seq_printf(s, "+ "); | 739 | ret = trace_seq_printf(s, "+ "); |
737 | } | 740 | } |
738 | 741 | ||
739 | /* | 742 | /* |
740 | * The -1 means we either did not exceed the duration tresholds | 743 | * The -1 means we either did not exceed the duration tresholds |
741 | * or we dont want to print out the overhead. Either way we need | 744 | * or we dont want to print out the overhead. Either way we need |
742 | * to fill out the space. | 745 | * to fill out the space. |
743 | */ | 746 | */ |
744 | if (ret == -1) | 747 | if (ret == -1) |
745 | ret = trace_seq_printf(s, " "); | 748 | ret = trace_seq_printf(s, " "); |
746 | 749 | ||
747 | /* Catching here any failure happenned above */ | 750 | /* Catching here any failure happenned above */ |
748 | if (!ret) | 751 | if (!ret) |
749 | return TRACE_TYPE_PARTIAL_LINE; | 752 | return TRACE_TYPE_PARTIAL_LINE; |
750 | 753 | ||
751 | ret = trace_print_graph_duration(duration, s); | 754 | ret = trace_print_graph_duration(duration, s); |
752 | if (ret != TRACE_TYPE_HANDLED) | 755 | if (ret != TRACE_TYPE_HANDLED) |
753 | return ret; | 756 | return ret; |
754 | 757 | ||
755 | ret = trace_seq_printf(s, "| "); | 758 | ret = trace_seq_printf(s, "| "); |
756 | if (!ret) | 759 | if (!ret) |
757 | return TRACE_TYPE_PARTIAL_LINE; | 760 | return TRACE_TYPE_PARTIAL_LINE; |
758 | 761 | ||
759 | return TRACE_TYPE_HANDLED; | 762 | return TRACE_TYPE_HANDLED; |
760 | } | 763 | } |
761 | 764 | ||
762 | /* Case of a leaf function on its call entry */ | 765 | /* Case of a leaf function on its call entry */ |
763 | static enum print_line_t | 766 | static enum print_line_t |
764 | print_graph_entry_leaf(struct trace_iterator *iter, | 767 | print_graph_entry_leaf(struct trace_iterator *iter, |
765 | struct ftrace_graph_ent_entry *entry, | 768 | struct ftrace_graph_ent_entry *entry, |
766 | struct ftrace_graph_ret_entry *ret_entry, | 769 | struct ftrace_graph_ret_entry *ret_entry, |
767 | struct trace_seq *s, u32 flags) | 770 | struct trace_seq *s, u32 flags) |
768 | { | 771 | { |
769 | struct fgraph_data *data = iter->private; | 772 | struct fgraph_data *data = iter->private; |
770 | struct ftrace_graph_ret *graph_ret; | 773 | struct ftrace_graph_ret *graph_ret; |
771 | struct ftrace_graph_ent *call; | 774 | struct ftrace_graph_ent *call; |
772 | unsigned long long duration; | 775 | unsigned long long duration; |
773 | int ret; | 776 | int ret; |
774 | int i; | 777 | int i; |
775 | 778 | ||
776 | graph_ret = &ret_entry->ret; | 779 | graph_ret = &ret_entry->ret; |
777 | call = &entry->graph_ent; | 780 | call = &entry->graph_ent; |
778 | duration = graph_ret->rettime - graph_ret->calltime; | 781 | duration = graph_ret->rettime - graph_ret->calltime; |
779 | 782 | ||
780 | if (data) { | 783 | if (data) { |
781 | struct fgraph_cpu_data *cpu_data; | 784 | struct fgraph_cpu_data *cpu_data; |
782 | int cpu = iter->cpu; | 785 | int cpu = iter->cpu; |
783 | 786 | ||
784 | cpu_data = per_cpu_ptr(data->cpu_data, cpu); | 787 | cpu_data = per_cpu_ptr(data->cpu_data, cpu); |
785 | 788 | ||
786 | /* | 789 | /* |
787 | * Comments display at + 1 to depth. Since | 790 | * Comments display at + 1 to depth. Since |
788 | * this is a leaf function, keep the comments | 791 | * this is a leaf function, keep the comments |
789 | * equal to this depth. | 792 | * equal to this depth. |
790 | */ | 793 | */ |
791 | cpu_data->depth = call->depth - 1; | 794 | cpu_data->depth = call->depth - 1; |
792 | 795 | ||
793 | /* No need to keep this function around for this depth */ | 796 | /* No need to keep this function around for this depth */ |
794 | if (call->depth < FTRACE_RETFUNC_DEPTH) | 797 | if (call->depth < FTRACE_RETFUNC_DEPTH) |
795 | cpu_data->enter_funcs[call->depth] = 0; | 798 | cpu_data->enter_funcs[call->depth] = 0; |
796 | } | 799 | } |
797 | 800 | ||
798 | /* Overhead and duration */ | 801 | /* Overhead and duration */ |
799 | ret = print_graph_duration(duration, s, flags); | 802 | ret = print_graph_duration(duration, s, flags); |
800 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 803 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
801 | return TRACE_TYPE_PARTIAL_LINE; | 804 | return TRACE_TYPE_PARTIAL_LINE; |
802 | 805 | ||
803 | /* Function */ | 806 | /* Function */ |
804 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | 807 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { |
805 | ret = trace_seq_printf(s, " "); | 808 | ret = trace_seq_printf(s, " "); |
806 | if (!ret) | 809 | if (!ret) |
807 | return TRACE_TYPE_PARTIAL_LINE; | 810 | return TRACE_TYPE_PARTIAL_LINE; |
808 | } | 811 | } |
809 | 812 | ||
810 | ret = trace_seq_printf(s, "%ps();\n", (void *)call->func); | 813 | ret = trace_seq_printf(s, "%ps();\n", (void *)call->func); |
811 | if (!ret) | 814 | if (!ret) |
812 | return TRACE_TYPE_PARTIAL_LINE; | 815 | return TRACE_TYPE_PARTIAL_LINE; |
813 | 816 | ||
814 | return TRACE_TYPE_HANDLED; | 817 | return TRACE_TYPE_HANDLED; |
815 | } | 818 | } |
816 | 819 | ||
817 | static enum print_line_t | 820 | static enum print_line_t |
818 | print_graph_entry_nested(struct trace_iterator *iter, | 821 | print_graph_entry_nested(struct trace_iterator *iter, |
819 | struct ftrace_graph_ent_entry *entry, | 822 | struct ftrace_graph_ent_entry *entry, |
820 | struct trace_seq *s, int cpu, u32 flags) | 823 | struct trace_seq *s, int cpu, u32 flags) |
821 | { | 824 | { |
822 | struct ftrace_graph_ent *call = &entry->graph_ent; | 825 | struct ftrace_graph_ent *call = &entry->graph_ent; |
823 | struct fgraph_data *data = iter->private; | 826 | struct fgraph_data *data = iter->private; |
824 | int ret; | 827 | int ret; |
825 | int i; | 828 | int i; |
826 | 829 | ||
827 | if (data) { | 830 | if (data) { |
828 | struct fgraph_cpu_data *cpu_data; | 831 | struct fgraph_cpu_data *cpu_data; |
829 | int cpu = iter->cpu; | 832 | int cpu = iter->cpu; |
830 | 833 | ||
831 | cpu_data = per_cpu_ptr(data->cpu_data, cpu); | 834 | cpu_data = per_cpu_ptr(data->cpu_data, cpu); |
832 | cpu_data->depth = call->depth; | 835 | cpu_data->depth = call->depth; |
833 | 836 | ||
834 | /* Save this function pointer to see if the exit matches */ | 837 | /* Save this function pointer to see if the exit matches */ |
835 | if (call->depth < FTRACE_RETFUNC_DEPTH) | 838 | if (call->depth < FTRACE_RETFUNC_DEPTH) |
836 | cpu_data->enter_funcs[call->depth] = call->func; | 839 | cpu_data->enter_funcs[call->depth] = call->func; |
837 | } | 840 | } |
838 | 841 | ||
839 | /* No time */ | 842 | /* No time */ |
840 | ret = print_graph_duration(DURATION_FILL_FULL, s, flags); | 843 | ret = print_graph_duration(DURATION_FILL_FULL, s, flags); |
841 | if (ret != TRACE_TYPE_HANDLED) | 844 | if (ret != TRACE_TYPE_HANDLED) |
842 | return ret; | 845 | return ret; |
843 | 846 | ||
844 | /* Function */ | 847 | /* Function */ |
845 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | 848 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { |
846 | ret = trace_seq_printf(s, " "); | 849 | ret = trace_seq_printf(s, " "); |
847 | if (!ret) | 850 | if (!ret) |
848 | return TRACE_TYPE_PARTIAL_LINE; | 851 | return TRACE_TYPE_PARTIAL_LINE; |
849 | } | 852 | } |
850 | 853 | ||
851 | ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func); | 854 | ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func); |
852 | if (!ret) | 855 | if (!ret) |
853 | return TRACE_TYPE_PARTIAL_LINE; | 856 | return TRACE_TYPE_PARTIAL_LINE; |
854 | 857 | ||
855 | /* | 858 | /* |
856 | * we already consumed the current entry to check the next one | 859 | * we already consumed the current entry to check the next one |
857 | * and see if this is a leaf. | 860 | * and see if this is a leaf. |
858 | */ | 861 | */ |
859 | return TRACE_TYPE_NO_CONSUME; | 862 | return TRACE_TYPE_NO_CONSUME; |
860 | } | 863 | } |
861 | 864 | ||
862 | static enum print_line_t | 865 | static enum print_line_t |
863 | print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, | 866 | print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, |
864 | int type, unsigned long addr, u32 flags) | 867 | int type, unsigned long addr, u32 flags) |
865 | { | 868 | { |
866 | struct fgraph_data *data = iter->private; | 869 | struct fgraph_data *data = iter->private; |
867 | struct trace_entry *ent = iter->ent; | 870 | struct trace_entry *ent = iter->ent; |
868 | int cpu = iter->cpu; | 871 | int cpu = iter->cpu; |
869 | int ret; | 872 | int ret; |
870 | 873 | ||
871 | /* Pid */ | 874 | /* Pid */ |
872 | if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE) | 875 | if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE) |
873 | return TRACE_TYPE_PARTIAL_LINE; | 876 | return TRACE_TYPE_PARTIAL_LINE; |
874 | 877 | ||
875 | if (type) { | 878 | if (type) { |
876 | /* Interrupt */ | 879 | /* Interrupt */ |
877 | ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags); | 880 | ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags); |
878 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 881 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
879 | return TRACE_TYPE_PARTIAL_LINE; | 882 | return TRACE_TYPE_PARTIAL_LINE; |
880 | } | 883 | } |
881 | 884 | ||
885 | if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) | ||
886 | return 0; | ||
887 | |||
882 | /* Absolute time */ | 888 | /* Absolute time */ |
883 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) { | 889 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) { |
884 | ret = print_graph_abs_time(iter->ts, s); | 890 | ret = print_graph_abs_time(iter->ts, s); |
885 | if (!ret) | 891 | if (!ret) |
886 | return TRACE_TYPE_PARTIAL_LINE; | 892 | return TRACE_TYPE_PARTIAL_LINE; |
887 | } | 893 | } |
888 | 894 | ||
889 | /* Cpu */ | 895 | /* Cpu */ |
890 | if (flags & TRACE_GRAPH_PRINT_CPU) { | 896 | if (flags & TRACE_GRAPH_PRINT_CPU) { |
891 | ret = print_graph_cpu(s, cpu); | 897 | ret = print_graph_cpu(s, cpu); |
892 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 898 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
893 | return TRACE_TYPE_PARTIAL_LINE; | 899 | return TRACE_TYPE_PARTIAL_LINE; |
894 | } | 900 | } |
895 | 901 | ||
896 | /* Proc */ | 902 | /* Proc */ |
897 | if (flags & TRACE_GRAPH_PRINT_PROC) { | 903 | if (flags & TRACE_GRAPH_PRINT_PROC) { |
898 | ret = print_graph_proc(s, ent->pid); | 904 | ret = print_graph_proc(s, ent->pid); |
899 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 905 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
900 | return TRACE_TYPE_PARTIAL_LINE; | 906 | return TRACE_TYPE_PARTIAL_LINE; |
901 | 907 | ||
902 | ret = trace_seq_printf(s, " | "); | 908 | ret = trace_seq_printf(s, " | "); |
903 | if (!ret) | 909 | if (!ret) |
904 | return TRACE_TYPE_PARTIAL_LINE; | 910 | return TRACE_TYPE_PARTIAL_LINE; |
905 | } | 911 | } |
906 | 912 | ||
907 | /* Latency format */ | 913 | /* Latency format */ |
908 | if (trace_flags & TRACE_ITER_LATENCY_FMT) { | 914 | if (trace_flags & TRACE_ITER_LATENCY_FMT) { |
909 | ret = print_graph_lat_fmt(s, ent); | 915 | ret = print_graph_lat_fmt(s, ent); |
910 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 916 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
911 | return TRACE_TYPE_PARTIAL_LINE; | 917 | return TRACE_TYPE_PARTIAL_LINE; |
912 | } | 918 | } |
913 | 919 | ||
914 | return 0; | 920 | return 0; |
915 | } | 921 | } |
916 | 922 | ||
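The two added lines above are the point of this hunk: after the pid check and any irq marker, every remaining context column (absolute time, cpu, proc, latency format) is skipped when the global context-info flag is clear. A minimal userspace sketch of the same gate, with hypothetical names standing in for trace_flags and TRACE_ITER_CONTEXT_INFO:

    #include <stdio.h>

    #define MY_CTX_INFO 0x1                 /* stand-in for TRACE_ITER_CONTEXT_INFO */

    static unsigned long my_trace_flags = MY_CTX_INFO;

    /* Emit the per-line context prefix, or nothing when the gate is off. */
    static int print_prologue(int cpu)
    {
            if (!(my_trace_flags & MY_CTX_INFO))
                    return 0;               /* bare function-graph output */
            return printf(" %d) ", cpu);
    }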
917 | /* | 923 | /* |
918 | * Entry check for irq code | 924 | * Entry check for irq code |
919 | * | 925 | * |
920 | * returns 1 if | 926 | * returns 1 if |
921 | * - we are inside irq code | 927 | * - we are inside irq code |
922 | * - we just entered irq code | 928 | * - we just entered irq code |
923 | * | 929 | * |
924 | * returns 0 if | 930 | * returns 0 if |
925 | * - funcgraph-interrupts option is set | 931 | * - funcgraph-interrupts option is set |
926 | * - we are not inside irq code | 932 | * - we are not inside irq code |
927 | */ | 933 | */ |
928 | static int | 934 | static int |
929 | check_irq_entry(struct trace_iterator *iter, u32 flags, | 935 | check_irq_entry(struct trace_iterator *iter, u32 flags, |
930 | unsigned long addr, int depth) | 936 | unsigned long addr, int depth) |
931 | { | 937 | { |
932 | int cpu = iter->cpu; | 938 | int cpu = iter->cpu; |
933 | int *depth_irq; | 939 | int *depth_irq; |
934 | struct fgraph_data *data = iter->private; | 940 | struct fgraph_data *data = iter->private; |
935 | 941 | ||
936 | /* | 942 | /* |
937 | * If we are either displaying irqs, or we got called as | 943 | * If we are either displaying irqs, or we got called as |
938 | * a graph event and private data does not exist, | 944 | * a graph event and private data does not exist, |
939 | * then we bypass the irq check. | 945 | * then we bypass the irq check. |
940 | */ | 946 | */ |
941 | if ((flags & TRACE_GRAPH_PRINT_IRQS) || | 947 | if ((flags & TRACE_GRAPH_PRINT_IRQS) || |
942 | (!data)) | 948 | (!data)) |
943 | return 0; | 949 | return 0; |
944 | 950 | ||
945 | depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); | 951 | depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); |
946 | 952 | ||
947 | /* | 953 | /* |
948 | * We are inside the irq code | 954 | * We are inside the irq code |
949 | */ | 955 | */ |
950 | if (*depth_irq >= 0) | 956 | if (*depth_irq >= 0) |
951 | return 1; | 957 | return 1; |
952 | 958 | ||
953 | if ((addr < (unsigned long)__irqentry_text_start) || | 959 | if ((addr < (unsigned long)__irqentry_text_start) || |
954 | (addr >= (unsigned long)__irqentry_text_end)) | 960 | (addr >= (unsigned long)__irqentry_text_end)) |
955 | return 0; | 961 | return 0; |
956 | 962 | ||
957 | /* | 963 | /* |
958 | * We are entering irq code. | 964 | * We are entering irq code. |
959 | */ | 965 | */ |
960 | *depth_irq = depth; | 966 | *depth_irq = depth; |
961 | return 1; | 967 | return 1; |
962 | } | 968 | } |
963 | 969 | ||
964 | /* | 970 | /* |
965 | * Return check for irq code | 971 | * Return check for irq code |
966 | * | 972 | * |
967 | * returns 1 if | 973 | * returns 1 if |
968 | * - we are inside irq code | 974 | * - we are inside irq code |
969 | * - we just left irq code | 975 | * - we just left irq code |
970 | * | 976 | * |
971 | * returns 0 if | 977 | * returns 0 if |
972 | * - funcgraph-interrupts option is set | 978 | * - funcgraph-interrupts option is set |
973 | * - we are not inside irq code | 979 | * - we are not inside irq code |
974 | */ | 980 | */ |
975 | static int | 981 | static int |
976 | check_irq_return(struct trace_iterator *iter, u32 flags, int depth) | 982 | check_irq_return(struct trace_iterator *iter, u32 flags, int depth) |
977 | { | 983 | { |
978 | int cpu = iter->cpu; | 984 | int cpu = iter->cpu; |
979 | int *depth_irq; | 985 | int *depth_irq; |
980 | struct fgraph_data *data = iter->private; | 986 | struct fgraph_data *data = iter->private; |
981 | 987 | ||
982 | /* | 988 | /* |
983 | * If we are either displaying irqs, or we got called as | 989 | * If we are either displaying irqs, or we got called as |
984 | * a graph event and private data does not exist, | 990 | * a graph event and private data does not exist, |
985 | * then we bypass the irq check. | 991 | * then we bypass the irq check. |
986 | */ | 992 | */ |
987 | if ((flags & TRACE_GRAPH_PRINT_IRQS) || | 993 | if ((flags & TRACE_GRAPH_PRINT_IRQS) || |
988 | (!data)) | 994 | (!data)) |
989 | return 0; | 995 | return 0; |
990 | 996 | ||
991 | depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); | 997 | depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); |
992 | 998 | ||
993 | /* | 999 | /* |
994 | * We are not inside the irq code. | 1000 | * We are not inside the irq code. |
995 | */ | 1001 | */ |
996 | if (*depth_irq == -1) | 1002 | if (*depth_irq == -1) |
997 | return 0; | 1003 | return 0; |
998 | 1004 | ||
999 | /* | 1005 | /* |
1000 | * We are inside the irq code, and this is a return entry. | 1006 | * We are inside the irq code, and this is a return entry. |
1001 | * Let's not trace it and clear the entry depth, since | 1007 | * Let's not trace it and clear the entry depth, since |
1002 | * we are out of irq code. | 1008 | * we are out of irq code. |
1003 | * | 1009 | * |
1004 | * This condition ensures that we 'leave the irq code' once | 1010 | * This condition ensures that we 'leave the irq code' once |
1005 | * we are out of the entry depth, thus protecting us from | 1011 | * we are out of the entry depth, thus protecting us from |
1006 | * losing the RETURN entry. | 1012 | * losing the RETURN entry. |
1007 | */ | 1013 | */ |
1008 | if (*depth_irq >= depth) { | 1014 | if (*depth_irq >= depth) { |
1009 | *depth_irq = -1; | 1015 | *depth_irq = -1; |
1010 | return 1; | 1016 | return 1; |
1011 | } | 1017 | } |
1012 | 1018 | ||
1013 | /* | 1019 | /* |
1014 | * We are inside the irq code, and this is not the entry. | 1020 | * We are inside the irq code, and this is not the entry. |
1015 | */ | 1021 | */ |
1016 | return 1; | 1022 | return 1; |
1017 | } | 1023 | } |
1018 | 1024 | ||
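check_irq_entry() and check_irq_return() implement a small per-cpu latch: the depth at which execution first hit the __irqentry_text range is remembered, and every event until the matching (or shallower) return is reported as inside irq code. A self-contained sketch of that latch, with invented names and the text-range bounds passed in instead of taken from linker symbols:

    /* depth_irq == -1 means "not inside irq code". */
    static int depth_irq = -1;

    static int irq_enter(unsigned long addr, int depth,
                         unsigned long irq_start, unsigned long irq_end)
    {
            if (depth_irq >= 0)
                    return 1;               /* already inside irq code */
            if (addr < irq_start || addr >= irq_end)
                    return 0;               /* not an irq entry point */
            depth_irq = depth;              /* latch the entry depth */
            return 1;
    }

    static int irq_return(int depth)
    {
            if (depth_irq == -1)
                    return 0;               /* never entered irq code */
            if (depth_irq >= depth)
                    depth_irq = -1;         /* unwound past the entry */
            return 1;                       /* inside, or just left */
    }

The >= test in irq_return() is the protection the comment above describes: even if the return record for the latched depth is dropped, any shallower return still clears the latch.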
1019 | static enum print_line_t | 1025 | static enum print_line_t |
1020 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | 1026 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, |
1021 | struct trace_iterator *iter, u32 flags) | 1027 | struct trace_iterator *iter, u32 flags) |
1022 | { | 1028 | { |
1023 | struct fgraph_data *data = iter->private; | 1029 | struct fgraph_data *data = iter->private; |
1024 | struct ftrace_graph_ent *call = &field->graph_ent; | 1030 | struct ftrace_graph_ent *call = &field->graph_ent; |
1025 | struct ftrace_graph_ret_entry *leaf_ret; | 1031 | struct ftrace_graph_ret_entry *leaf_ret; |
1026 | enum print_line_t ret; | 1032 | enum print_line_t ret; |
1027 | int cpu = iter->cpu; | 1033 | int cpu = iter->cpu; |
1028 | 1034 | ||
1029 | if (check_irq_entry(iter, flags, call->func, call->depth)) | 1035 | if (check_irq_entry(iter, flags, call->func, call->depth)) |
1030 | return TRACE_TYPE_HANDLED; | 1036 | return TRACE_TYPE_HANDLED; |
1031 | 1037 | ||
1032 | if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags)) | 1038 | if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags)) |
1033 | return TRACE_TYPE_PARTIAL_LINE; | 1039 | return TRACE_TYPE_PARTIAL_LINE; |
1034 | 1040 | ||
1035 | leaf_ret = get_return_for_leaf(iter, field); | 1041 | leaf_ret = get_return_for_leaf(iter, field); |
1036 | if (leaf_ret) | 1042 | if (leaf_ret) |
1037 | ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags); | 1043 | ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags); |
1038 | else | 1044 | else |
1039 | ret = print_graph_entry_nested(iter, field, s, cpu, flags); | 1045 | ret = print_graph_entry_nested(iter, field, s, cpu, flags); |
1040 | 1046 | ||
1041 | if (data) { | 1047 | if (data) { |
1042 | /* | 1048 | /* |
1043 | * If we failed to write our output, then we need to make | 1049 | * If we failed to write our output, then we need to make |
1044 | * note of it. Because we already consumed our entry. | 1050 | * note of it. Because we already consumed our entry. |
1045 | */ | 1051 | */ |
1046 | if (s->full) { | 1052 | if (s->full) { |
1047 | data->failed = 1; | 1053 | data->failed = 1; |
1048 | data->cpu = cpu; | 1054 | data->cpu = cpu; |
1049 | } else | 1055 | } else |
1050 | data->failed = 0; | 1056 | data->failed = 0; |
1051 | } | 1057 | } |
1052 | 1058 | ||
1053 | return ret; | 1059 | return ret; |
1054 | } | 1060 | } |
1055 | 1061 | ||
1056 | static enum print_line_t | 1062 | static enum print_line_t |
1057 | print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | 1063 | print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, |
1058 | struct trace_entry *ent, struct trace_iterator *iter, | 1064 | struct trace_entry *ent, struct trace_iterator *iter, |
1059 | u32 flags) | 1065 | u32 flags) |
1060 | { | 1066 | { |
1061 | unsigned long long duration = trace->rettime - trace->calltime; | 1067 | unsigned long long duration = trace->rettime - trace->calltime; |
1062 | struct fgraph_data *data = iter->private; | 1068 | struct fgraph_data *data = iter->private; |
1063 | pid_t pid = ent->pid; | 1069 | pid_t pid = ent->pid; |
1064 | int cpu = iter->cpu; | 1070 | int cpu = iter->cpu; |
1065 | int func_match = 1; | 1071 | int func_match = 1; |
1066 | int ret; | 1072 | int ret; |
1067 | int i; | 1073 | int i; |
1068 | 1074 | ||
1069 | if (check_irq_return(iter, flags, trace->depth)) | 1075 | if (check_irq_return(iter, flags, trace->depth)) |
1070 | return TRACE_TYPE_HANDLED; | 1076 | return TRACE_TYPE_HANDLED; |
1071 | 1077 | ||
1072 | if (data) { | 1078 | if (data) { |
1073 | struct fgraph_cpu_data *cpu_data; | 1079 | struct fgraph_cpu_data *cpu_data; |
1074 | int cpu = iter->cpu; | 1080 | int cpu = iter->cpu; |
1075 | 1081 | ||
1076 | cpu_data = per_cpu_ptr(data->cpu_data, cpu); | 1082 | cpu_data = per_cpu_ptr(data->cpu_data, cpu); |
1077 | 1083 | ||
1078 | /* | 1084 | /* |
1079 | * Comments display at depth + 1. This is the | 1085 | * Comments display at depth + 1. This is the |
1080 | * return from a function, so we now want the comments | 1086 | * return from a function, so we now want the comments |
1081 | * to display at the same level as the bracket. | 1087 | * to display at the same level as the bracket. |
1082 | */ | 1088 | */ |
1083 | cpu_data->depth = trace->depth - 1; | 1089 | cpu_data->depth = trace->depth - 1; |
1084 | 1090 | ||
1085 | if (trace->depth < FTRACE_RETFUNC_DEPTH) { | 1091 | if (trace->depth < FTRACE_RETFUNC_DEPTH) { |
1086 | if (cpu_data->enter_funcs[trace->depth] != trace->func) | 1092 | if (cpu_data->enter_funcs[trace->depth] != trace->func) |
1087 | func_match = 0; | 1093 | func_match = 0; |
1088 | cpu_data->enter_funcs[trace->depth] = 0; | 1094 | cpu_data->enter_funcs[trace->depth] = 0; |
1089 | } | 1095 | } |
1090 | } | 1096 | } |
1091 | 1097 | ||
1092 | if (print_graph_prologue(iter, s, 0, 0, flags)) | 1098 | if (print_graph_prologue(iter, s, 0, 0, flags)) |
1093 | return TRACE_TYPE_PARTIAL_LINE; | 1099 | return TRACE_TYPE_PARTIAL_LINE; |
1094 | 1100 | ||
1095 | /* Overhead and duration */ | 1101 | /* Overhead and duration */ |
1096 | ret = print_graph_duration(duration, s, flags); | 1102 | ret = print_graph_duration(duration, s, flags); |
1097 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 1103 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
1098 | return TRACE_TYPE_PARTIAL_LINE; | 1104 | return TRACE_TYPE_PARTIAL_LINE; |
1099 | 1105 | ||
1100 | /* Closing brace */ | 1106 | /* Closing brace */ |
1101 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { | 1107 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { |
1102 | ret = trace_seq_printf(s, " "); | 1108 | ret = trace_seq_printf(s, " "); |
1103 | if (!ret) | 1109 | if (!ret) |
1104 | return TRACE_TYPE_PARTIAL_LINE; | 1110 | return TRACE_TYPE_PARTIAL_LINE; |
1105 | } | 1111 | } |
1106 | 1112 | ||
1107 | /* | 1113 | /* |
1108 | * If the return function does not have a matching entry, | 1114 | * If the return function does not have a matching entry, |
1109 | * then the entry was lost. Instead of just printing | 1115 | * then the entry was lost. Instead of just printing |
1110 | * the '}' and letting the user guess what function this | 1116 | * the '}' and letting the user guess what function this |
1111 | * belongs to, write out the function name. | 1117 | * belongs to, write out the function name. |
1112 | */ | 1118 | */ |
1113 | if (func_match) { | 1119 | if (func_match) { |
1114 | ret = trace_seq_printf(s, "}\n"); | 1120 | ret = trace_seq_printf(s, "}\n"); |
1115 | if (!ret) | 1121 | if (!ret) |
1116 | return TRACE_TYPE_PARTIAL_LINE; | 1122 | return TRACE_TYPE_PARTIAL_LINE; |
1117 | } else { | 1123 | } else { |
1118 | ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func); | 1124 | ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func); |
1119 | if (!ret) | 1125 | if (!ret) |
1120 | return TRACE_TYPE_PARTIAL_LINE; | 1126 | return TRACE_TYPE_PARTIAL_LINE; |
1121 | } | 1127 | } |
1122 | 1128 | ||
1123 | /* Overrun */ | 1129 | /* Overrun */ |
1124 | if (flags & TRACE_GRAPH_PRINT_OVERRUN) { | 1130 | if (flags & TRACE_GRAPH_PRINT_OVERRUN) { |
1125 | ret = trace_seq_printf(s, " (Overruns: %lu)\n", | 1131 | ret = trace_seq_printf(s, " (Overruns: %lu)\n", |
1126 | trace->overrun); | 1132 | trace->overrun); |
1127 | if (!ret) | 1133 | if (!ret) |
1128 | return TRACE_TYPE_PARTIAL_LINE; | 1134 | return TRACE_TYPE_PARTIAL_LINE; |
1129 | } | 1135 | } |
1130 | 1136 | ||
1131 | ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, | 1137 | ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, |
1132 | cpu, pid, flags); | 1138 | cpu, pid, flags); |
1133 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 1139 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
1134 | return TRACE_TYPE_PARTIAL_LINE; | 1140 | return TRACE_TYPE_PARTIAL_LINE; |
1135 | 1141 | ||
1136 | return TRACE_TYPE_HANDLED; | 1142 | return TRACE_TYPE_HANDLED; |
1137 | } | 1143 | } |
1138 | 1144 | ||
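This is where the func_match bookkeeping from the data block pays off: if the matching entry record was lost, the closing brace is annotated with the function name instead of leaving the reader to guess. The idea in isolation (names hypothetical):

    #include <stdio.h>

    static void close_brace(int func_match, const char *name)
    {
            if (func_match)
                    printf("}\n");
            else
                    printf("} /* %s */\n", name);   /* entry lost: name it */
    }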
1139 | static enum print_line_t | 1145 | static enum print_line_t |
1140 | print_graph_comment(struct trace_seq *s, struct trace_entry *ent, | 1146 | print_graph_comment(struct trace_seq *s, struct trace_entry *ent, |
1141 | struct trace_iterator *iter, u32 flags) | 1147 | struct trace_iterator *iter, u32 flags) |
1142 | { | 1148 | { |
1143 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); | 1149 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); |
1144 | struct fgraph_data *data = iter->private; | 1150 | struct fgraph_data *data = iter->private; |
1145 | struct trace_event *event; | 1151 | struct trace_event *event; |
1146 | int depth = 0; | 1152 | int depth = 0; |
1147 | int ret; | 1153 | int ret; |
1148 | int i; | 1154 | int i; |
1149 | 1155 | ||
1150 | if (data) | 1156 | if (data) |
1151 | depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth; | 1157 | depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth; |
1152 | 1158 | ||
1153 | if (print_graph_prologue(iter, s, 0, 0, flags)) | 1159 | if (print_graph_prologue(iter, s, 0, 0, flags)) |
1154 | return TRACE_TYPE_PARTIAL_LINE; | 1160 | return TRACE_TYPE_PARTIAL_LINE; |
1155 | 1161 | ||
1156 | /* No time */ | 1162 | /* No time */ |
1157 | ret = print_graph_duration(DURATION_FILL_FULL, s, flags); | 1163 | ret = print_graph_duration(DURATION_FILL_FULL, s, flags); |
1158 | if (ret != TRACE_TYPE_HANDLED) | 1164 | if (ret != TRACE_TYPE_HANDLED) |
1159 | return ret; | 1165 | return ret; |
1160 | 1166 | ||
1161 | /* Indentation */ | 1167 | /* Indentation */ |
1162 | if (depth > 0) | 1168 | if (depth > 0) |
1163 | for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) { | 1169 | for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) { |
1164 | ret = trace_seq_printf(s, " "); | 1170 | ret = trace_seq_printf(s, " "); |
1165 | if (!ret) | 1171 | if (!ret) |
1166 | return TRACE_TYPE_PARTIAL_LINE; | 1172 | return TRACE_TYPE_PARTIAL_LINE; |
1167 | } | 1173 | } |
1168 | 1174 | ||
1169 | /* The comment */ | 1175 | /* The comment */ |
1170 | ret = trace_seq_printf(s, "/* "); | 1176 | ret = trace_seq_printf(s, "/* "); |
1171 | if (!ret) | 1177 | if (!ret) |
1172 | return TRACE_TYPE_PARTIAL_LINE; | 1178 | return TRACE_TYPE_PARTIAL_LINE; |
1173 | 1179 | ||
1174 | switch (iter->ent->type) { | 1180 | switch (iter->ent->type) { |
1175 | case TRACE_BPRINT: | 1181 | case TRACE_BPRINT: |
1176 | ret = trace_print_bprintk_msg_only(iter); | 1182 | ret = trace_print_bprintk_msg_only(iter); |
1177 | if (ret != TRACE_TYPE_HANDLED) | 1183 | if (ret != TRACE_TYPE_HANDLED) |
1178 | return ret; | 1184 | return ret; |
1179 | break; | 1185 | break; |
1180 | case TRACE_PRINT: | 1186 | case TRACE_PRINT: |
1181 | ret = trace_print_printk_msg_only(iter); | 1187 | ret = trace_print_printk_msg_only(iter); |
1182 | if (ret != TRACE_TYPE_HANDLED) | 1188 | if (ret != TRACE_TYPE_HANDLED) |
1183 | return ret; | 1189 | return ret; |
1184 | break; | 1190 | break; |
1185 | default: | 1191 | default: |
1186 | event = ftrace_find_event(ent->type); | 1192 | event = ftrace_find_event(ent->type); |
1187 | if (!event) | 1193 | if (!event) |
1188 | return TRACE_TYPE_UNHANDLED; | 1194 | return TRACE_TYPE_UNHANDLED; |
1189 | 1195 | ||
1190 | ret = event->funcs->trace(iter, sym_flags, event); | 1196 | ret = event->funcs->trace(iter, sym_flags, event); |
1191 | if (ret != TRACE_TYPE_HANDLED) | 1197 | if (ret != TRACE_TYPE_HANDLED) |
1192 | return ret; | 1198 | return ret; |
1193 | } | 1199 | } |
1194 | 1200 | ||
1195 | /* Strip ending newline */ | 1201 | /* Strip ending newline */ |
1196 | if (s->buffer[s->len - 1] == '\n') { | 1202 | if (s->buffer[s->len - 1] == '\n') { |
1197 | s->buffer[s->len - 1] = '\0'; | 1203 | s->buffer[s->len - 1] = '\0'; |
1198 | s->len--; | 1204 | s->len--; |
1199 | } | 1205 | } |
1200 | 1206 | ||
1201 | ret = trace_seq_printf(s, " */\n"); | 1207 | ret = trace_seq_printf(s, " */\n"); |
1202 | if (!ret) | 1208 | if (!ret) |
1203 | return TRACE_TYPE_PARTIAL_LINE; | 1209 | return TRACE_TYPE_PARTIAL_LINE; |
1204 | 1210 | ||
1205 | return TRACE_TYPE_HANDLED; | 1211 | return TRACE_TYPE_HANDLED; |
1206 | } | 1212 | } |
1207 | 1213 | ||
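Before the closing " */" is appended, any trailing newline left behind by the delegated event printer is stripped so the whole comment stays on one line. The same fix-up as a standalone helper; the kernel path can rely on the preceding "/* " having made s->len nonzero, while a standalone version should guard explicitly:

    #include <stddef.h>

    /* Drop one trailing newline so a suffix can follow on the same line. */
    static void strip_newline(char *buf, size_t *len)
    {
            if (*len && buf[*len - 1] == '\n')
                    buf[--(*len)] = '\0';
    }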
1208 | 1214 | ||
1209 | enum print_line_t | 1215 | enum print_line_t |
1210 | print_graph_function_flags(struct trace_iterator *iter, u32 flags) | 1216 | print_graph_function_flags(struct trace_iterator *iter, u32 flags) |
1211 | { | 1217 | { |
1212 | struct ftrace_graph_ent_entry *field; | 1218 | struct ftrace_graph_ent_entry *field; |
1213 | struct fgraph_data *data = iter->private; | 1219 | struct fgraph_data *data = iter->private; |
1214 | struct trace_entry *entry = iter->ent; | 1220 | struct trace_entry *entry = iter->ent; |
1215 | struct trace_seq *s = &iter->seq; | 1221 | struct trace_seq *s = &iter->seq; |
1216 | int cpu = iter->cpu; | 1222 | int cpu = iter->cpu; |
1217 | int ret; | 1223 | int ret; |
1218 | 1224 | ||
1219 | if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) { | 1225 | if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) { |
1220 | per_cpu_ptr(data->cpu_data, cpu)->ignore = 0; | 1226 | per_cpu_ptr(data->cpu_data, cpu)->ignore = 0; |
1221 | return TRACE_TYPE_HANDLED; | 1227 | return TRACE_TYPE_HANDLED; |
1222 | } | 1228 | } |
1223 | 1229 | ||
1224 | /* | 1230 | /* |
1225 | * If the last output failed, there's a possibility we need | 1231 | * If the last output failed, there's a possibility we need |
1226 | * to print out the missing entry, which would otherwise be lost. | 1232 | * to print out the missing entry, which would otherwise be lost. |
1227 | */ | 1233 | */ |
1228 | if (data && data->failed) { | 1234 | if (data && data->failed) { |
1229 | field = &data->ent; | 1235 | field = &data->ent; |
1230 | iter->cpu = data->cpu; | 1236 | iter->cpu = data->cpu; |
1231 | ret = print_graph_entry(field, s, iter, flags); | 1237 | ret = print_graph_entry(field, s, iter, flags); |
1232 | if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) { | 1238 | if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) { |
1233 | per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1; | 1239 | per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1; |
1234 | ret = TRACE_TYPE_NO_CONSUME; | 1240 | ret = TRACE_TYPE_NO_CONSUME; |
1235 | } | 1241 | } |
1236 | iter->cpu = cpu; | 1242 | iter->cpu = cpu; |
1237 | return ret; | 1243 | return ret; |
1238 | } | 1244 | } |
1239 | 1245 | ||
1240 | switch (entry->type) { | 1246 | switch (entry->type) { |
1241 | case TRACE_GRAPH_ENT: { | 1247 | case TRACE_GRAPH_ENT: { |
1242 | /* | 1248 | /* |
1243 | * print_graph_entry() may consume the current event, | 1249 | * print_graph_entry() may consume the current event, |
1244 | * thus @field may become invalid, so we need to save it. | 1250 | * thus @field may become invalid, so we need to save it. |
1245 | * sizeof(struct ftrace_graph_ent_entry) is very small, | 1251 | * sizeof(struct ftrace_graph_ent_entry) is very small, |
1246 | * it can be safely saved on the stack. | 1252 | * it can be safely saved on the stack. |
1247 | */ | 1253 | */ |
1248 | struct ftrace_graph_ent_entry saved; | 1254 | struct ftrace_graph_ent_entry saved; |
1249 | trace_assign_type(field, entry); | 1255 | trace_assign_type(field, entry); |
1250 | saved = *field; | 1256 | saved = *field; |
1251 | return print_graph_entry(&saved, s, iter, flags); | 1257 | return print_graph_entry(&saved, s, iter, flags); |
1252 | } | 1258 | } |
1253 | case TRACE_GRAPH_RET: { | 1259 | case TRACE_GRAPH_RET: { |
1254 | struct ftrace_graph_ret_entry *field; | 1260 | struct ftrace_graph_ret_entry *field; |
1255 | trace_assign_type(field, entry); | 1261 | trace_assign_type(field, entry); |
1256 | return print_graph_return(&field->ret, s, entry, iter, flags); | 1262 | return print_graph_return(&field->ret, s, entry, iter, flags); |
1257 | } | 1263 | } |
1258 | case TRACE_STACK: | 1264 | case TRACE_STACK: |
1259 | case TRACE_FN: | 1265 | case TRACE_FN: |
1260 | /* don't trace stack and functions as comments */ | 1266 | /* don't trace stack and functions as comments */ |
1261 | return TRACE_TYPE_UNHANDLED; | 1267 | return TRACE_TYPE_UNHANDLED; |
1262 | 1268 | ||
1263 | default: | 1269 | default: |
1264 | return print_graph_comment(s, entry, iter, flags); | 1270 | return print_graph_comment(s, entry, iter, flags); |
1265 | } | 1271 | } |
1266 | 1272 | ||
1267 | return TRACE_TYPE_HANDLED; | 1273 | return TRACE_TYPE_HANDLED; |
1268 | } | 1274 | } |
1269 | 1275 | ||
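The data->failed branch above closes the loop opened in print_graph_entry(): a leaf entry is consumed from the ring buffer before its output is known to fit, so on overflow the copied entry is replayed on the next call, and if the iterator has meanwhile moved to another cpu, the stashed cpu's next record is flagged to be skipped. A compressed sketch of that contract (all names hypothetical; in the real code the failed flag is maintained inside the print path itself):

    enum res { HANDLED, NO_CONSUME };

    struct pending {
            int failed;                     /* last output was lost */
            int cpu;                        /* cpu still owed a record */
    };

    static enum res maybe_replay(struct pending *p, int cur_cpu,
                                 int ignore[], enum res (*reprint)(int cpu))
    {
            enum res ret;

            if (!p->failed)
                    return NO_CONSUME;      /* nothing pending */

            ret = reprint(p->cpu);          /* replay the stashed entry */
            if (ret == HANDLED && p->cpu != cur_cpu) {
                    ignore[p->cpu] = 1;     /* that record is now printed */
                    ret = NO_CONSUME;       /* keep the current record */
            }
            return ret;
    }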
1270 | static enum print_line_t | 1276 | static enum print_line_t |
1271 | print_graph_function(struct trace_iterator *iter) | 1277 | print_graph_function(struct trace_iterator *iter) |
1272 | { | 1278 | { |
1273 | return print_graph_function_flags(iter, tracer_flags.val); | 1279 | return print_graph_function_flags(iter, tracer_flags.val); |
1274 | } | 1280 | } |
1275 | 1281 | ||
1276 | static enum print_line_t | 1282 | static enum print_line_t |
1277 | print_graph_function_event(struct trace_iterator *iter, int flags, | 1283 | print_graph_function_event(struct trace_iterator *iter, int flags, |
1278 | struct trace_event *event) | 1284 | struct trace_event *event) |
1279 | { | 1285 | { |
1280 | return print_graph_function(iter); | 1286 | return print_graph_function(iter); |
1281 | } | 1287 | } |
1282 | 1288 | ||
1283 | static void print_lat_header(struct seq_file *s, u32 flags) | 1289 | static void print_lat_header(struct seq_file *s, u32 flags) |
1284 | { | 1290 | { |
1285 | static const char spaces[] = " " /* 16 spaces */ | 1291 | static const char spaces[] = " " /* 16 spaces */ |
1286 | " " /* 4 spaces */ | 1292 | " " /* 4 spaces */ |
1287 | " "; /* 17 spaces */ | 1293 | " "; /* 17 spaces */ |
1288 | int size = 0; | 1294 | int size = 0; |
1289 | 1295 | ||
1290 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) | 1296 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) |
1291 | size += 16; | 1297 | size += 16; |
1292 | if (flags & TRACE_GRAPH_PRINT_CPU) | 1298 | if (flags & TRACE_GRAPH_PRINT_CPU) |
1293 | size += 4; | 1299 | size += 4; |
1294 | if (flags & TRACE_GRAPH_PRINT_PROC) | 1300 | if (flags & TRACE_GRAPH_PRINT_PROC) |
1295 | size += 17; | 1301 | size += 17; |
1296 | 1302 | ||
1297 | seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces); | 1303 | seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces); |
1298 | seq_printf(s, "#%.*s / _----=> need-resched \n", size, spaces); | 1304 | seq_printf(s, "#%.*s / _----=> need-resched \n", size, spaces); |
1299 | seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces); | 1305 | seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces); |
1300 | seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces); | 1306 | seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces); |
1301 | seq_printf(s, "#%.*s||| / \n", size, spaces); | 1307 | seq_printf(s, "#%.*s||| / \n", size, spaces); |
1302 | } | 1308 | } |
1303 | 1309 | ||
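print_lat_header() lines the latency legend up under whichever optional columns are enabled by printing a computed-width slice of a constant run of spaces with "%.*s". The trick in isolation:

    #include <stdio.h>

    static void pad_demo(int time_on, int cpu_on)
    {
            static const char spaces[] = "                    "; /* 20 spaces */
            int size = 0;

            if (time_on)
                    size += 16;             /* width of the TIME column */
            if (cpu_on)
                    size += 4;              /* width of the CPU column */
            printf("#%.*s _-----=> irqs-off\n", size, spaces);
    }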
1304 | static void __print_graph_headers_flags(struct seq_file *s, u32 flags) | 1310 | static void __print_graph_headers_flags(struct seq_file *s, u32 flags) |
1305 | { | 1311 | { |
1306 | int lat = trace_flags & TRACE_ITER_LATENCY_FMT; | 1312 | int lat = trace_flags & TRACE_ITER_LATENCY_FMT; |
1307 | 1313 | ||
1308 | if (lat) | 1314 | if (lat) |
1309 | print_lat_header(s, flags); | 1315 | print_lat_header(s, flags); |
1310 | 1316 | ||
1311 | /* 1st line */ | 1317 | /* 1st line */ |
1312 | seq_printf(s, "#"); | 1318 | seq_printf(s, "#"); |
1313 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) | 1319 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) |
1314 | seq_printf(s, " TIME "); | 1320 | seq_printf(s, " TIME "); |
1315 | if (flags & TRACE_GRAPH_PRINT_CPU) | 1321 | if (flags & TRACE_GRAPH_PRINT_CPU) |
1316 | seq_printf(s, " CPU"); | 1322 | seq_printf(s, " CPU"); |
1317 | if (flags & TRACE_GRAPH_PRINT_PROC) | 1323 | if (flags & TRACE_GRAPH_PRINT_PROC) |
1318 | seq_printf(s, " TASK/PID "); | 1324 | seq_printf(s, " TASK/PID "); |
1319 | if (lat) | 1325 | if (lat) |
1320 | seq_printf(s, "||||"); | 1326 | seq_printf(s, "||||"); |
1321 | if (flags & TRACE_GRAPH_PRINT_DURATION) | 1327 | if (flags & TRACE_GRAPH_PRINT_DURATION) |
1322 | seq_printf(s, " DURATION "); | 1328 | seq_printf(s, " DURATION "); |
1323 | seq_printf(s, " FUNCTION CALLS\n"); | 1329 | seq_printf(s, " FUNCTION CALLS\n"); |
1324 | 1330 | ||
1325 | /* 2nd line */ | 1331 | /* 2nd line */ |
1326 | seq_printf(s, "#"); | 1332 | seq_printf(s, "#"); |
1327 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) | 1333 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) |
1328 | seq_printf(s, " | "); | 1334 | seq_printf(s, " | "); |
1329 | if (flags & TRACE_GRAPH_PRINT_CPU) | 1335 | if (flags & TRACE_GRAPH_PRINT_CPU) |
1330 | seq_printf(s, " | "); | 1336 | seq_printf(s, " | "); |
1331 | if (flags & TRACE_GRAPH_PRINT_PROC) | 1337 | if (flags & TRACE_GRAPH_PRINT_PROC) |
1332 | seq_printf(s, " | | "); | 1338 | seq_printf(s, " | | "); |
1333 | if (lat) | 1339 | if (lat) |
1334 | seq_printf(s, "||||"); | 1340 | seq_printf(s, "||||"); |
1335 | if (flags & TRACE_GRAPH_PRINT_DURATION) | 1341 | if (flags & TRACE_GRAPH_PRINT_DURATION) |
1336 | seq_printf(s, " | | "); | 1342 | seq_printf(s, " | | "); |
1337 | seq_printf(s, " | | | |\n"); | 1343 | seq_printf(s, " | | | |\n"); |
1338 | } | 1344 | } |
1339 | 1345 | ||
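Each optional column contributes its title to the first header row and its ruler to the second, so the two runs of seq_printf() above must stay in lockstep flag for flag. A reduced sketch of the same two-row layout:

    #include <stdio.h>

    static void two_row_header(int cpu_on, int duration_on)
    {
            printf("#");
            if (cpu_on)
                    printf(" CPU");
            if (duration_on)
                    printf("  DURATION   ");
            printf("  FUNCTION CALLS\n");

            printf("#");
            if (cpu_on)
                    printf(" | ");
            if (duration_on)
                    printf("   |   |     ");
            printf("   |   |   |   |\n");
    }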
1340 | void print_graph_headers(struct seq_file *s) | 1346 | void print_graph_headers(struct seq_file *s) |
1341 | { | 1347 | { |
1342 | print_graph_headers_flags(s, tracer_flags.val); | 1348 | print_graph_headers_flags(s, tracer_flags.val); |
1343 | } | 1349 | } |
1344 | 1350 | ||
1345 | void print_graph_headers_flags(struct seq_file *s, u32 flags) | 1351 | void print_graph_headers_flags(struct seq_file *s, u32 flags) |
1346 | { | 1352 | { |
1347 | struct trace_iterator *iter = s->private; | 1353 | struct trace_iterator *iter = s->private; |
1354 | |||
1355 | if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) | ||
1356 | return; | ||
1348 | 1357 | ||
1349 | if (trace_flags & TRACE_ITER_LATENCY_FMT) { | 1358 | if (trace_flags & TRACE_ITER_LATENCY_FMT) { |
1350 | /* print nothing if the buffers are empty */ | 1359 | /* print nothing if the buffers are empty */ |
1351 | if (trace_empty(iter)) | 1360 | if (trace_empty(iter)) |
1352 | return; | 1361 | return; |
1353 | 1362 | ||
1354 | print_trace_header(s, iter); | 1363 | print_trace_header(s, iter); |
1355 | } | 1364 | } |
1356 | 1365 | ||
1357 | __print_graph_headers_flags(s, flags); | 1366 | __print_graph_headers_flags(s, flags); |
1358 | } | 1367 | } |
1359 | 1368 | ||
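This early return is the second half of the commit: headers obey the same global gate as the per-line prologue, so disabling context-info removes the columns and their titles together. Reusing the stand-ins from the sketches above:

    static void headers_demo(void)
    {
            if (!(my_trace_flags & MY_CTX_INFO))
                    return;                 /* no columns, so no headers */
            two_row_header(1, 1);
    }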
1360 | void graph_trace_open(struct trace_iterator *iter) | 1369 | void graph_trace_open(struct trace_iterator *iter) |
1361 | { | 1370 | { |
1362 | /* pid and depth on the last trace processed */ | 1371 | /* pid and depth on the last trace processed */ |
1363 | struct fgraph_data *data; | 1372 | struct fgraph_data *data; |
1364 | int cpu; | 1373 | int cpu; |
1365 | 1374 | ||
1366 | iter->private = NULL; | 1375 | iter->private = NULL; |
1367 | 1376 | ||
1368 | data = kzalloc(sizeof(*data), GFP_KERNEL); | 1377 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
1369 | if (!data) | 1378 | if (!data) |
1370 | goto out_err; | 1379 | goto out_err; |
1371 | 1380 | ||
1372 | data->cpu_data = alloc_percpu(struct fgraph_cpu_data); | 1381 | data->cpu_data = alloc_percpu(struct fgraph_cpu_data); |
1373 | if (!data->cpu_data) | 1382 | if (!data->cpu_data) |
1374 | goto out_err_free; | 1383 | goto out_err_free; |
1375 | 1384 | ||
1376 | for_each_possible_cpu(cpu) { | 1385 | for_each_possible_cpu(cpu) { |
1377 | pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); | 1386 | pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); |
1378 | int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth); | 1387 | int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth); |
1379 | int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore); | 1388 | int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore); |
1380 | int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); | 1389 | int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); |
1381 | 1390 | ||
1382 | *pid = -1; | 1391 | *pid = -1; |
1383 | *depth = 0; | 1392 | *depth = 0; |
1384 | *ignore = 0; | 1393 | *ignore = 0; |
1385 | *depth_irq = -1; | 1394 | *depth_irq = -1; |
1386 | } | 1395 | } |
1387 | 1396 | ||
1388 | iter->private = data; | 1397 | iter->private = data; |
1389 | 1398 | ||
1390 | return; | 1399 | return; |
1391 | 1400 | ||
1392 | out_err_free: | 1401 | out_err_free: |
1393 | kfree(data); | 1402 | kfree(data); |
1394 | out_err: | 1403 | out_err: |
1395 | pr_warning("function graph tracer: not enough memory\n"); | 1404 | pr_warning("function graph tracer: not enough memory\n"); |
1396 | } | 1405 | } |
1397 | 1406 | ||
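graph_trace_open() is the usual per-cpu state setup: one allocation for the wrapper, one percpu allocation for the array, and a reset loop putting every slot into its "nothing seen yet" state (last_pid and depth_irq both -1). A userspace stand-in with calloc() in place of alloc_percpu():

    #include <stdlib.h>

    struct cpu_state {
            int last_pid, depth, ignore, depth_irq;
    };

    static struct cpu_state *open_state(int ncpus)
    {
            struct cpu_state *s = calloc(ncpus, sizeof(*s));

            if (!s)
                    return NULL;
            for (int i = 0; i < ncpus; i++) {
                    s[i].last_pid = -1;     /* no task seen yet */
                    s[i].depth_irq = -1;    /* not inside irq code */
            }
            return s;
    }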
1398 | void graph_trace_close(struct trace_iterator *iter) | 1407 | void graph_trace_close(struct trace_iterator *iter) |
1399 | { | 1408 | { |
1400 | struct fgraph_data *data = iter->private; | 1409 | struct fgraph_data *data = iter->private; |
1401 | 1410 | ||
1402 | if (data) { | 1411 | if (data) { |
1403 | free_percpu(data->cpu_data); | 1412 | free_percpu(data->cpu_data); |
1404 | kfree(data); | 1413 | kfree(data); |
1405 | } | 1414 | } |
1406 | } | 1415 | } |
1407 | 1416 | ||
1408 | static int func_graph_set_flag(u32 old_flags, u32 bit, int set) | 1417 | static int func_graph_set_flag(u32 old_flags, u32 bit, int set) |
1409 | { | 1418 | { |
1410 | if (bit == TRACE_GRAPH_PRINT_IRQS) | 1419 | if (bit == TRACE_GRAPH_PRINT_IRQS) |
1411 | ftrace_graph_skip_irqs = !set; | 1420 | ftrace_graph_skip_irqs = !set; |
1412 | 1421 | ||
1413 | return 0; | 1422 | return 0; |
1414 | } | 1423 | } |
1415 | 1424 | ||
1416 | static struct trace_event_functions graph_functions = { | 1425 | static struct trace_event_functions graph_functions = { |
1417 | .trace = print_graph_function_event, | 1426 | .trace = print_graph_function_event, |
1418 | }; | 1427 | }; |
1419 | 1428 | ||
1420 | static struct trace_event graph_trace_entry_event = { | 1429 | static struct trace_event graph_trace_entry_event = { |
1421 | .type = TRACE_GRAPH_ENT, | 1430 | .type = TRACE_GRAPH_ENT, |
1422 | .funcs = &graph_functions, | 1431 | .funcs = &graph_functions, |
1423 | }; | 1432 | }; |
1424 | 1433 | ||
1425 | static struct trace_event graph_trace_ret_event = { | 1434 | static struct trace_event graph_trace_ret_event = { |
1426 | .type = TRACE_GRAPH_RET, | 1435 | .type = TRACE_GRAPH_RET, |
1427 | .funcs = &graph_functions | 1436 | .funcs = &graph_functions |
1428 | }; | 1437 | }; |
1429 | 1438 | ||
1430 | static struct tracer graph_trace __read_mostly = { | 1439 | static struct tracer graph_trace __read_mostly = { |
1431 | .name = "function_graph", | 1440 | .name = "function_graph", |
1432 | .open = graph_trace_open, | 1441 | .open = graph_trace_open, |
1433 | .pipe_open = graph_trace_open, | 1442 | .pipe_open = graph_trace_open, |
1434 | .close = graph_trace_close, | 1443 | .close = graph_trace_close, |
1435 | .pipe_close = graph_trace_close, | 1444 | .pipe_close = graph_trace_close, |
1436 | .wait_pipe = poll_wait_pipe, | 1445 | .wait_pipe = poll_wait_pipe, |
1437 | .init = graph_trace_init, | 1446 | .init = graph_trace_init, |
1438 | .reset = graph_trace_reset, | 1447 | .reset = graph_trace_reset, |
1439 | .print_line = print_graph_function, | 1448 | .print_line = print_graph_function, |
1440 | .print_header = print_graph_headers, | 1449 | .print_header = print_graph_headers, |
1441 | .flags = &tracer_flags, | 1450 | .flags = &tracer_flags, |
1442 | .set_flag = func_graph_set_flag, | 1451 | .set_flag = func_graph_set_flag, |
1443 | #ifdef CONFIG_FTRACE_SELFTEST | 1452 | #ifdef CONFIG_FTRACE_SELFTEST |
1444 | .selftest = trace_selftest_startup_function_graph, | 1453 | .selftest = trace_selftest_startup_function_graph, |
1445 | #endif | 1454 | #endif |
1446 | }; | 1455 | }; |
1447 | 1456 | ||
1448 | static __init int init_graph_trace(void) | 1457 | static __init int init_graph_trace(void) |
1449 | { | 1458 | { |
1450 | max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1); | 1459 | max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1); |
1451 | 1460 | ||
1452 | if (!register_ftrace_event(&graph_trace_entry_event)) { | 1461 | if (!register_ftrace_event(&graph_trace_entry_event)) { |
1453 | pr_warning("Warning: could not register graph trace events\n"); | 1462 | pr_warning("Warning: could not register graph trace events\n"); |
1454 | return 1; | 1463 | return 1; |
1455 | } | 1464 | } |
1456 | 1465 | ||
1457 | if (!register_ftrace_event(&graph_trace_ret_event)) { | 1466 | if (!register_ftrace_event(&graph_trace_ret_event)) { |
1458 | pr_warning("Warning: could not register graph trace events\n"); | 1467 | pr_warning("Warning: could not register graph trace events\n"); |
1459 | return 1; | 1468 | return 1; |
1460 | } | 1469 | } |
1461 | 1470 | ||
1462 | return register_tracer(&graph_trace); | 1471 | return register_tracer(&graph_trace); |
1463 | } | 1472 | } |
1464 | 1473 | ||
1465 | device_initcall(init_graph_trace); | 1474 | device_initcall(init_graph_trace); |
1466 | 1475 |