Commit 5087f8d2a2f2daff5a913d72d8ea3ad601948e10

Authored by Steven Rostedt
1 parent 40ce74f19c

function-graph: show binary events as comments

With the added TRACE_EVENT macro, the events no longer appear in
the function graph tracer. This is because the function graph tracer
did not know how to display those entries; it was only aware of its
own entries and the printk entries.

By using the event callback feature, the graph tracer can now display
the events.
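
In essence, print_graph_comment() now dispatches on the trace entry
type and, for anything that is not a bprintk/printk entry, falls back
to the event's registered output callback (condensed from the diff
below; error handling trimmed):

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		break;
	default:
		/* any other event: use its registered callback */
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;
		ret = event->trace(iter, sym_flags);
	}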

 # echo irq > /debug/tracing/set_event

This can show:

 0)               |          handle_IRQ_event() {
 0)               |            /* irq_handler_entry: irq=48 handler=eth0 */
 0)               |            e1000_intr() {
 0)   0.926 us    |              __napi_schedule();
 0)   3.888 us    |            }
 0)               |            /* irq_handler_exit: irq=48 return=handled */
 0)   0.655 us    |            runqueue_is_locked();
 0)               |            __wake_up() {
 0)   0.831 us    |              _spin_lock_irqsave();

The irq entry and exit events show up as comments.
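
For reference, a typical sequence to reproduce this output (assuming
debugfs is mounted on /debug, as in the example above, and the
function graph tracer is enabled in the kernel config) would be:

 # echo function_graph > /debug/tracing/current_tracer
 # echo irq > /debug/tracing/set_event
 # cat /debug/tracing/trace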

Signed-off-by: Steven Rostedt <srostedt@redhat.com>

1 changed file with 28 additions and 12 deletions

kernel/trace/trace_functions_graph.c
 /*
  *
  * Function graph tracer.
  * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
  * Mostly borrowed from function tracer which
  * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
  *
  */
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
 #include <linux/fs.h>

 #include "trace.h"
 #include "trace_output.h"

 struct fgraph_data {
 	pid_t last_pid;
 	int depth;
 };

 #define TRACE_GRAPH_INDENT 2

 /* Flag options */
 #define TRACE_GRAPH_PRINT_OVERRUN 0x1
 #define TRACE_GRAPH_PRINT_CPU 0x2
 #define TRACE_GRAPH_PRINT_OVERHEAD 0x4
 #define TRACE_GRAPH_PRINT_PROC 0x8
 #define TRACE_GRAPH_PRINT_DURATION 0x10
 #define TRACE_GRAPH_PRINT_ABS_TIME 0X20

 static struct tracer_opt trace_opts[] = {
 	/* Display overruns? (for self-debug purpose) */
 	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
 	/* Display CPU ? */
 	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
 	/* Display Overhead ? */
 	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
 	/* Display proc name/pid */
 	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
 	/* Display duration of execution */
 	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
 	/* Display absolute time of an entry */
 	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
 	{ } /* Empty entry */
 };

 static struct tracer_flags tracer_flags = {
 	/* Don't display overruns and proc by default */
 	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
 	       TRACE_GRAPH_PRINT_DURATION,
 	.opts = trace_opts
 };

 /* pid on the last trace processed */


 /* Add a function return address to the trace stack on thread info.*/
 int
 ftrace_push_return_trace(unsigned long ret, unsigned long long time,
 			 unsigned long func, int *depth)
 {
 	int index;

 	if (!current->ret_stack)
 		return -EBUSY;

 	/* The return trace stack is full */
 	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
 		atomic_inc(&current->trace_overrun);
 		return -EBUSY;
 	}

 	index = ++current->curr_ret_stack;
 	barrier();
 	current->ret_stack[index].ret = ret;
 	current->ret_stack[index].func = func;
 	current->ret_stack[index].calltime = time;
 	*depth = index;

 	return 0;
 }

 /* Retrieve a function return address to the trace stack on thread info.*/
 void
 ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
 {
 	int index;

 	index = current->curr_ret_stack;

 	if (unlikely(index < 0)) {
 		ftrace_graph_stop();
 		WARN_ON(1);
 		/* Might as well panic, otherwise we have no where to go */
 		*ret = (unsigned long)panic;
 		return;
 	}

 	*ret = current->ret_stack[index].ret;
 	trace->func = current->ret_stack[index].func;
 	trace->calltime = current->ret_stack[index].calltime;
 	trace->overrun = atomic_read(&current->trace_overrun);
 	trace->depth = index;
 	barrier();
 	current->curr_ret_stack--;

 }

 /*
  * Send the trace to the ring-buffer.
  * @return the original return address.
  */
 unsigned long ftrace_return_to_handler(void)
 {
 	struct ftrace_graph_ret trace;
 	unsigned long ret;

 	ftrace_pop_return_trace(&trace, &ret);
 	trace.rettime = trace_clock_local();
 	ftrace_graph_return(&trace);

 	if (unlikely(!ret)) {
 		ftrace_graph_stop();
 		WARN_ON(1);
 		/* Might as well panic. What else to do? */
 		ret = (unsigned long)panic;
 	}

 	return ret;
 }

 static int graph_trace_init(struct trace_array *tr)
 {
 	int ret = register_ftrace_graph(&trace_graph_return,
 					&trace_graph_entry);
 	if (ret)
 		return ret;
 	tracing_start_cmdline_record();

 	return 0;
 }

 static void graph_trace_reset(struct trace_array *tr)
 {
 	tracing_stop_cmdline_record();
 	unregister_ftrace_graph();
 }

 static inline int log10_cpu(int nb)
 {
 	if (nb / 100)
 		return 3;
 	if (nb / 10)
 		return 2;
 	return 1;
 }

 static enum print_line_t
 print_graph_cpu(struct trace_seq *s, int cpu)
 {
 	int i;
 	int ret;
 	int log10_this = log10_cpu(cpu);
 	int log10_all = log10_cpu(cpumask_weight(cpu_online_mask));


 	/*
 	 * Start with a space character - to make it stand out
 	 * to the right a bit when trace output is pasted into
 	 * email:
 	 */
 	ret = trace_seq_printf(s, " ");

 	/*
 	 * Tricky - we space the CPU field according to the max
 	 * number of online CPUs. On a 2-cpu system it would take
 	 * a maximum of 1 digit - on a 128 cpu system it would
 	 * take up to 3 digits:
 	 */
 	for (i = 0; i < log10_all - log10_this; i++) {
 		ret = trace_seq_printf(s, " ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
 	ret = trace_seq_printf(s, "%d) ", cpu);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;

 	return TRACE_TYPE_HANDLED;
 }

 #define TRACE_GRAPH_PROCINFO_LENGTH 14

 static enum print_line_t
 print_graph_proc(struct trace_seq *s, pid_t pid)
 {
 	char comm[TASK_COMM_LEN];
 	/* sign + log10(MAX_INT) + '\0' */
 	char pid_str[11];
 	int spaces = 0;
 	int ret;
 	int len;
 	int i;

 	trace_find_cmdline(pid, comm);
 	comm[7] = '\0';
 	sprintf(pid_str, "%d", pid);

 	/* 1 stands for the "-" character */
 	len = strlen(comm) + strlen(pid_str) + 1;

 	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
 		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

 	/* First spaces to align center */
 	for (i = 0; i < spaces / 2; i++) {
 		ret = trace_seq_printf(s, " ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}

 	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;

 	/* Last spaces to align center */
 	for (i = 0; i < spaces - (spaces / 2); i++) {
 		ret = trace_seq_printf(s, " ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
 	return TRACE_TYPE_HANDLED;
 }


 /* If the pid changed since the last trace, output this event */
 static enum print_line_t
 verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
 {
 	pid_t prev_pid;
 	pid_t *last_pid;
 	int ret;

 	if (!data)
 		return TRACE_TYPE_HANDLED;

 	last_pid = &(per_cpu_ptr(data, cpu)->last_pid);

 	if (*last_pid == pid)
 		return TRACE_TYPE_HANDLED;

 	prev_pid = *last_pid;
 	*last_pid = pid;

 	if (prev_pid == -1)
 		return TRACE_TYPE_HANDLED;
 /*
  * Context-switch trace line:

  ------------------------------------------
  | 1) migration/0--1 => sshd-1755
  ------------------------------------------

  */
 	ret = trace_seq_printf(s,
 		" ------------------------------------------\n");
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;

 	ret = print_graph_cpu(s, cpu);
 	if (ret == TRACE_TYPE_PARTIAL_LINE)
 		return TRACE_TYPE_PARTIAL_LINE;

 	ret = print_graph_proc(s, prev_pid);
 	if (ret == TRACE_TYPE_PARTIAL_LINE)
 		return TRACE_TYPE_PARTIAL_LINE;

 	ret = trace_seq_printf(s, " => ");
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;

 	ret = print_graph_proc(s, pid);
 	if (ret == TRACE_TYPE_PARTIAL_LINE)
 		return TRACE_TYPE_PARTIAL_LINE;

 	ret = trace_seq_printf(s,
 		"\n ------------------------------------------\n\n");
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;

 	return TRACE_TYPE_HANDLED;
 }

 static struct ftrace_graph_ret_entry *
 get_return_for_leaf(struct trace_iterator *iter,
 		struct ftrace_graph_ent_entry *curr)
 {
 	struct ring_buffer_iter *ring_iter;
 	struct ring_buffer_event *event;
 	struct ftrace_graph_ret_entry *next;

 	ring_iter = iter->buffer_iter[iter->cpu];

 	/* First peek to compare current entry and the next one */
 	if (ring_iter)
 		event = ring_buffer_iter_peek(ring_iter, NULL);
 	else {
 		/* We need to consume the current entry to see the next one */
 		ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
 		event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
 					NULL);
 	}

 	if (!event)
 		return NULL;

 	next = ring_buffer_event_data(event);

 	if (next->ent.type != TRACE_GRAPH_RET)
 		return NULL;

 	if (curr->ent.pid != next->ent.pid ||
 			curr->graph_ent.func != next->ret.func)
 		return NULL;

 	/* this is a leaf, now advance the iterator */
 	if (ring_iter)
 		ring_buffer_read(ring_iter, NULL);

 	return next;
 }

 /* Signal a overhead of time execution to the output */
 static int
 print_graph_overhead(unsigned long long duration, struct trace_seq *s)
 {
 	/* If duration disappear, we don't need anything */
 	if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
 		return 1;

 	/* Non nested entry or return */
 	if (duration == -1)
 		return trace_seq_printf(s, " ");

 	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
 		/* Duration exceeded 100 msecs */
 		if (duration > 100000ULL)
 			return trace_seq_printf(s, "! ");

 		/* Duration exceeded 10 msecs */
 		if (duration > 10000ULL)
 			return trace_seq_printf(s, "+ ");
 	}

 	return trace_seq_printf(s, " ");
 }

 static int print_graph_abs_time(u64 t, struct trace_seq *s)
 {
 	unsigned long usecs_rem;

 	usecs_rem = do_div(t, NSEC_PER_SEC);
 	usecs_rem /= 1000;

 	return trace_seq_printf(s, "%5lu.%06lu | ",
 			(unsigned long)t, usecs_rem);
 }

 static enum print_line_t
 print_graph_irq(struct trace_iterator *iter, unsigned long addr,
 		enum trace_type type, int cpu, pid_t pid)
 {
 	int ret;
 	struct trace_seq *s = &iter->seq;

 	if (addr < (unsigned long)__irqentry_text_start ||
 		addr >= (unsigned long)__irqentry_text_end)
 		return TRACE_TYPE_UNHANDLED;

 	/* Absolute time */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
 		ret = print_graph_abs_time(iter->ts, s);
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}

 	/* Cpu */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
 		ret = print_graph_cpu(s, cpu);
 		if (ret == TRACE_TYPE_PARTIAL_LINE)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
 	/* Proc */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
 		ret = print_graph_proc(s, pid);
 		if (ret == TRACE_TYPE_PARTIAL_LINE)
 			return TRACE_TYPE_PARTIAL_LINE;
 		ret = trace_seq_printf(s, " | ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}

 	/* No overhead */
 	ret = print_graph_overhead(-1, s);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;

 	if (type == TRACE_GRAPH_ENT)
 		ret = trace_seq_printf(s, "==========>");
 	else
 		ret = trace_seq_printf(s, "<==========");

 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;

 	/* Don't close the duration column if haven't one */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
 		trace_seq_printf(s, " |");
 	ret = trace_seq_printf(s, "\n");

 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 	return TRACE_TYPE_HANDLED;
 }

 static enum print_line_t
 print_graph_duration(unsigned long long duration, struct trace_seq *s)
 {
 	unsigned long nsecs_rem = do_div(duration, 1000);
 	/* log10(ULONG_MAX) + '\0' */
 	char msecs_str[21];
 	char nsecs_str[5];
 	int ret, len;
 	int i;

 	sprintf(msecs_str, "%lu", (unsigned long) duration);

 	/* Print msecs */
 	ret = trace_seq_printf(s, "%s", msecs_str);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;

 	len = strlen(msecs_str);

 	/* Print nsecs (we don't want to exceed 7 numbers) */
 	if (len < 7) {
 		snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
 		ret = trace_seq_printf(s, ".%s", nsecs_str);
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 		len += strlen(nsecs_str);
 	}

 	ret = trace_seq_printf(s, " us ");
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;

 	/* Print remaining spaces to fit the row's width */
 	for (i = len; i < 7; i++) {
 		ret = trace_seq_printf(s, " ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}

 	ret = trace_seq_printf(s, "| ");
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 	return TRACE_TYPE_HANDLED;

 }

 /* Case of a leaf function on its call entry */
 static enum print_line_t
 print_graph_entry_leaf(struct trace_iterator *iter,
 		struct ftrace_graph_ent_entry *entry,
 		struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
 {
 	struct fgraph_data *data = iter->private;
 	struct ftrace_graph_ret *graph_ret;
 	struct ftrace_graph_ent *call;
 	unsigned long long duration;
 	int ret;
 	int i;

 	graph_ret = &ret_entry->ret;
 	call = &entry->graph_ent;
 	duration = graph_ret->rettime - graph_ret->calltime;

 	if (data) {
 		int cpu = iter->cpu;
 		int *depth = &(per_cpu_ptr(data, cpu)->depth);

 		/*
 		 * Comments display at + 1 to depth. Since
 		 * this is a leaf function, keep the comments
 		 * equal to this depth.
 		 */
 		*depth = call->depth - 1;
 	}

 	/* Overhead */
 	ret = print_graph_overhead(duration, s);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;

 	/* Duration */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
 		ret = print_graph_duration(duration, s);
 		if (ret == TRACE_TYPE_PARTIAL_LINE)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}

 	/* Function */
 	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
 		ret = trace_seq_printf(s, " ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}

 	ret = seq_print_ip_sym(s, call->func, 0);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;

 	ret = trace_seq_printf(s, "();\n");
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;

 	return TRACE_TYPE_HANDLED;
 }

 static enum print_line_t
 print_graph_entry_nested(struct trace_iterator *iter,
 			 struct ftrace_graph_ent_entry *entry,
 			 struct trace_seq *s, int cpu)
 {
 	struct ftrace_graph_ent *call = &entry->graph_ent;
 	struct fgraph_data *data = iter->private;
 	int ret;
 	int i;

 	if (data) {
 		int cpu = iter->cpu;
 		int *depth = &(per_cpu_ptr(data, cpu)->depth);

 		*depth = call->depth;
 	}

 	/* No overhead */
 	ret = print_graph_overhead(-1, s);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;

 	/* No time */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
 		ret = trace_seq_printf(s, " | ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}

 	/* Function */
 	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
 		ret = trace_seq_printf(s, " ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}

 	ret = seq_print_ip_sym(s, call->func, 0);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;

 	ret = trace_seq_printf(s, "() {\n");
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;

 	/*
 	 * we already consumed the current entry to check the next one
 	 * and see if this is a leaf.
 	 */
 	return TRACE_TYPE_NO_CONSUME;
 }

 static enum print_line_t
 print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
 		int type, unsigned long addr)
 {
 	struct fgraph_data *data = iter->private;
 	struct trace_entry *ent = iter->ent;
 	int cpu = iter->cpu;
 	int ret;

 	/* Pid */
 	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
 		return TRACE_TYPE_PARTIAL_LINE;

 	if (type) {
 		/* Interrupt */
 		ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
 		if (ret == TRACE_TYPE_PARTIAL_LINE)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}

 	/* Absolute time */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
 		ret = print_graph_abs_time(iter->ts, s);
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}

 	/* Cpu */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
 		ret = print_graph_cpu(s, cpu);
 		if (ret == TRACE_TYPE_PARTIAL_LINE)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}

 	/* Proc */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
 		ret = print_graph_proc(s, ent->pid);
 		if (ret == TRACE_TYPE_PARTIAL_LINE)
 			return TRACE_TYPE_PARTIAL_LINE;

 		ret = trace_seq_printf(s, " | ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}

 	return 0;
 }

 static enum print_line_t
 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 			struct trace_iterator *iter)
 {
 	int cpu = iter->cpu;
 	struct ftrace_graph_ent *call = &field->graph_ent;
 	struct ftrace_graph_ret_entry *leaf_ret;

 	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
 		return TRACE_TYPE_PARTIAL_LINE;

 	leaf_ret = get_return_for_leaf(iter, field);
 	if (leaf_ret)
 		return print_graph_entry_leaf(iter, field, leaf_ret, s);
 	else
 		return print_graph_entry_nested(iter, field, s, cpu);

 }

 static enum print_line_t
 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 		struct trace_entry *ent, struct trace_iterator *iter)
 {
 	unsigned long long duration = trace->rettime - trace->calltime;
 	struct fgraph_data *data = iter->private;
 	pid_t pid = ent->pid;
 	int cpu = iter->cpu;
 	int ret;
 	int i;

 	if (data) {
 		int cpu = iter->cpu;
 		int *depth = &(per_cpu_ptr(data, cpu)->depth);

 		/*
 		 * Comments display at + 1 to depth. This is the
 		 * return from a function, we now want the comments
 		 * to display at the same level of the bracket.
 		 */
 		*depth = trace->depth - 1;
 	}

 	if (print_graph_prologue(iter, s, 0, 0))
 		return TRACE_TYPE_PARTIAL_LINE;

 	/* Overhead */
 	ret = print_graph_overhead(duration, s);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;

 	/* Duration */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
 		ret = print_graph_duration(duration, s);
 		if (ret == TRACE_TYPE_PARTIAL_LINE)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}

 	/* Closing brace */
 	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
 		ret = trace_seq_printf(s, " ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}

 	ret = trace_seq_printf(s, "}\n");
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;

 	/* Overrun */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
 		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
 					trace->overrun);
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}

 	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid);
 	if (ret == TRACE_TYPE_PARTIAL_LINE)
 		return TRACE_TYPE_PARTIAL_LINE;

 	return TRACE_TYPE_HANDLED;
 }

 static enum print_line_t
-print_graph_comment(struct bprint_entry *trace, struct trace_seq *s,
-		    struct trace_entry *ent, struct trace_iterator *iter)
+print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
+		    struct trace_iterator *iter)
 {
+	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
 	struct fgraph_data *data = iter->private;
+	struct trace_event *event;
 	int depth = 0;
 	int ret;
 	int i;

 	if (data)
 		depth = per_cpu_ptr(data, iter->cpu)->depth;

 	if (print_graph_prologue(iter, s, 0, 0))
 		return TRACE_TYPE_PARTIAL_LINE;

 	/* No overhead */
 	ret = print_graph_overhead(-1, s);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;

 	/* No time */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
 		ret = trace_seq_printf(s, " | ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}

 	/* Indentation */
 	if (depth > 0)
 		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
 			ret = trace_seq_printf(s, " ");
 			if (!ret)
 				return TRACE_TYPE_PARTIAL_LINE;
 		}

 	/* The comment */
 	ret = trace_seq_printf(s, "/* ");
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;

-	ret = trace_seq_bprintf(s, trace->fmt, trace->buf);
-	if (!ret)
-		return TRACE_TYPE_PARTIAL_LINE;
+	switch (iter->ent->type) {
+	case TRACE_BPRINT:
+		ret = trace_print_bprintk_msg_only(iter);
+		if (ret != TRACE_TYPE_HANDLED)
+			return ret;
+		break;
+	case TRACE_PRINT:
+		ret = trace_print_printk_msg_only(iter);
+		if (ret != TRACE_TYPE_HANDLED)
+			return ret;
+		break;
+	default:
+		event = ftrace_find_event(ent->type);
+		if (!event)
+			return TRACE_TYPE_UNHANDLED;

+		ret = event->trace(iter, sym_flags);
+		if (ret != TRACE_TYPE_HANDLED)
+			return ret;
+	}
+
 	/* Strip ending newline */
 	if (s->buffer[s->len - 1] == '\n') {
 		s->buffer[s->len - 1] = '\0';
 		s->len--;
 	}

 	ret = trace_seq_printf(s, " */\n");
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;

 	return TRACE_TYPE_HANDLED;
 }


 enum print_line_t
 print_graph_function(struct trace_iterator *iter)
 {
-	struct trace_seq *s = &iter->seq;
 	struct trace_entry *entry = iter->ent;
+	struct trace_seq *s = &iter->seq;

 	switch (entry->type) {
 	case TRACE_GRAPH_ENT: {
 		struct ftrace_graph_ent_entry *field;
 		trace_assign_type(field, entry);
 		return print_graph_entry(field, s, iter);
 	}
 	case TRACE_GRAPH_RET: {
 		struct ftrace_graph_ret_entry *field;
 		trace_assign_type(field, entry);
 		return print_graph_return(&field->ret, s, entry, iter);
 	}
-	case TRACE_BPRINT: {
-		struct bprint_entry *field;
-		trace_assign_type(field, entry);
-		return print_graph_comment(field, s, entry, iter);
-	}
 	default:
-		return TRACE_TYPE_UNHANDLED;
+		return print_graph_comment(s, entry, iter);
 	}
+
+	return TRACE_TYPE_HANDLED;
 }

 static void print_graph_headers(struct seq_file *s)
 {
 	/* 1st line */
 	seq_printf(s, "# ");
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
 		seq_printf(s, " TIME ");
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
 		seq_printf(s, "CPU");
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
 		seq_printf(s, " TASK/PID ");
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
 		seq_printf(s, " DURATION ");
 	seq_printf(s, " FUNCTION CALLS\n");

 	/* 2nd line */
 	seq_printf(s, "# ");
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
 		seq_printf(s, " | ");
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
 		seq_printf(s, "| ");
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
 		seq_printf(s, " | | ");
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
 		seq_printf(s, " | | ");
 	seq_printf(s, " | | | |\n");
 }

 static void graph_trace_open(struct trace_iterator *iter)
 {
 	/* pid and depth on the last trace processed */
 	struct fgraph_data *data = alloc_percpu(struct fgraph_data);
 	int cpu;

 	if (!data)
 		pr_warning("function graph tracer: not enough memory\n");
 	else
 		for_each_possible_cpu(cpu) {
 			pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
 			int *depth = &(per_cpu_ptr(data, cpu)->depth);
 			*pid = -1;
 			*depth = 0;
 		}

 	iter->private = data;
 }

 static void graph_trace_close(struct trace_iterator *iter)
 {
 	free_percpu(iter->private);
 }

 static struct tracer graph_trace __read_mostly = {
 	.name		= "function_graph",
 	.open		= graph_trace_open,
 	.close		= graph_trace_close,
 	.wait_pipe	= poll_wait_pipe,
 	.init		= graph_trace_init,
 	.reset		= graph_trace_reset,
 	.print_line	= print_graph_function,
 	.print_header	= print_graph_headers,
 	.flags		= &tracer_flags,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_function_graph,
 #endif
 };

 static __init int init_graph_trace(void)