Commit be1eca39319689aed7d3aedb9c3bece9469fe10f

Authored by Jiri Olsa
Committed by Steven Rostedt
1 parent d184b31c0e

tracing: Fix function graph trace_pipe to properly display failed entries

There is a case where the graph tracer might get confused and omit
displaying a single record. This mostly affects the trace_pipe, since
the trace_seq buffer is unlikely to overflow when reading the trace
file.

The function_graph tracer walks the trace entries while keeping a
pointer to the current record:

current ->  func1 ENTRY
            func2 ENTRY
            func2 RETURN
            func1 RETURN

When a function ENTRY is encountered, the tracer moves the pointer to
the next entry to check whether the function is nested or a leaf:

            func1 ENTRY
current ->  func2 ENTRY
            func2 RETURN
            func1 RETURN

If writing out the rest of the function record fills the trace_seq
buffer, the trace_pipe read will ignore this entry. The next read will
then start at the current location, but the first entry (func1) will
have been discarded.

This patch keeps a copy of the current entry in the iterator's private
storage and tracks when the trace_seq buffer fills. When the buffer
fills, the copy of the entry is reused in the next iteration.
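
To make the save-and-replay idea concrete, here is a minimal user-space
sketch of the pattern. It is an illustration only, not the kernel code:
pipe_state, read_one, consume_next and the toy ring[] array are all
hypothetical stand-ins, and the real patch keeps its copies in struct
fgraph_data and tests the trace_seq "full" flag instead.

#include <stdio.h>

struct entry { const char *func; const char *type; };

/* Stand-in for the ring buffer: a destructive reader over an array. */
static struct entry ring[] = {
	{ "func1", "ENTRY"  }, { "func2", "ENTRY"  },
	{ "func2", "RETURN" }, { "func1", "RETURN" },
};
static unsigned int ring_pos;

static int consume_next(struct entry *e)
{
	if (ring_pos >= sizeof(ring) / sizeof(ring[0]))
		return 0;
	*e = ring[ring_pos++];	/* the entry is gone from the buffer now */
	return 1;
}

struct pipe_state {
	struct entry saved;	/* copy of the last consumed entry */
	int failed;		/* did the previous write overflow? */
};

/* One trace_pipe-style read into a buffer with 'space' bytes free. */
static int read_one(struct pipe_state *st, char *buf, size_t space)
{
	struct entry e;
	int n;

	if (st->failed) {
		e = st->saved;	/* replay: consumed last time, never shown */
	} else {
		if (!consume_next(&e))
			return 0;
		st->saved = e;	/* preserve before attempting the write */
	}

	n = snprintf(buf, space, "%s %s\n", e.func, e.type);
	st->failed = (n < 0 || (size_t)n >= space);	/* like trace_seq "full" */
	return 1;
}

int main(void)
{
	struct pipe_state st = { .failed = 0 };
	char buf[32];
	size_t space = 4;	/* pretend the seq buffer is nearly full already */

	while (read_one(&st, buf, space)) {
		if (!st.failed)
			fputs(buf, stdout);
		space = sizeof(buf);	/* later reads start with an empty buffer */
	}
	return 0;
}

On the first read the write overflows, but the preserved copy lets the
next read replay "func1 ENTRY" instead of silently dropping it, which is
exactly the func1 case described above.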

[
  This patch has been largely modified by Steven Rostedt in order to
  clean it up and simplify it. The original idea and concept was from
  Jirka and for that, this patch will go under his name to give him
  the credit he deserves. But because this was modified by Steven Rostedt,
  anything wrong with the patch should be blamed on Steven.
]

Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <1259067458-27143-1-git-send-email-jolsa@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>

Showing 1 changed file with 131 additions and 34 deletions

kernel/trace/trace_functions_graph.c
... ... @@ -14,11 +14,22 @@
14 14 #include "trace.h"
15 15 #include "trace_output.h"
16 16  
17   -struct fgraph_data {
  17 +struct fgraph_cpu_data {
18 18 pid_t last_pid;
19 19 int depth;
  20 + int ignore;
20 21 };
21 22  
  23 +struct fgraph_data {
  24 + struct fgraph_cpu_data *cpu_data;
  25 +
  26 + /* Place to preserve last processed entry. */
  27 + struct ftrace_graph_ent_entry ent;
  28 + struct ftrace_graph_ret_entry ret;
  29 + int failed;
  30 + int cpu;
  31 +};
  32 +
22 33 #define TRACE_GRAPH_INDENT 2
23 34  
24 35 /* Flag options */
... ... @@ -384,7 +395,7 @@
384 395 if (!data)
385 396 return TRACE_TYPE_HANDLED;
386 397  
387   - last_pid = &(per_cpu_ptr(data, cpu)->last_pid);
  398 + last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
388 399  
389 400 if (*last_pid == pid)
390 401 return TRACE_TYPE_HANDLED;
... ... @@ -435,27 +446,50 @@
435 446 get_return_for_leaf(struct trace_iterator *iter,
436 447 struct ftrace_graph_ent_entry *curr)
437 448 {
438   - struct ring_buffer_iter *ring_iter;
  449 + struct fgraph_data *data = iter->private;
  450 + struct ring_buffer_iter *ring_iter = NULL;
439 451 struct ring_buffer_event *event;
440 452 struct ftrace_graph_ret_entry *next;
441 453  
442   - ring_iter = iter->buffer_iter[iter->cpu];
  454 + /*
  455 + * If the previous output failed to write to the seq buffer,
  456 + * then we just reuse the data from before.
  457 + */
  458 + if (data && data->failed) {
  459 + curr = &data->ent;
  460 + next = &data->ret;
  461 + } else {
443 462  
444   - /* First peek to compare current entry and the next one */
445   - if (ring_iter)
446   - event = ring_buffer_iter_peek(ring_iter, NULL);
447   - else {
448   - /* We need to consume the current entry to see the next one */
449   - ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
450   - event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
451   - NULL);
452   - }
  463 + ring_iter = iter->buffer_iter[iter->cpu];
453 464  
454   - if (!event)
455   - return NULL;
  465 + /* First peek to compare current entry and the next one */
  466 + if (ring_iter)
  467 + event = ring_buffer_iter_peek(ring_iter, NULL);
  468 + else {
  469 + /*
  470 + * We need to consume the current entry to see
  471 + * the next one.
  472 + */
  473 + ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
  474 + event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
  475 + NULL);
  476 + }
456 477  
457   - next = ring_buffer_event_data(event);
  478 + if (!event)
  479 + return NULL;
458 480  
  481 + next = ring_buffer_event_data(event);
  482 +
  483 + if (data) {
  484 + /*
  485 + * Save current and next entries for later reference
  486 + * if the output fails.
  487 + */
  488 + data->ent = *curr;
  489 + data->ret = *next;
  490 + }
  491 + }
  492 +
459 493 if (next->ent.type != TRACE_GRAPH_RET)
460 494 return NULL;
461 495  
... ... @@ -640,7 +674,7 @@
640 674  
641 675 if (data) {
642 676 int cpu = iter->cpu;
643   - int *depth = &(per_cpu_ptr(data, cpu)->depth);
  677 + int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
644 678  
645 679 /*
646 680 * Comments display at + 1 to depth. Since
... ... @@ -688,7 +722,7 @@
688 722  
689 723 if (data) {
690 724 int cpu = iter->cpu;
691   - int *depth = &(per_cpu_ptr(data, cpu)->depth);
  725 + int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
692 726  
693 727 *depth = call->depth;
694 728 }
... ... @@ -782,19 +816,34 @@
782 816 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
783 817 struct trace_iterator *iter)
784 818 {
785   - int cpu = iter->cpu;
  819 + struct fgraph_data *data = iter->private;
786 820 struct ftrace_graph_ent *call = &field->graph_ent;
787 821 struct ftrace_graph_ret_entry *leaf_ret;
  822 +	enum print_line_t ret;
  823 + int cpu = iter->cpu;
788 824  
789 825 if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
790 826 return TRACE_TYPE_PARTIAL_LINE;
791 827  
792 828 leaf_ret = get_return_for_leaf(iter, field);
793 829 if (leaf_ret)
794   - return print_graph_entry_leaf(iter, field, leaf_ret, s);
  830 + ret = print_graph_entry_leaf(iter, field, leaf_ret, s);
795 831 else
796   - return print_graph_entry_nested(iter, field, s, cpu);
  832 + ret = print_graph_entry_nested(iter, field, s, cpu);
797 833  
  834 + if (data) {
  835 + /*
  836 +	 * If we failed to write our output, then we need to make a
  837 +	 * note of it, because we have already consumed our entry.
  838 + */
  839 + if (s->full) {
  840 + data->failed = 1;
  841 + data->cpu = cpu;
  842 + } else
  843 + data->failed = 0;
  844 + }
  845 +
  846 + return ret;
798 847 }
799 848  
800 849 static enum print_line_t
... ... @@ -810,7 +859,7 @@
810 859  
811 860 if (data) {
812 861 int cpu = iter->cpu;
813   - int *depth = &(per_cpu_ptr(data, cpu)->depth);
  862 + int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
814 863  
815 864 /*
816 865 * Comments display at + 1 to depth. This is the
... ... @@ -873,7 +922,7 @@
873 922 int i;
874 923  
875 924 if (data)
876   - depth = per_cpu_ptr(data, iter->cpu)->depth;
  925 + depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
877 926  
878 927 if (print_graph_prologue(iter, s, 0, 0))
879 928 return TRACE_TYPE_PARTIAL_LINE;
... ... @@ -941,9 +990,34 @@
941 990 enum print_line_t
942 991 print_graph_function(struct trace_iterator *iter)
943 992 {
  993 + struct ftrace_graph_ent_entry *field;
  994 + struct fgraph_data *data = iter->private;
944 995 struct trace_entry *entry = iter->ent;
945 996 struct trace_seq *s = &iter->seq;
  997 + int cpu = iter->cpu;
  998 + int ret;
946 999  
  1000 + if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
  1001 + per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
  1002 + return TRACE_TYPE_HANDLED;
  1003 + }
  1004 +
  1005 + /*
  1006 + * If the last output failed, there's a possibility we need
  1007 +	 * to print out the missing entry, which would never go out.
  1008 + */
  1009 + if (data && data->failed) {
  1010 + field = &data->ent;
  1011 + iter->cpu = data->cpu;
  1012 + ret = print_graph_entry(field, s, iter);
  1013 + if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
  1014 + per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
  1015 + ret = TRACE_TYPE_NO_CONSUME;
  1016 + }
  1017 + iter->cpu = cpu;
  1018 + return ret;
  1019 + }
  1020 +
947 1021 switch (entry->type) {
948 1022 case TRACE_GRAPH_ENT: {
949 1023 /*
... ... @@ -952,7 +1026,7 @@
952 1026 * sizeof(struct ftrace_graph_ent_entry) is very small,
953 1027 		 * it can be safely saved on the stack.
954 1028 */
955   - struct ftrace_graph_ent_entry *field, saved;
  1029 + struct ftrace_graph_ent_entry saved;
956 1030 trace_assign_type(field, entry);
957 1031 saved = *field;
958 1032 return print_graph_entry(&saved, s, iter);
... ... @@ -1030,31 +1104,54 @@
1030 1104 static void graph_trace_open(struct trace_iterator *iter)
1031 1105 {
1032 1106 /* pid and depth on the last trace processed */
1033   - struct fgraph_data *data = alloc_percpu(struct fgraph_data);
  1107 + struct fgraph_data *data;
1034 1108 int cpu;
1035 1109  
  1110 + iter->private = NULL;
  1111 +
  1112 + data = kzalloc(sizeof(*data), GFP_KERNEL);
1036 1113 if (!data)
1037   - pr_warning("function graph tracer: not enough memory\n");
1038   - else
1039   - for_each_possible_cpu(cpu) {
1040   - pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
1041   - int *depth = &(per_cpu_ptr(data, cpu)->depth);
1042   - *pid = -1;
1043   - *depth = 0;
1044   - }
  1114 + goto out_err;
1045 1115  
  1116 + data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
  1117 + if (!data->cpu_data)
  1118 + goto out_err_free;
  1119 +
  1120 + for_each_possible_cpu(cpu) {
  1121 + pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
  1122 + int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
  1123 + int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
  1124 + *pid = -1;
  1125 + *depth = 0;
  1126 + *ignore = 0;
  1127 + }
  1128 +
1046 1129 iter->private = data;
  1130 +
  1131 + return;
  1132 +
  1133 + out_err_free:
  1134 + kfree(data);
  1135 + out_err:
  1136 + pr_warning("function graph tracer: not enough memory\n");
1047 1137 }
1048 1138  
1049 1139 static void graph_trace_close(struct trace_iterator *iter)
1050 1140 {
1051   - free_percpu(iter->private);
  1141 + struct fgraph_data *data = iter->private;
  1142 +
  1143 + if (data) {
  1144 + free_percpu(data->cpu_data);
  1145 + kfree(data);
  1146 + }
1052 1147 }
1053 1148  
1054 1149 static struct tracer graph_trace __read_mostly = {
1055 1150 .name = "function_graph",
1056 1151 .open = graph_trace_open,
  1152 + .pipe_open = graph_trace_open,
1057 1153 .close = graph_trace_close,
  1154 + .pipe_close = graph_trace_close,
1058 1155 .wait_pipe = poll_wait_pipe,
1059 1156 .init = graph_trace_init,
1060 1157 .reset = graph_trace_reset,