Commit a63ce5b306855bccdacba95c03bfc293316c8ae3

Authored by Steven Rostedt
Committed by Steven Rostedt
1 parent 29bf4a5e3f

tracing: Buffer the output of seq_file in case of filled buffer

If the seq_read fills the buffer it will call s_start again on the next
iteration with the same position. This causes a problem with the
function_graph tracer because it consumes the iteration in order to
determine leaf functions.

What happens is that the iterator stores the entry, and the function
graph plugin will look at the next entry. If that next entry is a return
of the same function and task, then the function is a leaf and the
function_graph plugin calls ring_buffer_read which moves the ring buffer
iterator forward (the trace iterator still points to the function start
entry).

The copying of the trace_seq to the seq_file buffer will fail if the
seq_file buffer is full. The seq_read will not show this entry.
The next read by userspace will cause seq_read to again call s_start
which will reuse the trace iterator entry (the function start entry).
But the function return entry was already consumed. The function graph
plugin will think that this entry is a nested function and not a leaf.

To solve this, the trace code now checks the return status of the
seq_printf (trace_print_seq). If the writing to the seq_file buffer
fails, we set a flag in the iterator (leftover) and we do not reset
the trace_seq buffer. On the next call to s_start, we check the leftover
flag, and if it is set, we just reuse the trace_seq buffer and do not
call into the plugin print functions.

Before this patch:

 2)               |      fput() {
 2)               |        __fput() {
 2)   0.550 us    |          inotify_inode_queue_event();
 2)               |          __fsnotify_parent() {
 2)   0.540 us    |          inotify_dentry_parent_queue_event();

After the patch:

 2)               |      fput() {
 2)               |        __fput() {
 2)   0.550 us    |          inotify_inode_queue_event();
 2)   0.548 us    |          __fsnotify_parent();
 2)   0.540 us    |          inotify_dentry_parent_queue_event();

[
  Updated the patch to fix a missing return 0 from the trace_print_seq()
  stub when CONFIG_TRACING is disabled.

  Reported-by: Ingo Molnar <mingo@elte.hu>
]

Reported-by: Jiri Olsa <jolsa@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>

Showing 4 changed files with 47 additions and 8 deletions Side-by-side Diff

include/linux/ftrace_event.h
... ... @@ -57,6 +57,7 @@
57 57 /* The below is zeroed out in pipe_read */
58 58 struct trace_seq seq;
59 59 struct trace_entry *ent;
  60 + int leftover;
60 61 int cpu;
61 62 u64 ts;
62 63  
include/linux/trace_seq.h
... ... @@ -33,7 +33,7 @@
33 33 __attribute__ ((format (printf, 2, 0)));
34 34 extern int
35 35 trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
36   -extern void trace_print_seq(struct seq_file *m, struct trace_seq *s);
  36 +extern int trace_print_seq(struct seq_file *m, struct trace_seq *s);
37 37 extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
38 38 size_t cnt);
39 39 extern int trace_seq_puts(struct trace_seq *s, const char *str);
40 40  
... ... @@ -55,8 +55,9 @@
55 55 return 0;
56 56 }
57 57  
58   -static inline void trace_print_seq(struct seq_file *m, struct trace_seq *s)
  58 +static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s)
59 59 {
  60 + return 0;
60 61 }
61 62 static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
62 63 size_t cnt)
kernel/trace/trace.c
... ... @@ -1516,6 +1516,8 @@
1516 1516 int i = (int)*pos;
1517 1517 void *ent;
1518 1518  
  1519 + WARN_ON_ONCE(iter->leftover);
  1520 +
1519 1521 (*pos)++;
1520 1522  
1521 1523 /* can't go backwards */
... ... @@ -1614,8 +1616,16 @@
1614 1616 ;
1615 1617  
1616 1618 } else {
1617   - l = *pos - 1;
1618   - p = s_next(m, p, &l);
  1619 + /*
  1620 + * If we overflowed the seq_file before, then we want
  1621 + * to just reuse the trace_seq buffer again.
  1622 + */
  1623 + if (iter->leftover)
  1624 + p = iter;
  1625 + else {
  1626 + l = *pos - 1;
  1627 + p = s_next(m, p, &l);
  1628 + }
1619 1629 }
1620 1630  
1621 1631 trace_event_read_lock();
... ... @@ -1923,6 +1933,7 @@
1923 1933 static int s_show(struct seq_file *m, void *v)
1924 1934 {
1925 1935 struct trace_iterator *iter = v;
  1936 + int ret;
1926 1937  
1927 1938 if (iter->ent == NULL) {
1928 1939 if (iter->tr) {
1929 1940  
... ... @@ -1942,9 +1953,27 @@
1942 1953 if (!(trace_flags & TRACE_ITER_VERBOSE))
1943 1954 print_func_help_header(m);
1944 1955 }
  1956 + } else if (iter->leftover) {
  1957 + /*
  1958 + * If we filled the seq_file buffer earlier, we
  1959 + * want to just show it now.
  1960 + */
  1961 + ret = trace_print_seq(m, &iter->seq);
  1962 +
  1963 + /* ret should this time be zero, but you never know */
  1964 + iter->leftover = ret;
  1965 +
1945 1966 } else {
1946 1967 print_trace_line(iter);
1947   - trace_print_seq(m, &iter->seq);
  1968 + ret = trace_print_seq(m, &iter->seq);
  1969 + /*
  1970 + * If we overflow the seq_file buffer, then it will
  1971 + * ask us for this data again at start up.
  1972 + * Use that instead.
  1973 + * ret is 0 if seq_file write succeeded.
  1974 + * -1 otherwise.
  1975 + */
  1976 + iter->leftover = ret;
1948 1977 }
1949 1978  
1950 1979 return 0;
kernel/trace/trace_output.c
... ... @@ -23,13 +23,21 @@
23 23  
24 24 static int next_event_type = __TRACE_LAST_TYPE + 1;
25 25  
26   -void trace_print_seq(struct seq_file *m, struct trace_seq *s)
  26 +int trace_print_seq(struct seq_file *m, struct trace_seq *s)
27 27 {
28 28 int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
  29 + int ret;
29 30  
30   - seq_write(m, s->buffer, len);
  31 + ret = seq_write(m, s->buffer, len);
31 32  
32   - trace_seq_init(s);
  33 + /*
  34 + * Only reset this buffer if we successfully wrote to the
  35 + * seq_file buffer.
  36 + */
  37 + if (!ret)
  38 + trace_seq_init(s);
  39 +
  40 + return ret;
33 41 }
34 42  
35 43 enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)