Commit 788d70dce0184eccc249ac6f05aa38b385b7497c

Authored by Ingo Molnar

Merge branch 'tip/tracing/core3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/core

Showing 6 changed files

include/linux/ftrace_event.h
... ... @@ -57,6 +57,7 @@
57 57 /* The below is zeroed out in pipe_read */
58 58 struct trace_seq seq;
59 59 struct trace_entry *ent;
  60 + int leftover;
60 61 int cpu;
61 62 u64 ts;
62 63  
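
The new leftover field lets the iterator remember that the line most recently formatted into iter->seq could not be written into the seq_file buffer and still has to be emitted. A minimal sketch of the intended pattern (it condenses the s_show() hunk in kernel/trace/trace.c further down; show_one_line() is only an illustrative name):

/* Sketch: how a seq_file show callback is expected to use ->leftover. */
static int show_one_line(struct seq_file *m, struct trace_iterator *iter)
{
	int ret;

	if (iter->leftover) {
		/* The previous pass overflowed the seq_file buffer:
		 * re-emit the text still sitting in iter->seq. */
		ret = trace_print_seq(m, &iter->seq);
	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
	}

	/* trace_print_seq() returns 0 on success, -1 on overflow;
	 * remember a failure so the same line is retried next time. */
	iter->leftover = ret;

	return 0;
}
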
include/linux/trace_seq.h
... ... @@ -14,6 +14,7 @@
14 14 unsigned char buffer[PAGE_SIZE];
15 15 unsigned int len;
16 16 unsigned int readpos;
  17 + int full;
17 18 };
18 19  
19 20 static inline void
... ... @@ -21,6 +22,7 @@
21 22 {
22 23 s->len = 0;
23 24 s->readpos = 0;
  25 + s->full = 0;
24 26 }
25 27  
26 28 /*
... ... @@ -33,7 +35,7 @@
33 35 __attribute__ ((format (printf, 2, 0)));
34 36 extern int
35 37 trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
36   -extern void trace_print_seq(struct seq_file *m, struct trace_seq *s);
  38 +extern int trace_print_seq(struct seq_file *m, struct trace_seq *s);
37 39 extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
38 40 size_t cnt);
39 41 extern int trace_seq_puts(struct trace_seq *s, const char *str);
... ... @@ -55,8 +57,9 @@
55 57 return 0;
56 58 }
57 59  
58   -static inline void trace_print_seq(struct seq_file *m, struct trace_seq *s)
  60 +static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s)
59 61 {
  62 + return 0;
60 63 }
61 64 static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
62 65 size_t cnt)
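
With the new full flag, a trace_seq becomes "sticky" on overflow: the first write that does not fit sets s->full, every later trace_seq_*() call returns without touching the buffer, and only trace_seq_init() clears the flag again. A small caller-side sketch, assuming a tracer output callback (print_my_entry() is an illustrative name, not an existing function):

/* Sketch: an output callback detecting that its line did not fit. */
static enum print_line_t print_my_entry(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "cpu=%d ts=%llu\n",
			 iter->cpu, (unsigned long long)iter->ts);

	/* Nothing partial was written on overflow; report it so the
	 * caller can keep the entry around and retry later. */
	if (s->full)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
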
kernel/trace/trace.c
... ... @@ -1361,11 +1361,7 @@
1361 1361 pause_graph_tracing();
1362 1362 raw_local_irq_save(irq_flags);
1363 1363 __raw_spin_lock(&trace_buf_lock);
1364   - if (args == NULL) {
1365   - strncpy(trace_buf, fmt, TRACE_BUF_SIZE);
1366   - len = strlen(trace_buf);
1367   - } else
1368   - len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
  1364 + len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
1369 1365  
1370 1366 size = sizeof(*entry) + len + 1;
1371 1367 buffer = tr->buffer;
... ... @@ -1516,6 +1512,8 @@
1516 1512 int i = (int)*pos;
1517 1513 void *ent;
1518 1514  
  1515 + WARN_ON_ONCE(iter->leftover);
  1516 +
1519 1517 (*pos)++;
1520 1518  
1521 1519 /* can't go backwards */
... ... @@ -1614,8 +1612,16 @@
1614 1612 ;
1615 1613  
1616 1614 } else {
1617   - l = *pos - 1;
1618   - p = s_next(m, p, &l);
  1615 + /*
  1616 + * If we overflowed the seq_file before, then we want
  1617 + * to just reuse the trace_seq buffer again.
  1618 + */
  1619 + if (iter->leftover)
  1620 + p = iter;
  1621 + else {
  1622 + l = *pos - 1;
  1623 + p = s_next(m, p, &l);
  1624 + }
1619 1625 }
1620 1626  
1621 1627 trace_event_read_lock();
... ... @@ -1923,6 +1929,7 @@
1923 1929 static int s_show(struct seq_file *m, void *v)
1924 1930 {
1925 1931 struct trace_iterator *iter = v;
  1932 + int ret;
1926 1933  
1927 1934 if (iter->ent == NULL) {
1928 1935 if (iter->tr) {
... ... @@ -1942,9 +1949,27 @@
1942 1949 if (!(trace_flags & TRACE_ITER_VERBOSE))
1943 1950 print_func_help_header(m);
1944 1951 }
  1952 + } else if (iter->leftover) {
  1953 + /*
  1954 + * If we filled the seq_file buffer earlier, we
  1955 + * want to just show it now.
  1956 + */
  1957 + ret = trace_print_seq(m, &iter->seq);
  1958 +
  1959 + /* ret should this time be zero, but you never know */
  1960 + iter->leftover = ret;
  1961 +
1945 1962 } else {
1946 1963 print_trace_line(iter);
1947   - trace_print_seq(m, &iter->seq);
  1964 + ret = trace_print_seq(m, &iter->seq);
  1965 + /*
  1966 + * If we overflow the seq_file buffer, then it will
  1967 + * ask us for this data again at start up.
  1968 + * Use that instead.
  1969 + * ret is 0 if seq_file write succeeded.
  1970 + * -1 otherwise.
  1971 + */
  1972 + iter->leftover = ret;
1948 1973 }
1949 1974  
1950 1975 return 0;
... ... @@ -2898,6 +2923,10 @@
2898 2923 else
2899 2924 cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask);
2900 2925  
  2926 +
  2927 + if (iter->trace->pipe_close)
  2928 + iter->trace->pipe_close(iter);
  2929 +
2901 2930 mutex_unlock(&trace_types_lock);
2902 2931  
2903 2932 free_cpumask_var(iter->started);
... ... @@ -3320,6 +3349,16 @@
3320 3349 return cnt;
3321 3350 }
3322 3351  
  3352 +static int mark_printk(const char *fmt, ...)
  3353 +{
  3354 + int ret;
  3355 + va_list args;
  3356 + va_start(args, fmt);
  3357 + ret = trace_vprintk(0, fmt, args);
  3358 + va_end(args);
  3359 + return ret;
  3360 +}
  3361 +
3323 3362 static ssize_t
3324 3363 tracing_mark_write(struct file *filp, const char __user *ubuf,
3325 3364 size_t cnt, loff_t *fpos)
... ... @@ -3346,7 +3385,7 @@
3346 3385 } else
3347 3386 buf[cnt] = '\0';
3348 3387  
3349   - cnt = trace_vprintk(0, buf, NULL);
  3388 + cnt = mark_printk("%s", buf);
3350 3389 kfree(buf);
3351 3390 *fpos += cnt;
3352 3391  
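
trace_vprintk() no longer special-cases a NULL va_list, so tracing_mark_write() now builds a real one through the small mark_printk() wrapper, and the fixed "%s" format keeps the user-supplied buffer as plain data rather than a format string. A userspace-style analogue of that wrapper pattern (purely illustrative, not kernel code; mark_write() stands in for mark_printk() and vprintf() for trace_vprintk()):

#include <stdarg.h>
#include <stdio.h>

/* Analogue of mark_printk(): hand a vprintf-style backend a genuine
 * va_list instead of NULL, with a fixed "%s" so the payload is never
 * scanned for conversion specifiers. */
static int mark_write(const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = vprintf(fmt, args);	/* stand-in for trace_vprintk() */
	va_end(args);
	return ret;
}

int main(void)
{
	char user_buf[] = "progress: 100%\n";	/* user text may contain '%' */

	return mark_write("%s", user_buf) < 0;
}
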
kernel/trace/trace.h
... ... @@ -272,6 +272,7 @@
272 272 * @pipe_open: called when the trace_pipe file is opened
273 273 * @wait_pipe: override how the user waits for traces on trace_pipe
274 274 * @close: called when the trace file is released
  275 + * @pipe_close: called when the trace_pipe file is released
275 276 * @read: override the default read callback on trace_pipe
276 277 * @splice_read: override the default splice_read callback on trace_pipe
277 278 * @selftest: selftest to run on boot (see trace_selftest.c)
... ... @@ -290,6 +291,7 @@
290 291 void (*pipe_open)(struct trace_iterator *iter);
291 292 void (*wait_pipe)(struct trace_iterator *iter);
292 293 void (*close)(struct trace_iterator *iter);
  294 + void (*pipe_close)(struct trace_iterator *iter);
293 295 ssize_t (*read)(struct trace_iterator *iter,
294 296 struct file *filp, char __user *ubuf,
295 297 size_t cnt, loff_t *ppos);
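
pipe_close gives a tracer a hook that mirrors pipe_open for the trace_pipe file, so per-iterator state allocated on open can be freed on release; the function_graph tracer below registers the same pair for .open/.close and .pipe_open/.pipe_close. A hedged sketch of a tracer wiring up such a pair (my_tracer, my_pipe_open(), my_pipe_close() and struct my_iter_state are hypothetical names, not part of this commit):

struct my_iter_state {
	int depth;		/* whatever per-reader state is needed */
};

static void my_pipe_open(struct trace_iterator *iter)
{
	iter->private = kzalloc(sizeof(struct my_iter_state), GFP_KERNEL);
}

static void my_pipe_close(struct trace_iterator *iter)
{
	kfree(iter->private);
	iter->private = NULL;
}

static struct tracer my_tracer __read_mostly = {
	.name		= "my_tracer",
	.pipe_open	= my_pipe_open,
	.pipe_close	= my_pipe_close,
};
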
kernel/trace/trace_functions_graph.c
... ... @@ -14,11 +14,22 @@
14 14 #include "trace.h"
15 15 #include "trace_output.h"
16 16  
17   -struct fgraph_data {
  17 +struct fgraph_cpu_data {
18 18 pid_t last_pid;
19 19 int depth;
  20 + int ignore;
20 21 };
21 22  
  23 +struct fgraph_data {
  24 + struct fgraph_cpu_data *cpu_data;
  25 +
  26 + /* Place to preserve last processed entry. */
  27 + struct ftrace_graph_ent_entry ent;
  28 + struct ftrace_graph_ret_entry ret;
  29 + int failed;
  30 + int cpu;
  31 +};
  32 +
22 33 #define TRACE_GRAPH_INDENT 2
23 34  
24 35 /* Flag options */
... ... @@ -384,7 +395,7 @@
384 395 if (!data)
385 396 return TRACE_TYPE_HANDLED;
386 397  
387   - last_pid = &(per_cpu_ptr(data, cpu)->last_pid);
  398 + last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
388 399  
389 400 if (*last_pid == pid)
390 401 return TRACE_TYPE_HANDLED;
... ... @@ -435,27 +446,50 @@
435 446 get_return_for_leaf(struct trace_iterator *iter,
436 447 struct ftrace_graph_ent_entry *curr)
437 448 {
438   - struct ring_buffer_iter *ring_iter;
  449 + struct fgraph_data *data = iter->private;
  450 + struct ring_buffer_iter *ring_iter = NULL;
439 451 struct ring_buffer_event *event;
440 452 struct ftrace_graph_ret_entry *next;
441 453  
442   - ring_iter = iter->buffer_iter[iter->cpu];
  454 + /*
  455 + * If the previous output failed to write to the seq buffer,
  456 + * then we just reuse the data from before.
  457 + */
  458 + if (data && data->failed) {
  459 + curr = &data->ent;
  460 + next = &data->ret;
  461 + } else {
443 462  
444   - /* First peek to compare current entry and the next one */
445   - if (ring_iter)
446   - event = ring_buffer_iter_peek(ring_iter, NULL);
447   - else {
448   - /* We need to consume the current entry to see the next one */
449   - ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
450   - event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
451   - NULL);
452   - }
  463 + ring_iter = iter->buffer_iter[iter->cpu];
453 464  
454   - if (!event)
455   - return NULL;
  465 + /* First peek to compare current entry and the next one */
  466 + if (ring_iter)
  467 + event = ring_buffer_iter_peek(ring_iter, NULL);
  468 + else {
  469 + /*
  470 + * We need to consume the current entry to see
  471 + * the next one.
  472 + */
  473 + ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
  474 + event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
  475 + NULL);
  476 + }
456 477  
457   - next = ring_buffer_event_data(event);
  478 + if (!event)
  479 + return NULL;
458 480  
  481 + next = ring_buffer_event_data(event);
  482 +
  483 + if (data) {
  484 + /*
  485 + * Save current and next entries for later reference
  486 + * if the output fails.
  487 + */
  488 + data->ent = *curr;
  489 + data->ret = *next;
  490 + }
  491 + }
  492 +
459 493 if (next->ent.type != TRACE_GRAPH_RET)
460 494 return NULL;
461 495  
... ... @@ -640,7 +674,7 @@
640 674  
641 675 if (data) {
642 676 int cpu = iter->cpu;
643   - int *depth = &(per_cpu_ptr(data, cpu)->depth);
  677 + int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
644 678  
645 679 /*
646 680 * Comments display at + 1 to depth. Since
... ... @@ -688,7 +722,7 @@
688 722  
689 723 if (data) {
690 724 int cpu = iter->cpu;
691   - int *depth = &(per_cpu_ptr(data, cpu)->depth);
  725 + int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
692 726  
693 727 *depth = call->depth;
694 728 }
... ... @@ -782,19 +816,34 @@
782 816 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
783 817 struct trace_iterator *iter)
784 818 {
785   - int cpu = iter->cpu;
  819 + struct fgraph_data *data = iter->private;
786 820 struct ftrace_graph_ent *call = &field->graph_ent;
787 821 struct ftrace_graph_ret_entry *leaf_ret;
  822 + static enum print_line_t ret;
  823 + int cpu = iter->cpu;
788 824  
789 825 if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
790 826 return TRACE_TYPE_PARTIAL_LINE;
791 827  
792 828 leaf_ret = get_return_for_leaf(iter, field);
793 829 if (leaf_ret)
794   - return print_graph_entry_leaf(iter, field, leaf_ret, s);
  830 + ret = print_graph_entry_leaf(iter, field, leaf_ret, s);
795 831 else
796   - return print_graph_entry_nested(iter, field, s, cpu);
  832 + ret = print_graph_entry_nested(iter, field, s, cpu);
797 833  
  834 + if (data) {
  835 + /*
  836 + * If we failed to write our output, then we need to make
  837 + * note of it. Because we already consumed our entry.
  838 + */
  839 + if (s->full) {
  840 + data->failed = 1;
  841 + data->cpu = cpu;
  842 + } else
  843 + data->failed = 0;
  844 + }
  845 +
  846 + return ret;
798 847 }
799 848  
800 849 static enum print_line_t
... ... @@ -810,7 +859,7 @@
810 859  
811 860 if (data) {
812 861 int cpu = iter->cpu;
813   - int *depth = &(per_cpu_ptr(data, cpu)->depth);
  862 + int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
814 863  
815 864 /*
816 865 * Comments display at + 1 to depth. This is the
... ... @@ -873,7 +922,7 @@
873 922 int i;
874 923  
875 924 if (data)
876   - depth = per_cpu_ptr(data, iter->cpu)->depth;
  925 + depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
877 926  
878 927 if (print_graph_prologue(iter, s, 0, 0))
879 928 return TRACE_TYPE_PARTIAL_LINE;
... ... @@ -941,9 +990,34 @@
941 990 enum print_line_t
942 991 print_graph_function(struct trace_iterator *iter)
943 992 {
  993 + struct ftrace_graph_ent_entry *field;
  994 + struct fgraph_data *data = iter->private;
944 995 struct trace_entry *entry = iter->ent;
945 996 struct trace_seq *s = &iter->seq;
  997 + int cpu = iter->cpu;
  998 + int ret;
946 999  
  1000 + if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
  1001 + per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
  1002 + return TRACE_TYPE_HANDLED;
  1003 + }
  1004 +
  1005 + /*
  1006 + * If the last output failed, there's a possibility we need
  1007 + * to print out the missing entry which would never go out.
  1008 + */
  1009 + if (data && data->failed) {
  1010 + field = &data->ent;
  1011 + iter->cpu = data->cpu;
  1012 + ret = print_graph_entry(field, s, iter);
  1013 + if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
  1014 + per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
  1015 + ret = TRACE_TYPE_NO_CONSUME;
  1016 + }
  1017 + iter->cpu = cpu;
  1018 + return ret;
  1019 + }
  1020 +
947 1021 switch (entry->type) {
948 1022 case TRACE_GRAPH_ENT: {
949 1023 /*
... ... @@ -952,7 +1026,7 @@
952 1026 * sizeof(struct ftrace_graph_ent_entry) is very small,
953 1027 * it can be safely saved at the stack.
954 1028 */
955   - struct ftrace_graph_ent_entry *field, saved;
  1029 + struct ftrace_graph_ent_entry saved;
956 1030 trace_assign_type(field, entry);
957 1031 saved = *field;
958 1032 return print_graph_entry(&saved, s, iter);
... ... @@ -1030,31 +1104,54 @@
1030 1104 static void graph_trace_open(struct trace_iterator *iter)
1031 1105 {
1032 1106 /* pid and depth on the last trace processed */
1033   - struct fgraph_data *data = alloc_percpu(struct fgraph_data);
  1107 + struct fgraph_data *data;
1034 1108 int cpu;
1035 1109  
  1110 + iter->private = NULL;
  1111 +
  1112 + data = kzalloc(sizeof(*data), GFP_KERNEL);
1036 1113 if (!data)
1037   - pr_warning("function graph tracer: not enough memory\n");
1038   - else
1039   - for_each_possible_cpu(cpu) {
1040   - pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
1041   - int *depth = &(per_cpu_ptr(data, cpu)->depth);
1042   - *pid = -1;
1043   - *depth = 0;
1044   - }
  1114 + goto out_err;
1045 1115  
  1116 + data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
  1117 + if (!data->cpu_data)
  1118 + goto out_err_free;
  1119 +
  1120 + for_each_possible_cpu(cpu) {
  1121 + pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
  1122 + int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
  1123 + int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
  1124 + *pid = -1;
  1125 + *depth = 0;
  1126 + *ignore = 0;
  1127 + }
  1128 +
1046 1129 iter->private = data;
  1130 +
  1131 + return;
  1132 +
  1133 + out_err_free:
  1134 + kfree(data);
  1135 + out_err:
  1136 + pr_warning("function graph tracer: not enough memory\n");
1047 1137 }
1048 1138  
1049 1139 static void graph_trace_close(struct trace_iterator *iter)
1050 1140 {
1051   - free_percpu(iter->private);
  1141 + struct fgraph_data *data = iter->private;
  1142 +
  1143 + if (data) {
  1144 + free_percpu(data->cpu_data);
  1145 + kfree(data);
  1146 + }
1052 1147 }
1053 1148  
1054 1149 static struct tracer graph_trace __read_mostly = {
1055 1150 .name = "function_graph",
1056 1151 .open = graph_trace_open,
  1152 + .pipe_open = graph_trace_open,
1057 1153 .close = graph_trace_close,
  1154 + .pipe_close = graph_trace_close,
1058 1155 .wait_pipe = poll_wait_pipe,
1059 1156 .init = graph_trace_init,
1060 1157 .reset = graph_trace_reset,
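
Formatting a leaf function consumes the matching return event from the ring buffer, so a seq-buffer overflow would otherwise lose both records. The reworked fgraph_data therefore keeps a copy of the last ent/ret pair plus a failed flag, and print_graph_function() replays the copy on the next call, marking the other CPU's ignore slot when the replay happens on behalf of a different CPU. A self-contained toy model of that save-and-replay idea (all names illustrative, not kernel code):

#include <stdio.h>

struct event { int id; };

struct state {
	struct event saved;	/* last event consumed from the source */
	int failed;		/* set when the write did not fit       */
};

/* Pretend "output" that fails when the destination is too small. */
static int emit(char *buf, unsigned long len, const struct event *e)
{
	int n = snprintf(buf, len, "event %d\n", e->id);
	return (n < 0 || (unsigned long)n >= len) ? -1 : 0;
}

static void handle(struct state *st, const struct event *next,
		   char *buf, unsigned long len)
{
	const struct event *e;

	if (st->failed) {
		e = &st->saved;		/* replay the saved event       */
	} else {
		st->saved = *next;	/* save before output can fail  */
		e = next;
	}
	st->failed = (emit(buf, len, e) < 0);
}

int main(void)
{
	struct state st = { .failed = 0 };
	struct event ev = { .id = 42 };
	char small[4], big[32];

	handle(&st, &ev, small, sizeof(small));	/* overflows, event saved  */
	handle(&st, &ev, big, sizeof(big));	/* replays the saved event */
	printf("%s", big);
	return 0;
}
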
kernel/trace/trace_output.c
... ... @@ -23,13 +23,21 @@
23 23  
24 24 static int next_event_type = __TRACE_LAST_TYPE + 1;
25 25  
26   -void trace_print_seq(struct seq_file *m, struct trace_seq *s)
  26 +int trace_print_seq(struct seq_file *m, struct trace_seq *s)
27 27 {
28 28 int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
  29 + int ret;
29 30  
30   - seq_write(m, s->buffer, len);
  31 + ret = seq_write(m, s->buffer, len);
31 32  
32   - trace_seq_init(s);
  33 + /*
  34 + * Only reset this buffer if we successfully wrote to the
  35 + * seq_file buffer.
  36 + */
  37 + if (!ret)
  38 + trace_seq_init(s);
  39 +
  40 + return ret;
33 41 }
34 42  
35 43 enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
... ... @@ -85,7 +93,7 @@
85 93 va_list ap;
86 94 int ret;
87 95  
88   - if (!len)
  96 + if (s->full || !len)
89 97 return 0;
90 98  
91 99 va_start(ap, fmt);
... ... @@ -93,8 +101,10 @@
93 101 va_end(ap);
94 102  
95 103 /* If we can't write it all, don't bother writing anything */
96   - if (ret >= len)
  104 + if (ret >= len) {
  105 + s->full = 1;
97 106 return 0;
  107 + }
98 108  
99 109 s->len += ret;
100 110  
... ... @@ -119,14 +129,16 @@
119 129 int len = (PAGE_SIZE - 1) - s->len;
120 130 int ret;
121 131  
122   - if (!len)
  132 + if (s->full || !len)
123 133 return 0;
124 134  
125 135 ret = vsnprintf(s->buffer + s->len, len, fmt, args);
126 136  
127 137 /* If we can't write it all, don't bother writing anything */
128   - if (ret >= len)
  138 + if (ret >= len) {
  139 + s->full = 1;
129 140 return 0;
  141 + }
130 142  
131 143 s->len += ret;
132 144  
... ... @@ -139,14 +151,16 @@
139 151 int len = (PAGE_SIZE - 1) - s->len;
140 152 int ret;
141 153  
142   - if (!len)
  154 + if (s->full || !len)
143 155 return 0;
144 156  
145 157 ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
146 158  
147 159 /* If we can't write it all, don't bother writing anything */
148   - if (ret >= len)
  160 + if (ret >= len) {
  161 + s->full = 1;
149 162 return 0;
  163 + }
150 164  
151 165 s->len += ret;
152 166  
... ... @@ -167,9 +181,14 @@
167 181 {
168 182 int len = strlen(str);
169 183  
170   - if (len > ((PAGE_SIZE - 1) - s->len))
  184 + if (s->full)
171 185 return 0;
172 186  
  187 + if (len > ((PAGE_SIZE - 1) - s->len)) {
  188 + s->full = 1;
  189 + return 0;
  190 + }
  191 +
173 192 memcpy(s->buffer + s->len, str, len);
174 193 s->len += len;
175 194  
... ... @@ -178,9 +197,14 @@
178 197  
179 198 int trace_seq_putc(struct trace_seq *s, unsigned char c)
180 199 {
181   - if (s->len >= (PAGE_SIZE - 1))
  200 + if (s->full)
182 201 return 0;
183 202  
  203 + if (s->len >= (PAGE_SIZE - 1)) {
  204 + s->full = 1;
  205 + return 0;
  206 + }
  207 +
184 208 s->buffer[s->len++] = c;
185 209  
186 210 return 1;
... ... @@ -188,9 +212,14 @@
188 212  
189 213 int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
190 214 {
191   - if (len > ((PAGE_SIZE - 1) - s->len))
  215 + if (s->full)
192 216 return 0;
193 217  
  218 + if (len > ((PAGE_SIZE - 1) - s->len)) {
  219 + s->full = 1;
  220 + return 0;
  221 + }
  222 +
194 223 memcpy(s->buffer + s->len, mem, len);
195 224 s->len += len;
196 225  
... ... @@ -203,6 +232,9 @@
203 232 const unsigned char *data = mem;
204 233 int i, j;
205 234  
  235 + if (s->full)
  236 + return 0;
  237 +
206 238 #ifdef __BIG_ENDIAN
207 239 for (i = 0, j = 0; i < len; i++) {
208 240 #else
... ... @@ -220,8 +252,13 @@
220 252 {
221 253 void *ret;
222 254  
223   - if (len > ((PAGE_SIZE - 1) - s->len))
  255 + if (s->full)
  256 + return 0;
  257 +
  258 + if (len > ((PAGE_SIZE - 1) - s->len)) {
  259 + s->full = 1;
224 260 return NULL;
  261 + }
225 262  
226 263 ret = s->buffer + s->len;
227 264 s->len += len;
... ... @@ -233,8 +270,14 @@
233 270 {
234 271 unsigned char *p;
235 272  
236   - if (s->len >= (PAGE_SIZE - 1))
  273 + if (s->full)
237 274 return 0;
  275 +
  276 + if (s->len >= (PAGE_SIZE - 1)) {
  277 + s->full = 1;
  278 + return 0;
  279 + }
  280 +
238 281 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
239 282 if (!IS_ERR(p)) {
240 283 p = mangle_path(s->buffer + s->len, p, "\n");
... ... @@ -247,6 +290,7 @@
247 290 return 1;
248 291 }
249 292  
  293 + s->full = 1;
250 294 return 0;
251 295 }
252 296  
... ... @@ -372,6 +416,9 @@
372 416 struct file *file = NULL;
373 417 unsigned long vmstart = 0;
374 418 int ret = 1;
  419 +
  420 + if (s->full)
  421 + return 0;
375 422  
376 423 if (mm) {
377 424 const struct vm_area_struct *vma;
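
trace_print_seq() now propagates the seq_write() result and only resets the trace_seq on success. That matters because seq_file's read path grows its buffer and calls ->show() again after an overflow, so whatever the failing attempt held must still be there for the retry. A minimal sketch of a show callback honouring that contract (my_show() is an illustrative name; assumes v points at a trace_seq kept by the caller):

/* Sketch: leave the scratch buffer alone when the seq_file overflows,
 * matching what trace_print_seq() now guarantees. */
static int my_show(struct seq_file *m, void *v)
{
	struct trace_seq *s = v;

	if (trace_print_seq(m, s))
		return 0;	/* full: s untouched, re-emitted on the retry */

	/* success: trace_print_seq() already re-initialised s, so new
	 * text can be formatted into it from scratch. */
	return 0;
}
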