Commit 750ed158bf6c782d2813da1bca2c824365a0b777

Authored by Ingo Molnar

Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core

Showing 6 changed files

kernel/trace/ftrace.c
... ... @@ -1638,8 +1638,8 @@
1638 1638  
1639 1639 ret = ftrace_avail_open(inode, file);
1640 1640 if (!ret) {
1641   - m = (struct seq_file *)file->private_data;
1642   - iter = (struct ftrace_iterator *)m->private;
  1641 + m = file->private_data;
  1642 + iter = m->private;
1643 1643 iter->flags = FTRACE_ITER_FAILURES;
1644 1644 }
1645 1645  
kernel/trace/trace.c
... ... @@ -2196,7 +2196,7 @@
2196 2196  
2197 2197 static int tracing_release(struct inode *inode, struct file *file)
2198 2198 {
2199   - struct seq_file *m = (struct seq_file *)file->private_data;
  2199 + struct seq_file *m = file->private_data;
2200 2200 struct trace_iterator *iter;
2201 2201 int cpu;
2202 2202  
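
The two hunks above only drop casts that were never needed: file->private_data and m->private are void *, and C converts void * to any object pointer type implicitly on assignment. A minimal user-space sketch of the same idiom (the struct names below are made-up stand-ins, not the kernel types):

    #include <stdio.h>

    struct seq_like  { void *private; };        /* stand-in for struct seq_file */
    struct file_like { void *private_data; };   /* stand-in for struct file     */

    int main(void)
    {
        struct seq_like sf = { .private = "iterator state" };
        struct file_like f = { .private_data = &sf };

        struct seq_like *m = f.private_data;    /* implicit void * conversion, no cast */
        const char *iter = m->private;          /* same here */

        printf("%s\n", iter);
        return 0;
    }
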
kernel/trace/trace.h
... ... @@ -343,6 +343,10 @@
343 343 unsigned long ip,
344 344 unsigned long parent_ip,
345 345 unsigned long flags, int pc);
  346 +void trace_graph_function(struct trace_array *tr,
  347 + unsigned long ip,
  348 + unsigned long parent_ip,
  349 + unsigned long flags, int pc);
346 350 void trace_default_header(struct seq_file *m);
347 351 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
348 352 int trace_empty(struct trace_iterator *iter);
kernel/trace/trace_functions_graph.c
... ... @@ -262,6 +262,34 @@
262 262 return trace_graph_entry(trace);
263 263 }
264 264  
  265 +static void
  266 +__trace_graph_function(struct trace_array *tr,
  267 + unsigned long ip, unsigned long flags, int pc)
  268 +{
  269 + u64 time = trace_clock_local();
  270 + struct ftrace_graph_ent ent = {
  271 + .func = ip,
  272 + .depth = 0,
  273 + };
  274 + struct ftrace_graph_ret ret = {
  275 + .func = ip,
  276 + .depth = 0,
  277 + .calltime = time,
  278 + .rettime = time,
  279 + };
  280 +
  281 + __trace_graph_entry(tr, &ent, flags, pc);
  282 + __trace_graph_return(tr, &ret, flags, pc);
  283 +}
  284 +
  285 +void
  286 +trace_graph_function(struct trace_array *tr,
  287 + unsigned long ip, unsigned long parent_ip,
  288 + unsigned long flags, int pc)
  289 +{
  290 + __trace_graph_function(tr, ip, flags, pc);
  291 +}
  292 +
265 293 void __trace_graph_return(struct trace_array *tr,
266 294 struct ftrace_graph_ret *trace,
267 295 unsigned long flags,
268 296  
269 297  
270 298  
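
trace_graph_function(), declared in trace.h above and defined here, lets a latency tracer record a single address as a function-graph event: it emits an entry and a matching return that share one trace_clock_local() timestamp, so the output shows a zero-duration leaf call. A toy sketch of the same pairing, with invented types and printf() standing in for the ring-buffer writes:

    #include <stdint.h>
    #include <stdio.h>

    struct g_ent { uint64_t func; int depth; };
    struct g_ret { uint64_t func; int depth; uint64_t calltime, rettime; };

    static void emit_entry(const struct g_ent *e)
    {
        printf("enter  %#llx depth=%d\n", (unsigned long long)e->func, e->depth);
    }

    static void emit_return(const struct g_ret *r)
    {
        printf("return %#llx duration=%llu\n", (unsigned long long)r->func,
               (unsigned long long)(r->rettime - r->calltime));
    }

    /* Record one ip as an entry/return pair with identical timestamps. */
    static void graph_one_function(uint64_t ip, uint64_t now)
    {
        struct g_ent ent = { .func = ip, .depth = 0 };
        struct g_ret ret = { .func = ip, .depth = 0, .calltime = now, .rettime = now };

        emit_entry(&ent);
        emit_return(&ret);
    }

    int main(void)
    {
        graph_one_function(0xffffffff81234567ULL, 1000);
        return 0;
    }
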
... ... @@ -888,12 +916,20 @@
888 916 unsigned long addr, int depth)
889 917 {
890 918 int cpu = iter->cpu;
  919 + int *depth_irq;
891 920 struct fgraph_data *data = iter->private;
892   - int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
893 921  
894   - if (flags & TRACE_GRAPH_PRINT_IRQS)
  922 + /*
  923 + * If we are either displaying irqs, or we got called as
  924 + * a graph event and private data does not exist,
  925 + * then we bypass the irq check.
  926 + */
  927 + if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
  928 + (!data))
895 929 return 0;
896 930  
  931 + depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
  932 +
897 933 /*
898 934 * We are inside the irq code
899 935 */
900 936  
901 937  
902 938  
... ... @@ -926,12 +962,20 @@
926 962 check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
927 963 {
928 964 int cpu = iter->cpu;
  965 + int *depth_irq;
929 966 struct fgraph_data *data = iter->private;
930   - int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
931 967  
932   - if (flags & TRACE_GRAPH_PRINT_IRQS)
  968 + /*
  969 + * If we are either displaying irqs, or we got called as
  970 + * a graph event and private data does not exist,
  971 + * then we bypass the irq check.
  972 + */
  973 + if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
  974 + (!data))
933 975 return 0;
934 976  
  977 + depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
  978 +
935 979 /*
936 980 * We are not inside the irq code.
937 981 */
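
In both check_irq_entry() and check_irq_return(), the per-cpu dereference now happens only after the NULL check on the iterator's private data, matching the new comment: when the output code is reached as a graph event rather than from the graph tracer itself, that data may not exist. The shape of the guard, as a small sketch with invented names:

    #include <stdio.h>

    struct percpu_state { int depth_irq; };
    struct priv { struct percpu_state *cpu_data; };

    /* Return 1 if the event should be hidden as irq-internal, 0 otherwise. */
    static int hide_as_irq(struct priv *data, int show_irqs)
    {
        int *depth_irq;

        /* Showing irqs anyway, or no private data: never hide, and
         * never dereference data.  This is the ordering restored above. */
        if (show_irqs || !data)
            return 0;

        depth_irq = &data->cpu_data->depth_irq;
        return *depth_irq >= 0;
    }

    int main(void)
    {
        struct percpu_state st = { .depth_irq = -1 };
        struct priv p = { .cpu_data = &st };

        printf("%d %d\n", hide_as_irq(NULL, 0), hide_as_irq(&p, 0));
        return 0;
    }
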
... ... @@ -1163,7 +1207,7 @@
1163 1207  
1164 1208  
1165 1209 enum print_line_t
1166   -print_graph_function_flags(struct trace_iterator *iter, u32 flags)
  1210 +__print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1167 1211 {
1168 1212 struct ftrace_graph_ent_entry *field;
1169 1213 struct fgraph_data *data = iter->private;
1170 1214  
... ... @@ -1226,9 +1270,20 @@
1226 1270 static enum print_line_t
1227 1271 print_graph_function(struct trace_iterator *iter)
1228 1272 {
1229   - return print_graph_function_flags(iter, tracer_flags.val);
  1273 + return __print_graph_function_flags(iter, tracer_flags.val);
1230 1274 }
1231 1275  
  1276 +enum print_line_t print_graph_function_flags(struct trace_iterator *iter,
  1277 + u32 flags)
  1278 +{
  1279 + if (trace_flags & TRACE_ITER_LATENCY_FMT)
  1280 + flags |= TRACE_GRAPH_PRINT_DURATION;
  1281 + else
  1282 + flags |= TRACE_GRAPH_PRINT_ABS_TIME;
  1283 +
  1284 + return __print_graph_function_flags(iter, flags);
  1285 +}
  1286 +
1232 1287 static enum print_line_t
1233 1288 print_graph_function_event(struct trace_iterator *iter, int flags,
1234 1289 struct trace_event *event)
... ... @@ -1258,7 +1313,7 @@
1258 1313 seq_printf(s, "#%.*s|||| / \n", size, spaces);
1259 1314 }
1260 1315  
1261   -void print_graph_headers_flags(struct seq_file *s, u32 flags)
  1316 +static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
1262 1317 {
1263 1318 int lat = trace_flags & TRACE_ITER_LATENCY_FMT;
1264 1319  
... ... @@ -1297,6 +1352,23 @@
1297 1352 void print_graph_headers(struct seq_file *s)
1298 1353 {
1299 1354 print_graph_headers_flags(s, tracer_flags.val);
  1355 +}
  1356 +
  1357 +void print_graph_headers_flags(struct seq_file *s, u32 flags)
  1358 +{
  1359 + struct trace_iterator *iter = s->private;
  1360 +
  1361 + if (trace_flags & TRACE_ITER_LATENCY_FMT) {
  1362 + /* print nothing if the buffers are empty */
  1363 + if (trace_empty(iter))
  1364 + return;
  1365 +
  1366 + print_trace_header(s, iter);
  1367 + flags |= TRACE_GRAPH_PRINT_DURATION;
  1368 + } else
  1369 + flags |= TRACE_GRAPH_PRINT_ABS_TIME;
  1370 +
  1371 + __print_graph_headers_flags(s, flags);
1300 1372 }
1301 1373  
1302 1374 void graph_trace_open(struct trace_iterator *iter)
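
print_graph_function_flags() and print_graph_headers_flags() are split the same way: a static __-prefixed worker keeps the old behaviour, while the exported wrapper first folds in TRACE_GRAPH_PRINT_DURATION (latency format) or TRACE_GRAPH_PRINT_ABS_TIME, and for the headers also prints the latency header, before delegating. That centralises the flag fix-up that the irqsoff tracer open-codes until this change (see the trace_irqsoff.c hunks below). A compact sketch of the wrapper shape with invented flag names:

    #include <stdio.h>

    #define OPT_DURATION (1u << 0)   /* invented stand-ins for the PRINT_* flags */
    #define OPT_ABS_TIME (1u << 1)

    static int latency_fmt;          /* stand-in for trace_flags & TRACE_ITER_LATENCY_FMT */

    static void __print_with_flags(unsigned int flags)
    {
        printf("printing with flags %#x\n", flags);
    }

    /* Exported entry point: normalise the flags, then call the worker. */
    static void print_with_flags(unsigned int flags)
    {
        if (latency_fmt)
            flags |= OPT_DURATION;
        else
            flags |= OPT_ABS_TIME;

        __print_with_flags(flags);
    }

    int main(void)
    {
        print_with_flags(0);
        latency_fmt = 1;
        print_with_flags(0);
        return 0;
    }
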
kernel/trace/trace_irqsoff.c
... ... @@ -87,14 +87,22 @@
87 87  
88 88 #ifdef CONFIG_FUNCTION_TRACER
89 89 /*
90   - * irqsoff uses its own tracer function to keep the overhead down:
  90 + * Prologue for the preempt and irqs off function tracers.
  91 + *
  92 + * Returns 1 if it is OK to continue, and data->disabled is
  93 + * incremented.
  94 + * 0 if the trace is to be ignored, and data->disabled
  95 + * is kept the same.
  96 + *
  97 + * Note, this function is also used outside this ifdef but
  98 + * inside the #ifdef of the function graph tracer below.
  99 + * This is OK, since the function graph tracer is
  100 + * dependent on the function tracer.
91 101 */
92   -static void
93   -irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
  102 +static int func_prolog_dec(struct trace_array *tr,
  103 + struct trace_array_cpu **data,
  104 + unsigned long *flags)
94 105 {
95   - struct trace_array *tr = irqsoff_trace;
96   - struct trace_array_cpu *data;
97   - unsigned long flags;
98 106 long disabled;
99 107 int cpu;
100 108  
101 109  
102 110  
103 111  
104 112  
105 113  
... ... @@ -106,19 +114,39 @@
106 114 */
107 115 cpu = raw_smp_processor_id();
108 116 if (likely(!per_cpu(tracing_cpu, cpu)))
109   - return;
  117 + return 0;
110 118  
111   - local_save_flags(flags);
  119 + local_save_flags(*flags);
112 120 /* slight chance to get a false positive on tracing_cpu */
113   - if (!irqs_disabled_flags(flags))
114   - return;
  121 + if (!irqs_disabled_flags(*flags))
  122 + return 0;
115 123  
116   - data = tr->data[cpu];
117   - disabled = atomic_inc_return(&data->disabled);
  124 + *data = tr->data[cpu];
  125 + disabled = atomic_inc_return(&(*data)->disabled);
118 126  
119 127 if (likely(disabled == 1))
120   - trace_function(tr, ip, parent_ip, flags, preempt_count());
  128 + return 1;
121 129  
  130 + atomic_dec(&(*data)->disabled);
  131 +
  132 + return 0;
  133 +}
  134 +
  135 +/*
  136 + * irqsoff uses its own tracer function to keep the overhead down:
  137 + */
  138 +static void
  139 +irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
  140 +{
  141 + struct trace_array *tr = irqsoff_trace;
  142 + struct trace_array_cpu *data;
  143 + unsigned long flags;
  144 +
  145 + if (!func_prolog_dec(tr, &data, &flags))
  146 + return;
  147 +
  148 + trace_function(tr, ip, parent_ip, flags, preempt_count());
  149 +
122 150 atomic_dec(&data->disabled);
123 151 }
124 152  
125 153  
126 154  
127 155  
128 156  
... ... @@ -155,30 +183,16 @@
155 183 struct trace_array *tr = irqsoff_trace;
156 184 struct trace_array_cpu *data;
157 185 unsigned long flags;
158   - long disabled;
159 186 int ret;
160   - int cpu;
161 187 int pc;
162 188  
163   - cpu = raw_smp_processor_id();
164   - if (likely(!per_cpu(tracing_cpu, cpu)))
  189 + if (!func_prolog_dec(tr, &data, &flags))
165 190 return 0;
166 191  
167   - local_save_flags(flags);
168   - /* slight chance to get a false positive on tracing_cpu */
169   - if (!irqs_disabled_flags(flags))
170   - return 0;
171   -
172   - data = tr->data[cpu];
173   - disabled = atomic_inc_return(&data->disabled);
174   -
175   - if (likely(disabled == 1)) {
176   - pc = preempt_count();
177   - ret = __trace_graph_entry(tr, trace, flags, pc);
178   - } else
179   - ret = 0;
180   -
  192 + pc = preempt_count();
  193 + ret = __trace_graph_entry(tr, trace, flags, pc);
181 194 atomic_dec(&data->disabled);
  195 +
182 196 return ret;
183 197 }
184 198  
185 199  
186 200  
... ... @@ -187,27 +201,13 @@
187 201 struct trace_array *tr = irqsoff_trace;
188 202 struct trace_array_cpu *data;
189 203 unsigned long flags;
190   - long disabled;
191   - int cpu;
192 204 int pc;
193 205  
194   - cpu = raw_smp_processor_id();
195   - if (likely(!per_cpu(tracing_cpu, cpu)))
  206 + if (!func_prolog_dec(tr, &data, &flags))
196 207 return;
197 208  
198   - local_save_flags(flags);
199   - /* slight chance to get a false positive on tracing_cpu */
200   - if (!irqs_disabled_flags(flags))
201   - return;
202   -
203   - data = tr->data[cpu];
204   - disabled = atomic_inc_return(&data->disabled);
205   -
206   - if (likely(disabled == 1)) {
207   - pc = preempt_count();
208   - __trace_graph_return(tr, trace, flags, pc);
209   - }
210   -
  209 + pc = preempt_count();
  210 + __trace_graph_return(tr, trace, flags, pc);
211 211 atomic_dec(&data->disabled);
212 212 }
213 213  
214 214  
215 215  
216 216  
217 217  
218 218  
... ... @@ -229,75 +229,33 @@
229 229  
230 230 static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
231 231 {
232   - u32 flags = GRAPH_TRACER_FLAGS;
233   -
234   - if (trace_flags & TRACE_ITER_LATENCY_FMT)
235   - flags |= TRACE_GRAPH_PRINT_DURATION;
236   - else
237   - flags |= TRACE_GRAPH_PRINT_ABS_TIME;
238   -
239 232 /*
240 233 * In graph mode call the graph tracer output function,
241 234 * otherwise go with the TRACE_FN event handler
242 235 */
243 236 if (is_graph())
244   - return print_graph_function_flags(iter, flags);
  237 + return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
245 238  
246 239 return TRACE_TYPE_UNHANDLED;
247 240 }
248 241  
249 242 static void irqsoff_print_header(struct seq_file *s)
250 243 {
251   - if (is_graph()) {
252   - struct trace_iterator *iter = s->private;
253   - u32 flags = GRAPH_TRACER_FLAGS;
254   -
255   - if (trace_flags & TRACE_ITER_LATENCY_FMT) {
256   - /* print nothing if the buffers are empty */
257   - if (trace_empty(iter))
258   - return;
259   -
260   - print_trace_header(s, iter);
261   - flags |= TRACE_GRAPH_PRINT_DURATION;
262   - } else
263   - flags |= TRACE_GRAPH_PRINT_ABS_TIME;
264   -
265   - print_graph_headers_flags(s, flags);
266   - } else
  244 + if (is_graph())
  245 + print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
  246 + else
267 247 trace_default_header(s);
268 248 }
269 249  
270 250 static void
271   -trace_graph_function(struct trace_array *tr,
272   - unsigned long ip, unsigned long flags, int pc)
273   -{
274   - u64 time = trace_clock_local();
275   - struct ftrace_graph_ent ent = {
276   - .func = ip,
277   - .depth = 0,
278   - };
279   - struct ftrace_graph_ret ret = {
280   - .func = ip,
281   - .depth = 0,
282   - .calltime = time,
283   - .rettime = time,
284   - };
285   -
286   - __trace_graph_entry(tr, &ent, flags, pc);
287   - __trace_graph_return(tr, &ret, flags, pc);
288   -}
289   -
290   -static void
291 251 __trace_function(struct trace_array *tr,
292 252 unsigned long ip, unsigned long parent_ip,
293 253 unsigned long flags, int pc)
294 254 {
295   - if (!is_graph())
  255 + if (is_graph())
  256 + trace_graph_function(tr, ip, parent_ip, flags, pc);
  257 + else
296 258 trace_function(tr, ip, parent_ip, flags, pc);
297   - else {
298   - trace_graph_function(tr, parent_ip, flags, pc);
299   - trace_graph_function(tr, ip, flags, pc);
300   - }
301 259 }
302 260  
303 261 #else
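
func_prolog_dec() factors the common entry checks out of irqsoff_tracer_call(), irqsoff_graph_entry() and irqsoff_graph_return(). Per the comment above, it returns 1 only after data->disabled has been incremented, so a caller that gets 1 must balance it with atomic_dec(&data->disabled) once its tracing work is done; a 0 return means nothing was touched. A small sketch of that contract, with a plain counter standing in for the atomic and the per-cpu lookup elided:

    #include <stdio.h>

    struct cpu_data { int disabled; };

    /* Returns 1 and bumps the recursion counter when tracing may proceed;
     * returns 0 (counter untouched) when the event must be ignored. */
    static int prolog(struct cpu_data *data, int tracing_enabled)
    {
        if (!tracing_enabled)
            return 0;

        if (++data->disabled == 1)
            return 1;

        /* Someone is already inside: undo the bump and back off. */
        data->disabled--;
        return 0;
    }

    static void trace_one_event(struct cpu_data *data, int tracing_enabled)
    {
        if (!prolog(data, tracing_enabled))
            return;

        printf("recording event\n");
        data->disabled--;              /* balance the increment from prolog() */
    }

    int main(void)
    {
        struct cpu_data d = { 0 };
        trace_one_event(&d, 1);
        trace_one_event(&d, 0);
        return 0;
    }
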
kernel/trace/trace_sched_wakeup.c
... ... @@ -31,48 +31,98 @@
31 31 static arch_spinlock_t wakeup_lock =
32 32 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
33 33  
  34 +static void wakeup_reset(struct trace_array *tr);
34 35 static void __wakeup_reset(struct trace_array *tr);
  36 +static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
  37 +static void wakeup_graph_return(struct ftrace_graph_ret *trace);
35 38  
36 39 static int save_lat_flag;
37 40  
  41 +#define TRACE_DISPLAY_GRAPH 1
  42 +
  43 +static struct tracer_opt trace_opts[] = {
  44 +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
  45 + /* display latency trace as call graph */
  46 + { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
  47 +#endif
  48 + { } /* Empty entry */
  49 +};
  50 +
  51 +static struct tracer_flags tracer_flags = {
  52 + .val = 0,
  53 + .opts = trace_opts,
  54 +};
  55 +
  56 +#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)
  57 +
38 58 #ifdef CONFIG_FUNCTION_TRACER
  59 +
39 60 /*
40   - * irqsoff uses its own tracer function to keep the overhead down:
  61 + * Prologue for the wakeup function tracers.
  62 + *
  63 + * Returns 1 if it is OK to continue, and preemption
  64 + * is disabled and data->disabled is incremented.
  65 + * 0 if the trace is to be ignored, and preemption
  66 + * is not disabled and data->disabled is
  67 + * kept the same.
  68 + *
  69 + * Note, this function is also used outside this ifdef but
  70 + * inside the #ifdef of the function graph tracer below.
  71 + * This is OK, since the function graph tracer is
  72 + * dependent on the function tracer.
41 73 */
42   -static void
43   -wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
  74 +static int
  75 +func_prolog_preempt_disable(struct trace_array *tr,
  76 + struct trace_array_cpu **data,
  77 + int *pc)
44 78 {
45   - struct trace_array *tr = wakeup_trace;
46   - struct trace_array_cpu *data;
47   - unsigned long flags;
48 79 long disabled;
49 80 int cpu;
50   - int pc;
51 81  
52 82 if (likely(!wakeup_task))
53   - return;
  83 + return 0;
54 84  
55   - pc = preempt_count();
  85 + *pc = preempt_count();
56 86 preempt_disable_notrace();
57 87  
58 88 cpu = raw_smp_processor_id();
59 89 if (cpu != wakeup_current_cpu)
60 90 goto out_enable;
61 91  
62   - data = tr->data[cpu];
63   - disabled = atomic_inc_return(&data->disabled);
  92 + *data = tr->data[cpu];
  93 + disabled = atomic_inc_return(&(*data)->disabled);
64 94 if (unlikely(disabled != 1))
65 95 goto out;
66 96  
67   - local_irq_save(flags);
  97 + return 1;
68 98  
69   - trace_function(tr, ip, parent_ip, flags, pc);
  99 +out:
  100 + atomic_dec(&(*data)->disabled);
70 101  
  102 +out_enable:
  103 + preempt_enable_notrace();
  104 + return 0;
  105 +}
  106 +
  107 +/*
  108 + * wakeup uses its own tracer function to keep the overhead down:
  109 + */
  110 +static void
  111 +wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
  112 +{
  113 + struct trace_array *tr = wakeup_trace;
  114 + struct trace_array_cpu *data;
  115 + unsigned long flags;
  116 + int pc;
  117 +
  118 + if (!func_prolog_preempt_disable(tr, &data, &pc))
  119 + return;
  120 +
  121 + local_irq_save(flags);
  122 + trace_function(tr, ip, parent_ip, flags, pc);
71 123 local_irq_restore(flags);
72 124  
73   - out:
74 125 atomic_dec(&data->disabled);
75   - out_enable:
76 126 preempt_enable_notrace();
77 127 }
78 128  
... ... @@ -82,6 +132,156 @@
82 132 };
83 133 #endif /* CONFIG_FUNCTION_TRACER */
84 134  
  135 +static int start_func_tracer(int graph)
  136 +{
  137 + int ret;
  138 +
  139 + if (!graph)
  140 + ret = register_ftrace_function(&trace_ops);
  141 + else
  142 + ret = register_ftrace_graph(&wakeup_graph_return,
  143 + &wakeup_graph_entry);
  144 +
  145 + if (!ret && tracing_is_enabled())
  146 + tracer_enabled = 1;
  147 + else
  148 + tracer_enabled = 0;
  149 +
  150 + return ret;
  151 +}
  152 +
  153 +static void stop_func_tracer(int graph)
  154 +{
  155 + tracer_enabled = 0;
  156 +
  157 + if (!graph)
  158 + unregister_ftrace_function(&trace_ops);
  159 + else
  160 + unregister_ftrace_graph();
  161 +}
  162 +
  163 +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
  164 +static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
  165 +{
  166 +
  167 + if (!(bit & TRACE_DISPLAY_GRAPH))
  168 + return -EINVAL;
  169 +
  170 + if (!(is_graph() ^ set))
  171 + return 0;
  172 +
  173 + stop_func_tracer(!set);
  174 +
  175 + wakeup_reset(wakeup_trace);
  176 + tracing_max_latency = 0;
  177 +
  178 + return start_func_tracer(set);
  179 +}
  180 +
  181 +static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
  182 +{
  183 + struct trace_array *tr = wakeup_trace;
  184 + struct trace_array_cpu *data;
  185 + unsigned long flags;
  186 + int pc, ret = 0;
  187 +
  188 + if (!func_prolog_preempt_disable(tr, &data, &pc))
  189 + return 0;
  190 +
  191 + local_save_flags(flags);
  192 + ret = __trace_graph_entry(tr, trace, flags, pc);
  193 + atomic_dec(&data->disabled);
  194 + preempt_enable_notrace();
  195 +
  196 + return ret;
  197 +}
  198 +
  199 +static void wakeup_graph_return(struct ftrace_graph_ret *trace)
  200 +{
  201 + struct trace_array *tr = wakeup_trace;
  202 + struct trace_array_cpu *data;
  203 + unsigned long flags;
  204 + int pc;
  205 +
  206 + if (!func_prolog_preempt_disable(tr, &data, &pc))
  207 + return;
  208 +
  209 + local_save_flags(flags);
  210 + __trace_graph_return(tr, trace, flags, pc);
  211 + atomic_dec(&data->disabled);
  212 +
  213 + preempt_enable_notrace();
  214 + return;
  215 +}
  216 +
  217 +static void wakeup_trace_open(struct trace_iterator *iter)
  218 +{
  219 + if (is_graph())
  220 + graph_trace_open(iter);
  221 +}
  222 +
  223 +static void wakeup_trace_close(struct trace_iterator *iter)
  224 +{
  225 + if (iter->private)
  226 + graph_trace_close(iter);
  227 +}
  228 +
  229 +#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC)
  230 +
  231 +static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
  232 +{
  233 + /*
  234 + * In graph mode call the graph tracer output function,
  235 + * otherwise go with the TRACE_FN event handler
  236 + */
  237 + if (is_graph())
  238 + return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
  239 +
  240 + return TRACE_TYPE_UNHANDLED;
  241 +}
  242 +
  243 +static void wakeup_print_header(struct seq_file *s)
  244 +{
  245 + if (is_graph())
  246 + print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
  247 + else
  248 + trace_default_header(s);
  249 +}
  250 +
  251 +static void
  252 +__trace_function(struct trace_array *tr,
  253 + unsigned long ip, unsigned long parent_ip,
  254 + unsigned long flags, int pc)
  255 +{
  256 + if (is_graph())
  257 + trace_graph_function(tr, ip, parent_ip, flags, pc);
  258 + else
  259 + trace_function(tr, ip, parent_ip, flags, pc);
  260 +}
  261 +#else
  262 +#define __trace_function trace_function
  263 +
  264 +static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
  265 +{
  266 + return -EINVAL;
  267 +}
  268 +
  269 +static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
  270 +{
  271 + return -1;
  272 +}
  273 +
  274 +static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
  275 +{
  276 + return TRACE_TYPE_UNHANDLED;
  277 +}
  278 +
  279 +static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
  280 +static void wakeup_print_header(struct seq_file *s) { }
  281 +static void wakeup_trace_open(struct trace_iterator *iter) { }
  282 +static void wakeup_trace_close(struct trace_iterator *iter) { }
  283 +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
  284 +
85 285 /*
86 286 * Should this new latency be reported/recorded?
87 287 */
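
start_func_tracer() and stop_func_tracer() register and unregister either the plain function callback or the graph entry/return pair, and wakeup_set_flag() switches between them at runtime: it rejects any bit other than TRACE_DISPLAY_GRAPH, does nothing when the option already matches, and otherwise stops the variant being left, resets the trace, and starts the other one. A compact sketch of that toggle, with printf() stand-ins for the register/unregister calls and the reset step only noted in a comment:

    #include <stdio.h>

    static int graph_mode;          /* current state: 0 = function, 1 = graph */

    static int start_tracer(int graph)
    {
        /* Stand-in for register_ftrace_function() / register_ftrace_graph(). */
        printf("start %s tracer\n", graph ? "graph" : "function");
        return 0;
    }

    static void stop_tracer(int graph)
    {
        printf("stop %s tracer\n", graph ? "graph" : "function");
    }

    /* Toggle between the two variants, mirroring wakeup_set_flag(). */
    static int set_display_graph(int set)
    {
        if (!(graph_mode ^ set))    /* nothing to do */
            return 0;

        stop_tracer(!set);          /* stop the variant we are leaving */
        /* (the real code also resets the trace and max latency here)  */
        graph_mode = set;
        return start_tracer(set);   /* start the variant switched to   */
    }

    int main(void)
    {
        set_display_graph(1);
        set_display_graph(1);       /* already set: no-op */
        set_display_graph(0);
        return 0;
    }
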
... ... @@ -152,7 +352,7 @@
152 352 /* The task we are waiting for is waking up */
153 353 data = wakeup_trace->data[wakeup_cpu];
154 354  
155   - trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
  355 + __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
156 356 tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
157 357  
158 358 T0 = data->preempt_timestamp;
... ... @@ -252,7 +452,7 @@
252 452 * is not called by an assembly function (where as schedule is)
253 453 * it should be safe to use it here.
254 454 */
255   - trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
  455 + __trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
256 456  
257 457 out_locked:
258 458 arch_spin_unlock(&wakeup_lock);
259 459  
... ... @@ -303,13 +503,9 @@
303 503 */
304 504 smp_wmb();
305 505  
306   - register_ftrace_function(&trace_ops);
  506 + if (start_func_tracer(is_graph()))
  507 + printk(KERN_ERR "failed to start wakeup tracer\n");
307 508  
308   - if (tracing_is_enabled())
309   - tracer_enabled = 1;
310   - else
311   - tracer_enabled = 0;
312   -
313 509 return;
314 510 fail_deprobe_wake_new:
315 511 unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
... ... @@ -320,7 +516,7 @@
320 516 static void stop_wakeup_tracer(struct trace_array *tr)
321 517 {
322 518 tracer_enabled = 0;
323   - unregister_ftrace_function(&trace_ops);
  519 + stop_func_tracer(is_graph());
324 520 unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
325 521 unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
326 522 unregister_trace_sched_wakeup(probe_wakeup, NULL);
327 523  
... ... @@ -379,9 +575,15 @@
379 575 .start = wakeup_tracer_start,
380 576 .stop = wakeup_tracer_stop,
381 577 .print_max = 1,
  578 + .print_header = wakeup_print_header,
  579 + .print_line = wakeup_print_line,
  580 + .flags = &tracer_flags,
  581 + .set_flag = wakeup_set_flag,
382 582 #ifdef CONFIG_FTRACE_SELFTEST
383 583 .selftest = trace_selftest_startup_wakeup,
384 584 #endif
  585 + .open = wakeup_trace_open,
  586 + .close = wakeup_trace_close,
385 587 .use_max_tr = 1,
386 588 };
387 589  
388 590  
... ... @@ -394,9 +596,15 @@
394 596 .stop = wakeup_tracer_stop,
395 597 .wait_pipe = poll_wait_pipe,
396 598 .print_max = 1,
  599 + .print_header = wakeup_print_header,
  600 + .print_line = wakeup_print_line,
  601 + .flags = &tracer_flags,
  602 + .set_flag = wakeup_set_flag,
397 603 #ifdef CONFIG_FTRACE_SELFTEST
398 604 .selftest = trace_selftest_startup_wakeup,
399 605 #endif
  606 + .open = wakeup_trace_open,
  607 + .close = wakeup_trace_close,
400 608 .use_max_tr = 1,
401 609 };
402 610