Commit 4cd0332db7e8f57cc082bab11d82c064a9721737

Authored by Ingo Molnar

Merge branch 'mainline/function-graph' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/function-graph-tracer

Showing 5 changed files

arch/x86/include/asm/ftrace.h
... ... @@ -55,30 +55,5 @@
55 55 #endif /* __ASSEMBLY__ */
56 56 #endif /* CONFIG_FUNCTION_TRACER */
57 57  
58   -#ifdef CONFIG_FUNCTION_GRAPH_TRACER
59   -
60   -#ifndef __ASSEMBLY__
61   -
62   -/*
63   - * Stack of return addresses for functions
64   - * of a thread.
65   - * Used in struct thread_info
66   - */
67   -struct ftrace_ret_stack {
68   - unsigned long ret;
69   - unsigned long func;
70   - unsigned long long calltime;
71   -};
72   -
73   -/*
74   - * Primary handler of a function return.
75   - * It relies on ftrace_return_to_handler.
76   - * Defined in entry_32/64.S
77   - */
78   -extern void return_to_handler(void);
79   -
80   -#endif /* __ASSEMBLY__ */
81   -#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
82   -
83 58 #endif /* _ASM_X86_FTRACE_H */
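
(The CONFIG_FUNCTION_GRAPH_TRACER block removed here is not dropped: the same struct ftrace_ret_stack and return_to_handler declarations reappear in include/linux/ftrace.h below, making them visible to generic code rather than to x86 alone.)
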
arch/x86/kernel/dumpstack.c
... ... @@ -10,6 +10,7 @@
10 10 #include <linux/kdebug.h>
11 11 #include <linux/module.h>
12 12 #include <linux/ptrace.h>
  13 +#include <linux/ftrace.h>
13 14 #include <linux/kexec.h>
14 15 #include <linux/bug.h>
15 16 #include <linux/nmi.h>
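
(dumpstack.c picks up linux/ftrace.h here, most likely because its stack-annotation code references return_to_handler, whose declaration moves into that header in this same commit.)
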
arch/x86/kernel/ftrace.c
... ... @@ -369,80 +369,7 @@
369 369  
370 370 #endif /* !CONFIG_DYNAMIC_FTRACE */
371 371  
372   -/* Add a function return address to the trace stack on thread info. */
373   -static int push_return_trace(unsigned long ret, unsigned long long time,
374   - unsigned long func, int *depth)
375   -{
376   - int index;
377   -
378   - if (!current->ret_stack)
379   - return -EBUSY;
380   -
381   - /* The return trace stack is full */
382   - if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
383   - atomic_inc(&current->trace_overrun);
384   - return -EBUSY;
385   - }
386   -
387   - index = ++current->curr_ret_stack;
388   - barrier();
389   - current->ret_stack[index].ret = ret;
390   - current->ret_stack[index].func = func;
391   - current->ret_stack[index].calltime = time;
392   - *depth = index;
393   -
394   - return 0;
395   -}
396   -
397   -/* Retrieve a function return address from the trace stack on thread info. */
398   -static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
399   -{
400   - int index;
401   -
402   - index = current->curr_ret_stack;
403   -
404   - if (unlikely(index < 0)) {
405   - ftrace_graph_stop();
406   - WARN_ON(1);
407   - /* Might as well panic, otherwise we have nowhere to go */
408   - *ret = (unsigned long)panic;
409   - return;
410   - }
411   -
412   - *ret = current->ret_stack[index].ret;
413   - trace->func = current->ret_stack[index].func;
414   - trace->calltime = current->ret_stack[index].calltime;
415   - trace->overrun = atomic_read(&current->trace_overrun);
416   - trace->depth = index;
417   - barrier();
418   - current->curr_ret_stack--;
419   -
420   -}
421   -
422 372 /*
423   - * Send the trace to the ring-buffer.
424   - * @return the original return address.
425   - */
426   -unsigned long ftrace_return_to_handler(void)
427   -{
428   - struct ftrace_graph_ret trace;
429   - unsigned long ret;
430   -
431   - pop_return_trace(&trace, &ret);
432   - trace.rettime = cpu_clock(raw_smp_processor_id());
433   - ftrace_graph_return(&trace);
434   -
435   - if (unlikely(!ret)) {
436   - ftrace_graph_stop();
437   - WARN_ON(1);
438   - /* Might as well panic. What else to do? */
439   - ret = (unsigned long)panic;
440   - }
441   -
442   - return ret;
443   -}
444   -
445   -/*
446 373 * Hook the return address and push it onto the stack of return addrs
447 374 * in current thread info.
448 375 */
... ... @@ -494,7 +421,7 @@
494 421  
495 422 calltime = cpu_clock(raw_smp_processor_id());
496 423  
497   - if (push_return_trace(old, calltime,
  424 + if (ftrace_push_return_trace(old, calltime,
498 425 self_addr, &trace.depth) == -EBUSY) {
499 426 *parent = old;
500 427 return;
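
(The only change at this call site is the rename: the formerly static push_return_trace() becomes ftrace_push_return_trace(), now declared in include/linux/ftrace.h and defined in kernel/trace/trace_functions_graph.c, both shown below.)
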
include/linux/ftrace.h
... ... @@ -404,6 +404,30 @@
404 404 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
405 405  
406 406 /*
  407 + * Stack of return addresses for functions
  408 + * of a thread.
  409 + * Used in struct thread_info
  410 + */
  411 +struct ftrace_ret_stack {
  412 + unsigned long ret;
  413 + unsigned long func;
  414 + unsigned long long calltime;
  415 +};
  416 +
  417 +/*
  418 + * Primary handler of a function return.
  419 + * It relies on ftrace_return_to_handler.
  420 + * Defined in entry_32/64.S
  421 + */
  422 +extern void return_to_handler(void);
  423 +
  424 +extern int
  425 +ftrace_push_return_trace(unsigned long ret, unsigned long long time,
  426 + unsigned long func, int *depth);
  427 +extern void
  428 +ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret);
  429 +
  430 +/*
407 431 * Sometimes we don't want to trace a function with the function
408 432 * graph tracer but we want it to keep being traced by the usual function
409 433 * tracer if the function graph tracer is not configured.
kernel/trace/trace_functions_graph.c
... ... @@ -50,6 +50,81 @@
50 50 /* pid on the last trace processed */
51 51  
52 52  
  53 +/* Add a function return address to the trace stack on thread info. */
  54 +int
  55 +ftrace_push_return_trace(unsigned long ret, unsigned long long time,
  56 + unsigned long func, int *depth)
  57 +{
  58 + int index;
  59 +
  60 + if (!current->ret_stack)
  61 + return -EBUSY;
  62 +
  63 + /* The return trace stack is full */
  64 + if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
  65 + atomic_inc(&current->trace_overrun);
  66 + return -EBUSY;
  67 + }
  68 +
  69 + index = ++current->curr_ret_stack;
  70 + barrier();
  71 + current->ret_stack[index].ret = ret;
  72 + current->ret_stack[index].func = func;
  73 + current->ret_stack[index].calltime = time;
  74 + *depth = index;
  75 +
  76 + return 0;
  77 +}
  78 +
  79 +/* Retrieve a function return address from the trace stack on thread info. */
  80 +void
  81 +ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
  82 +{
  83 + int index;
  84 +
  85 + index = current->curr_ret_stack;
  86 +
  87 + if (unlikely(index < 0)) {
  88 + ftrace_graph_stop();
  89 + WARN_ON(1);
  90 + /* Might as well panic, otherwise we have nowhere to go */
  91 + *ret = (unsigned long)panic;
  92 + return;
  93 + }
  94 +
  95 + *ret = current->ret_stack[index].ret;
  96 + trace->func = current->ret_stack[index].func;
  97 + trace->calltime = current->ret_stack[index].calltime;
  98 + trace->overrun = atomic_read(&current->trace_overrun);
  99 + trace->depth = index;
  100 + barrier();
  101 + current->curr_ret_stack--;
  102 +
  103 +}
  104 +
  105 +/*
  106 + * Send the trace to the ring-buffer.
  107 + * @return the original return address.
  108 + */
  109 +unsigned long ftrace_return_to_handler(void)
  110 +{
  111 + struct ftrace_graph_ret trace;
  112 + unsigned long ret;
  113 +
  114 + ftrace_pop_return_trace(&trace, &ret);
  115 + trace.rettime = cpu_clock(raw_smp_processor_id());
  116 + ftrace_graph_return(&trace);
  117 +
  118 + if (unlikely(!ret)) {
  119 + ftrace_graph_stop();
  120 + WARN_ON(1);
  121 + /* Might as well panic. What else to do? */
  122 + ret = (unsigned long)panic;
  123 + }
  124 +
  125 + return ret;
  126 +}
  127 +
53 128 static int graph_trace_init(struct trace_array *tr)
54 129 {
55 130 int ret = register_ftrace_graph(&trace_graph_return,
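
For readers who want to see the stack discipline these helpers enforce, here is a minimal userspace model of the push/pop pair. Everything in it is an illustrative stand-in, not kernel code: the four-entry depth, the global task-state variables, and main() are all simplifications. It keeps only the overflow accounting, the reserve-then-fill ordering on push, and the read-then-release ordering on pop (the points where the kernel versions place barrier()).

#include <stdio.h>

#define RETFUNC_DEPTH 4   /* tiny stand-in for the kernel's FTRACE_RETFUNC_DEPTH */

/* models struct ftrace_ret_stack */
struct ret_stack_entry {
	unsigned long ret;            /* hooked original return address */
	unsigned long func;           /* traced function */
	unsigned long long calltime;
};

/* stand-ins for the per-task fields the kernel keeps on the task */
static struct ret_stack_entry ret_stack[RETFUNC_DEPTH];
static int curr_ret_stack = -1;   /* -1 means empty, as in the kernel */
static int trace_overrun;

static int push_return_trace(unsigned long ret, unsigned long long time,
			     unsigned long func, int *depth)
{
	/* stack full: count the overrun and refuse (kernel returns -EBUSY) */
	if (curr_ret_stack == RETFUNC_DEPTH - 1) {
		trace_overrun++;
		return -1;
	}
	/* reserve the slot, then fill it; the kernel puts barrier() between */
	curr_ret_stack++;
	ret_stack[curr_ret_stack].ret = ret;
	ret_stack[curr_ret_stack].func = func;
	ret_stack[curr_ret_stack].calltime = time;
	*depth = curr_ret_stack;
	return 0;
}

static unsigned long pop_return_trace(void)
{
	/* read the slot out, then release it; the kernel's barrier() sits
	 * between these reads and the curr_ret_stack decrement */
	unsigned long orig_ret = ret_stack[curr_ret_stack].ret;

	printf("exit  func=%#lx calltime=%llu depth=%d overrun=%d\n",
	       ret_stack[curr_ret_stack].func,
	       ret_stack[curr_ret_stack].calltime,
	       curr_ret_stack, trace_overrun);
	curr_ret_stack--;
	return orig_ret;
}

int main(void)
{
	int depth;

	/* simulate six nested calls; only four fit on the stack */
	for (unsigned long f = 1; f <= 6; f++) {
		if (push_return_trace(0x1000 + f, f * 10, f, &depth) == 0)
			printf("enter func=%#lx depth=%d\n", f, depth);
		else
			printf("enter func=%#lx rejected (stack full)\n", f);
	}

	/* unwind: each pop must restore the matching original return address */
	while (curr_ret_stack >= 0)
		printf("restored ret=%#lx\n", pop_return_trace());

	return 0;
}

Compiled with any C99 compiler, it rejects the fifth and sixth pushes once the stack fills (incrementing the overrun count that ftrace_pop_return_trace() reports in trace->overrun) and unwinds in LIFO order, which is the property ftrace_return_to_handler() relies on to hand back the correct original return address.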