Commit 4cd0332db7e8f57cc082bab11d82c064a9721737
Exists in master and in 7 other branches
Merge branch 'mainline/function-graph' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/function-graph-tracer
Showing 5 changed files
arch/x86/include/asm/ftrace.h
1 | #ifndef _ASM_X86_FTRACE_H | 1 | #ifndef _ASM_X86_FTRACE_H |
2 | #define _ASM_X86_FTRACE_H | 2 | #define _ASM_X86_FTRACE_H |
3 | 3 | ||
4 | #ifdef __ASSEMBLY__ | 4 | #ifdef __ASSEMBLY__ |
5 | 5 | ||
6 | .macro MCOUNT_SAVE_FRAME | 6 | .macro MCOUNT_SAVE_FRAME |
7 | /* taken from glibc */ | 7 | /* taken from glibc */ |
8 | subq $0x38, %rsp | 8 | subq $0x38, %rsp |
9 | movq %rax, (%rsp) | 9 | movq %rax, (%rsp) |
10 | movq %rcx, 8(%rsp) | 10 | movq %rcx, 8(%rsp) |
11 | movq %rdx, 16(%rsp) | 11 | movq %rdx, 16(%rsp) |
12 | movq %rsi, 24(%rsp) | 12 | movq %rsi, 24(%rsp) |
13 | movq %rdi, 32(%rsp) | 13 | movq %rdi, 32(%rsp) |
14 | movq %r8, 40(%rsp) | 14 | movq %r8, 40(%rsp) |
15 | movq %r9, 48(%rsp) | 15 | movq %r9, 48(%rsp) |
16 | .endm | 16 | .endm |
17 | 17 | ||
18 | .macro MCOUNT_RESTORE_FRAME | 18 | .macro MCOUNT_RESTORE_FRAME |
19 | movq 48(%rsp), %r9 | 19 | movq 48(%rsp), %r9 |
20 | movq 40(%rsp), %r8 | 20 | movq 40(%rsp), %r8 |
21 | movq 32(%rsp), %rdi | 21 | movq 32(%rsp), %rdi |
22 | movq 24(%rsp), %rsi | 22 | movq 24(%rsp), %rsi |
23 | movq 16(%rsp), %rdx | 23 | movq 16(%rsp), %rdx |
24 | movq 8(%rsp), %rcx | 24 | movq 8(%rsp), %rcx |
25 | movq (%rsp), %rax | 25 | movq (%rsp), %rax |
26 | addq $0x38, %rsp | 26 | addq $0x38, %rsp |
27 | .endm | 27 | .endm |
28 | 28 | ||
29 | #endif | 29 | #endif |
30 | 30 | ||
31 | #ifdef CONFIG_FUNCTION_TRACER | 31 | #ifdef CONFIG_FUNCTION_TRACER |
32 | #define MCOUNT_ADDR ((long)(mcount)) | 32 | #define MCOUNT_ADDR ((long)(mcount)) |
33 | #define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ | 33 | #define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ |
34 | 34 | ||
35 | #ifndef __ASSEMBLY__ | 35 | #ifndef __ASSEMBLY__ |
36 | extern void mcount(void); | 36 | extern void mcount(void); |
37 | 37 | ||
38 | static inline unsigned long ftrace_call_adjust(unsigned long addr) | 38 | static inline unsigned long ftrace_call_adjust(unsigned long addr) |
39 | { | 39 | { |
40 | /* | 40 | /* |
41 | * call mcount is "e8 <4 byte offset>" | 41 | * call mcount is "e8 <4 byte offset>" |
42 | * The addr points to the 4 byte offset and the caller of this | 42 | * The addr points to the 4 byte offset and the caller of this |
43 | * function wants the pointer to e8. Simply subtract one. | 43 | * function wants the pointer to e8. Simply subtract one. |
44 | */ | 44 | */ |
45 | return addr - 1; | 45 | return addr - 1; |
46 | } | 46 | } |
47 | 47 | ||
48 | #ifdef CONFIG_DYNAMIC_FTRACE | 48 | #ifdef CONFIG_DYNAMIC_FTRACE |
49 | 49 | ||
50 | struct dyn_arch_ftrace { | 50 | struct dyn_arch_ftrace { |
51 | /* No extra data needed for x86 */ | 51 | /* No extra data needed for x86 */ |
52 | }; | 52 | }; |
53 | 53 | ||
54 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 54 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
55 | #endif /* __ASSEMBLY__ */ | 55 | #endif /* __ASSEMBLY__ */ |
56 | #endif /* CONFIG_FUNCTION_TRACER */ | 56 | #endif /* CONFIG_FUNCTION_TRACER */ |
57 | 57 | ||
58 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
59 | |||
60 | #ifndef __ASSEMBLY__ | ||
61 | |||
62 | /* | ||
63 | * Stack of return addresses for functions | ||
64 | * of a thread. | ||
65 | * Used in struct thread_info | ||
66 | */ | ||
67 | struct ftrace_ret_stack { | ||
68 | unsigned long ret; | ||
69 | unsigned long func; | ||
70 | unsigned long long calltime; | ||
71 | }; | ||
72 | |||
73 | /* | ||
74 | * Primary handler of a function return. | ||
75 | * It relies on ftrace_return_to_handler. | ||
76 | * Defined in entry_32/64.S | ||
77 | */ | ||
78 | extern void return_to_handler(void); | ||
79 | |||
80 | #endif /* __ASSEMBLY__ */ | ||
81 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
82 | |||
83 | #endif /* _ASM_X86_FTRACE_H */ | 58 | #endif /* _ASM_X86_FTRACE_H */ |
84 | 59 |
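ftrace_call_adjust() above relies on a small encoding detail: "call mcount" is "e8 <4 byte offset>", and the address recorded at build time points at the 4-byte offset rather than at the opcode, so stepping back one byte lands on the e8. A minimal user-space sketch of that arithmetic, using a hypothetical address:

#include <stdio.h>

/* Sketch of ftrace_call_adjust(): addr points at the rel32 operand of
 * a "call mcount" (e8 <4-byte offset>); the opcode is one byte earlier. */
static unsigned long call_adjust(unsigned long addr)
{
        return addr - 1;
}

int main(void)
{
        unsigned long reloc = 0xffffffff81000001UL;     /* hypothetical */
        printf("call instruction starts at %#lx\n", call_adjust(reloc));
        return 0;
}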
arch/x86/kernel/dumpstack.c
1 | /* | 1 | /* |
2 | * Copyright (C) 1991, 1992 Linus Torvalds | 2 | * Copyright (C) 1991, 1992 Linus Torvalds |
3 | * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs | 3 | * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs |
4 | */ | 4 | */ |
5 | #include <linux/kallsyms.h> | 5 | #include <linux/kallsyms.h> |
6 | #include <linux/kprobes.h> | 6 | #include <linux/kprobes.h> |
7 | #include <linux/uaccess.h> | 7 | #include <linux/uaccess.h> |
8 | #include <linux/utsname.h> | 8 | #include <linux/utsname.h> |
9 | #include <linux/hardirq.h> | 9 | #include <linux/hardirq.h> |
10 | #include <linux/kdebug.h> | 10 | #include <linux/kdebug.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/ptrace.h> | 12 | #include <linux/ptrace.h> |
13 | #include <linux/ftrace.h> | ||
13 | #include <linux/kexec.h> | 14 | #include <linux/kexec.h> |
14 | #include <linux/bug.h> | 15 | #include <linux/bug.h> |
15 | #include <linux/nmi.h> | 16 | #include <linux/nmi.h> |
16 | #include <linux/sysfs.h> | 17 | #include <linux/sysfs.h> |
17 | #include <linux/ftrace.h> | 18 | #include <linux/ftrace.h> |
18 | 19 | ||
19 | #include <asm/stacktrace.h> | 20 | #include <asm/stacktrace.h> |
20 | 21 | ||
21 | #include "dumpstack.h" | 22 | #include "dumpstack.h" |
22 | 23 | ||
23 | int panic_on_unrecovered_nmi; | 24 | int panic_on_unrecovered_nmi; |
24 | unsigned int code_bytes = 64; | 25 | unsigned int code_bytes = 64; |
25 | int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE; | 26 | int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE; |
26 | static int die_counter; | 27 | static int die_counter; |
27 | 28 | ||
28 | void printk_address(unsigned long address, int reliable) | 29 | void printk_address(unsigned long address, int reliable) |
29 | { | 30 | { |
30 | printk(" [<%p>] %s%pS\n", (void *) address, | 31 | printk(" [<%p>] %s%pS\n", (void *) address, |
31 | reliable ? "" : "? ", (void *) address); | 32 | reliable ? "" : "? ", (void *) address); |
32 | } | 33 | } |
33 | 34 | ||
34 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 35 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
35 | static void | 36 | static void |
36 | print_ftrace_graph_addr(unsigned long addr, void *data, | 37 | print_ftrace_graph_addr(unsigned long addr, void *data, |
37 | const struct stacktrace_ops *ops, | 38 | const struct stacktrace_ops *ops, |
38 | struct thread_info *tinfo, int *graph) | 39 | struct thread_info *tinfo, int *graph) |
39 | { | 40 | { |
40 | struct task_struct *task = tinfo->task; | 41 | struct task_struct *task = tinfo->task; |
41 | unsigned long ret_addr; | 42 | unsigned long ret_addr; |
42 | int index = task->curr_ret_stack; | 43 | int index = task->curr_ret_stack; |
43 | 44 | ||
44 | if (addr != (unsigned long)return_to_handler) | 45 | if (addr != (unsigned long)return_to_handler) |
45 | return; | 46 | return; |
46 | 47 | ||
47 | if (!task->ret_stack || index < *graph) | 48 | if (!task->ret_stack || index < *graph) |
48 | return; | 49 | return; |
49 | 50 | ||
50 | index -= *graph; | 51 | index -= *graph; |
51 | ret_addr = task->ret_stack[index].ret; | 52 | ret_addr = task->ret_stack[index].ret; |
52 | 53 | ||
53 | ops->address(data, ret_addr, 1); | 54 | ops->address(data, ret_addr, 1); |
54 | 55 | ||
55 | (*graph)++; | 56 | (*graph)++; |
56 | } | 57 | } |
57 | #else | 58 | #else |
58 | static inline void | 59 | static inline void |
59 | print_ftrace_graph_addr(unsigned long addr, void *data, | 60 | print_ftrace_graph_addr(unsigned long addr, void *data, |
60 | const struct stacktrace_ops *ops, | 61 | const struct stacktrace_ops *ops, |
61 | struct thread_info *tinfo, int *graph) | 62 | struct thread_info *tinfo, int *graph) |
62 | { } | 63 | { } |
63 | #endif | 64 | #endif |
64 | 65 | ||
65 | /* | 66 | /* |
66 | * x86-64 can have up to three kernel stacks: | 67 | * x86-64 can have up to three kernel stacks: |
67 | * process stack | 68 | * process stack |
68 | * interrupt stack | 69 | * interrupt stack |
69 | * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack | 70 | * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack |
70 | */ | 71 | */ |
71 | 72 | ||
72 | static inline int valid_stack_ptr(struct thread_info *tinfo, | 73 | static inline int valid_stack_ptr(struct thread_info *tinfo, |
73 | void *p, unsigned int size, void *end) | 74 | void *p, unsigned int size, void *end) |
74 | { | 75 | { |
75 | void *t = tinfo; | 76 | void *t = tinfo; |
76 | if (end) { | 77 | if (end) { |
77 | if (p < end && p >= (end-THREAD_SIZE)) | 78 | if (p < end && p >= (end-THREAD_SIZE)) |
78 | return 1; | 79 | return 1; |
79 | else | 80 | else |
80 | return 0; | 81 | return 0; |
81 | } | 82 | } |
82 | return p > t && p < t + THREAD_SIZE - size; | 83 | return p > t && p < t + THREAD_SIZE - size; |
83 | } | 84 | } |
84 | 85 | ||
85 | unsigned long | 86 | unsigned long |
86 | print_context_stack(struct thread_info *tinfo, | 87 | print_context_stack(struct thread_info *tinfo, |
87 | unsigned long *stack, unsigned long bp, | 88 | unsigned long *stack, unsigned long bp, |
88 | const struct stacktrace_ops *ops, void *data, | 89 | const struct stacktrace_ops *ops, void *data, |
89 | unsigned long *end, int *graph) | 90 | unsigned long *end, int *graph) |
90 | { | 91 | { |
91 | struct stack_frame *frame = (struct stack_frame *)bp; | 92 | struct stack_frame *frame = (struct stack_frame *)bp; |
92 | 93 | ||
93 | while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) { | 94 | while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) { |
94 | unsigned long addr; | 95 | unsigned long addr; |
95 | 96 | ||
96 | addr = *stack; | 97 | addr = *stack; |
97 | if (__kernel_text_address(addr)) { | 98 | if (__kernel_text_address(addr)) { |
98 | if ((unsigned long) stack == bp + sizeof(long)) { | 99 | if ((unsigned long) stack == bp + sizeof(long)) { |
99 | ops->address(data, addr, 1); | 100 | ops->address(data, addr, 1); |
100 | frame = frame->next_frame; | 101 | frame = frame->next_frame; |
101 | bp = (unsigned long) frame; | 102 | bp = (unsigned long) frame; |
102 | } else { | 103 | } else { |
103 | ops->address(data, addr, bp == 0); | 104 | ops->address(data, addr, bp == 0); |
104 | } | 105 | } |
105 | print_ftrace_graph_addr(addr, data, ops, tinfo, graph); | 106 | print_ftrace_graph_addr(addr, data, ops, tinfo, graph); |
106 | } | 107 | } |
107 | stack++; | 108 | stack++; |
108 | } | 109 | } |
109 | return bp; | 110 | return bp; |
110 | } | 111 | } |
111 | 112 | ||
112 | 113 | ||
113 | static void | 114 | static void |
114 | print_trace_warning_symbol(void *data, char *msg, unsigned long symbol) | 115 | print_trace_warning_symbol(void *data, char *msg, unsigned long symbol) |
115 | { | 116 | { |
116 | printk(data); | 117 | printk(data); |
117 | print_symbol(msg, symbol); | 118 | print_symbol(msg, symbol); |
118 | printk("\n"); | 119 | printk("\n"); |
119 | } | 120 | } |
120 | 121 | ||
121 | static void print_trace_warning(void *data, char *msg) | 122 | static void print_trace_warning(void *data, char *msg) |
122 | { | 123 | { |
123 | printk("%s%s\n", (char *)data, msg); | 124 | printk("%s%s\n", (char *)data, msg); |
124 | } | 125 | } |
125 | 126 | ||
126 | static int print_trace_stack(void *data, char *name) | 127 | static int print_trace_stack(void *data, char *name) |
127 | { | 128 | { |
128 | printk("%s <%s> ", (char *)data, name); | 129 | printk("%s <%s> ", (char *)data, name); |
129 | return 0; | 130 | return 0; |
130 | } | 131 | } |
131 | 132 | ||
132 | /* | 133 | /* |
133 | * Print one address/symbol entry per line. | 134 | * Print one address/symbol entry per line. |
134 | */ | 135 | */ |
135 | static void print_trace_address(void *data, unsigned long addr, int reliable) | 136 | static void print_trace_address(void *data, unsigned long addr, int reliable) |
136 | { | 137 | { |
137 | touch_nmi_watchdog(); | 138 | touch_nmi_watchdog(); |
138 | printk(data); | 139 | printk(data); |
139 | printk_address(addr, reliable); | 140 | printk_address(addr, reliable); |
140 | } | 141 | } |
141 | 142 | ||
142 | static const struct stacktrace_ops print_trace_ops = { | 143 | static const struct stacktrace_ops print_trace_ops = { |
143 | .warning = print_trace_warning, | 144 | .warning = print_trace_warning, |
144 | .warning_symbol = print_trace_warning_symbol, | 145 | .warning_symbol = print_trace_warning_symbol, |
145 | .stack = print_trace_stack, | 146 | .stack = print_trace_stack, |
146 | .address = print_trace_address, | 147 | .address = print_trace_address, |
147 | }; | 148 | }; |
148 | 149 | ||
149 | void | 150 | void |
150 | show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, | 151 | show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, |
151 | unsigned long *stack, unsigned long bp, char *log_lvl) | 152 | unsigned long *stack, unsigned long bp, char *log_lvl) |
152 | { | 153 | { |
153 | printk("%sCall Trace:\n", log_lvl); | 154 | printk("%sCall Trace:\n", log_lvl); |
154 | dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl); | 155 | dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl); |
155 | } | 156 | } |
156 | 157 | ||
157 | void show_trace(struct task_struct *task, struct pt_regs *regs, | 158 | void show_trace(struct task_struct *task, struct pt_regs *regs, |
158 | unsigned long *stack, unsigned long bp) | 159 | unsigned long *stack, unsigned long bp) |
159 | { | 160 | { |
160 | show_trace_log_lvl(task, regs, stack, bp, ""); | 161 | show_trace_log_lvl(task, regs, stack, bp, ""); |
161 | } | 162 | } |
162 | 163 | ||
163 | void show_stack(struct task_struct *task, unsigned long *sp) | 164 | void show_stack(struct task_struct *task, unsigned long *sp) |
164 | { | 165 | { |
165 | show_stack_log_lvl(task, NULL, sp, 0, ""); | 166 | show_stack_log_lvl(task, NULL, sp, 0, ""); |
166 | } | 167 | } |
167 | 168 | ||
168 | /* | 169 | /* |
169 | * The architecture-independent dump_stack generator | 170 | * The architecture-independent dump_stack generator |
170 | */ | 171 | */ |
171 | void dump_stack(void) | 172 | void dump_stack(void) |
172 | { | 173 | { |
173 | unsigned long bp = 0; | 174 | unsigned long bp = 0; |
174 | unsigned long stack; | 175 | unsigned long stack; |
175 | 176 | ||
176 | #ifdef CONFIG_FRAME_POINTER | 177 | #ifdef CONFIG_FRAME_POINTER |
177 | if (!bp) | 178 | if (!bp) |
178 | get_bp(bp); | 179 | get_bp(bp); |
179 | #endif | 180 | #endif |
180 | 181 | ||
181 | printk("Pid: %d, comm: %.20s %s %s %.*s\n", | 182 | printk("Pid: %d, comm: %.20s %s %s %.*s\n", |
182 | current->pid, current->comm, print_tainted(), | 183 | current->pid, current->comm, print_tainted(), |
183 | init_utsname()->release, | 184 | init_utsname()->release, |
184 | (int)strcspn(init_utsname()->version, " "), | 185 | (int)strcspn(init_utsname()->version, " "), |
185 | init_utsname()->version); | 186 | init_utsname()->version); |
186 | show_trace(NULL, NULL, &stack, bp); | 187 | show_trace(NULL, NULL, &stack, bp); |
187 | } | 188 | } |
188 | EXPORT_SYMBOL(dump_stack); | 189 | EXPORT_SYMBOL(dump_stack); |
189 | 190 | ||
190 | static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED; | 191 | static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED; |
191 | static int die_owner = -1; | 192 | static int die_owner = -1; |
192 | static unsigned int die_nest_count; | 193 | static unsigned int die_nest_count; |
193 | 194 | ||
194 | unsigned __kprobes long oops_begin(void) | 195 | unsigned __kprobes long oops_begin(void) |
195 | { | 196 | { |
196 | int cpu; | 197 | int cpu; |
197 | unsigned long flags; | 198 | unsigned long flags; |
198 | 199 | ||
199 | /* notify the hw-branch tracer so it may disable tracing and | 200 | /* notify the hw-branch tracer so it may disable tracing and |
200 | add the last trace to the trace buffer - | 201 | add the last trace to the trace buffer - |
201 | the earlier this happens, the more useful the trace. */ | 202 | the earlier this happens, the more useful the trace. */ |
202 | trace_hw_branch_oops(); | 203 | trace_hw_branch_oops(); |
203 | 204 | ||
204 | oops_enter(); | 205 | oops_enter(); |
205 | 206 | ||
206 | /* racy, but better than risking deadlock. */ | 207 | /* racy, but better than risking deadlock. */ |
207 | raw_local_irq_save(flags); | 208 | raw_local_irq_save(flags); |
208 | cpu = smp_processor_id(); | 209 | cpu = smp_processor_id(); |
209 | if (!__raw_spin_trylock(&die_lock)) { | 210 | if (!__raw_spin_trylock(&die_lock)) { |
210 | if (cpu == die_owner) | 211 | if (cpu == die_owner) |
211 | /* nested oops. should stop eventually */; | 212 | /* nested oops. should stop eventually */; |
212 | else | 213 | else |
213 | __raw_spin_lock(&die_lock); | 214 | __raw_spin_lock(&die_lock); |
214 | } | 215 | } |
215 | die_nest_count++; | 216 | die_nest_count++; |
216 | die_owner = cpu; | 217 | die_owner = cpu; |
217 | console_verbose(); | 218 | console_verbose(); |
218 | bust_spinlocks(1); | 219 | bust_spinlocks(1); |
219 | return flags; | 220 | return flags; |
220 | } | 221 | } |
221 | 222 | ||
222 | void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) | 223 | void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) |
223 | { | 224 | { |
224 | if (regs && kexec_should_crash(current)) | 225 | if (regs && kexec_should_crash(current)) |
225 | crash_kexec(regs); | 226 | crash_kexec(regs); |
226 | 227 | ||
227 | bust_spinlocks(0); | 228 | bust_spinlocks(0); |
228 | die_owner = -1; | 229 | die_owner = -1; |
229 | add_taint(TAINT_DIE); | 230 | add_taint(TAINT_DIE); |
230 | die_nest_count--; | 231 | die_nest_count--; |
231 | if (!die_nest_count) | 232 | if (!die_nest_count) |
232 | /* Nest count reaches zero, release the lock. */ | 233 | /* Nest count reaches zero, release the lock. */ |
233 | __raw_spin_unlock(&die_lock); | 234 | __raw_spin_unlock(&die_lock); |
234 | raw_local_irq_restore(flags); | 235 | raw_local_irq_restore(flags); |
235 | oops_exit(); | 236 | oops_exit(); |
236 | 237 | ||
237 | if (!signr) | 238 | if (!signr) |
238 | return; | 239 | return; |
239 | if (in_interrupt()) | 240 | if (in_interrupt()) |
240 | panic("Fatal exception in interrupt"); | 241 | panic("Fatal exception in interrupt"); |
241 | if (panic_on_oops) | 242 | if (panic_on_oops) |
242 | panic("Fatal exception"); | 243 | panic("Fatal exception"); |
243 | do_exit(signr); | 244 | do_exit(signr); |
244 | } | 245 | } |
245 | 246 | ||
246 | int __kprobes __die(const char *str, struct pt_regs *regs, long err) | 247 | int __kprobes __die(const char *str, struct pt_regs *regs, long err) |
247 | { | 248 | { |
248 | #ifdef CONFIG_X86_32 | 249 | #ifdef CONFIG_X86_32 |
249 | unsigned short ss; | 250 | unsigned short ss; |
250 | unsigned long sp; | 251 | unsigned long sp; |
251 | #endif | 252 | #endif |
252 | printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter); | 253 | printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter); |
253 | #ifdef CONFIG_PREEMPT | 254 | #ifdef CONFIG_PREEMPT |
254 | printk("PREEMPT "); | 255 | printk("PREEMPT "); |
255 | #endif | 256 | #endif |
256 | #ifdef CONFIG_SMP | 257 | #ifdef CONFIG_SMP |
257 | printk("SMP "); | 258 | printk("SMP "); |
258 | #endif | 259 | #endif |
259 | #ifdef CONFIG_DEBUG_PAGEALLOC | 260 | #ifdef CONFIG_DEBUG_PAGEALLOC |
260 | printk("DEBUG_PAGEALLOC"); | 261 | printk("DEBUG_PAGEALLOC"); |
261 | #endif | 262 | #endif |
262 | printk("\n"); | 263 | printk("\n"); |
263 | sysfs_printk_last_file(); | 264 | sysfs_printk_last_file(); |
264 | if (notify_die(DIE_OOPS, str, regs, err, | 265 | if (notify_die(DIE_OOPS, str, regs, err, |
265 | current->thread.trap_no, SIGSEGV) == NOTIFY_STOP) | 266 | current->thread.trap_no, SIGSEGV) == NOTIFY_STOP) |
266 | return 1; | 267 | return 1; |
267 | 268 | ||
268 | show_registers(regs); | 269 | show_registers(regs); |
269 | #ifdef CONFIG_X86_32 | 270 | #ifdef CONFIG_X86_32 |
270 | sp = (unsigned long) (®s->sp); | 271 | sp = (unsigned long) (®s->sp); |
271 | savesegment(ss, ss); | 272 | savesegment(ss, ss); |
272 | if (user_mode(regs)) { | 273 | if (user_mode(regs)) { |
273 | sp = regs->sp; | 274 | sp = regs->sp; |
274 | ss = regs->ss & 0xffff; | 275 | ss = regs->ss & 0xffff; |
275 | } | 276 | } |
276 | printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip); | 277 | printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip); |
277 | print_symbol("%s", regs->ip); | 278 | print_symbol("%s", regs->ip); |
278 | printk(" SS:ESP %04x:%08lx\n", ss, sp); | 279 | printk(" SS:ESP %04x:%08lx\n", ss, sp); |
279 | #else | 280 | #else |
280 | /* Executive summary in case the oops scrolled away */ | 281 | /* Executive summary in case the oops scrolled away */ |
281 | printk(KERN_ALERT "RIP "); | 282 | printk(KERN_ALERT "RIP "); |
282 | printk_address(regs->ip, 1); | 283 | printk_address(regs->ip, 1); |
283 | printk(" RSP <%016lx>\n", regs->sp); | 284 | printk(" RSP <%016lx>\n", regs->sp); |
284 | #endif | 285 | #endif |
285 | return 0; | 286 | return 0; |
286 | } | 287 | } |
287 | 288 | ||
288 | /* | 289 | /* |
289 | * This is gone through when something in the kernel has done something bad | 290 | * This is gone through when something in the kernel has done something bad |
290 | * and is about to be terminated: | 291 | * and is about to be terminated: |
291 | */ | 292 | */ |
292 | void die(const char *str, struct pt_regs *regs, long err) | 293 | void die(const char *str, struct pt_regs *regs, long err) |
293 | { | 294 | { |
294 | unsigned long flags = oops_begin(); | 295 | unsigned long flags = oops_begin(); |
295 | int sig = SIGSEGV; | 296 | int sig = SIGSEGV; |
296 | 297 | ||
297 | if (!user_mode_vm(regs)) | 298 | if (!user_mode_vm(regs)) |
298 | report_bug(regs->ip, regs); | 299 | report_bug(regs->ip, regs); |
299 | 300 | ||
300 | if (__die(str, regs, err)) | 301 | if (__die(str, regs, err)) |
301 | sig = 0; | 302 | sig = 0; |
302 | oops_end(flags, regs, sig); | 303 | oops_end(flags, regs, sig); |
303 | } | 304 | } |
304 | 305 | ||
305 | void notrace __kprobes | 306 | void notrace __kprobes |
306 | die_nmi(char *str, struct pt_regs *regs, int do_panic) | 307 | die_nmi(char *str, struct pt_regs *regs, int do_panic) |
307 | { | 308 | { |
308 | unsigned long flags; | 309 | unsigned long flags; |
309 | 310 | ||
310 | if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP) | 311 | if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP) |
311 | return; | 312 | return; |
312 | 313 | ||
313 | /* | 314 | /* |
314 | * We are in trouble anyway, let's at least try | 315 | * We are in trouble anyway, let's at least try |
315 | * to get a message out. | 316 | * to get a message out. |
316 | */ | 317 | */ |
317 | flags = oops_begin(); | 318 | flags = oops_begin(); |
318 | printk(KERN_EMERG "%s", str); | 319 | printk(KERN_EMERG "%s", str); |
319 | printk(" on CPU%d, ip %08lx, registers:\n", | 320 | printk(" on CPU%d, ip %08lx, registers:\n", |
320 | smp_processor_id(), regs->ip); | 321 | smp_processor_id(), regs->ip); |
321 | show_registers(regs); | 322 | show_registers(regs); |
322 | oops_end(flags, regs, 0); | 323 | oops_end(flags, regs, 0); |
323 | if (do_panic || panic_on_oops) | 324 | if (do_panic || panic_on_oops) |
324 | panic("Non maskable interrupt"); | 325 | panic("Non maskable interrupt"); |
325 | nmi_exit(); | 326 | nmi_exit(); |
326 | local_irq_enable(); | 327 | local_irq_enable(); |
327 | do_exit(SIGBUS); | 328 | do_exit(SIGBUS); |
328 | } | 329 | } |
329 | 330 | ||
330 | static int __init oops_setup(char *s) | 331 | static int __init oops_setup(char *s) |
331 | { | 332 | { |
332 | if (!s) | 333 | if (!s) |
333 | return -EINVAL; | 334 | return -EINVAL; |
334 | if (!strcmp(s, "panic")) | 335 | if (!strcmp(s, "panic")) |
335 | panic_on_oops = 1; | 336 | panic_on_oops = 1; |
336 | return 0; | 337 | return 0; |
337 | } | 338 | } |
338 | early_param("oops", oops_setup); | 339 | early_param("oops", oops_setup); |
339 | 340 | ||
340 | static int __init kstack_setup(char *s) | 341 | static int __init kstack_setup(char *s) |
341 | { | 342 | { |
342 | if (!s) | 343 | if (!s) |
343 | return -EINVAL; | 344 | return -EINVAL; |
344 | kstack_depth_to_print = simple_strtoul(s, NULL, 0); | 345 | kstack_depth_to_print = simple_strtoul(s, NULL, 0); |
345 | return 0; | 346 | return 0; |
346 | } | 347 | } |
347 | early_param("kstack", kstack_setup); | 348 | early_param("kstack", kstack_setup); |
348 | 349 | ||
349 | static int __init code_bytes_setup(char *s) | 350 | static int __init code_bytes_setup(char *s) |
350 | { | 351 | { |
351 | code_bytes = simple_strtoul(s, NULL, 0); | 352 | code_bytes = simple_strtoul(s, NULL, 0); |
352 | if (code_bytes > 8192) | 353 | if (code_bytes > 8192) |
353 | code_bytes = 8192; | 354 | code_bytes = 8192; |
354 | 355 | ||
355 | return 1; | 356 | return 1; |
356 | } | 357 | } |
357 | __setup("code_bytes=", code_bytes_setup); | 358 | __setup("code_bytes=", code_bytes_setup); |
358 | 359 |
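The print_ftrace_graph_addr() hook added above keeps stack traces readable while the graph tracer runs: any stack word equal to the return_to_handler trampoline is replaced with the real return address saved on task->ret_stack, consuming one shadow-stack entry per hit from the top down. A simplified user-space sketch of that substitution, with hypothetical values standing in for the trampoline and the saved returns:

#include <stdio.h>

#define RETURN_TO_HANDLER 0xdeadUL      /* stands in for the trampoline */

struct ret_stack_entry { unsigned long ret; };

/* Mirrors print_ftrace_graph_addr(): consume one saved entry per
 * trampoline hit, starting from curr (the newest entry). */
static unsigned long fixup(unsigned long addr,
                           const struct ret_stack_entry *ret_stack,
                           int curr, int *graph)
{
        int index = curr - *graph;

        if (addr != RETURN_TO_HANDLER || index < 0)
                return addr;
        (*graph)++;
        return ret_stack[index].ret;
}

int main(void)
{
        struct ret_stack_entry stack[] = { { 0x1000 }, { 0x2000 } };
        int graph = 0;

        /* two trampoline hits resolve to the saved addresses, newest first */
        printf("%#lx\n", fixup(RETURN_TO_HANDLER, stack, 1, &graph));
        printf("%#lx\n", fixup(RETURN_TO_HANDLER, stack, 1, &graph));
        return 0;
}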
arch/x86/kernel/ftrace.c
1 | /* | 1 | /* |
2 | * Code for replacing ftrace calls with jumps. | 2 | * Code for replacing ftrace calls with jumps. |
3 | * | 3 | * |
4 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> | 4 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> |
5 | * | 5 | * |
6 | * Thanks go to Ingo Molnar, for suggesting the idea. | 6 | * Thanks go to Ingo Molnar, for suggesting the idea. |
7 | * Mathieu Desnoyers, for suggesting postponing the modifications. | 7 | * Mathieu Desnoyers, for suggesting postponing the modifications. |
8 | * Arjan van de Ven, for keeping me straight, and explaining to me | 8 | * Arjan van de Ven, for keeping me straight, and explaining to me |
9 | * the dangers of modifying code on the run. | 9 | * the dangers of modifying code on the run. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/spinlock.h> | 12 | #include <linux/spinlock.h> |
13 | #include <linux/hardirq.h> | 13 | #include <linux/hardirq.h> |
14 | #include <linux/uaccess.h> | 14 | #include <linux/uaccess.h> |
15 | #include <linux/ftrace.h> | 15 | #include <linux/ftrace.h> |
16 | #include <linux/percpu.h> | 16 | #include <linux/percpu.h> |
17 | #include <linux/sched.h> | 17 | #include <linux/sched.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/list.h> | 19 | #include <linux/list.h> |
20 | 20 | ||
21 | #include <asm/ftrace.h> | 21 | #include <asm/ftrace.h> |
22 | #include <linux/ftrace.h> | 22 | #include <linux/ftrace.h> |
23 | #include <asm/nops.h> | 23 | #include <asm/nops.h> |
24 | #include <asm/nmi.h> | 24 | #include <asm/nmi.h> |
25 | 25 | ||
26 | 26 | ||
27 | #ifdef CONFIG_DYNAMIC_FTRACE | 27 | #ifdef CONFIG_DYNAMIC_FTRACE |
28 | 28 | ||
29 | union ftrace_code_union { | 29 | union ftrace_code_union { |
30 | char code[MCOUNT_INSN_SIZE]; | 30 | char code[MCOUNT_INSN_SIZE]; |
31 | struct { | 31 | struct { |
32 | char e8; | 32 | char e8; |
33 | int offset; | 33 | int offset; |
34 | } __attribute__((packed)); | 34 | } __attribute__((packed)); |
35 | }; | 35 | }; |
36 | 36 | ||
37 | static int ftrace_calc_offset(long ip, long addr) | 37 | static int ftrace_calc_offset(long ip, long addr) |
38 | { | 38 | { |
39 | return (int)(addr - ip); | 39 | return (int)(addr - ip); |
40 | } | 40 | } |
41 | 41 | ||
42 | static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) | 42 | static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) |
43 | { | 43 | { |
44 | static union ftrace_code_union calc; | 44 | static union ftrace_code_union calc; |
45 | 45 | ||
46 | calc.e8 = 0xe8; | 46 | calc.e8 = 0xe8; |
47 | calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr); | 47 | calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr); |
48 | 48 | ||
49 | /* | 49 | /* |
50 | * No locking needed, this must be called via kstop_machine | 50 | * No locking needed, this must be called via kstop_machine |
51 | * which in essence is like running on a uniprocessor machine. | 51 | * which in essence is like running on a uniprocessor machine. |
52 | */ | 52 | */ |
53 | return calc.code; | 53 | return calc.code; |
54 | } | 54 | } |
55 | 55 | ||
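ftrace_call_replace() builds the 5-byte call through the packed union: opcode e8 followed by a signed 32-bit displacement measured from the next instruction, i.e. ip + MCOUNT_INSN_SIZE. A small stand-alone sketch of the same encoding, with hypothetical addresses:

#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned long ip   = 0xffffffff81000100UL;      /* call site (hypothetical) */
        unsigned long addr = 0xffffffff81234560UL;      /* target, e.g. mcount */
        unsigned char code[5];
        int offset = (int)(addr - (ip + 5));            /* relative to next insn */
        int i;

        code[0] = 0xe8;
        memcpy(&code[1], &offset, 4);                   /* x86 is little-endian */

        for (i = 0; i < 5; i++)
                printf("%02x ", code[i]);
        printf("\n");
        return 0;
}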
56 | /* | 56 | /* |
57 | * Modifying code must take extra care. On an SMP machine, if | 57 | * Modifying code must take extra care. On an SMP machine, if |
58 | * the code being modified is also being executed on another CPU | 58 | * the code being modified is also being executed on another CPU |
59 | * that CPU will have undefined results and possibly take a GPF. | 59 | * that CPU will have undefined results and possibly take a GPF. |
60 | * We use kstop_machine to stop other CPUs from executing code. | 60 | * We use kstop_machine to stop other CPUs from executing code. |
61 | * But this does not stop NMIs from happening. We still need | 61 | * But this does not stop NMIs from happening. We still need |
62 | * to protect against that. We separate out the modification of | 62 | * to protect against that. We separate out the modification of |
63 | * the code to take care of this. | 63 | * the code to take care of this. |
64 | * | 64 | * |
65 | * Two buffers are added: An IP buffer and a "code" buffer. | 65 | * Two buffers are added: An IP buffer and a "code" buffer. |
66 | * | 66 | * |
67 | * 1) Put the instruction pointer into the IP buffer | 67 | * 1) Put the instruction pointer into the IP buffer |
68 | * and the new code into the "code" buffer. | 68 | * and the new code into the "code" buffer. |
69 | * 2) Set a flag that says we are modifying code | 69 | * 2) Set a flag that says we are modifying code |
70 | * 3) Wait for any running NMIs to finish. | 70 | * 3) Wait for any running NMIs to finish. |
71 | * 4) Write the code | 71 | * 4) Write the code |
72 | * 5) clear the flag. | 72 | * 5) clear the flag. |
73 | * 6) Wait for any running NMIs to finish. | 73 | * 6) Wait for any running NMIs to finish. |
74 | * | 74 | * |
75 | * If an NMI is executed, the first thing it does is to call | 75 | * If an NMI is executed, the first thing it does is to call |
76 | * "ftrace_nmi_enter". This will check if the flag is set to write | 76 | * "ftrace_nmi_enter". This will check if the flag is set to write |
77 | * and if it is, it will write what is in the IP and "code" buffers. | 77 | * and if it is, it will write what is in the IP and "code" buffers. |
78 | * | 78 | * |
79 | * The trick is, it does not matter if everyone is writing the same | 79 | * The trick is, it does not matter if everyone is writing the same |
80 | * content to the code location. Also, if a CPU is executing code | 80 | * content to the code location. Also, if a CPU is executing code |
81 | * it is OK to write to that code location if the contents being written | 81 | * it is OK to write to that code location if the contents being written |
82 | * are the same as what exists. | 82 | * are the same as what exists. |
83 | */ | 83 | */ |
84 | 84 | ||
85 | static atomic_t nmi_running = ATOMIC_INIT(0); | 85 | static atomic_t nmi_running = ATOMIC_INIT(0); |
86 | static int mod_code_status; /* holds return value of text write */ | 86 | static int mod_code_status; /* holds return value of text write */ |
87 | static int mod_code_write; /* set when NMI should do the write */ | 87 | static int mod_code_write; /* set when NMI should do the write */ |
88 | static void *mod_code_ip; /* holds the IP to write to */ | 88 | static void *mod_code_ip; /* holds the IP to write to */ |
89 | static void *mod_code_newcode; /* holds the text to write to the IP */ | 89 | static void *mod_code_newcode; /* holds the text to write to the IP */ |
90 | 90 | ||
91 | static unsigned nmi_wait_count; | 91 | static unsigned nmi_wait_count; |
92 | static atomic_t nmi_update_count = ATOMIC_INIT(0); | 92 | static atomic_t nmi_update_count = ATOMIC_INIT(0); |
93 | 93 | ||
94 | int ftrace_arch_read_dyn_info(char *buf, int size) | 94 | int ftrace_arch_read_dyn_info(char *buf, int size) |
95 | { | 95 | { |
96 | int r; | 96 | int r; |
97 | 97 | ||
98 | r = snprintf(buf, size, "%u %u", | 98 | r = snprintf(buf, size, "%u %u", |
99 | nmi_wait_count, | 99 | nmi_wait_count, |
100 | atomic_read(&nmi_update_count)); | 100 | atomic_read(&nmi_update_count)); |
101 | return r; | 101 | return r; |
102 | } | 102 | } |
103 | 103 | ||
104 | static void ftrace_mod_code(void) | 104 | static void ftrace_mod_code(void) |
105 | { | 105 | { |
106 | /* | 106 | /* |
107 | * Yes, more than one CPU can be writing to mod_code_status. | 107 | * Yes, more than one CPU can be writing to mod_code_status. |
108 | * (and the code itself) | 108 | * (and the code itself) |
109 | * But if one were to fail, then they all should, and if one were | 109 | * But if one were to fail, then they all should, and if one were |
110 | * to succeed, then they all should. | 110 | * to succeed, then they all should. |
111 | */ | 111 | */ |
112 | mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode, | 112 | mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode, |
113 | MCOUNT_INSN_SIZE); | 113 | MCOUNT_INSN_SIZE); |
114 | } | 114 | } |
115 | 115 | ||
116 | void ftrace_nmi_enter(void) | 116 | void ftrace_nmi_enter(void) |
117 | { | 117 | { |
118 | atomic_inc(&nmi_running); | 118 | atomic_inc(&nmi_running); |
119 | /* Must have nmi_running seen before reading write flag */ | 119 | /* Must have nmi_running seen before reading write flag */ |
120 | smp_mb(); | 120 | smp_mb(); |
121 | if (mod_code_write) { | 121 | if (mod_code_write) { |
122 | ftrace_mod_code(); | 122 | ftrace_mod_code(); |
123 | atomic_inc(&nmi_update_count); | 123 | atomic_inc(&nmi_update_count); |
124 | } | 124 | } |
125 | } | 125 | } |
126 | 126 | ||
127 | void ftrace_nmi_exit(void) | 127 | void ftrace_nmi_exit(void) |
128 | { | 128 | { |
129 | /* Finish all executions before clearing nmi_running */ | 129 | /* Finish all executions before clearing nmi_running */ |
130 | smp_wmb(); | 130 | smp_wmb(); |
131 | atomic_dec(&nmi_running); | 131 | atomic_dec(&nmi_running); |
132 | } | 132 | } |
133 | 133 | ||
134 | static void wait_for_nmi(void) | 134 | static void wait_for_nmi(void) |
135 | { | 135 | { |
136 | if (!atomic_read(&nmi_running)) | 136 | if (!atomic_read(&nmi_running)) |
137 | return; | 137 | return; |
138 | 138 | ||
139 | do { | 139 | do { |
140 | cpu_relax(); | 140 | cpu_relax(); |
141 | } while (atomic_read(&nmi_running)); | 141 | } while (atomic_read(&nmi_running)); |
142 | 142 | ||
143 | nmi_wait_count++; | 143 | nmi_wait_count++; |
144 | } | 144 | } |
145 | 145 | ||
146 | static int | 146 | static int |
147 | do_ftrace_mod_code(unsigned long ip, void *new_code) | 147 | do_ftrace_mod_code(unsigned long ip, void *new_code) |
148 | { | 148 | { |
149 | mod_code_ip = (void *)ip; | 149 | mod_code_ip = (void *)ip; |
150 | mod_code_newcode = new_code; | 150 | mod_code_newcode = new_code; |
151 | 151 | ||
152 | /* The buffers need to be visible before we let NMIs write them */ | 152 | /* The buffers need to be visible before we let NMIs write them */ |
153 | smp_wmb(); | 153 | smp_wmb(); |
154 | 154 | ||
155 | mod_code_write = 1; | 155 | mod_code_write = 1; |
156 | 156 | ||
157 | /* Make sure write bit is visible before we wait on NMIs */ | 157 | /* Make sure write bit is visible before we wait on NMIs */ |
158 | smp_mb(); | 158 | smp_mb(); |
159 | 159 | ||
160 | wait_for_nmi(); | 160 | wait_for_nmi(); |
161 | 161 | ||
162 | /* Make sure all running NMIs have finished before we write the code */ | 162 | /* Make sure all running NMIs have finished before we write the code */ |
163 | smp_mb(); | 163 | smp_mb(); |
164 | 164 | ||
165 | ftrace_mod_code(); | 165 | ftrace_mod_code(); |
166 | 166 | ||
167 | /* Make sure the write happens before clearing the bit */ | 167 | /* Make sure the write happens before clearing the bit */ |
168 | smp_wmb(); | 168 | smp_wmb(); |
169 | 169 | ||
170 | mod_code_write = 0; | 170 | mod_code_write = 0; |
171 | 171 | ||
172 | /* make sure NMIs see the cleared bit */ | 172 | /* make sure NMIs see the cleared bit */ |
173 | smp_mb(); | 173 | smp_mb(); |
174 | 174 | ||
175 | wait_for_nmi(); | 175 | wait_for_nmi(); |
176 | 176 | ||
177 | return mod_code_status; | 177 | return mod_code_status; |
178 | } | 178 | } |
179 | 179 | ||
180 | 180 | ||
181 | 181 | ||
182 | 182 | ||
183 | static unsigned char ftrace_nop[MCOUNT_INSN_SIZE]; | 183 | static unsigned char ftrace_nop[MCOUNT_INSN_SIZE]; |
184 | 184 | ||
185 | static unsigned char *ftrace_nop_replace(void) | 185 | static unsigned char *ftrace_nop_replace(void) |
186 | { | 186 | { |
187 | return ftrace_nop; | 187 | return ftrace_nop; |
188 | } | 188 | } |
189 | 189 | ||
190 | static int | 190 | static int |
191 | ftrace_modify_code(unsigned long ip, unsigned char *old_code, | 191 | ftrace_modify_code(unsigned long ip, unsigned char *old_code, |
192 | unsigned char *new_code) | 192 | unsigned char *new_code) |
193 | { | 193 | { |
194 | unsigned char replaced[MCOUNT_INSN_SIZE]; | 194 | unsigned char replaced[MCOUNT_INSN_SIZE]; |
195 | 195 | ||
196 | /* | 196 | /* |
197 | * Note: Due to modules and __init, code can | 197 | * Note: Due to modules and __init, code can |
197 | * disappear and change, so we need to protect against faulting | 198 | * disappear and change, so we need to protect against faulting |
199 | * as well as code changing. We do this by using the | 199 | * as well as code changing. We do this by using the |
200 | * probe_kernel_* functions. | 200 | * probe_kernel_* functions. |
201 | * | 201 | * |
202 | * No real locking needed, this code is run through | 202 | * No real locking needed, this code is run through |
203 | * kstop_machine, or before SMP starts. | 203 | * kstop_machine, or before SMP starts. |
204 | */ | 204 | */ |
205 | 205 | ||
206 | /* read the text we want to modify */ | 206 | /* read the text we want to modify */ |
207 | if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE)) | 207 | if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE)) |
208 | return -EFAULT; | 208 | return -EFAULT; |
209 | 209 | ||
210 | /* Make sure it is what we expect it to be */ | 210 | /* Make sure it is what we expect it to be */ |
211 | if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0) | 211 | if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0) |
212 | return -EINVAL; | 212 | return -EINVAL; |
213 | 213 | ||
214 | /* replace the text with the new text */ | 214 | /* replace the text with the new text */ |
215 | if (do_ftrace_mod_code(ip, new_code)) | 215 | if (do_ftrace_mod_code(ip, new_code)) |
216 | return -EPERM; | 216 | return -EPERM; |
217 | 217 | ||
218 | sync_core(); | 218 | sync_core(); |
219 | 219 | ||
220 | return 0; | 220 | return 0; |
221 | } | 221 | } |
222 | 222 | ||
223 | int ftrace_make_nop(struct module *mod, | 223 | int ftrace_make_nop(struct module *mod, |
224 | struct dyn_ftrace *rec, unsigned long addr) | 224 | struct dyn_ftrace *rec, unsigned long addr) |
225 | { | 225 | { |
226 | unsigned char *new, *old; | 226 | unsigned char *new, *old; |
227 | unsigned long ip = rec->ip; | 227 | unsigned long ip = rec->ip; |
228 | 228 | ||
229 | old = ftrace_call_replace(ip, addr); | 229 | old = ftrace_call_replace(ip, addr); |
230 | new = ftrace_nop_replace(); | 230 | new = ftrace_nop_replace(); |
231 | 231 | ||
232 | return ftrace_modify_code(rec->ip, old, new); | 232 | return ftrace_modify_code(rec->ip, old, new); |
233 | } | 233 | } |
234 | 234 | ||
235 | int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | 235 | int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) |
236 | { | 236 | { |
237 | unsigned char *new, *old; | 237 | unsigned char *new, *old; |
238 | unsigned long ip = rec->ip; | 238 | unsigned long ip = rec->ip; |
239 | 239 | ||
240 | old = ftrace_nop_replace(); | 240 | old = ftrace_nop_replace(); |
241 | new = ftrace_call_replace(ip, addr); | 241 | new = ftrace_call_replace(ip, addr); |
242 | 242 | ||
243 | return ftrace_modify_code(rec->ip, old, new); | 243 | return ftrace_modify_code(rec->ip, old, new); |
244 | } | 244 | } |
245 | 245 | ||
246 | int ftrace_update_ftrace_func(ftrace_func_t func) | 246 | int ftrace_update_ftrace_func(ftrace_func_t func) |
247 | { | 247 | { |
248 | unsigned long ip = (unsigned long)(&ftrace_call); | 248 | unsigned long ip = (unsigned long)(&ftrace_call); |
249 | unsigned char old[MCOUNT_INSN_SIZE], *new; | 249 | unsigned char old[MCOUNT_INSN_SIZE], *new; |
250 | int ret; | 250 | int ret; |
251 | 251 | ||
252 | memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); | 252 | memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); |
253 | new = ftrace_call_replace(ip, (unsigned long)func); | 253 | new = ftrace_call_replace(ip, (unsigned long)func); |
254 | ret = ftrace_modify_code(ip, old, new); | 254 | ret = ftrace_modify_code(ip, old, new); |
255 | 255 | ||
256 | return ret; | 256 | return ret; |
257 | } | 257 | } |
258 | 258 | ||
259 | int __init ftrace_dyn_arch_init(void *data) | 259 | int __init ftrace_dyn_arch_init(void *data) |
260 | { | 260 | { |
261 | extern const unsigned char ftrace_test_p6nop[]; | 261 | extern const unsigned char ftrace_test_p6nop[]; |
262 | extern const unsigned char ftrace_test_nop5[]; | 262 | extern const unsigned char ftrace_test_nop5[]; |
263 | extern const unsigned char ftrace_test_jmp[]; | 263 | extern const unsigned char ftrace_test_jmp[]; |
264 | int faulted = 0; | 264 | int faulted = 0; |
265 | 265 | ||
266 | /* | 266 | /* |
267 | * There is no good nop for all x86 archs. | 267 | * There is no good nop for all x86 archs. |
268 | * We will default to using the P6_NOP5, but first we | 268 | * We will default to using the P6_NOP5, but first we |
269 | * will test to make sure that the nop will actually | 269 | * will test to make sure that the nop will actually |
270 | * work on this CPU. If it faults, we will then | 270 | * work on this CPU. If it faults, we will then |
271 | * go to a less efficient 5 byte nop. If that fails | 271 | * go to a less efficient 5 byte nop. If that fails |
272 | * we then just use a jmp as our nop. This isn't the most | 272 | * we then just use a jmp as our nop. This isn't the most |
273 | * efficient nop, but we cannot use a multi-part nop | 273 | * efficient nop, but we cannot use a multi-part nop |
274 | * since we would then risk being preempted in the middle | 274 | * since we would then risk being preempted in the middle |
275 | * of that nop, and if we enabled tracing then, it might | 275 | * of that nop, and if we enabled tracing then, it might |
276 | * cause a system crash. | 276 | * cause a system crash. |
277 | * | 277 | * |
278 | * TODO: check the cpuid to determine the best nop. | 278 | * TODO: check the cpuid to determine the best nop. |
279 | */ | 279 | */ |
280 | asm volatile ( | 280 | asm volatile ( |
281 | "ftrace_test_jmp:" | 281 | "ftrace_test_jmp:" |
282 | "jmp ftrace_test_p6nop\n" | 282 | "jmp ftrace_test_p6nop\n" |
283 | "nop\n" | 283 | "nop\n" |
284 | "nop\n" | 284 | "nop\n" |
285 | "nop\n" /* 2 byte jmp + 3 bytes */ | 285 | "nop\n" /* 2 byte jmp + 3 bytes */ |
286 | "ftrace_test_p6nop:" | 286 | "ftrace_test_p6nop:" |
287 | P6_NOP5 | 287 | P6_NOP5 |
288 | "jmp 1f\n" | 288 | "jmp 1f\n" |
289 | "ftrace_test_nop5:" | 289 | "ftrace_test_nop5:" |
290 | ".byte 0x66,0x66,0x66,0x66,0x90\n" | 290 | ".byte 0x66,0x66,0x66,0x66,0x90\n" |
291 | "1:" | 291 | "1:" |
292 | ".section .fixup, \"ax\"\n" | 292 | ".section .fixup, \"ax\"\n" |
293 | "2: movl $1, %0\n" | 293 | "2: movl $1, %0\n" |
294 | " jmp ftrace_test_nop5\n" | 294 | " jmp ftrace_test_nop5\n" |
295 | "3: movl $2, %0\n" | 295 | "3: movl $2, %0\n" |
296 | " jmp 1b\n" | 296 | " jmp 1b\n" |
297 | ".previous\n" | 297 | ".previous\n" |
298 | _ASM_EXTABLE(ftrace_test_p6nop, 2b) | 298 | _ASM_EXTABLE(ftrace_test_p6nop, 2b) |
299 | _ASM_EXTABLE(ftrace_test_nop5, 3b) | 299 | _ASM_EXTABLE(ftrace_test_nop5, 3b) |
300 | : "=r"(faulted) : "0" (faulted)); | 300 | : "=r"(faulted) : "0" (faulted)); |
301 | 301 | ||
302 | switch (faulted) { | 302 | switch (faulted) { |
303 | case 0: | 303 | case 0: |
304 | pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n"); | 304 | pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n"); |
305 | memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE); | 305 | memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE); |
306 | break; | 306 | break; |
307 | case 1: | 307 | case 1: |
308 | pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n"); | 308 | pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n"); |
309 | memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE); | 309 | memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE); |
310 | break; | 310 | break; |
311 | case 2: | 311 | case 2: |
312 | pr_info("ftrace: converting mcount calls to jmp . + 5\n"); | 312 | pr_info("ftrace: converting mcount calls to jmp . + 5\n"); |
313 | memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE); | 313 | memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE); |
314 | break; | 314 | break; |
315 | } | 315 | } |
316 | 316 | ||
317 | /* The return code is returned via data */ | 317 | /* The return code is returned via data */ |
318 | *(unsigned long *)data = 0; | 318 | *(unsigned long *)data = 0; |
319 | 319 | ||
320 | return 0; | 320 | return 0; |
321 | } | 321 | } |
322 | #endif | 322 | #endif |
323 | 323 | ||
324 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 324 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
325 | 325 | ||
326 | #ifdef CONFIG_DYNAMIC_FTRACE | 326 | #ifdef CONFIG_DYNAMIC_FTRACE |
327 | extern void ftrace_graph_call(void); | 327 | extern void ftrace_graph_call(void); |
328 | 328 | ||
329 | static int ftrace_mod_jmp(unsigned long ip, | 329 | static int ftrace_mod_jmp(unsigned long ip, |
330 | int old_offset, int new_offset) | 330 | int old_offset, int new_offset) |
331 | { | 331 | { |
332 | unsigned char code[MCOUNT_INSN_SIZE]; | 332 | unsigned char code[MCOUNT_INSN_SIZE]; |
333 | 333 | ||
334 | if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE)) | 334 | if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE)) |
335 | return -EFAULT; | 335 | return -EFAULT; |
336 | 336 | ||
337 | if (code[0] != 0xe9 || old_offset != *(int *)(&code[1])) | 337 | if (code[0] != 0xe9 || old_offset != *(int *)(&code[1])) |
338 | return -EINVAL; | 338 | return -EINVAL; |
339 | 339 | ||
340 | *(int *)(&code[1]) = new_offset; | 340 | *(int *)(&code[1]) = new_offset; |
341 | 341 | ||
342 | if (do_ftrace_mod_code(ip, &code)) | 342 | if (do_ftrace_mod_code(ip, &code)) |
343 | return -EPERM; | 343 | return -EPERM; |
344 | 344 | ||
345 | return 0; | 345 | return 0; |
346 | } | 346 | } |
347 | 347 | ||
348 | int ftrace_enable_ftrace_graph_caller(void) | 348 | int ftrace_enable_ftrace_graph_caller(void) |
349 | { | 349 | { |
350 | unsigned long ip = (unsigned long)(&ftrace_graph_call); | 350 | unsigned long ip = (unsigned long)(&ftrace_graph_call); |
351 | int old_offset, new_offset; | 351 | int old_offset, new_offset; |
352 | 352 | ||
353 | old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE); | 353 | old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE); |
354 | new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE); | 354 | new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE); |
355 | 355 | ||
356 | return ftrace_mod_jmp(ip, old_offset, new_offset); | 356 | return ftrace_mod_jmp(ip, old_offset, new_offset); |
357 | } | 357 | } |
358 | 358 | ||
359 | int ftrace_disable_ftrace_graph_caller(void) | 359 | int ftrace_disable_ftrace_graph_caller(void) |
360 | { | 360 | { |
361 | unsigned long ip = (unsigned long)(&ftrace_graph_call); | 361 | unsigned long ip = (unsigned long)(&ftrace_graph_call); |
362 | int old_offset, new_offset; | 362 | int old_offset, new_offset; |
363 | 363 | ||
364 | old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE); | 364 | old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE); |
365 | new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE); | 365 | new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE); |
366 | 366 | ||
367 | return ftrace_mod_jmp(ip, old_offset, new_offset); | 367 | return ftrace_mod_jmp(ip, old_offset, new_offset); |
368 | } | 368 | } |
369 | 369 | ||
370 | #endif /* !CONFIG_DYNAMIC_FTRACE */ | 370 | #endif /* !CONFIG_DYNAMIC_FTRACE */ |
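The enable/disable pair above retargets a 5-byte "jmp rel32" (opcode e9) at ftrace_graph_call by rewriting only its displacement, again measured from ip + MCOUNT_INSN_SIZE. A worked sketch of that offset arithmetic, with hypothetical addresses:

#include <stdio.h>

int main(void)
{
        unsigned long ip     = 0xffffffff81001000UL;    /* ftrace_graph_call */
        unsigned long stub   = 0xffffffff81001005UL;    /* ftrace_stub */
        unsigned long caller = 0xffffffff81002000UL;    /* ftrace_graph_caller */

        int to_stub   = (int)(stub - (ip + 5));         /* 0: jmp falls through */
        int to_caller = (int)(caller - (ip + 5));

        printf("e9 rel32 %#x -> stub, e9 rel32 %#x -> graph caller\n",
               to_stub, to_caller);
        return 0;
}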
371 | 371 | ||
372 | /* Add a function return address to the trace stack on thread info. */ | ||
373 | static int push_return_trace(unsigned long ret, unsigned long long time, | ||
374 | unsigned long func, int *depth) | ||
375 | { | ||
376 | int index; | ||
377 | |||
378 | if (!current->ret_stack) | ||
379 | return -EBUSY; | ||
380 | |||
381 | /* The return trace stack is full */ | ||
382 | if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) { | ||
383 | atomic_inc(¤t->trace_overrun); | ||
384 | return -EBUSY; | ||
385 | } | ||
386 | |||
387 | index = ++current->curr_ret_stack; | ||
388 | barrier(); | ||
389 | current->ret_stack[index].ret = ret; | ||
390 | current->ret_stack[index].func = func; | ||
391 | current->ret_stack[index].calltime = time; | ||
392 | *depth = index; | ||
393 | |||
394 | return 0; | ||
395 | } | ||
396 | |||
397 | /* Retrieve a function return address from the trace stack on thread info. */ | ||
398 | static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret) | ||
399 | { | ||
400 | int index; | ||
401 | |||
402 | index = current->curr_ret_stack; | ||
403 | |||
404 | if (unlikely(index < 0)) { | ||
405 | ftrace_graph_stop(); | ||
406 | WARN_ON(1); | ||
407 | /* Might as well panic, otherwise we have nowhere to go */ | ||
408 | *ret = (unsigned long)panic; | ||
409 | return; | ||
410 | } | ||
411 | |||
412 | *ret = current->ret_stack[index].ret; | ||
413 | trace->func = current->ret_stack[index].func; | ||
414 | trace->calltime = current->ret_stack[index].calltime; | ||
415 | trace->overrun = atomic_read(¤t->trace_overrun); | ||
416 | trace->depth = index; | ||
417 | barrier(); | ||
418 | current->curr_ret_stack--; | ||
419 | |||
420 | } | ||
421 | |||
422 | /* | 372 | /* |
423 | * Send the trace to the ring-buffer. | ||
424 | * @return the original return address. | ||
425 | */ | ||
426 | unsigned long ftrace_return_to_handler(void) | ||
427 | { | ||
428 | struct ftrace_graph_ret trace; | ||
429 | unsigned long ret; | ||
430 | |||
431 | pop_return_trace(&trace, &ret); | ||
432 | trace.rettime = cpu_clock(raw_smp_processor_id()); | ||
433 | ftrace_graph_return(&trace); | ||
434 | |||
435 | if (unlikely(!ret)) { | ||
436 | ftrace_graph_stop(); | ||
437 | WARN_ON(1); | ||
438 | /* Might as well panic. What else to do? */ | ||
439 | ret = (unsigned long)panic; | ||
440 | } | ||
441 | |||
442 | return ret; | ||
443 | } | ||
444 | |||
445 | /* | ||
446 | * Hook the return address and push it in the stack of return addrs | 373 | * Hook the return address and push it in the stack of return addrs |
447 | * in current thread info. | 374 | * in current thread info. |
448 | */ | 375 | */ |
449 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | 376 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) |
450 | { | 377 | { |
451 | unsigned long old; | 378 | unsigned long old; |
452 | unsigned long long calltime; | 379 | unsigned long long calltime; |
453 | int faulted; | 380 | int faulted; |
454 | struct ftrace_graph_ent trace; | 381 | struct ftrace_graph_ent trace; |
455 | unsigned long return_hooker = (unsigned long) | 382 | unsigned long return_hooker = (unsigned long) |
456 | &return_to_handler; | 383 | &return_to_handler; |
457 | 384 | ||
458 | /* NMIs are currently unsupported */ | 385 | /* NMIs are currently unsupported */ |
459 | if (unlikely(in_nmi())) | 386 | if (unlikely(in_nmi())) |
460 | return; | 387 | return; |
461 | 388 | ||
462 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) | 389 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) |
463 | return; | 390 | return; |
464 | 391 | ||
465 | /* | 392 | /* |
466 | * Protect against fault, even if it shouldn't | 393 | * Protect against fault, even if it shouldn't |
467 | * happen. This tool is too intrusive to | 394 | * happen. This tool is too intrusive to |
468 | * ignore such a protection. | 395 | * ignore such a protection. |
469 | */ | 396 | */ |
470 | asm volatile( | 397 | asm volatile( |
471 | "1: " _ASM_MOV " (%[parent]), %[old]\n" | 398 | "1: " _ASM_MOV " (%[parent]), %[old]\n" |
472 | "2: " _ASM_MOV " %[return_hooker], (%[parent])\n" | 399 | "2: " _ASM_MOV " %[return_hooker], (%[parent])\n" |
473 | " movl $0, %[faulted]\n" | 400 | " movl $0, %[faulted]\n" |
474 | "3:\n" | 401 | "3:\n" |
475 | 402 | ||
476 | ".section .fixup, \"ax\"\n" | 403 | ".section .fixup, \"ax\"\n" |
477 | "4: movl $1, %[faulted]\n" | 404 | "4: movl $1, %[faulted]\n" |
478 | " jmp 3b\n" | 405 | " jmp 3b\n" |
479 | ".previous\n" | 406 | ".previous\n" |
480 | 407 | ||
481 | _ASM_EXTABLE(1b, 4b) | 408 | _ASM_EXTABLE(1b, 4b) |
482 | _ASM_EXTABLE(2b, 4b) | 409 | _ASM_EXTABLE(2b, 4b) |
483 | 410 | ||
484 | : [old] "=r" (old), [faulted] "=r" (faulted) | 411 | : [old] "=r" (old), [faulted] "=r" (faulted) |
485 | : [parent] "r" (parent), [return_hooker] "r" (return_hooker) | 412 | : [parent] "r" (parent), [return_hooker] "r" (return_hooker) |
486 | : "memory" | 413 | : "memory" |
487 | ); | 414 | ); |
488 | 415 | ||
489 | if (unlikely(faulted)) { | 416 | if (unlikely(faulted)) { |
490 | ftrace_graph_stop(); | 417 | ftrace_graph_stop(); |
491 | WARN_ON(1); | 418 | WARN_ON(1); |
492 | return; | 419 | return; |
493 | } | 420 | } |
494 | 421 | ||
495 | calltime = cpu_clock(raw_smp_processor_id()); | 422 | calltime = cpu_clock(raw_smp_processor_id()); |
496 | 423 | ||
497 | if (push_return_trace(old, calltime, | 424 | if (ftrace_push_return_trace(old, calltime, |
498 | self_addr, &trace.depth) == -EBUSY) { | 425 | self_addr, &trace.depth) == -EBUSY) { |
499 | *parent = old; | 426 | *parent = old; |
500 | return; | 427 | return; |
501 | } | 428 | } |
502 | 429 | ||
503 | trace.func = self_addr; | 430 | trace.func = self_addr; |
504 | 431 | ||
505 | /* Only trace if the calling function expects to */ | 432 | /* Only trace if the calling function expects to */ |
506 | if (!ftrace_graph_entry(&trace)) { | 433 | if (!ftrace_graph_entry(&trace)) { |
507 | current->curr_ret_stack--; | 434 | current->curr_ret_stack--; |
508 | *parent = old; | 435 | *parent = old; |
509 | } | 436 | } |
510 | } | 437 | } |
511 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 438 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
512 | 439 |
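With this merge the shadow-stack bookkeeping moves to the generic ftrace_push_return_trace(), but the core trick of prepare_ftrace_return() is unchanged: the stack word holding the traced function's return address is overwritten with return_to_handler, and the real address is saved per thread so the trampoline can restore it after logging the exit. A conceptual user-space sketch of that flow, with all values hypothetical:

#include <stdio.h>

static unsigned long shadow_stack[32];
static int curr = -1;

/* prepare_ftrace_return(), conceptually: save the real return
 * address, then divert the return to the trampoline. */
static void hook_return(unsigned long *parent, unsigned long trampoline)
{
        shadow_stack[++curr] = *parent;
        *parent = trampoline;
}

/* return_to_handler(), conceptually: log the exit (omitted here)
 * and hand back the real return address. */
static unsigned long return_to_handler_sim(void)
{
        return shadow_stack[curr--];
}

int main(void)
{
        unsigned long ret_slot = 0x4005d0;      /* pretend stack slot */

        hook_return(&ret_slot, 0xdead);
        printf("diverted to %#lx, restored %#lx\n",
               ret_slot, return_to_handler_sim());
        return 0;
}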
include/linux/ftrace.h
1 | #ifndef _LINUX_FTRACE_H | 1 | #ifndef _LINUX_FTRACE_H |
2 | #define _LINUX_FTRACE_H | 2 | #define _LINUX_FTRACE_H |
3 | 3 | ||
4 | #include <linux/linkage.h> | 4 | #include <linux/linkage.h> |
5 | #include <linux/fs.h> | 5 | #include <linux/fs.h> |
6 | #include <linux/ktime.h> | 6 | #include <linux/ktime.h> |
7 | #include <linux/init.h> | 7 | #include <linux/init.h> |
8 | #include <linux/types.h> | 8 | #include <linux/types.h> |
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/kallsyms.h> | 10 | #include <linux/kallsyms.h> |
11 | #include <linux/bitops.h> | 11 | #include <linux/bitops.h> |
12 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
13 | 13 | ||
14 | #ifdef CONFIG_FUNCTION_TRACER | 14 | #ifdef CONFIG_FUNCTION_TRACER |
15 | 15 | ||
16 | extern int ftrace_enabled; | 16 | extern int ftrace_enabled; |
17 | extern int | 17 | extern int |
18 | ftrace_enable_sysctl(struct ctl_table *table, int write, | 18 | ftrace_enable_sysctl(struct ctl_table *table, int write, |
19 | struct file *filp, void __user *buffer, size_t *lenp, | 19 | struct file *filp, void __user *buffer, size_t *lenp, |
20 | loff_t *ppos); | 20 | loff_t *ppos); |
21 | 21 | ||
22 | typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip); | 22 | typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip); |
23 | 23 | ||
24 | struct ftrace_ops { | 24 | struct ftrace_ops { |
25 | ftrace_func_t func; | 25 | ftrace_func_t func; |
26 | struct ftrace_ops *next; | 26 | struct ftrace_ops *next; |
27 | }; | 27 | }; |
28 | 28 | ||
29 | extern int function_trace_stop; | 29 | extern int function_trace_stop; |
30 | 30 | ||
31 | /* | 31 | /* |
32 | * Type of the current tracing. | 32 | * Type of the current tracing. |
33 | */ | 33 | */ |
34 | enum ftrace_tracing_type_t { | 34 | enum ftrace_tracing_type_t { |
35 | FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */ | 35 | FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */ |
36 | FTRACE_TYPE_RETURN, /* Hook the return of the function */ | 36 | FTRACE_TYPE_RETURN, /* Hook the return of the function */ |
37 | }; | 37 | }; |
38 | 38 | ||
39 | /* Current tracing type, default is FTRACE_TYPE_ENTER */ | 39 | /* Current tracing type, default is FTRACE_TYPE_ENTER */ |
40 | extern enum ftrace_tracing_type_t ftrace_tracing_type; | 40 | extern enum ftrace_tracing_type_t ftrace_tracing_type; |
41 | 41 | ||
42 | /** | 42 | /** |
43 | * ftrace_stop - stop function tracer. | 43 | * ftrace_stop - stop function tracer. |
44 | * | 44 | * |
45 | * A quick way to stop the function tracer. Note this is an on/off switch, | 45 | * A quick way to stop the function tracer. Note this is an on/off switch, |
46 | * it is not something that is recursive like preempt_disable. | 46 | * it is not something that is recursive like preempt_disable. |
47 | * This does not disable the calling of mcount, it only stops the | 47 | * This does not disable the calling of mcount, it only stops the |
48 | * calling of functions from mcount. | 48 | * calling of functions from mcount. |
49 | */ | 49 | */ |
50 | static inline void ftrace_stop(void) | 50 | static inline void ftrace_stop(void) |
51 | { | 51 | { |
52 | function_trace_stop = 1; | 52 | function_trace_stop = 1; |
53 | } | 53 | } |
54 | 54 | ||
55 | /** | 55 | /** |
56 | * ftrace_start - start the function tracer. | 56 | * ftrace_start - start the function tracer. |
57 | * | 57 | * |
58 | * This function is the inverse of ftrace_stop. This does not enable | 58 | * This function is the inverse of ftrace_stop. This does not enable |
59 | * the function tracing if the function tracer is disabled. This only | 59 | * the function tracing if the function tracer is disabled. This only |
60 | * sets the function tracer flag to continue calling the functions | 60 | * sets the function tracer flag to continue calling the functions |
61 | * from mcount. | 61 | * from mcount. |
62 | */ | 62 | */ |
63 | static inline void ftrace_start(void) | 63 | static inline void ftrace_start(void) |
64 | { | 64 | { |
65 | function_trace_stop = 0; | 65 | function_trace_stop = 0; |
66 | } | 66 | } |
67 | 67 | ||
68 | /* | 68 | /* |
69 | * The ftrace_ops must be static and should also | 69 | * The ftrace_ops must be static and should also |
70 | * be read_mostly. These functions do modify read_mostly variables | 70 | * be read_mostly. These functions do modify read_mostly variables |
71 | * so use them sparingly. Never free an ftrace_ops or modify the | 71 | * so use them sparingly. Never free an ftrace_ops or modify the |
72 | * next pointer after it has been registered. Even after unregistering | 72 | * next pointer after it has been registered. Even after unregistering |
73 | * it, the next pointer may still be used internally. | 73 | * it, the next pointer may still be used internally. |
74 | */ | 74 | */ |
75 | int register_ftrace_function(struct ftrace_ops *ops); | 75 | int register_ftrace_function(struct ftrace_ops *ops); |
76 | int unregister_ftrace_function(struct ftrace_ops *ops); | 76 | int unregister_ftrace_function(struct ftrace_ops *ops); |
77 | void clear_ftrace_function(void); | 77 | void clear_ftrace_function(void); |
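[Editor's sketch] A minimal usage example of this API, assuming CONFIG_FUNCTION_TRACER; the callback and ops names are hypothetical. Per the comment above, the ops is static and __read_mostly, and is never freed even after unregistering:

	#include <linux/ftrace.h>

	/* called from mcount for every traced function */
	static void my_trace_func(unsigned long ip, unsigned long parent_ip)
	{
		/* ip = address of the traced function, parent_ip = its call site */
	}

	/* static and __read_mostly, as required above; never freed */
	static struct ftrace_ops my_ops __read_mostly = {
		.func	= my_trace_func,
	};

	/* in init code: */
	register_ftrace_function(&my_ops);
	/* on teardown (the ops object itself must stay alive): */
	unregister_ftrace_function(&my_ops);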
78 | 78 | ||
79 | extern void ftrace_stub(unsigned long a0, unsigned long a1); | 79 | extern void ftrace_stub(unsigned long a0, unsigned long a1); |
80 | 80 | ||
81 | #else /* !CONFIG_FUNCTION_TRACER */ | 81 | #else /* !CONFIG_FUNCTION_TRACER */ |
82 | # define register_ftrace_function(ops) do { } while (0) | 82 | # define register_ftrace_function(ops) do { } while (0) |
83 | # define unregister_ftrace_function(ops) do { } while (0) | 83 | # define unregister_ftrace_function(ops) do { } while (0) |
84 | # define clear_ftrace_function(ops) do { } while (0) | 84 | # define clear_ftrace_function(ops) do { } while (0) |
85 | static inline void ftrace_kill(void) { } | 85 | static inline void ftrace_kill(void) { } |
86 | static inline void ftrace_stop(void) { } | 86 | static inline void ftrace_stop(void) { } |
87 | static inline void ftrace_start(void) { } | 87 | static inline void ftrace_start(void) { } |
88 | #endif /* CONFIG_FUNCTION_TRACER */ | 88 | #endif /* CONFIG_FUNCTION_TRACER */ |
89 | 89 | ||
90 | #ifdef CONFIG_STACK_TRACER | 90 | #ifdef CONFIG_STACK_TRACER |
91 | extern int stack_tracer_enabled; | 91 | extern int stack_tracer_enabled; |
92 | int | 92 | int |
93 | stack_trace_sysctl(struct ctl_table *table, int write, | 93 | stack_trace_sysctl(struct ctl_table *table, int write, |
94 | struct file *file, void __user *buffer, size_t *lenp, | 94 | struct file *file, void __user *buffer, size_t *lenp, |
95 | loff_t *ppos); | 95 | loff_t *ppos); |
96 | #endif | 96 | #endif |
97 | 97 | ||
98 | struct ftrace_func_command { | 98 | struct ftrace_func_command { |
99 | struct list_head list; | 99 | struct list_head list; |
100 | char *name; | 100 | char *name; |
101 | int (*func)(char *func, char *cmd, | 101 | int (*func)(char *func, char *cmd, |
102 | char *params, int enable); | 102 | char *params, int enable); |
103 | }; | 103 | }; |
104 | 104 | ||
105 | #ifdef CONFIG_DYNAMIC_FTRACE | 105 | #ifdef CONFIG_DYNAMIC_FTRACE |
106 | /* asm/ftrace.h must be defined for archs supporting dynamic ftrace */ | 106 | /* asm/ftrace.h must be defined for archs supporting dynamic ftrace */ |
107 | #include <asm/ftrace.h> | 107 | #include <asm/ftrace.h> |
108 | 108 | ||
109 | struct seq_file; | 109 | struct seq_file; |
110 | 110 | ||
111 | struct ftrace_probe_ops { | 111 | struct ftrace_probe_ops { |
112 | void (*func)(unsigned long ip, | 112 | void (*func)(unsigned long ip, |
113 | unsigned long parent_ip, | 113 | unsigned long parent_ip, |
114 | void **data); | 114 | void **data); |
115 | int (*callback)(unsigned long ip, void **data); | 115 | int (*callback)(unsigned long ip, void **data); |
116 | void (*free)(void **data); | 116 | void (*free)(void **data); |
117 | int (*print)(struct seq_file *m, | 117 | int (*print)(struct seq_file *m, |
118 | unsigned long ip, | 118 | unsigned long ip, |
119 | struct ftrace_probe_ops *ops, | 119 | struct ftrace_probe_ops *ops, |
120 | void *data); | 120 | void *data); |
121 | }; | 121 | }; |
122 | 122 | ||
123 | extern int | 123 | extern int |
124 | register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | 124 | register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, |
125 | void *data); | 125 | void *data); |
126 | extern void | 126 | extern void |
127 | unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | 127 | unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, |
128 | void *data); | 128 | void *data); |
129 | extern void | 129 | extern void |
130 | unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops); | 130 | unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops); |
131 | extern void unregister_ftrace_function_probe_all(char *glob); | 131 | extern void unregister_ftrace_function_probe_all(char *glob); |
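[Editor's sketch] A minimal probe built on this interface; the handler name and glob are illustrative. Only .func is filled in here, on the assumption that the other ftrace_probe_ops hooks (.callback, .free, .print) are optional helpers for per-site data and output:

	#include <linux/ftrace.h>

	/* runs at every mcount site whose function name matches the glob */
	static void my_probe(unsigned long ip, unsigned long parent_ip,
			     void **data)
	{
	}

	static struct ftrace_probe_ops my_probe_ops = {
		.func = my_probe,
	};

	/* attach the probe to every function starting with "schedule": */
	register_ftrace_function_probe("schedule*", &my_probe_ops, NULL);
	/* detach it again: */
	unregister_ftrace_function_probe("schedule*", &my_probe_ops, NULL);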
132 | 132 | ||
133 | enum { | 133 | enum { |
134 | FTRACE_FL_FREE = (1 << 0), | 134 | FTRACE_FL_FREE = (1 << 0), |
135 | FTRACE_FL_FAILED = (1 << 1), | 135 | FTRACE_FL_FAILED = (1 << 1), |
136 | FTRACE_FL_FILTER = (1 << 2), | 136 | FTRACE_FL_FILTER = (1 << 2), |
137 | FTRACE_FL_ENABLED = (1 << 3), | 137 | FTRACE_FL_ENABLED = (1 << 3), |
138 | FTRACE_FL_NOTRACE = (1 << 4), | 138 | FTRACE_FL_NOTRACE = (1 << 4), |
139 | FTRACE_FL_CONVERTED = (1 << 5), | 139 | FTRACE_FL_CONVERTED = (1 << 5), |
140 | FTRACE_FL_FROZEN = (1 << 6), | 140 | FTRACE_FL_FROZEN = (1 << 6), |
141 | }; | 141 | }; |
142 | 142 | ||
143 | struct dyn_ftrace { | 143 | struct dyn_ftrace { |
144 | struct list_head list; | 144 | struct list_head list; |
145 | unsigned long ip; /* address of mcount call-site */ | 145 | unsigned long ip; /* address of mcount call-site */ |
146 | unsigned long flags; | 146 | unsigned long flags; |
147 | struct dyn_arch_ftrace arch; | 147 | struct dyn_arch_ftrace arch; |
148 | }; | 148 | }; |
149 | 149 | ||
150 | int ftrace_force_update(void); | 150 | int ftrace_force_update(void); |
151 | void ftrace_set_filter(unsigned char *buf, int len, int reset); | 151 | void ftrace_set_filter(unsigned char *buf, int len, int reset); |
152 | 152 | ||
153 | int register_ftrace_command(struct ftrace_func_command *cmd); | 153 | int register_ftrace_command(struct ftrace_func_command *cmd); |
154 | int unregister_ftrace_command(struct ftrace_func_command *cmd); | 154 | int unregister_ftrace_command(struct ftrace_func_command *cmd); |
155 | 155 | ||
156 | /* defined in arch */ | 156 | /* defined in arch */ |
157 | extern int ftrace_ip_converted(unsigned long ip); | 157 | extern int ftrace_ip_converted(unsigned long ip); |
158 | extern int ftrace_dyn_arch_init(void *data); | 158 | extern int ftrace_dyn_arch_init(void *data); |
159 | extern int ftrace_update_ftrace_func(ftrace_func_t func); | 159 | extern int ftrace_update_ftrace_func(ftrace_func_t func); |
160 | extern void ftrace_caller(void); | 160 | extern void ftrace_caller(void); |
161 | extern void ftrace_call(void); | 161 | extern void ftrace_call(void); |
162 | extern void mcount_call(void); | 162 | extern void mcount_call(void); |
163 | 163 | ||
164 | #ifndef FTRACE_ADDR | 164 | #ifndef FTRACE_ADDR |
165 | #define FTRACE_ADDR ((unsigned long)ftrace_caller) | 165 | #define FTRACE_ADDR ((unsigned long)ftrace_caller) |
166 | #endif | 166 | #endif |
167 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 167 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
168 | extern void ftrace_graph_caller(void); | 168 | extern void ftrace_graph_caller(void); |
169 | extern int ftrace_enable_ftrace_graph_caller(void); | 169 | extern int ftrace_enable_ftrace_graph_caller(void); |
170 | extern int ftrace_disable_ftrace_graph_caller(void); | 170 | extern int ftrace_disable_ftrace_graph_caller(void); |
171 | #else | 171 | #else |
172 | static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; } | 172 | static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; } |
173 | static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; } | 173 | static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; } |
174 | #endif | 174 | #endif |
175 | 175 | ||
176 | /** | 176 | /** |
177 | * ftrace_make_nop - convert code into nop | 177 | * ftrace_make_nop - convert code into nop |
178 | * @mod: module structure if called by module load initialization | 178 | * @mod: module structure if called by module load initialization |
179 | * @rec: the mcount call site record | 179 | * @rec: the mcount call site record |
180 | * @addr: the address that the call site should be calling | 180 | * @addr: the address that the call site should be calling |
181 | * | 181 | * |
182 | * This is a very sensitive operation and great care needs | 182 | * This is a very sensitive operation and great care needs |
183 | * to be taken by the arch. The operation should carefully | 183 | * to be taken by the arch. The operation should carefully |
184 | * read the location, check to see if what is read is indeed | 184 | * read the location, check to see if what is read is indeed |
185 | * what we expect it to be, and then on success of the compare, | 185 | * what we expect it to be, and then on success of the compare, |
186 | * it should write to the location. | 186 | * it should write to the location. |
187 | * | 187 | * |
188 | * The code segment at @rec->ip should be a caller to @addr | 188 | * The code segment at @rec->ip should be a caller to @addr |
189 | * | 189 | * |
190 | * Return must be: | 190 | * Return must be: |
191 | * 0 on success | 191 | * 0 on success |
192 | * -EFAULT on error reading the location | 192 | * -EFAULT on error reading the location |
193 | * -EINVAL on a failed compare of the contents | 193 | * -EINVAL on a failed compare of the contents |
194 | * -EPERM on error writing to the location | 194 | * -EPERM on error writing to the location |
195 | * Any other value will be considered a failure. | 195 | * Any other value will be considered a failure. |
196 | */ | 196 | */ |
197 | extern int ftrace_make_nop(struct module *mod, | 197 | extern int ftrace_make_nop(struct module *mod, |
198 | struct dyn_ftrace *rec, unsigned long addr); | 198 | struct dyn_ftrace *rec, unsigned long addr); |
199 | 199 | ||
200 | /** | 200 | /** |
201 | * ftrace_make_call - convert a nop call site into a call to addr | 201 | * ftrace_make_call - convert a nop call site into a call to addr |
202 | * @rec: the mcount call site record | 202 | * @rec: the mcount call site record |
203 | * @addr: the address that the call site should call | 203 | * @addr: the address that the call site should call |
204 | * | 204 | * |
205 | * This is a very sensitive operation and great care needs | 205 | * This is a very sensitive operation and great care needs |
206 | * to be taken by the arch. The operation should carefully | 206 | * to be taken by the arch. The operation should carefully |
207 | * read the location, check to see if what is read is indeed | 207 | * read the location, check to see if what is read is indeed |
208 | * what we expect it to be, and then on success of the compare, | 208 | * what we expect it to be, and then on success of the compare, |
209 | * it should write to the location. | 209 | * it should write to the location. |
210 | * | 210 | * |
211 | * The code segment at @rec->ip should be a nop | 211 | * The code segment at @rec->ip should be a nop |
212 | * | 212 | * |
213 | * Return must be: | 213 | * Return must be: |
214 | * 0 on success | 214 | * 0 on success |
215 | * -EFAULT on error reading the location | 215 | * -EFAULT on error reading the location |
216 | * -EINVAL on a failed compare of the contents | 216 | * -EINVAL on a failed compare of the contents |
217 | * -EPERM on error writing to the location | 217 | * -EPERM on error writing to the location |
218 | * Any other value will be considered a failure. | 218 | * Any other value will be considered a failure. |
219 | */ | 219 | */ |
220 | extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); | 220 | extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); |
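[Editor's sketch] The read/compare/write pattern both comments above describe, condensed into one helper with each failure mapped to its documented error code. This is close to, but not literally, the x86 implementation in arch/x86/kernel/ftrace.c; probe_kernel_write() stands in here for whatever arch-safe text-poke mechanism is actually used:

	#include <linux/uaccess.h>	/* probe_kernel_read/write */
	#include <linux/string.h>
	#include <asm/ftrace.h>		/* MCOUNT_INSN_SIZE */

	static int
	ftrace_modify_code(unsigned long ip, unsigned char *old_code,
			   unsigned char *new_code)
	{
		unsigned char replaced[MCOUNT_INSN_SIZE];

		/* read the text we are about to modify */
		if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
			return -EFAULT;

		/* make sure it is what we expect it to be */
		if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
			return -EINVAL;

		/* replace the call site with the new text */
		if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
			return -EPERM;

		return 0;
	}

ftrace_make_nop() and ftrace_make_call() then reduce to building the expected old bytes and the replacement bytes for the call site and handing them to this helper.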
221 | 221 | ||
222 | 222 | ||
223 | /* May be defined in arch */ | 223 | /* May be defined in arch */ |
224 | extern int ftrace_arch_read_dyn_info(char *buf, int size); | 224 | extern int ftrace_arch_read_dyn_info(char *buf, int size); |
225 | 225 | ||
226 | extern int skip_trace(unsigned long ip); | 226 | extern int skip_trace(unsigned long ip); |
227 | 227 | ||
228 | extern void ftrace_release(void *start, unsigned long size); | 228 | extern void ftrace_release(void *start, unsigned long size); |
229 | 229 | ||
230 | extern void ftrace_disable_daemon(void); | 230 | extern void ftrace_disable_daemon(void); |
231 | extern void ftrace_enable_daemon(void); | 231 | extern void ftrace_enable_daemon(void); |
232 | #else | 232 | #else |
233 | # define skip_trace(ip) ({ 0; }) | 233 | # define skip_trace(ip) ({ 0; }) |
234 | # define ftrace_force_update() ({ 0; }) | 234 | # define ftrace_force_update() ({ 0; }) |
235 | # define ftrace_set_filter(buf, len, reset) do { } while (0) | 235 | # define ftrace_set_filter(buf, len, reset) do { } while (0) |
236 | # define ftrace_disable_daemon() do { } while (0) | 236 | # define ftrace_disable_daemon() do { } while (0) |
237 | # define ftrace_enable_daemon() do { } while (0) | 237 | # define ftrace_enable_daemon() do { } while (0) |
238 | static inline void ftrace_release(void *start, unsigned long size) { } | 238 | static inline void ftrace_release(void *start, unsigned long size) { } |
239 | static inline int register_ftrace_command(struct ftrace_func_command *cmd) | 239 | static inline int register_ftrace_command(struct ftrace_func_command *cmd) |
240 | { | 240 | { |
241 | return -EINVAL; | 241 | return -EINVAL; |
242 | } | 242 | } |
243 | static inline int unregister_ftrace_command(char *cmd_name) | 243 | static inline int unregister_ftrace_command(char *cmd_name) |
244 | { | 244 | { |
245 | return -EINVAL; | 245 | return -EINVAL; |
246 | } | 246 | } |
247 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 247 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
248 | 248 | ||
249 | /* totally disable ftrace - cannot re-enable after this */ | 249 | /* totally disable ftrace - cannot re-enable after this */ |
250 | void ftrace_kill(void); | 250 | void ftrace_kill(void); |
251 | 251 | ||
252 | static inline void tracer_disable(void) | 252 | static inline void tracer_disable(void) |
253 | { | 253 | { |
254 | #ifdef CONFIG_FUNCTION_TRACER | 254 | #ifdef CONFIG_FUNCTION_TRACER |
255 | ftrace_enabled = 0; | 255 | ftrace_enabled = 0; |
256 | #endif | 256 | #endif |
257 | } | 257 | } |
258 | 258 | ||
259 | /* | 259 | /* |
260 | * Ftrace disable/restore without lock. Some synchronization mechanism | 260 | * Ftrace disable/restore without lock. Some synchronization mechanism |
261 | * must be used to prevent ftrace_enabled from being changed between | 261 | * must be used to prevent ftrace_enabled from being changed between |
262 | * disable/restore. | 262 | * disable/restore. |
263 | */ | 263 | */ |
264 | static inline int __ftrace_enabled_save(void) | 264 | static inline int __ftrace_enabled_save(void) |
265 | { | 265 | { |
266 | #ifdef CONFIG_FUNCTION_TRACER | 266 | #ifdef CONFIG_FUNCTION_TRACER |
267 | int saved_ftrace_enabled = ftrace_enabled; | 267 | int saved_ftrace_enabled = ftrace_enabled; |
268 | ftrace_enabled = 0; | 268 | ftrace_enabled = 0; |
269 | return saved_ftrace_enabled; | 269 | return saved_ftrace_enabled; |
270 | #else | 270 | #else |
271 | return 0; | 271 | return 0; |
272 | #endif | 272 | #endif |
273 | } | 273 | } |
274 | 274 | ||
275 | static inline void __ftrace_enabled_restore(int enabled) | 275 | static inline void __ftrace_enabled_restore(int enabled) |
276 | { | 276 | { |
277 | #ifdef CONFIG_FUNCTION_TRACER | 277 | #ifdef CONFIG_FUNCTION_TRACER |
278 | ftrace_enabled = enabled; | 278 | ftrace_enabled = enabled; |
279 | #endif | 279 | #endif |
280 | } | 280 | } |
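[Editor's sketch] The intended pairing of these two helpers; per the comment above, the caller supplies its own serialization around the pair:

	int saved;

	saved = __ftrace_enabled_save();	/* tracing off, old state kept */
	/* ... code that must not be traced ... */
	__ftrace_enabled_restore(saved);	/* put the old state back */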
281 | 281 | ||
282 | #ifdef CONFIG_FRAME_POINTER | 282 | #ifdef CONFIG_FRAME_POINTER |
283 | /* TODO: need to fix this for ARM */ | 283 | /* TODO: need to fix this for ARM */ |
284 | # define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) | 284 | # define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) |
285 | # define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1)) | 285 | # define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1)) |
286 | # define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2)) | 286 | # define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2)) |
287 | # define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3)) | 287 | # define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3)) |
288 | # define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4)) | 288 | # define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4)) |
289 | # define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5)) | 289 | # define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5)) |
290 | # define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6)) | 290 | # define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6)) |
291 | #else | 291 | #else |
292 | # define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) | 292 | # define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) |
293 | # define CALLER_ADDR1 0UL | 293 | # define CALLER_ADDR1 0UL |
294 | # define CALLER_ADDR2 0UL | 294 | # define CALLER_ADDR2 0UL |
295 | # define CALLER_ADDR3 0UL | 295 | # define CALLER_ADDR3 0UL |
296 | # define CALLER_ADDR4 0UL | 296 | # define CALLER_ADDR4 0UL |
297 | # define CALLER_ADDR5 0UL | 297 | # define CALLER_ADDR5 0UL |
298 | # define CALLER_ADDR6 0UL | 298 | # define CALLER_ADDR6 0UL |
299 | #endif | 299 | #endif |
300 | 300 | ||
301 | #ifdef CONFIG_IRQSOFF_TRACER | 301 | #ifdef CONFIG_IRQSOFF_TRACER |
302 | extern void time_hardirqs_on(unsigned long a0, unsigned long a1); | 302 | extern void time_hardirqs_on(unsigned long a0, unsigned long a1); |
303 | extern void time_hardirqs_off(unsigned long a0, unsigned long a1); | 303 | extern void time_hardirqs_off(unsigned long a0, unsigned long a1); |
304 | #else | 304 | #else |
305 | # define time_hardirqs_on(a0, a1) do { } while (0) | 305 | # define time_hardirqs_on(a0, a1) do { } while (0) |
306 | # define time_hardirqs_off(a0, a1) do { } while (0) | 306 | # define time_hardirqs_off(a0, a1) do { } while (0) |
307 | #endif | 307 | #endif |
308 | 308 | ||
309 | #ifdef CONFIG_PREEMPT_TRACER | 309 | #ifdef CONFIG_PREEMPT_TRACER |
310 | extern void trace_preempt_on(unsigned long a0, unsigned long a1); | 310 | extern void trace_preempt_on(unsigned long a0, unsigned long a1); |
311 | extern void trace_preempt_off(unsigned long a0, unsigned long a1); | 311 | extern void trace_preempt_off(unsigned long a0, unsigned long a1); |
312 | #else | 312 | #else |
313 | # define trace_preempt_on(a0, a1) do { } while (0) | 313 | # define trace_preempt_on(a0, a1) do { } while (0) |
314 | # define trace_preempt_off(a0, a1) do { } while (0) | 314 | # define trace_preempt_off(a0, a1) do { } while (0) |
315 | #endif | 315 | #endif |
316 | 316 | ||
317 | #ifdef CONFIG_TRACING | 317 | #ifdef CONFIG_TRACING |
318 | extern int ftrace_dump_on_oops; | 318 | extern int ftrace_dump_on_oops; |
319 | 319 | ||
320 | extern void tracing_start(void); | 320 | extern void tracing_start(void); |
321 | extern void tracing_stop(void); | 321 | extern void tracing_stop(void); |
322 | extern void ftrace_off_permanent(void); | 322 | extern void ftrace_off_permanent(void); |
323 | 323 | ||
324 | extern void | 324 | extern void |
325 | ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3); | 325 | ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3); |
326 | 326 | ||
327 | /** | 327 | /** |
328 | * ftrace_printk - printf formatting in the ftrace buffer | 328 | * ftrace_printk - printf formatting in the ftrace buffer |
329 | * @fmt: the printf format for printing | 329 | * @fmt: the printf format for printing |
330 | * | 330 | * |
331 | * Note: __ftrace_printk is an internal function for ftrace_printk and | 331 | * Note: __ftrace_printk is an internal function for ftrace_printk and |
332 | * the @ip is passed in via the ftrace_printk macro. | 332 | * the @ip is passed in via the ftrace_printk macro. |
333 | * | 333 | * |
334 | * This function allows a kernel developer to debug fast path sections | 334 | * This function allows a kernel developer to debug fast path sections |
335 | * that printk is not appropriate for. By scattering in various | 335 | * that printk is not appropriate for. By scattering in various |
336 | * printk-like tracing in the code, a developer can quickly see | 336 | * printk-like tracing in the code, a developer can quickly see |
337 | * where problems are occurring. | 337 | * where problems are occurring. |
338 | * | 338 | * |
339 | * This is intended as a debugging tool for the developer only. | 339 | * This is intended as a debugging tool for the developer only. |
340 | * Please refrain from leaving ftrace_printks scattered around in | 340 | * Please refrain from leaving ftrace_printks scattered around in |
341 | * your code. | 341 | * your code. |
342 | */ | 342 | */ |
343 | # define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt) | 343 | # define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt) |
344 | extern int | 344 | extern int |
345 | __ftrace_printk(unsigned long ip, const char *fmt, ...) | 345 | __ftrace_printk(unsigned long ip, const char *fmt, ...) |
346 | __attribute__ ((format (printf, 2, 3))); | 346 | __attribute__ ((format (printf, 2, 3))); |
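[Editor's sketch] Typical use, per the comment above; the format arguments are illustrative:

	ftrace_printk("ip=%lx depth=%d\n", ip, depth);

The record lands in the ftrace ring buffer rather than the printk log, so it is cheap enough for fast paths and appears interleaved with the other trace entries.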
347 | # define ftrace_vprintk(fmt, ap) __ftrace_vprintk(_THIS_IP_, fmt, ap) | 347 | # define ftrace_vprintk(fmt, ap) __ftrace_vprintk(_THIS_IP_, fmt, ap) |
348 | extern int | 348 | extern int |
349 | __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap); | 349 | __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap); |
350 | extern void ftrace_dump(void); | 350 | extern void ftrace_dump(void); |
351 | #else | 351 | #else |
352 | static inline void | 352 | static inline void |
353 | ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { } | 353 | ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { } |
354 | static inline int | 354 | static inline int |
355 | ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); | 355 | ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); |
356 | 356 | ||
357 | static inline void tracing_start(void) { } | 357 | static inline void tracing_start(void) { } |
358 | static inline void tracing_stop(void) { } | 358 | static inline void tracing_stop(void) { } |
359 | static inline void ftrace_off_permanent(void) { } | 359 | static inline void ftrace_off_permanent(void) { } |
360 | static inline int | 360 | static inline int |
361 | ftrace_printk(const char *fmt, ...) | 361 | ftrace_printk(const char *fmt, ...) |
362 | { | 362 | { |
363 | return 0; | 363 | return 0; |
364 | } | 364 | } |
365 | static inline int | 365 | static inline int |
366 | ftrace_vprintk(const char *fmt, va_list ap) | 366 | ftrace_vprintk(const char *fmt, va_list ap) |
367 | { | 367 | { |
368 | return 0; | 368 | return 0; |
369 | } | 369 | } |
370 | static inline void ftrace_dump(void) { } | 370 | static inline void ftrace_dump(void) { } |
371 | #endif | 371 | #endif |
372 | 372 | ||
373 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD | 373 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD |
374 | extern void ftrace_init(void); | 374 | extern void ftrace_init(void); |
375 | extern void ftrace_init_module(struct module *mod, | 375 | extern void ftrace_init_module(struct module *mod, |
376 | unsigned long *start, unsigned long *end); | 376 | unsigned long *start, unsigned long *end); |
377 | #else | 377 | #else |
378 | static inline void ftrace_init(void) { } | 378 | static inline void ftrace_init(void) { } |
379 | static inline void | 379 | static inline void |
380 | ftrace_init_module(struct module *mod, | 380 | ftrace_init_module(struct module *mod, |
381 | unsigned long *start, unsigned long *end) { } | 381 | unsigned long *start, unsigned long *end) { } |
382 | #endif | 382 | #endif |
383 | 383 | ||
384 | /* | 384 | /* |
385 | * Structure that defines an entry function trace. | 385 | * Structure that defines an entry function trace. |
386 | */ | 386 | */ |
387 | struct ftrace_graph_ent { | 387 | struct ftrace_graph_ent { |
388 | unsigned long func; /* Current function */ | 388 | unsigned long func; /* Current function */ |
389 | int depth; | 389 | int depth; |
390 | }; | 390 | }; |
391 | 391 | ||
392 | /* | 392 | /* |
393 | * Structure that defines a return function trace. | 393 | * Structure that defines a return function trace. |
394 | */ | 394 | */ |
395 | struct ftrace_graph_ret { | 395 | struct ftrace_graph_ret { |
396 | unsigned long func; /* Current function */ | 396 | unsigned long func; /* Current function */ |
397 | unsigned long long calltime; | 397 | unsigned long long calltime; |
398 | unsigned long long rettime; | 398 | unsigned long long rettime; |
399 | /* Number of functions that overran the depth limit for current task */ | 399 | /* Number of functions that overran the depth limit for current task */ |
400 | unsigned long overrun; | 400 | unsigned long overrun; |
401 | int depth; | 401 | int depth; |
402 | }; | 402 | }; |
403 | 403 | ||
404 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 404 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
405 | 405 | ||
406 | /* | 406 | /* |
407 | * Stack of return addresses for functions | ||
408 | * of a thread. | ||
409 | * Used in struct task_struct | ||
410 | */ | ||
411 | struct ftrace_ret_stack { | ||
412 | unsigned long ret; | ||
413 | unsigned long func; | ||
414 | unsigned long long calltime; | ||
415 | }; | ||
416 | |||
417 | /* | ||
418 | * Primary handler of a function return. | ||
419 | * It relies on ftrace_return_to_handler. | ||
420 | * Defined in entry_32/64.S | ||
421 | */ | ||
422 | extern void return_to_handler(void); | ||
423 | |||
424 | extern int | ||
425 | ftrace_push_return_trace(unsigned long ret, unsigned long long time, | ||
426 | unsigned long func, int *depth); | ||
427 | extern void | ||
428 | ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret); | ||
429 | |||
430 | /* | ||
407 | * Sometimes we don't want to trace a function with the function | 431 | * Sometimes we don't want to trace a function with the function |
408 | * graph tracer but we still want it traced by the usual function | 432 | * graph tracer but we still want it traced by the usual function |
409 | * tracer when the function graph tracer is not configured. | 433 | * tracer when the function graph tracer is not configured. |
410 | */ | 434 | */ |
411 | #define __notrace_funcgraph notrace | 435 | #define __notrace_funcgraph notrace |
412 | 436 | ||
413 | /* | 437 | /* |
414 | * We want to know which function is an entrypoint of a hardirq. | 438 | * We want to know which function is an entrypoint of a hardirq. |
415 | * That will help us mark it in the output. | 439 | * That will help us mark it in the output. |
416 | */ | 440 | */ |
417 | #define __irq_entry __attribute__((__section__(".irqentry.text"))) | 441 | #define __irq_entry __attribute__((__section__(".irqentry.text"))) |
418 | 442 | ||
419 | /* Limits of hardirq entrypoints */ | 443 | /* Limits of hardirq entrypoints */ |
420 | extern char __irqentry_text_start[]; | 444 | extern char __irqentry_text_start[]; |
421 | extern char __irqentry_text_end[]; | 445 | extern char __irqentry_text_end[]; |
422 | 446 | ||
423 | #define FTRACE_RETFUNC_DEPTH 50 | 447 | #define FTRACE_RETFUNC_DEPTH 50 |
424 | #define FTRACE_RETSTACK_ALLOC_SIZE 32 | 448 | #define FTRACE_RETSTACK_ALLOC_SIZE 32 |
425 | /* Type of the callback handlers for the function graph tracer */ | 449 | /* Type of the callback handlers for the function graph tracer */ |
426 | typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */ | 450 | typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */ |
427 | typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */ | 451 | typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */ |
428 | 452 | ||
429 | extern int register_ftrace_graph(trace_func_graph_ret_t retfunc, | 453 | extern int register_ftrace_graph(trace_func_graph_ret_t retfunc, |
430 | trace_func_graph_ent_t entryfunc); | 454 | trace_func_graph_ent_t entryfunc); |
431 | 455 | ||
432 | extern void ftrace_graph_stop(void); | 456 | extern void ftrace_graph_stop(void); |
433 | 457 | ||
434 | /* The current handlers in use */ | 458 | /* The current handlers in use */ |
435 | extern trace_func_graph_ret_t ftrace_graph_return; | 459 | extern trace_func_graph_ret_t ftrace_graph_return; |
436 | extern trace_func_graph_ent_t ftrace_graph_entry; | 460 | extern trace_func_graph_ent_t ftrace_graph_entry; |
437 | 461 | ||
438 | extern void unregister_ftrace_graph(void); | 462 | extern void unregister_ftrace_graph(void); |
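[Editor's sketch] Registering the two handlers; the handler names are hypothetical. Note the argument order: return handler first, entry handler second. Returning 0 from the entry handler skips that function, as the arch-side hook earlier in this diff shows:

	#include <linux/ftrace.h>

	/* entry handler: return 0 to skip this function, nonzero to trace it */
	static int my_graph_entry(struct ftrace_graph_ent *ent)
	{
		return 1;
	}

	/* return handler: rettime - calltime gives the duration */
	static void my_graph_return(struct ftrace_graph_ret *ret)
	{
		unsigned long long duration = ret->rettime - ret->calltime;
		(void)duration;
	}

	register_ftrace_graph(my_graph_return, my_graph_entry);
	/* ... */
	unregister_ftrace_graph();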
439 | 463 | ||
440 | extern void ftrace_graph_init_task(struct task_struct *t); | 464 | extern void ftrace_graph_init_task(struct task_struct *t); |
441 | extern void ftrace_graph_exit_task(struct task_struct *t); | 465 | extern void ftrace_graph_exit_task(struct task_struct *t); |
442 | 466 | ||
443 | static inline int task_curr_ret_stack(struct task_struct *t) | 467 | static inline int task_curr_ret_stack(struct task_struct *t) |
444 | { | 468 | { |
445 | return t->curr_ret_stack; | 469 | return t->curr_ret_stack; |
446 | } | 470 | } |
447 | 471 | ||
448 | static inline void pause_graph_tracing(void) | 472 | static inline void pause_graph_tracing(void) |
449 | { | 473 | { |
450 | atomic_inc(¤t->tracing_graph_pause); | 474 | atomic_inc(¤t->tracing_graph_pause); |
451 | } | 475 | } |
452 | 476 | ||
453 | static inline void unpause_graph_tracing(void) | 477 | static inline void unpause_graph_tracing(void) |
454 | { | 478 | { |
455 | atomic_dec(¤t->tracing_graph_pause); | 479 | atomic_dec(¤t->tracing_graph_pause); |
456 | } | 480 | } |
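[Editor's sketch] Because tracing_graph_pause is an atomic counter, these calls nest safely:

	pause_graph_tracing();
	/* nothing in here appears in the function graph output */
	unpause_graph_tracing();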
457 | #else | 481 | #else |
458 | 482 | ||
459 | #define __notrace_funcgraph | 483 | #define __notrace_funcgraph |
460 | #define __irq_entry | 484 | #define __irq_entry |
461 | 485 | ||
462 | static inline void ftrace_graph_init_task(struct task_struct *t) { } | 486 | static inline void ftrace_graph_init_task(struct task_struct *t) { } |
463 | static inline void ftrace_graph_exit_task(struct task_struct *t) { } | 487 | static inline void ftrace_graph_exit_task(struct task_struct *t) { } |
464 | 488 | ||
465 | static inline int task_curr_ret_stack(struct task_struct *tsk) | 489 | static inline int task_curr_ret_stack(struct task_struct *tsk) |
466 | { | 490 | { |
467 | return -1; | 491 | return -1; |
468 | } | 492 | } |
469 | 493 | ||
470 | static inline void pause_graph_tracing(void) { } | 494 | static inline void pause_graph_tracing(void) { } |
471 | static inline void unpause_graph_tracing(void) { } | 495 | static inline void unpause_graph_tracing(void) { } |
472 | #endif | 496 | #endif |
473 | 497 | ||
474 | #ifdef CONFIG_TRACING | 498 | #ifdef CONFIG_TRACING |
475 | #include <linux/sched.h> | 499 | #include <linux/sched.h> |
476 | 500 | ||
477 | /* flags for current->trace */ | 501 | /* flags for current->trace */ |
478 | enum { | 502 | enum { |
479 | TSK_TRACE_FL_TRACE_BIT = 0, | 503 | TSK_TRACE_FL_TRACE_BIT = 0, |
480 | TSK_TRACE_FL_GRAPH_BIT = 1, | 504 | TSK_TRACE_FL_GRAPH_BIT = 1, |
481 | }; | 505 | }; |
482 | enum { | 506 | enum { |
483 | TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT, | 507 | TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT, |
484 | TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT, | 508 | TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT, |
485 | }; | 509 | }; |
486 | 510 | ||
487 | static inline void set_tsk_trace_trace(struct task_struct *tsk) | 511 | static inline void set_tsk_trace_trace(struct task_struct *tsk) |
488 | { | 512 | { |
489 | set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace); | 513 | set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace); |
490 | } | 514 | } |
491 | 515 | ||
492 | static inline void clear_tsk_trace_trace(struct task_struct *tsk) | 516 | static inline void clear_tsk_trace_trace(struct task_struct *tsk) |
493 | { | 517 | { |
494 | clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace); | 518 | clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace); |
495 | } | 519 | } |
496 | 520 | ||
497 | static inline int test_tsk_trace_trace(struct task_struct *tsk) | 521 | static inline int test_tsk_trace_trace(struct task_struct *tsk) |
498 | { | 522 | { |
499 | return tsk->trace & TSK_TRACE_FL_TRACE; | 523 | return tsk->trace & TSK_TRACE_FL_TRACE; |
500 | } | 524 | } |
501 | 525 | ||
502 | static inline void set_tsk_trace_graph(struct task_struct *tsk) | 526 | static inline void set_tsk_trace_graph(struct task_struct *tsk) |
503 | { | 527 | { |
504 | set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace); | 528 | set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace); |
505 | } | 529 | } |
506 | 530 | ||
507 | static inline void clear_tsk_trace_graph(struct task_struct *tsk) | 531 | static inline void clear_tsk_trace_graph(struct task_struct *tsk) |
508 | { | 532 | { |
509 | clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace); | 533 | clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace); |
510 | } | 534 | } |
511 | 535 | ||
512 | static inline int test_tsk_trace_graph(struct task_struct *tsk) | 536 | static inline int test_tsk_trace_graph(struct task_struct *tsk) |
513 | { | 537 | { |
514 | return tsk->trace & TSK_TRACE_FL_GRAPH; | 538 | return tsk->trace & TSK_TRACE_FL_GRAPH; |
515 | } | 539 | } |
516 | 540 | ||
517 | #endif /* CONFIG_TRACING */ | 541 | #endif /* CONFIG_TRACING */ |
518 | 542 | ||
519 | 543 | ||
520 | #ifdef CONFIG_HW_BRANCH_TRACER | 544 | #ifdef CONFIG_HW_BRANCH_TRACER |
521 | 545 | ||
522 | void trace_hw_branch(u64 from, u64 to); | 546 | void trace_hw_branch(u64 from, u64 to); |
523 | void trace_hw_branch_oops(void); | 547 | void trace_hw_branch_oops(void); |
524 | 548 | ||
525 | #else /* CONFIG_HW_BRANCH_TRACER */ | 549 | #else /* CONFIG_HW_BRANCH_TRACER */ |
526 | 550 | ||
527 | static inline void trace_hw_branch(u64 from, u64 to) {} | 551 | static inline void trace_hw_branch(u64 from, u64 to) {} |
528 | static inline void trace_hw_branch_oops(void) {} | 552 | static inline void trace_hw_branch_oops(void) {} |
529 | 553 | ||
530 | #endif /* CONFIG_HW_BRANCH_TRACER */ | 554 | #endif /* CONFIG_HW_BRANCH_TRACER */ |
531 | 555 | ||
532 | #endif /* _LINUX_FTRACE_H */ | 556 | #endif /* _LINUX_FTRACE_H */ |
533 | 557 |
kernel/trace/trace_functions_graph.c
1 | /* | 1 | /* |
2 | * | 2 | * |
3 | * Function graph tracer. | 3 | * Function graph tracer. |
4 | * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com> | 4 | * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com> |
5 | * Mostly borrowed from function tracer which | 5 | * Mostly borrowed from function tracer which |
6 | * is Copyright (c) Steven Rostedt <srostedt@redhat.com> | 6 | * is Copyright (c) Steven Rostedt <srostedt@redhat.com> |
7 | * | 7 | * |
8 | */ | 8 | */ |
9 | #include <linux/debugfs.h> | 9 | #include <linux/debugfs.h> |
10 | #include <linux/uaccess.h> | 10 | #include <linux/uaccess.h> |
11 | #include <linux/ftrace.h> | 11 | #include <linux/ftrace.h> |
12 | #include <linux/fs.h> | 12 | #include <linux/fs.h> |
13 | 13 | ||
14 | #include "trace.h" | 14 | #include "trace.h" |
15 | #include "trace_output.h" | 15 | #include "trace_output.h" |
16 | 16 | ||
17 | #define TRACE_GRAPH_INDENT 2 | 17 | #define TRACE_GRAPH_INDENT 2 |
18 | 18 | ||
19 | /* Flag options */ | 19 | /* Flag options */ |
20 | #define TRACE_GRAPH_PRINT_OVERRUN 0x1 | 20 | #define TRACE_GRAPH_PRINT_OVERRUN 0x1 |
21 | #define TRACE_GRAPH_PRINT_CPU 0x2 | 21 | #define TRACE_GRAPH_PRINT_CPU 0x2 |
22 | #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 | 22 | #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 |
23 | #define TRACE_GRAPH_PRINT_PROC 0x8 | 23 | #define TRACE_GRAPH_PRINT_PROC 0x8 |
24 | #define TRACE_GRAPH_PRINT_DURATION 0x10 | 24 | #define TRACE_GRAPH_PRINT_DURATION 0x10 |
25 | #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 | 25 | #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 |
26 | 26 | ||
27 | static struct tracer_opt trace_opts[] = { | 27 | static struct tracer_opt trace_opts[] = { |
28 | /* Display overruns? (for self-debugging) */ | 28 | /* Display overruns? (for self-debugging) */ |
29 | { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, | 29 | { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, |
30 | /* Display CPU ? */ | 30 | /* Display CPU ? */ |
31 | { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) }, | 31 | { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) }, |
32 | /* Display Overhead ? */ | 32 | /* Display Overhead ? */ |
33 | { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) }, | 33 | { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) }, |
34 | /* Display proc name/pid */ | 34 | /* Display proc name/pid */ |
35 | { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) }, | 35 | { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) }, |
36 | /* Display duration of execution */ | 36 | /* Display duration of execution */ |
37 | { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) }, | 37 | { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) }, |
38 | /* Display absolute time of an entry */ | 38 | /* Display absolute time of an entry */ |
39 | { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) }, | 39 | { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) }, |
40 | { } /* Empty entry */ | 40 | { } /* Empty entry */ |
41 | }; | 41 | }; |
42 | 42 | ||
43 | static struct tracer_flags tracer_flags = { | 43 | static struct tracer_flags tracer_flags = { |
44 | /* Don't display overruns and proc by default */ | 44 | /* Don't display overruns and proc by default */ |
45 | .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD | | 45 | .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD | |
46 | TRACE_GRAPH_PRINT_DURATION, | 46 | TRACE_GRAPH_PRINT_DURATION, |
47 | .opts = trace_opts | 47 | .opts = trace_opts |
48 | }; | 48 | }; |
49 | 49 | ||
50 | /* pid on the last trace processed */ | 50 | /* pid on the last trace processed */ |
51 | 51 | ||
52 | 52 | ||
53 | /* Add a function return address to the current task's return trace stack. */ | ||
54 | int | ||
55 | ftrace_push_return_trace(unsigned long ret, unsigned long long time, | ||
56 | unsigned long func, int *depth) | ||
57 | { | ||
58 | int index; | ||
59 | |||
60 | if (!current->ret_stack) | ||
61 | return -EBUSY; | ||
62 | |||
63 | /* The return trace stack is full */ | ||
64 | if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) { | ||
65 | atomic_inc(¤t->trace_overrun); | ||
66 | return -EBUSY; | ||
67 | } | ||
68 | |||
69 | index = ++current->curr_ret_stack; | ||
70 | barrier(); | ||
71 | current->ret_stack[index].ret = ret; | ||
72 | current->ret_stack[index].func = func; | ||
73 | current->ret_stack[index].calltime = time; | ||
74 | *depth = index; | ||
75 | |||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | /* Retrieve a function return address from the current task's return trace stack. */ | ||
80 | void | ||
81 | ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret) | ||
82 | { | ||
83 | int index; | ||
84 | |||
85 | index = current->curr_ret_stack; | ||
86 | |||
87 | if (unlikely(index < 0)) { | ||
88 | ftrace_graph_stop(); | ||
89 | WARN_ON(1); | ||
90 | /* Might as well panic, otherwise we have nowhere to go */ | ||
91 | *ret = (unsigned long)panic; | ||
92 | return; | ||
93 | } | ||
94 | |||
95 | *ret = current->ret_stack[index].ret; | ||
96 | trace->func = current->ret_stack[index].func; | ||
97 | trace->calltime = current->ret_stack[index].calltime; | ||
98 | trace->overrun = atomic_read(¤t->trace_overrun); | ||
99 | trace->depth = index; | ||
100 | barrier(); | ||
101 | current->curr_ret_stack--; | ||
102 | |||
103 | } | ||
104 | |||
105 | /* | ||
106 | * Send the trace to the ring-buffer. | ||
107 | * @return the original return address. | ||
108 | */ | ||
109 | unsigned long ftrace_return_to_handler(void) | ||
110 | { | ||
111 | struct ftrace_graph_ret trace; | ||
112 | unsigned long ret; | ||
113 | |||
114 | ftrace_pop_return_trace(&trace, &ret); | ||
115 | trace.rettime = cpu_clock(raw_smp_processor_id()); | ||
116 | ftrace_graph_return(&trace); | ||
117 | |||
118 | if (unlikely(!ret)) { | ||
119 | ftrace_graph_stop(); | ||
120 | WARN_ON(1); | ||
121 | /* Might as well panic. What else to do? */ | ||
122 | ret = (unsigned long)panic; | ||
123 | } | ||
124 | |||
125 | return ret; | ||
126 | } | ||
127 | |||
53 | static int graph_trace_init(struct trace_array *tr) | 128 | static int graph_trace_init(struct trace_array *tr) |
54 | { | 129 | { |
55 | int ret = register_ftrace_graph(&trace_graph_return, | 130 | int ret = register_ftrace_graph(&trace_graph_return, |
56 | &trace_graph_entry); | 131 | &trace_graph_entry); |
57 | if (ret) | 132 | if (ret) |
58 | return ret; | 133 | return ret; |
59 | tracing_start_cmdline_record(); | 134 | tracing_start_cmdline_record(); |
60 | 135 | ||
61 | return 0; | 136 | return 0; |
62 | } | 137 | } |
63 | 138 | ||
64 | static void graph_trace_reset(struct trace_array *tr) | 139 | static void graph_trace_reset(struct trace_array *tr) |
65 | { | 140 | { |
66 | tracing_stop_cmdline_record(); | 141 | tracing_stop_cmdline_record(); |
67 | unregister_ftrace_graph(); | 142 | unregister_ftrace_graph(); |
68 | } | 143 | } |
69 | 144 | ||
70 | static inline int log10_cpu(int nb) | 145 | static inline int log10_cpu(int nb) |
71 | { | 146 | { |
72 | if (nb / 100) | 147 | if (nb / 100) |
73 | return 3; | 148 | return 3; |
74 | if (nb / 10) | 149 | if (nb / 10) |
75 | return 2; | 150 | return 2; |
76 | return 1; | 151 | return 1; |
77 | } | 152 | } |
78 | 153 | ||
79 | static enum print_line_t | 154 | static enum print_line_t |
80 | print_graph_cpu(struct trace_seq *s, int cpu) | 155 | print_graph_cpu(struct trace_seq *s, int cpu) |
81 | { | 156 | { |
82 | int i; | 157 | int i; |
83 | int ret; | 158 | int ret; |
84 | int log10_this = log10_cpu(cpu); | 159 | int log10_this = log10_cpu(cpu); |
85 | int log10_all = log10_cpu(cpumask_weight(cpu_online_mask)); | 160 | int log10_all = log10_cpu(cpumask_weight(cpu_online_mask)); |
86 | 161 | ||
87 | 162 | ||
88 | /* | 163 | /* |
89 | * Start with a space character - to make it stand out | 164 | * Start with a space character - to make it stand out |
90 | * to the right a bit when trace output is pasted into | 165 | * to the right a bit when trace output is pasted into |
91 | * email: | 166 | * email: |
92 | */ | 167 | */ |
93 | ret = trace_seq_printf(s, " "); | 168 | ret = trace_seq_printf(s, " "); |
94 | 169 | ||
95 | /* | 170 | /* |
96 | * Tricky - we space the CPU field according to the max | 171 | * Tricky - we space the CPU field according to the max |
97 | * number of online CPUs. On a 2-cpu system it would take | 172 | * number of online CPUs. On a 2-cpu system it would take |
98 | * a maximum of 1 digit - on a 128 cpu system it would | 173 | * a maximum of 1 digit - on a 128 cpu system it would |
99 | * take up to 3 digits: | 174 | * take up to 3 digits: |
100 | */ | 175 | */ |
101 | for (i = 0; i < log10_all - log10_this; i++) { | 176 | for (i = 0; i < log10_all - log10_this; i++) { |
102 | ret = trace_seq_printf(s, " "); | 177 | ret = trace_seq_printf(s, " "); |
103 | if (!ret) | 178 | if (!ret) |
104 | return TRACE_TYPE_PARTIAL_LINE; | 179 | return TRACE_TYPE_PARTIAL_LINE; |
105 | } | 180 | } |
106 | ret = trace_seq_printf(s, "%d) ", cpu); | 181 | ret = trace_seq_printf(s, "%d) ", cpu); |
107 | if (!ret) | 182 | if (!ret) |
108 | return TRACE_TYPE_PARTIAL_LINE; | 183 | return TRACE_TYPE_PARTIAL_LINE; |
109 | 184 | ||
110 | return TRACE_TYPE_HANDLED; | 185 | return TRACE_TYPE_HANDLED; |
111 | } | 186 | } |
112 | 187 | ||
113 | #define TRACE_GRAPH_PROCINFO_LENGTH 14 | 188 | #define TRACE_GRAPH_PROCINFO_LENGTH 14 |
114 | 189 | ||
115 | static enum print_line_t | 190 | static enum print_line_t |
116 | print_graph_proc(struct trace_seq *s, pid_t pid) | 191 | print_graph_proc(struct trace_seq *s, pid_t pid) |
117 | { | 192 | { |
118 | int i; | 193 | int i; |
119 | int ret; | 194 | int ret; |
120 | int len; | 195 | int len; |
121 | char comm[8]; | 196 | char comm[8]; |
122 | int spaces = 0; | 197 | int spaces = 0; |
123 | /* sign + log10(MAX_INT) + '\0' */ | 198 | /* sign + log10(MAX_INT) + '\0' */ |
124 | char pid_str[11]; | 199 | char pid_str[11]; |
125 | 200 | ||
126 | strncpy(comm, trace_find_cmdline(pid), 7); | 201 | strncpy(comm, trace_find_cmdline(pid), 7); |
127 | comm[7] = '\0'; | 202 | comm[7] = '\0'; |
128 | sprintf(pid_str, "%d", pid); | 203 | sprintf(pid_str, "%d", pid); |
129 | 204 | ||
130 | /* 1 stands for the "-" character */ | 205 | /* 1 stands for the "-" character */ |
131 | len = strlen(comm) + strlen(pid_str) + 1; | 206 | len = strlen(comm) + strlen(pid_str) + 1; |
132 | 207 | ||
133 | if (len < TRACE_GRAPH_PROCINFO_LENGTH) | 208 | if (len < TRACE_GRAPH_PROCINFO_LENGTH) |
134 | spaces = TRACE_GRAPH_PROCINFO_LENGTH - len; | 209 | spaces = TRACE_GRAPH_PROCINFO_LENGTH - len; |
135 | 210 | ||
136 | /* Leading spaces to center the field */ | 211 | /* Leading spaces to center the field */ |
137 | for (i = 0; i < spaces / 2; i++) { | 212 | for (i = 0; i < spaces / 2; i++) { |
138 | ret = trace_seq_printf(s, " "); | 213 | ret = trace_seq_printf(s, " "); |
139 | if (!ret) | 214 | if (!ret) |
140 | return TRACE_TYPE_PARTIAL_LINE; | 215 | return TRACE_TYPE_PARTIAL_LINE; |
141 | } | 216 | } |
142 | 217 | ||
143 | ret = trace_seq_printf(s, "%s-%s", comm, pid_str); | 218 | ret = trace_seq_printf(s, "%s-%s", comm, pid_str); |
144 | if (!ret) | 219 | if (!ret) |
145 | return TRACE_TYPE_PARTIAL_LINE; | 220 | return TRACE_TYPE_PARTIAL_LINE; |
146 | 221 | ||
147 | /* Trailing spaces to center the field */ | 222 | /* Trailing spaces to center the field */ |
148 | for (i = 0; i < spaces - (spaces / 2); i++) { | 223 | for (i = 0; i < spaces - (spaces / 2); i++) { |
149 | ret = trace_seq_printf(s, " "); | 224 | ret = trace_seq_printf(s, " "); |
150 | if (!ret) | 225 | if (!ret) |
151 | return TRACE_TYPE_PARTIAL_LINE; | 226 | return TRACE_TYPE_PARTIAL_LINE; |
152 | } | 227 | } |
153 | return TRACE_TYPE_HANDLED; | 228 | return TRACE_TYPE_HANDLED; |
154 | } | 229 | } |
155 | 230 | ||
156 | 231 | ||
157 | /* If the pid changed since the last trace, output this event */ | 232 | /* If the pid changed since the last trace, output this event */ |
158 | static enum print_line_t | 233 | static enum print_line_t |
159 | verif_pid(struct trace_seq *s, pid_t pid, int cpu, pid_t *last_pids_cpu) | 234 | verif_pid(struct trace_seq *s, pid_t pid, int cpu, pid_t *last_pids_cpu) |
160 | { | 235 | { |
161 | pid_t prev_pid; | 236 | pid_t prev_pid; |
162 | pid_t *last_pid; | 237 | pid_t *last_pid; |
163 | int ret; | 238 | int ret; |
164 | 239 | ||
165 | if (!last_pids_cpu) | 240 | if (!last_pids_cpu) |
166 | return TRACE_TYPE_HANDLED; | 241 | return TRACE_TYPE_HANDLED; |
167 | 242 | ||
168 | last_pid = per_cpu_ptr(last_pids_cpu, cpu); | 243 | last_pid = per_cpu_ptr(last_pids_cpu, cpu); |
169 | 244 | ||
170 | if (*last_pid == pid) | 245 | if (*last_pid == pid) |
171 | return TRACE_TYPE_HANDLED; | 246 | return TRACE_TYPE_HANDLED; |
172 | 247 | ||
173 | prev_pid = *last_pid; | 248 | prev_pid = *last_pid; |
174 | *last_pid = pid; | 249 | *last_pid = pid; |
175 | 250 | ||
176 | if (prev_pid == -1) | 251 | if (prev_pid == -1) |
177 | return TRACE_TYPE_HANDLED; | 252 | return TRACE_TYPE_HANDLED; |
178 | /* | 253 | /* |
179 | * Context-switch trace line: | 254 | * Context-switch trace line: |
180 | 255 | ||
181 | ------------------------------------------ | 256 | ------------------------------------------ |
182 | | 1) migration/0--1 => sshd-1755 | 257 | | 1) migration/0--1 => sshd-1755 |
183 | ------------------------------------------ | 258 | ------------------------------------------ |
184 | 259 | ||
185 | */ | 260 | */ |
186 | ret = trace_seq_printf(s, | 261 | ret = trace_seq_printf(s, |
187 | " ------------------------------------------\n"); | 262 | " ------------------------------------------\n"); |
188 | if (!ret) | 263 | if (!ret) |
189 | return TRACE_TYPE_PARTIAL_LINE; | 264 | return TRACE_TYPE_PARTIAL_LINE; |
190 | 265 | ||
191 | ret = print_graph_cpu(s, cpu); | 266 | ret = print_graph_cpu(s, cpu); |
192 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 267 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
193 | return TRACE_TYPE_PARTIAL_LINE; | 268 | return TRACE_TYPE_PARTIAL_LINE; |
194 | 269 | ||
195 | ret = print_graph_proc(s, prev_pid); | 270 | ret = print_graph_proc(s, prev_pid); |
196 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 271 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
197 | return TRACE_TYPE_PARTIAL_LINE; | 272 | return TRACE_TYPE_PARTIAL_LINE; |
198 | 273 | ||
199 | ret = trace_seq_printf(s, " => "); | 274 | ret = trace_seq_printf(s, " => "); |
200 | if (!ret) | 275 | if (!ret) |
201 | return TRACE_TYPE_PARTIAL_LINE; | 276 | return TRACE_TYPE_PARTIAL_LINE; |
202 | 277 | ||
203 | ret = print_graph_proc(s, pid); | 278 | ret = print_graph_proc(s, pid); |
204 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 279 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
205 | return TRACE_TYPE_PARTIAL_LINE; | 280 | return TRACE_TYPE_PARTIAL_LINE; |
206 | 281 | ||
207 | ret = trace_seq_printf(s, | 282 | ret = trace_seq_printf(s, |
208 | "\n ------------------------------------------\n\n"); | 283 | "\n ------------------------------------------\n\n"); |
209 | if (!ret) | 284 | if (!ret) |
210 | return TRACE_TYPE_PARTIAL_LINE; | 285 | return TRACE_TYPE_PARTIAL_LINE; |
211 | 286 | ||
212 | return TRACE_TYPE_HANDLED; | 287 | return TRACE_TYPE_HANDLED; |
213 | } | 288 | } |
214 | 289 | ||
215 | static struct ftrace_graph_ret_entry * | 290 | static struct ftrace_graph_ret_entry * |
216 | get_return_for_leaf(struct trace_iterator *iter, | 291 | get_return_for_leaf(struct trace_iterator *iter, |
217 | struct ftrace_graph_ent_entry *curr) | 292 | struct ftrace_graph_ent_entry *curr) |
218 | { | 293 | { |
219 | struct ring_buffer_iter *ring_iter; | 294 | struct ring_buffer_iter *ring_iter; |
220 | struct ring_buffer_event *event; | 295 | struct ring_buffer_event *event; |
221 | struct ftrace_graph_ret_entry *next; | 296 | struct ftrace_graph_ret_entry *next; |
222 | 297 | ||
223 | ring_iter = iter->buffer_iter[iter->cpu]; | 298 | ring_iter = iter->buffer_iter[iter->cpu]; |
224 | 299 | ||
225 | /* First peek to compare current entry and the next one */ | 300 | /* First peek to compare current entry and the next one */ |
226 | if (ring_iter) | 301 | if (ring_iter) |
227 | event = ring_buffer_iter_peek(ring_iter, NULL); | 302 | event = ring_buffer_iter_peek(ring_iter, NULL); |
228 | else { | 303 | else { |
229 | /* We need to consume the current entry to see the next one */ | 304 | /* We need to consume the current entry to see the next one */ |
230 | ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL); | 305 | ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL); |
231 | event = ring_buffer_peek(iter->tr->buffer, iter->cpu, | 306 | event = ring_buffer_peek(iter->tr->buffer, iter->cpu, |
232 | NULL); | 307 | NULL); |
233 | } | 308 | } |
234 | 309 | ||
235 | if (!event) | 310 | if (!event) |
236 | return NULL; | 311 | return NULL; |
237 | 312 | ||
238 | next = ring_buffer_event_data(event); | 313 | next = ring_buffer_event_data(event); |
239 | 314 | ||
240 | if (next->ent.type != TRACE_GRAPH_RET) | 315 | if (next->ent.type != TRACE_GRAPH_RET) |
241 | return NULL; | 316 | return NULL; |
242 | 317 | ||
243 | if (curr->ent.pid != next->ent.pid || | 318 | if (curr->ent.pid != next->ent.pid || |
244 | curr->graph_ent.func != next->ret.func) | 319 | curr->graph_ent.func != next->ret.func) |
245 | return NULL; | 320 | return NULL; |
246 | 321 | ||
247 | /* this is a leaf, now advance the iterator */ | 322 | /* this is a leaf, now advance the iterator */ |
248 | if (ring_iter) | 323 | if (ring_iter) |
249 | ring_buffer_read(ring_iter, NULL); | 324 | ring_buffer_read(ring_iter, NULL); |
250 | 325 | ||
251 | return next; | 326 | return next; |
252 | } | 327 | } |
253 | 328 | ||
254 | /* Signal an execution-time overhead in the output */ | 329 | /* Signal an execution-time overhead in the output */ |
255 | static int | 330 | static int |
256 | print_graph_overhead(unsigned long long duration, struct trace_seq *s) | 331 | print_graph_overhead(unsigned long long duration, struct trace_seq *s) |
257 | { | 332 | { |
258 | /* If the duration column is disabled, we don't need anything */ | 333 | /* If the duration column is disabled, we don't need anything */ |
259 | if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)) | 334 | if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)) |
260 | return 1; | 335 | return 1; |
261 | 336 | ||
262 | /* Non-nested entry or return */ | 337 | /* Non-nested entry or return */ |
263 | if (duration == -1) | 338 | if (duration == -1) |
264 | return trace_seq_printf(s, " "); | 339 | return trace_seq_printf(s, " "); |
265 | 340 | ||
266 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | 341 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { |
267 | /* Duration exceeded 100 msecs */ | 342 | /* Duration exceeded 100 msecs */ |
268 | if (duration > 100000ULL) | 343 | if (duration > 100000ULL) |
269 | return trace_seq_printf(s, "! "); | 344 | return trace_seq_printf(s, "! "); |
270 | 345 | ||
271 | /* Duration exceeded 10 msecs */ | 346 | /* Duration exceeded 10 msecs */ |
272 | if (duration > 10000ULL) | 347 | if (duration > 10000ULL) |
273 | return trace_seq_printf(s, "+ "); | 348 | return trace_seq_printf(s, "+ "); |
274 | } | 349 | } |
275 | 350 | ||
276 | return trace_seq_printf(s, " "); | 351 | return trace_seq_printf(s, " "); |
277 | } | 352 | } |
278 | 353 | ||
279 | static enum print_line_t | 354 | static enum print_line_t |
280 | print_graph_irq(struct trace_seq *s, unsigned long addr, | 355 | print_graph_irq(struct trace_seq *s, unsigned long addr, |
281 | enum trace_type type, int cpu, pid_t pid) | 356 | enum trace_type type, int cpu, pid_t pid) |
282 | { | 357 | { |
283 | int ret; | 358 | int ret; |
284 | 359 | ||
285 | if (addr < (unsigned long)__irqentry_text_start || | 360 | if (addr < (unsigned long)__irqentry_text_start || |
286 | addr >= (unsigned long)__irqentry_text_end) | 361 | addr >= (unsigned long)__irqentry_text_end) |
287 | return TRACE_TYPE_UNHANDLED; | 362 | return TRACE_TYPE_UNHANDLED; |
288 | 363 | ||
289 | /* Cpu */ | 364 | /* Cpu */ |
290 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { | 365 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { |
291 | ret = print_graph_cpu(s, cpu); | 366 | ret = print_graph_cpu(s, cpu); |
292 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 367 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
293 | return TRACE_TYPE_PARTIAL_LINE; | 368 | return TRACE_TYPE_PARTIAL_LINE; |
294 | } | 369 | } |
295 | /* Proc */ | 370 | /* Proc */ |
296 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { | 371 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { |
297 | ret = print_graph_proc(s, pid); | 372 | ret = print_graph_proc(s, pid); |
298 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 373 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
299 | return TRACE_TYPE_PARTIAL_LINE; | 374 | return TRACE_TYPE_PARTIAL_LINE; |
300 | ret = trace_seq_printf(s, " | "); | 375 | ret = trace_seq_printf(s, " | "); |
301 | if (!ret) | 376 | if (!ret) |
302 | return TRACE_TYPE_PARTIAL_LINE; | 377 | return TRACE_TYPE_PARTIAL_LINE; |
303 | } | 378 | } |
304 | 379 | ||
305 | /* No overhead */ | 380 | /* No overhead */ |
306 | ret = print_graph_overhead(-1, s); | 381 | ret = print_graph_overhead(-1, s); |
307 | if (!ret) | 382 | if (!ret) |
308 | return TRACE_TYPE_PARTIAL_LINE; | 383 | return TRACE_TYPE_PARTIAL_LINE; |
309 | 384 | ||
310 | if (type == TRACE_GRAPH_ENT) | 385 | if (type == TRACE_GRAPH_ENT) |
311 | ret = trace_seq_printf(s, "==========>"); | 386 | ret = trace_seq_printf(s, "==========>"); |
312 | else | 387 | else |
313 | ret = trace_seq_printf(s, "<=========="); | 388 | ret = trace_seq_printf(s, "<=========="); |
314 | 389 | ||
315 | if (!ret) | 390 | if (!ret) |
316 | return TRACE_TYPE_PARTIAL_LINE; | 391 | return TRACE_TYPE_PARTIAL_LINE; |
317 | 392 | ||
318 | /* Don't close the duration column if there isn't one */ | 393 | /* Don't close the duration column if there isn't one */ |
319 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) | 394 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) |
320 | trace_seq_printf(s, " |"); | 395 | trace_seq_printf(s, " |"); |
321 | ret = trace_seq_printf(s, "\n"); | 396 | ret = trace_seq_printf(s, "\n"); |
322 | 397 | ||
323 | if (!ret) | 398 | if (!ret) |
324 | return TRACE_TYPE_PARTIAL_LINE; | 399 | return TRACE_TYPE_PARTIAL_LINE; |
325 | return TRACE_TYPE_HANDLED; | 400 | return TRACE_TYPE_HANDLED; |
326 | } | 401 | } |
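Addresses falling inside the __irqentry_text section mark hard-irq entry points, so the graph brackets an interrupt with arrows rather than repeating the usual function columns. Roughly:

     0)  ==========> |
     0)              |    smp_apic_timer_interrupt() {
              ...
     0)  <========== |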
327 | 402 | ||
328 | static enum print_line_t | 403 | static enum print_line_t |
329 | print_graph_duration(unsigned long long duration, struct trace_seq *s) | 404 | print_graph_duration(unsigned long long duration, struct trace_seq *s) |
330 | { | 405 | { |
331 | unsigned long nsecs_rem = do_div(duration, 1000); | 406 | unsigned long nsecs_rem = do_div(duration, 1000); |
332 | /* 20 decimal digits of ULONG_MAX + '\0' */ | 407 | /* 20 decimal digits of ULONG_MAX + '\0' */ |
333 | char msecs_str[21]; | 408 | char msecs_str[21]; |
334 | char nsecs_str[5]; | 409 | char nsecs_str[5]; |
335 | int ret, len; | 410 | int ret, len; |
336 | int i; | 411 | int i; |
337 | 412 | ||
338 | sprintf(msecs_str, "%lu", (unsigned long) duration); | 413 | sprintf(msecs_str, "%lu", (unsigned long) duration); |
339 | 414 | ||
340 | /* Print usecs (integer part of the duration) */ | 415 | /* Print usecs (integer part of the duration) */ |
341 | ret = trace_seq_printf(s, "%s", msecs_str); | 416 | ret = trace_seq_printf(s, "%s", msecs_str); |
342 | if (!ret) | 417 | if (!ret) |
343 | return TRACE_TYPE_PARTIAL_LINE; | 418 | return TRACE_TYPE_PARTIAL_LINE; |
344 | 419 | ||
345 | len = strlen(msecs_str); | 420 | len = strlen(msecs_str); |
346 | 421 | ||
347 | /* Print nsecs (we don't want to exceed 7 digits) */ | 422 | /* Print nsecs (we don't want to exceed 7 digits) */ |
348 | if (len < 7) { | 423 | if (len < 7) { |
349 | snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem); | 424 | snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem); |
350 | ret = trace_seq_printf(s, ".%s", nsecs_str); | 425 | ret = trace_seq_printf(s, ".%s", nsecs_str); |
351 | if (!ret) | 426 | if (!ret) |
352 | return TRACE_TYPE_PARTIAL_LINE; | 427 | return TRACE_TYPE_PARTIAL_LINE; |
353 | len += strlen(nsecs_str); | 428 | len += strlen(nsecs_str); |
354 | } | 429 | } |
355 | 430 | ||
356 | ret = trace_seq_printf(s, " us "); | 431 | ret = trace_seq_printf(s, " us "); |
357 | if (!ret) | 432 | if (!ret) |
358 | return TRACE_TYPE_PARTIAL_LINE; | 433 | return TRACE_TYPE_PARTIAL_LINE; |
359 | 434 | ||
360 | /* Print remaining spaces to fit the row's width */ | 435 | /* Print remaining spaces to fit the row's width */ |
361 | for (i = len; i < 7; i++) { | 436 | for (i = len; i < 7; i++) { |
362 | ret = trace_seq_printf(s, " "); | 437 | ret = trace_seq_printf(s, " "); |
363 | if (!ret) | 438 | if (!ret) |
364 | return TRACE_TYPE_PARTIAL_LINE; | 439 | return TRACE_TYPE_PARTIAL_LINE; |
365 | } | 440 | } |
366 | 441 | ||
367 | ret = trace_seq_printf(s, "| "); | 442 | ret = trace_seq_printf(s, "| "); |
368 | if (!ret) | 443 | if (!ret) |
369 | return TRACE_TYPE_PARTIAL_LINE; | 444 | return TRACE_TYPE_PARTIAL_LINE; |
370 | return TRACE_TYPE_HANDLED; | 445 | return TRACE_TYPE_HANDLED; |
371 | 446 | ||
372 | } | 447 | } |
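do_div() divides its first argument in place and returns the remainder, so after the call duration holds whole microseconds and nsecs_rem the leftover nanoseconds; the column is then padded to a fixed 7-character width. A user-space sketch of the same split, with plain C division standing in for do_div() (the 3259412 value is hypothetical):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long duration = 3259412ULL;         /* ns */
            unsigned long nsecs_rem = duration % 1000;        /* 412 */
            duration /= 1000;                                 /* 3259 us */
            printf("%llu.%03lu us |\n", duration, nsecs_rem); /* "3259.412 us |" */
            return 0;
    }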
373 | 448 | ||
374 | static int print_graph_abs_time(u64 t, struct trace_seq *s) | 449 | static int print_graph_abs_time(u64 t, struct trace_seq *s) |
375 | { | 450 | { |
376 | unsigned long usecs_rem; | 451 | unsigned long usecs_rem; |
377 | 452 | ||
378 | usecs_rem = do_div(t, 1000000000); | 453 | usecs_rem = do_div(t, 1000000000); |
379 | usecs_rem /= 1000; | 454 | usecs_rem /= 1000; |
380 | 455 | ||
381 | return trace_seq_printf(s, "%5lu.%06lu | ", | 456 | return trace_seq_printf(s, "%5lu.%06lu | ", |
382 | (unsigned long)t, usecs_rem); | 457 | (unsigned long)t, usecs_rem); |
383 | } | 458 | } |
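The absolute timestamp gets the same treatment: the nanosecond counter is split into whole seconds plus a remainder, and the remainder is truncated to microseconds. With a hypothetical t = 25123456789 ns, do_div() leaves t = 25 and returns 123456789, so the line prints as "   25.123456 | ".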
384 | 459 | ||
385 | /* Case of a leaf function on its call entry */ | 460 | /* Case of a leaf function on its call entry */ |
386 | static enum print_line_t | 461 | static enum print_line_t |
387 | print_graph_entry_leaf(struct trace_iterator *iter, | 462 | print_graph_entry_leaf(struct trace_iterator *iter, |
388 | struct ftrace_graph_ent_entry *entry, | 463 | struct ftrace_graph_ent_entry *entry, |
389 | struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s) | 464 | struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s) |
390 | { | 465 | { |
391 | struct ftrace_graph_ret *graph_ret; | 466 | struct ftrace_graph_ret *graph_ret; |
392 | struct ftrace_graph_ent *call; | 467 | struct ftrace_graph_ent *call; |
393 | unsigned long long duration; | 468 | unsigned long long duration; |
394 | int ret; | 469 | int ret; |
395 | int i; | 470 | int i; |
396 | 471 | ||
397 | graph_ret = &ret_entry->ret; | 472 | graph_ret = &ret_entry->ret; |
398 | call = &entry->graph_ent; | 473 | call = &entry->graph_ent; |
399 | duration = graph_ret->rettime - graph_ret->calltime; | 474 | duration = graph_ret->rettime - graph_ret->calltime; |
400 | 475 | ||
401 | /* Overhead */ | 476 | /* Overhead */ |
402 | ret = print_graph_overhead(duration, s); | 477 | ret = print_graph_overhead(duration, s); |
403 | if (!ret) | 478 | if (!ret) |
404 | return TRACE_TYPE_PARTIAL_LINE; | 479 | return TRACE_TYPE_PARTIAL_LINE; |
405 | 480 | ||
406 | /* Duration */ | 481 | /* Duration */ |
407 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { | 482 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { |
408 | ret = print_graph_duration(duration, s); | 483 | ret = print_graph_duration(duration, s); |
409 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 484 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
410 | return TRACE_TYPE_PARTIAL_LINE; | 485 | return TRACE_TYPE_PARTIAL_LINE; |
411 | } | 486 | } |
412 | 487 | ||
413 | /* Function */ | 488 | /* Function */ |
414 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | 489 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { |
415 | ret = trace_seq_printf(s, " "); | 490 | ret = trace_seq_printf(s, " "); |
416 | if (!ret) | 491 | if (!ret) |
417 | return TRACE_TYPE_PARTIAL_LINE; | 492 | return TRACE_TYPE_PARTIAL_LINE; |
418 | } | 493 | } |
419 | 494 | ||
420 | ret = seq_print_ip_sym(s, call->func, 0); | 495 | ret = seq_print_ip_sym(s, call->func, 0); |
421 | if (!ret) | 496 | if (!ret) |
422 | return TRACE_TYPE_PARTIAL_LINE; | 497 | return TRACE_TYPE_PARTIAL_LINE; |
423 | 498 | ||
424 | ret = trace_seq_printf(s, "();\n"); | 499 | ret = trace_seq_printf(s, "();\n"); |
425 | if (!ret) | 500 | if (!ret) |
426 | return TRACE_TYPE_PARTIAL_LINE; | 501 | return TRACE_TYPE_PARTIAL_LINE; |
427 | 502 | ||
428 | return TRACE_TYPE_HANDLED; | 503 | return TRACE_TYPE_HANDLED; |
429 | } | 504 | } |
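A leaf therefore renders as a single C-style call statement: overhead and duration first, then TRACE_GRAPH_INDENT spaces per depth level, then the symbol. Illustrative:

     0)   0.586 us    |        fput();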
430 | 505 | ||
431 | static enum print_line_t | 506 | static enum print_line_t |
432 | print_graph_entry_nested(struct ftrace_graph_ent_entry *entry, | 507 | print_graph_entry_nested(struct ftrace_graph_ent_entry *entry, |
433 | struct trace_seq *s, pid_t pid, int cpu) | 508 | struct trace_seq *s, pid_t pid, int cpu) |
434 | { | 509 | { |
435 | int i; | 510 | int i; |
436 | int ret; | 511 | int ret; |
437 | struct ftrace_graph_ent *call = &entry->graph_ent; | 512 | struct ftrace_graph_ent *call = &entry->graph_ent; |
438 | 513 | ||
439 | /* No overhead */ | 514 | /* No overhead */ |
440 | ret = print_graph_overhead(-1, s); | 515 | ret = print_graph_overhead(-1, s); |
441 | if (!ret) | 516 | if (!ret) |
442 | return TRACE_TYPE_PARTIAL_LINE; | 517 | return TRACE_TYPE_PARTIAL_LINE; |
443 | 518 | ||
444 | /* No time */ | 519 | /* No time */ |
445 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { | 520 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { |
446 | ret = trace_seq_printf(s, " | "); | 521 | ret = trace_seq_printf(s, " | "); |
447 | if (!ret) | 522 | if (!ret) |
448 | return TRACE_TYPE_PARTIAL_LINE; | 523 | return TRACE_TYPE_PARTIAL_LINE; |
449 | } | 524 | } |
450 | 525 | ||
451 | /* Function */ | 526 | /* Function */ |
452 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | 527 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { |
453 | ret = trace_seq_printf(s, " "); | 528 | ret = trace_seq_printf(s, " "); |
454 | if (!ret) | 529 | if (!ret) |
455 | return TRACE_TYPE_PARTIAL_LINE; | 530 | return TRACE_TYPE_PARTIAL_LINE; |
456 | } | 531 | } |
457 | 532 | ||
458 | ret = seq_print_ip_sym(s, call->func, 0); | 533 | ret = seq_print_ip_sym(s, call->func, 0); |
459 | if (!ret) | 534 | if (!ret) |
460 | return TRACE_TYPE_PARTIAL_LINE; | 535 | return TRACE_TYPE_PARTIAL_LINE; |
461 | 536 | ||
462 | ret = trace_seq_printf(s, "() {\n"); | 537 | ret = trace_seq_printf(s, "() {\n"); |
463 | if (!ret) | 538 | if (!ret) |
464 | return TRACE_TYPE_PARTIAL_LINE; | 539 | return TRACE_TYPE_PARTIAL_LINE; |
465 | 540 | ||
466 | /* | 541 | /* |
467 | * We already consumed the current entry to check the next one | 542 | * We already consumed the current entry to check the next one |
468 | * and see if this is a leaf. | 543 | * and see if this is a leaf. |
469 | */ | 544 | */ |
470 | return TRACE_TYPE_NO_CONSUME; | 545 | return TRACE_TYPE_NO_CONSUME; |
471 | } | 546 | } |
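A nested entry only opens the brace; the matching TRACE_GRAPH_RET event closes it later. Returning TRACE_TYPE_NO_CONSUME avoids a double consume, since the entry was already consumed while get_return_for_leaf() read ahead. Illustrative:

     0)               |        d_lookup() {
     0)   0.997 us    |          __d_lookup();
     0)   2.742 us    |        }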
472 | 547 | ||
473 | static enum print_line_t | 548 | static enum print_line_t |
474 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | 549 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, |
475 | struct trace_iterator *iter) | 550 | struct trace_iterator *iter) |
476 | { | 551 | { |
477 | int ret; | 552 | int ret; |
478 | int cpu = iter->cpu; | 553 | int cpu = iter->cpu; |
479 | pid_t *last_entry = iter->private; | 554 | pid_t *last_entry = iter->private; |
480 | struct trace_entry *ent = iter->ent; | 555 | struct trace_entry *ent = iter->ent; |
481 | struct ftrace_graph_ent *call = &field->graph_ent; | 556 | struct ftrace_graph_ent *call = &field->graph_ent; |
482 | struct ftrace_graph_ret_entry *leaf_ret; | 557 | struct ftrace_graph_ret_entry *leaf_ret; |
483 | 558 | ||
484 | /* Pid */ | 559 | /* Pid */ |
485 | if (verif_pid(s, ent->pid, cpu, last_entry) == TRACE_TYPE_PARTIAL_LINE) | 560 | if (verif_pid(s, ent->pid, cpu, last_entry) == TRACE_TYPE_PARTIAL_LINE) |
486 | return TRACE_TYPE_PARTIAL_LINE; | 561 | return TRACE_TYPE_PARTIAL_LINE; |
487 | 562 | ||
488 | /* Interrupt */ | 563 | /* Interrupt */ |
489 | ret = print_graph_irq(s, call->func, TRACE_GRAPH_ENT, cpu, ent->pid); | 564 | ret = print_graph_irq(s, call->func, TRACE_GRAPH_ENT, cpu, ent->pid); |
490 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 565 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
491 | return TRACE_TYPE_PARTIAL_LINE; | 566 | return TRACE_TYPE_PARTIAL_LINE; |
492 | 567 | ||
493 | /* Absolute time */ | 568 | /* Absolute time */ |
494 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) { | 569 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) { |
495 | ret = print_graph_abs_time(iter->ts, s); | 570 | ret = print_graph_abs_time(iter->ts, s); |
496 | if (!ret) | 571 | if (!ret) |
497 | return TRACE_TYPE_PARTIAL_LINE; | 572 | return TRACE_TYPE_PARTIAL_LINE; |
498 | } | 573 | } |
499 | 574 | ||
500 | /* Cpu */ | 575 | /* Cpu */ |
501 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { | 576 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { |
502 | ret = print_graph_cpu(s, cpu); | 577 | ret = print_graph_cpu(s, cpu); |
503 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 578 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
504 | return TRACE_TYPE_PARTIAL_LINE; | 579 | return TRACE_TYPE_PARTIAL_LINE; |
505 | } | 580 | } |
506 | 581 | ||
507 | /* Proc */ | 582 | /* Proc */ |
508 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { | 583 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { |
509 | ret = print_graph_proc(s, ent->pid); | 584 | ret = print_graph_proc(s, ent->pid); |
510 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 585 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
511 | return TRACE_TYPE_PARTIAL_LINE; | 586 | return TRACE_TYPE_PARTIAL_LINE; |
512 | 587 | ||
513 | ret = trace_seq_printf(s, " | "); | 588 | ret = trace_seq_printf(s, " | "); |
514 | if (!ret) | 589 | if (!ret) |
515 | return TRACE_TYPE_PARTIAL_LINE; | 590 | return TRACE_TYPE_PARTIAL_LINE; |
516 | } | 591 | } |
517 | 592 | ||
518 | leaf_ret = get_return_for_leaf(iter, field); | 593 | leaf_ret = get_return_for_leaf(iter, field); |
519 | if (leaf_ret) | 594 | if (leaf_ret) |
520 | return print_graph_entry_leaf(iter, field, leaf_ret, s); | 595 | return print_graph_entry_leaf(iter, field, leaf_ret, s); |
521 | else | 596 | else |
522 | return print_graph_entry_nested(field, s, iter->ent->pid, cpu); | 597 | return print_graph_entry_nested(field, s, iter->ent->pid, cpu); |
523 | 598 | ||
524 | } | 599 | } |
525 | 600 | ||
526 | static enum print_line_t | 601 | static enum print_line_t |
527 | print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | 602 | print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, |
528 | struct trace_entry *ent, struct trace_iterator *iter) | 603 | struct trace_entry *ent, struct trace_iterator *iter) |
529 | { | 604 | { |
530 | int i; | 605 | int i; |
531 | int ret; | 606 | int ret; |
532 | int cpu = iter->cpu; | 607 | int cpu = iter->cpu; |
533 | pid_t *last_pid = iter->private; | 608 | pid_t *last_pid = iter->private; |
534 | unsigned long long duration = trace->rettime - trace->calltime; | 609 | unsigned long long duration = trace->rettime - trace->calltime; |
535 | 610 | ||
536 | /* Pid */ | 611 | /* Pid */ |
537 | if (verif_pid(s, ent->pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE) | 612 | if (verif_pid(s, ent->pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE) |
538 | return TRACE_TYPE_PARTIAL_LINE; | 613 | return TRACE_TYPE_PARTIAL_LINE; |
539 | 614 | ||
540 | /* Absolute time */ | 615 | /* Absolute time */ |
541 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) { | 616 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) { |
542 | ret = print_graph_abs_time(iter->ts, s); | 617 | ret = print_graph_abs_time(iter->ts, s); |
543 | if (!ret) | 618 | if (!ret) |
544 | return TRACE_TYPE_PARTIAL_LINE; | 619 | return TRACE_TYPE_PARTIAL_LINE; |
545 | } | 620 | } |
546 | 621 | ||
547 | /* Cpu */ | 622 | /* Cpu */ |
548 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { | 623 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { |
549 | ret = print_graph_cpu(s, cpu); | 624 | ret = print_graph_cpu(s, cpu); |
550 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 625 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
551 | return TRACE_TYPE_PARTIAL_LINE; | 626 | return TRACE_TYPE_PARTIAL_LINE; |
552 | } | 627 | } |
553 | 628 | ||
554 | /* Proc */ | 629 | /* Proc */ |
555 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { | 630 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { |
556 | ret = print_graph_proc(s, ent->pid); | 631 | ret = print_graph_proc(s, ent->pid); |
557 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 632 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
558 | return TRACE_TYPE_PARTIAL_LINE; | 633 | return TRACE_TYPE_PARTIAL_LINE; |
559 | 634 | ||
560 | ret = trace_seq_printf(s, " | "); | 635 | ret = trace_seq_printf(s, " | "); |
561 | if (!ret) | 636 | if (!ret) |
562 | return TRACE_TYPE_PARTIAL_LINE; | 637 | return TRACE_TYPE_PARTIAL_LINE; |
563 | } | 638 | } |
564 | 639 | ||
565 | /* Overhead */ | 640 | /* Overhead */ |
566 | ret = print_graph_overhead(duration, s); | 641 | ret = print_graph_overhead(duration, s); |
567 | if (!ret) | 642 | if (!ret) |
568 | return TRACE_TYPE_PARTIAL_LINE; | 643 | return TRACE_TYPE_PARTIAL_LINE; |
569 | 644 | ||
570 | /* Duration */ | 645 | /* Duration */ |
571 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { | 646 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { |
572 | ret = print_graph_duration(duration, s); | 647 | ret = print_graph_duration(duration, s); |
573 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 648 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
574 | return TRACE_TYPE_PARTIAL_LINE; | 649 | return TRACE_TYPE_PARTIAL_LINE; |
575 | } | 650 | } |
576 | 651 | ||
577 | /* Closing brace */ | 652 | /* Closing brace */ |
578 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { | 653 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { |
579 | ret = trace_seq_printf(s, " "); | 654 | ret = trace_seq_printf(s, " "); |
580 | if (!ret) | 655 | if (!ret) |
581 | return TRACE_TYPE_PARTIAL_LINE; | 656 | return TRACE_TYPE_PARTIAL_LINE; |
582 | } | 657 | } |
583 | 658 | ||
584 | ret = trace_seq_printf(s, "}\n"); | 659 | ret = trace_seq_printf(s, "}\n"); |
585 | if (!ret) | 660 | if (!ret) |
586 | return TRACE_TYPE_PARTIAL_LINE; | 661 | return TRACE_TYPE_PARTIAL_LINE; |
587 | 662 | ||
588 | /* Overrun */ | 663 | /* Overrun */ |
589 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) { | 664 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) { |
590 | ret = trace_seq_printf(s, " (Overruns: %lu)\n", | 665 | ret = trace_seq_printf(s, " (Overruns: %lu)\n", |
591 | trace->overrun); | 666 | trace->overrun); |
592 | if (!ret) | 667 | if (!ret) |
593 | return TRACE_TYPE_PARTIAL_LINE; | 668 | return TRACE_TYPE_PARTIAL_LINE; |
594 | } | 669 | } |
595 | 670 | ||
596 | ret = print_graph_irq(s, trace->func, TRACE_GRAPH_RET, cpu, ent->pid); | 671 | ret = print_graph_irq(s, trace->func, TRACE_GRAPH_RET, cpu, ent->pid); |
597 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 672 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
598 | return TRACE_TYPE_PARTIAL_LINE; | 673 | return TRACE_TYPE_PARTIAL_LINE; |
599 | 674 | ||
600 | return TRACE_TYPE_HANDLED; | 675 | return TRACE_TYPE_HANDLED; |
601 | } | 676 | } |
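The return handler prints the closing brace at the recorded depth, annotated with the measured duration and, when TRACE_GRAPH_PRINT_OVERRUN is set, the number of nested calls that overflowed the per-task return stack. Illustrative:

     1) + 23.918 us   |  }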
602 | 677 | ||
603 | static enum print_line_t | 678 | static enum print_line_t |
604 | print_graph_comment(struct print_entry *trace, struct trace_seq *s, | 679 | print_graph_comment(struct print_entry *trace, struct trace_seq *s, |
605 | struct trace_entry *ent, struct trace_iterator *iter) | 680 | struct trace_entry *ent, struct trace_iterator *iter) |
606 | { | 681 | { |
607 | int i; | 682 | int i; |
608 | int ret; | 683 | int ret; |
609 | int cpu = iter->cpu; | 684 | int cpu = iter->cpu; |
610 | pid_t *last_pid = iter->private; | 685 | pid_t *last_pid = iter->private; |
611 | 686 | ||
612 | /* Absolute time */ | 687 | /* Absolute time */ |
613 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) { | 688 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) { |
614 | ret = print_graph_abs_time(iter->ts, s); | 689 | ret = print_graph_abs_time(iter->ts, s); |
615 | if (!ret) | 690 | if (!ret) |
616 | return TRACE_TYPE_PARTIAL_LINE; | 691 | return TRACE_TYPE_PARTIAL_LINE; |
617 | } | 692 | } |
618 | 693 | ||
619 | /* Pid */ | 694 | /* Pid */ |
620 | if (verif_pid(s, ent->pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE) | 695 | if (verif_pid(s, ent->pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE) |
621 | return TRACE_TYPE_PARTIAL_LINE; | 696 | return TRACE_TYPE_PARTIAL_LINE; |
622 | 697 | ||
623 | /* Cpu */ | 698 | /* Cpu */ |
624 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { | 699 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { |
625 | ret = print_graph_cpu(s, cpu); | 700 | ret = print_graph_cpu(s, cpu); |
626 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 701 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
627 | return TRACE_TYPE_PARTIAL_LINE; | 702 | return TRACE_TYPE_PARTIAL_LINE; |
628 | } | 703 | } |
629 | 704 | ||
630 | /* Proc */ | 705 | /* Proc */ |
631 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { | 706 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { |
632 | ret = print_graph_proc(s, ent->pid); | 707 | ret = print_graph_proc(s, ent->pid); |
633 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 708 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
634 | return TRACE_TYPE_PARTIAL_LINE; | 709 | return TRACE_TYPE_PARTIAL_LINE; |
635 | 710 | ||
636 | ret = trace_seq_printf(s, " | "); | 711 | ret = trace_seq_printf(s, " | "); |
637 | if (!ret) | 712 | if (!ret) |
638 | return TRACE_TYPE_PARTIAL_LINE; | 713 | return TRACE_TYPE_PARTIAL_LINE; |
639 | } | 714 | } |
640 | 715 | ||
641 | /* No overhead */ | 716 | /* No overhead */ |
642 | ret = print_graph_overhead(-1, s); | 717 | ret = print_graph_overhead(-1, s); |
643 | if (!ret) | 718 | if (!ret) |
644 | return TRACE_TYPE_PARTIAL_LINE; | 719 | return TRACE_TYPE_PARTIAL_LINE; |
645 | 720 | ||
646 | /* No time */ | 721 | /* No time */ |
647 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { | 722 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { |
648 | ret = trace_seq_printf(s, " | "); | 723 | ret = trace_seq_printf(s, " | "); |
649 | if (!ret) | 724 | if (!ret) |
650 | return TRACE_TYPE_PARTIAL_LINE; | 725 | return TRACE_TYPE_PARTIAL_LINE; |
651 | } | 726 | } |
652 | 727 | ||
653 | /* Indentation */ | 728 | /* Indentation */ |
654 | if (trace->depth > 0) | 729 | if (trace->depth > 0) |
655 | for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) { | 730 | for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) { |
656 | ret = trace_seq_printf(s, " "); | 731 | ret = trace_seq_printf(s, " "); |
657 | if (!ret) | 732 | if (!ret) |
658 | return TRACE_TYPE_PARTIAL_LINE; | 733 | return TRACE_TYPE_PARTIAL_LINE; |
659 | } | 734 | } |
660 | 735 | ||
661 | /* The comment */ | 736 | /* The comment */ |
662 | ret = trace_seq_printf(s, "/* %s", trace->buf); | 737 | ret = trace_seq_printf(s, "/* %s", trace->buf); |
663 | if (!ret) | 738 | if (!ret) |
664 | return TRACE_TYPE_PARTIAL_LINE; | 739 | return TRACE_TYPE_PARTIAL_LINE; |
665 | 740 | ||
666 | /* Strip ending newline */ | 741 | /* Strip ending newline */ |
667 | if (s->buffer[s->len - 1] == '\n') { | 742 | if (s->buffer[s->len - 1] == '\n') { |
668 | s->buffer[s->len - 1] = '\0'; | 743 | s->buffer[s->len - 1] = '\0'; |
669 | s->len--; | 744 | s->len--; |
670 | } | 745 | } |
671 | 746 | ||
672 | ret = trace_seq_printf(s, " */\n"); | 747 | ret = trace_seq_printf(s, " */\n"); |
673 | if (!ret) | 748 | if (!ret) |
674 | return TRACE_TYPE_PARTIAL_LINE; | 749 | return TRACE_TYPE_PARTIAL_LINE; |
675 | 750 | ||
676 | return TRACE_TYPE_HANDLED; | 751 | return TRACE_TYPE_HANDLED; |
677 | } | 752 | } |
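TRACE_PRINT events interleaved with the graph are rendered as C comments at their recorded depth, so printk-style annotations read like comments in the reconstructed source. Illustrative:

     0)               |        /* hypothetical annotation */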
678 | 753 | ||
679 | 754 | ||
680 | enum print_line_t | 755 | enum print_line_t |
681 | print_graph_function(struct trace_iterator *iter) | 756 | print_graph_function(struct trace_iterator *iter) |
682 | { | 757 | { |
683 | struct trace_seq *s = &iter->seq; | 758 | struct trace_seq *s = &iter->seq; |
684 | struct trace_entry *entry = iter->ent; | 759 | struct trace_entry *entry = iter->ent; |
685 | 760 | ||
686 | switch (entry->type) { | 761 | switch (entry->type) { |
687 | case TRACE_GRAPH_ENT: { | 762 | case TRACE_GRAPH_ENT: { |
688 | struct ftrace_graph_ent_entry *field; | 763 | struct ftrace_graph_ent_entry *field; |
689 | trace_assign_type(field, entry); | 764 | trace_assign_type(field, entry); |
690 | return print_graph_entry(field, s, iter); | 765 | return print_graph_entry(field, s, iter); |
691 | } | 766 | } |
692 | case TRACE_GRAPH_RET: { | 767 | case TRACE_GRAPH_RET: { |
693 | struct ftrace_graph_ret_entry *field; | 768 | struct ftrace_graph_ret_entry *field; |
694 | trace_assign_type(field, entry); | 769 | trace_assign_type(field, entry); |
695 | return print_graph_return(&field->ret, s, entry, iter); | 770 | return print_graph_return(&field->ret, s, entry, iter); |
696 | } | 771 | } |
697 | case TRACE_PRINT: { | 772 | case TRACE_PRINT: { |
698 | struct print_entry *field; | 773 | struct print_entry *field; |
699 | trace_assign_type(field, entry); | 774 | trace_assign_type(field, entry); |
700 | return print_graph_comment(field, s, entry, iter); | 775 | return print_graph_comment(field, s, entry, iter); |
701 | } | 776 | } |
702 | default: | 777 | default: |
703 | return TRACE_TYPE_UNHANDLED; | 778 | return TRACE_TYPE_UNHANDLED; |
704 | } | 779 | } |
705 | } | 780 | } |
706 | 781 | ||
707 | static void print_graph_headers(struct seq_file *s) | 782 | static void print_graph_headers(struct seq_file *s) |
708 | { | 783 | { |
709 | /* 1st line */ | 784 | /* 1st line */ |
710 | seq_printf(s, "# "); | 785 | seq_printf(s, "# "); |
711 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) | 786 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) |
712 | seq_printf(s, " TIME "); | 787 | seq_printf(s, " TIME "); |
713 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) | 788 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) |
714 | seq_printf(s, "CPU"); | 789 | seq_printf(s, "CPU"); |
715 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) | 790 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) |
716 | seq_printf(s, " TASK/PID "); | 791 | seq_printf(s, " TASK/PID "); |
717 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) | 792 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) |
718 | seq_printf(s, " DURATION "); | 793 | seq_printf(s, " DURATION "); |
719 | seq_printf(s, " FUNCTION CALLS\n"); | 794 | seq_printf(s, " FUNCTION CALLS\n"); |
720 | 795 | ||
721 | /* 2nd line */ | 796 | /* 2nd line */ |
722 | seq_printf(s, "# "); | 797 | seq_printf(s, "# "); |
723 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) | 798 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) |
724 | seq_printf(s, " | "); | 799 | seq_printf(s, " | "); |
725 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) | 800 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) |
726 | seq_printf(s, "| "); | 801 | seq_printf(s, "| "); |
727 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) | 802 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) |
728 | seq_printf(s, " | | "); | 803 | seq_printf(s, " | | "); |
729 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) | 804 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) |
730 | seq_printf(s, " | | "); | 805 | seq_printf(s, " | | "); |
731 | seq_printf(s, " | | | |\n"); | 806 | seq_printf(s, " | | | |\n"); |
732 | } | 807 | } |
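With every column enabled, the two header lines stack roughly like this (spacing approximate, since each flag only contributes its own fragment):

    #     TIME        CPU  TASK/PID      DURATION            FUNCTION CALLS
    #      |           |    |    |        |   |               |   |   |   |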
733 | 808 | ||
734 | static void graph_trace_open(struct trace_iterator *iter) | 809 | static void graph_trace_open(struct trace_iterator *iter) |
735 | { | 810 | { |
736 | /* pid of the last trace processed */ | 811 | /* pid of the last trace processed */ |
737 | pid_t *last_pid = alloc_percpu(pid_t); | 812 | pid_t *last_pid = alloc_percpu(pid_t); |
738 | int cpu; | 813 | int cpu; |
739 | 814 | ||
740 | if (!last_pid) | 815 | if (!last_pid) |
741 | pr_warning("function graph tracer: not enough memory\n"); | 816 | pr_warning("function graph tracer: not enough memory\n"); |
742 | else | 817 | else |
743 | for_each_possible_cpu(cpu) { | 818 | for_each_possible_cpu(cpu) { |
744 | pid_t *pid = per_cpu_ptr(last_pid, cpu); | 819 | pid_t *pid = per_cpu_ptr(last_pid, cpu); |
745 | *pid = -1; | 820 | *pid = -1; |
746 | } | 821 | } |
747 | 822 | ||
748 | iter->private = last_pid; | 823 | iter->private = last_pid; |
749 | } | 824 | } |
750 | 825 | ||
751 | static void graph_trace_close(struct trace_iterator *iter) | 826 | static void graph_trace_close(struct trace_iterator *iter) |
752 | { | 827 | { |
753 | percpu_free(iter->private); | 828 | percpu_free(iter->private); |
754 | } | 829 | } |
755 | 830 | ||
756 | static struct tracer graph_trace __read_mostly = { | 831 | static struct tracer graph_trace __read_mostly = { |
757 | .name = "function_graph", | 832 | .name = "function_graph", |
758 | .open = graph_trace_open, | 833 | .open = graph_trace_open, |
759 | .close = graph_trace_close, | 834 | .close = graph_trace_close, |
760 | .wait_pipe = poll_wait_pipe, | 835 | .wait_pipe = poll_wait_pipe, |
761 | .init = graph_trace_init, | 836 | .init = graph_trace_init, |
762 | .reset = graph_trace_reset, | 837 | .reset = graph_trace_reset, |
763 | .print_line = print_graph_function, | 838 | .print_line = print_graph_function, |
764 | .print_header = print_graph_headers, | 839 | .print_header = print_graph_headers, |
765 | .flags = &tracer_flags, | 840 | .flags = &tracer_flags, |
766 | #ifdef CONFIG_FTRACE_SELFTEST | 841 | #ifdef CONFIG_FTRACE_SELFTEST |
767 | .selftest = trace_selftest_startup_function_graph, | 842 | .selftest = trace_selftest_startup_function_graph, |
768 | #endif | 843 | #endif |
769 | }; | 844 | }; |
770 | 845 | ||
771 | static __init int init_graph_trace(void) | 846 | static __init int init_graph_trace(void) |
772 | { | 847 | { |
773 | return register_tracer(&graph_trace); | 848 | return register_tracer(&graph_trace); |
774 | } | 849 | } |
775 | 850 | ||
776 | device_initcall(init_graph_trace); | 851 | device_initcall(init_graph_trace); |
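Once device_initcall() has run, the tracer is selected at runtime through debugfs; the tracer_flags bits above are exposed as funcgraph-* entries under trace_options (names assumed from the usual ftrace conventions). A typical session, assuming debugfs is mounted at /sys/kernel/debug:

    # mount -t debugfs nodev /sys/kernel/debug
    # echo function_graph > /sys/kernel/debug/tracing/current_tracer
    # cat /sys/kernel/debug/tracing/trace | head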
777 | 852 |