Commit f5074429621ceb0ec42f8116bd51d02c031faf82
1 parent: 67df6cc665
Exists in master and in 4 other branches
Blackfin: add support for dynamic ftrace
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
Showing 5 changed files with 181 additions and 9 deletions (side-by-side diff)
arch/blackfin/Kconfig
arch/blackfin/include/asm/ftrace.h
... | ... | @@ -12,6 +12,22 @@ |
12 | 12 | |
13 | 13 | #ifndef __ASSEMBLY__ |
14 | 14 | |
15 | +#ifdef CONFIG_DYNAMIC_FTRACE | |
16 | + | |
17 | +extern void _mcount(void); | |
18 | +#define MCOUNT_ADDR ((unsigned long)_mcount) | |
19 | + | |
20 | +static inline unsigned long ftrace_call_adjust(unsigned long addr) | |
21 | +{ | |
22 | + return addr; | |
23 | +} | |
24 | + | |
25 | +struct dyn_arch_ftrace { | |
26 | + /* No extra data needed for Blackfin */ | |
27 | +}; | |
28 | + | |
29 | +#endif | |
30 | + | |
15 | 31 | #ifdef CONFIG_FRAME_POINTER |
16 | 32 | #include <linux/mm.h> |
17 | 33 |
arch/blackfin/kernel/Makefile
arch/blackfin/kernel/ftrace-entry.S
... | ... | @@ -10,6 +10,18 @@ |
10 | 10 | |
11 | 11 | .text |
12 | 12 | |
13 | +#ifdef CONFIG_DYNAMIC_FTRACE | |
14 | + | |
15 | +/* Simple stub so we can boot the kernel until runtime patching has | |
16 | + * disabled all calls to this. Then it'll be unused. | |
17 | + */ | |
18 | +ENTRY(__mcount) | |
19 | +# if ANOMALY_05000371 | |
20 | + nop; nop; nop; nop; | |
21 | +# endif | |
22 | + rts; | |
23 | +ENDPROC(__mcount) | |
24 | + | |
13 | 25 | /* GCC will have called us before setting up the function prologue, so we |
14 | 26 | * can clobber the normal scratch registers, but we need to make sure to |
15 | 27 | * save/restore the registers used for argument passing (R0-R2) in case |
16 | 28 | |
17 | 29 | |
... | ... | @@ -20,15 +32,65 @@ |
20 | 32 | * function. And since GCC pushed the previous RETS for us, the previous |
21 | 33 | * function will be waiting there. mmmm pie. |
22 | 34 | */ |
35 | +ENTRY(_ftrace_caller) | |
36 | +# ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | |
37 | + /* optional micro optimization: return if stopped */ | |
38 | + p1.l = _function_trace_stop; | |
39 | + p1.h = _function_trace_stop; | |
40 | + r3 = [p1]; | |
41 | + cc = r3 == 0; | |
42 | + if ! cc jump _ftrace_stub (bp); | |
43 | +# endif | |
44 | + | |
45 | + /* save first/second/third function arg and the return register */ | |
46 | + [--sp] = r2; | |
47 | + [--sp] = r0; | |
48 | + [--sp] = r1; | |
49 | + [--sp] = rets; | |
50 | + | |
51 | + /* function_trace_call(unsigned long ip, unsigned long parent_ip): | |
52 | + * ip: this point was called by ... | |
53 | + * parent_ip: ... this function | |
54 | + * the ip itself will need adjusting for the mcount call | |
55 | + */ | |
56 | + r0 = rets; | |
57 | + r1 = [sp + 16]; /* skip the 4 local regs on stack */ | |
58 | + r0 += -MCOUNT_INSN_SIZE; | |
59 | + | |
60 | +.globl _ftrace_call | |
61 | +_ftrace_call: | |
62 | + call _ftrace_stub | |
63 | + | |
64 | +# ifdef CONFIG_FUNCTION_GRAPH_TRACER | |
65 | +.globl _ftrace_graph_call | |
66 | +_ftrace_graph_call: | |
67 | + nop; /* jump _ftrace_graph_caller; */ | |
68 | +# endif | |
69 | + | |
70 | + /* restore state and get out of dodge */ | |
71 | +.Lfinish_trace: | |
72 | + rets = [sp++]; | |
73 | + r1 = [sp++]; | |
74 | + r0 = [sp++]; | |
75 | + r2 = [sp++]; | |
76 | + | |
77 | +.globl _ftrace_stub | |
78 | +_ftrace_stub: | |
79 | + rts; | |
80 | +ENDPROC(_ftrace_caller) | |
81 | + | |
82 | +#else | |
83 | + | |
84 | +/* See documentation for _ftrace_caller */ | |
23 | 85 | ENTRY(__mcount) |
24 | -#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | |
86 | +# ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | |
25 | 87 | /* optional micro optimization: return if stopped */ |
26 | 88 | p1.l = _function_trace_stop; |
27 | 89 | p1.h = _function_trace_stop; |
28 | 90 | r3 = [p1]; |
29 | 91 | cc = r3 == 0; |
30 | 92 | if ! cc jump _ftrace_stub (bp); |
31 | -#endif | |
93 | +# endif | |
32 | 94 | |
33 | 95 | /* save third function arg early so we can do testing below */ |
34 | 96 | [--sp] = r2; |
... | ... | @@ -44,7 +106,7 @@ |
44 | 106 | cc = r2 == r3; |
45 | 107 | if ! cc jump .Ldo_trace; |
46 | 108 | |
47 | -#ifdef CONFIG_FUNCTION_GRAPH_TRACER | |
109 | +# ifdef CONFIG_FUNCTION_GRAPH_TRACER | |
48 | 110 | /* if the ftrace_graph_return function pointer is not set to |
49 | 111 | * the ftrace_stub entry, call prepare_ftrace_return(). |
50 | 112 | */ |
... | ... | @@ -64,7 +126,7 @@ |
64 | 126 | r3 = [p0]; |
65 | 127 | cc = r2 == r3; |
66 | 128 | if ! cc jump _ftrace_graph_caller; |
67 | -#endif | |
129 | +# endif | |
68 | 130 | |
69 | 131 | r2 = [sp++]; |
70 | 132 | rts; |
... | ... | @@ -103,6 +165,8 @@ |
103 | 165 | rts; |
104 | 166 | ENDPROC(__mcount) |
105 | 167 | |
168 | +#endif | |
169 | + | |
106 | 170 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
107 | 171 | /* The prepare_ftrace_return() function is similar to the trace function |
108 | 172 | * except it takes a pointer to the location of the frompc. This is so |
... | ... | @@ -110,6 +174,7 @@ |
110 | 174 | * purposes. |
111 | 175 | */ |
112 | 176 | ENTRY(_ftrace_graph_caller) |
177 | +# ifndef CONFIG_DYNAMIC_FTRACE | |
113 | 178 | /* save first/second function arg and the return register */ |
114 | 179 | [--sp] = r0; |
115 | 180 | [--sp] = r1; |
116 | 181 | |
... | ... | @@ -118,9 +183,13 @@ |
118 | 183 | /* prepare_ftrace_return(parent, self_addr, frame_pointer) */ |
119 | 184 | r0 = sp; /* unsigned long *parent */ |
120 | 185 | r1 = rets; /* unsigned long self_addr */ |
121 | -#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST | |
186 | +# else | |
187 | + r0 = sp; /* unsigned long *parent */ | |
188 | + r1 = [sp]; /* unsigned long self_addr */ | |
189 | +# endif | |
190 | +# ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST | |
122 | 191 | r2 = fp; /* unsigned long frame_pointer */ |
123 | -#endif | |
192 | +# endif | |
124 | 193 | r0 += 16; /* skip the 4 local regs on stack */ |
125 | 194 | r1 += -MCOUNT_INSN_SIZE; |
126 | 195 | call _prepare_ftrace_return; |
127 | 196 | |
... | ... | @@ -139,9 +208,9 @@ |
139 | 208 | [--sp] = r1; |
140 | 209 | |
141 | 210 | /* get original return address */ |
142 | -#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST | |
211 | +# ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST | |
143 | 212 | r0 = fp; /* Blackfin is sane, so omit this */ |
144 | -#endif | |
213 | +# endif | |
145 | 214 | call _ftrace_return_to_handler; |
146 | 215 | rets = r0; |
147 | 216 |
arch/blackfin/kernel/ftrace.c
1 | 1 | /* |
2 | 2 | * ftrace graph code |
3 | 3 | * |
4 | - * Copyright (C) 2009 Analog Devices Inc. | |
4 | + * Copyright (C) 2009-2010 Analog Devices Inc. | |
5 | 5 | * Licensed under the GPL-2 or later. |
6 | 6 | */ |
7 | 7 | |
8 | 8 | #include <linux/ftrace.h> |
9 | 9 | #include <linux/kernel.h> |
10 | 10 | #include <linux/sched.h> |
11 | +#include <linux/uaccess.h> | |
11 | 12 | #include <asm/atomic.h> |
13 | +#include <asm/cacheflush.h> | |
12 | 14 | |
15 | +#ifdef CONFIG_DYNAMIC_FTRACE | |
16 | + | |
17 | +static const unsigned char mnop[] = { | |
18 | + 0x03, 0xc0, 0x00, 0x18, /* MNOP; */ | |
19 | + 0x03, 0xc0, 0x00, 0x18, /* MNOP; */ | |
20 | +}; | |
21 | + | |
22 | +static void bfin_make_pcrel24(unsigned char *insn, unsigned long src, | |
23 | + unsigned long dst) | |
24 | +{ | |
25 | + uint32_t pcrel = (dst - src) >> 1; | |
26 | + insn[0] = pcrel >> 16; | |
27 | + insn[1] = 0xe3; | |
28 | + insn[2] = pcrel; | |
29 | + insn[3] = pcrel >> 8; | |
30 | +} | |
31 | +#define bfin_make_pcrel24(insn, src, dst) bfin_make_pcrel24(insn, src, (unsigned long)(dst)) | |
32 | + | |
33 | +static int ftrace_modify_code(unsigned long ip, const unsigned char *code, | |
34 | + unsigned long len) | |
35 | +{ | |
36 | + int ret = probe_kernel_write((void *)ip, (void *)code, len); | |
37 | + flush_icache_range(ip, ip + len); | |
38 | + return ret; | |
39 | +} | |
40 | + | |
41 | +int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, | |
42 | + unsigned long addr) | |
43 | +{ | |
44 | + /* Turn the mcount call site into two MNOPs as those are 32bit insns */ | |
45 | + return ftrace_modify_code(rec->ip, mnop, sizeof(mnop)); | |
46 | +} | |
47 | + | |
48 | +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | |
49 | +{ | |
50 | + /* Restore the mcount call site */ | |
51 | + unsigned char call[8]; | |
52 | + call[0] = 0x67; /* [--SP] = RETS; */ | |
53 | + call[1] = 0x01; | |
54 | + bfin_make_pcrel24(&call[2], rec->ip + 2, addr); | |
55 | + call[6] = 0x27; /* RETS = [SP++]; */ | |
56 | + call[7] = 0x01; | |
57 | + return ftrace_modify_code(rec->ip, call, sizeof(call)); | |
58 | +} | |
59 | + | |
60 | +int ftrace_update_ftrace_func(ftrace_func_t func) | |
61 | +{ | |
62 | + unsigned char call[4]; | |
63 | + unsigned long ip = (unsigned long)&ftrace_call; | |
64 | + bfin_make_pcrel24(call, ip, func); | |
65 | + return ftrace_modify_code(ip, call, sizeof(call)); | |
66 | +} | |
67 | + | |
68 | +int __init ftrace_dyn_arch_init(void *data) | |
69 | +{ | |
70 | + /* return value is done indirectly via data */ | |
71 | + *(unsigned long *)data = 0; | |
72 | + | |
73 | + return 0; | |
74 | +} | |
75 | + | |
76 | +#endif | |
77 | + | |
13 | 78 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
79 | + | |
80 | +# ifdef CONFIG_DYNAMIC_FTRACE | |
81 | + | |
82 | +extern void ftrace_graph_call(void); | |
83 | + | |
84 | +int ftrace_enable_ftrace_graph_caller(void) | |
85 | +{ | |
86 | + unsigned long ip = (unsigned long)&ftrace_graph_call; | |
87 | + uint16_t jump_pcrel12 = ((unsigned long)&ftrace_graph_caller - ip) >> 1; | |
88 | + jump_pcrel12 |= 0x2000; | |
89 | + return ftrace_modify_code(ip, (void *)&jump_pcrel12, sizeof(jump_pcrel12)); | |
90 | +} | |
91 | + | |
92 | +int ftrace_disable_ftrace_graph_caller(void) | |
93 | +{ | |
94 | + return ftrace_modify_code((unsigned long)&ftrace_graph_call, empty_zero_page, 2); | |
95 | +} | |
96 | + | |
97 | +# endif | |
14 | 98 | |
15 | 99 | /* |
16 | 100 | * Hook the return address and push it in the stack of return addrs |