Commit 813b8520f5c240c71df55d14095a7b171de264ce

Authored by Ingo Molnar

Merge branch 'ppc/ftrace' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/powerpc

Showing 6 changed files

arch/powerpc/include/asm/ftrace.h
... ... @@ -7,7 +7,19 @@
7 7  
8 8 #ifndef __ASSEMBLY__
9 9 extern void _mcount(void);
10   -#endif
  10 +
  11 +#ifdef CONFIG_DYNAMIC_FTRACE
  12 +static inline unsigned long ftrace_call_adjust(unsigned long addr)
  13 +{
  14 + /* relocation of mcount call site is the same as the address */
  15 + return addr;
  16 +}
  17 +
  18 +struct dyn_arch_ftrace {
  19 + struct module *mod;
  20 +};
  21 +#endif /* CONFIG_DYNAMIC_FTRACE */
  22 +#endif /* __ASSEMBLY__ */
11 23  
12 24 #endif
13 25  
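
ftrace_call_adjust() is the hook the generic ftrace core applies to every address harvested from the __mcount_loc section before storing it in a record; on powerpc the recorded address already is the call site, so the hook is the identity. A rough sketch of how the new per-arch payload rides inside a record follows; the dyn_ftrace layout shown is illustrative, not the exact core definition:

struct module;

/* per-arch payload, exactly as added above */
struct dyn_arch_ftrace {
	struct module *mod;		/* module owning the mcount call site */
};

/* illustrative shape of the generic record that embeds it */
struct dyn_ftrace {
	unsigned long		ip;	/* ftrace_call_adjust(addr) of the site */
	unsigned long		flags;
	struct dyn_arch_ftrace	arch;	/* consulted as rec->arch.mod below */
};
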
arch/powerpc/include/asm/module.h
... ... @@ -34,11 +34,19 @@
34 34 #ifdef __powerpc64__
35 35 unsigned int stubs_section; /* Index of stubs section in module */
36 36 unsigned int toc_section; /* What section is the TOC? */
37   -#else
  37 +#ifdef CONFIG_DYNAMIC_FTRACE
  38 + unsigned long toc;
  39 + unsigned long tramp;
  40 +#endif
  41 +
  42 +#else /* powerpc64 */
38 43 /* Indices of PLT sections within module. */
39 44 unsigned int core_plt_section;
40 45 unsigned int init_plt_section;
  46 +#ifdef CONFIG_DYNAMIC_FTRACE
  47 + unsigned long tramp;
41 48 #endif
  49 +#endif /* powerpc64 */
42 50  
43 51 /* List of BUG addresses, source line numbers and filenames */
44 52 struct list_head bug_list;
... ... @@ -65,6 +73,12 @@
65 73 # ifdef MODULE
66 74 asm(".section .plt,\"ax\",@nobits; .align 3; .previous");
67 75 asm(".section .init.plt,\"ax\",@nobits; .align 3; .previous");
  76 +# endif /* MODULE */
  77 +#endif
  78 +
  79 +#ifdef CONFIG_DYNAMIC_FTRACE
  80 +# ifdef MODULE
  81 + asm(".section .ftrace.tramp,\"ax\",@nobits; .align 3; .previous");
68 82 # endif /* MODULE */
69 83 #endif
70 84  
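
The new tramp slot (plus toc on 64-bit) exists because a relative bl reaches only about ±32 MB, so a module loaded in vmalloc space generally cannot branch straight to the kernel's ftrace entry code. Below is a standalone user-space sketch of the range arithmetic, mirroring the test_24bit_addr() helper added in ftrace.c further down; all addresses are made up:

#include <stdio.h>

/* same arithmetic as test_24bit_addr(): a relative branch encodes a
 * signed 26-bit, word-aligned displacement */
static int reaches_24bit(unsigned long ip, unsigned long addr)
{
	long diff = addr - ip;

	return (diff < (1L << 25)) && (diff > -(1L << 26));
}

int main(void)
{
	unsigned long mcount    = 0xc0004000UL;	/* hypothetical kernel text */
	unsigned long kernel_ip = 0xc0404000UL;	/* call site 4 MB away */
	unsigned long module_ip = 0xd1004000UL;	/* call site in vmalloc space */

	printf("kernel site direct bl ok: %d\n", reaches_24bit(kernel_ip, mcount));
	printf("module site direct bl ok: %d\n", reaches_24bit(module_ip, mcount));
	return 0;
}
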
arch/powerpc/kernel/ftrace.c
... ... @@ -9,22 +9,30 @@
9 9  
10 10 #include <linux/spinlock.h>
11 11 #include <linux/hardirq.h>
  12 +#include <linux/uaccess.h>
  13 +#include <linux/module.h>
12 14 #include <linux/ftrace.h>
13 15 #include <linux/percpu.h>
14 16 #include <linux/init.h>
15 17 #include <linux/list.h>
16 18  
17 19 #include <asm/cacheflush.h>
  20 +#include <asm/code-patching.h>
18 21 #include <asm/ftrace.h>
19 22  
  23 +#if 0
  24 +#define DEBUGP printk
  25 +#else
  26 +#define DEBUGP(fmt, ...) do { } while (0)
  27 +#endif
20 28  
21   -static unsigned int ftrace_nop = 0x60000000;
  29 +static unsigned int ftrace_nop = PPC_NOP_INSTR;
22 30  
23 31 #ifdef CONFIG_PPC32
24 32 # define GET_ADDR(addr) addr
25 33 #else
26 34 /* PowerPC64's functions are data that points to the functions */
27   -# define GET_ADDR(addr) *(unsigned long *)addr
  35 +# define GET_ADDR(addr) (*(unsigned long *)addr)
28 36 #endif
29 37  
30 38  
31 39  
... ... @@ -33,12 +41,12 @@
33 41 return (int)(addr - ip);
34 42 }
35 43  
36   -unsigned char *ftrace_nop_replace(void)
  44 +static unsigned char *ftrace_nop_replace(void)
37 45 {
38 46 return (char *)&ftrace_nop;
39 47 }
40 48  
41   -unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
  49 +static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
42 50 {
43 51 static unsigned int op;
44 52  
45 53  
46 54  
47 55  
48 56  
49 57  
50 58  
51 59  
52 60  
... ... @@ -68,51 +76,436 @@
68 76 # define _ASM_PTR " .long "
69 77 #endif
70 78  
71   -int
  79 +static int
72 80 ftrace_modify_code(unsigned long ip, unsigned char *old_code,
73 81 unsigned char *new_code)
74 82 {
75   - unsigned replaced;
76   - unsigned old = *(unsigned *)old_code;
77   - unsigned new = *(unsigned *)new_code;
78   - int faulted = 0;
  83 + unsigned char replaced[MCOUNT_INSN_SIZE];
79 84  
80 85 /*
81 86 * Note: Due to modules and __init, code can
82 87 * disappear and change, we need to protect against faulting
83   - * as well as code changing.
  88 + * as well as code changing. We do this by using the
  89 + * probe_kernel_* functions.
84 90 *
85 91 * No real locking needed, this code is run through
86   - * kstop_machine.
  92 + * kstop_machine, or before SMP starts.
87 93 */
88   - asm volatile (
89   - "1: lwz %1, 0(%2)\n"
90   - " cmpw %1, %5\n"
91   - " bne 2f\n"
92   - " stwu %3, 0(%2)\n"
93   - "2:\n"
94   - ".section .fixup, \"ax\"\n"
95   - "3: li %0, 1\n"
96   - " b 2b\n"
97   - ".previous\n"
98   - ".section __ex_table,\"a\"\n"
99   - _ASM_ALIGN "\n"
100   - _ASM_PTR "1b, 3b\n"
101   - ".previous"
102   - : "=r"(faulted), "=r"(replaced)
103   - : "r"(ip), "r"(new),
104   - "0"(faulted), "r"(old)
105   - : "memory");
106 94  
107   - if (replaced != old && replaced != new)
108   - faulted = 2;
  95 + /* read the text we want to modify */
  96 + if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
  97 + return -EFAULT;
109 98  
110   - if (!faulted)
111   - flush_icache_range(ip, ip + 8);
  99 + /* Make sure it is what we expect it to be */
  100 + if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
  101 + return -EINVAL;
112 102  
113   - return faulted;
  103 + /* replace the text with the new text */
  104 + if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
  105 + return -EPERM;
  106 +
  107 + flush_icache_range(ip, ip + 8);
  108 +
  109 + return 0;
114 110 }
115 111  
  112 +/*
  113 + * Helper functions that are the same for both PPC64 and PPC32.
  114 + */
  115 +static int test_24bit_addr(unsigned long ip, unsigned long addr)
  116 +{
  117 + long diff;
  118 +
  119 + /*
  120 + * Can we get to addr from ip in 24 bits?
  121 + * (26 really, since we multiply by 4 for 4-byte alignment)
  122 + */
  123 + diff = addr - ip;
  124 +
  125 + /*
  126 + * Return true if diff is less than 1 << 25
  127 + * and greater than -1 << 26.
  128 + */
  129 + return (diff < (1 << 25)) && (diff > (-1 << 26));
  130 +}
  131 +
  132 +static int is_bl_op(unsigned int op)
  133 +{
  134 + return (op & 0xfc000003) == 0x48000001;
  135 +}
  136 +
  137 +static int test_offset(unsigned long offset)
  138 +{
  139 + return (offset + 0x2000000 > 0x3ffffff) || ((offset & 3) != 0);
  140 +}
  141 +
  142 +static unsigned long find_bl_target(unsigned long ip, unsigned int op)
  143 +{
  144 + int offset;
  145 +
  146 + offset = (op & 0x03fffffc);
  147 + /* make it signed */
  148 + if (offset & 0x02000000)
  149 + offset |= 0xfe000000;
  150 +
  151 + return ip + (long)offset;
  152 +}
  153 +
  154 +static unsigned int branch_offset(unsigned long offset)
  155 +{
  156 + /* return "bl ip+offset" */
  157 + return 0x48000001 | (offset & 0x03fffffc);
  158 +}
  159 +
  160 +#ifdef CONFIG_PPC64
  161 +static int
  162 +__ftrace_make_nop(struct module *mod,
  163 + struct dyn_ftrace *rec, unsigned long addr)
  164 +{
  165 + unsigned char replaced[MCOUNT_INSN_SIZE * 2];
  166 + unsigned int *op = (unsigned *)&replaced;
  167 + unsigned char jmp[8];
  168 + unsigned long *ptr = (unsigned long *)&jmp;
  169 + unsigned long ip = rec->ip;
  170 + unsigned long tramp;
  171 + int offset;
  172 +
  173 + /* read where this goes */
  174 + if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
  175 + return -EFAULT;
  176 +
  177 + /* Make sure that this is still a 24-bit jump */
  178 + if (!is_bl_op(*op)) {
  179 + printk(KERN_ERR "Not expected bl: opcode is %x\n", *op);
  180 + return -EINVAL;
  181 + }
  182 +
  183 + /* lets find where the pointer goes */
  184 + tramp = find_bl_target(ip, *op);
  185 +
  186 + /*
  187 + * On PPC64 the trampoline looks like:
  188 + * 0x3d, 0x82, 0x00, 0x00, addis r12,r2, <high>
  189 + * 0x39, 0x8c, 0x00, 0x00, addi r12,r12, <low>
  190 + * Where bytes 2, 3, 6 and 7 make up the 32-bit offset
  191 + * into the TOC that holds the pointer
  192 + * to jump to.
  193 + * 0xf8, 0x41, 0x00, 0x28, std r2,40(r1)
  194 + * 0xe9, 0x6c, 0x00, 0x20, ld r11,32(r12)
  195 + * The actual address is 32 bytes from the offset
  196 + * into the TOC.
  197 + * 0xe8, 0x4c, 0x00, 0x28, ld r2,40(r12)
  198 + */
  199 +
  200 + DEBUGP("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);
  201 +
  202 + /* Find where the trampoline jumps to */
  203 + if (probe_kernel_read(jmp, (void *)tramp, 8)) {
  204 + printk(KERN_ERR "Failed to read %lx\n", tramp);
  205 + return -EFAULT;
  206 + }
  207 +
  208 + DEBUGP(" %08x %08x",
  209 + (unsigned)(*ptr >> 32),
  210 + (unsigned)*ptr);
  211 +
  212 + offset = (unsigned)jmp[2] << 24 |
  213 + (unsigned)jmp[3] << 16 |
  214 + (unsigned)jmp[6] << 8 |
  215 + (unsigned)jmp[7];
  216 +
  217 + DEBUGP(" %x ", offset);
  218 +
  219 + /* get the address this jumps to */
  220 + tramp = mod->arch.toc + offset + 32;
  221 + DEBUGP("toc: %lx", tramp);
  222 +
  223 + if (probe_kernel_read(jmp, (void *)tramp, 8)) {
  224 + printk(KERN_ERR "Failed to read %lx\n", tramp);
  225 + return -EFAULT;
  226 + }
  227 +
  228 + DEBUGP(" %08x %08x\n",
  229 + (unsigned)(*ptr >> 32),
  230 + (unsigned)*ptr);
  231 +
  232 + /* This should match what was called */
  233 + if (*ptr != GET_ADDR(addr)) {
  234 + printk(KERN_ERR "addr does not match %lx\n", *ptr);
  235 + return -EINVAL;
  236 + }
  237 +
  238 + /*
  239 + * We want to nop the line, but the next line is
  240 + * 0xe8, 0x41, 0x00, 0x28 ld r2,40(r1)
  241 + * This needs to be turned into a nop too.
  242 + */
  243 + if (probe_kernel_read(replaced, (void *)(ip+4), MCOUNT_INSN_SIZE))
  244 + return -EFAULT;
  245 +
  246 + if (*op != 0xe8410028) {
  247 + printk(KERN_ERR "Next line is not ld! (%08x)\n", *op);
  248 + return -EINVAL;
  249 + }
  250 +
  251 + /*
  252 + * Milton Miller pointed out that we can not blindly do nops.
  253 + * If a task was preempted when calling a trace function,
  254 + * the nops will remove the way to restore the TOC in r2
  255 + * and the r2 TOC will get corrupted.
  256 + */
  257 +
  258 + /*
  259 + * Replace:
  260 + * bl <tramp> <==== will be replaced with "b 1f"
  261 + * ld r2,40(r1)
  262 + * 1:
  263 + */
  264 + op[0] = 0x48000008; /* b +8 */
  265 +
  266 + if (probe_kernel_write((void *)ip, replaced, MCOUNT_INSN_SIZE))
  267 + return -EPERM;
  268 +
  269 + return 0;
  270 +}
  271 +
  272 +#else /* !PPC64 */
  273 +static int
  274 +__ftrace_make_nop(struct module *mod,
  275 + struct dyn_ftrace *rec, unsigned long addr)
  276 +{
  277 + unsigned char replaced[MCOUNT_INSN_SIZE];
  278 + unsigned int *op = (unsigned *)&replaced;
  279 + unsigned char jmp[8];
  280 + unsigned int *ptr = (unsigned int *)&jmp;
  281 + unsigned long ip = rec->ip;
  282 + unsigned long tramp;
  283 + int offset;
  284 +
  285 + if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
  286 + return -EFAULT;
  287 +
  288 + /* Make sure that this is still a 24-bit jump */
  289 + if (!is_bl_op(*op)) {
  290 + printk(KERN_ERR "Not expected bl: opcode is %x\n", *op);
  291 + return -EINVAL;
  292 + }
  293 +
  294 + /* lets find where the pointer goes */
  295 + tramp = find_bl_target(ip, *op);
  296 +
  297 + /*
  298 + * On PPC32 the trampoline looks like:
  299 + * lis r11,sym@ha
  300 + * addi r11,r11,sym@l
  301 + * mtctr r11
  302 + * bctr
  303 + */
  304 +
  305 + DEBUGP("ip:%lx jumps to %lx", ip, tramp);
  306 +
  307 + /* Find where the trampoline jumps to */
  308 + if (probe_kernel_read(jmp, (void *)tramp, 8)) {
  309 + printk(KERN_ERR "Failed to read %lx\n", tramp);
  310 + return -EFAULT;
  311 + }
  312 +
  313 + DEBUGP(" %08x %08x ", ptr[0], ptr[1]);
  314 +
  315 + tramp = (ptr[1] & 0xffff) |
  316 + ((ptr[0] & 0xffff) << 16);
  317 + if (tramp & 0x8000)
  318 + tramp -= 0x10000;
  319 +
  320 + DEBUGP(" %x ", tramp);
  321 +
  322 + if (tramp != addr) {
  323 + printk(KERN_ERR
  324 + "Trampoline location %08lx does not match addr\n",
  325 + tramp);
  326 + return -EINVAL;
  327 + }
  328 +
  329 + op[0] = PPC_NOP_INSTR;
  330 +
  331 + if (probe_kernel_write((void *)ip, replaced, MCOUNT_INSN_SIZE))
  332 + return -EPERM;
  333 +
  334 + return 0;
  335 +}
  336 +#endif /* PPC64 */
  337 +
  338 +int ftrace_make_nop(struct module *mod,
  339 + struct dyn_ftrace *rec, unsigned long addr)
  340 +{
  341 + unsigned char *old, *new;
  342 + unsigned long ip = rec->ip;
  343 +
  344 + /*
  345 + * If the calling address is more than 24 bits away,
  346 + * then we had to use a trampoline to make the call.
  347 + * Otherwise just update the call site.
  348 + */
  349 + if (test_24bit_addr(ip, addr)) {
  350 + /* within range */
  351 + old = ftrace_call_replace(ip, addr);
  352 + new = ftrace_nop_replace();
  353 + return ftrace_modify_code(ip, old, new);
  354 + }
  355 +
  356 + /*
  357 + * Out of range jumps are called from modules.
  358 + * We should either already have a pointer to the module
  359 + * or it has been passed in.
  360 + */
  361 + if (!rec->arch.mod) {
  362 + if (!mod) {
  363 + printk(KERN_ERR "No module loaded addr=%lx\n",
  364 + addr);
  365 + return -EFAULT;
  366 + }
  367 + rec->arch.mod = mod;
  368 + } else if (mod) {
  369 + if (mod != rec->arch.mod) {
  370 + printk(KERN_ERR
  371 + "Record mod %p not equal to passed in mod %p\n",
  372 + rec->arch.mod, mod);
  373 + return -EINVAL;
  374 + }
  375 + /* nothing to do if mod == rec->arch.mod */
  376 + } else
  377 + mod = rec->arch.mod;
  378 +
  379 + return __ftrace_make_nop(mod, rec, addr);
  380 +
  381 +}
  382 +
  383 +#ifdef CONFIG_PPC64
  384 +static int
  385 +__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
  386 +{
  387 + unsigned char replaced[MCOUNT_INSN_SIZE * 2];
  388 + unsigned int *op = (unsigned *)&replaced;
  389 + unsigned long ip = rec->ip;
  390 + unsigned long offset;
  391 +
  392 + /* read where this goes */
  393 + if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE * 2))
  394 + return -EFAULT;
  395 +
  396 + /*
  397 + * It should be pointing to two nops or
  398 + * b +8; ld r2,40(r1)
  399 + */
  400 + if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) &&
  401 + ((op[0] != PPC_NOP_INSTR) || (op[1] != PPC_NOP_INSTR))) {
  402 + printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]);
  403 + return -EINVAL;
  404 + }
  405 +
  406 + /* If we never set up a trampoline to ftrace_caller, then bail */
  407 + if (!rec->arch.mod->arch.tramp) {
  408 + printk(KERN_ERR "No ftrace trampoline\n");
  409 + return -EINVAL;
  410 + }
  411 +
  412 + /* now calculate a jump to the ftrace caller trampoline */
  413 + offset = rec->arch.mod->arch.tramp - ip;
  414 +
  415 + if (test_offset(offset)) {
  416 + printk(KERN_ERR "REL24 %li out of range!\n",
  417 + (long int)offset);
  418 + return -EINVAL;
  419 + }
  420 +
  421 + /* Set to "bl addr" */
  422 + op[0] = branch_offset(offset);
  423 + /* ld r2,40(r1) */
  424 + op[1] = 0xe8410028;
  425 +
  426 + DEBUGP("write to %lx\n", rec->ip);
  427 +
  428 + if (probe_kernel_write((void *)ip, replaced, MCOUNT_INSN_SIZE * 2))
  429 + return -EPERM;
  430 +
  431 + return 0;
  432 +}
  433 +#else
  434 +static int
  435 +__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
  436 +{
  437 + unsigned char replaced[MCOUNT_INSN_SIZE];
  438 + unsigned int *op = (unsigned *)&replaced;
  439 + unsigned long ip = rec->ip;
  440 + unsigned long offset;
  441 +
  442 + /* read where this goes */
  443 + if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
  444 + return -EFAULT;
  445 +
  446 + /* It should be pointing to a nop */
  447 + if (op[0] != PPC_NOP_INSTR) {
  448 + printk(KERN_ERR "Expected NOP but have %x\n", op[0]);
  449 + return -EINVAL;
  450 + }
  451 +
  452 + /* If we never set up a trampoline to ftrace_caller, then bail */
  453 + if (!rec->arch.mod->arch.tramp) {
  454 + printk(KERN_ERR "No ftrace trampoline\n");
  455 + return -EINVAL;
  456 + }
  457 +
  458 + /* now calculate a jump to the ftrace caller trampoline */
  459 + offset = rec->arch.mod->arch.tramp - ip;
  460 +
  461 + if (test_offset(offset)) {
  462 + printk(KERN_ERR "REL24 %li out of range!\n",
  463 + (long int)offset);
  464 + return -EINVAL;
  465 + }
  466 +
  467 + /* Set to "bl addr" */
  468 + op[0] = branch_offset(offset);
  469 +
  470 + DEBUGP("write to %lx\n", rec->ip);
  471 +
  472 + if (probe_kernel_write((void *)ip, replaced, MCOUNT_INSN_SIZE))
  473 + return -EPERM;
  474 +
  475 + return 0;
  476 +}
  477 +#endif /* CONFIG_PPC64 */
  478 +
  479 +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
  480 +{
  481 + unsigned char *old, *new;
  482 + unsigned long ip = rec->ip;
  483 +
  484 + /*
  485 + * If the calling address is more than 24 bits away,
  486 + * then we had to use a trampoline to make the call.
  487 + * Otherwise just update the call site.
  488 + */
  489 + if (test_24bit_addr(ip, addr)) {
  490 + /* within range */
  491 + old = ftrace_nop_replace();
  492 + new = ftrace_call_replace(ip, addr);
  493 + return ftrace_modify_code(ip, old, new);
  494 + }
  495 +
  496 + /*
  497 + * Out of range jumps are called from modules.
  498 + * Since we are converting from a nop, the record had better
  499 + * already have a module defined.
  500 + */
  501 + if (!rec->arch.mod) {
  502 + printk(KERN_ERR "No module loaded\n");
  503 + return -EINVAL;
  504 + }
  505 +
  506 + return __ftrace_make_call(rec, addr);
  507 +}
  508 +
116 509 int ftrace_update_ftrace_func(ftrace_func_t func)
117 510 {
118 511 unsigned long ip = (unsigned long)(&ftrace_call);
119 512  
... ... @@ -128,9 +521,10 @@
128 521  
129 522 int __init ftrace_dyn_arch_init(void *data)
130 523 {
131   - /* This is running in kstop_machine */
  524 + /* caller expects data to be zero */
  525 + unsigned long *p = data;
132 526  
133   - ftrace_mcount_set(data);
  527 + *p = 0;
134 528  
135 529 return 0;
136 530 }
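
For reference, the bl handling in this file round-trips between an instruction word and a signed 26-bit, word-aligned displacement. A self-contained user-space sketch exercising the same masks and sign extension as is_bl_op(), branch_offset() and find_bl_target() above:

#include <assert.h>
#include <stdio.h>

/* "bl" is opcode 18 with the LK bit set: 0x48000001.  The displacement
 * lives in the bits selected by 0x03fffffc; the low two bits are zero. */
static int is_bl_op(unsigned int op)
{
	return (op & 0xfc000003) == 0x48000001;
}

static unsigned int encode_bl(long offset)
{
	return 0x48000001 | ((unsigned long)offset & 0x03fffffc);
}

static unsigned long decode_bl_target(unsigned long ip, unsigned int op)
{
	int offset = op & 0x03fffffc;

	if (offset & 0x02000000)	/* sign-extend the 26-bit field */
		offset |= 0xfe000000;
	return ip + (long)offset;
}

int main(void)
{
	unsigned long ip = 0x10000000UL;	/* arbitrary call site */
	long deltas[] = { 0x1000, -0x1000, 0x1fffffc, -0x2000000 };

	for (int i = 0; i < 4; i++) {
		unsigned int op = encode_bl(deltas[i]);

		assert(is_bl_op(op));
		assert(decode_bl_target(ip, op) == ip + deltas[i]);
	}
	printf("bl encode/decode round-trip ok\n");
	return 0;
}
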
arch/powerpc/kernel/idle.c
... ... @@ -69,9 +69,14 @@
69 69 smp_mb();
70 70 local_irq_disable();
71 71  
  72 + /* Don't trace irqs off for idle */
  73 + stop_critical_timings();
  74 +
72 75 /* check again after disabling irqs */
73 76 if (!need_resched() && !cpu_should_die())
74 77 ppc_md.power_save();
  78 +
  79 + start_critical_timings();
75 80  
76 81 local_irq_enable();
77 82 set_thread_flag(TIF_POLLING_NRFLAG);
arch/powerpc/kernel/module_32.c
... ... @@ -22,6 +22,7 @@
22 22 #include <linux/fs.h>
23 23 #include <linux/string.h>
24 24 #include <linux/kernel.h>
  25 +#include <linux/ftrace.h>
25 26 #include <linux/cache.h>
26 27 #include <linux/bug.h>
27 28 #include <linux/sort.h>
... ... @@ -53,6 +54,9 @@
53 54 r_addend = rela[i].r_addend;
54 55 }
55 56  
  57 +#ifdef CONFIG_DYNAMIC_FTRACE
  58 + _count_relocs++; /* add one for ftrace_caller */
  59 +#endif
56 60 return _count_relocs;
57 61 }
58 62  
... ... @@ -306,6 +310,12 @@
306 310 return -ENOEXEC;
307 311 }
308 312 }
  313 +#ifdef CONFIG_DYNAMIC_FTRACE
  314 + module->arch.tramp =
  315 + do_plt_call(module->module_core,
  316 + (unsigned long)ftrace_caller,
  317 + sechdrs, module);
  318 +#endif
309 319 return 0;
310 320 }
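
The address recorded in module->arch.tramp here points at a lis/addi/mtctr/bctr stub, which the PPC32 __ftrace_make_nop() above decodes back into its target. A standalone sketch of that decode, including the @ha/@l sign-extension compensation; the target address and register encodings are sample values:

#include <assert.h>
#include <stdio.h>

/* decode the target of "lis r11,sym@ha ; addi r11,r11,sym@l",
 * same arithmetic as the PPC32 __ftrace_make_nop() */
static unsigned long decode_lis_addi(unsigned int hi_insn, unsigned int lo_insn)
{
	unsigned long tramp = (lo_insn & 0xffff) | ((hi_insn & 0xffff) << 16);

	if (tramp & 0x8000)	/* addi sign-extends its immediate */
		tramp -= 0x10000;
	return tramp;
}

/* @ha pre-compensates for that sign extension; @l is the low halfword */
static unsigned int ha(unsigned long addr) { return ((addr + 0x8000) >> 16) & 0xffff; }
static unsigned int lo(unsigned long addr) { return addr & 0xffff; }

int main(void)
{
	unsigned long sym = 0xc0409abcUL;	/* hypothetical target */

	assert(decode_lis_addi(0x3d600000 | ha(sym),	/* lis r11,sym@ha */
			       0x396b0000 | lo(sym))	/* addi r11,r11,sym@l */
	       == sym);
	printf("decoded %#lx\n", sym);
	return 0;
}
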
arch/powerpc/kernel/module_64.c
... ... @@ -20,6 +20,7 @@
20 20 #include <linux/moduleloader.h>
21 21 #include <linux/err.h>
22 22 #include <linux/vmalloc.h>
  23 +#include <linux/ftrace.h>
23 24 #include <linux/bug.h>
24 25 #include <asm/module.h>
25 26 #include <asm/firmware.h>
... ... @@ -163,6 +164,11 @@
163 164 }
164 165 }
165 166  
  167 +#ifdef CONFIG_DYNAMIC_FTRACE
  168 + /* make the trampoline to the ftrace_caller */
  169 + relocs++;
  170 +#endif
  171 +
166 172 DEBUGP("Looks like a total of %lu stubs, max\n", relocs);
167 173 return relocs * sizeof(struct ppc64_stub_entry);
168 174 }
... ... @@ -440,6 +446,13 @@
440 446 return -ENOEXEC;
441 447 }
442 448 }
  449 +
  450 +#ifdef CONFIG_DYNAMIC_FTRACE
  451 + me->arch.toc = my_r2(sechdrs, me);
  452 + me->arch.tramp = stub_for_addr(sechdrs,
  453 + (unsigned long)ftrace_caller,
  454 + me);
  455 +#endif
443 456  
444 457 return 0;
445 458 }
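
The me->arch.toc saved here is the r2 value that the PPC64 __ftrace_make_nop() needs when it unwinds a trampoline: bytes 2, 3, 6 and 7 of the stub's first two instructions form a 32-bit TOC offset, and the function descriptor sits 32 bytes past that offset. A standalone sketch of the byte-level decode with made-up values (positive low halfword, so no sign-extension subtleties):

#include <stdio.h>

int main(void)
{
	/* first two stub instructions, big-endian bytes (sample values):
	 * addis r12,r2,0x0012 ; addi r12,r12,0x3450 */
	unsigned char jmp[8] = { 0x3d, 0x82, 0x00, 0x12,
				 0x39, 0x8c, 0x34, 0x50 };
	unsigned long toc = 0xc000000001234000UL;	/* stand-in for me->arch.toc */
	int offset;

	/* bytes 2,3 carry the high halfword, bytes 6,7 the low halfword */
	offset = (unsigned)jmp[2] << 24 | (unsigned)jmp[3] << 16 |
		 (unsigned)jmp[6] << 8  | (unsigned)jmp[7];

	/* the target's descriptor lives 32 bytes past the offset into the TOC */
	printf("descriptor at %#lx\n", toc + offset + 32);
	return 0;
}
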