Commit 4e491d14f2506b218d678935c25a7027b79178b1

Authored by Steven Rostedt
Committed by Thomas Gleixner
1 parent e0eca07bad

ftrace: support for PowerPC

This patch adds full ftrace support for PowerPC (both 64 and 32 bit),
including dynamic tracing and function filtering.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Showing 11 changed files with 405 additions and 10 deletions
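The dynamic-tracing side of this patch works by rewriting the mcount call that gcc's -pg switch emits at every function entry, turning it into either a nop (0x60000000) or a "bl" to the tracer, which is what the new arch/powerpc/kernel/ftrace.c below computes. The stand-alone C sketch here only illustrates that instruction encoding; it is not taken from the patch, and the explicit +/-32 MB range check is an extra assumption added for clarity (ftrace.c relies on the recorded addresses already being in range).

    #include <stdio.h>
    #include <stdint.h>

    #define PPC_NOP 0x60000000u  /* the nop used to disable a call site */

    /* Build a "bl target" instruction to be placed at address 'site' (illustrative only). */
    static int make_bl(uint32_t site, uint32_t target, uint32_t *insn)
    {
    	int32_t offset = (int32_t)(target - site);

    	/* An unconditional relative branch reaches +/- 32 MB and must be word aligned. */
    	if (offset < -0x2000000 || offset > 0x1fffffc || (offset & 3))
    		return -1;

    	/* opcode 18 with the LK bit set; the low 26 bits carry the offset */
    	*insn = 0x48000001u | ((uint32_t)offset & 0x03fffffe);
    	return 0;
    }

    int main(void)
    {
    	uint32_t insn;

    	/* ftrace.c works from the mcount return address, so it first backs the
    	 * recorded ip up by CALL_BACK (4) to land on the call instruction. */
    	if (make_bl(0x10000200, 0x10000800, &insn) == 0)
    		printf("bl: 0x%08x  nop: 0x%08x\n", insn, PPC_NOP);
    	return 0;
    }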

arch/powerpc/Kconfig
... ... @@ -105,11 +105,12 @@
105 105 config PPC
106 106 bool
107 107 default y
  108 + select HAVE_FTRACE
108 109 select HAVE_IDE
109   - select HAVE_OPROFILE
110 110 select HAVE_KPROBES
111 111 select HAVE_KRETPROBES
112 112 select HAVE_LMB
  113 + select HAVE_OPROFILE
113 114  
114 115 config EARLY_PRINTK
115 116 bool
arch/powerpc/kernel/Makefile
... ... @@ -12,6 +12,18 @@
12 12 CFLAGS_btext.o += -fPIC
13 13 endif
14 14  
  15 +ifdef CONFIG_FTRACE
  16 +# Do not trace early boot code
  17 +CFLAGS_REMOVE_cputable.o = -pg
  18 +CFLAGS_REMOVE_prom_init.o = -pg
  19 +
  20 +ifdef CONFIG_DYNAMIC_FTRACE
  21 +# dynamic ftrace setup.
  22 +CFLAGS_REMOVE_ftrace.o = -pg
  23 +endif
  24 +
  25 +endif
  26 +
15 27 obj-y := cputable.o ptrace.o syscalls.o \
16 28 irq.o align.o signal_32.o pmc.o vdso.o \
17 29 init_task.o process.o systbl.o idle.o \
... ... @@ -77,6 +89,8 @@
77 89 machine_kexec_$(CONFIG_WORD_SIZE).o
78 90 obj-$(CONFIG_AUDIT) += audit.o
79 91 obj64-$(CONFIG_AUDIT) += compat_audit.o
  92 +
  93 +obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
80 94  
81 95 obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o
82 96  
arch/powerpc/kernel/entry_32.S
... ... @@ -1035,4 +1035,134 @@
1035 1035 /* XXX load up BATs and panic */
1036 1036  
1037 1037 #endif /* CONFIG_PPC_RTAS */
  1038 +
  1039 +#ifdef CONFIG_FTRACE
  1040 +#ifdef CONFIG_DYNAMIC_FTRACE
  1041 +_GLOBAL(mcount)
  1042 +_GLOBAL(_mcount)
  1043 + stwu r1,-48(r1)
  1044 + stw r3, 12(r1)
  1045 + stw r4, 16(r1)
  1046 + stw r5, 20(r1)
  1047 + stw r6, 24(r1)
  1048 + mflr r3
  1049 + stw r7, 28(r1)
  1050 + mfcr r5
  1051 + stw r8, 32(r1)
  1052 + stw r9, 36(r1)
  1053 + stw r10,40(r1)
  1054 + stw r3, 44(r1)
  1055 + stw r5, 8(r1)
  1056 + .globl mcount_call
  1057 +mcount_call:
  1058 + bl ftrace_stub
  1059 + nop
  1060 + lwz r6, 8(r1)
  1061 + lwz r0, 44(r1)
  1062 + lwz r3, 12(r1)
  1063 + mtctr r0
  1064 + lwz r4, 16(r1)
  1065 + mtcr r6
  1066 + lwz r5, 20(r1)
  1067 + lwz r6, 24(r1)
  1068 + lwz r0, 52(r1)
  1069 + lwz r7, 28(r1)
  1070 + lwz r8, 32(r1)
  1071 + mtlr r0
  1072 + lwz r9, 36(r1)
  1073 + lwz r10,40(r1)
  1074 + addi r1, r1, 48
  1075 + bctr
  1076 +
  1077 +_GLOBAL(ftrace_caller)
  1078 + /* Based off of objdump output from glibc */
  1079 + stwu r1,-48(r1)
  1080 + stw r3, 12(r1)
  1081 + stw r4, 16(r1)
  1082 + stw r5, 20(r1)
  1083 + stw r6, 24(r1)
  1084 + mflr r3
  1085 + lwz r4, 52(r1)
  1086 + mfcr r5
  1087 + stw r7, 28(r1)
  1088 + stw r8, 32(r1)
  1089 + stw r9, 36(r1)
  1090 + stw r10,40(r1)
  1091 + stw r3, 44(r1)
  1092 + stw r5, 8(r1)
  1093 +.globl ftrace_call
  1094 +ftrace_call:
  1095 + bl ftrace_stub
  1096 + nop
  1097 + lwz r6, 8(r1)
  1098 + lwz r0, 44(r1)
  1099 + lwz r3, 12(r1)
  1100 + mtctr r0
  1101 + lwz r4, 16(r1)
  1102 + mtcr r6
  1103 + lwz r5, 20(r1)
  1104 + lwz r6, 24(r1)
  1105 + lwz r0, 52(r1)
  1106 + lwz r7, 28(r1)
  1107 + lwz r8, 32(r1)
  1108 + mtlr r0
  1109 + lwz r9, 36(r1)
  1110 + lwz r10,40(r1)
  1111 + addi r1, r1, 48
  1112 + bctr
  1113 +#else
  1114 +_GLOBAL(mcount)
  1115 +_GLOBAL(_mcount)
  1116 + stwu r1,-48(r1)
  1117 + stw r3, 12(r1)
  1118 + stw r4, 16(r1)
  1119 + stw r5, 20(r1)
  1120 + stw r6, 24(r1)
  1121 + mflr r3
  1122 + lwz r4, 52(r1)
  1123 + mfcr r5
  1124 + stw r7, 28(r1)
  1125 + stw r8, 32(r1)
  1126 + stw r9, 36(r1)
  1127 + stw r10,40(r1)
  1128 + stw r3, 44(r1)
  1129 + stw r5, 8(r1)
  1130 +
  1131 + LOAD_REG_ADDR(r5, ftrace_trace_function)
  1132 +#if 0
  1133 + mtctr r3
  1134 + mr r1, r5
  1135 + bctrl
  1136 +#endif
  1137 + lwz r5,0(r5)
  1138 +#if 1
  1139 + mtctr r5
  1140 + bctrl
  1141 +#else
  1142 + bl ftrace_stub
  1143 +#endif
  1144 + nop
  1145 +
  1146 + lwz r6, 8(r1)
  1147 + lwz r0, 44(r1)
  1148 + lwz r3, 12(r1)
  1149 + mtctr r0
  1150 + lwz r4, 16(r1)
  1151 + mtcr r6
  1152 + lwz r5, 20(r1)
  1153 + lwz r6, 24(r1)
  1154 + lwz r0, 52(r1)
  1155 + lwz r7, 28(r1)
  1156 + lwz r8, 32(r1)
  1157 + mtlr r0
  1158 + lwz r9, 36(r1)
  1159 + lwz r10,40(r1)
  1160 + addi r1, r1, 48
  1161 + bctr
  1162 +#endif
  1163 +
  1164 +_GLOBAL(ftrace_stub)
  1165 + blr
  1166 +
  1167 +#endif /* CONFIG_FTRACE */
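In the non-dynamic case above, _mcount loads ftrace_trace_function and branches to it with r3 holding the traced function's ip (taken from LR, i.e. the address just past the mcount call) and r4 holding parent_ip (loaded from 52(r1), the traced function's own return address). Below is a minimal sketch of such a callback; it assumes the generic ftrace API of this era (a two-argument ftrace_func_t, struct ftrace_ops with a .func member, register_ftrace_function), and the "my_*" names are made up for illustration.

    #include <linux/ftrace.h>
    #include <linux/module.h>
    #include <linux/init.h>

    static unsigned long my_hits;

    /* Runs on every traced function entry; keep it cheap and marked notrace. */
    static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip)
    {
    	my_hits++;
    }

    static struct ftrace_ops my_ops = {
    	.func = my_trace_func,
    };

    static int __init my_trace_init(void)
    {
    	return register_ftrace_function(&my_ops);
    }

    static void __exit my_trace_exit(void)
    {
    	unregister_ftrace_function(&my_ops);
    	pr_info("traced %lu calls\n", my_hits);
    }

    module_init(my_trace_init);
    module_exit(my_trace_exit);
    MODULE_LICENSE("GPL");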
arch/powerpc/kernel/entry_64.S
... ... @@ -870,4 +870,66 @@
870 870 ld r0,16(r1)
871 871 mtlr r0
872 872 blr
  873 +
  874 +#ifdef CONFIG_FTRACE
  875 +#ifdef CONFIG_DYNAMIC_FTRACE
  876 +_GLOBAL(mcount)
  877 +_GLOBAL(_mcount)
  878 + /* Taken from output of objdump from lib64/glibc */
  879 + mflr r3
  880 + stdu r1, -112(r1)
  881 + std r3, 128(r1)
  882 + .globl mcount_call
  883 +mcount_call:
  884 + bl ftrace_stub
  885 + nop
  886 + ld r0, 128(r1)
  887 + mtlr r0
  888 + addi r1, r1, 112
  889 + blr
  890 +
  891 +_GLOBAL(ftrace_caller)
  892 + /* Taken from output of objdump from lib64/glibc */
  893 + mflr r3
  894 + ld r11, 0(r1)
  895 + stdu r1, -112(r1)
  896 + std r3, 128(r1)
  897 + ld r4, 16(r11)
  898 +.globl ftrace_call
  899 +ftrace_call:
  900 + bl ftrace_stub
  901 + nop
  902 + ld r0, 128(r1)
  903 + mtlr r0
  904 + addi r1, r1, 112
  905 +_GLOBAL(ftrace_stub)
  906 + blr
  907 +#else
  908 +_GLOBAL(mcount)
  909 + blr
  910 +
  911 +_GLOBAL(_mcount)
  912 + /* Taken from output of objdump from lib64/glibc */
  913 + mflr r3
  914 + ld r11, 0(r1)
  915 + stdu r1, -112(r1)
  916 + std r3, 128(r1)
  917 + ld r4, 16(r11)
  918 +
  919 +
  920 + LOAD_REG_ADDR(r5,ftrace_trace_function)
  921 + ld r5,0(r5)
  922 + ld r5,0(r5)
  923 + mtctr r5
  924 + bctrl
  925 +
  926 + nop
  927 + ld r0, 128(r1)
  928 + mtlr r0
  929 + addi r1, r1, 112
  930 +_GLOBAL(ftrace_stub)
  931 + blr
  932 +
  933 +#endif
  934 +#endif
arch/powerpc/kernel/ftrace.c
  1 +/*
  2 + * Code for replacing ftrace calls with jumps.
  3 + *
  4 + * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  5 + *
  6 + * Thanks go out to P.A. Semi, Inc. for supplying me with a PPC64 box.
  7 + *
  8 + */
  9 +
  10 +#include <linux/spinlock.h>
  11 +#include <linux/hardirq.h>
  12 +#include <linux/ftrace.h>
  13 +#include <linux/percpu.h>
  14 +#include <linux/init.h>
  15 +#include <linux/list.h>
  16 +
  17 +#include <asm/cacheflush.h>
  18 +
  19 +#define CALL_BACK 4
  20 +
  21 +static unsigned int ftrace_nop = 0x60000000;
  22 +
  23 +#ifdef CONFIG_PPC32
  24 +# define GET_ADDR(addr) addr
  25 +#else
  26 +/* PowerPC64's functions are data that points to the functions */
  27 +# define GET_ADDR(addr) *(unsigned long *)addr
  28 +#endif
  29 +
  30 +notrace int ftrace_ip_converted(unsigned long ip)
  31 +{
  32 + unsigned int save;
  33 +
  34 + ip -= CALL_BACK;
  35 + save = *(unsigned int *)ip;
  36 +
  37 + return save == ftrace_nop;
  38 +}
  39 +
  40 +static unsigned int notrace ftrace_calc_offset(long ip, long addr)
  41 +{
  42 + return (int)((addr + CALL_BACK) - ip);
  43 +}
  44 +
  45 +notrace unsigned char *ftrace_nop_replace(void)
  46 +{
  47 + return (char *)&ftrace_nop;
  48 +}
  49 +
  50 +notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
  51 +{
  52 + static unsigned int op;
  53 +
  54 + addr = GET_ADDR(addr);
  55 +
  56 + /* Set to "bl addr" */
  57 + op = 0x48000001 | (ftrace_calc_offset(ip, addr) & 0x03fffffe);
  58 +
  59 + /*
  60 + * No locking needed, this must be called via kstop_machine
  61 + * which in essence is like running on a uniprocessor machine.
  62 + */
  63 + return (unsigned char *)&op;
  64 +}
  65 +
  66 +#ifdef CONFIG_PPC64
  67 +# define _ASM_ALIGN " .align 3 "
  68 +# define _ASM_PTR " .llong "
  69 +#else
  70 +# define _ASM_ALIGN " .align 2 "
  71 +# define _ASM_PTR " .long "
  72 +#endif
  73 +
  74 +notrace int
  75 +ftrace_modify_code(unsigned long ip, unsigned char *old_code,
  76 + unsigned char *new_code)
  77 +{
  78 + unsigned replaced;
  79 + unsigned old = *(unsigned *)old_code;
  80 + unsigned new = *(unsigned *)new_code;
  81 + int faulted = 0;
  82 +
  83 + /* move the IP back to the start of the call */
  84 + ip -= CALL_BACK;
  85 +
  86 + /*
  87 + * Note: Due to modules and __init, code can
  88 + * disappear and change, we need to protect against faulting
  89 + * as well as code changing.
  90 + *
  91 + * No real locking needed, this code is run through
  92 + * kstop_machine.
  93 + */
  94 + asm volatile (
  95 + "1: lwz %1, 0(%2)\n"
  96 + " cmpw %1, %5\n"
  97 + " bne 2f\n"
  98 + " stwu %3, 0(%2)\n"
  99 + "2:\n"
  100 + ".section .fixup, \"ax\"\n"
  101 + "3: li %0, 1\n"
  102 + " b 2b\n"
  103 + ".previous\n"
  104 + ".section __ex_table,\"a\"\n"
  105 + _ASM_ALIGN "\n"
  106 + _ASM_PTR "1b, 3b\n"
  107 + ".previous"
  108 + : "=r"(faulted), "=r"(replaced)
  109 + : "r"(ip), "r"(new),
  110 + "0"(faulted), "r"(old)
  111 + : "memory");
  112 +
  113 + if (replaced != old && replaced != new)
  114 + faulted = 2;
  115 +
  116 + if (!faulted)
  117 + flush_icache_range(ip, ip + 8);
  118 +
  119 + return faulted;
  120 +}
  121 +
  122 +notrace int ftrace_update_ftrace_func(ftrace_func_t func)
  123 +{
  124 + unsigned long ip = (unsigned long)(&ftrace_call);
  125 + unsigned char old[4], *new;
  126 + int ret;
  127 +
  128 + ip += CALL_BACK;
  129 +
  130 + memcpy(old, &ftrace_call, 4);
  131 + new = ftrace_call_replace(ip, (unsigned long)func);
  132 + ret = ftrace_modify_code(ip, old, new);
  133 +
  134 + return ret;
  135 +}
  136 +
  137 +notrace int ftrace_mcount_set(unsigned long *data)
  138 +{
  139 + unsigned long ip = (long)(&mcount_call);
  140 + unsigned long *addr = data;
  141 + unsigned char old[4], *new;
  142 +
  143 + /* ip is at the location, but modify code will subtract this */
  144 + ip += CALL_BACK;
  145 +
  146 + /*
  147 + * Replace the mcount stub with a pointer to the
  148 + * ip recorder function.
  149 + */
  150 + memcpy(old, &mcount_call, 4);
  151 + new = ftrace_call_replace(ip, *addr);
  152 + *addr = ftrace_modify_code(ip, old, new);
  153 +
  154 + return 0;
  155 +}
  156 +
  157 +int __init ftrace_dyn_arch_init(void *data)
  158 +{
  159 + /* This is running in kstop_machine */
  160 +
  161 + ftrace_mcount_set(data);
  162 +
  163 + return 0;
  164 +}
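The GET_ADDR() indirection above, and the pair of "ld r5,0(r5)" loads in the entry_64.S stub, both come from the 64-bit ABI's use of function descriptors: a ppc64 function pointer refers to a descriptor whose first doubleword is the real entry address. The sketch below only illustrates that extra dereference; the struct layout shown is the conventional ELFv1 one and is an assumption of this example, not something the patch defines.

    #include <stdio.h>

    /* Conventional ppc64 (ELFv1) function descriptor layout, assumed here for illustration. */
    struct func_desc {
    	unsigned long entry;  /* actual code address of the function */
    	unsigned long toc;    /* TOC pointer the callee expects in r2 */
    	unsigned long env;    /* environment pointer, unused by C */
    };

    /* Equivalent of ftrace.c's GET_ADDR(addr) on ppc64: one extra load. */
    static unsigned long entry_point(unsigned long addr)
    {
    	return ((struct func_desc *)addr)->entry;
    }

    int main(void)
    {
    	struct func_desc fake = { 0x10001230UL, 0, 0 };

    	/* entry_64.S does the same thing with two loads: the first fetches the
    	 * descriptor address stored in ftrace_trace_function, the second fetches
    	 * the entry doubleword from that descriptor before mtctr/bctrl. */
    	printf("entry = 0x%lx\n", entry_point((unsigned long)&fake));
    	return 0;
    }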
arch/powerpc/kernel/io.c
... ... @@ -120,7 +120,8 @@
120 120  
121 121 #define IO_CHECK_ALIGN(v,a) ((((unsigned long)(v)) & ((a) - 1)) == 0)
122 122  
123   -void _memset_io(volatile void __iomem *addr, int c, unsigned long n)
  123 +notrace void
  124 +_memset_io(volatile void __iomem *addr, int c, unsigned long n)
124 125 {
125 126 void *p = (void __force *)addr;
126 127 u32 lc = c;
arch/powerpc/kernel/irq.c
... ... @@ -98,7 +98,7 @@
98 98  
99 99 int distribute_irqs = 1;
100 100  
101   -static inline unsigned long get_hard_enabled(void)
  101 +static inline notrace unsigned long get_hard_enabled(void)
102 102 {
103 103 unsigned long enabled;
104 104  
105 105  
... ... @@ -108,13 +108,13 @@
108 108 return enabled;
109 109 }
110 110  
111   -static inline void set_soft_enabled(unsigned long enable)
  111 +static inline notrace void set_soft_enabled(unsigned long enable)
112 112 {
113 113 __asm__ __volatile__("stb %0,%1(13)"
114 114 : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
115 115 }
116 116  
117   -void raw_local_irq_restore(unsigned long en)
  117 +notrace void raw_local_irq_restore(unsigned long en)
118 118 {
119 119 /*
120 120 * get_paca()->soft_enabled = en;
arch/powerpc/kernel/setup_32.c
... ... @@ -47,6 +47,11 @@
47 47 #include <asm/kgdb.h>
48 48 #endif
49 49  
  50 +#ifdef CONFIG_FTRACE
  51 +extern void _mcount(void);
  52 +EXPORT_SYMBOL(_mcount);
  53 +#endif
  54 +
50 55 extern void bootx_init(unsigned long r4, unsigned long phys);
51 56  
52 57 int boot_cpuid;
... ... @@ -81,7 +86,7 @@
81 86 * from the address that it was linked at, so we must use RELOC/PTRRELOC
82 87 * to access static data (including strings). -- paulus
83 88 */
84   -unsigned long __init early_init(unsigned long dt_ptr)
  89 +notrace unsigned long __init early_init(unsigned long dt_ptr)
85 90 {
86 91 unsigned long offset = reloc_offset();
87 92 struct cpu_spec *spec;
... ... @@ -111,7 +116,7 @@
111 116 * This is called very early on the boot process, after a minimal
112 117 * MMU environment has been set up but before MMU_init is called.
113 118 */
114   -void __init machine_init(unsigned long dt_ptr, unsigned long phys)
  119 +notrace void __init machine_init(unsigned long dt_ptr, unsigned long phys)
115 120 {
116 121 /* Enable early debugging if any specified (see udbg.h) */
117 122 udbg_early_init();
... ... @@ -133,7 +138,7 @@
133 138  
134 139 #ifdef CONFIG_BOOKE_WDT
135 140 /* Checks wdt=x and wdt_period=xx command-line option */
136   -int __init early_parse_wdt(char *p)
  141 +notrace int __init early_parse_wdt(char *p)
137 142 {
138 143 if (p && strncmp(p, "0", 1) != 0)
139 144 booke_wdt_enabled = 1;
arch/powerpc/kernel/setup_64.c
... ... @@ -85,6 +85,11 @@
85 85 };
86 86 EXPORT_SYMBOL_GPL(ppc64_caches);
87 87  
  88 +#ifdef CONFIG_FTRACE
  89 +extern void _mcount(void);
  90 +EXPORT_SYMBOL(_mcount);
  91 +#endif
  92 +
88 93 /*
89 94 * These are used in binfmt_elf.c to put aux entries on the stack
90 95 * for each elf executable being started.
arch/powerpc/platforms/powermac/Makefile
1 1 CFLAGS_bootx_init.o += -fPIC
2 2  
  3 +ifdef CONFIG_FTRACE
  4 +# Do not trace early boot code
  5 +CFLAGS_REMOVE_bootx_init.o = -pg
  6 +endif
  7 +
3 8 obj-y += pic.o setup.o time.o feature.o pci.o \
4 9 sleep.o low_i2c.o cache.o pfunc_core.o \
5 10 pfunc_base.o
kernel/trace/trace_selftest.c
... ... @@ -123,6 +123,7 @@
123 123 int ret;
124 124 int save_ftrace_enabled = ftrace_enabled;
125 125 int save_tracer_enabled = tracer_enabled;
  126 + char *func_name;
126 127  
127 128 /* The ftrace test PASSED */
128 129 printk(KERN_CONT "PASSED\n");
129 130  
... ... @@ -142,9 +143,15 @@
142 143 return ret;
143 144 }
144 145  
  146 + /*
  147 + * Some archs *cough*PowerPC*cough* add characters to the
  148 + * start of the function names. We simply put a '*' to
  149 + * accommodate them.
  150 + */
  151 + func_name = "*" STR(DYN_FTRACE_TEST_NAME);
  152 +
145 153 /* filter only on our function */
146   - ftrace_set_filter(STR(DYN_FTRACE_TEST_NAME),
147   - sizeof(STR(DYN_FTRACE_TEST_NAME)), 1);
  154 + ftrace_set_filter(func_name, strlen(func_name), 1);
148 155  
149 156 /* enable tracing */
150 157 tr->ctrl = 1;