Commit dfd9f7abc0fb67b5781f340d982384cea53b2884

Authored by Heiko Carstens
Committed by Martin Schwidefsky
1 parent a2b53673fa

[S390] ftrace: add dynamic ftrace support

Dynamic ftrace support for s390.

Select HAVE_FTRACE_MCOUNT_RECORD and HAVE_DYNAMIC_FTRACE, implement the arch backend (ftrace_make_nop, ftrace_make_call, ftrace_update_ftrace_func) in arch/s390/kernel/ftrace.c, add an ftrace_caller trampoline to mcount.S that dispatches through ftrace_dyn_func, keep the trampoline address in the lowcore at __LC_FTRACE_FUNC so patched call sites can reach it, and teach scripts/recordmcount.pl about 31-bit and 64-bit s390 mcount relocations.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Showing 10 changed files with 276 additions and 29 deletions

arch/s390/Kconfig
... ... @@ -82,6 +82,8 @@
82 82 select USE_GENERIC_SMP_HELPERS if SMP
83 83 select HAVE_SYSCALL_WRAPPERS
84 84 select HAVE_FUNCTION_TRACER
  85 + select HAVE_FTRACE_MCOUNT_RECORD
  86 + select HAVE_DYNAMIC_FTRACE
85 87 select HAVE_DEFAULT_NO_SPIN_MUTEXES
86 88 select HAVE_OPROFILE
87 89 select HAVE_KPROBES
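The two new HAVE_ selects only advertise that the architecture backend exists; the features themselves are enabled through the generic tracing options. A minimal .config fragment that would exercise the new code (option names come from the generic tracing Kconfig, not from this diff):

CONFIG_FUNCTION_TRACER=y
CONFIG_DYNAMIC_FTRACE=y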
arch/s390/include/asm/ftrace.h
... ... @@ -2,8 +2,27 @@
2 2 #define _ASM_S390_FTRACE_H
3 3  
4 4 #ifndef __ASSEMBLY__
  5 +
5 6 extern void _mcount(void);
  7 +extern unsigned long ftrace_dyn_func;
  8 +
  9 +struct dyn_arch_ftrace { };
  10 +
  11 +#define MCOUNT_ADDR ((long)_mcount)
  12 +
  13 +#ifdef CONFIG_64BIT
  14 +#define MCOUNT_INSN_SIZE 24
  15 +#define MCOUNT_OFFSET 14
  16 +#else
  17 +#define MCOUNT_INSN_SIZE 30
  18 +#define MCOUNT_OFFSET 8
6 19 #endif
7 20  
  21 +static inline unsigned long ftrace_call_adjust(unsigned long addr)
  22 +{
  23 + return addr - MCOUNT_OFFSET;
  24 +}
  25 +
  26 +#endif /* __ASSEMBLY__ */
8 27 #endif /* _ASM_S390_FTRACE_H */
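MCOUNT_INSN_SIZE is the size of the code block that gets patched at each call site (24 bytes on 64-bit, 30 on 31-bit), while MCOUNT_OFFSET is the distance from the address that recordmcount.pl records back to the start of that block; the generic code runs every __mcount_loc entry through ftrace_call_adjust(). A trivial standalone illustration of the adjustment; the recorded address is hypothetical and the constant is the 64-bit value from the header above:

#include <stdio.h>

#define MCOUNT_OFFSET 14	/* 64-bit value from asm/ftrace.h above */

/* Same arithmetic as the inline helper in asm/ftrace.h. */
static unsigned long ftrace_call_adjust(unsigned long addr)
{
	return addr - MCOUNT_OFFSET;
}

int main(void)
{
	unsigned long recorded = 0x100a3eUL;	/* hypothetical __mcount_loc entry */

	/* 0x100a3e - 14 = 0x100a30: start of the patched mcount block */
	printf("%#lx\n", ftrace_call_adjust(recorded));
	return 0;
}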
arch/s390/include/asm/lowcore.h
... ... @@ -68,6 +68,7 @@
68 68 #define __LC_CPUID 0x02b0
69 69 #define __LC_INT_CLOCK 0x02c8
70 70 #define __LC_MACHINE_FLAGS 0x02d8
  71 +#define __LC_FTRACE_FUNC 0x02dc
71 72 #define __LC_IRB 0x0300
72 73 #define __LC_PFAULT_INTPARM 0x0080
73 74 #define __LC_CPU_TIMER_SAVE_AREA 0x00d8
... ... @@ -113,6 +114,7 @@
113 114 #define __LC_INT_CLOCK 0x0340
114 115 #define __LC_VDSO_PER_CPU 0x0350
115 116 #define __LC_MACHINE_FLAGS 0x0358
  117 +#define __LC_FTRACE_FUNC 0x0360
116 118 #define __LC_IRB 0x0380
117 119 #define __LC_PASTE 0x03c0
118 120 #define __LC_PFAULT_INTPARM 0x11b8
... ... @@ -281,7 +283,8 @@
281 283 __u64 int_clock; /* 0x02c8 */
282 284 __u64 clock_comparator; /* 0x02d0 */
283 285 __u32 machine_flags; /* 0x02d8 */
284   - __u8 pad_0x02dc[0x0300-0x02dc]; /* 0x02dc */
  286 + __u32 ftrace_func; /* 0x02dc */
  287 + __u8 pad_0x02f0[0x0300-0x02f0]; /* 0x02f0 */
285 288  
286 289 /* Interrupt response block */
287 290 __u8 irb[64]; /* 0x0300 */
... ... @@ -386,7 +389,8 @@
386 389 __u64 clock_comparator; /* 0x0348 */
387 390 __u64 vdso_per_cpu_data; /* 0x0350 */
388 391 __u64 machine_flags; /* 0x0358 */
389   - __u8 pad_0x0360[0x0380-0x0360]; /* 0x0360 */
  392 + __u64 ftrace_func; /* 0x0360 */
  393 + __u8 pad_0x0368[0x0380-0x0368]; /* 0x0368 */
390 394  
391 395 /* Interrupt response block. */
392 396 __u8 irb[64]; /* 0x0380 */
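The new ftrace_func slot has to sit exactly at the offset named by __LC_FTRACE_FUNC (0x02dc on 31-bit, 0x0360 on 64-bit), because the patched call sites load the function address from the lowcore through that constant. A minimal sketch of a build-time consistency check; this is not part of the patch and assumes the structure is the struct _lowcore defined in this header:

#include <linux/kernel.h>
#include <linux/stddef.h>
#include <asm/lowcore.h>

/* Illustrative only: fail the build if the C layout and the assembler
 * offset ever drift apart. */
static inline void lowcore_ftrace_offset_check(void)
{
	BUILD_BUG_ON(offsetof(struct _lowcore, ftrace_func) != __LC_FTRACE_FUNC);
}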
arch/s390/kernel/Makefile
... ... @@ -7,6 +7,10 @@
7 7 CFLAGS_REMOVE_early.o = -pg
8 8 endif
9 9  
  10 +ifdef CONFIG_DYNAMIC_FTRACE
  11 +CFLAGS_REMOVE_ftrace.o = -pg
  12 +endif
  13 +
10 14 #
11 15 # Passing null pointers is ok for smp code, since we access the lowcore here.
12 16 #
... ... @@ -41,6 +45,7 @@
41 45 obj-$(CONFIG_STACKTRACE) += stacktrace.o
42 46 obj-$(CONFIG_KPROBES) += kprobes.o
43 47 obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
  48 +obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
44 49  
45 50 # Kexec part
46 51 S390_KEXEC_OBJS := machine_kexec.o crash.o
arch/s390/kernel/early.c
... ... @@ -11,6 +11,7 @@
11 11 #include <linux/errno.h>
12 12 #include <linux/string.h>
13 13 #include <linux/ctype.h>
  14 +#include <linux/ftrace.h>
14 15 #include <linux/lockdep.h>
15 16 #include <linux/module.h>
16 17 #include <linux/pfn.h>
... ... @@ -410,6 +411,9 @@
410 411 sclp_facilities_detect();
411 412 detect_memory_layout(memory_chunk);
412 413 S390_lowcore.machine_flags = machine_flags;
  414 +#ifdef CONFIG_DYNAMIC_FTRACE
  415 + S390_lowcore.ftrace_func = (unsigned long)ftrace_caller;
  416 +#endif
413 417 lockdep_on();
414 418 }
arch/s390/kernel/ftrace.c
  1 +/*
  2 + * Dynamic function tracer architecture backend.
  3 + *
  4 + * Copyright IBM Corp. 2009
  5 + *
  6 + * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
  7 + *
  8 + */
  9 +
  10 +#include <linux/uaccess.h>
  11 +#include <linux/ftrace.h>
  12 +#include <linux/kernel.h>
  13 +#include <linux/types.h>
  14 +#include <asm/lowcore.h>
  15 +
  16 +void ftrace_disable_code(void);
  17 +void ftrace_call_code(void);
  18 +void ftrace_nop_code(void);
  19 +
  20 +#define FTRACE_INSN_SIZE 4
  21 +
  22 +#ifdef CONFIG_64BIT
  23 +
  24 +asm(
  25 + " .align 4\n"
  26 + "ftrace_disable_code:\n"
  27 + " j 0f\n"
  28 + " .word 0x0024\n"
  29 + " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
  30 + " basr %r14,%r1\n"
  31 + " lg %r14,8(15)\n"
  32 + " lgr %r0,%r0\n"
  33 + "0:\n");
  34 +
  35 +asm(
  36 + " .align 4\n"
  37 + "ftrace_nop_code:\n"
  38 + " j .+"__stringify(MCOUNT_INSN_SIZE)"\n");
  39 +
  40 +asm(
  41 + " .align 4\n"
  42 + "ftrace_call_code:\n"
  43 + " stg %r14,8(%r15)\n");
  44 +
  45 +#else /* CONFIG_64BIT */
  46 +
  47 +asm(
  48 + " .align 4\n"
  49 + "ftrace_disable_code:\n"
  50 + " j 0f\n"
  51 + " l %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
  52 + " basr %r14,%r1\n"
  53 + " l %r14,4(%r15)\n"
  54 + " j 0f\n"
  55 + " bcr 0,%r7\n"
  56 + " bcr 0,%r7\n"
  57 + " bcr 0,%r7\n"
  58 + " bcr 0,%r7\n"
  59 + " bcr 0,%r7\n"
  60 + " bcr 0,%r7\n"
  61 + "0:\n");
  62 +
  63 +asm(
  64 + " .align 4\n"
  65 + "ftrace_nop_code:\n"
  66 + " j .+"__stringify(MCOUNT_INSN_SIZE)"\n");
  67 +
  68 +asm(
  69 + " .align 4\n"
  70 + "ftrace_call_code:\n"
  71 + " st %r14,4(%r15)\n");
  72 +
  73 +#endif /* CONFIG_64BIT */
  74 +
  75 +static int ftrace_modify_code(unsigned long ip,
  76 + void *old_code, int old_size,
  77 + void *new_code, int new_size)
  78 +{
  79 + unsigned char replaced[MCOUNT_INSN_SIZE];
  80 +
  81 + /*
  82 + * Note: Due to modules code can disappear and change.
  83 + * We need to protect against faulting as well as code
  84 + * changing. We do this by using the probe_kernel_*
  85 + * functions.
  86 + * This however is just a simple sanity check.
  87 + */
  88 + if (probe_kernel_read(replaced, (void *)ip, old_size))
  89 + return -EFAULT;
  90 + if (memcmp(replaced, old_code, old_size) != 0)
  91 + return -EINVAL;
  92 + if (probe_kernel_write((void *)ip, new_code, new_size))
  93 + return -EPERM;
  94 + return 0;
  95 +}
  96 +
  97 +static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
  98 + unsigned long addr)
  99 +{
  100 + return ftrace_modify_code(rec->ip,
  101 + ftrace_call_code, FTRACE_INSN_SIZE,
  102 + ftrace_disable_code, MCOUNT_INSN_SIZE);
  103 +}
  104 +
  105 +int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
  106 + unsigned long addr)
  107 +{
  108 + if (addr == MCOUNT_ADDR)
  109 + return ftrace_make_initial_nop(mod, rec, addr);
  110 + return ftrace_modify_code(rec->ip,
  111 + ftrace_call_code, FTRACE_INSN_SIZE,
  112 + ftrace_nop_code, FTRACE_INSN_SIZE);
  113 +}
  114 +
  115 +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
  116 +{
  117 + return ftrace_modify_code(rec->ip,
  118 + ftrace_nop_code, FTRACE_INSN_SIZE,
  119 + ftrace_call_code, FTRACE_INSN_SIZE);
  120 +}
  121 +
  122 +int ftrace_update_ftrace_func(ftrace_func_t func)
  123 +{
  124 + ftrace_dyn_func = (unsigned long)func;
  125 + return 0;
  126 +}
  127 +
  128 +int __init ftrace_dyn_arch_init(void *data)
  129 +{
  130 + *(unsigned long *)data = 0;
  131 + return 0;
  132 +}
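ftrace_modify_code() is a compare-then-write pattern: read the bytes currently at the call site, verify they still match the expected old sequence, and only then install the replacement, using the probe_kernel_* helpers so that text that disappeared with a module returns -EFAULT instead of faulting. A compact userspace analogue of that pattern, illustrative only; patch_bytes() and the byte values are made up for the example:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Verify the bytes currently at the patch site before replacing them. */
static int patch_bytes(unsigned char *ip, const void *old, size_t old_size,
		       const void *new, size_t new_size)
{
	if (memcmp(ip, old, old_size) != 0)
		return -EINVAL;		/* site does not look as expected */
	memcpy(ip, new, new_size);	/* kernel: probe_kernel_write() */
	return 0;
}

int main(void)
{
	unsigned char site[4] = { 0x11, 0x22, 0x33, 0x44 };	/* placeholder "call" */
	static const unsigned char old[4] = { 0x11, 0x22, 0x33, 0x44 };
	static const unsigned char new[4] = { 0x55, 0x66, 0x77, 0x88 };	/* placeholder "nop" */

	printf("%d\n", patch_bytes(site, old, 4, new, 4));	/* 0: patched */
	printf("%d\n", patch_bytes(site, old, 4, new, 4));	/* -EINVAL: already patched */
	return 0;
}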
arch/s390/kernel/mcount.S
1 1 /*
2   - * Copyright IBM Corp. 2008
  2 + * Copyright IBM Corp. 2008,2009
3 3 *
4 4 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
5 5 *
... ... @@ -7,36 +7,46 @@
7 7  
8 8 #include <asm/asm-offsets.h>
9 9  
10   -#ifndef CONFIG_64BIT
11   -.globl _mcount
  10 + .globl ftrace_stub
  11 +ftrace_stub:
  12 + br %r14
  13 +
  14 +#ifdef CONFIG_64BIT
  15 +
  16 +#ifdef CONFIG_DYNAMIC_FTRACE
  17 +
  18 + .globl _mcount
12 19 _mcount:
13   - stm %r0,%r5,8(%r15)
14   - st %r14,56(%r15)
15   - lr %r1,%r15
16   - ahi %r15,-96
17   - l %r3,100(%r15)
18   - la %r2,0(%r14)
19   - st %r1,__SF_BACKCHAIN(%r15)
20   - la %r3,0(%r3)
21   - bras %r14,0f
22   - .long ftrace_trace_function
23   -0: l %r14,0(%r14)
24   - l %r14,0(%r14)
25   - basr %r14,%r14
26   - ahi %r15,96
27   - lm %r0,%r5,8(%r15)
28   - l %r14,56(%r15)
29 20 br %r14
30 21  
31   -.globl ftrace_stub
32   -ftrace_stub:
  22 + .globl ftrace_caller
  23 +ftrace_caller:
  24 + stmg %r2,%r5,32(%r15)
  25 + stg %r14,112(%r15)
  26 + lgr %r1,%r15
  27 + aghi %r15,-160
  28 + stg %r1,__SF_BACKCHAIN(%r15)
  29 + lgr %r2,%r14
  30 + lg %r3,168(%r15)
  31 + larl %r14,ftrace_dyn_func
  32 + lg %r14,0(%r14)
  33 + basr %r14,%r14
  34 + aghi %r15,160
  35 + lmg %r2,%r5,32(%r15)
  36 + lg %r14,112(%r15)
33 37 br %r14
34 38  
35   -#else /* CONFIG_64BIT */
  39 + .data
  40 + .globl ftrace_dyn_func
  41 +ftrace_dyn_func:
  42 + .quad ftrace_stub
  43 + .previous
36 44  
37   -.globl _mcount
  45 +#else /* CONFIG_DYNAMIC_FTRACE */
  46 +
  47 + .globl _mcount
38 48 _mcount:
39   - stmg %r0,%r5,16(%r15)
  49 + stmg %r2,%r5,32(%r15)
40 50 stg %r14,112(%r15)
41 51 lgr %r1,%r15
42 52 aghi %r15,-160
... ... @@ -47,13 +57,68 @@
47 57 lg %r14,0(%r14)
48 58 basr %r14,%r14
49 59 aghi %r15,160
50   - lmg %r0,%r5,16(%r15)
  60 + lmg %r2,%r5,32(%r15)
51 61 lg %r14,112(%r15)
52 62 br %r14
53 63  
54   -.globl ftrace_stub
55   -ftrace_stub:
  64 +#endif /* CONFIG_DYNAMIC_FTRACE */
  65 +
  66 +#else /* CONFIG_64BIT */
  67 +
  68 +#ifdef CONFIG_DYNAMIC_FTRACE
  69 +
  70 + .globl _mcount
  71 +_mcount:
56 72 br %r14
57 73  
  74 + .globl ftrace_caller
  75 +ftrace_caller:
  76 + stm %r2,%r5,16(%r15)
  77 + st %r14,56(%r15)
  78 + lr %r1,%r15
  79 + ahi %r15,-96
  80 + l %r3,100(%r15)
  81 + la %r2,0(%r14)
  82 + st %r1,__SF_BACKCHAIN(%r15)
  83 + la %r3,0(%r3)
  84 + bras %r14,0f
  85 + .long ftrace_dyn_func
  86 +0: l %r14,0(%r14)
  87 + l %r14,0(%r14)
  88 + basr %r14,%r14
  89 + ahi %r15,96
  90 + lm %r2,%r5,16(%r15)
  91 + l %r14,56(%r15)
  92 + br %r14
  93 +
  94 + .data
  95 + .globl ftrace_dyn_func
  96 +ftrace_dyn_func:
  97 + .long ftrace_stub
  98 + .previous
  99 +
  100 +#else /* CONFIG_DYNAMIC_FTRACE */
  101 +
  102 + .globl _mcount
  103 +_mcount:
  104 + stm %r2,%r5,16(%r15)
  105 + st %r14,56(%r15)
  106 + lr %r1,%r15
  107 + ahi %r15,-96
  108 + l %r3,100(%r15)
  109 + la %r2,0(%r14)
  110 + st %r1,__SF_BACKCHAIN(%r15)
  111 + la %r3,0(%r3)
  112 + bras %r14,0f
  113 + .long ftrace_trace_function
  114 +0: l %r14,0(%r14)
  115 + l %r14,0(%r14)
  116 + basr %r14,%r14
  117 + ahi %r15,96
  118 + lm %r2,%r5,16(%r15)
  119 + l %r14,56(%r15)
  120 + br %r14
  121 +
  122 +#endif /* CONFIG_DYNAMIC_FTRACE */
58 123 #endif /* CONFIG_64BIT */
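ftrace_caller saves the argument registers, then calls whatever ftrace_dyn_func currently points at; ftrace_update_ftrace_func() (in ftrace.c above) repoints it whenever the generic core installs a new handler. A rough sketch of how a callback would be hooked in from a module through the generic API of this kernel generation; the ftrace_ops layout and the two-argument callback signature are era-specific assumptions and changed in later kernels:

#include <linux/ftrace.h>
#include <linux/module.h>

/* ip is the traced function, parent_ip its caller; this runs on every
 * traced call, so keep it trivial. */
static void my_trace_func(unsigned long ip, unsigned long parent_ip)
{
}

static struct ftrace_ops my_ops = {
	.func	= my_trace_func,
};

static int __init my_tracer_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_tracer_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_tracer_init);
module_exit(my_tracer_exit);
MODULE_LICENSE("GPL");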
arch/s390/kernel/setup.c
... ... @@ -42,6 +42,7 @@
42 42 #include <linux/ctype.h>
43 43 #include <linux/reboot.h>
44 44 #include <linux/topology.h>
  45 +#include <linux/ftrace.h>
45 46  
46 47 #include <asm/ipl.h>
47 48 #include <asm/uaccess.h>
... ... @@ -442,6 +443,7 @@
442 443 lc->steal_timer = S390_lowcore.steal_timer;
443 444 lc->last_update_timer = S390_lowcore.last_update_timer;
444 445 lc->last_update_clock = S390_lowcore.last_update_clock;
  446 + lc->ftrace_func = S390_lowcore.ftrace_func;
445 447 set_prefix((u32)(unsigned long) lc);
446 448 lowcore_ptr[0] = lc;
447 449 }
arch/s390/kernel/smp.c
... ... @@ -572,6 +572,7 @@
572 572 cpu_lowcore->cpu_nr = cpu;
573 573 cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
574 574 cpu_lowcore->machine_flags = S390_lowcore.machine_flags;
  575 + cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
575 576 eieio();
576 577  
577 578 while (signal_processor(cpu, sigp_restart) == sigp_busy)
scripts/recordmcount.pl
... ... @@ -185,6 +185,19 @@
185 185 $objcopy .= " -O elf32-i386";
186 186 $cc .= " -m32";
187 187  
  188 +} elsif ($arch eq "s390" && $bits == 32) {
  189 + $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_32\\s+_mcount\$";
  190 + $alignment = 4;
  191 + $ld .= " -m elf_s390";
  192 + $cc .= " -m31";
  193 +
  194 +} elsif ($arch eq "s390" && $bits == 64) {
  195 + $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$";
  196 + $alignment = 8;
  197 + $type = ".quad";
  198 + $ld .= " -m elf64_s390";
  199 + $cc .= " -m64";
  200 +
188 201 } elsif ($arch eq "sh") {
189 202 $alignment = 2;
190 203
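The 64-bit expression is meant to match relocation lines in the objdump -hdr output that recordmcount.pl parses, i.e. lines of roughly the following shape; the offset and spacing are illustrative, not verbatim tool output:

  14: R_390_PC32DBL	_mcount+0x2

The 31-bit case matches plain R_390_32 relocations against _mcount instead.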