Commit 23adec554a7648f99c8acc0caf49c66320cd2b84

Authored by Steven Rostedt
Committed by Thomas Gleixner
1 parent ffdc1a09ae

x86: add notrace annotations to vsyscall.

Add the notrace annotations to the vsyscall functions — these run in user
context rather than kernel context, so the tracer function cannot (and must
not) be called from them.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Showing 4 changed files with 14 additions and 10 deletions (side-by-side diff)

arch/x86/kernel/vsyscall_64.c
... ... @@ -42,7 +42,8 @@
42 42 #include <asm/topology.h>
43 43 #include <asm/vgtod.h>
44 44  
45   -#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
  45 +#define __vsyscall(nr) \
  46 + __attribute__ ((unused, __section__(".vsyscall_" #nr))) notrace
46 47 #define __syscall_clobber "r11","cx","memory"
47 48  
48 49 /*
arch/x86/vdso/vclock_gettime.c
... ... @@ -23,7 +23,7 @@
23 23  
24 24 #define gtod vdso_vsyscall_gtod_data
25 25  
26   -static long vdso_fallback_gettime(long clock, struct timespec *ts)
  26 +notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
27 27 {
28 28 long ret;
29 29 asm("syscall" : "=a" (ret) :
... ... @@ -31,7 +31,7 @@
31 31 return ret;
32 32 }
33 33  
34   -static inline long vgetns(void)
  34 +notrace static inline long vgetns(void)
35 35 {
36 36 long v;
37 37 cycles_t (*vread)(void);
... ... @@ -40,7 +40,7 @@
40 40 return (v * gtod->clock.mult) >> gtod->clock.shift;
41 41 }
42 42  
43   -static noinline int do_realtime(struct timespec *ts)
  43 +notrace static noinline int do_realtime(struct timespec *ts)
44 44 {
45 45 unsigned long seq, ns;
46 46 do {
... ... @@ -54,7 +54,8 @@
54 54 }
55 55  
56 56 /* Copy of the version in kernel/time.c which we cannot directly access */
57   -static void vset_normalized_timespec(struct timespec *ts, long sec, long nsec)
  57 +notrace static void
  58 +vset_normalized_timespec(struct timespec *ts, long sec, long nsec)
58 59 {
59 60 while (nsec >= NSEC_PER_SEC) {
60 61 nsec -= NSEC_PER_SEC;
... ... @@ -68,7 +69,7 @@
68 69 ts->tv_nsec = nsec;
69 70 }
70 71  
71   -static noinline int do_monotonic(struct timespec *ts)
  72 +notrace static noinline int do_monotonic(struct timespec *ts)
72 73 {
73 74 unsigned long seq, ns, secs;
74 75 do {
... ... @@ -82,7 +83,7 @@
82 83 return 0;
83 84 }
84 85  
85   -int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
  86 +notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
86 87 {
87 88 if (likely(gtod->sysctl_enabled && gtod->clock.vread))
88 89 switch (clock) {
... ... @@ -96,7 +97,7 @@
96 97 int clock_gettime(clockid_t, struct timespec *)
97 98 __attribute__((weak, alias("__vdso_clock_gettime")));
98 99  
99   -int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
  100 +notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
100 101 {
101 102 long ret;
102 103 if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
arch/x86/vdso/vgetcpu.c
... ... @@ -13,7 +13,8 @@
13 13 #include <asm/vgtod.h>
14 14 #include "vextern.h"
15 15  
16   -long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
  16 +notrace long
  17 +__vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
17 18 {
18 19 unsigned int p;
19 20  
include/asm-x86/vsyscall.h
... ... @@ -24,7 +24,8 @@
24 24 ((unused, __section__ (".vsyscall_gtod_data"),aligned(16)))
25 25 #define __section_vsyscall_clock __attribute__ \
26 26 ((unused, __section__ (".vsyscall_clock"),aligned(16)))
27   -#define __vsyscall_fn __attribute__ ((unused,__section__(".vsyscall_fn")))
  27 +#define __vsyscall_fn \
  28 + __attribute__ ((unused, __section__(".vsyscall_fn"))) notrace
28 29  
29 30 #define VGETCPU_RDTSCP 1
30 31 #define VGETCPU_LSL 2