Blame view

arch/x86/vdso/vclock_gettime.c 5.57 KB
2aae950b2   Andi Kleen   x86_64: Add vDSO ...
1
2
3
4
  /*
   * Copyright 2006 Andi Kleen, SUSE Labs.
   * Subject to the GNU Public License, v.2
   *
   * Fast user context implementation of clock_gettime, gettimeofday, and time.
   *
   * The code should have no internal unresolved relocations.
   * Check with readelf after changing.
   */
2b7d0390a   Ingo Molnar   tracing: branch t...
10
  /* Disable profiling for userspace code: */
2ed84eeb8   Steven Rostedt   trace: rename unl...
11
  #define DISABLE_BRANCH_PROFILING
2b7d0390a   Ingo Molnar   tracing: branch t...
12

2aae950b2   Andi Kleen   x86_64: Add vDSO ...
13
14
15
16
17
  #include <linux/kernel.h>
  #include <linux/posix-timers.h>
  #include <linux/time.h>
  #include <linux/string.h>
  #include <asm/vsyscall.h>
98d0ac38c   Andy Lutomirski   x86-64: Move vrea...
18
  #include <asm/fixmap.h>
2aae950b2   Andi Kleen   x86_64: Add vDSO ...
19
20
21
22
23
  #include <asm/vgtod.h>
  #include <asm/timex.h>
  #include <asm/hpet.h>
  #include <asm/unistd.h>
  #include <asm/io.h>
2aae950b2   Andi Kleen   x86_64: Add vDSO ...
24

8c49d9a74   Andy Lutomirski   x86-64: Clean up ...
25
  #define gtod (&VVAR(vsyscall_gtod_data))
2aae950b2   Andi Kleen   x86_64: Add vDSO ...
26

98d0ac38c   Andy Lutomirski   x86-64: Move vrea...
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
  /*
   * Read the TSC for vDSO timekeeping.  Returns the raw cycle count,
   * clamped to cycle_last so the result never moves backwards relative
   * to the kernel's last timekeeping update.
   */
  notrace static cycle_t vread_tsc(void)
  {
  	cycle_t ret;
  	u64 last;

  	/*
  	 * Empirically, a fence (of type that depends on the CPU)
  	 * before rdtsc is enough to ensure that rdtsc is ordered
  	 * with respect to loads.  The various CPU manuals are unclear
  	 * as to whether rdtsc can be reordered with later loads,
  	 * but no one has ever seen it happen.
  	 */
  	rdtsc_barrier();
  	ret = (cycle_t)vget_cycles();

  	last = VVAR(vsyscall_gtod_data).clock.cycle_last;

  	if (likely(ret >= last))
  		return ret;

  	/*
  	 * GCC likes to generate cmov here, but this branch is extremely
  	 * predictable (it's just a function of time and the likely is
  	 * very likely) and there's a data dependence, so force GCC
  	 * to generate a branch instead.  I don't barrier() because
  	 * we don't actually need a barrier, and if this function
  	 * ever gets inlined it will generate worse code.
  	 */
  	asm volatile ("");
  	return last;
  }
  
  /*
   * Read the free-running HPET main counter through the fixmapped
   * VSYSCALL_HPET window.  Use HPET_COUNTER from <asm/hpet.h>
   * (already included above) instead of the bare magic offset 0xf0.
   */
  static notrace cycle_t vread_hpet(void)
  {
  	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + HPET_COUNTER);
  }
23adec554   Steven Rostedt   x86: add notrace ...
63
  notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
2aae950b2   Andi Kleen   x86_64: Add vDSO ...
64
65
66
67
68
69
  {
  	long ret;
  	asm("syscall" : "=a" (ret) :
  	    "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
  	return ret;
  }
23adec554   Steven Rostedt   x86: add notrace ...
70
  notrace static inline long vgetns(void)
2aae950b2   Andi Kleen   x86_64: Add vDSO ...
71
  {
95b086799   Andi Kleen   x86_64: Add missi...
72
  	long v;
98d0ac38c   Andy Lutomirski   x86-64: Move vrea...
73
74
75
76
77
78
  	cycles_t cycles;
  	if (gtod->clock.vclock_mode == VCLOCK_TSC)
  		cycles = vread_tsc();
  	else
  		cycles = vread_hpet();
  	v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
95b086799   Andi Kleen   x86_64: Add missi...
79
  	return (v * gtod->clock.mult) >> gtod->clock.shift;
2aae950b2   Andi Kleen   x86_64: Add vDSO ...
80
  }
23adec554   Steven Rostedt   x86: add notrace ...
81
  notrace static noinline int do_realtime(struct timespec *ts)
2aae950b2   Andi Kleen   x86_64: Add vDSO ...
82
83
84
85
86
87
88
89
90
91
92
  {
  	unsigned long seq, ns;
  	do {
  		seq = read_seqbegin(&gtod->lock);
  		ts->tv_sec = gtod->wall_time_sec;
  		ts->tv_nsec = gtod->wall_time_nsec;
  		ns = vgetns();
  	} while (unlikely(read_seqretry(&gtod->lock, seq)));
  	timespec_add_ns(ts, ns);
  	return 0;
  }
23adec554   Steven Rostedt   x86: add notrace ...
93
  notrace static noinline int do_monotonic(struct timespec *ts)
2aae950b2   Andi Kleen   x86_64: Add vDSO ...
94
95
96
97
98
99
100
101
102
  {
  	unsigned long seq, ns, secs;
  	do {
  		seq = read_seqbegin(&gtod->lock);
  		secs = gtod->wall_time_sec;
  		ns = gtod->wall_time_nsec + vgetns();
  		secs += gtod->wall_to_monotonic.tv_sec;
  		ns += gtod->wall_to_monotonic.tv_nsec;
  	} while (unlikely(read_seqretry(&gtod->lock, seq)));
0f51f2852   Andy Lutomirski   x86-64: Vclock_ge...
103
104
105
106
107
108
109
110
111
112
  
  	/* wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec
  	 * are all guaranteed to be nonnegative.
  	 */
  	while (ns >= NSEC_PER_SEC) {
  		ns -= NSEC_PER_SEC;
  		++secs;
  	}
  	ts->tv_sec = secs;
  	ts->tv_nsec = ns;
2aae950b2   Andi Kleen   x86_64: Add vDSO ...
113
114
  	return 0;
  }
da15cfdae   John Stultz   time: Introduce C...
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
  /*
   * CLOCK_REALTIME_COARSE: the last tick's wall time, copied under
   * the seqlock without touching any cycle counter.  Always returns 0.
   */
  notrace static noinline int do_realtime_coarse(struct timespec *ts)
  {
  	unsigned long start;

  	do {
  		start = read_seqbegin(&gtod->lock);
  		*ts = gtod->wall_time_coarse;
  	} while (unlikely(read_seqretry(&gtod->lock, start)));
  	return 0;
  }
  
  notrace static noinline int do_monotonic_coarse(struct timespec *ts)
  {
  	unsigned long seq, ns, secs;
  	do {
  		seq = read_seqbegin(&gtod->lock);
  		secs = gtod->wall_time_coarse.tv_sec;
  		ns = gtod->wall_time_coarse.tv_nsec;
  		secs += gtod->wall_to_monotonic.tv_sec;
  		ns += gtod->wall_to_monotonic.tv_nsec;
  	} while (unlikely(read_seqretry(&gtod->lock, seq)));
0f51f2852   Andy Lutomirski   x86-64: Vclock_ge...
136
137
138
139
140
141
142
143
144
145
  
  	/* wall_time_nsec and wall_to_monotonic.tv_nsec are
  	 * guaranteed to be between 0 and NSEC_PER_SEC.
  	 */
  	if (ns >= NSEC_PER_SEC) {
  		ns -= NSEC_PER_SEC;
  		++secs;
  	}
  	ts->tv_sec = secs;
  	ts->tv_nsec = ns;
da15cfdae   John Stultz   time: Introduce C...
146
147
  	return 0;
  }
23adec554   Steven Rostedt   x86: add notrace ...
148
  /*
   * vDSO entry point for clock_gettime(2).  Serves the four clock ids
   * backed by vsyscall_gtod_data in userspace; any other id, or a
   * disabled vclock, goes through the real syscall.
   */
  notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
  {
  	if (clock == CLOCK_REALTIME) {
  		if (likely(gtod->clock.vclock_mode != VCLOCK_NONE))
  			return do_realtime(ts);
  	} else if (clock == CLOCK_MONOTONIC) {
  		if (likely(gtod->clock.vclock_mode != VCLOCK_NONE))
  			return do_monotonic(ts);
  	} else if (clock == CLOCK_REALTIME_COARSE) {
  		return do_realtime_coarse(ts);
  	} else if (clock == CLOCK_MONOTONIC_COARSE) {
  		return do_monotonic_coarse(ts);
  	}

  	/* Unknown clock id, or no usable vclock: let the kernel decide. */
  	return vdso_fallback_gettime(clock, ts);
  }
  int clock_gettime(clockid_t, struct timespec *)
  	__attribute__((weak, alias("__vdso_clock_gettime")));
23adec554   Steven Rostedt   x86: add notrace ...
168
  notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
2aae950b2   Andi Kleen   x86_64: Add vDSO ...
169
170
  {
  	long ret;
98d0ac38c   Andy Lutomirski   x86-64: Move vrea...
171
  	if (likely(gtod->clock.vclock_mode != VCLOCK_NONE)) {
2f65dd475   John Wright   x86: gettimeofday...
172
173
174
175
176
177
178
  		if (likely(tv != NULL)) {
  			BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
  				     offsetof(struct timespec, tv_nsec) ||
  				     sizeof(*tv) != sizeof(struct timespec));
  			do_realtime((struct timespec *)tv);
  			tv->tv_usec /= 1000;
  		}
2aae950b2   Andi Kleen   x86_64: Add vDSO ...
179
  		if (unlikely(tz != NULL)) {
a1289643a   Andi Kleen   x86: use explicit...
180
181
182
  			/* Avoid memcpy. Some old compilers fail to inline it */
  			tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
  			tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
2aae950b2   Andi Kleen   x86_64: Add vDSO ...
183
184
185
186
187
188
189
190
191
  		}
  		return 0;
  	}
  	asm("syscall" : "=a" (ret) :
  	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
  	return ret;
  }
  int gettimeofday(struct timeval *, struct timezone *)
  	__attribute__((weak, alias("__vdso_gettimeofday")));
f144a6b4d   Andy Lutomirski   x86-64: Add time ...
192

0d7b8547f   Andy Lutomirski   x86-64: Remove ke...
193
194
195
196
  /*
   * This will break when the xtime seconds get inaccurate, but that is
   * unlikely
   */
f144a6b4d   Andy Lutomirski   x86-64: Add time ...
197
198
  /*
   * vDSO entry point for time(2): current wall-clock seconds,
   * optionally also stored through t.
   */
  notrace time_t __vdso_time(time_t *t)
  {
  	time_t secs;

  	/* A single aligned word load is atomic on x86_64; no lock needed. */
  	secs = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);
  	if (t != NULL)
  		*t = secs;
  	return secs;
  }
  int time(time_t *t)
  	__attribute__((weak, alias("__vdso_time")));