Commit 78410af51146796f783925009c8676a30d6c6d90

Authored by Chris Metcalf
1 parent 94fb1afbcb

tile: add clock_gettime support to vDSO

This change adds support for clock_gettime with CLOCK_REALTIME
and CLOCK_MONOTONIC using vDSO.  It also updates the vdso
struct nomenclature used for the clocks to match the x86 code,
making it easier to keep the two architectures in sync going forward.

We also support the *_COARSE clockid_t values, for apps that want
speed but aren't concerned about fine-grained timestamps; this saves
about 20 cycles per call (see http://lwn.net/Articles/342018/).

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Acked-by: John Stultz <john.stultz@linaro.org>

Showing 4 changed files with 172 additions and 35 deletions Side-by-side Diff

arch/tile/include/asm/vdso.h
... ... @@ -29,13 +29,18 @@
29 29 struct vdso_data {
30 30 seqcount_t tz_seq; /* Timezone seqlock */
31 31 seqcount_t tb_seq; /* Timebase seqlock */
32   - __u64 xtime_tod_stamp; /* TOD clock for xtime */
33   - __u64 xtime_clock_sec; /* Kernel time second */
34   - __u64 xtime_clock_nsec; /* Kernel time nanosecond */
35   - __u64 wtom_clock_sec; /* Wall to monotonic clock second */
36   - __u64 wtom_clock_nsec; /* Wall to monotonic clock nanosecond */
  32 + __u64 cycle_last; /* TOD clock for xtime */
  33 + __u64 mask; /* Cycle mask */
37 34 __u32 mult; /* Cycle to nanosecond multiplier */
38 35 __u32 shift; /* Cycle to nanosecond divisor (power of two) */
  36 + __u64 wall_time_sec;
  37 + __u64 wall_time_snsec;
  38 + __u64 monotonic_time_sec;
  39 + __u64 monotonic_time_snsec;
  40 + __u64 wall_time_coarse_sec;
  41 + __u64 wall_time_coarse_nsec;
  42 + __u64 monotonic_time_coarse_sec;
  43 + __u64 monotonic_time_coarse_nsec;
39 44 __u32 tz_minuteswest; /* Minutes west of Greenwich */
40 45 __u32 tz_dsttime; /* Type of dst correction */
41 46 };
arch/tile/kernel/time.c
... ... @@ -257,21 +257,44 @@
257 257  
258 258 void update_vsyscall(struct timekeeper *tk)
259 259 {
260   - struct timespec *wtm = &tk->wall_to_monotonic;
261   - struct clocksource *clock = tk->tkr.clock;
262   -
263   - if (clock != &cycle_counter_cs)
  260 + if (tk->tkr.clock != &cycle_counter_cs)
264 261 return;
265 262  
266 263 write_seqcount_begin(&vdso_data->tb_seq);
267 264  
268   - vdso_data->xtime_tod_stamp = tk->tkr.cycle_last;
269   - vdso_data->xtime_clock_sec = tk->xtime_sec;
270   - vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
271   - vdso_data->wtom_clock_sec = wtm->tv_sec;
272   - vdso_data->wtom_clock_nsec = wtm->tv_nsec;
273   - vdso_data->mult = tk->tkr.mult;
274   - vdso_data->shift = tk->tkr.shift;
  265 + vdso_data->cycle_last = tk->tkr.cycle_last;
  266 + vdso_data->mask = tk->tkr.mask;
  267 + vdso_data->mult = tk->tkr.mult;
  268 + vdso_data->shift = tk->tkr.shift;
  269 +
  270 + vdso_data->wall_time_sec = tk->xtime_sec;
  271 + vdso_data->wall_time_snsec = tk->tkr.xtime_nsec;
  272 +
  273 + vdso_data->monotonic_time_sec = tk->xtime_sec
  274 + + tk->wall_to_monotonic.tv_sec;
  275 + vdso_data->monotonic_time_snsec = tk->tkr.xtime_nsec
  276 + + ((u64)tk->wall_to_monotonic.tv_nsec
  277 + << tk->tkr.shift);
  278 + while (vdso_data->monotonic_time_snsec >=
  279 + (((u64)NSEC_PER_SEC) << tk->tkr.shift)) {
  280 + vdso_data->monotonic_time_snsec -=
  281 + ((u64)NSEC_PER_SEC) << tk->tkr.shift;
  282 + vdso_data->monotonic_time_sec++;
  283 + }
  284 +
  285 + vdso_data->wall_time_coarse_sec = tk->xtime_sec;
  286 + vdso_data->wall_time_coarse_nsec = (long)(tk->tkr.xtime_nsec >>
  287 + tk->tkr.shift);
  288 +
  289 + vdso_data->monotonic_time_coarse_sec =
  290 + vdso_data->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
  291 + vdso_data->monotonic_time_coarse_nsec =
  292 + vdso_data->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
  293 +
  294 + while (vdso_data->monotonic_time_coarse_nsec >= NSEC_PER_SEC) {
  295 + vdso_data->monotonic_time_coarse_nsec -= NSEC_PER_SEC;
  296 + vdso_data->monotonic_time_coarse_sec++;
  297 + }
275 298  
276 299 write_seqcount_end(&vdso_data->tb_seq);
277 300 }
arch/tile/kernel/vdso/vdso.lds.S
... ... @@ -82,6 +82,8 @@
82 82 __vdso_rt_sigreturn;
83 83 __vdso_gettimeofday;
84 84 gettimeofday;
  85 + __vdso_clock_gettime;
  86 + clock_gettime;
85 87 local:*;
86 88 };
87 89 }
arch/tile/kernel/vdso/vgettimeofday.c
... ... @@ -15,6 +15,7 @@
15 15 #define VDSO_BUILD /* avoid some shift warnings for -m32 in <asm/page.h> */
16 16 #include <linux/time.h>
17 17 #include <asm/timex.h>
  18 +#include <asm/unistd.h>
18 19 #include <asm/vdso.h>
19 20  
20 21 #if CHIP_HAS_SPLIT_CYCLE()
... ... @@ -35,6 +36,11 @@
35 36 #define get_cycles get_cycles_inline
36 37 #endif
37 38  
/*
 * Two-register syscall result: tile syscalls report the result and the
 * error as separate values (placed in r0/r1 by the kernel — see the
 * "R00"/"R01" constraints in vdso_fallback_gettime).  Returning this
 * struct lets the vDSO entry points use the same convention.
 */
struct syscall_return_value {
	long value;
	long error;
};
  43 +
38 44 /*
39 45 * Find out the vDSO data page address in the process address space.
40 46 */
41 47  
42 48  
... ... @@ -50,11 +56,82 @@
50 56 return ret;
51 57 }
52 58  
53   -int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
  59 +static inline u64 vgetsns(struct vdso_data *vdso)
54 60 {
55   - cycles_t cycles;
  61 + return ((get_cycles() - vdso->cycle_last) & vdso->mask) * vdso->mult;
  62 +}
  63 +
  64 +static inline int do_realtime(struct vdso_data *vdso, struct timespec *ts)
  65 +{
56 66 unsigned count;
57   - unsigned long sec, ns;
  67 + u64 ns;
  68 +
  69 + do {
  70 + count = read_seqcount_begin(&vdso->tb_seq);
  71 + ts->tv_sec = vdso->wall_time_sec;
  72 + ns = vdso->wall_time_snsec;
  73 + ns += vgetsns(vdso);
  74 + ns >>= vdso->shift;
  75 + } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
  76 +
  77 + ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
  78 + ts->tv_nsec = ns;
  79 +
  80 + return 0;
  81 +}
  82 +
  83 +static inline int do_monotonic(struct vdso_data *vdso, struct timespec *ts)
  84 +{
  85 + unsigned count;
  86 + u64 ns;
  87 +
  88 + do {
  89 + count = read_seqcount_begin(&vdso->tb_seq);
  90 + ts->tv_sec = vdso->monotonic_time_sec;
  91 + ns = vdso->monotonic_time_snsec;
  92 + ns += vgetsns(vdso);
  93 + ns >>= vdso->shift;
  94 + } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
  95 +
  96 + ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
  97 + ts->tv_nsec = ns;
  98 +
  99 + return 0;
  100 +}
  101 +
  102 +static inline int do_realtime_coarse(struct vdso_data *vdso,
  103 + struct timespec *ts)
  104 +{
  105 + unsigned count;
  106 +
  107 + do {
  108 + count = read_seqcount_begin(&vdso->tb_seq);
  109 + ts->tv_sec = vdso->wall_time_coarse_sec;
  110 + ts->tv_nsec = vdso->wall_time_coarse_nsec;
  111 + } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
  112 +
  113 + return 0;
  114 +}
  115 +
  116 +static inline int do_monotonic_coarse(struct vdso_data *vdso,
  117 + struct timespec *ts)
  118 +{
  119 + unsigned count;
  120 +
  121 + do {
  122 + count = read_seqcount_begin(&vdso->tb_seq);
  123 + ts->tv_sec = vdso->monotonic_time_coarse_sec;
  124 + ts->tv_nsec = vdso->monotonic_time_coarse_nsec;
  125 + } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
  126 +
  127 + return 0;
  128 +}
  129 +
  130 +struct syscall_return_value __vdso_gettimeofday(struct timeval *tv,
  131 + struct timezone *tz)
  132 +{
  133 + struct syscall_return_value ret = { 0, 0 };
  134 + unsigned count;
58 135 struct vdso_data *vdso = (struct vdso_data *)get_datapage();
59 136  
60 137 /* The use of the timezone is obsolete, normally tz is NULL. */
61 138  
62 139  
63 140  
... ... @@ -67,26 +144,56 @@
67 144 }
68 145  
69 146 if (unlikely(tv == NULL))
70   - return 0;
  147 + return ret;
71 148  
72   - do {
73   - count = read_seqcount_begin(&vdso->tb_seq);
74   - sec = vdso->xtime_clock_sec;
75   - cycles = get_cycles() - vdso->xtime_tod_stamp;
76   - ns = (cycles * vdso->mult) + vdso->xtime_clock_nsec;
77   - ns >>= vdso->shift;
78   - if (ns >= NSEC_PER_SEC) {
79   - ns -= NSEC_PER_SEC;
80   - sec += 1;
81   - }
82   - } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
  149 + do_realtime(vdso, (struct timespec *)tv);
  150 + tv->tv_usec /= 1000;
83 151  
84   - tv->tv_sec = sec;
85   - tv->tv_usec = ns / 1000;
86   -
87   - return 0;
  152 + return ret;
88 153 }
89 154  
/* Weak alias so userspace can resolve plain gettimeofday() in the vDSO. */
int gettimeofday(struct timeval *tv, struct timezone *tz)
	__attribute__((weak, alias("__vdso_gettimeofday")));
  157 +
  158 +static struct syscall_return_value vdso_fallback_gettime(long clock,
  159 + struct timespec *ts)
  160 +{
  161 + struct syscall_return_value ret;
  162 + __asm__ __volatile__ (
  163 + "swint1"
  164 + : "=R00" (ret.value), "=R01" (ret.error)
  165 + : "R10" (__NR_clock_gettime), "R00" (clock), "R01" (ts)
  166 + : "r2", "r3", "r4", "r5", "r6", "r7",
  167 + "r8", "r9", "r11", "r12", "r13", "r14", "r15",
  168 + "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
  169 + "r24", "r25", "r26", "r27", "r28", "r29", "memory");
  170 + return ret;
  171 +}
  172 +
  173 +struct syscall_return_value __vdso_clock_gettime(clockid_t clock,
  174 + struct timespec *ts)
  175 +{
  176 + struct vdso_data *vdso = (struct vdso_data *)get_datapage();
  177 + struct syscall_return_value ret = { 0, 0 };
  178 +
  179 + switch (clock) {
  180 + case CLOCK_REALTIME:
  181 + do_realtime(vdso, ts);
  182 + return ret;
  183 + case CLOCK_MONOTONIC:
  184 + do_monotonic(vdso, ts);
  185 + return ret;
  186 + case CLOCK_REALTIME_COARSE:
  187 + do_realtime_coarse(vdso, ts);
  188 + return ret;
  189 + case CLOCK_MONOTONIC_COARSE:
  190 + do_monotonic_coarse(vdso, ts);
  191 + return ret;
  192 + default:
  193 + return vdso_fallback_gettime(clock, ts);
  194 + }
  195 +}
  196 +
  197 +int clock_gettime(clockid_t clock, struct timespec *ts)
  198 + __attribute__((weak, alias("__vdso_clock_gettime")));