Commit 2ab516575f2f273b19d95140d02c54612201e80a

Authored by Thomas Gleixner
Committed by John Stultz
1 parent 6c260d5863

x86: vdso: Use seqcount instead of seqlock

The update of the vdso data already happens under xtime_lock, so adding a
nested lock is pointless. Just use a seqcount to synchronize the readers.

Reviewed-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: John Stultz <john.stultz@linaro.org>
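
For context on the two primitives: a seqlock_t bundles a spinlock (writer
vs. writer exclusion) with a sequence counter (writer vs. reader
consistency), while a bare seqcount_t is only the counter. When the writer
is already serialized by an outer lock, as update_vsyscall() is by
xtime_lock, the inner spinlock buys nothing. Below is a minimal sketch of
the resulting pattern; the names demo_seq, demo_ts, demo_update and
demo_read are illustrative only, not from this commit:

#include <linux/seqlock.h>
#include <linux/time.h>

static seqcount_t demo_seq;	/* initialize once with seqcount_init(&demo_seq) */
static struct timespec demo_ts;

/* Writer side: assumes the caller already holds an exclusive lock
 * (here that role is played by xtime_lock), so no spinlock and no
 * irqsave are needed; the counter alone publishes the update. */
static void demo_update(const struct timespec *ts)
{
	write_seqcount_begin(&demo_seq);	/* counter becomes odd */
	demo_ts = *ts;
	write_seqcount_end(&demo_seq);		/* counter even again */
}

/* Reader side: lockless; retries if the counter was odd at the start
 * or changed underneath, i.e. if a write overlapped the snapshot. */
static void demo_read(struct timespec *ts)
{
	unsigned seq;

	do {
		seq = read_seqcount_begin(&demo_seq);
		*ts = demo_ts;
	} while (read_seqcount_retry(&demo_seq, seq));
}

The old write_seqlock_irqsave()/write_sequnlock_irqrestore() pair
additionally took the internal spinlock with interrupts disabled; under
xtime_lock that was pure overhead on every timekeeping update.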

Showing 3 changed files with 12 additions and 17 deletions

arch/x86/include/asm/vgtod.h
@@ -5,7 +5,7 @@
 #include <linux/clocksource.h>
 
 struct vsyscall_gtod_data {
-	seqlock_t	lock;
+	seqcount_t	seq;
 
 	/* open coded 'struct timespec' */
 	time_t		wall_time_sec;
arch/x86/kernel/vsyscall_64.c
@@ -52,10 +52,7 @@
 #include "vsyscall_trace.h"
 
 DEFINE_VVAR(int, vgetcpu_mode);
-DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
-{
-	.lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
-};
+DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
 
 static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
 
@@ -86,10 +83,8 @@
 void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
 			struct clocksource *clock, u32 mult)
 {
-	unsigned long flags;
+	write_seqcount_begin(&vsyscall_gtod_data.seq);
 
-	write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
-
 	/* copy vsyscall data */
 	vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode;
 	vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
@@ -101,7 +96,7 @@
 	vsyscall_gtod_data.wall_to_monotonic = *wtm;
 	vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
 
-	write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
+	write_seqcount_end(&vsyscall_gtod_data.seq);
 }
 
 static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
arch/x86/vdso/vclock_gettime.c
@@ -100,12 +100,12 @@
 	int mode;
 
 	do {
-		seq = read_seqbegin(&gtod->lock);
+		seq = read_seqcount_begin(&gtod->seq);
 		mode = gtod->clock.vclock_mode;
 		ts->tv_sec = gtod->wall_time_sec;
 		ts->tv_nsec = gtod->wall_time_nsec;
 		ns = vgetns();
-	} while (unlikely(read_seqretry(&gtod->lock, seq)));
+	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
 
 	timespec_add_ns(ts, ns);
 	return mode;
@@ -117,13 +117,13 @@
 	int mode;
 
 	do {
-		seq = read_seqbegin(&gtod->lock);
+		seq = read_seqcount_begin(&gtod->seq);
 		mode = gtod->clock.vclock_mode;
 		secs = gtod->wall_time_sec;
 		ns = gtod->wall_time_nsec + vgetns();
 		secs += gtod->wall_to_monotonic.tv_sec;
 		ns += gtod->wall_to_monotonic.tv_nsec;
-	} while (unlikely(read_seqretry(&gtod->lock, seq)));
+	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
 
 	/* wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec
 	 * are all guaranteed to be nonnegative.
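
The hunk context ends inside that comment. What such a comment typically
introduces is a normalization loop; a sketch of the standard pattern, not
necessarily the exact lines that follow in this file, since vgetns() can
legitimately return several seconds' worth of nanoseconds:

	while (ns >= NSEC_PER_SEC) {
		ns -= NSEC_PER_SEC;
		++secs;
	}
	ts->tv_sec = secs;
	ts->tv_nsec = ns;
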
@@ -142,10 +142,10 @@
 {
 	unsigned long seq;
 	do {
-		seq = read_seqbegin(&gtod->lock);
+		seq = read_seqcount_begin(&gtod->seq);
 		ts->tv_sec = gtod->wall_time_coarse.tv_sec;
 		ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
-	} while (unlikely(read_seqretry(&gtod->lock, seq)));
+	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
 	return 0;
 }
 
@@ -153,12 +153,12 @@
 {
 	unsigned long seq, ns, secs;
 	do {
-		seq = read_seqbegin(&gtod->lock);
+		seq = read_seqcount_begin(&gtod->seq);
 		secs = gtod->wall_time_coarse.tv_sec;
 		ns = gtod->wall_time_coarse.tv_nsec;
 		secs += gtod->wall_to_monotonic.tv_sec;
 		ns += gtod->wall_to_monotonic.tv_nsec;
-	} while (unlikely(read_seqretry(&gtod->lock, seq)));
+	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
 
 	/* wall_time_nsec and wall_to_monotonic.tv_nsec are
 	 * guaranteed to be between 0 and NSEC_PER_SEC.
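
The diff context again cuts the comment short. The point it is making:
both terms are already in [0, NSEC_PER_SEC), so their sum stays below
2 * NSEC_PER_SEC and a single conditional subtraction normalizes it. A
sketch, assuming the code that follows matches the comment:

	if (ns >= NSEC_PER_SEC) {
		ns -= NSEC_PER_SEC;
		++secs;
	}
	ts->tv_sec = secs;
	ts->tv_nsec = ns;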