Commit e0b306fef90556233797d2e1747bd6a3ae35ea93

Authored by Marcelo Tosatti
1 parent 886b470cb1

time: export time information for KVM pvclock

As suggested by John, export time data similarly to how it is
done by vsyscall support. This allows KVM to retrieve necessary
information to implement vsyscall support in KVM guests.

Acked-by: John Stultz <johnstul@us.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

Showing 2 changed files with 59 additions and 0 deletions Inline Diff

include/linux/pvclock_gtod.h
File was created 1 #ifndef _PVCLOCK_GTOD_H
2 #define _PVCLOCK_GTOD_H
3
4 #include <linux/notifier.h>
5
6 extern int pvclock_gtod_register_notifier(struct notifier_block *nb);
7 extern int pvclock_gtod_unregister_notifier(struct notifier_block *nb);
8
9 #endif /* _PVCLOCK_GTOD_H */
10
kernel/time/timekeeping.c
1 /* 1 /*
2 * linux/kernel/time/timekeeping.c 2 * linux/kernel/time/timekeeping.c
3 * 3 *
4 * Kernel timekeeping code and accessor functions 4 * Kernel timekeeping code and accessor functions
5 * 5 *
6 * This code was moved from linux/kernel/timer.c. 6 * This code was moved from linux/kernel/timer.c.
7 * Please see that file for copyright and history logs. 7 * Please see that file for copyright and history logs.
8 * 8 *
9 */ 9 */
10 10
11 #include <linux/timekeeper_internal.h> 11 #include <linux/timekeeper_internal.h>
12 #include <linux/module.h> 12 #include <linux/module.h>
13 #include <linux/interrupt.h> 13 #include <linux/interrupt.h>
14 #include <linux/percpu.h> 14 #include <linux/percpu.h>
15 #include <linux/init.h> 15 #include <linux/init.h>
16 #include <linux/mm.h> 16 #include <linux/mm.h>
17 #include <linux/sched.h> 17 #include <linux/sched.h>
18 #include <linux/syscore_ops.h> 18 #include <linux/syscore_ops.h>
19 #include <linux/clocksource.h> 19 #include <linux/clocksource.h>
20 #include <linux/jiffies.h> 20 #include <linux/jiffies.h>
21 #include <linux/time.h> 21 #include <linux/time.h>
22 #include <linux/tick.h> 22 #include <linux/tick.h>
23 #include <linux/stop_machine.h> 23 #include <linux/stop_machine.h>
24 #include <linux/pvclock_gtod.h>
24 25
25 26
26 static struct timekeeper timekeeper; 27 static struct timekeeper timekeeper;
27 28
28 /* 29 /*
29 * This read-write spinlock protects us from races in SMP while 30 * This read-write spinlock protects us from races in SMP while
30 * playing with xtime. 31 * playing with xtime.
31 */ 32 */
32 __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock); 33 __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
33 34
34 /* flag for if timekeeping is suspended */ 35 /* flag for if timekeeping is suspended */
35 int __read_mostly timekeeping_suspended; 36 int __read_mostly timekeeping_suspended;
36 37
37 static inline void tk_normalize_xtime(struct timekeeper *tk) 38 static inline void tk_normalize_xtime(struct timekeeper *tk)
38 { 39 {
39 while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) { 40 while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
40 tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift; 41 tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift;
41 tk->xtime_sec++; 42 tk->xtime_sec++;
42 } 43 }
43 } 44 }
44 45
45 static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts) 46 static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts)
46 { 47 {
47 tk->xtime_sec = ts->tv_sec; 48 tk->xtime_sec = ts->tv_sec;
48 tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift; 49 tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift;
49 } 50 }
50 51
51 static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts) 52 static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
52 { 53 {
53 tk->xtime_sec += ts->tv_sec; 54 tk->xtime_sec += ts->tv_sec;
54 tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift; 55 tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
55 tk_normalize_xtime(tk); 56 tk_normalize_xtime(tk);
56 } 57 }
57 58
58 static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm) 59 static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
59 { 60 {
60 struct timespec tmp; 61 struct timespec tmp;
61 62
62 /* 63 /*
63 * Verify consistency of: offset_real = -wall_to_monotonic 64 * Verify consistency of: offset_real = -wall_to_monotonic
64 * before modifying anything 65 * before modifying anything
65 */ 66 */
66 set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec, 67 set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec,
67 -tk->wall_to_monotonic.tv_nsec); 68 -tk->wall_to_monotonic.tv_nsec);
68 WARN_ON_ONCE(tk->offs_real.tv64 != timespec_to_ktime(tmp).tv64); 69 WARN_ON_ONCE(tk->offs_real.tv64 != timespec_to_ktime(tmp).tv64);
69 tk->wall_to_monotonic = wtm; 70 tk->wall_to_monotonic = wtm;
70 set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec); 71 set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
71 tk->offs_real = timespec_to_ktime(tmp); 72 tk->offs_real = timespec_to_ktime(tmp);
72 } 73 }
73 74
74 static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t) 75 static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
75 { 76 {
76 /* Verify consistency before modifying */ 77 /* Verify consistency before modifying */
77 WARN_ON_ONCE(tk->offs_boot.tv64 != timespec_to_ktime(tk->total_sleep_time).tv64); 78 WARN_ON_ONCE(tk->offs_boot.tv64 != timespec_to_ktime(tk->total_sleep_time).tv64);
78 79
79 tk->total_sleep_time = t; 80 tk->total_sleep_time = t;
80 tk->offs_boot = timespec_to_ktime(t); 81 tk->offs_boot = timespec_to_ktime(t);
81 } 82 }
82 83
83 /** 84 /**
84 * timekeeper_setup_internals - Set up internals to use clocksource clock. 85 * timekeeper_setup_internals - Set up internals to use clocksource clock.
85 * 86 *
86 * @clock: Pointer to clocksource. 87 * @clock: Pointer to clocksource.
87 * 88 *
88 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment 89 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
89 * pair and interval request. 90 * pair and interval request.
90 * 91 *
91 * Unless you're the timekeeping code, you should not be using this! 92 * Unless you're the timekeeping code, you should not be using this!
92 */ 93 */
93 static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) 94 static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
94 { 95 {
95 cycle_t interval; 96 cycle_t interval;
96 u64 tmp, ntpinterval; 97 u64 tmp, ntpinterval;
97 struct clocksource *old_clock; 98 struct clocksource *old_clock;
98 99
99 old_clock = tk->clock; 100 old_clock = tk->clock;
100 tk->clock = clock; 101 tk->clock = clock;
101 clock->cycle_last = clock->read(clock); 102 clock->cycle_last = clock->read(clock);
102 103
103 /* Do the ns -> cycle conversion first, using original mult */ 104 /* Do the ns -> cycle conversion first, using original mult */
104 tmp = NTP_INTERVAL_LENGTH; 105 tmp = NTP_INTERVAL_LENGTH;
105 tmp <<= clock->shift; 106 tmp <<= clock->shift;
106 ntpinterval = tmp; 107 ntpinterval = tmp;
107 tmp += clock->mult/2; 108 tmp += clock->mult/2;
108 do_div(tmp, clock->mult); 109 do_div(tmp, clock->mult);
109 if (tmp == 0) 110 if (tmp == 0)
110 tmp = 1; 111 tmp = 1;
111 112
112 interval = (cycle_t) tmp; 113 interval = (cycle_t) tmp;
113 tk->cycle_interval = interval; 114 tk->cycle_interval = interval;
114 115
115 /* Go back from cycles -> shifted ns */ 116 /* Go back from cycles -> shifted ns */
116 tk->xtime_interval = (u64) interval * clock->mult; 117 tk->xtime_interval = (u64) interval * clock->mult;
117 tk->xtime_remainder = ntpinterval - tk->xtime_interval; 118 tk->xtime_remainder = ntpinterval - tk->xtime_interval;
118 tk->raw_interval = 119 tk->raw_interval =
119 ((u64) interval * clock->mult) >> clock->shift; 120 ((u64) interval * clock->mult) >> clock->shift;
120 121
121 /* if changing clocks, convert xtime_nsec shift units */ 122 /* if changing clocks, convert xtime_nsec shift units */
122 if (old_clock) { 123 if (old_clock) {
123 int shift_change = clock->shift - old_clock->shift; 124 int shift_change = clock->shift - old_clock->shift;
124 if (shift_change < 0) 125 if (shift_change < 0)
125 tk->xtime_nsec >>= -shift_change; 126 tk->xtime_nsec >>= -shift_change;
126 else 127 else
127 tk->xtime_nsec <<= shift_change; 128 tk->xtime_nsec <<= shift_change;
128 } 129 }
129 tk->shift = clock->shift; 130 tk->shift = clock->shift;
130 131
131 tk->ntp_error = 0; 132 tk->ntp_error = 0;
132 tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift; 133 tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
133 134
134 /* 135 /*
135 * The timekeeper keeps its own mult values for the currently 136 * The timekeeper keeps its own mult values for the currently
136 * active clocksource. These value will be adjusted via NTP 137 * active clocksource. These value will be adjusted via NTP
137 * to counteract clock drifting. 138 * to counteract clock drifting.
138 */ 139 */
139 tk->mult = clock->mult; 140 tk->mult = clock->mult;
140 } 141 }
141 142
142 /* Timekeeper helper functions. */ 143 /* Timekeeper helper functions. */
143 static inline s64 timekeeping_get_ns(struct timekeeper *tk) 144 static inline s64 timekeeping_get_ns(struct timekeeper *tk)
144 { 145 {
145 cycle_t cycle_now, cycle_delta; 146 cycle_t cycle_now, cycle_delta;
146 struct clocksource *clock; 147 struct clocksource *clock;
147 s64 nsec; 148 s64 nsec;
148 149
149 /* read clocksource: */ 150 /* read clocksource: */
150 clock = tk->clock; 151 clock = tk->clock;
151 cycle_now = clock->read(clock); 152 cycle_now = clock->read(clock);
152 153
153 /* calculate the delta since the last update_wall_time: */ 154 /* calculate the delta since the last update_wall_time: */
154 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; 155 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
155 156
156 nsec = cycle_delta * tk->mult + tk->xtime_nsec; 157 nsec = cycle_delta * tk->mult + tk->xtime_nsec;
157 nsec >>= tk->shift; 158 nsec >>= tk->shift;
158 159
159 /* If arch requires, add in gettimeoffset() */ 160 /* If arch requires, add in gettimeoffset() */
160 return nsec + arch_gettimeoffset(); 161 return nsec + arch_gettimeoffset();
161 } 162 }
162 163
163 static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk) 164 static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
164 { 165 {
165 cycle_t cycle_now, cycle_delta; 166 cycle_t cycle_now, cycle_delta;
166 struct clocksource *clock; 167 struct clocksource *clock;
167 s64 nsec; 168 s64 nsec;
168 169
169 /* read clocksource: */ 170 /* read clocksource: */
170 clock = tk->clock; 171 clock = tk->clock;
171 cycle_now = clock->read(clock); 172 cycle_now = clock->read(clock);
172 173
173 /* calculate the delta since the last update_wall_time: */ 174 /* calculate the delta since the last update_wall_time: */
174 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; 175 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
175 176
176 /* convert delta to nanoseconds. */ 177 /* convert delta to nanoseconds. */
177 nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift); 178 nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
178 179
179 /* If arch requires, add in gettimeoffset() */ 180 /* If arch requires, add in gettimeoffset() */
180 return nsec + arch_gettimeoffset(); 181 return nsec + arch_gettimeoffset();
181 } 182 }
182 183
184 static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
185
186 static void update_pvclock_gtod(struct timekeeper *tk)
187 {
188 raw_notifier_call_chain(&pvclock_gtod_chain, 0, tk);
189 }
190
191 /**
192 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
193 *
194 * Must hold write on timekeeper.lock
195 */
196 int pvclock_gtod_register_notifier(struct notifier_block *nb)
197 {
198 struct timekeeper *tk = &timekeeper;
199 unsigned long flags;
200 int ret;
201
202 write_seqlock_irqsave(&tk->lock, flags);
203 ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
204 /* update timekeeping data */
205 update_pvclock_gtod(tk);
206 write_sequnlock_irqrestore(&tk->lock, flags);
207
208 return ret;
209 }
210 EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
211
212 /**
213 * pvclock_gtod_unregister_notifier - unregister a pvclock
214 * timedata update listener
215 *
216 * Must hold write on timekeeper.lock
217 */
218 int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
219 {
220 struct timekeeper *tk = &timekeeper;
221 unsigned long flags;
222 int ret;
223
224 write_seqlock_irqsave(&tk->lock, flags);
225 ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
226 write_sequnlock_irqrestore(&tk->lock, flags);
227
228 return ret;
229 }
230 EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
231
183 /* must hold write on timekeeper.lock */ 232 /* must hold write on timekeeper.lock */
184 static void timekeeping_update(struct timekeeper *tk, bool clearntp) 233 static void timekeeping_update(struct timekeeper *tk, bool clearntp)
185 { 234 {
186 if (clearntp) { 235 if (clearntp) {
187 tk->ntp_error = 0; 236 tk->ntp_error = 0;
188 ntp_clear(); 237 ntp_clear();
189 } 238 }
190 update_vsyscall(tk); 239 update_vsyscall(tk);
240 update_pvclock_gtod(tk);
191 } 241 }
192 242
193 /** 243 /**
194 * timekeeping_forward_now - update clock to the current time 244 * timekeeping_forward_now - update clock to the current time
195 * 245 *
196 * Forward the current clock to update its state since the last call to 246 * Forward the current clock to update its state since the last call to
197 * update_wall_time(). This is useful before significant clock changes, 247 * update_wall_time(). This is useful before significant clock changes,
198 * as it avoids having to deal with this time offset explicitly. 248 * as it avoids having to deal with this time offset explicitly.
199 */ 249 */
200 static void timekeeping_forward_now(struct timekeeper *tk) 250 static void timekeeping_forward_now(struct timekeeper *tk)
201 { 251 {
202 cycle_t cycle_now, cycle_delta; 252 cycle_t cycle_now, cycle_delta;
203 struct clocksource *clock; 253 struct clocksource *clock;
204 s64 nsec; 254 s64 nsec;
205 255
206 clock = tk->clock; 256 clock = tk->clock;
207 cycle_now = clock->read(clock); 257 cycle_now = clock->read(clock);
208 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; 258 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
209 clock->cycle_last = cycle_now; 259 clock->cycle_last = cycle_now;
210 260
211 tk->xtime_nsec += cycle_delta * tk->mult; 261 tk->xtime_nsec += cycle_delta * tk->mult;
212 262
213 /* If arch requires, add in gettimeoffset() */ 263 /* If arch requires, add in gettimeoffset() */
214 tk->xtime_nsec += (u64)arch_gettimeoffset() << tk->shift; 264 tk->xtime_nsec += (u64)arch_gettimeoffset() << tk->shift;
215 265
216 tk_normalize_xtime(tk); 266 tk_normalize_xtime(tk);
217 267
218 nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift); 268 nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
219 timespec_add_ns(&tk->raw_time, nsec); 269 timespec_add_ns(&tk->raw_time, nsec);
220 } 270 }
221 271
222 /** 272 /**
223 * getnstimeofday - Returns the time of day in a timespec 273 * getnstimeofday - Returns the time of day in a timespec
224 * @ts: pointer to the timespec to be set 274 * @ts: pointer to the timespec to be set
225 * 275 *
226 * Returns the time of day in a timespec. 276 * Returns the time of day in a timespec.
227 */ 277 */
228 void getnstimeofday(struct timespec *ts) 278 void getnstimeofday(struct timespec *ts)
229 { 279 {
230 struct timekeeper *tk = &timekeeper; 280 struct timekeeper *tk = &timekeeper;
231 unsigned long seq; 281 unsigned long seq;
232 s64 nsecs = 0; 282 s64 nsecs = 0;
233 283
234 WARN_ON(timekeeping_suspended); 284 WARN_ON(timekeeping_suspended);
235 285
236 do { 286 do {
237 seq = read_seqbegin(&tk->lock); 287 seq = read_seqbegin(&tk->lock);
238 288
239 ts->tv_sec = tk->xtime_sec; 289 ts->tv_sec = tk->xtime_sec;
240 nsecs = timekeeping_get_ns(tk); 290 nsecs = timekeeping_get_ns(tk);
241 291
242 } while (read_seqretry(&tk->lock, seq)); 292 } while (read_seqretry(&tk->lock, seq));
243 293
244 ts->tv_nsec = 0; 294 ts->tv_nsec = 0;
245 timespec_add_ns(ts, nsecs); 295 timespec_add_ns(ts, nsecs);
246 } 296 }
247 EXPORT_SYMBOL(getnstimeofday); 297 EXPORT_SYMBOL(getnstimeofday);
248 298
249 ktime_t ktime_get(void) 299 ktime_t ktime_get(void)
250 { 300 {
251 struct timekeeper *tk = &timekeeper; 301 struct timekeeper *tk = &timekeeper;
252 unsigned int seq; 302 unsigned int seq;
253 s64 secs, nsecs; 303 s64 secs, nsecs;
254 304
255 WARN_ON(timekeeping_suspended); 305 WARN_ON(timekeeping_suspended);
256 306
257 do { 307 do {
258 seq = read_seqbegin(&tk->lock); 308 seq = read_seqbegin(&tk->lock);
259 secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; 309 secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
260 nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec; 310 nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec;
261 311
262 } while (read_seqretry(&tk->lock, seq)); 312 } while (read_seqretry(&tk->lock, seq));
263 /* 313 /*
264 * Use ktime_set/ktime_add_ns to create a proper ktime on 314 * Use ktime_set/ktime_add_ns to create a proper ktime on
265 * 32-bit architectures without CONFIG_KTIME_SCALAR. 315 * 32-bit architectures without CONFIG_KTIME_SCALAR.
266 */ 316 */
267 return ktime_add_ns(ktime_set(secs, 0), nsecs); 317 return ktime_add_ns(ktime_set(secs, 0), nsecs);
268 } 318 }
269 EXPORT_SYMBOL_GPL(ktime_get); 319 EXPORT_SYMBOL_GPL(ktime_get);
270 320
271 /** 321 /**
272 * ktime_get_ts - get the monotonic clock in timespec format 322 * ktime_get_ts - get the monotonic clock in timespec format
273 * @ts: pointer to timespec variable 323 * @ts: pointer to timespec variable
274 * 324 *
275 * The function calculates the monotonic clock from the realtime 325 * The function calculates the monotonic clock from the realtime
276 * clock and the wall_to_monotonic offset and stores the result 326 * clock and the wall_to_monotonic offset and stores the result
277 * in normalized timespec format in the variable pointed to by @ts. 327 * in normalized timespec format in the variable pointed to by @ts.
278 */ 328 */
279 void ktime_get_ts(struct timespec *ts) 329 void ktime_get_ts(struct timespec *ts)
280 { 330 {
281 struct timekeeper *tk = &timekeeper; 331 struct timekeeper *tk = &timekeeper;
282 struct timespec tomono; 332 struct timespec tomono;
283 s64 nsec; 333 s64 nsec;
284 unsigned int seq; 334 unsigned int seq;
285 335
286 WARN_ON(timekeeping_suspended); 336 WARN_ON(timekeeping_suspended);
287 337
288 do { 338 do {
289 seq = read_seqbegin(&tk->lock); 339 seq = read_seqbegin(&tk->lock);
290 ts->tv_sec = tk->xtime_sec; 340 ts->tv_sec = tk->xtime_sec;
291 nsec = timekeeping_get_ns(tk); 341 nsec = timekeeping_get_ns(tk);
292 tomono = tk->wall_to_monotonic; 342 tomono = tk->wall_to_monotonic;
293 343
294 } while (read_seqretry(&tk->lock, seq)); 344 } while (read_seqretry(&tk->lock, seq));
295 345
296 ts->tv_sec += tomono.tv_sec; 346 ts->tv_sec += tomono.tv_sec;
297 ts->tv_nsec = 0; 347 ts->tv_nsec = 0;
298 timespec_add_ns(ts, nsec + tomono.tv_nsec); 348 timespec_add_ns(ts, nsec + tomono.tv_nsec);
299 } 349 }
300 EXPORT_SYMBOL_GPL(ktime_get_ts); 350 EXPORT_SYMBOL_GPL(ktime_get_ts);
301 351
302 #ifdef CONFIG_NTP_PPS 352 #ifdef CONFIG_NTP_PPS
303 353
304 /** 354 /**
305 * getnstime_raw_and_real - get day and raw monotonic time in timespec format 355 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
306 * @ts_raw: pointer to the timespec to be set to raw monotonic time 356 * @ts_raw: pointer to the timespec to be set to raw monotonic time
307 * @ts_real: pointer to the timespec to be set to the time of day 357 * @ts_real: pointer to the timespec to be set to the time of day
308 * 358 *
309 * This function reads both the time of day and raw monotonic time at the 359 * This function reads both the time of day and raw monotonic time at the
310 * same time atomically and stores the resulting timestamps in timespec 360 * same time atomically and stores the resulting timestamps in timespec
311 * format. 361 * format.
312 */ 362 */
313 void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real) 363 void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
314 { 364 {
315 struct timekeeper *tk = &timekeeper; 365 struct timekeeper *tk = &timekeeper;
316 unsigned long seq; 366 unsigned long seq;
317 s64 nsecs_raw, nsecs_real; 367 s64 nsecs_raw, nsecs_real;
318 368
319 WARN_ON_ONCE(timekeeping_suspended); 369 WARN_ON_ONCE(timekeeping_suspended);
320 370
321 do { 371 do {
322 seq = read_seqbegin(&tk->lock); 372 seq = read_seqbegin(&tk->lock);
323 373
324 *ts_raw = tk->raw_time; 374 *ts_raw = tk->raw_time;
325 ts_real->tv_sec = tk->xtime_sec; 375 ts_real->tv_sec = tk->xtime_sec;
326 ts_real->tv_nsec = 0; 376 ts_real->tv_nsec = 0;
327 377
328 nsecs_raw = timekeeping_get_ns_raw(tk); 378 nsecs_raw = timekeeping_get_ns_raw(tk);
329 nsecs_real = timekeeping_get_ns(tk); 379 nsecs_real = timekeeping_get_ns(tk);
330 380
331 } while (read_seqretry(&tk->lock, seq)); 381 } while (read_seqretry(&tk->lock, seq));
332 382
333 timespec_add_ns(ts_raw, nsecs_raw); 383 timespec_add_ns(ts_raw, nsecs_raw);
334 timespec_add_ns(ts_real, nsecs_real); 384 timespec_add_ns(ts_real, nsecs_real);
335 } 385 }
336 EXPORT_SYMBOL(getnstime_raw_and_real); 386 EXPORT_SYMBOL(getnstime_raw_and_real);
337 387
338 #endif /* CONFIG_NTP_PPS */ 388 #endif /* CONFIG_NTP_PPS */
339 389
340 /** 390 /**
341 * do_gettimeofday - Returns the time of day in a timeval 391 * do_gettimeofday - Returns the time of day in a timeval
342 * @tv: pointer to the timeval to be set 392 * @tv: pointer to the timeval to be set
343 * 393 *
344 * NOTE: Users should be converted to using getnstimeofday() 394 * NOTE: Users should be converted to using getnstimeofday()
345 */ 395 */
346 void do_gettimeofday(struct timeval *tv) 396 void do_gettimeofday(struct timeval *tv)
347 { 397 {
348 struct timespec now; 398 struct timespec now;
349 399
350 getnstimeofday(&now); 400 getnstimeofday(&now);
351 tv->tv_sec = now.tv_sec; 401 tv->tv_sec = now.tv_sec;
352 tv->tv_usec = now.tv_nsec/1000; 402 tv->tv_usec = now.tv_nsec/1000;
353 } 403 }
354 EXPORT_SYMBOL(do_gettimeofday); 404 EXPORT_SYMBOL(do_gettimeofday);
355 405
356 /** 406 /**
357 * do_settimeofday - Sets the time of day 407 * do_settimeofday - Sets the time of day
358 * @tv: pointer to the timespec variable containing the new time 408 * @tv: pointer to the timespec variable containing the new time
359 * 409 *
360 * Sets the time of day to the new time and update NTP and notify hrtimers 410 * Sets the time of day to the new time and update NTP and notify hrtimers
361 */ 411 */
362 int do_settimeofday(const struct timespec *tv) 412 int do_settimeofday(const struct timespec *tv)
363 { 413 {
364 struct timekeeper *tk = &timekeeper; 414 struct timekeeper *tk = &timekeeper;
365 struct timespec ts_delta, xt; 415 struct timespec ts_delta, xt;
366 unsigned long flags; 416 unsigned long flags;
367 417
368 if (!timespec_valid_strict(tv)) 418 if (!timespec_valid_strict(tv))
369 return -EINVAL; 419 return -EINVAL;
370 420
371 write_seqlock_irqsave(&tk->lock, flags); 421 write_seqlock_irqsave(&tk->lock, flags);
372 422
373 timekeeping_forward_now(tk); 423 timekeeping_forward_now(tk);
374 424
375 xt = tk_xtime(tk); 425 xt = tk_xtime(tk);
376 ts_delta.tv_sec = tv->tv_sec - xt.tv_sec; 426 ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
377 ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec; 427 ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;
378 428
379 tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, ts_delta)); 429 tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, ts_delta));
380 430
381 tk_set_xtime(tk, tv); 431 tk_set_xtime(tk, tv);
382 432
383 timekeeping_update(tk, true); 433 timekeeping_update(tk, true);
384 434
385 write_sequnlock_irqrestore(&tk->lock, flags); 435 write_sequnlock_irqrestore(&tk->lock, flags);
386 436
387 /* signal hrtimers about time change */ 437 /* signal hrtimers about time change */
388 clock_was_set(); 438 clock_was_set();
389 439
390 return 0; 440 return 0;
391 } 441 }
392 EXPORT_SYMBOL(do_settimeofday); 442 EXPORT_SYMBOL(do_settimeofday);
393 443
394 /** 444 /**
395 * timekeeping_inject_offset - Adds or subtracts from the current time. 445 * timekeeping_inject_offset - Adds or subtracts from the current time.
396 * @tv: pointer to the timespec variable containing the offset 446 * @tv: pointer to the timespec variable containing the offset
397 * 447 *
398 * Adds or subtracts an offset value from the current time. 448 * Adds or subtracts an offset value from the current time.
399 */ 449 */
400 int timekeeping_inject_offset(struct timespec *ts) 450 int timekeeping_inject_offset(struct timespec *ts)
401 { 451 {
402 struct timekeeper *tk = &timekeeper; 452 struct timekeeper *tk = &timekeeper;
403 unsigned long flags; 453 unsigned long flags;
404 struct timespec tmp; 454 struct timespec tmp;
405 int ret = 0; 455 int ret = 0;
406 456
407 if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) 457 if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
408 return -EINVAL; 458 return -EINVAL;
409 459
410 write_seqlock_irqsave(&tk->lock, flags); 460 write_seqlock_irqsave(&tk->lock, flags);
411 461
412 timekeeping_forward_now(tk); 462 timekeeping_forward_now(tk);
413 463
414 /* Make sure the proposed value is valid */ 464 /* Make sure the proposed value is valid */
415 tmp = timespec_add(tk_xtime(tk), *ts); 465 tmp = timespec_add(tk_xtime(tk), *ts);
416 if (!timespec_valid_strict(&tmp)) { 466 if (!timespec_valid_strict(&tmp)) {
417 ret = -EINVAL; 467 ret = -EINVAL;
418 goto error; 468 goto error;
419 } 469 }
420 470
421 tk_xtime_add(tk, ts); 471 tk_xtime_add(tk, ts);
422 tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts)); 472 tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts));
423 473
424 error: /* even if we error out, we forwarded the time, so call update */ 474 error: /* even if we error out, we forwarded the time, so call update */
425 timekeeping_update(tk, true); 475 timekeeping_update(tk, true);
426 476
427 write_sequnlock_irqrestore(&tk->lock, flags); 477 write_sequnlock_irqrestore(&tk->lock, flags);
428 478
429 /* signal hrtimers about time change */ 479 /* signal hrtimers about time change */
430 clock_was_set(); 480 clock_was_set();
431 481
432 return ret; 482 return ret;
433 } 483 }
434 EXPORT_SYMBOL(timekeeping_inject_offset); 484 EXPORT_SYMBOL(timekeeping_inject_offset);
435 485
436 /** 486 /**
437 * change_clocksource - Swaps clocksources if a new one is available 487 * change_clocksource - Swaps clocksources if a new one is available
438 * 488 *
439 * Accumulates current time interval and initializes new clocksource 489 * Accumulates current time interval and initializes new clocksource
440 */ 490 */
441 static int change_clocksource(void *data) 491 static int change_clocksource(void *data)
442 { 492 {
443 struct timekeeper *tk = &timekeeper; 493 struct timekeeper *tk = &timekeeper;
444 struct clocksource *new, *old; 494 struct clocksource *new, *old;
445 unsigned long flags; 495 unsigned long flags;
446 496
447 new = (struct clocksource *) data; 497 new = (struct clocksource *) data;
448 498
449 write_seqlock_irqsave(&tk->lock, flags); 499 write_seqlock_irqsave(&tk->lock, flags);
450 500
451 timekeeping_forward_now(tk); 501 timekeeping_forward_now(tk);
452 if (!new->enable || new->enable(new) == 0) { 502 if (!new->enable || new->enable(new) == 0) {
453 old = tk->clock; 503 old = tk->clock;
454 tk_setup_internals(tk, new); 504 tk_setup_internals(tk, new);
455 if (old->disable) 505 if (old->disable)
456 old->disable(old); 506 old->disable(old);
457 } 507 }
458 timekeeping_update(tk, true); 508 timekeeping_update(tk, true);
459 509
460 write_sequnlock_irqrestore(&tk->lock, flags); 510 write_sequnlock_irqrestore(&tk->lock, flags);
461 511
462 return 0; 512 return 0;
463 } 513 }
464 514
465 /** 515 /**
466 * timekeeping_notify - Install a new clock source 516 * timekeeping_notify - Install a new clock source
467 * @clock: pointer to the clock source 517 * @clock: pointer to the clock source
468 * 518 *
469 * This function is called from clocksource.c after a new, better clock 519 * This function is called from clocksource.c after a new, better clock
470 * source has been registered. The caller holds the clocksource_mutex. 520 * source has been registered. The caller holds the clocksource_mutex.
471 */ 521 */
472 void timekeeping_notify(struct clocksource *clock) 522 void timekeeping_notify(struct clocksource *clock)
473 { 523 {
474 struct timekeeper *tk = &timekeeper; 524 struct timekeeper *tk = &timekeeper;
475 525
476 if (tk->clock == clock) 526 if (tk->clock == clock)
477 return; 527 return;
478 stop_machine(change_clocksource, clock, NULL); 528 stop_machine(change_clocksource, clock, NULL);
479 tick_clock_notify(); 529 tick_clock_notify();
480 } 530 }
481 531
482 /** 532 /**
483 * ktime_get_real - get the real (wall-) time in ktime_t format 533 * ktime_get_real - get the real (wall-) time in ktime_t format
484 * 534 *
485 * returns the time in ktime_t format 535 * returns the time in ktime_t format
486 */ 536 */
487 ktime_t ktime_get_real(void) 537 ktime_t ktime_get_real(void)
488 { 538 {
489 struct timespec now; 539 struct timespec now;
490 540
491 getnstimeofday(&now); 541 getnstimeofday(&now);
492 542
493 return timespec_to_ktime(now); 543 return timespec_to_ktime(now);
494 } 544 }
495 EXPORT_SYMBOL_GPL(ktime_get_real); 545 EXPORT_SYMBOL_GPL(ktime_get_real);
496 546
497 /** 547 /**
498 * getrawmonotonic - Returns the raw monotonic time in a timespec 548 * getrawmonotonic - Returns the raw monotonic time in a timespec
499 * @ts: pointer to the timespec to be set 549 * @ts: pointer to the timespec to be set
500 * 550 *
501 * Returns the raw monotonic time (completely un-modified by ntp) 551 * Returns the raw monotonic time (completely un-modified by ntp)
502 */ 552 */
503 void getrawmonotonic(struct timespec *ts) 553 void getrawmonotonic(struct timespec *ts)
504 { 554 {
505 struct timekeeper *tk = &timekeeper; 555 struct timekeeper *tk = &timekeeper;
506 unsigned long seq; 556 unsigned long seq;
507 s64 nsecs; 557 s64 nsecs;
508 558
509 do { 559 do {
510 seq = read_seqbegin(&tk->lock); 560 seq = read_seqbegin(&tk->lock);
511 nsecs = timekeeping_get_ns_raw(tk); 561 nsecs = timekeeping_get_ns_raw(tk);
512 *ts = tk->raw_time; 562 *ts = tk->raw_time;
513 563
514 } while (read_seqretry(&tk->lock, seq)); 564 } while (read_seqretry(&tk->lock, seq));
515 565
516 timespec_add_ns(ts, nsecs); 566 timespec_add_ns(ts, nsecs);
517 } 567 }
518 EXPORT_SYMBOL(getrawmonotonic); 568 EXPORT_SYMBOL(getrawmonotonic);
519 569
520 /** 570 /**
521 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres 571 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
522 */ 572 */
523 int timekeeping_valid_for_hres(void) 573 int timekeeping_valid_for_hres(void)
524 { 574 {
525 struct timekeeper *tk = &timekeeper; 575 struct timekeeper *tk = &timekeeper;
526 unsigned long seq; 576 unsigned long seq;
527 int ret; 577 int ret;
528 578
529 do { 579 do {
530 seq = read_seqbegin(&tk->lock); 580 seq = read_seqbegin(&tk->lock);
531 581
532 ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; 582 ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
533 583
534 } while (read_seqretry(&tk->lock, seq)); 584 } while (read_seqretry(&tk->lock, seq));
535 585
536 return ret; 586 return ret;
537 } 587 }
538 588
539 /** 589 /**
540 * timekeeping_max_deferment - Returns max time the clocksource can be deferred 590 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
541 */ 591 */
542 u64 timekeeping_max_deferment(void) 592 u64 timekeeping_max_deferment(void)
543 { 593 {
544 struct timekeeper *tk = &timekeeper; 594 struct timekeeper *tk = &timekeeper;
545 unsigned long seq; 595 unsigned long seq;
546 u64 ret; 596 u64 ret;
547 597
548 do { 598 do {
549 seq = read_seqbegin(&tk->lock); 599 seq = read_seqbegin(&tk->lock);
550 600
551 ret = tk->clock->max_idle_ns; 601 ret = tk->clock->max_idle_ns;
552 602
553 } while (read_seqretry(&tk->lock, seq)); 603 } while (read_seqretry(&tk->lock, seq));
554 604
555 return ret; 605 return ret;
556 } 606 }
557 607
/**
 * read_persistent_clock - Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 * XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
{
	/* No persistent clock on this arch: report the 0/0 sentinel. */
	*ts = (struct timespec) { .tv_sec = 0, .tv_nsec = 0 };
}
572 622
/**
 * read_boot_clock - Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 * XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_boot_clock(struct timespec *ts)
{
	/* No boot clock available: report the 0/0 sentinel. */
	*ts = (struct timespec) { .tv_sec = 0, .tv_nsec = 0 };
}
587 637
/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 *
 * Reads the persistent and boot clocks, installs the default clocksource
 * and seeds xtime, raw time, wall_to_monotonic and the accumulated sleep
 * time.  All timekeeper state is written under tk->lock with interrupts
 * disabled.
 */
void __init timekeeping_init(void)
{
	struct timekeeper *tk = &timekeeper;
	struct clocksource *clock;
	unsigned long flags;
	struct timespec now, boot, tmp;

	/* An invalid persistent-clock reading is replaced by the 0/0 sentinel. */
	read_persistent_clock(&now);
	if (!timespec_valid_strict(&now)) {
		pr_warn("WARNING: Persistent clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		now.tv_sec = 0;
		now.tv_nsec = 0;
	}

	read_boot_clock(&boot);
	if (!timespec_valid_strict(&boot)) {
		pr_warn("WARNING: Boot clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		boot.tv_sec = 0;
		boot.tv_nsec = 0;
	}

	seqlock_init(&tk->lock);

	ntp_init();

	write_seqlock_irqsave(&tk->lock, flags);
	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	tk_setup_internals(tk, clock);

	tk_set_xtime(tk, &now);
	/* Raw monotonic time always starts from zero at boot. */
	tk->raw_time.tv_sec = 0;
	tk->raw_time.tv_nsec = 0;
	/* If no boot clock is available, fall back to the wall time just set. */
	if (boot.tv_sec == 0 && boot.tv_nsec == 0)
		boot = tk_xtime(tk);

	/* wall_to_monotonic is the (negated) boot time offset. */
	set_normalized_timespec(&tmp, -boot.tv_sec, -boot.tv_nsec);
	tk_set_wall_to_mono(tk, tmp);

	/* No sleep time has been accumulated yet. */
	tmp.tv_sec = 0;
	tmp.tv_nsec = 0;
	tk_set_sleep_time(tk, tmp);

	write_sequnlock_irqrestore(&tk->lock, flags);
}
639 689
/* Persistent-clock reading taken when suspend began */
static struct timespec timekeeping_suspend_time;
642 692
643 /** 693 /**
644 * __timekeeping_inject_sleeptime - Internal function to add sleep interval 694 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
645 * @delta: pointer to a timespec delta value 695 * @delta: pointer to a timespec delta value
646 * 696 *
647 * Takes a timespec offset measuring a suspend interval and properly 697 * Takes a timespec offset measuring a suspend interval and properly
648 * adds the sleep offset to the timekeeping variables. 698 * adds the sleep offset to the timekeeping variables.
649 */ 699 */
650 static void __timekeeping_inject_sleeptime(struct timekeeper *tk, 700 static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
651 struct timespec *delta) 701 struct timespec *delta)
652 { 702 {
653 if (!timespec_valid_strict(delta)) { 703 if (!timespec_valid_strict(delta)) {
654 printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid " 704 printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
655 "sleep delta value!\n"); 705 "sleep delta value!\n");
656 return; 706 return;
657 } 707 }
658 tk_xtime_add(tk, delta); 708 tk_xtime_add(tk, delta);
659 tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta)); 709 tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta));
660 tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta)); 710 tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta));
661 } 711 }
662 712
/**
 * timekeeping_inject_sleeptime - Adds suspend interval to timeekeeping values
 * @delta: pointer to a timespec delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock
 * because their RTC/persistent clock is only accessible when irqs are enabled.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime(struct timespec *delta)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	struct timespec ts;

	/*
	 * Make sure we don't set the clock twice: a non-zero persistent
	 * clock reading means the resume path already accounted for the
	 * sleep interval, so there is nothing to inject here.
	 */
	read_persistent_clock(&ts);
	if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
		return;

	write_seqlock_irqsave(&tk->lock, flags);

	/* Accumulate time up to now before shifting the clock. */
	timekeeping_forward_now(tk);

	__timekeeping_inject_sleeptime(tk, delta);

	timekeeping_update(tk, true);

	write_sequnlock_irqrestore(&tk->lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();
}
697 747
/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static void timekeeping_resume(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	struct timespec ts;

	read_persistent_clock(&ts);

	clockevents_resume();
	clocksource_resume();

	write_seqlock_irqsave(&tk->lock, flags);

	/*
	 * Only inject sleep time if the persistent clock advanced past
	 * the value sampled at suspend; otherwise there is nothing
	 * trustworthy to account (or the arch has no persistent clock).
	 */
	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
		ts = timespec_sub(ts, timekeeping_suspend_time);
		__timekeeping_inject_sleeptime(tk, &ts);
	}
	/* re-base the last cycle value */
	tk->clock->cycle_last = tk->clock->read(tk->clock);
	tk->ntp_error = 0;
	timekeeping_suspended = 0;
	timekeeping_update(tk, false);
	write_sequnlock_irqrestore(&tk->lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hrtimers_resume();
}
736 786
/*
 * timekeeping_suspend - snapshot the persistent clock and freeze timekeeping.
 *
 * Records the persistent-clock time at suspend entry (used by
 * timekeeping_resume to compute the sleep interval) and compensates for
 * cumulative suspend/resume drift.  Returns 0 (syscore suspend contract).
 */
static int timekeeping_suspend(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	struct timespec delta, delta_delta;
	static struct timespec old_delta;

	read_persistent_clock(&timekeeping_suspend_time);

	write_seqlock_irqsave(&tk->lock, flags);
	timekeeping_forward_now(tk);
	timekeeping_suspended = 1;

	/*
	 * To avoid drift caused by repeated suspend/resumes,
	 * which each can add ~1 second drift error,
	 * try to compensate so the difference in system time
	 * and persistent_clock time stays close to constant.
	 */
	delta = timespec_sub(tk_xtime(tk), timekeeping_suspend_time);
	delta_delta = timespec_sub(delta, old_delta);
	/*
	 * NOTE(review): abs() here is applied to tv_sec; on configs where
	 * kernel abs() truncates to int this is only safe because the
	 * delta is expected to be tiny - confirm.
	 */
	if (abs(delta_delta.tv_sec) >= 2) {
		/*
		 * if delta_delta is too large, assume time correction
		 * has occured and set old_delta to the current delta.
		 */
		old_delta = delta;
	} else {
		/* Otherwise try to adjust old_system to compensate */
		timekeeping_suspend_time =
			timespec_add(timekeeping_suspend_time, delta_delta);
	}
	write_sequnlock_irqrestore(&tk->lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
	clocksource_suspend();
	clockevents_suspend();

	return 0;
}
777 827
/* syscore suspend/resume hooks for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};
783 833
/* Register the syscore ops above at device-initcall time. */
static int __init timekeeping_init_ops(void)
{
	register_syscore_ops(&timekeeping_syscore_ops);
	return 0;
}

device_initcall(timekeeping_init_ops);
791 841
/*
 * timekeeping_bigadjust - compute a larger-than-unit multiplier adjustment.
 * @tk:       the timekeeper being adjusted
 * @error:    current accumulated ntp error
 * @interval: in/out - cycle interval, scaled (and possibly negated) to
 *            match the returned adjustment
 * @offset:   in/out - unaccumulated cycles, scaled in step with @interval
 *
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 *
 * Returns the signed, shifted adjustment to apply to tk->mult.
 */
static __always_inline int timekeeping_bigadjust(struct timekeeper *tk,
						 s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error.  The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here.  This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1);
	tick_error -= tk->xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	/* Work with a positive error; remember the sign in mult. */
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	/* Scale interval/offset so callers can apply them unchanged. */
	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}
842 892
/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
	s64 error, interval = tk->cycle_interval;
	int adj;

	/*
	 * The point of this is to check if the error is greater than half
	 * an interval.
	 *
	 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
	 *
	 * Note we subtract one in the shift, so that error is really error*2.
	 * This "saves" dividing(shifting) interval twice, but keeps the
	 * (error > interval) comparison as still measuring if error is
	 * larger than half an interval.
	 *
	 * Note: It does not "save" on aggravation when reading the code.
	 */
	error = tk->ntp_error >> (tk->ntp_error_shift - 1);
	if (error > interval) {
		/*
		 * We now divide error by 4(via shift), which checks if
		 * the error is greater than twice the interval.
		 * If it is greater, we need a bigadjust, if its smaller,
		 * we can adjust by 1.
		 */
		error >>= 2;
		/*
		 * XXX - In update_wall_time, we round up to the next
		 * nanosecond, and store the amount rounded up into
		 * the error. This causes the likely below to be unlikely.
		 *
		 * The proper fix is to avoid rounding up by using
		 * the high precision tk->xtime_nsec instead of
		 * xtime.tv_nsec everywhere. Fixing this will take some
		 * time.
		 */
		if (likely(error <= interval))
			adj = 1;
		else
			adj = timekeeping_bigadjust(tk, error, &interval, &offset);
	} else {
		if (error < -interval) {
			/* See comment above, this is just switched for the negative */
			error >>= 2;
			if (likely(error >= -interval)) {
				adj = -1;
				interval = -interval;
				offset = -offset;
			} else {
				adj = timekeeping_bigadjust(tk, error, &interval, &offset);
			}
		} else {
			/* Error within half an interval: nothing to adjust. */
			goto out_adjust;
		}
	}

	/*
	 * NOTE(review): this check only warns (it does not clamp) and only
	 * guards the upper bound mult + maxadj; the symmetric lower bound
	 * is not checked here - confirm this is intentional.
	 */
	if (unlikely(tk->clock->maxadj &&
		(tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
		printk_once(KERN_WARNING
			"Adjusting %s more than 11%% (%ld vs %ld)\n",
			tk->clock->name, (long)tk->mult + adj,
			(long)tk->clock->mult + tk->clock->maxadj);
	}
	/*
	 * So the following can be confusing.
	 *
	 * To keep things simple, lets assume adj == 1 for now.
	 *
	 * When adj != 1, remember that the interval and offset values
	 * have been appropriately scaled so the math is the same.
	 *
	 * The basic idea here is that we're increasing the multiplier
	 * by one, this causes the xtime_interval to be incremented by
	 * one cycle_interval. This is because:
	 *	xtime_interval = cycle_interval * mult
	 * So if mult is being incremented by one:
	 *	xtime_interval = cycle_interval * (mult + 1)
	 * Its the same as:
	 *	xtime_interval = (cycle_interval * mult) + cycle_interval
	 * Which can be shortened to:
	 *	xtime_interval += cycle_interval
	 *
	 * So offset stores the non-accumulated cycles. Thus the current
	 * time (in shifted nanoseconds) is:
	 *	now = (offset * adj) + xtime_nsec
	 * Now, even though we're adjusting the clock frequency, we have
	 * to keep time consistent. In other words, we can't jump back
	 * in time, and we also want to avoid jumping forward in time.
	 *
	 * So given the same offset value, we need the time to be the same
	 * both before and after the freq adjustment.
	 *	now = (offset * adj_1) + xtime_nsec_1
	 *	now = (offset * adj_2) + xtime_nsec_2
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_2) + xtime_nsec_2
	 * And we know:
	 *	adj_2 = adj_1 + 1
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * (adj_1+1)) + xtime_nsec_2
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_1) + offset + xtime_nsec_2
	 * Canceling the sides:
	 *	xtime_nsec_1 = offset + xtime_nsec_2
	 * Which gives us:
	 *	xtime_nsec_2 = xtime_nsec_1 - offset
	 * Which simplfies to:
	 *	xtime_nsec -= offset
	 *
	 * XXX - TODO: Doc ntp_error calculation.
	 */
	tk->mult += adj;
	tk->xtime_interval += interval;
	tk->xtime_nsec -= offset;
	tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;

out_adjust:
	/*
	 * It may be possible that when we entered this function, xtime_nsec
	 * was very small.  Further, if we're slightly speeding the clocksource
	 * in the code above, its possible the required corrective factor to
	 * xtime_nsec could cause it to underflow.
	 *
	 * Now, since we already accumulated the second, cannot simply roll
	 * the accumulated second back, since the NTP subsystem has been
	 * notified via second_overflow. So instead we push xtime_nsec forward
	 * by the amount we underflowed, and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)tk->xtime_nsec < 0)) {
		s64 neg = -(s64)tk->xtime_nsec;
		tk->xtime_nsec = 0;
		tk->ntp_error += neg << tk->ntp_error_shift;
	}

}
988 1038
/**
 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
 *
 * Helper function that accumulates a the nsecs greater then a second
 * from the xtime_nsec field to the xtime_secs field.
 * It also calls into the NTP code to handle leapsecond processing.
 *
 */
static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
{
	/* One second, in the shifted-nanosecond units of xtime_nsec. */
	u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;

	while (tk->xtime_nsec >= nsecps) {
		int leap;

		tk->xtime_nsec -= nsecps;
		tk->xtime_sec++;

		/* Figure out if its a leap sec and apply if needed */
		leap = second_overflow(tk->xtime_sec);
		if (unlikely(leap)) {
			struct timespec ts;

			tk->xtime_sec += leap;

			/* Shift wall_to_monotonic by the same leap amount. */
			ts.tv_sec = leap;
			ts.tv_nsec = 0;
			tk_set_wall_to_mono(tk,
				timespec_sub(tk->wall_to_monotonic, ts));

			clock_was_set_delayed();
		}
	}
}
1023 1073
/**
 * logarithmic_accumulation - shifted accumulation of cycles
 * @tk:     the timekeeper to accumulate into
 * @offset: cycles not yet accumulated
 * @shift:  power-of-two number of intervals to accumulate at once
 *
 * This functions accumulates a shifted interval of cycles into
 * into a shifted interval nanoseconds. Allows for O(log) accumulation
 * loop.
 *
 * Returns the unconsumed cycles.
 */
static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
					u32 shift)
{
	u64 raw_nsecs;

	/* If the offset is smaller then a shifted interval, do nothing */
	if (offset < tk->cycle_interval<<shift)
		return offset;

	/* Accumulate one shifted interval */
	offset -= tk->cycle_interval << shift;
	tk->clock->cycle_last += tk->cycle_interval << shift;

	tk->xtime_nsec += tk->xtime_interval << shift;
	accumulate_nsecs_to_secs(tk);

	/* Accumulate raw time */
	raw_nsecs = (u64)tk->raw_interval << shift;
	raw_nsecs += tk->raw_time.tv_nsec;
	if (raw_nsecs >= NSEC_PER_SEC) {
		/* do_div leaves the remainder in raw_nsecs (< NSEC_PER_SEC). */
		u64 raw_secs = raw_nsecs;
		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
		tk->raw_time.tv_sec += raw_secs;
	}
	tk->raw_time.tv_nsec = raw_nsecs;

	/* Accumulate error between NTP and clock interval */
	tk->ntp_error += ntp_tick_length() << shift;
	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
						(tk->ntp_error_shift + shift);

	return offset;
}
1066 1116
#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
/*
 * old_vsyscall_fixup - round shifted xtime_nsec up to a whole nanosecond
 * @tk: timekeeper being updated (caller holds the write side of tk->lock)
 */
static inline void old_vsyscall_fixup(struct timekeeper *tk)
{
	s64 remainder;

	/*
	 * Store only full nanoseconds into xtime_nsec after rounding
	 * it up and add the remainder to the error difference.
	 * XXX - This is necessary to avoid small 1ns inconsistencies caused
	 * by truncating the remainder in vsyscalls. However, it causes
	 * additional work to be done in timekeeping_adjust(). Once
	 * the vsyscall implementations are converted to use xtime_nsec
	 * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
	 * users are removed, this can be killed.
	 */
	remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
	tk->xtime_nsec -= remainder;
	tk->xtime_nsec += 1ULL << tk->shift;
	tk->ntp_error += remainder << tk->ntp_error_shift;

}
#else
/* No-op when the old-style vsyscall path is not configured */
#define old_vsyscall_fixup(tk)
#endif
1091 1141
1092 1142
1093 1143
/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Reads the clocksource delta since the last update, accumulates it into
 * the timekeeper in doubling chunks, then applies NTP correction and
 * publishes the result via timekeeping_update().
 */
static void update_wall_time(void)
{
	struct clocksource *clock;
	struct timekeeper *tk = &timekeeper;
	cycle_t offset;
	int shift = 0, maxshift;
	unsigned long flags;

	/* Take the write side of the timekeeper seqlock; irqs off */
	write_seqlock_irqsave(&tk->lock, flags);

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		goto out;

	clock = tk->clock;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	offset = tk->cycle_interval;
#else
	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif

	/* Check if there's really nothing to do */
	if (offset < tk->cycle_interval)
		goto out;

	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
	 * that is smaller than the offset.  We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
	 */
	shift = ilog2(offset) - ilog2(tk->cycle_interval);
	shift = max(0, shift);
	/* Bound shift to one less than what overflows tick_length */
	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
	shift = min(shift, maxshift);
	while (offset >= tk->cycle_interval) {
		offset = logarithmic_accumulation(tk, offset, shift);
		if (offset < tk->cycle_interval<<shift)
			shift--;
	}

	/* correct the clock when NTP error is too big */
	timekeeping_adjust(tk, offset);

	/*
	 * XXX This can be killed once everyone converts
	 * to the new update_vsyscall.
	 */
	old_vsyscall_fixup(tk);

	/*
	 * Finally, make sure that after the rounding
	 * xtime_nsec isn't larger than NSEC_PER_SEC
	 */
	accumulate_nsecs_to_secs(tk);

	/* Publish the new time to readers / vsyscall consumers */
	timekeeping_update(tk, false);

out:
	write_sequnlock_irqrestore(&tk->lock, flags);

}
1164 1214
1165 /** 1215 /**
1166 * getboottime - Return the real time of system boot. 1216 * getboottime - Return the real time of system boot.
1167 * @ts: pointer to the timespec to be set 1217 * @ts: pointer to the timespec to be set
1168 * 1218 *
1169 * Returns the wall-time of boot in a timespec. 1219 * Returns the wall-time of boot in a timespec.
1170 * 1220 *
1171 * This is based on the wall_to_monotonic offset and the total suspend 1221 * This is based on the wall_to_monotonic offset and the total suspend
1172 * time. Calls to settimeofday will affect the value returned (which 1222 * time. Calls to settimeofday will affect the value returned (which
1173 * basically means that however wrong your real time clock is at boot time, 1223 * basically means that however wrong your real time clock is at boot time,
1174 * you get the right time here). 1224 * you get the right time here).
1175 */ 1225 */
1176 void getboottime(struct timespec *ts) 1226 void getboottime(struct timespec *ts)
1177 { 1227 {
1178 struct timekeeper *tk = &timekeeper; 1228 struct timekeeper *tk = &timekeeper;
1179 struct timespec boottime = { 1229 struct timespec boottime = {
1180 .tv_sec = tk->wall_to_monotonic.tv_sec + 1230 .tv_sec = tk->wall_to_monotonic.tv_sec +
1181 tk->total_sleep_time.tv_sec, 1231 tk->total_sleep_time.tv_sec,
1182 .tv_nsec = tk->wall_to_monotonic.tv_nsec + 1232 .tv_nsec = tk->wall_to_monotonic.tv_nsec +
1183 tk->total_sleep_time.tv_nsec 1233 tk->total_sleep_time.tv_nsec
1184 }; 1234 };
1185 1235
1186 set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec); 1236 set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
1187 } 1237 }
1188 EXPORT_SYMBOL_GPL(getboottime); 1238 EXPORT_SYMBOL_GPL(getboottime);
1189 1239
1190 /** 1240 /**
1191 * get_monotonic_boottime - Returns monotonic time since boot 1241 * get_monotonic_boottime - Returns monotonic time since boot
1192 * @ts: pointer to the timespec to be set 1242 * @ts: pointer to the timespec to be set
1193 * 1243 *
1194 * Returns the monotonic time since boot in a timespec. 1244 * Returns the monotonic time since boot in a timespec.
1195 * 1245 *
1196 * This is similar to CLOCK_MONTONIC/ktime_get_ts, but also 1246 * This is similar to CLOCK_MONTONIC/ktime_get_ts, but also
1197 * includes the time spent in suspend. 1247 * includes the time spent in suspend.
1198 */ 1248 */
1199 void get_monotonic_boottime(struct timespec *ts) 1249 void get_monotonic_boottime(struct timespec *ts)
1200 { 1250 {
1201 struct timekeeper *tk = &timekeeper; 1251 struct timekeeper *tk = &timekeeper;
1202 struct timespec tomono, sleep; 1252 struct timespec tomono, sleep;
1203 s64 nsec; 1253 s64 nsec;
1204 unsigned int seq; 1254 unsigned int seq;
1205 1255
1206 WARN_ON(timekeeping_suspended); 1256 WARN_ON(timekeeping_suspended);
1207 1257
1208 do { 1258 do {
1209 seq = read_seqbegin(&tk->lock); 1259 seq = read_seqbegin(&tk->lock);
1210 ts->tv_sec = tk->xtime_sec; 1260 ts->tv_sec = tk->xtime_sec;
1211 nsec = timekeeping_get_ns(tk); 1261 nsec = timekeeping_get_ns(tk);
1212 tomono = tk->wall_to_monotonic; 1262 tomono = tk->wall_to_monotonic;
1213 sleep = tk->total_sleep_time; 1263 sleep = tk->total_sleep_time;
1214 1264
1215 } while (read_seqretry(&tk->lock, seq)); 1265 } while (read_seqretry(&tk->lock, seq));
1216 1266
1217 ts->tv_sec += tomono.tv_sec + sleep.tv_sec; 1267 ts->tv_sec += tomono.tv_sec + sleep.tv_sec;
1218 ts->tv_nsec = 0; 1268 ts->tv_nsec = 0;
1219 timespec_add_ns(ts, nsec + tomono.tv_nsec + sleep.tv_nsec); 1269 timespec_add_ns(ts, nsec + tomono.tv_nsec + sleep.tv_nsec);
1220 } 1270 }
1221 EXPORT_SYMBOL_GPL(get_monotonic_boottime); 1271 EXPORT_SYMBOL_GPL(get_monotonic_boottime);
1222 1272
1223 /** 1273 /**
1224 * ktime_get_boottime - Returns monotonic time since boot in a ktime 1274 * ktime_get_boottime - Returns monotonic time since boot in a ktime
1225 * 1275 *
1226 * Returns the monotonic time since boot in a ktime 1276 * Returns the monotonic time since boot in a ktime
1227 * 1277 *
1228 * This is similar to CLOCK_MONTONIC/ktime_get, but also 1278 * This is similar to CLOCK_MONTONIC/ktime_get, but also
1229 * includes the time spent in suspend. 1279 * includes the time spent in suspend.
1230 */ 1280 */
1231 ktime_t ktime_get_boottime(void) 1281 ktime_t ktime_get_boottime(void)
1232 { 1282 {
1233 struct timespec ts; 1283 struct timespec ts;
1234 1284
1235 get_monotonic_boottime(&ts); 1285 get_monotonic_boottime(&ts);
1236 return timespec_to_ktime(ts); 1286 return timespec_to_ktime(ts);
1237 } 1287 }
1238 EXPORT_SYMBOL_GPL(ktime_get_boottime); 1288 EXPORT_SYMBOL_GPL(ktime_get_boottime);
1239 1289
1240 /** 1290 /**
1241 * monotonic_to_bootbased - Convert the monotonic time to boot based. 1291 * monotonic_to_bootbased - Convert the monotonic time to boot based.
1242 * @ts: pointer to the timespec to be converted 1292 * @ts: pointer to the timespec to be converted
1243 */ 1293 */
1244 void monotonic_to_bootbased(struct timespec *ts) 1294 void monotonic_to_bootbased(struct timespec *ts)
1245 { 1295 {
1246 struct timekeeper *tk = &timekeeper; 1296 struct timekeeper *tk = &timekeeper;
1247 1297
1248 *ts = timespec_add(*ts, tk->total_sleep_time); 1298 *ts = timespec_add(*ts, tk->total_sleep_time);
1249 } 1299 }
1250 EXPORT_SYMBOL_GPL(monotonic_to_bootbased); 1300 EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
1251 1301
1252 unsigned long get_seconds(void) 1302 unsigned long get_seconds(void)
1253 { 1303 {
1254 struct timekeeper *tk = &timekeeper; 1304 struct timekeeper *tk = &timekeeper;
1255 1305
1256 return tk->xtime_sec; 1306 return tk->xtime_sec;
1257 } 1307 }
1258 EXPORT_SYMBOL(get_seconds); 1308 EXPORT_SYMBOL(get_seconds);
1259 1309
1260 struct timespec __current_kernel_time(void) 1310 struct timespec __current_kernel_time(void)
1261 { 1311 {
1262 struct timekeeper *tk = &timekeeper; 1312 struct timekeeper *tk = &timekeeper;
1263 1313
1264 return tk_xtime(tk); 1314 return tk_xtime(tk);
1265 } 1315 }
1266 1316
1267 struct timespec current_kernel_time(void) 1317 struct timespec current_kernel_time(void)
1268 { 1318 {
1269 struct timekeeper *tk = &timekeeper; 1319 struct timekeeper *tk = &timekeeper;
1270 struct timespec now; 1320 struct timespec now;
1271 unsigned long seq; 1321 unsigned long seq;
1272 1322
1273 do { 1323 do {
1274 seq = read_seqbegin(&tk->lock); 1324 seq = read_seqbegin(&tk->lock);
1275 1325
1276 now = tk_xtime(tk); 1326 now = tk_xtime(tk);
1277 } while (read_seqretry(&tk->lock, seq)); 1327 } while (read_seqretry(&tk->lock, seq));
1278 1328
1279 return now; 1329 return now;
1280 } 1330 }
1281 EXPORT_SYMBOL(current_kernel_time); 1331 EXPORT_SYMBOL(current_kernel_time);
1282 1332
1283 struct timespec get_monotonic_coarse(void) 1333 struct timespec get_monotonic_coarse(void)
1284 { 1334 {
1285 struct timekeeper *tk = &timekeeper; 1335 struct timekeeper *tk = &timekeeper;
1286 struct timespec now, mono; 1336 struct timespec now, mono;
1287 unsigned long seq; 1337 unsigned long seq;
1288 1338
1289 do { 1339 do {
1290 seq = read_seqbegin(&tk->lock); 1340 seq = read_seqbegin(&tk->lock);
1291 1341
1292 now = tk_xtime(tk); 1342 now = tk_xtime(tk);
1293 mono = tk->wall_to_monotonic; 1343 mono = tk->wall_to_monotonic;
1294 } while (read_seqretry(&tk->lock, seq)); 1344 } while (read_seqretry(&tk->lock, seq));
1295 1345
1296 set_normalized_timespec(&now, now.tv_sec + mono.tv_sec, 1346 set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
1297 now.tv_nsec + mono.tv_nsec); 1347 now.tv_nsec + mono.tv_nsec);
1298 return now; 1348 return now;
1299 } 1349 }
1300 1350
/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */
void do_timer(unsigned long ticks)
{
	/* Advance the tick counter, then fold the elapsed time into xtime */
	jiffies_64 += ticks;
	update_wall_time();
	/* Update the scheduler's load-average accounting for these ticks */
	calc_global_load(ticks);
}
1312 1362
1313 /** 1363 /**
1314 * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic, 1364 * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic,
1315 * and sleep offsets. 1365 * and sleep offsets.
1316 * @xtim: pointer to timespec to be set with xtime 1366 * @xtim: pointer to timespec to be set with xtime
1317 * @wtom: pointer to timespec to be set with wall_to_monotonic 1367 * @wtom: pointer to timespec to be set with wall_to_monotonic
1318 * @sleep: pointer to timespec to be set with time in suspend 1368 * @sleep: pointer to timespec to be set with time in suspend
1319 */ 1369 */
1320 void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, 1370 void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
1321 struct timespec *wtom, struct timespec *sleep) 1371 struct timespec *wtom, struct timespec *sleep)
1322 { 1372 {
1323 struct timekeeper *tk = &timekeeper; 1373 struct timekeeper *tk = &timekeeper;
1324 unsigned long seq; 1374 unsigned long seq;
1325 1375
1326 do { 1376 do {
1327 seq = read_seqbegin(&tk->lock); 1377 seq = read_seqbegin(&tk->lock);
1328 *xtim = tk_xtime(tk); 1378 *xtim = tk_xtime(tk);
1329 *wtom = tk->wall_to_monotonic; 1379 *wtom = tk->wall_to_monotonic;
1330 *sleep = tk->total_sleep_time; 1380 *sleep = tk->total_sleep_time;
1331 } while (read_seqretry(&tk->lock, seq)); 1381 } while (read_seqretry(&tk->lock, seq));
1332 } 1382 }
1333 1383
#ifdef CONFIG_HIGH_RES_TIMERS
/**
 * ktime_get_update_offsets - hrtimer helper
 * @offs_real: pointer to storage for monotonic -> realtime offset
 * @offs_boot: pointer to storage for monotonic -> boottime offset
 *
 * Returns current monotonic time and updates the offsets
 * Called from hrtimer_interrupt() or retrigger_next_event()
 */
ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
{
	struct timekeeper *tk = &timekeeper;
	ktime_t now;
	unsigned int seq;
	u64 secs, nsecs;

	/* Sample time and both offsets consistently under the seqlock */
	do {
		seq = read_seqbegin(&tk->lock);

		secs = tk->xtime_sec;
		nsecs = timekeeping_get_ns(tk);

		*offs_real = tk->offs_real;
		*offs_boot = tk->offs_boot;
	} while (read_seqretry(&tk->lock, seq));

	/* Convert realtime to monotonic by subtracting offs_real */
	now = ktime_add_ns(ktime_set(secs, 0), nsecs);
	now = ktime_sub(now, *offs_real);
	return now;
}
#endif
1365 1415
1366 /** 1416 /**
1367 * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format 1417 * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
1368 */ 1418 */
1369 ktime_t ktime_get_monotonic_offset(void) 1419 ktime_t ktime_get_monotonic_offset(void)
1370 { 1420 {
1371 struct timekeeper *tk = &timekeeper; 1421 struct timekeeper *tk = &timekeeper;
1372 unsigned long seq; 1422 unsigned long seq;
1373 struct timespec wtom; 1423 struct timespec wtom;
1374 1424
1375 do { 1425 do {
1376 seq = read_seqbegin(&tk->lock); 1426 seq = read_seqbegin(&tk->lock);
1377 wtom = tk->wall_to_monotonic; 1427 wtom = tk->wall_to_monotonic;
1378 } while (read_seqretry(&tk->lock, seq)); 1428 } while (read_seqretry(&tk->lock, seq));
1379 1429
1380 return timespec_to_ktime(wtom); 1430 return timespec_to_ktime(wtom);
1381 } 1431 }
1382 EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset); 1432 EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);
1383 1433
/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks: number of ticks, that have elapsed since the last call.
 *
 * Must be called with interrupts disabled.
 */
void xtime_update(unsigned long ticks)
{
	/* xtime_lock serializes the jiffies/wall-time update in do_timer() */
	write_seqlock(&xtime_lock);
	do_timer(ticks);
	write_sequnlock(&xtime_lock);
}
1396 1446