Blame view

kernel/time/timekeeping.c 70 KB
35728b820   Thomas Gleixner   time: Add SPDX li...
1
  // SPDX-License-Identifier: GPL-2.0
8524070b7   John Stultz   Move timekeeping ...
2
  /*
58c5fc2b9   Thomas Gleixner   time: Remove usel...
3
4
   *  Kernel timekeeping code and accessor functions. Based on code from
   *  timer.c, moved in commit 8524070b7982.
8524070b7   John Stultz   Move timekeeping ...
5
   */
d7b4202e0   John Stultz   time: Move timeke...
6
  #include <linux/timekeeper_internal.h>
8524070b7   John Stultz   Move timekeeping ...
7
8
9
10
11
  #include <linux/module.h>
  #include <linux/interrupt.h>
  #include <linux/percpu.h>
  #include <linux/init.h>
  #include <linux/mm.h>
38b8d208a   Ingo Molnar   sched/headers: Pr...
12
  #include <linux/nmi.h>
d43c36dc6   Alexey Dobriyan   headers: remove s...
13
  #include <linux/sched.h>
4f17722c7   Ingo Molnar   sched/headers: Pr...
14
  #include <linux/sched/loadavg.h>
3eca99374   Pavel Tatashin   timekeeping: Repl...
15
  #include <linux/sched/clock.h>
e1a85b2c5   Rafael J. Wysocki   timekeeping: Use ...
16
  #include <linux/syscore_ops.h>
8524070b7   John Stultz   Move timekeeping ...
17
18
19
20
  #include <linux/clocksource.h>
  #include <linux/jiffies.h>
  #include <linux/time.h>
  #include <linux/tick.h>
75c5158f7   Martin Schwidefsky   timekeeping: Upda...
21
  #include <linux/stop_machine.h>
e0b306fef   Marcelo Tosatti   time: export time...
22
  #include <linux/pvclock_gtod.h>
52f5684c8   Gideon Israel Dsouza   kernel: use macro...
23
  #include <linux/compiler.h>
2d87a0674   Ondrej Mosnacek   timekeeping: Audi...
24
  #include <linux/audit.h>
8524070b7   John Stultz   Move timekeeping ...
25

eb93e4d93   Thomas Gleixner   timekeeping: Make...
26
  #include "tick-internal.h"
aa6f9c595   John Stultz   ntp: Move do_adjt...
27
  #include "ntp_internal.h"
5c83545f2   Colin Cross   power: Add option...
28
  #include "timekeeping_internal.h"
155ec6022   Martin Schwidefsky   timekeeping: Intr...
29

04397fe94   David Vrabel   timekeeping: Pass...
30
31
  #define TK_CLEAR_NTP		(1 << 0)
  #define TK_MIRROR		(1 << 1)
780427f0e   David Vrabel   timekeeping: Indi...
32
  #define TK_CLOCK_WAS_SET	(1 << 2)
04397fe94   David Vrabel   timekeeping: Pass...
33

b061c7a51   Miroslav Lichvar   timekeeping: Upda...
34
35
36
37
38
39
40
  enum timekeeping_adv_mode {
  	/* Update timekeeper when a tick has passed */
  	TK_ADV_TICK,
  
  	/* Update timekeeper on a direct frequency change */
  	TK_ADV_FREQ
  };
b923f1247   Linus Torvalds   Merge tag 'timers...
41
  DEFINE_RAW_SPINLOCK(timekeeper_lock);
025e82bcb   Ahmed S. Darwish   timekeeping: Use ...
42

3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
43
44
45
46
47
  /*
   * The most important data for readout fits into a single 64 byte
   * cache line.
   */
  static struct {
025e82bcb   Ahmed S. Darwish   timekeeping: Use ...
48
  	seqcount_raw_spinlock_t	seq;
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
49
  	struct timekeeper	timekeeper;
ce10a5b39   Bart Van Assche   timekeeping: Use ...
50
  } tk_core ____cacheline_aligned = {
025e82bcb   Ahmed S. Darwish   timekeeping: Use ...
51
  	.seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_core.seq, &timekeeper_lock),
ce10a5b39   Bart Van Assche   timekeeping: Use ...
52
  };
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
53

48cdc135d   Thomas Gleixner   timekeeping: Impl...
54
  static struct timekeeper shadow_timekeeper;
155ec6022   Martin Schwidefsky   timekeeping: Intr...
55

71419b30c   Thomas Gleixner   timekeeping: Util...
56
57
  /* flag for if timekeeping is suspended */
  int __read_mostly timekeeping_suspended;
4396e058c   Thomas Gleixner   timekeeping: Prov...
58
59
60
61
62
63
64
65
66
67
  /**
   * struct tk_fast - NMI safe timekeeper
   * @seq:	Sequence counter for protecting updates. The lowest bit
   *		is the index for the tk_read_base array
   * @base:	tk_read_base array. Access is indexed by the lowest bit of
   *		@seq.
   *
   * See @update_fast_timekeeper() below.
   */
  struct tk_fast {
249d05383   Ahmed S. Darwish   timekeeping: Use ...
68
  	seqcount_latch_t	seq;
4396e058c   Thomas Gleixner   timekeeping: Prov...
69
70
  	struct tk_read_base	base[2];
  };
5df32107f   Prarit Bhargava   timekeeping: Make...
71
72
73
74
75
  /* Suspend-time cycles value for halted fast timekeeper. */
  static u64 cycles_at_suspend;
  
  static u64 dummy_clock_read(struct clocksource *cs)
  {
71419b30c   Thomas Gleixner   timekeeping: Util...
76
77
78
  	if (timekeeping_suspended)
  		return cycles_at_suspend;
  	return local_clock();
5df32107f   Prarit Bhargava   timekeeping: Make...
79
80
81
82
83
  }
  
  static struct clocksource dummy_clock = {
  	.read = dummy_clock_read,
  };
71419b30c   Thomas Gleixner   timekeeping: Util...
84
85
86
87
88
89
90
91
92
93
94
95
96
97
  /*
   * Boot time initialization which allows local_clock() to be utilized
   * during early boot when clocksources are not available. local_clock()
   * returns nanoseconds already so no conversion is required, hence mult=1
   * and shift=0. When the first proper clocksource is installed then
   * the fast time keepers are updated with the correct values.
   */
  #define FAST_TK_INIT						\
  	{							\
  		.clock		= &dummy_clock,			\
  		.mask		= CLOCKSOURCE_MASK(64),		\
  		.mult		= 1,				\
  		.shift		= 0,				\
  	}
5df32107f   Prarit Bhargava   timekeeping: Make...
98
  static struct tk_fast tk_fast_mono ____cacheline_aligned = {
249d05383   Ahmed S. Darwish   timekeeping: Use ...
99
  	.seq     = SEQCNT_LATCH_ZERO(tk_fast_mono.seq),
71419b30c   Thomas Gleixner   timekeeping: Util...
100
101
  	.base[0] = FAST_TK_INIT,
  	.base[1] = FAST_TK_INIT,
5df32107f   Prarit Bhargava   timekeeping: Make...
102
103
104
  };
  
  static struct tk_fast tk_fast_raw  ____cacheline_aligned = {
249d05383   Ahmed S. Darwish   timekeeping: Use ...
105
  	.seq     = SEQCNT_LATCH_ZERO(tk_fast_raw.seq),
71419b30c   Thomas Gleixner   timekeeping: Util...
106
107
  	.base[0] = FAST_TK_INIT,
  	.base[1] = FAST_TK_INIT,
5df32107f   Prarit Bhargava   timekeeping: Make...
108
  };
4396e058c   Thomas Gleixner   timekeeping: Prov...
109

1e75fa8be   John Stultz   time: Condense ti...
110
111
  static inline void tk_normalize_xtime(struct timekeeper *tk)
  {
876e78818   Peter Zijlstra   time: Rename time...
112
113
  	while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
  		tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
1e75fa8be   John Stultz   time: Condense ti...
114
115
  		tk->xtime_sec++;
  	}
fc6eead7c   John Stultz   time: Clean up CL...
116
117
118
119
  	while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
  		tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
  		tk->raw_sec++;
  	}
1e75fa8be   John Stultz   time: Condense ti...
120
  }
985e69507   Ondrej Mosnacek   timekeeping/ntp: ...
121
  static inline struct timespec64 tk_xtime(const struct timekeeper *tk)
c905fae43   Thomas Gleixner   timekeeper: Move ...
122
123
124
125
  {
  	struct timespec64 ts;
  
  	ts.tv_sec = tk->xtime_sec;
876e78818   Peter Zijlstra   time: Rename time...
126
  	ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
c905fae43   Thomas Gleixner   timekeeper: Move ...
127
128
  	return ts;
  }
7d489d15c   John Stultz   timekeeping: Conv...
129
  static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
1e75fa8be   John Stultz   time: Condense ti...
130
131
  {
  	tk->xtime_sec = ts->tv_sec;
876e78818   Peter Zijlstra   time: Rename time...
132
  	tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
1e75fa8be   John Stultz   time: Condense ti...
133
  }
7d489d15c   John Stultz   timekeeping: Conv...
134
  static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
1e75fa8be   John Stultz   time: Condense ti...
135
136
  {
  	tk->xtime_sec += ts->tv_sec;
876e78818   Peter Zijlstra   time: Rename time...
137
  	tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
784ffcbb9   John Stultz   time: Ensure we n...
138
  	tk_normalize_xtime(tk);
1e75fa8be   John Stultz   time: Condense ti...
139
  }
8fcce546b   John Stultz   time: Cleanup glo...
140

7d489d15c   John Stultz   timekeeping: Conv...
141
  static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
6d0ef903e   John Stultz   time: Clean up of...
142
  {
7d489d15c   John Stultz   timekeeping: Conv...
143
  	struct timespec64 tmp;
6d0ef903e   John Stultz   time: Clean up of...
144
145
146
147
148
  
  	/*
  	 * Verify consistency of: offset_real = -wall_to_monotonic
  	 * before modifying anything
  	 */
7d489d15c   John Stultz   timekeeping: Conv...
149
  	set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
6d0ef903e   John Stultz   time: Clean up of...
150
  					-tk->wall_to_monotonic.tv_nsec);
2456e8553   Thomas Gleixner   ktime: Get rid of...
151
  	WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
6d0ef903e   John Stultz   time: Clean up of...
152
  	tk->wall_to_monotonic = wtm;
7d489d15c   John Stultz   timekeeping: Conv...
153
154
  	set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
  	tk->offs_real = timespec64_to_ktime(tmp);
04005f601   John Stultz   timekeeping: Fix ...
155
  	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
6d0ef903e   John Stultz   time: Clean up of...
156
  }
47da70d32   Thomas Gleixner   timekeeping: Remo...
157
  static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
6d0ef903e   John Stultz   time: Clean up of...
158
  {
a3ed0e439   Thomas Gleixner   Revert: Unify CLO...
159
  	tk->offs_boot = ktime_add(tk->offs_boot, delta);
b99328a60   Thomas Gleixner   timekeeping/vsysc...
160
161
162
163
164
  	/*
  	 * Timespec representation for VDSO update to avoid 64bit division
  	 * on every update.
  	 */
  	tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot);
6d0ef903e   John Stultz   time: Clean up of...
165
  }
ceea5e377   John Stultz   time: Fix clock->...
166
167
168
169
  /*
   * tk_clock_read - atomic clocksource read() helper
   *
   * This helper is necessary to use in the read paths because, while the
025e82bcb   Ahmed S. Darwish   timekeeping: Use ...
170
   * seqcount ensures we don't return a bad value while structures are updated,
ceea5e377   John Stultz   time: Fix clock->...
171
172
173
174
175
176
177
178
   * it doesn't protect from potential crashes. There is the possibility that
   * the tkr's clocksource may change between the read reference, and the
   * clock reference passed to the read function.  This can cause crashes if
   * the wrong clocksource is passed to the wrong read function.
   * This isn't necessary to use when holding the timekeeper_lock or doing
   * a read of the fast-timekeeper tkrs (which is protected by its own locking
   * and update logic).
   */
985e69507   Ondrej Mosnacek   timekeeping/ntp: ...
179
  static inline u64 tk_clock_read(const struct tk_read_base *tkr)
ceea5e377   John Stultz   time: Fix clock->...
180
181
182
183
184
  {
  	struct clocksource *clock = READ_ONCE(tkr->clock);
  
  	return clock->read(clock);
  }
3c17ad19f   John Stultz   timekeeping: Add ...
185
  #ifdef CONFIG_DEBUG_TIMEKEEPING
4ca22c264   John Stultz   timekeeping: Add ...
186
  #define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
4ca22c264   John Stultz   timekeeping: Add ...
187

a5a1d1c29   Thomas Gleixner   clocksource: Use ...
188
  static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
3c17ad19f   John Stultz   timekeeping: Add ...
189
  {
a5a1d1c29   Thomas Gleixner   clocksource: Use ...
190
  	u64 max_cycles = tk->tkr_mono.clock->max_cycles;
876e78818   Peter Zijlstra   time: Rename time...
191
  	const char *name = tk->tkr_mono.clock->name;
3c17ad19f   John Stultz   timekeeping: Add ...
192
193
  
  	if (offset > max_cycles) {
a558cd021   John Stultz   timekeeping: Add ...
194
195
  		printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger
  ",
3c17ad19f   John Stultz   timekeeping: Add ...
196
  				offset, name, max_cycles);
a558cd021   John Stultz   timekeeping: Add ...
197
198
  		printk_deferred("         timekeeping: Your kernel is sick, but tries to cope by capping time updates
  ");
3c17ad19f   John Stultz   timekeeping: Add ...
199
200
  	} else {
  		if (offset > (max_cycles >> 1)) {
fc4fa6e11   Masanari Iida   treewide: Fix typ...
201
202
  			printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)
  ",
3c17ad19f   John Stultz   timekeeping: Add ...
203
204
205
206
207
  					offset, name, max_cycles >> 1);
  			printk_deferred("      timekeeping: Your kernel is still fine, but is feeling a bit nervous
  ");
  		}
  	}
4ca22c264   John Stultz   timekeeping: Add ...
208

57d05a93a   John Stultz   time: Rework debu...
209
210
  	if (tk->underflow_seen) {
  		if (jiffies - tk->last_warning > WARNING_FREQ) {
4ca22c264   John Stultz   timekeeping: Add ...
211
212
213
214
215
216
  			printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.
  ", name);
  			printk_deferred("         Please report this, consider using a different clocksource, if possible.
  ");
  			printk_deferred("         Your kernel is probably still fine.
  ");
57d05a93a   John Stultz   time: Rework debu...
217
  			tk->last_warning = jiffies;
4ca22c264   John Stultz   timekeeping: Add ...
218
  		}
57d05a93a   John Stultz   time: Rework debu...
219
  		tk->underflow_seen = 0;
4ca22c264   John Stultz   timekeeping: Add ...
220
  	}
57d05a93a   John Stultz   time: Rework debu...
221
222
  	if (tk->overflow_seen) {
  		if (jiffies - tk->last_warning > WARNING_FREQ) {
4ca22c264   John Stultz   timekeeping: Add ...
223
224
225
226
227
228
  			printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.
  ", name);
  			printk_deferred("         Please report this, consider using a different clocksource, if possible.
  ");
  			printk_deferred("         Your kernel is probably still fine.
  ");
57d05a93a   John Stultz   time: Rework debu...
229
  			tk->last_warning = jiffies;
4ca22c264   John Stultz   timekeeping: Add ...
230
  		}
57d05a93a   John Stultz   time: Rework debu...
231
  		tk->overflow_seen = 0;
4ca22c264   John Stultz   timekeeping: Add ...
232
  	}
3c17ad19f   John Stultz   timekeeping: Add ...
233
  }
a558cd021   John Stultz   timekeeping: Add ...
234

985e69507   Ondrej Mosnacek   timekeeping/ntp: ...
235
  static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
a558cd021   John Stultz   timekeeping: Add ...
236
  {
57d05a93a   John Stultz   time: Rework debu...
237
  	struct timekeeper *tk = &tk_core.timekeeper;
a5a1d1c29   Thomas Gleixner   clocksource: Use ...
238
  	u64 now, last, mask, max, delta;
4ca22c264   John Stultz   timekeeping: Add ...
239
  	unsigned int seq;
a558cd021   John Stultz   timekeeping: Add ...
240

4ca22c264   John Stultz   timekeeping: Add ...
241
  	/*
025e82bcb   Ahmed S. Darwish   timekeeping: Use ...
242
  	 * Since we're called holding a seqcount, the data may shift
4ca22c264   John Stultz   timekeeping: Add ...
243
244
  	 * under us while we're doing the calculation. This can cause
  	 * false positives, since we'd note a problem but throw the
025e82bcb   Ahmed S. Darwish   timekeeping: Use ...
245
  	 * results away. So nest another seqcount here to atomically
4ca22c264   John Stultz   timekeeping: Add ...
246
247
248
249
  	 * grab the points we are checking with.
  	 */
  	do {
  		seq = read_seqcount_begin(&tk_core.seq);
ceea5e377   John Stultz   time: Fix clock->...
250
  		now = tk_clock_read(tkr);
4ca22c264   John Stultz   timekeeping: Add ...
251
252
253
254
  		last = tkr->cycle_last;
  		mask = tkr->mask;
  		max = tkr->clock->max_cycles;
  	} while (read_seqcount_retry(&tk_core.seq, seq));
a558cd021   John Stultz   timekeeping: Add ...
255

4ca22c264   John Stultz   timekeeping: Add ...
256
  	delta = clocksource_delta(now, last, mask);
a558cd021   John Stultz   timekeeping: Add ...
257

057b87e31   John Stultz   timekeeping: Try ...
258
259
260
261
  	/*
  	 * Try to catch underflows by checking if we are seeing small
  	 * mask-relative negative values.
  	 */
4ca22c264   John Stultz   timekeeping: Add ...
262
  	if (unlikely((~delta & mask) < (mask >> 3))) {
57d05a93a   John Stultz   time: Rework debu...
263
  		tk->underflow_seen = 1;
057b87e31   John Stultz   timekeeping: Try ...
264
  		delta = 0;
4ca22c264   John Stultz   timekeeping: Add ...
265
  	}
057b87e31   John Stultz   timekeeping: Try ...
266

a558cd021   John Stultz   timekeeping: Add ...
267
  	/* Cap delta value to the max_cycles values to avoid mult overflows */
4ca22c264   John Stultz   timekeeping: Add ...
268
  	if (unlikely(delta > max)) {
57d05a93a   John Stultz   time: Rework debu...
269
  		tk->overflow_seen = 1;
a558cd021   John Stultz   timekeeping: Add ...
270
  		delta = tkr->clock->max_cycles;
4ca22c264   John Stultz   timekeeping: Add ...
271
  	}
a558cd021   John Stultz   timekeeping: Add ...
272
273
274
  
  	return delta;
  }
3c17ad19f   John Stultz   timekeeping: Add ...
275
  #else
a5a1d1c29   Thomas Gleixner   clocksource: Use ...
276
  static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
3c17ad19f   John Stultz   timekeeping: Add ...
277
278
  {
  }
985e69507   Ondrej Mosnacek   timekeeping/ntp: ...
279
  static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
a558cd021   John Stultz   timekeeping: Add ...
280
  {
a5a1d1c29   Thomas Gleixner   clocksource: Use ...
281
  	u64 cycle_now, delta;
a558cd021   John Stultz   timekeeping: Add ...
282
283
  
  	/* read clocksource */
ceea5e377   John Stultz   time: Fix clock->...
284
  	cycle_now = tk_clock_read(tkr);
a558cd021   John Stultz   timekeeping: Add ...
285
286
287
288
289
290
  
  	/* calculate the delta since the last update_wall_time */
  	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
  
  	return delta;
  }
3c17ad19f   John Stultz   timekeeping: Add ...
291
  #endif
155ec6022   Martin Schwidefsky   timekeeping: Intr...
292
  /**
d26e4fe0d   Yijing Wang   timekeeper: fix c...
293
   * tk_setup_internals - Set up internals to use clocksource clock.
155ec6022   Martin Schwidefsky   timekeeping: Intr...
294
   *
d26e4fe0d   Yijing Wang   timekeeper: fix c...
295
   * @tk:		The target timekeeper to setup.
155ec6022   Martin Schwidefsky   timekeeping: Intr...
296
297
298
299
300
301
302
   * @clock:		Pointer to clocksource.
   *
   * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
   * pair and interval request.
   *
   * Unless you're the timekeeping code, you should not be using this!
   */
f726a697d   John Stultz   time: Rework time...
303
  static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
155ec6022   Martin Schwidefsky   timekeeping: Intr...
304
  {
a5a1d1c29   Thomas Gleixner   clocksource: Use ...
305
  	u64 interval;
a386b5af8   Kasper Pedersen   time: Compensate ...
306
  	u64 tmp, ntpinterval;
1e75fa8be   John Stultz   time: Condense ti...
307
  	struct clocksource *old_clock;
155ec6022   Martin Schwidefsky   timekeeping: Intr...
308

2c756feb1   Christopher S. Hall   time: Add history...
309
  	++tk->cs_was_changed_seq;
876e78818   Peter Zijlstra   time: Rename time...
310
311
  	old_clock = tk->tkr_mono.clock;
  	tk->tkr_mono.clock = clock;
876e78818   Peter Zijlstra   time: Rename time...
312
  	tk->tkr_mono.mask = clock->mask;
ceea5e377   John Stultz   time: Fix clock->...
313
  	tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
155ec6022   Martin Schwidefsky   timekeeping: Intr...
314

4a4ad80d3   Peter Zijlstra   time: Add timerke...
315
  	tk->tkr_raw.clock = clock;
4a4ad80d3   Peter Zijlstra   time: Add timerke...
316
317
  	tk->tkr_raw.mask = clock->mask;
  	tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
155ec6022   Martin Schwidefsky   timekeeping: Intr...
318
319
320
  	/* Do the ns -> cycle conversion first, using original mult */
  	tmp = NTP_INTERVAL_LENGTH;
  	tmp <<= clock->shift;
a386b5af8   Kasper Pedersen   time: Compensate ...
321
  	ntpinterval = tmp;
0a5441983   Martin Schwidefsky   timekeeping: Move...
322
323
  	tmp += clock->mult/2;
  	do_div(tmp, clock->mult);
155ec6022   Martin Schwidefsky   timekeeping: Intr...
324
325
  	if (tmp == 0)
  		tmp = 1;
a5a1d1c29   Thomas Gleixner   clocksource: Use ...
326
  	interval = (u64) tmp;
f726a697d   John Stultz   time: Rework time...
327
  	tk->cycle_interval = interval;
155ec6022   Martin Schwidefsky   timekeeping: Intr...
328
329
  
  	/* Go back from cycles -> shifted ns */
cbd99e3b2   Thomas Gleixner   timekeeping: Get ...
330
  	tk->xtime_interval = interval * clock->mult;
f726a697d   John Stultz   time: Rework time...
331
  	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
3d88d56c5   John Stultz   time: Fix CLOCK_M...
332
  	tk->raw_interval = interval * clock->mult;
155ec6022   Martin Schwidefsky   timekeeping: Intr...
333

1e75fa8be   John Stultz   time: Condense ti...
334
335
336
  	 /* if changing clocks, convert xtime_nsec shift units */
  	if (old_clock) {
  		int shift_change = clock->shift - old_clock->shift;
fc6eead7c   John Stultz   time: Clean up CL...
337
  		if (shift_change < 0) {
876e78818   Peter Zijlstra   time: Rename time...
338
  			tk->tkr_mono.xtime_nsec >>= -shift_change;
fc6eead7c   John Stultz   time: Clean up CL...
339
340
  			tk->tkr_raw.xtime_nsec >>= -shift_change;
  		} else {
876e78818   Peter Zijlstra   time: Rename time...
341
  			tk->tkr_mono.xtime_nsec <<= shift_change;
fc6eead7c   John Stultz   time: Clean up CL...
342
343
  			tk->tkr_raw.xtime_nsec <<= shift_change;
  		}
1e75fa8be   John Stultz   time: Condense ti...
344
  	}
4a4ad80d3   Peter Zijlstra   time: Add timerke...
345

876e78818   Peter Zijlstra   time: Rename time...
346
  	tk->tkr_mono.shift = clock->shift;
4a4ad80d3   Peter Zijlstra   time: Add timerke...
347
  	tk->tkr_raw.shift = clock->shift;
155ec6022   Martin Schwidefsky   timekeeping: Intr...
348

f726a697d   John Stultz   time: Rework time...
349
350
  	tk->ntp_error = 0;
  	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
375f45b5b   John Stultz   timekeeping: Use ...
351
  	tk->ntp_tick = ntpinterval << tk->ntp_error_shift;
0a5441983   Martin Schwidefsky   timekeeping: Move...
352
353
354
355
356
357
  
  	/*
  	 * The timekeeper keeps its own mult values for the currently
  	 * active clocksource. These value will be adjusted via NTP
  	 * to counteract clock drifting.
  	 */
876e78818   Peter Zijlstra   time: Rename time...
358
  	tk->tkr_mono.mult = clock->mult;
4a4ad80d3   Peter Zijlstra   time: Add timerke...
359
  	tk->tkr_raw.mult = clock->mult;
dc491596f   John Stultz   timekeeping: Rewo...
360
  	tk->ntp_err_mult = 0;
78b98e3c5   Miroslav Lichvar   timekeeping/ntp: ...
361
  	tk->skip_second_overflow = 0;
155ec6022   Martin Schwidefsky   timekeeping: Intr...
362
  }
8524070b7   John Stultz   Move timekeeping ...
363

2ba2a3054   Martin Schwidefsky   timekeeping: Add ...
364
  /* Timekeeper helper functions. */
7b1f62076   Stephen Warren   time: convert arc...
365
366
  
  #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
e06fde37b   Thomas Gleixner   timekeeping: Simp...
367
368
  static u32 default_arch_gettimeoffset(void) { return 0; }
  u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
7b1f62076   Stephen Warren   time: convert arc...
369
  #else
e06fde37b   Thomas Gleixner   timekeeping: Simp...
370
  static inline u32 arch_gettimeoffset(void) { return 0; }
7b1f62076   Stephen Warren   time: convert arc...
371
  #endif
985e69507   Ondrej Mosnacek   timekeeping/ntp: ...
372
  static inline u64 timekeeping_delta_to_ns(const struct tk_read_base *tkr, u64 delta)
6bd58f09e   Christopher S. Hall   time: Add cycles ...
373
  {
9c1645727   Thomas Gleixner   timekeeping_Force...
374
  	u64 nsec;
6bd58f09e   Christopher S. Hall   time: Add cycles ...
375
376
377
378
379
380
381
  
  	nsec = delta * tkr->mult + tkr->xtime_nsec;
  	nsec >>= tkr->shift;
  
  	/* If arch requires, add in get_arch_timeoffset() */
  	return nsec + arch_gettimeoffset();
  }
985e69507   Ondrej Mosnacek   timekeeping/ntp: ...
382
  static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
2ba2a3054   Martin Schwidefsky   timekeeping: Add ...
383
  {
a5a1d1c29   Thomas Gleixner   clocksource: Use ...
384
  	u64 delta;
2ba2a3054   Martin Schwidefsky   timekeeping: Add ...
385

a558cd021   John Stultz   timekeeping: Add ...
386
  	delta = timekeeping_get_delta(tkr);
6bd58f09e   Christopher S. Hall   time: Add cycles ...
387
388
  	return timekeeping_delta_to_ns(tkr, delta);
  }
2ba2a3054   Martin Schwidefsky   timekeeping: Add ...
389

985e69507   Ondrej Mosnacek   timekeeping/ntp: ...
390
  static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
6bd58f09e   Christopher S. Hall   time: Add cycles ...
391
  {
a5a1d1c29   Thomas Gleixner   clocksource: Use ...
392
  	u64 delta;
f2a5a0854   John Stultz   time: Move arch_g...
393

6bd58f09e   Christopher S. Hall   time: Add cycles ...
394
395
396
  	/* calculate the delta since the last update_wall_time */
  	delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
  	return timekeeping_delta_to_ns(tkr, delta);
2ba2a3054   Martin Schwidefsky   timekeeping: Add ...
397
  }
4396e058c   Thomas Gleixner   timekeeping: Prov...
398
399
  /**
   * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
affe3e85a   Rafael J. Wysocki   timekeeping: Pass...
400
   * @tkr: Timekeeping readout base from which we take the update
4396e058c   Thomas Gleixner   timekeeping: Prov...
401
402
403
404
   *
   * We want to use this from any context including NMI and tracing /
   * instrumenting the timekeeping code itself.
   *
6695b92a6   Peter Zijlstra   seqlock: Better d...
405
   * Employ the latch technique; see @raw_write_seqcount_latch.
4396e058c   Thomas Gleixner   timekeeping: Prov...
406
407
408
409
410
411
   *
   * So if a NMI hits the update of base[0] then it will use base[1]
   * which is still consistent. In the worst case this can result is a
   * slightly wrong timestamp (a few nanoseconds). See
   * @ktime_get_mono_fast_ns.
   */
985e69507   Ondrej Mosnacek   timekeeping/ntp: ...
412
413
  static void update_fast_timekeeper(const struct tk_read_base *tkr,
  				   struct tk_fast *tkf)
4396e058c   Thomas Gleixner   timekeeping: Prov...
414
  {
4498e7467   Peter Zijlstra   time: Parametrize...
415
  	struct tk_read_base *base = tkf->base;
4396e058c   Thomas Gleixner   timekeeping: Prov...
416
417
  
  	/* Force readers off to base[1] */
4498e7467   Peter Zijlstra   time: Parametrize...
418
  	raw_write_seqcount_latch(&tkf->seq);
4396e058c   Thomas Gleixner   timekeeping: Prov...
419
420
  
  	/* Update base[0] */
affe3e85a   Rafael J. Wysocki   timekeeping: Pass...
421
  	memcpy(base, tkr, sizeof(*base));
4396e058c   Thomas Gleixner   timekeeping: Prov...
422
423
  
  	/* Force readers back to base[0] */
4498e7467   Peter Zijlstra   time: Parametrize...
424
  	raw_write_seqcount_latch(&tkf->seq);
4396e058c   Thomas Gleixner   timekeeping: Prov...
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
  
  	/* Update base[1] */
  	memcpy(base + 1, base, sizeof(*base));
  }
  
  /**
   * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
   *
   * This timestamp is not guaranteed to be monotonic across an update.
   * The timestamp is calculated by:
   *
   *	now = base_mono + clock_delta * slope
   *
   * So if the update lowers the slope, readers who are forced to the
   * not yet updated second array are still using the old steeper slope.
   *
   * tmono
   * ^
   * |    o  n
   * |   o n
   * |  u
   * | o
   * |o
   * |12345678---> reader order
   *
   * o = old slope
   * u = update
   * n = new slope
   *
   * So reader 6 will observe time going backwards versus reader 5.
   *
   * While other CPUs are likely to be able observe that, the only way
   * for a CPU local observation is when an NMI hits in the middle of
   * the update. Timestamps taken from that NMI context might be ahead
   * of the following timestamps. Callers need to be aware of that and
   * deal with it.
   */
4498e7467   Peter Zijlstra   time: Parametrize...
462
  static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
4396e058c   Thomas Gleixner   timekeeping: Prov...
463
464
465
466
467
468
  {
  	struct tk_read_base *tkr;
  	unsigned int seq;
  	u64 now;
  
  	do {
7fc26327b   Peter Zijlstra   seqlock: Introduc...
469
  		seq = raw_read_seqcount_latch(&tkf->seq);
4498e7467   Peter Zijlstra   time: Parametrize...
470
  		tkr = tkf->base + (seq & 0x01);
27727df24   John Stultz   timekeeping: Avoi...
471
  		now = ktime_to_ns(tkr->base);
58bfea953   John Stultz   timekeeping: Fix ...
472
473
  		now += timekeeping_delta_to_ns(tkr,
  				clocksource_delta(
ceea5e377   John Stultz   time: Fix clock->...
474
  					tk_clock_read(tkr),
58bfea953   John Stultz   timekeeping: Fix ...
475
476
  					tkr->cycle_last,
  					tkr->mask));
249d05383   Ahmed S. Darwish   timekeeping: Use ...
477
  	} while (read_seqcount_latch_retry(&tkf->seq, seq));
4396e058c   Thomas Gleixner   timekeeping: Prov...
478

4396e058c   Thomas Gleixner   timekeeping: Prov...
479
480
  	return now;
  }
4498e7467   Peter Zijlstra   time: Parametrize...
481
482
483
484
485
  
  u64 ktime_get_mono_fast_ns(void)
  {
  	return __ktime_get_fast_ns(&tk_fast_mono);
  }
4396e058c   Thomas Gleixner   timekeeping: Prov...
486
  EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
f09cb9a18   Peter Zijlstra   time: Introduce t...
487
488
489
490
491
  u64 ktime_get_raw_fast_ns(void)
  {
  	return __ktime_get_fast_ns(&tk_fast_raw);
  }
  EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
a3ed0e439   Thomas Gleixner   Revert: Unify CLO...
492
493
494
495
496
  /**
   * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
   *
   * To keep it NMI safe since we're accessing from tracing, we're not using a
   * separate timekeeper with updates to monotonic clock and boot offset
025e82bcb   Ahmed S. Darwish   timekeeping: Use ...
497
   * protected with seqcounts. This has the following minor side effects:
a3ed0e439   Thomas Gleixner   Revert: Unify CLO...
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
   *
   * (1) Its possible that a timestamp be taken after the boot offset is updated
   * but before the timekeeper is updated. If this happens, the new boot offset
   * is added to the old timekeeping making the clock appear to update slightly
   * earlier:
   *    CPU 0                                        CPU 1
   *    timekeeping_inject_sleeptime64()
   *    __timekeeping_inject_sleeptime(tk, delta);
   *                                                 timestamp();
   *    timekeeping_update(tk, TK_CLEAR_NTP...);
   *
   * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
   * partially updated.  Since the tk->offs_boot update is a rare event, this
   * should be a rare occurrence which postprocessing should be able to handle.
   */
  u64 notrace ktime_get_boot_fast_ns(void)
  {
  	struct timekeeper *tk = &tk_core.timekeeper;
  
  	return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
  }
  EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
4c3711d7f   Thomas Gleixner   timekeeping: Prov...
520
521
522
  /*
   * See comment for __ktime_get_fast_ns() vs. timestamp ordering
   */
e2d977c9f   Thomas Gleixner   timekeeping: Prov...
523
  static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
4c3711d7f   Thomas Gleixner   timekeeping: Prov...
524
525
  {
  	struct tk_read_base *tkr;
e2d977c9f   Thomas Gleixner   timekeeping: Prov...
526
  	u64 basem, baser, delta;
4c3711d7f   Thomas Gleixner   timekeeping: Prov...
527
  	unsigned int seq;
4c3711d7f   Thomas Gleixner   timekeeping: Prov...
528
529
530
531
  
  	do {
  		seq = raw_read_seqcount_latch(&tkf->seq);
  		tkr = tkf->base + (seq & 0x01);
e2d977c9f   Thomas Gleixner   timekeeping: Prov...
532
533
  		basem = ktime_to_ns(tkr->base);
  		baser = ktime_to_ns(tkr->base_real);
4c3711d7f   Thomas Gleixner   timekeeping: Prov...
534

e2d977c9f   Thomas Gleixner   timekeeping: Prov...
535
536
537
  		delta = timekeeping_delta_to_ns(tkr,
  				clocksource_delta(tk_clock_read(tkr),
  				tkr->cycle_last, tkr->mask));
249d05383   Ahmed S. Darwish   timekeeping: Use ...
538
  	} while (read_seqcount_latch_retry(&tkf->seq, seq));
4c3711d7f   Thomas Gleixner   timekeeping: Prov...
539

e2d977c9f   Thomas Gleixner   timekeeping: Prov...
540
541
542
  	if (mono)
  		*mono = basem + delta;
  	return baser + delta;
4c3711d7f   Thomas Gleixner   timekeeping: Prov...
543
544
545
546
547
548
549
  }
  
  /**
   * ktime_get_real_fast_ns: - NMI safe and fast access to clock realtime.
   */
  u64 ktime_get_real_fast_ns(void)
  {
e2d977c9f   Thomas Gleixner   timekeeping: Prov...
550
  	return __ktime_get_real_fast(&tk_fast_mono, NULL);
4c3711d7f   Thomas Gleixner   timekeeping: Prov...
551
  }
df27067e6   Arnd Bergmann   pstore: Use ktime...
552
  EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
4c3711d7f   Thomas Gleixner   timekeeping: Prov...
553

060407aed   Rafael J. Wysocki   timekeeping: Make...
554
  /**
e2d977c9f   Thomas Gleixner   timekeeping: Prov...
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
   * ktime_get_fast_timestamps: - NMI safe timestamps
   * @snapshot:	Pointer to timestamp storage
   *
   * Stores clock monotonic, boottime and realtime timestamps.
   *
   * Boot time is a racy access on 32bit systems if the sleep time injection
   * happens late during resume and not in timekeeping_resume(). That could
   * be avoided by expanding struct tk_read_base with boot offset for 32bit
   * and adding more overhead to the update. As this is a hard to observe
   * once per resume event which can be filtered with reasonable effort using
   * the accurate mono/real timestamps, it's probably not worth the trouble.
   *
   * Aside of that it might be possible on 32 and 64 bit to observe the
   * following when the sleep time injection happens late:
   *
   * CPU 0				CPU 1
   * timekeeping_resume()
   * ktime_get_fast_timestamps()
   *	mono, real = __ktime_get_real_fast()
   *					inject_sleep_time()
   *					   update boot offset
   *	boot = mono + bootoffset;
   *
   * That means that boot time already has the sleep time adjustment, but
   * real time does not. On the next readout both are in sync again.
   *
   * Preventing this for 64bit is not really feasible without destroying the
   * careful cache layout of the timekeeper because the sequence count and
   * struct tk_read_base would then need two cache lines instead of one.
   *
   * Access to the time keeper clock source is disabled accross the innermost
   * steps of suspend/resume. The accessors still work, but the timestamps
   * are frozen until time keeping is resumed which happens very early.
   *
   * For regular suspend/resume there is no observable difference vs. sched
   * clock, but it might affect some of the nasty low level debug printks.
   *
   * OTOH, access to sched clock is not guaranteed accross suspend/resume on
   * all systems either so it depends on the hardware in use.
   *
   * If that turns out to be a real problem then this could be mitigated by
   * using sched clock in a similar way as during early boot. But it's not as
   * trivial as on early boot because it needs some careful protection
   * against the clock monotonic timestamp jumping backwards on resume.
   */
  void ktime_get_fast_timestamps(struct ktime_timestamps *snapshot)
  {
  	struct timekeeper *tk = &tk_core.timekeeper;
  
  	snapshot->real = __ktime_get_real_fast(&tk_fast_mono, &snapshot->mono);
  	snapshot->boot = snapshot->mono + ktime_to_ns(data_race(tk->offs_boot));
  }
  
  /**
060407aed   Rafael J. Wysocki   timekeeping: Make...
609
610
611
612
613
614
615
616
617
   * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
   * @tk: Timekeeper to snapshot.
   *
   * It generally is unsafe to access the clocksource after timekeeping has been
   * suspended, so take a snapshot of the readout base of @tk and use it as the
   * fast timekeeper's readout base while suspended.  It will return the same
   * number of cycles every time until timekeeping is resumed at which time the
   * proper readout base for the fast timekeeper will be restored automatically.
   */
985e69507   Ondrej Mosnacek   timekeeping/ntp: ...
618
  static void halt_fast_timekeeper(const struct timekeeper *tk)
060407aed   Rafael J. Wysocki   timekeeping: Make...
619
620
  {
  	static struct tk_read_base tkr_dummy;
985e69507   Ondrej Mosnacek   timekeeping/ntp: ...
621
  	const struct tk_read_base *tkr = &tk->tkr_mono;
060407aed   Rafael J. Wysocki   timekeeping: Make...
622
623
  
  	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
ceea5e377   John Stultz   time: Fix clock->...
624
625
  	cycles_at_suspend = tk_clock_read(tkr);
  	tkr_dummy.clock = &dummy_clock;
4c3711d7f   Thomas Gleixner   timekeeping: Prov...
626
  	tkr_dummy.base_real = tkr->base + tk->offs_real;
4498e7467   Peter Zijlstra   time: Parametrize...
627
  	update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
f09cb9a18   Peter Zijlstra   time: Introduce t...
628
629
630
  
  	tkr = &tk->tkr_raw;
  	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
ceea5e377   John Stultz   time: Fix clock->...
631
  	tkr_dummy.clock = &dummy_clock;
f09cb9a18   Peter Zijlstra   time: Introduce t...
632
  	update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
060407aed   Rafael J. Wysocki   timekeeping: Make...
633
  }
e0b306fef   Marcelo Tosatti   time: export time...
634
  static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
780427f0e   David Vrabel   timekeeping: Indi...
635
  static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
e0b306fef   Marcelo Tosatti   time: export time...
636
  {
780427f0e   David Vrabel   timekeeping: Indi...
637
  	raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
e0b306fef   Marcelo Tosatti   time: export time...
638
639
640
641
  }
  
  /**
   * pvclock_gtod_register_notifier - register a pvclock timedata update listener
e0b306fef   Marcelo Tosatti   time: export time...
642
643
644
   */
  int pvclock_gtod_register_notifier(struct notifier_block *nb)
  {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
645
  	struct timekeeper *tk = &tk_core.timekeeper;
e0b306fef   Marcelo Tosatti   time: export time...
646
647
  	unsigned long flags;
  	int ret;
9a7a71b1d   Thomas Gleixner   timekeeping: Spli...
648
  	raw_spin_lock_irqsave(&timekeeper_lock, flags);
e0b306fef   Marcelo Tosatti   time: export time...
649
  	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
780427f0e   David Vrabel   timekeeping: Indi...
650
  	update_pvclock_gtod(tk, true);
9a7a71b1d   Thomas Gleixner   timekeeping: Spli...
651
  	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
e0b306fef   Marcelo Tosatti   time: export time...
652
653
654
655
656
657
658
659
  
  	return ret;
  }
  EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
  
  /**
   * pvclock_gtod_unregister_notifier - unregister a pvclock
   * timedata update listener
e0b306fef   Marcelo Tosatti   time: export time...
660
661
662
   */
  int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
  {
e0b306fef   Marcelo Tosatti   time: export time...
663
664
  	unsigned long flags;
  	int ret;
9a7a71b1d   Thomas Gleixner   timekeeping: Spli...
665
  	raw_spin_lock_irqsave(&timekeeper_lock, flags);
e0b306fef   Marcelo Tosatti   time: export time...
666
  	ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
9a7a71b1d   Thomas Gleixner   timekeeping: Spli...
667
  	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
e0b306fef   Marcelo Tosatti   time: export time...
668
669
670
671
  
  	return ret;
  }
  EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
7c032df55   Thomas Gleixner   timekeeping: Prov...
672
  /*
833f32d76   John Stultz   time: Prevent ear...
673
674
675
676
677
   * tk_update_leap_state - helper to update the next_leap_ktime
   */
  static inline void tk_update_leap_state(struct timekeeper *tk)
  {
  	tk->next_leap_ktime = ntp_get_next_leap();
2456e8553   Thomas Gleixner   ktime: Get rid of...
678
  	if (tk->next_leap_ktime != KTIME_MAX)
833f32d76   John Stultz   time: Prevent ear...
679
680
681
682
683
  		/* Convert to monotonic time */
  		tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
  }
  
  /*
7c032df55   Thomas Gleixner   timekeeping: Prov...
684
685
686
687
   * Update the ktime_t based scalar nsec members of the timekeeper
   */
  static inline void tk_update_ktime_data(struct timekeeper *tk)
  {
9e3680b17   Heena Sirwani   timekeeping: Prov...
688
689
  	u64 seconds;
  	u32 nsec;
7c032df55   Thomas Gleixner   timekeeping: Prov...
690
691
692
693
694
695
696
697
  
  	/*
  	 * The xtime based monotonic readout is:
  	 *	nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
  	 * The ktime based monotonic readout is:
  	 *	nsec = base_mono + now();
  	 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
  	 */
9e3680b17   Heena Sirwani   timekeeping: Prov...
698
699
  	seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
  	nsec = (u32) tk->wall_to_monotonic.tv_nsec;
876e78818   Peter Zijlstra   time: Rename time...
700
  	tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
f519b1a2e   Thomas Gleixner   timekeeping: Prov...
701

9e3680b17   Heena Sirwani   timekeeping: Prov...
702
703
704
705
706
  	/*
  	 * The sum of the nanoseconds portions of xtime and
  	 * wall_to_monotonic can be greater/equal one second. Take
  	 * this into account before updating tk->ktime_sec.
  	 */
876e78818   Peter Zijlstra   time: Rename time...
707
  	nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
9e3680b17   Heena Sirwani   timekeeping: Prov...
708
709
710
  	if (nsec >= NSEC_PER_SEC)
  		seconds++;
  	tk->ktime_sec = seconds;
fc6eead7c   John Stultz   time: Clean up CL...
711
712
  
  	/* Update the monotonic raw base */
0bcdc0987   John Stultz   time: Fix ktime_g...
713
  	tk->tkr_raw.base = ns_to_ktime(tk->raw_sec * NSEC_PER_SEC);
7c032df55   Thomas Gleixner   timekeeping: Prov...
714
  }
9a7a71b1d   Thomas Gleixner   timekeeping: Spli...
715
  /* must hold timekeeper_lock */
04397fe94   David Vrabel   timekeeping: Pass...
716
  static void timekeeping_update(struct timekeeper *tk, unsigned int action)
cc06268c6   Thomas Gleixner   time: Move common...
717
  {
04397fe94   David Vrabel   timekeeping: Pass...
718
  	if (action & TK_CLEAR_NTP) {
f726a697d   John Stultz   time: Rework time...
719
  		tk->ntp_error = 0;
cc06268c6   Thomas Gleixner   time: Move common...
720
721
  		ntp_clear();
  	}
48cdc135d   Thomas Gleixner   timekeeping: Impl...
722

833f32d76   John Stultz   time: Prevent ear...
723
  	tk_update_leap_state(tk);
7c032df55   Thomas Gleixner   timekeeping: Prov...
724
  	tk_update_ktime_data(tk);
9bf2419fa   Thomas Gleixner   timekeeping: Upda...
725
726
  	update_vsyscall(tk);
  	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
4c3711d7f   Thomas Gleixner   timekeeping: Prov...
727
  	tk->tkr_mono.base_real = tk->tkr_mono.base + tk->offs_real;
4498e7467   Peter Zijlstra   time: Parametrize...
728
  	update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
f09cb9a18   Peter Zijlstra   time: Introduce t...
729
  	update_fast_timekeeper(&tk->tkr_raw,  &tk_fast_raw);
868a3e915   Thomas Gleixner   hrtimer: Make off...
730
731
732
  
  	if (action & TK_CLOCK_WAS_SET)
  		tk->clock_was_set_seq++;
d15183265   John Stultz   time: Move clock_...
733
734
735
736
737
738
739
740
  	/*
  	 * The mirroring of the data to the shadow-timekeeper needs
  	 * to happen last here to ensure we don't over-write the
  	 * timekeeper structure on the next update with stale data
  	 */
  	if (action & TK_MIRROR)
  		memcpy(&shadow_timekeeper, &tk_core.timekeeper,
  		       sizeof(tk_core.timekeeper));
cc06268c6   Thomas Gleixner   time: Move common...
741
  }
8524070b7   John Stultz   Move timekeeping ...
742
  /**
155ec6022   Martin Schwidefsky   timekeeping: Intr...
743
   * timekeeping_forward_now - update clock to the current time
8524070b7   John Stultz   Move timekeeping ...
744
   *
9a055117d   Roman Zippel   clocksource: intr...
745
746
747
   * Forward the current clock to update its state since the last call to
   * update_wall_time(). This is useful before significant clock changes,
   * as it avoids having to deal with this time offset explicitly.
8524070b7   John Stultz   Move timekeeping ...
748
   */
f726a697d   John Stultz   time: Rework time...
749
  static void timekeeping_forward_now(struct timekeeper *tk)
8524070b7   John Stultz   Move timekeeping ...
750
  {
a5a1d1c29   Thomas Gleixner   clocksource: Use ...
751
  	u64 cycle_now, delta;
8524070b7   John Stultz   Move timekeeping ...
752

ceea5e377   John Stultz   time: Fix clock->...
753
  	cycle_now = tk_clock_read(&tk->tkr_mono);
876e78818   Peter Zijlstra   time: Rename time...
754
755
  	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
  	tk->tkr_mono.cycle_last = cycle_now;
4a4ad80d3   Peter Zijlstra   time: Add timerke...
756
  	tk->tkr_raw.cycle_last  = cycle_now;
8524070b7   John Stultz   Move timekeeping ...
757

876e78818   Peter Zijlstra   time: Rename time...
758
  	tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;
7d27558c4   John Stultz   timekeeping: crea...
759

7b1f62076   Stephen Warren   time: convert arc...
760
  	/* If arch requires, add in get_arch_timeoffset() */
876e78818   Peter Zijlstra   time: Rename time...
761
  	tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;
7d27558c4   John Stultz   timekeeping: crea...
762

2d42244ae   John Stultz   clocksource: intr...
763

fc6eead7c   John Stultz   time: Clean up CL...
764
765
766
767
768
769
  	tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;
  
  	/* If arch requires, add in get_arch_timeoffset() */
  	tk->tkr_raw.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_raw.shift;
  
  	tk_normalize_xtime(tk);
8524070b7   John Stultz   Move timekeeping ...
770
771
772
  }
  
  /**
edca71fec   Arnd Bergmann   timekeeping: Clea...
773
   * ktime_get_real_ts64 - Returns the time of day in a timespec64.
8524070b7   John Stultz   Move timekeeping ...
774
775
   * @ts:		pointer to the timespec to be set
   *
edca71fec   Arnd Bergmann   timekeeping: Clea...
776
   * Returns the time of day in a timespec64 (WARN if suspended).
8524070b7   John Stultz   Move timekeeping ...
777
   */
edca71fec   Arnd Bergmann   timekeeping: Clea...
778
  void ktime_get_real_ts64(struct timespec64 *ts)
8524070b7   John Stultz   Move timekeeping ...
779
  {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
780
  	struct timekeeper *tk = &tk_core.timekeeper;
e1e41b6ce   Rasmus Villemoes   timekeeping: Cons...
781
  	unsigned int seq;
acc89612a   Thomas Gleixner   timekeeping: Make...
782
  	u64 nsecs;
8524070b7   John Stultz   Move timekeeping ...
783

edca71fec   Arnd Bergmann   timekeeping: Clea...
784
  	WARN_ON(timekeeping_suspended);
8524070b7   John Stultz   Move timekeeping ...
785
  	do {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
786
  		seq = read_seqcount_begin(&tk_core.seq);
8524070b7   John Stultz   Move timekeeping ...
787

4e250fdde   John Stultz   time: Remove all ...
788
  		ts->tv_sec = tk->xtime_sec;
876e78818   Peter Zijlstra   time: Rename time...
789
  		nsecs = timekeeping_get_ns(&tk->tkr_mono);
8524070b7   John Stultz   Move timekeeping ...
790

3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
791
  	} while (read_seqcount_retry(&tk_core.seq, seq));
8524070b7   John Stultz   Move timekeeping ...
792

ec145babe   John Stultz   time: Fix timeeke...
793
  	ts->tv_nsec = 0;
d6d29896c   Thomas Gleixner   timekeeping: Prov...
794
  	timespec64_add_ns(ts, nsecs);
8524070b7   John Stultz   Move timekeeping ...
795
  }
edca71fec   Arnd Bergmann   timekeeping: Clea...
796
  EXPORT_SYMBOL(ktime_get_real_ts64);
8524070b7   John Stultz   Move timekeeping ...
797

951ed4d36   Martin Schwidefsky   timekeeping: opti...
798
799
  ktime_t ktime_get(void)
  {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
800
  	struct timekeeper *tk = &tk_core.timekeeper;
951ed4d36   Martin Schwidefsky   timekeeping: opti...
801
  	unsigned int seq;
a016a5bd6   Thomas Gleixner   timekeeping: Use ...
802
  	ktime_t base;
acc89612a   Thomas Gleixner   timekeeping: Make...
803
  	u64 nsecs;
951ed4d36   Martin Schwidefsky   timekeeping: opti...
804
805
806
807
  
  	WARN_ON(timekeeping_suspended);
  
  	do {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
808
  		seq = read_seqcount_begin(&tk_core.seq);
876e78818   Peter Zijlstra   time: Rename time...
809
810
  		base = tk->tkr_mono.base;
  		nsecs = timekeeping_get_ns(&tk->tkr_mono);
951ed4d36   Martin Schwidefsky   timekeeping: opti...
811

3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
812
  	} while (read_seqcount_retry(&tk_core.seq, seq));
24e4a8c3e   John Stultz   ktime: Kill non-s...
813

a016a5bd6   Thomas Gleixner   timekeeping: Use ...
814
  	return ktime_add_ns(base, nsecs);
951ed4d36   Martin Schwidefsky   timekeeping: opti...
815
816
  }
  EXPORT_SYMBOL_GPL(ktime_get);
6374f9124   Harald Geyer   timekeeping: Prov...
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
  u32 ktime_get_resolution_ns(void)
  {
  	struct timekeeper *tk = &tk_core.timekeeper;
  	unsigned int seq;
  	u32 nsecs;
  
  	WARN_ON(timekeeping_suspended);
  
  	do {
  		seq = read_seqcount_begin(&tk_core.seq);
  		nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
  	} while (read_seqcount_retry(&tk_core.seq, seq));
  
  	return nsecs;
  }
  EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);
0077dc60f   Thomas Gleixner   timekeeping: Prov...
833
834
  static ktime_t *offsets[TK_OFFS_MAX] = {
  	[TK_OFFS_REAL]	= &tk_core.timekeeper.offs_real,
a3ed0e439   Thomas Gleixner   Revert: Unify CLO...
835
  	[TK_OFFS_BOOT]	= &tk_core.timekeeper.offs_boot,
0077dc60f   Thomas Gleixner   timekeeping: Prov...
836
837
838
839
840
841
842
843
  	[TK_OFFS_TAI]	= &tk_core.timekeeper.offs_tai,
  };
  
  ktime_t ktime_get_with_offset(enum tk_offsets offs)
  {
  	struct timekeeper *tk = &tk_core.timekeeper;
  	unsigned int seq;
  	ktime_t base, *offset = offsets[offs];
acc89612a   Thomas Gleixner   timekeeping: Make...
844
  	u64 nsecs;
0077dc60f   Thomas Gleixner   timekeeping: Prov...
845
846
847
848
849
  
  	WARN_ON(timekeeping_suspended);
  
  	do {
  		seq = read_seqcount_begin(&tk_core.seq);
876e78818   Peter Zijlstra   time: Rename time...
850
851
  		base = ktime_add(tk->tkr_mono.base, *offset);
  		nsecs = timekeeping_get_ns(&tk->tkr_mono);
0077dc60f   Thomas Gleixner   timekeeping: Prov...
852
853
854
855
856
857
858
  
  	} while (read_seqcount_retry(&tk_core.seq, seq));
  
  	return ktime_add_ns(base, nsecs);
  
  }
  EXPORT_SYMBOL_GPL(ktime_get_with_offset);
b9ff604cf   Arnd Bergmann   timekeeping: Add ...
859
860
861
862
863
  ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
  {
  	struct timekeeper *tk = &tk_core.timekeeper;
  	unsigned int seq;
  	ktime_t base, *offset = offsets[offs];
e3ff9c367   Thomas Gleixner   timekeeping: Repa...
864
  	u64 nsecs;
b9ff604cf   Arnd Bergmann   timekeeping: Add ...
865
866
867
868
869
870
  
  	WARN_ON(timekeeping_suspended);
  
  	do {
  		seq = read_seqcount_begin(&tk_core.seq);
  		base = ktime_add(tk->tkr_mono.base, *offset);
e3ff9c367   Thomas Gleixner   timekeeping: Repa...
871
  		nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
b9ff604cf   Arnd Bergmann   timekeeping: Add ...
872
873
  
  	} while (read_seqcount_retry(&tk_core.seq, seq));
0354c1a3c   Jason A. Donenfeld   timekeeping: Use ...
874
  	return ktime_add_ns(base, nsecs);
b9ff604cf   Arnd Bergmann   timekeeping: Add ...
875
876
  }
  EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);
951ed4d36   Martin Schwidefsky   timekeeping: opti...
877
  /**
9a6b51976   Thomas Gleixner   timekeeping: Prov...
878
879
880
881
882
883
884
   * ktime_mono_to_any() - convert mononotic time to any other time
   * @tmono:	time to convert.
   * @offs:	which offset to use
   */
  ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
  {
  	ktime_t *offset = offsets[offs];
e1e41b6ce   Rasmus Villemoes   timekeeping: Cons...
885
  	unsigned int seq;
9a6b51976   Thomas Gleixner   timekeeping: Prov...
886
887
888
889
890
891
892
893
894
895
896
897
  	ktime_t tconv;
  
  	do {
  		seq = read_seqcount_begin(&tk_core.seq);
  		tconv = ktime_add(tmono, *offset);
  	} while (read_seqcount_retry(&tk_core.seq, seq));
  
  	return tconv;
  }
  EXPORT_SYMBOL_GPL(ktime_mono_to_any);
  
  /**
f519b1a2e   Thomas Gleixner   timekeeping: Prov...
898
899
900
901
902
903
904
   * ktime_get_raw - Returns the raw monotonic time in ktime_t format
   */
  ktime_t ktime_get_raw(void)
  {
  	struct timekeeper *tk = &tk_core.timekeeper;
  	unsigned int seq;
  	ktime_t base;
acc89612a   Thomas Gleixner   timekeeping: Make...
905
  	u64 nsecs;
f519b1a2e   Thomas Gleixner   timekeeping: Prov...
906
907
908
  
  	do {
  		seq = read_seqcount_begin(&tk_core.seq);
4a4ad80d3   Peter Zijlstra   time: Add timerke...
909
910
  		base = tk->tkr_raw.base;
  		nsecs = timekeeping_get_ns(&tk->tkr_raw);
f519b1a2e   Thomas Gleixner   timekeeping: Prov...
911
912
913
914
915
916
917
918
  
  	} while (read_seqcount_retry(&tk_core.seq, seq));
  
  	return ktime_add_ns(base, nsecs);
  }
  EXPORT_SYMBOL_GPL(ktime_get_raw);
  
  /**
d6d29896c   Thomas Gleixner   timekeeping: Prov...
919
   * ktime_get_ts64 - get the monotonic clock in timespec64 format
951ed4d36   Martin Schwidefsky   timekeeping: opti...
920
921
922
923
   * @ts:		pointer to timespec variable
   *
   * The function calculates the monotonic clock from the realtime
   * clock and the wall_to_monotonic offset and stores the result
5322e4c26   John Stultz   time: Fixup comme...
924
   * in normalized timespec64 format in the variable pointed to by @ts.
951ed4d36   Martin Schwidefsky   timekeeping: opti...
925
   */
d6d29896c   Thomas Gleixner   timekeeping: Prov...
926
  void ktime_get_ts64(struct timespec64 *ts)
951ed4d36   Martin Schwidefsky   timekeeping: opti...
927
  {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
928
  	struct timekeeper *tk = &tk_core.timekeeper;
d6d29896c   Thomas Gleixner   timekeeping: Prov...
929
  	struct timespec64 tomono;
951ed4d36   Martin Schwidefsky   timekeeping: opti...
930
  	unsigned int seq;
acc89612a   Thomas Gleixner   timekeeping: Make...
931
  	u64 nsec;
951ed4d36   Martin Schwidefsky   timekeeping: opti...
932
933
934
935
  
  	WARN_ON(timekeeping_suspended);
  
  	do {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
936
  		seq = read_seqcount_begin(&tk_core.seq);
d6d29896c   Thomas Gleixner   timekeeping: Prov...
937
  		ts->tv_sec = tk->xtime_sec;
876e78818   Peter Zijlstra   time: Rename time...
938
  		nsec = timekeeping_get_ns(&tk->tkr_mono);
4e250fdde   John Stultz   time: Remove all ...
939
  		tomono = tk->wall_to_monotonic;
951ed4d36   Martin Schwidefsky   timekeeping: opti...
940

3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
941
  	} while (read_seqcount_retry(&tk_core.seq, seq));
951ed4d36   Martin Schwidefsky   timekeeping: opti...
942

d6d29896c   Thomas Gleixner   timekeeping: Prov...
943
944
945
  	ts->tv_sec += tomono.tv_sec;
  	ts->tv_nsec = 0;
  	timespec64_add_ns(ts, nsec + tomono.tv_nsec);
951ed4d36   Martin Schwidefsky   timekeeping: opti...
946
  }
d6d29896c   Thomas Gleixner   timekeeping: Prov...
947
  EXPORT_SYMBOL_GPL(ktime_get_ts64);
951ed4d36   Martin Schwidefsky   timekeeping: opti...
948

9e3680b17   Heena Sirwani   timekeeping: Prov...
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
  /**
   * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
   *
   * Returns the seconds portion of CLOCK_MONOTONIC with a single non
   * serialized read. tk->ktime_sec is of type 'unsigned long' so this
   * works on both 32 and 64 bit systems. On 32 bit systems the readout
   * covers ~136 years of uptime which should be enough to prevent
   * premature wrap arounds.
   */
  time64_t ktime_get_seconds(void)
  {
  	struct timekeeper *tk = &tk_core.timekeeper;
  
  	WARN_ON(timekeeping_suspended);
  	return tk->ktime_sec;
  }
  EXPORT_SYMBOL_GPL(ktime_get_seconds);
dbe7aa622   Heena Sirwani   timekeeping: Prov...
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
  /**
   * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
   *
   * Returns the wall clock seconds since 1970. This replaces the
   * get_seconds() interface which is not y2038 safe on 32bit systems.
   *
   * For 64bit systems the fast access to tk->xtime_sec is preserved. On
   * 32bit systems the access must be protected with the sequence
   * counter to provide "atomic" access to the 64bit tk->xtime_sec
   * value.
   */
  time64_t ktime_get_real_seconds(void)
  {
  	struct timekeeper *tk = &tk_core.timekeeper;
  	time64_t seconds;
  	unsigned int seq;
  
  	if (IS_ENABLED(CONFIG_64BIT))
  		return tk->xtime_sec;
  
  	do {
  		seq = read_seqcount_begin(&tk_core.seq);
  		seconds = tk->xtime_sec;
  
  	} while (read_seqcount_retry(&tk_core.seq, seq));
  
  	return seconds;
  }
  EXPORT_SYMBOL_GPL(ktime_get_real_seconds);
dee366541   DengChao   timekeeping: Prov...
995
996
997
998
999
  /**
   * __ktime_get_real_seconds - The same as ktime_get_real_seconds
   * but without the sequence counter protect. This internal function
   * is called just when timekeeping lock is already held.
   */
865d3a9af   Thomas Gleixner   x86/mce: Address ...
1000
  noinstr time64_t __ktime_get_real_seconds(void)
dee366541   DengChao   timekeeping: Prov...
1001
1002
1003
1004
1005
  {
  	struct timekeeper *tk = &tk_core.timekeeper;
  
  	return tk->xtime_sec;
  }
9da0f49c8   Christopher S. Hall   time: Add timekee...
1006
1007
1008
1009
1010
1011
1012
  /**
   * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
   * @systime_snapshot:	pointer to struct receiving the system time snapshot
   */
  void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
  {
  	struct timekeeper *tk = &tk_core.timekeeper;
e1e41b6ce   Rasmus Villemoes   timekeeping: Cons...
1013
  	unsigned int seq;
9da0f49c8   Christopher S. Hall   time: Add timekee...
1014
1015
  	ktime_t base_raw;
  	ktime_t base_real;
acc89612a   Thomas Gleixner   timekeeping: Make...
1016
1017
  	u64 nsec_raw;
  	u64 nsec_real;
a5a1d1c29   Thomas Gleixner   clocksource: Use ...
1018
  	u64 now;
9da0f49c8   Christopher S. Hall   time: Add timekee...
1019

ba26621e6   Christopher S. Hall   time: Remove dupl...
1020
  	WARN_ON_ONCE(timekeeping_suspended);
9da0f49c8   Christopher S. Hall   time: Add timekee...
1021
1022
  	do {
  		seq = read_seqcount_begin(&tk_core.seq);
ceea5e377   John Stultz   time: Fix clock->...
1023
  		now = tk_clock_read(&tk->tkr_mono);
2c756feb1   Christopher S. Hall   time: Add history...
1024
1025
  		systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
  		systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
9da0f49c8   Christopher S. Hall   time: Add timekee...
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
  		base_real = ktime_add(tk->tkr_mono.base,
  				      tk_core.timekeeper.offs_real);
  		base_raw = tk->tkr_raw.base;
  		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
  		nsec_raw  = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
  	} while (read_seqcount_retry(&tk_core.seq, seq));
  
  	systime_snapshot->cycles = now;
  	systime_snapshot->real = ktime_add_ns(base_real, nsec_real);
  	systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw);
  }
  EXPORT_SYMBOL_GPL(ktime_get_snapshot);
dee366541   DengChao   timekeeping: Prov...
1038

2c756feb1   Christopher S. Hall   time: Add history...
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
  /* Scale base by mult/div checking for overflow */
  static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
  {
  	u64 tmp, rem;
  
  	tmp = div64_u64_rem(*base, div, &rem);
  
  	if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) ||
  	    ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
  		return -EOVERFLOW;
  	tmp *= mult;
2c756feb1   Christopher S. Hall   time: Add history...
1050

4cbbc3a0e   Wen Yang   timekeeping: Prev...
1051
  	rem = div64_u64(rem * mult, div);
2c756feb1   Christopher S. Hall   time: Add history...
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
  	*base = tmp + rem;
  	return 0;
  }
  
  /**
   * adjust_historical_crosststamp - adjust crosstimestamp previous to current interval
   * @history:			Snapshot representing start of history
   * @partial_history_cycles:	Cycle offset into history (fractional part)
   * @total_history_cycles:	Total history length in cycles
   * @discontinuity:		True indicates clock was set on history period
   * @ts:				Cross timestamp that should be adjusted using
   *	partial/total ratio
   *
   * Helper function used by get_device_system_crosststamp() to correct the
   * crosstimestamp corresponding to the start of the current interval to the
   * system counter value (timestamp point) provided by the driver. The
   * total_history_* quantities are the total history starting at the provided
   * reference point and ending at the start of the current interval. The cycle
   * count between the driver timestamp point and the start of the current
   * interval is partial_history_cycles.
   */
  static int adjust_historical_crosststamp(struct system_time_snapshot *history,
a5a1d1c29   Thomas Gleixner   clocksource: Use ...
1074
1075
  					 u64 partial_history_cycles,
  					 u64 total_history_cycles,
2c756feb1   Christopher S. Hall   time: Add history...
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
  					 bool discontinuity,
  					 struct system_device_crosststamp *ts)
  {
  	struct timekeeper *tk = &tk_core.timekeeper;
  	u64 corr_raw, corr_real;
  	bool interp_forward;
  	int ret;
  
  	if (total_history_cycles == 0 || partial_history_cycles == 0)
  		return 0;
  
  	/* Interpolate shortest distance from beginning or end of history */
5fc63f957   Nicholas Mc Guire   timekeeping: Remo...
1088
  	interp_forward = partial_history_cycles > total_history_cycles / 2;
2c756feb1   Christopher S. Hall   time: Add history...
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
  	partial_history_cycles = interp_forward ?
  		total_history_cycles - partial_history_cycles :
  		partial_history_cycles;
  
  	/*
  	 * Scale the monotonic raw time delta by:
  	 *	partial_history_cycles / total_history_cycles
  	 */
  	corr_raw = (u64)ktime_to_ns(
  		ktime_sub(ts->sys_monoraw, history->raw));
  	ret = scale64_check_overflow(partial_history_cycles,
  				     total_history_cycles, &corr_raw);
  	if (ret)
  		return ret;
  
  	/*
  	 * If there is a discontinuity in the history, scale monotonic raw
  	 *	correction by:
  	 *	mult(real)/mult(raw) yielding the realtime correction
  	 * Otherwise, calculate the realtime correction similar to monotonic
  	 *	raw calculation
  	 */
  	if (discontinuity) {
  		corr_real = mul_u64_u32_div
  			(corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
  	} else {
  		corr_real = (u64)ktime_to_ns(
  			ktime_sub(ts->sys_realtime, history->real));
  		ret = scale64_check_overflow(partial_history_cycles,
  					     total_history_cycles, &corr_real);
  		if (ret)
  			return ret;
  	}
  
  	/* Fixup monotonic raw and real time time values */
  	if (interp_forward) {
  		ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw);
  		ts->sys_realtime = ktime_add_ns(history->real, corr_real);
  	} else {
  		ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw);
  		ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real);
  	}
  
  	return 0;
  }
  
  /*
   * cycle_between - true if test occurs chronologically between before and after
   */
a5a1d1c29   Thomas Gleixner   clocksource: Use ...
1138
  static bool cycle_between(u64 before, u64 test, u64 after)
2c756feb1   Christopher S. Hall   time: Add history...
1139
1140
1141
1142
1143
1144
1145
  {
  	if (test > before && test < after)
  		return true;
  	if (test < before && before > after)
  		return true;
  	return false;
  }
8524070b7   John Stultz   Move timekeeping ...
1146
  /**
8006c2459   Christopher S. Hall   time: Add driver ...
1147
   * get_device_system_crosststamp - Synchronously capture system/device timestamp
2c756feb1   Christopher S. Hall   time: Add history...
1148
   * @get_time_fn:	Callback to get simultaneous device time and
8006c2459   Christopher S. Hall   time: Add driver ...
1149
   *	system counter from the device driver
2c756feb1   Christopher S. Hall   time: Add history...
1150
1151
1152
   * @ctx:		Context passed to get_time_fn()
   * @history_begin:	Historical reference point used to interpolate system
   *	time when counter provided by the driver is before the current interval
8006c2459   Christopher S. Hall   time: Add driver ...
1153
1154
1155
1156
1157
1158
1159
1160
1161
   * @xtstamp:		Receives simultaneously captured system and device time
   *
   * Reads a timestamp from a device and correlates it to system time
   */
  int get_device_system_crosststamp(int (*get_time_fn)
  				  (ktime_t *device_time,
  				   struct system_counterval_t *sys_counterval,
  				   void *ctx),
  				  void *ctx,
2c756feb1   Christopher S. Hall   time: Add history...
1162
  				  struct system_time_snapshot *history_begin,
8006c2459   Christopher S. Hall   time: Add driver ...
1163
1164
1165
1166
  				  struct system_device_crosststamp *xtstamp)
  {
  	struct system_counterval_t system_counterval;
  	struct timekeeper *tk = &tk_core.timekeeper;
a5a1d1c29   Thomas Gleixner   clocksource: Use ...
1167
  	u64 cycles, now, interval_start;
6436257b4   Ingo Molnar   time/timekeeping:...
1168
  	unsigned int clock_was_set_seq = 0;
8006c2459   Christopher S. Hall   time: Add driver ...
1169
  	ktime_t base_real, base_raw;
acc89612a   Thomas Gleixner   timekeeping: Make...
1170
  	u64 nsec_real, nsec_raw;
2c756feb1   Christopher S. Hall   time: Add history...
1171
  	u8 cs_was_changed_seq;
e1e41b6ce   Rasmus Villemoes   timekeeping: Cons...
1172
  	unsigned int seq;
2c756feb1   Christopher S. Hall   time: Add history...
1173
  	bool do_interp;
8006c2459   Christopher S. Hall   time: Add driver ...
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
  	int ret;
  
  	do {
  		seq = read_seqcount_begin(&tk_core.seq);
  		/*
  		 * Try to synchronously capture device time and a system
  		 * counter value calling back into the device driver
  		 */
  		ret = get_time_fn(&xtstamp->device, &system_counterval, ctx);
  		if (ret)
  			return ret;
  
  		/*
  		 * Verify that the clocksource associated with the captured
  		 * system counter value is the same as the currently installed
  		 * timekeeper clocksource
  		 */
  		if (tk->tkr_mono.clock != system_counterval.cs)
  			return -ENODEV;
2c756feb1   Christopher S. Hall   time: Add history...
1193
1194
1195
1196
1197
1198
  		cycles = system_counterval.cycles;
  
  		/*
  		 * Check whether the system counter value provided by the
  		 * device driver is on the current timekeeping interval.
  		 */
ceea5e377   John Stultz   time: Fix clock->...
1199
  		now = tk_clock_read(&tk->tkr_mono);
2c756feb1   Christopher S. Hall   time: Add history...
1200
1201
1202
1203
1204
1205
1206
1207
1208
  		interval_start = tk->tkr_mono.cycle_last;
  		if (!cycle_between(interval_start, cycles, now)) {
  			clock_was_set_seq = tk->clock_was_set_seq;
  			cs_was_changed_seq = tk->cs_was_changed_seq;
  			cycles = interval_start;
  			do_interp = true;
  		} else {
  			do_interp = false;
  		}
8006c2459   Christopher S. Hall   time: Add driver ...
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
  
  		base_real = ktime_add(tk->tkr_mono.base,
  				      tk_core.timekeeper.offs_real);
  		base_raw = tk->tkr_raw.base;
  
  		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
  						     system_counterval.cycles);
  		nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
  						    system_counterval.cycles);
  	} while (read_seqcount_retry(&tk_core.seq, seq));
  
  	xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
  	xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw);
2c756feb1   Christopher S. Hall   time: Add history...
1222
1223
1224
1225
1226
1227
  
  	/*
  	 * Interpolate if necessary, adjusting back from the start of the
  	 * current interval
  	 */
  	if (do_interp) {
a5a1d1c29   Thomas Gleixner   clocksource: Use ...
1228
  		u64 partial_history_cycles, total_history_cycles;
2c756feb1   Christopher S. Hall   time: Add history...
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
  		bool discontinuity;
  
  		/*
  		 * Check that the counter value occurs after the provided
  		 * history reference and that the history doesn't cross a
  		 * clocksource change
  		 */
  		if (!history_begin ||
  		    !cycle_between(history_begin->cycles,
  				   system_counterval.cycles, cycles) ||
  		    history_begin->cs_was_changed_seq != cs_was_changed_seq)
  			return -EINVAL;
  		partial_history_cycles = cycles - system_counterval.cycles;
  		total_history_cycles = cycles - history_begin->cycles;
  		discontinuity =
  			history_begin->clock_was_set_seq != clock_was_set_seq;
  
  		ret = adjust_historical_crosststamp(history_begin,
  						    partial_history_cycles,
  						    total_history_cycles,
  						    discontinuity, xtstamp);
  		if (ret)
  			return ret;
  	}
8006c2459   Christopher S. Hall   time: Add driver ...
1253
1254
1255
1256
1257
  	return 0;
  }
  EXPORT_SYMBOL_GPL(get_device_system_crosststamp);
  
  /**
21f7eca55   pang.xunlei   time: Provide y20...
1258
1259
   * do_settimeofday64 - Sets the time of day.
   * @ts:     pointer to the timespec64 variable containing the new time
8524070b7   John Stultz   Move timekeeping ...
1260
1261
1262
   *
   * Sets the time of day to the new time and update NTP and notify hrtimers
   */
21f7eca55   pang.xunlei   time: Provide y20...
1263
  int do_settimeofday64(const struct timespec64 *ts)
8524070b7   John Stultz   Move timekeeping ...
1264
  {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1265
  	struct timekeeper *tk = &tk_core.timekeeper;
21f7eca55   pang.xunlei   time: Provide y20...
1266
  	struct timespec64 ts_delta, xt;
92c1d3ed4   John Stultz   time: Remove most...
1267
  	unsigned long flags;
e1d7ba873   Wang YanQing   time: Always make...
1268
  	int ret = 0;
8524070b7   John Stultz   Move timekeeping ...
1269

7a8e61f84   Thomas Gleixner   timekeeping: Forc...
1270
  	if (!timespec64_valid_settod(ts))
8524070b7   John Stultz   Move timekeeping ...
1271
  		return -EINVAL;
9a7a71b1d   Thomas Gleixner   timekeeping: Spli...
1272
  	raw_spin_lock_irqsave(&timekeeper_lock, flags);
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1273
  	write_seqcount_begin(&tk_core.seq);
8524070b7   John Stultz   Move timekeeping ...
1274

4e250fdde   John Stultz   time: Remove all ...
1275
  	timekeeping_forward_now(tk);
9a055117d   Roman Zippel   clocksource: intr...
1276

4e250fdde   John Stultz   time: Remove all ...
1277
  	xt = tk_xtime(tk);
21f7eca55   pang.xunlei   time: Provide y20...
1278
1279
  	ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
  	ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;
1e75fa8be   John Stultz   time: Condense ti...
1280

e1d7ba873   Wang YanQing   time: Always make...
1281
1282
1283
1284
  	if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
  		ret = -EINVAL;
  		goto out;
  	}
7d489d15c   John Stultz   timekeeping: Conv...
1285
  	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));
8524070b7   John Stultz   Move timekeeping ...
1286

21f7eca55   pang.xunlei   time: Provide y20...
1287
  	tk_set_xtime(tk, ts);
e1d7ba873   Wang YanQing   time: Always make...
1288
  out:
780427f0e   David Vrabel   timekeeping: Indi...
1289
  	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
8524070b7   John Stultz   Move timekeeping ...
1290

3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1291
  	write_seqcount_end(&tk_core.seq);
9a7a71b1d   Thomas Gleixner   timekeeping: Spli...
1292
  	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
8524070b7   John Stultz   Move timekeeping ...
1293
1294
1295
  
  	/* signal hrtimers about time change */
  	clock_was_set();
2d87a0674   Ondrej Mosnacek   timekeeping: Audi...
1296
1297
  	if (!ret)
  		audit_tk_injoffset(ts_delta);
e1d7ba873   Wang YanQing   time: Always make...
1298
  	return ret;
8524070b7   John Stultz   Move timekeeping ...
1299
  }
21f7eca55   pang.xunlei   time: Provide y20...
1300
  EXPORT_SYMBOL(do_settimeofday64);
8524070b7   John Stultz   Move timekeeping ...
1301

c528f7c6c   John Stultz   time: Introduce t...
1302
1303
1304
1305
1306
1307
  /**
   * timekeeping_inject_offset - Adds or subtracts from the current time.
   * @tv:		pointer to the timespec variable containing the offset
   *
   * Adds or subtracts an offset value from the current time.
   */
985e69507   Ondrej Mosnacek   timekeeping/ntp: ...
1308
  static int timekeeping_inject_offset(const struct timespec64 *ts)
c528f7c6c   John Stultz   time: Introduce t...
1309
  {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1310
  	struct timekeeper *tk = &tk_core.timekeeper;
92c1d3ed4   John Stultz   time: Remove most...
1311
  	unsigned long flags;
1572fa037   Arnd Bergmann   timekeeping: Use ...
1312
  	struct timespec64 tmp;
4e8b14526   John Stultz   time: Improve san...
1313
  	int ret = 0;
c528f7c6c   John Stultz   time: Introduce t...
1314

1572fa037   Arnd Bergmann   timekeeping: Use ...
1315
  	if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
c528f7c6c   John Stultz   time: Introduce t...
1316
  		return -EINVAL;
9a7a71b1d   Thomas Gleixner   timekeeping: Spli...
1317
  	raw_spin_lock_irqsave(&timekeeper_lock, flags);
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1318
  	write_seqcount_begin(&tk_core.seq);
c528f7c6c   John Stultz   time: Introduce t...
1319

4e250fdde   John Stultz   time: Remove all ...
1320
  	timekeeping_forward_now(tk);
c528f7c6c   John Stultz   time: Introduce t...
1321

4e8b14526   John Stultz   time: Improve san...
1322
  	/* Make sure the proposed value is valid */
1572fa037   Arnd Bergmann   timekeeping: Use ...
1323
1324
  	tmp = timespec64_add(tk_xtime(tk), *ts);
  	if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 ||
7a8e61f84   Thomas Gleixner   timekeeping: Forc...
1325
  	    !timespec64_valid_settod(&tmp)) {
4e8b14526   John Stultz   time: Improve san...
1326
1327
1328
  		ret = -EINVAL;
  		goto error;
  	}
1e75fa8be   John Stultz   time: Condense ti...
1329

1572fa037   Arnd Bergmann   timekeeping: Use ...
1330
1331
  	tk_xtime_add(tk, ts);
  	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *ts));
c528f7c6c   John Stultz   time: Introduce t...
1332

4e8b14526   John Stultz   time: Improve san...
1333
  error: /* even if we error out, we forwarded the time, so call update */
780427f0e   David Vrabel   timekeeping: Indi...
1334
  	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
c528f7c6c   John Stultz   time: Introduce t...
1335

3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1336
  	write_seqcount_end(&tk_core.seq);
9a7a71b1d   Thomas Gleixner   timekeeping: Spli...
1337
  	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
c528f7c6c   John Stultz   time: Introduce t...
1338
1339
1340
  
  	/* signal hrtimers about time change */
  	clock_was_set();
4e8b14526   John Stultz   time: Improve san...
1341
  	return ret;
c528f7c6c   John Stultz   time: Introduce t...
1342
  }
e0956dcc4   Arnd Bergmann   timekeeping: Cons...
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
  
  /*
   * Indicates if there is an offset between the system clock and the hardware
   * clock/persistent clock/rtc.
   */
  int persistent_clock_is_local;
  
  /*
   * Adjust the time obtained from the CMOS to be UTC time instead of
   * local time.
   *
   * This is ugly, but preferable to the alternatives.  Otherwise we
   * would either need to write a program to do it in /etc/rc (and risk
   * confusion if the program gets run more than once; it would also be
   * hard to make the program warp the clock precisely n hours)  or
   * compile in the timezone information into the kernel.  Bad, bad....
   *
   *						- TYT, 1992-01-01
   *
   * The best thing to do is to keep the CMOS clock in universal time (UTC)
   * as real UNIX machines always do it. This avoids all headaches about
   * daylight saving times and warping kernel clocks.
   */
  void timekeeping_warp_clock(void)
  {
  	if (sys_tz.tz_minuteswest != 0) {
1572fa037   Arnd Bergmann   timekeeping: Use ...
1369
  		struct timespec64 adjust;
e0956dcc4   Arnd Bergmann   timekeeping: Cons...
1370
1371
1372
1373
1374
1375
1376
  
  		persistent_clock_is_local = 1;
  		adjust.tv_sec = sys_tz.tz_minuteswest * 60;
  		adjust.tv_nsec = 0;
  		timekeeping_inject_offset(&adjust);
  	}
  }
c528f7c6c   John Stultz   time: Introduce t...
1377

cc244ddae   John Stultz   timekeeping: Move...
1378
  /**
40d9f8275   Stephen Boyd   timekeeping: Remo...
1379
   * __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic
cc244ddae   John Stultz   timekeeping: Move...
1380
1381
   *
   */
dd5d70e86   Fengguang Wu   timekeeping: __ti...
1382
  static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
cc244ddae   John Stultz   timekeeping: Move...
1383
1384
  {
  	tk->tai_offset = tai_offset;
04005f601   John Stultz   timekeeping: Fix ...
1385
  	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
cc244ddae   John Stultz   timekeeping: Move...
1386
1387
1388
  }
  
  /**
8524070b7   John Stultz   Move timekeeping ...
1389
1390
1391
1392
   * change_clocksource - Swaps clocksources if a new one is available
   *
   * Accumulates current time interval and initializes new clocksource
   */
75c5158f7   Martin Schwidefsky   timekeeping: Upda...
1393
  static int change_clocksource(void *data)
8524070b7   John Stultz   Move timekeeping ...
1394
  {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1395
  	struct timekeeper *tk = &tk_core.timekeeper;
4614e6ada   Magnus Damm   clocksource: add ...
1396
  	struct clocksource *new, *old;
f695cf948   John Stultz   time: Fix change_...
1397
  	unsigned long flags;
8524070b7   John Stultz   Move timekeeping ...
1398

75c5158f7   Martin Schwidefsky   timekeeping: Upda...
1399
  	new = (struct clocksource *) data;
8524070b7   John Stultz   Move timekeeping ...
1400

9a7a71b1d   Thomas Gleixner   timekeeping: Spli...
1401
  	raw_spin_lock_irqsave(&timekeeper_lock, flags);
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1402
  	write_seqcount_begin(&tk_core.seq);
f695cf948   John Stultz   time: Fix change_...
1403

4e250fdde   John Stultz   time: Remove all ...
1404
  	timekeeping_forward_now(tk);
09ac369c8   Thomas Gleixner   clocksource: Add ...
1405
1406
1407
1408
1409
1410
  	/*
  	 * If the cs is in module, get a module reference. Succeeds
  	 * for built-in code (owner == NULL) as well.
  	 */
  	if (try_module_get(new->owner)) {
  		if (!new->enable || new->enable(new) == 0) {
876e78818   Peter Zijlstra   time: Rename time...
1411
  			old = tk->tkr_mono.clock;
09ac369c8   Thomas Gleixner   clocksource: Add ...
1412
1413
1414
1415
1416
1417
1418
  			tk_setup_internals(tk, new);
  			if (old->disable)
  				old->disable(old);
  			module_put(old->owner);
  		} else {
  			module_put(new->owner);
  		}
75c5158f7   Martin Schwidefsky   timekeeping: Upda...
1419
  	}
780427f0e   David Vrabel   timekeeping: Indi...
1420
  	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
f695cf948   John Stultz   time: Fix change_...
1421

3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1422
  	write_seqcount_end(&tk_core.seq);
9a7a71b1d   Thomas Gleixner   timekeeping: Spli...
1423
  	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
f695cf948   John Stultz   time: Fix change_...
1424

75c5158f7   Martin Schwidefsky   timekeeping: Upda...
1425
1426
  	return 0;
  }
8524070b7   John Stultz   Move timekeeping ...
1427

75c5158f7   Martin Schwidefsky   timekeeping: Upda...
1428
1429
1430
1431
1432
1433
1434
  /**
   * timekeeping_notify - Install a new clock source
   * @clock:		pointer to the clock source
   *
   * This function is called from clocksource.c after a new, better clock
   * source has been registered. The caller holds the clocksource_mutex.
   */
ba919d1ca   Thomas Gleixner   clocksource: Let ...
1435
  int timekeeping_notify(struct clocksource *clock)
75c5158f7   Martin Schwidefsky   timekeeping: Upda...
1436
  {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1437
  	struct timekeeper *tk = &tk_core.timekeeper;
4e250fdde   John Stultz   time: Remove all ...
1438

876e78818   Peter Zijlstra   time: Rename time...
1439
  	if (tk->tkr_mono.clock == clock)
ba919d1ca   Thomas Gleixner   clocksource: Let ...
1440
  		return 0;
75c5158f7   Martin Schwidefsky   timekeeping: Upda...
1441
  	stop_machine(change_clocksource, clock, NULL);
8524070b7   John Stultz   Move timekeeping ...
1442
  	tick_clock_notify();
876e78818   Peter Zijlstra   time: Rename time...
1443
  	return tk->tkr_mono.clock == clock ? 0 : -1;
8524070b7   John Stultz   Move timekeeping ...
1444
  }
75c5158f7   Martin Schwidefsky   timekeeping: Upda...
1445

a40f262cc   Thomas Gleixner   timekeeping: Move...
1446
  /**
fb7fcc96a   Arnd Bergmann   timekeeping: Stan...
1447
   * ktime_get_raw_ts64 - Returns the raw monotonic time in a timespec
cdba2ec53   John Stultz   time: Expose getr...
1448
   * @ts:		pointer to the timespec64 to be set
2d42244ae   John Stultz   clocksource: intr...
1449
1450
1451
   *
   * Returns the raw monotonic time (completely un-modified by ntp)
   */
fb7fcc96a   Arnd Bergmann   timekeeping: Stan...
1452
  void ktime_get_raw_ts64(struct timespec64 *ts)
2d42244ae   John Stultz   clocksource: intr...
1453
  {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1454
  	struct timekeeper *tk = &tk_core.timekeeper;
e1e41b6ce   Rasmus Villemoes   timekeeping: Cons...
1455
  	unsigned int seq;
acc89612a   Thomas Gleixner   timekeeping: Make...
1456
  	u64 nsecs;
2d42244ae   John Stultz   clocksource: intr...
1457
1458
  
  	do {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1459
  		seq = read_seqcount_begin(&tk_core.seq);
fc6eead7c   John Stultz   time: Clean up CL...
1460
  		ts->tv_sec = tk->raw_sec;
4a4ad80d3   Peter Zijlstra   time: Add timerke...
1461
  		nsecs = timekeeping_get_ns(&tk->tkr_raw);
2d42244ae   John Stultz   clocksource: intr...
1462

3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1463
  	} while (read_seqcount_retry(&tk_core.seq, seq));
2d42244ae   John Stultz   clocksource: intr...
1464

fc6eead7c   John Stultz   time: Clean up CL...
1465
1466
  	ts->tv_nsec = 0;
  	timespec64_add_ns(ts, nsecs);
2d42244ae   John Stultz   clocksource: intr...
1467
  }
fb7fcc96a   Arnd Bergmann   timekeeping: Stan...
1468
  EXPORT_SYMBOL(ktime_get_raw_ts64);
cdba2ec53   John Stultz   time: Expose getr...
1469

2d42244ae   John Stultz   clocksource: intr...
1470

2d42244ae   John Stultz   clocksource: intr...
1471
  /**
cf4fc6cb7   Li Zefan   timekeeping: rena...
1472
   * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
8524070b7   John Stultz   Move timekeeping ...
1473
   */
cf4fc6cb7   Li Zefan   timekeeping: rena...
1474
  int timekeeping_valid_for_hres(void)
8524070b7   John Stultz   Move timekeeping ...
1475
  {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1476
  	struct timekeeper *tk = &tk_core.timekeeper;
e1e41b6ce   Rasmus Villemoes   timekeeping: Cons...
1477
  	unsigned int seq;
8524070b7   John Stultz   Move timekeeping ...
1478
1479
1480
  	int ret;
  
  	do {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1481
  		seq = read_seqcount_begin(&tk_core.seq);
8524070b7   John Stultz   Move timekeeping ...
1482

876e78818   Peter Zijlstra   time: Rename time...
1483
  		ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
8524070b7   John Stultz   Move timekeeping ...
1484

3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1485
  	} while (read_seqcount_retry(&tk_core.seq, seq));
8524070b7   John Stultz   Move timekeeping ...
1486
1487
1488
1489
1490
  
  	return ret;
  }
  
  /**
98962465e   Jon Hunter   nohz: Prevent clo...
1491
   * timekeeping_max_deferment - Returns max time the clocksource can be deferred
98962465e   Jon Hunter   nohz: Prevent clo...
1492
1493
1494
   */
  u64 timekeeping_max_deferment(void)
  {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1495
  	struct timekeeper *tk = &tk_core.timekeeper;
e1e41b6ce   Rasmus Villemoes   timekeeping: Cons...
1496
  	unsigned int seq;
70471f2f0   John Stultz   time: Add timekee...
1497
  	u64 ret;
42e71e81f   John Stultz   time: Whitespace ...
1498

70471f2f0   John Stultz   time: Add timekee...
1499
  	do {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1500
  		seq = read_seqcount_begin(&tk_core.seq);
70471f2f0   John Stultz   time: Add timekee...
1501

876e78818   Peter Zijlstra   time: Rename time...
1502
  		ret = tk->tkr_mono.clock->max_idle_ns;
70471f2f0   John Stultz   time: Add timekee...
1503

3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1504
  	} while (read_seqcount_retry(&tk_core.seq, seq));
70471f2f0   John Stultz   time: Add timekee...
1505
1506
  
  	return ret;
98962465e   Jon Hunter   nohz: Prevent clo...
1507
1508
1509
  }
  
  /**
926617889   Arnd Bergmann   timekeeping: remo...
1510
   * read_persistent_clock64 -  Return time from the persistent clock.
8524070b7   John Stultz   Move timekeeping ...
1511
1512
   *
   * Weak dummy function for arches that do not yet support it.
d4f587c67   Martin Schwidefsky   timekeeping: Incr...
1513
1514
   * Reads the time from the battery backed persistent clock.
   * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
8524070b7   John Stultz   Move timekeeping ...
1515
1516
1517
   *
   *  XXX - Do be sure to remove it once all arches implement it.
   */
926617889   Arnd Bergmann   timekeeping: remo...
1518
  void __weak read_persistent_clock64(struct timespec64 *ts)
8524070b7   John Stultz   Move timekeeping ...
1519
  {
d4f587c67   Martin Schwidefsky   timekeeping: Incr...
1520
1521
  	ts->tv_sec = 0;
  	ts->tv_nsec = 0;
8524070b7   John Stultz   Move timekeeping ...
1522
  }
23970e389   Martin Schwidefsky   timekeeping: Intr...
1523
  /**
3eca99374   Pavel Tatashin   timekeeping: Repl...
1524
1525
   * read_persistent_wall_and_boot_offset - Read persistent clock, and also offset
   *                                        from the boot.
23970e389   Martin Schwidefsky   timekeeping: Intr...
1526
1527
   *
   * Weak dummy function for arches that do not yet support it.
3eca99374   Pavel Tatashin   timekeeping: Repl...
1528
1529
   * wall_time	- current time as returned by persistent clock
   * boot_offset	- offset that is defined as wall_time - boot_time
4b1b7f805   Pavel Tatashin   timekeeping: Defa...
1530
1531
1532
1533
   * The default function calculates offset based on the current value of
   * local_clock(). This way architectures that support sched_clock() but don't
   * support dedicated boot time clock will provide the best estimate of the
   * boot time.
23970e389   Martin Schwidefsky   timekeeping: Intr...
1534
   */
3eca99374   Pavel Tatashin   timekeeping: Repl...
1535
1536
1537
  void __weak __init
  read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
  				     struct timespec64 *boot_offset)
23970e389   Martin Schwidefsky   timekeeping: Intr...
1538
  {
3eca99374   Pavel Tatashin   timekeeping: Repl...
1539
  	read_persistent_clock64(wall_time);
4b1b7f805   Pavel Tatashin   timekeeping: Defa...
1540
  	*boot_offset = ns_to_timespec64(local_clock());
23970e389   Martin Schwidefsky   timekeeping: Intr...
1541
  }
f473e5f46   Mukesh Ojha   time: Fix extra s...
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
  /*
   * Flag reflecting whether timekeeping_resume() has injected sleeptime.
   *
   * The flag starts of false and is only set when a suspend reaches
   * timekeeping_suspend(), timekeeping_resume() sets it to false when the
   * timekeeper clocksource is not stopping across suspend and has been
   * used to update sleep time. If the timekeeper clocksource has stopped
   * then the flag stays true and is used by the RTC resume code to decide
   * whether sleeptime must be injected and if so the flag gets false then.
   *
   * If a suspend fails before reaching timekeeping_resume() then the flag
   * stays false and prevents erroneous sleeptime injection.
   */
  static bool suspend_timing_needed;
0fa88cb4b   Xunlei Pang   time, drivers/rtc...
1556
1557
1558
  
  /* Flag for if there is a persistent clock on this platform */
  static bool persistent_clock_exists;
8524070b7   John Stultz   Move timekeeping ...
1559
1560
1561
1562
1563
  /*
   * timekeeping_init - Initializes the clocksource and common timekeeping values
   */
  void __init timekeeping_init(void)
  {
3eca99374   Pavel Tatashin   timekeeping: Repl...
1564
  	struct timespec64 wall_time, boot_offset, wall_to_mono;
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1565
  	struct timekeeper *tk = &tk_core.timekeeper;
155ec6022   Martin Schwidefsky   timekeeping: Intr...
1566
  	struct clocksource *clock;
8524070b7   John Stultz   Move timekeeping ...
1567
  	unsigned long flags;
4e8b14526   John Stultz   time: Improve san...
1568

3eca99374   Pavel Tatashin   timekeeping: Repl...
1569
  	read_persistent_wall_and_boot_offset(&wall_time, &boot_offset);
7a8e61f84   Thomas Gleixner   timekeeping: Forc...
1570
  	if (timespec64_valid_settod(&wall_time) &&
3eca99374   Pavel Tatashin   timekeeping: Repl...
1571
1572
  	    timespec64_to_ns(&wall_time) > 0) {
  		persistent_clock_exists = true;
684ad537a   Pavel Tatashin   timekeeping: Prev...
1573
  	} else if (timespec64_to_ns(&wall_time) != 0) {
3eca99374   Pavel Tatashin   timekeeping: Repl...
1574
1575
  		pr_warn("Persistent clock returned invalid value");
  		wall_time = (struct timespec64){0};
4e8b14526   John Stultz   time: Improve san...
1576
  	}
8524070b7   John Stultz   Move timekeeping ...
1577

3eca99374   Pavel Tatashin   timekeeping: Repl...
1578
1579
1580
1581
1582
1583
1584
1585
  	if (timespec64_compare(&wall_time, &boot_offset) < 0)
  		boot_offset = (struct timespec64){0};
  
  	/*
  	 * We want set wall_to_mono, so the following is true:
  	 * wall time + wall_to_mono = boot time
  	 */
  	wall_to_mono = timespec64_sub(boot_offset, wall_time);
9a7a71b1d   Thomas Gleixner   timekeeping: Spli...
1586
  	raw_spin_lock_irqsave(&timekeeper_lock, flags);
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1587
  	write_seqcount_begin(&tk_core.seq);
06c017fdd   John Stultz   timekeeping: Hold...
1588
  	ntp_init();
f1b82746c   Martin Schwidefsky   clocksource: Clea...
1589
  	clock = clocksource_default_clock();
a0f7d48bf   Martin Schwidefsky   timekeeping: Remo...
1590
1591
  	if (clock->enable)
  		clock->enable(clock);
4e250fdde   John Stultz   time: Remove all ...
1592
  	tk_setup_internals(tk, clock);
8524070b7   John Stultz   Move timekeeping ...
1593

3eca99374   Pavel Tatashin   timekeeping: Repl...
1594
  	tk_set_xtime(tk, &wall_time);
fc6eead7c   John Stultz   time: Clean up CL...
1595
  	tk->raw_sec = 0;
1e75fa8be   John Stultz   time: Condense ti...
1596

3eca99374   Pavel Tatashin   timekeeping: Repl...
1597
  	tk_set_wall_to_mono(tk, wall_to_mono);
6d0ef903e   John Stultz   time: Clean up of...
1598

56fd16cab   Thomas Gleixner   timekeeping: Incr...
1599
  	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
48cdc135d   Thomas Gleixner   timekeeping: Impl...
1600

3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1601
  	write_seqcount_end(&tk_core.seq);
9a7a71b1d   Thomas Gleixner   timekeeping: Spli...
1602
  	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
8524070b7   John Stultz   Move timekeeping ...
1603
  }
264bb3f79   Xunlei Pang   time: Fix a bug i...
1604
  /* time in seconds when suspend began for persistent clock */
7d489d15c   John Stultz   timekeeping: Conv...
1605
  static struct timespec64 timekeeping_suspend_time;
8524070b7   John Stultz   Move timekeeping ...
1606
1607
  
  /**
304529b1b   John Stultz   time: Add timekee...
1608
1609
1610
1611
1612
1613
   * __timekeeping_inject_sleeptime - Internal function to add sleep interval
   * @delta: pointer to a timespec delta value
   *
   * Takes a timespec offset measuring a suspend interval and properly
   * adds the sleep offset to the timekeeping variables.
   */
f726a697d   John Stultz   time: Rework time...
1614
  static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
985e69507   Ondrej Mosnacek   timekeeping/ntp: ...
1615
  					   const struct timespec64 *delta)
304529b1b   John Stultz   time: Add timekee...
1616
  {
7d489d15c   John Stultz   timekeeping: Conv...
1617
  	if (!timespec64_valid_strict(delta)) {
6d9bcb621   John Stultz   timekeeping: use ...
1618
1619
1620
1621
  		printk_deferred(KERN_WARNING
  				"__timekeeping_inject_sleeptime: Invalid "
  				"sleep delta value!
  ");
cb5de2f8d   John Stultz   time: Catch inval...
1622
1623
  		return;
  	}
f726a697d   John Stultz   time: Rework time...
1624
  	tk_xtime_add(tk, delta);
a3ed0e439   Thomas Gleixner   Revert: Unify CLO...
1625
  	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
47da70d32   Thomas Gleixner   timekeeping: Remo...
1626
  	tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
5c83545f2   Colin Cross   power: Add option...
1627
  	tk_debug_account_sleep_time(delta);
304529b1b   John Stultz   time: Add timekee...
1628
  }
7f2981393   Xunlei Pang   time: Don't build...
1629
  #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
304529b1b   John Stultz   time: Add timekee...
1630
  /**
0fa88cb4b   Xunlei Pang   time, drivers/rtc...
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
   * We have three kinds of time sources to use for sleep time
   * injection, the preference order is:
   * 1) non-stop clocksource
   * 2) persistent clock (ie: RTC accessible when irqs are off)
   * 3) RTC
   *
   * 1) and 2) are used by timekeeping, 3) by RTC subsystem.
   * If system has neither 1) nor 2), 3) will be used finally.
   *
   *
   * If timekeeping has injected sleeptime via either 1) or 2),
   * 3) becomes needless, so in this case we don't need to call
   * rtc_resume(), and this is what timekeeping_rtc_skipresume()
   * means.
   */
  bool timekeeping_rtc_skipresume(void)
  {
f473e5f46   Mukesh Ojha   time: Fix extra s...
1648
  	return !suspend_timing_needed;
0fa88cb4b   Xunlei Pang   time, drivers/rtc...
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
  }
  
  /**
   * 1) can be determined whether to use or not only when doing
   * timekeeping_resume() which is invoked after rtc_suspend(),
   * so we can't skip rtc_suspend() surely if system has 1).
   *
   * But if system has 2), 2) will definitely be used, so in this
   * case we don't need to call rtc_suspend(), and this is what
   * timekeeping_rtc_skipsuspend() means.
   */
  bool timekeeping_rtc_skipsuspend(void)
  {
  	return persistent_clock_exists;
  }
  
  /**
04d908908   pang.xunlei   time: Provide y20...
1666
1667
   * timekeeping_inject_sleeptime64 - Adds suspend interval to timeekeeping values
   * @delta: pointer to a timespec64 delta value
304529b1b   John Stultz   time: Add timekee...
1668
   *
2ee966320   Xunlei Pang   time: Add y2038 s...
1669
   * This hook is for architectures that cannot support read_persistent_clock64
304529b1b   John Stultz   time: Add timekee...
1670
   * because their RTC/persistent clock is only accessible when irqs are enabled.
0fa88cb4b   Xunlei Pang   time, drivers/rtc...
1671
   * and also don't have an effective nonstop clocksource.
304529b1b   John Stultz   time: Add timekee...
1672
1673
1674
1675
   *
   * This function should only be called by rtc_resume(), and allows
   * a suspend offset to be injected into the timekeeping values.
   */
985e69507   Ondrej Mosnacek   timekeeping/ntp: ...
1676
  void timekeeping_inject_sleeptime64(const struct timespec64 *delta)
304529b1b   John Stultz   time: Add timekee...
1677
  {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1678
  	struct timekeeper *tk = &tk_core.timekeeper;
92c1d3ed4   John Stultz   time: Remove most...
1679
  	unsigned long flags;
304529b1b   John Stultz   time: Add timekee...
1680

9a7a71b1d   Thomas Gleixner   timekeeping: Spli...
1681
  	raw_spin_lock_irqsave(&timekeeper_lock, flags);
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1682
  	write_seqcount_begin(&tk_core.seq);
70471f2f0   John Stultz   time: Add timekee...
1683

f473e5f46   Mukesh Ojha   time: Fix extra s...
1684
  	suspend_timing_needed = false;
4e250fdde   John Stultz   time: Remove all ...
1685
  	timekeeping_forward_now(tk);
304529b1b   John Stultz   time: Add timekee...
1686

04d908908   pang.xunlei   time: Provide y20...
1687
  	__timekeeping_inject_sleeptime(tk, delta);
304529b1b   John Stultz   time: Add timekee...
1688

780427f0e   David Vrabel   timekeeping: Indi...
1689
  	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
304529b1b   John Stultz   time: Add timekee...
1690

3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1691
  	write_seqcount_end(&tk_core.seq);
9a7a71b1d   Thomas Gleixner   timekeeping: Spli...
1692
  	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
304529b1b   John Stultz   time: Add timekee...
1693
1694
1695
1696
  
  	/* signal hrtimers about time change */
  	clock_was_set();
  }
7f2981393   Xunlei Pang   time: Don't build...
1697
  #endif
304529b1b   John Stultz   time: Add timekee...
1698

304529b1b   John Stultz   time: Add timekee...
1699
  /**
8524070b7   John Stultz   Move timekeeping ...
1700
   * timekeeping_resume - Resumes the generic timekeeping subsystem.
8524070b7   John Stultz   Move timekeeping ...
1701
   */
124cf9117   Rafael J. Wysocki   PM / sleep: Make ...
1702
  void timekeeping_resume(void)
8524070b7   John Stultz   Move timekeeping ...
1703
  {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1704
  	struct timekeeper *tk = &tk_core.timekeeper;
876e78818   Peter Zijlstra   time: Rename time...
1705
  	struct clocksource *clock = tk->tkr_mono.clock;
92c1d3ed4   John Stultz   time: Remove most...
1706
  	unsigned long flags;
7d489d15c   John Stultz   timekeeping: Conv...
1707
  	struct timespec64 ts_new, ts_delta;
39232ed5a   Baolin Wang   time: Introduce o...
1708
  	u64 cycle_now, nsec;
f473e5f46   Mukesh Ojha   time: Fix extra s...
1709
  	bool inject_sleeptime = false;
d4f587c67   Martin Schwidefsky   timekeeping: Incr...
1710

2ee966320   Xunlei Pang   time: Add y2038 s...
1711
  	read_persistent_clock64(&ts_new);
8524070b7   John Stultz   Move timekeeping ...
1712

adc78e6b9   Rafael J. Wysocki   timekeeping: Add ...
1713
  	clockevents_resume();
d10ff3fb6   Thomas Gleixner   timekeeping fix p...
1714
  	clocksource_resume();
9a7a71b1d   Thomas Gleixner   timekeeping: Spli...
1715
  	raw_spin_lock_irqsave(&timekeeper_lock, flags);
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1716
  	write_seqcount_begin(&tk_core.seq);
8524070b7   John Stultz   Move timekeeping ...
1717

e445cf1c4   Feng Tang   timekeeping: util...
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
  	/*
  	 * After system resumes, we need to calculate the suspended time and
  	 * compensate it for the OS time. There are 3 sources that could be
  	 * used: Nonstop clocksource during suspend, persistent clock and rtc
  	 * device.
  	 *
  	 * One specific platform may have 1 or 2 or all of them, and the
  	 * preference will be:
  	 *	suspend-nonstop clocksource -> persistent clock -> rtc
  	 * The less preferred source will only be tried if there is no better
  	 * usable source. The rtc part is handled separately in rtc core code.
  	 */
ceea5e377   John Stultz   time: Fix clock->...
1730
  	cycle_now = tk_clock_read(&tk->tkr_mono);
39232ed5a   Baolin Wang   time: Introduce o...
1731
1732
  	nsec = clocksource_stop_suspend_timing(clock, cycle_now);
  	if (nsec > 0) {
7d489d15c   John Stultz   timekeeping: Conv...
1733
  		ts_delta = ns_to_timespec64(nsec);
f473e5f46   Mukesh Ojha   time: Fix extra s...
1734
  		inject_sleeptime = true;
7d489d15c   John Stultz   timekeeping: Conv...
1735
1736
  	} else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
  		ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
f473e5f46   Mukesh Ojha   time: Fix extra s...
1737
  		inject_sleeptime = true;
8524070b7   John Stultz   Move timekeeping ...
1738
  	}
e445cf1c4   Feng Tang   timekeeping: util...
1739

f473e5f46   Mukesh Ojha   time: Fix extra s...
1740
1741
  	if (inject_sleeptime) {
  		suspend_timing_needed = false;
e445cf1c4   Feng Tang   timekeeping: util...
1742
  		__timekeeping_inject_sleeptime(tk, &ts_delta);
f473e5f46   Mukesh Ojha   time: Fix extra s...
1743
  	}
e445cf1c4   Feng Tang   timekeeping: util...
1744
1745
  
  	/* Re-base the last cycle value */
876e78818   Peter Zijlstra   time: Rename time...
1746
  	tk->tkr_mono.cycle_last = cycle_now;
4a4ad80d3   Peter Zijlstra   time: Add timerke...
1747
  	tk->tkr_raw.cycle_last  = cycle_now;
4e250fdde   John Stultz   time: Remove all ...
1748
  	tk->ntp_error = 0;
8524070b7   John Stultz   Move timekeeping ...
1749
  	timekeeping_suspended = 0;
780427f0e   David Vrabel   timekeeping: Indi...
1750
  	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1751
  	write_seqcount_end(&tk_core.seq);
9a7a71b1d   Thomas Gleixner   timekeeping: Spli...
1752
  	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
8524070b7   John Stultz   Move timekeeping ...
1753
1754
  
  	touch_softlockup_watchdog();
4ffee521f   Thomas Gleixner   clockevents: Make...
1755
  	tick_resume();
b12a03ce4   Thomas Gleixner   hrtimers: Prepare...
1756
  	hrtimers_resume();
8524070b7   John Stultz   Move timekeeping ...
1757
  }
124cf9117   Rafael J. Wysocki   PM / sleep: Make ...
1758
  int timekeeping_suspend(void)
8524070b7   John Stultz   Move timekeeping ...
1759
  {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1760
  	struct timekeeper *tk = &tk_core.timekeeper;
92c1d3ed4   John Stultz   time: Remove most...
1761
  	unsigned long flags;
7d489d15c   John Stultz   timekeeping: Conv...
1762
1763
  	struct timespec64		delta, delta_delta;
  	static struct timespec64	old_delta;
39232ed5a   Baolin Wang   time: Introduce o...
1764
1765
  	struct clocksource *curr_clock;
  	u64 cycle_now;
8524070b7   John Stultz   Move timekeeping ...
1766

2ee966320   Xunlei Pang   time: Add y2038 s...
1767
  	read_persistent_clock64(&timekeeping_suspend_time);
3be909506   Thomas Gleixner   timekeeping: acce...
1768

0d6bd9953   Zoran Markovic   timekeeping: Corr...
1769
1770
1771
1772
1773
1774
  	/*
  	 * On some systems the persistent_clock can not be detected at
  	 * timekeeping_init by its return value, so if we see a valid
  	 * value returned, update the persistent_clock_exists flag.
  	 */
  	if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
0fa88cb4b   Xunlei Pang   time, drivers/rtc...
1775
  		persistent_clock_exists = true;
0d6bd9953   Zoran Markovic   timekeeping: Corr...
1776

f473e5f46   Mukesh Ojha   time: Fix extra s...
1777
  	suspend_timing_needed = true;
9a7a71b1d   Thomas Gleixner   timekeeping: Spli...
1778
  	raw_spin_lock_irqsave(&timekeeper_lock, flags);
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1779
  	write_seqcount_begin(&tk_core.seq);
4e250fdde   John Stultz   time: Remove all ...
1780
  	timekeeping_forward_now(tk);
8524070b7   John Stultz   Move timekeeping ...
1781
  	timekeeping_suspended = 1;
cb33217b1   John Stultz   time: Avoid accum...
1782

39232ed5a   Baolin Wang   time: Introduce o...
1783
1784
1785
1786
1787
1788
1789
1790
  	/*
  	 * Since we've called forward_now, cycle_last stores the value
  	 * just read from the current clocksource. Save this to potentially
  	 * use in suspend timing.
  	 */
  	curr_clock = tk->tkr_mono.clock;
  	cycle_now = tk->tkr_mono.cycle_last;
  	clocksource_start_suspend_timing(curr_clock, cycle_now);
0fa88cb4b   Xunlei Pang   time, drivers/rtc...
1791
  	if (persistent_clock_exists) {
cb33217b1   John Stultz   time: Avoid accum...
1792
  		/*
264bb3f79   Xunlei Pang   time: Fix a bug i...
1793
1794
1795
1796
  		 * To avoid drift caused by repeated suspend/resumes,
  		 * which each can add ~1 second drift error,
  		 * try to compensate so the difference in system time
  		 * and persistent_clock time stays close to constant.
cb33217b1   John Stultz   time: Avoid accum...
1797
  		 */
264bb3f79   Xunlei Pang   time: Fix a bug i...
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
  		delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
  		delta_delta = timespec64_sub(delta, old_delta);
  		if (abs(delta_delta.tv_sec) >= 2) {
  			/*
  			 * if delta_delta is too large, assume time correction
  			 * has occurred and set old_delta to the current delta.
  			 */
  			old_delta = delta;
  		} else {
  			/* Otherwise try to adjust old_system to compensate */
  			timekeeping_suspend_time =
  				timespec64_add(timekeeping_suspend_time, delta_delta);
  		}
cb33217b1   John Stultz   time: Avoid accum...
1811
  	}
330a1617b   John Stultz   timekeeping: Fix ...
1812
1813
  
  	timekeeping_update(tk, TK_MIRROR);
060407aed   Rafael J. Wysocki   timekeeping: Make...
1814
  	halt_fast_timekeeper(tk);
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
1815
  	write_seqcount_end(&tk_core.seq);
9a7a71b1d   Thomas Gleixner   timekeeping: Spli...
1816
  	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
8524070b7   John Stultz   Move timekeeping ...
1817

4ffee521f   Thomas Gleixner   clockevents: Make...
1818
  	tick_suspend();
c54a42b19   Magnus Damm   clocksource: add ...
1819
  	clocksource_suspend();
adc78e6b9   Rafael J. Wysocki   timekeeping: Add ...
1820
  	clockevents_suspend();
8524070b7   John Stultz   Move timekeeping ...
1821
1822
1823
1824
1825
  
  	return 0;
  }
  
  /* sysfs resume/suspend bits for timekeeping */
e1a85b2c5   Rafael J. Wysocki   timekeeping: Use ...
1826
  static struct syscore_ops timekeeping_syscore_ops = {
8524070b7   John Stultz   Move timekeeping ...
1827
1828
  	.resume		= timekeeping_resume,
  	.suspend	= timekeeping_suspend,
8524070b7   John Stultz   Move timekeeping ...
1829
  };
e1a85b2c5   Rafael J. Wysocki   timekeeping: Use ...
1830
  static int __init timekeeping_init_ops(void)
8524070b7   John Stultz   Move timekeeping ...
1831
  {
e1a85b2c5   Rafael J. Wysocki   timekeeping: Use ...
1832
1833
  	register_syscore_ops(&timekeeping_syscore_ops);
  	return 0;
8524070b7   John Stultz   Move timekeeping ...
1834
  }
e1a85b2c5   Rafael J. Wysocki   timekeeping: Use ...
1835
  device_initcall(timekeeping_init_ops);
8524070b7   John Stultz   Move timekeeping ...
1836
1837
  
  /*
dc491596f   John Stultz   timekeeping: Rewo...
1838
   * Apply a multiplier adjustment to the timekeeper
8524070b7   John Stultz   Move timekeeping ...
1839
   */
dc491596f   John Stultz   timekeeping: Rewo...
1840
1841
  static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
  							 s64 offset,
78b98e3c5   Miroslav Lichvar   timekeeping/ntp: ...
1842
  							 s32 mult_adj)
8524070b7   John Stultz   Move timekeeping ...
1843
  {
dc491596f   John Stultz   timekeeping: Rewo...
1844
  	s64 interval = tk->cycle_interval;
8524070b7   John Stultz   Move timekeeping ...
1845

78b98e3c5   Miroslav Lichvar   timekeeping/ntp: ...
1846
1847
1848
  	if (mult_adj == 0) {
  		return;
  	} else if (mult_adj == -1) {
dc491596f   John Stultz   timekeeping: Rewo...
1849
  		interval = -interval;
78b98e3c5   Miroslav Lichvar   timekeeping/ntp: ...
1850
1851
1852
1853
  		offset = -offset;
  	} else if (mult_adj != 1) {
  		interval *= mult_adj;
  		offset *= mult_adj;
1d17d1748   Ingo Molnar   time: Fix adjustm...
1854
  	}
8524070b7   John Stultz   Move timekeeping ...
1855

c2bc11113   John Stultz   time: Improve doc...
1856
1857
1858
  	/*
  	 * So the following can be confusing.
  	 *
dc491596f   John Stultz   timekeeping: Rewo...
1859
  	 * To keep things simple, lets assume mult_adj == 1 for now.
c2bc11113   John Stultz   time: Improve doc...
1860
  	 *
dc491596f   John Stultz   timekeeping: Rewo...
1861
  	 * When mult_adj != 1, remember that the interval and offset values
c2bc11113   John Stultz   time: Improve doc...
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
  	 * have been appropriately scaled so the math is the same.
  	 *
  	 * The basic idea here is that we're increasing the multiplier
  	 * by one, this causes the xtime_interval to be incremented by
  	 * one cycle_interval. This is because:
  	 *	xtime_interval = cycle_interval * mult
  	 * So if mult is being incremented by one:
  	 *	xtime_interval = cycle_interval * (mult + 1)
  	 * Its the same as:
  	 *	xtime_interval = (cycle_interval * mult) + cycle_interval
  	 * Which can be shortened to:
  	 *	xtime_interval += cycle_interval
  	 *
  	 * So offset stores the non-accumulated cycles. Thus the current
  	 * time (in shifted nanoseconds) is:
  	 *	now = (offset * adj) + xtime_nsec
  	 * Now, even though we're adjusting the clock frequency, we have
  	 * to keep time consistent. In other words, we can't jump back
  	 * in time, and we also want to avoid jumping forward in time.
  	 *
  	 * So given the same offset value, we need the time to be the same
  	 * both before and after the freq adjustment.
  	 *	now = (offset * adj_1) + xtime_nsec_1
  	 *	now = (offset * adj_2) + xtime_nsec_2
  	 * So:
  	 *	(offset * adj_1) + xtime_nsec_1 =
  	 *		(offset * adj_2) + xtime_nsec_2
  	 * And we know:
  	 *	adj_2 = adj_1 + 1
  	 * So:
  	 *	(offset * adj_1) + xtime_nsec_1 =
  	 *		(offset * (adj_1+1)) + xtime_nsec_2
  	 *	(offset * adj_1) + xtime_nsec_1 =
  	 *		(offset * adj_1) + offset + xtime_nsec_2
  	 * Canceling the sides:
  	 *	xtime_nsec_1 = offset + xtime_nsec_2
  	 * Which gives us:
  	 *	xtime_nsec_2 = xtime_nsec_1 - offset
  	 * Which simplfies to:
  	 *	xtime_nsec -= offset
c2bc11113   John Stultz   time: Improve doc...
1902
  	 */
876e78818   Peter Zijlstra   time: Rename time...
1903
  	if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
6067dc5a8   pang.xunlei   time: Avoid possi...
1904
1905
1906
1907
  		/* NTP adjustment caused clocksource mult overflow */
  		WARN_ON_ONCE(1);
  		return;
  	}
876e78818   Peter Zijlstra   time: Rename time...
1908
  	tk->tkr_mono.mult += mult_adj;
f726a697d   John Stultz   time: Rework time...
1909
  	tk->xtime_interval += interval;
876e78818   Peter Zijlstra   time: Rename time...
1910
  	tk->tkr_mono.xtime_nsec -= offset;
dc491596f   John Stultz   timekeeping: Rewo...
1911
1912
1913
  }
  
  /*
78b98e3c5   Miroslav Lichvar   timekeeping/ntp: ...
1914
1915
   * Adjust the timekeeper's multiplier to the correct frequency
   * and also to reduce the accumulated error value.
dc491596f   John Stultz   timekeeping: Rewo...
1916
   */
78b98e3c5   Miroslav Lichvar   timekeeping/ntp: ...
1917
  static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
dc491596f   John Stultz   timekeeping: Rewo...
1918
  {
78b98e3c5   Miroslav Lichvar   timekeeping/ntp: ...
1919
  	u32 mult;
dc491596f   John Stultz   timekeeping: Rewo...
1920

ec02b076c   John Stultz   timekeeping: Cap ...
1921
  	/*
78b98e3c5   Miroslav Lichvar   timekeeping/ntp: ...
1922
1923
  	 * Determine the multiplier from the current NTP tick length.
  	 * Avoid expensive division when the tick length doesn't change.
ec02b076c   John Stultz   timekeeping: Cap ...
1924
  	 */
78b98e3c5   Miroslav Lichvar   timekeeping/ntp: ...
1925
1926
1927
1928
1929
1930
  	if (likely(tk->ntp_tick == ntp_tick_length())) {
  		mult = tk->tkr_mono.mult - tk->ntp_err_mult;
  	} else {
  		tk->ntp_tick = ntp_tick_length();
  		mult = div64_u64((tk->ntp_tick >> tk->ntp_error_shift) -
  				 tk->xtime_remainder, tk->cycle_interval);
ec02b076c   John Stultz   timekeeping: Cap ...
1931
  	}
dc491596f   John Stultz   timekeeping: Rewo...
1932

78b98e3c5   Miroslav Lichvar   timekeeping/ntp: ...
1933
1934
1935
1936
1937
1938
1939
1940
  	/*
  	 * If the clock is behind the NTP time, increase the multiplier by 1
  	 * to catch up with it. If it's ahead and there was a remainder in the
  	 * tick division, the clock will slow down. Otherwise it will stay
  	 * ahead until the tick length changes to a non-divisible value.
  	 */
  	tk->ntp_err_mult = tk->ntp_error > 0 ? 1 : 0;
  	mult += tk->ntp_err_mult;
dc491596f   John Stultz   timekeeping: Rewo...
1941

78b98e3c5   Miroslav Lichvar   timekeeping/ntp: ...
1942
  	timekeeping_apply_adjustment(tk, offset, mult - tk->tkr_mono.mult);
dc491596f   John Stultz   timekeeping: Rewo...
1943

876e78818   Peter Zijlstra   time: Rename time...
1944
1945
1946
  	if (unlikely(tk->tkr_mono.clock->maxadj &&
  		(abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
  			> tk->tkr_mono.clock->maxadj))) {
dc491596f   John Stultz   timekeeping: Rewo...
1947
1948
1949
  		printk_once(KERN_WARNING
  			"Adjusting %s more than 11%% (%ld vs %ld)
  ",
876e78818   Peter Zijlstra   time: Rename time...
1950
1951
  			tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
  			(long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
dc491596f   John Stultz   timekeeping: Rewo...
1952
  	}
2a8c0883c   John Stultz   time: Move xtime_...
1953
1954
1955
1956
1957
1958
1959
  
  	/*
  	 * It may be possible that when we entered this function, xtime_nsec
  	 * was very small.  Further, if we're slightly speeding the clocksource
  	 * in the code above, its possible the required corrective factor to
  	 * xtime_nsec could cause it to underflow.
  	 *
78b98e3c5   Miroslav Lichvar   timekeeping/ntp: ...
1960
1961
1962
  	 * Now, since we have already accumulated the second and the NTP
  	 * subsystem has been notified via second_overflow(), we need to skip
  	 * the next update.
2a8c0883c   John Stultz   time: Move xtime_...
1963
  	 */
876e78818   Peter Zijlstra   time: Rename time...
1964
  	if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
78b98e3c5   Miroslav Lichvar   timekeeping/ntp: ...
1965
1966
1967
1968
  		tk->tkr_mono.xtime_nsec += (u64)NSEC_PER_SEC <<
  							tk->tkr_mono.shift;
  		tk->xtime_sec--;
  		tk->skip_second_overflow = 1;
2a8c0883c   John Stultz   time: Move xtime_...
1969
  	}
8524070b7   John Stultz   Move timekeeping ...
1970
1971
1972
  }
  
  /**
1f4f94870   John Stultz   time: Refactor ac...
1973
1974
   * accumulate_nsecs_to_secs - Accumulates nsecs into secs
   *
571af55a3   Zhen Lei   time: Fix spellin...
1975
   * Helper function that accumulates the nsecs greater than a second
1f4f94870   John Stultz   time: Refactor ac...
1976
1977
1978
1979
   * from the xtime_nsec field to the xtime_secs field.
   * It also calls into the NTP code to handle leapsecond processing.
   *
   */
780427f0e   David Vrabel   timekeeping: Indi...
1980
  static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
1f4f94870   John Stultz   time: Refactor ac...
1981
  {
876e78818   Peter Zijlstra   time: Rename time...
1982
  	u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
5258d3f25   John Stultz   timekeeping: Fix ...
1983
  	unsigned int clock_set = 0;
1f4f94870   John Stultz   time: Refactor ac...
1984

876e78818   Peter Zijlstra   time: Rename time...
1985
  	while (tk->tkr_mono.xtime_nsec >= nsecps) {
1f4f94870   John Stultz   time: Refactor ac...
1986
  		int leap;
876e78818   Peter Zijlstra   time: Rename time...
1987
  		tk->tkr_mono.xtime_nsec -= nsecps;
1f4f94870   John Stultz   time: Refactor ac...
1988
  		tk->xtime_sec++;
78b98e3c5   Miroslav Lichvar   timekeeping/ntp: ...
1989
1990
1991
1992
1993
1994
1995
1996
  		/*
  		 * Skip NTP update if this second was accumulated before,
  		 * i.e. xtime_nsec underflowed in timekeeping_adjust()
  		 */
  		if (unlikely(tk->skip_second_overflow)) {
  			tk->skip_second_overflow = 0;
  			continue;
  		}
1f4f94870   John Stultz   time: Refactor ac...
1997
1998
  		/* Figure out if its a leap sec and apply if needed */
  		leap = second_overflow(tk->xtime_sec);
6d0ef903e   John Stultz   time: Clean up of...
1999
  		if (unlikely(leap)) {
7d489d15c   John Stultz   timekeeping: Conv...
2000
  			struct timespec64 ts;
6d0ef903e   John Stultz   time: Clean up of...
2001
2002
  
  			tk->xtime_sec += leap;
1f4f94870   John Stultz   time: Refactor ac...
2003

6d0ef903e   John Stultz   time: Clean up of...
2004
2005
2006
  			ts.tv_sec = leap;
  			ts.tv_nsec = 0;
  			tk_set_wall_to_mono(tk,
7d489d15c   John Stultz   timekeeping: Conv...
2007
  				timespec64_sub(tk->wall_to_monotonic, ts));
6d0ef903e   John Stultz   time: Clean up of...
2008

cc244ddae   John Stultz   timekeeping: Move...
2009
  			__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
5258d3f25   John Stultz   timekeeping: Fix ...
2010
  			clock_set = TK_CLOCK_WAS_SET;
6d0ef903e   John Stultz   time: Clean up of...
2011
  		}
1f4f94870   John Stultz   time: Refactor ac...
2012
  	}
5258d3f25   John Stultz   timekeeping: Fix ...
2013
  	return clock_set;
1f4f94870   John Stultz   time: Refactor ac...
2014
  }
1f4f94870   John Stultz   time: Refactor ac...
2015
  /**
a092ff0f9   John Stultz   time: Implement l...
2016
2017
2018
   * logarithmic_accumulation - shifted accumulation of cycles
   *
   * This functions accumulates a shifted interval of cycles into
b0294f302   Randy Dunlap   time: Delete repe...
2019
   * a shifted interval nanoseconds. Allows for O(log) accumulation
a092ff0f9   John Stultz   time: Implement l...
2020
2021
2022
2023
   * loop.
   *
   * Returns the unconsumed cycles.
   */
a5a1d1c29   Thomas Gleixner   clocksource: Use ...
2024
2025
  static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
  				    u32 shift, unsigned int *clock_set)
a092ff0f9   John Stultz   time: Implement l...
2026
  {
a5a1d1c29   Thomas Gleixner   clocksource: Use ...
2027
  	u64 interval = tk->cycle_interval << shift;
3d88d56c5   John Stultz   time: Fix CLOCK_M...
2028
  	u64 snsec_per_sec;
a092ff0f9   John Stultz   time: Implement l...
2029

571af55a3   Zhen Lei   time: Fix spellin...
2030
  	/* If the offset is smaller than a shifted interval, do nothing */
23a9537a6   Thomas Gleixner   timekeeping: Calc...
2031
  	if (offset < interval)
a092ff0f9   John Stultz   time: Implement l...
2032
2033
2034
  		return offset;
  
  	/* Accumulate one shifted interval */
23a9537a6   Thomas Gleixner   timekeeping: Calc...
2035
  	offset -= interval;
876e78818   Peter Zijlstra   time: Rename time...
2036
  	tk->tkr_mono.cycle_last += interval;
4a4ad80d3   Peter Zijlstra   time: Add timerke...
2037
  	tk->tkr_raw.cycle_last  += interval;
a092ff0f9   John Stultz   time: Implement l...
2038

876e78818   Peter Zijlstra   time: Rename time...
2039
  	tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
5258d3f25   John Stultz   timekeeping: Fix ...
2040
  	*clock_set |= accumulate_nsecs_to_secs(tk);
a092ff0f9   John Stultz   time: Implement l...
2041

deda2e819   Jason Wessel   timekeeping: Fix ...
2042
  	/* Accumulate raw time */
3d88d56c5   John Stultz   time: Fix CLOCK_M...
2043
2044
2045
2046
  	tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
  	snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
  	while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
  		tk->tkr_raw.xtime_nsec -= snsec_per_sec;
fc6eead7c   John Stultz   time: Clean up CL...
2047
  		tk->raw_sec++;
a092ff0f9   John Stultz   time: Implement l...
2048
2049
2050
  	}
  
  	/* Accumulate error between NTP and clock interval */
375f45b5b   John Stultz   timekeeping: Use ...
2051
  	tk->ntp_error += tk->ntp_tick << shift;
f726a697d   John Stultz   time: Rework time...
2052
2053
  	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
  						(tk->ntp_error_shift + shift);
a092ff0f9   John Stultz   time: Implement l...
2054
2055
2056
  
  	return offset;
  }
b061c7a51   Miroslav Lichvar   timekeeping: Upda...
2057
2058
2059
  /*
   * timekeeping_advance - Updates the timekeeper to the current time and
   * current NTP tick length
8524070b7   John Stultz   Move timekeeping ...
2060
   */
b061c7a51   Miroslav Lichvar   timekeeping: Upda...
2061
  static void timekeeping_advance(enum timekeeping_adv_mode mode)
8524070b7   John Stultz   Move timekeeping ...
2062
  {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
2063
  	struct timekeeper *real_tk = &tk_core.timekeeper;
48cdc135d   Thomas Gleixner   timekeeping: Impl...
2064
  	struct timekeeper *tk = &shadow_timekeeper;
a5a1d1c29   Thomas Gleixner   clocksource: Use ...
2065
  	u64 offset;
a092ff0f9   John Stultz   time: Implement l...
2066
  	int shift = 0, maxshift;
5258d3f25   John Stultz   timekeeping: Fix ...
2067
  	unsigned int clock_set = 0;
70471f2f0   John Stultz   time: Add timekee...
2068
  	unsigned long flags;
9a7a71b1d   Thomas Gleixner   timekeeping: Spli...
2069
  	raw_spin_lock_irqsave(&timekeeper_lock, flags);
8524070b7   John Stultz   Move timekeeping ...
2070
2071
2072
  
  	/* Make sure we're fully resumed: */
  	if (unlikely(timekeeping_suspended))
70471f2f0   John Stultz   time: Add timekee...
2073
  		goto out;
8524070b7   John Stultz   Move timekeeping ...
2074

592913ecb   John Stultz   time: Kill off CO...
2075
  #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
48cdc135d   Thomas Gleixner   timekeeping: Impl...
2076
  	offset = real_tk->cycle_interval;
b061c7a51   Miroslav Lichvar   timekeeping: Upda...
2077
2078
2079
  
  	if (mode != TK_ADV_TICK)
  		goto out;
592913ecb   John Stultz   time: Kill off CO...
2080
  #else
ceea5e377   John Stultz   time: Fix clock->...
2081
  	offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
876e78818   Peter Zijlstra   time: Rename time...
2082
  				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
8524070b7   John Stultz   Move timekeeping ...
2083

bf2ac3121   John Stultz   time: Avoid makin...
2084
  	/* Check if there's really nothing to do */
b061c7a51   Miroslav Lichvar   timekeeping: Upda...
2085
  	if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
bf2ac3121   John Stultz   time: Avoid makin...
2086
  		goto out;
b061c7a51   Miroslav Lichvar   timekeeping: Upda...
2087
  #endif
bf2ac3121   John Stultz   time: Avoid makin...
2088

3c17ad19f   John Stultz   timekeeping: Add ...
2089
  	/* Do some additional sanity checking */
a529bea8f   Stafford Horne   timekeeping: Use ...
2090
  	timekeeping_check_update(tk, offset);
3c17ad19f   John Stultz   timekeeping: Add ...
2091

a092ff0f9   John Stultz   time: Implement l...
2092
2093
2094
2095
  	/*
  	 * With NO_HZ we may have to accumulate many cycle_intervals
  	 * (think "ticks") worth of time at once. To do this efficiently,
  	 * we calculate the largest doubling multiple of cycle_intervals
88b28adf6   Jim Cromie   kernel-time: fix ...
2096
  	 * that is smaller than the offset.  We then accumulate that
a092ff0f9   John Stultz   time: Implement l...
2097
2098
  	 * chunk in one go, and then try to consume the next smaller
  	 * doubled multiple.
8524070b7   John Stultz   Move timekeeping ...
2099
  	 */
4e250fdde   John Stultz   time: Remove all ...
2100
  	shift = ilog2(offset) - ilog2(tk->cycle_interval);
a092ff0f9   John Stultz   time: Implement l...
2101
  	shift = max(0, shift);
88b28adf6   Jim Cromie   kernel-time: fix ...
2102
  	/* Bound shift to one less than what overflows tick_length */
ea7cf49a7   John Stultz   ntp: Access tick_...
2103
  	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
a092ff0f9   John Stultz   time: Implement l...
2104
  	shift = min(shift, maxshift);
4e250fdde   John Stultz   time: Remove all ...
2105
  	while (offset >= tk->cycle_interval) {
5258d3f25   John Stultz   timekeeping: Fix ...
2106
2107
  		offset = logarithmic_accumulation(tk, offset, shift,
  							&clock_set);
4e250fdde   John Stultz   time: Remove all ...
2108
  		if (offset < tk->cycle_interval<<shift)
830ec0458   John Stultz   time: Fix accumul...
2109
  			shift--;
8524070b7   John Stultz   Move timekeeping ...
2110
  	}
78b98e3c5   Miroslav Lichvar   timekeeping/ntp: ...
2111
  	/* Adjust the multiplier to correct NTP error */
4e250fdde   John Stultz   time: Remove all ...
2112
  	timekeeping_adjust(tk, offset);
8524070b7   John Stultz   Move timekeeping ...
2113

6a867a395   John Stultz   time: Remove xtim...
2114
  	/*
6a867a395   John Stultz   time: Remove xtim...
2115
  	 * Finally, make sure that after the rounding
1e75fa8be   John Stultz   time: Condense ti...
2116
  	 * xtime_nsec isn't larger than NSEC_PER_SEC
6a867a395   John Stultz   time: Remove xtim...
2117
  	 */
5258d3f25   John Stultz   timekeeping: Fix ...
2118
  	clock_set |= accumulate_nsecs_to_secs(tk);
83f57a11d   Linus Torvalds   Revert "time: Rem...
2119

3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
2120
  	write_seqcount_begin(&tk_core.seq);
48cdc135d   Thomas Gleixner   timekeeping: Impl...
2121
2122
2123
2124
2125
2126
2127
  	/*
  	 * Update the real timekeeper.
  	 *
  	 * We could avoid this memcpy by switching pointers, but that
  	 * requires changes to all other timekeeper usage sites as
  	 * well, i.e. move the timekeeper pointer getter into the
  	 * spinlocked/seqcount protected sections. And we trade this
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
2128
  	 * memcpy under the tk_core.seq against one before we start
48cdc135d   Thomas Gleixner   timekeeping: Impl...
2129
2130
  	 * updating.
  	 */
906c55579   John Stultz   timekeeping: Copy...
2131
  	timekeeping_update(tk, clock_set);
48cdc135d   Thomas Gleixner   timekeeping: Impl...
2132
  	memcpy(real_tk, tk, sizeof(*tk));
906c55579   John Stultz   timekeeping: Copy...
2133
  	/* The memcpy must come last. Do not put anything here! */
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
2134
  	write_seqcount_end(&tk_core.seq);
ca4523cda   Thomas Gleixner   timekeeping: Shor...
2135
  out:
9a7a71b1d   Thomas Gleixner   timekeeping: Spli...
2136
  	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
47a1b7963   John Stultz   tick/timekeeping:...
2137
  	if (clock_set)
cab5e127e   John Stultz   time: Revert to c...
2138
2139
  		/* Have to call _delayed version, since in irq context*/
  		clock_was_set_delayed();
8524070b7   John Stultz   Move timekeeping ...
2140
  }
7c3f1a573   Tomas Janousek   Introduce boot ba...
2141
2142
  
  /**
b061c7a51   Miroslav Lichvar   timekeeping: Upda...
2143
2144
2145
2146
2147
2148
2149
2150
2151
   * update_wall_time - Uses the current clocksource to increment the wall time
   *
   */
  void update_wall_time(void)
  {
  	timekeeping_advance(TK_ADV_TICK);
  }
  
  /**
d08c0cdd2   John Stultz   time: Expose getb...
2152
2153
   * getboottime64 - Return the real time of system boot.
   * @ts:		pointer to the timespec64 to be set
7c3f1a573   Tomas Janousek   Introduce boot ba...
2154
   *
d08c0cdd2   John Stultz   time: Expose getb...
2155
   * Returns the wall-time of boot in a timespec64.
7c3f1a573   Tomas Janousek   Introduce boot ba...
2156
2157
2158
2159
2160
2161
   *
   * This is based on the wall_to_monotonic offset and the total suspend
   * time. Calls to settimeofday will affect the value returned (which
   * basically means that however wrong your real time clock is at boot time,
   * you get the right time here).
   */
d08c0cdd2   John Stultz   time: Expose getb...
2162
  void getboottime64(struct timespec64 *ts)
7c3f1a573   Tomas Janousek   Introduce boot ba...
2163
  {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
2164
  	struct timekeeper *tk = &tk_core.timekeeper;
a3ed0e439   Thomas Gleixner   Revert: Unify CLO...
2165
  	ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
02cba1598   Thomas Gleixner   timekeeping: Simp...
2166

d08c0cdd2   John Stultz   time: Expose getb...
2167
  	*ts = ktime_to_timespec64(t);
7c3f1a573   Tomas Janousek   Introduce boot ba...
2168
  }
d08c0cdd2   John Stultz   time: Expose getb...
2169
  EXPORT_SYMBOL_GPL(getboottime64);
7c3f1a573   Tomas Janousek   Introduce boot ba...
2170

fb7fcc96a   Arnd Bergmann   timekeeping: Stan...
2171
  void ktime_get_coarse_real_ts64(struct timespec64 *ts)
2c6b47de1   John Stultz   Cleanup non-arch ...
2172
  {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
2173
  	struct timekeeper *tk = &tk_core.timekeeper;
e1e41b6ce   Rasmus Villemoes   timekeeping: Cons...
2174
  	unsigned int seq;
2c6b47de1   John Stultz   Cleanup non-arch ...
2175
2176
  
  	do {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
2177
  		seq = read_seqcount_begin(&tk_core.seq);
83f57a11d   Linus Torvalds   Revert "time: Rem...
2178

fb7fcc96a   Arnd Bergmann   timekeeping: Stan...
2179
  		*ts = tk_xtime(tk);
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
2180
  	} while (read_seqcount_retry(&tk_core.seq, seq));
2c6b47de1   John Stultz   Cleanup non-arch ...
2181
  }
fb7fcc96a   Arnd Bergmann   timekeeping: Stan...
2182
  EXPORT_SYMBOL(ktime_get_coarse_real_ts64);
da15cfdae   John Stultz   time: Introduce C...
2183

fb7fcc96a   Arnd Bergmann   timekeeping: Stan...
2184
  void ktime_get_coarse_ts64(struct timespec64 *ts)
da15cfdae   John Stultz   time: Introduce C...
2185
  {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
2186
  	struct timekeeper *tk = &tk_core.timekeeper;
7d489d15c   John Stultz   timekeeping: Conv...
2187
  	struct timespec64 now, mono;
e1e41b6ce   Rasmus Villemoes   timekeeping: Cons...
2188
  	unsigned int seq;
da15cfdae   John Stultz   time: Introduce C...
2189
2190
  
  	do {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
2191
  		seq = read_seqcount_begin(&tk_core.seq);
83f57a11d   Linus Torvalds   Revert "time: Rem...
2192

4e250fdde   John Stultz   time: Remove all ...
2193
2194
  		now = tk_xtime(tk);
  		mono = tk->wall_to_monotonic;
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
2195
  	} while (read_seqcount_retry(&tk_core.seq, seq));
da15cfdae   John Stultz   time: Introduce C...
2196

fb7fcc96a   Arnd Bergmann   timekeeping: Stan...
2197
  	set_normalized_timespec64(ts, now.tv_sec + mono.tv_sec,
da15cfdae   John Stultz   time: Introduce C...
2198
  				now.tv_nsec + mono.tv_nsec);
da15cfdae   John Stultz   time: Introduce C...
2199
  }
fb7fcc96a   Arnd Bergmann   timekeeping: Stan...
2200
  EXPORT_SYMBOL(ktime_get_coarse_ts64);
871cf1e5f   Torben Hohn   time: Move do_tim...
2201
2202
  
  /*
d6ad41876   John Stultz   time: Kill xtime_...
2203
   * Must hold jiffies_lock
871cf1e5f   Torben Hohn   time: Move do_tim...
2204
2205
2206
2207
   */
  void do_timer(unsigned long ticks)
  {
  	jiffies_64 += ticks;
46132e3ac   Paul Gortmaker   sched: nohz: stop...
2208
  	calc_global_load();
871cf1e5f   Torben Hohn   time: Move do_tim...
2209
  }
48cf76f71   Torben Hohn   time: Provide get...
2210
2211
  
  /**
76f410889   John Stultz   hrtimer: Cleanup ...
2212
   * ktime_get_update_offsets_now - hrtimer helper
868a3e915   Thomas Gleixner   hrtimer: Make off...
2213
   * @cwsseq:	pointer to check and store the clock was set sequence number
f6c06abfb   Thomas Gleixner   timekeeping: Prov...
2214
   * @offs_real:	pointer to storage for monotonic -> realtime offset
a3ed0e439   Thomas Gleixner   Revert: Unify CLO...
2215
   * @offs_boot:	pointer to storage for monotonic -> boottime offset
b7bc50e45   Xie XiuQi   timekeeping: Fix ...
2216
   * @offs_tai:	pointer to storage for monotonic -> clock tai offset
f6c06abfb   Thomas Gleixner   timekeeping: Prov...
2217
   *
868a3e915   Thomas Gleixner   hrtimer: Make off...
2218
2219
2220
2221
   * Returns current monotonic time and updates the offsets if the
   * sequence number in @cwsseq and timekeeper.clock_was_set_seq are
   * different.
   *
b7bc50e45   Xie XiuQi   timekeeping: Fix ...
2222
   * Called from hrtimer_interrupt() or retrigger_next_event()
f6c06abfb   Thomas Gleixner   timekeeping: Prov...
2223
   */
868a3e915   Thomas Gleixner   hrtimer: Make off...
2224
  ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
a3ed0e439   Thomas Gleixner   Revert: Unify CLO...
2225
  				     ktime_t *offs_boot, ktime_t *offs_tai)
f6c06abfb   Thomas Gleixner   timekeeping: Prov...
2226
  {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
2227
  	struct timekeeper *tk = &tk_core.timekeeper;
f6c06abfb   Thomas Gleixner   timekeeping: Prov...
2228
  	unsigned int seq;
a37c0aad6   Thomas Gleixner   timekeeping: Use ...
2229
2230
  	ktime_t base;
  	u64 nsecs;
f6c06abfb   Thomas Gleixner   timekeeping: Prov...
2231
2232
  
  	do {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
2233
  		seq = read_seqcount_begin(&tk_core.seq);
f6c06abfb   Thomas Gleixner   timekeeping: Prov...
2234

876e78818   Peter Zijlstra   time: Rename time...
2235
2236
  		base = tk->tkr_mono.base;
  		nsecs = timekeeping_get_ns(&tk->tkr_mono);
833f32d76   John Stultz   time: Prevent ear...
2237
  		base = ktime_add_ns(base, nsecs);
868a3e915   Thomas Gleixner   hrtimer: Make off...
2238
2239
2240
  		if (*cwsseq != tk->clock_was_set_seq) {
  			*cwsseq = tk->clock_was_set_seq;
  			*offs_real = tk->offs_real;
a3ed0e439   Thomas Gleixner   Revert: Unify CLO...
2241
  			*offs_boot = tk->offs_boot;
868a3e915   Thomas Gleixner   hrtimer: Make off...
2242
2243
  			*offs_tai = tk->offs_tai;
  		}
833f32d76   John Stultz   time: Prevent ear...
2244
2245
  
  		/* Handle leapsecond insertion adjustments */
2456e8553   Thomas Gleixner   ktime: Get rid of...
2246
  		if (unlikely(base >= tk->next_leap_ktime))
833f32d76   John Stultz   time: Prevent ear...
2247
  			*offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
2248
  	} while (read_seqcount_retry(&tk_core.seq, seq));
f6c06abfb   Thomas Gleixner   timekeeping: Prov...
2249

833f32d76   John Stultz   time: Prevent ear...
2250
  	return base;
f6c06abfb   Thomas Gleixner   timekeeping: Prov...
2251
  }
f6c06abfb   Thomas Gleixner   timekeeping: Prov...
2252

f0af911a9   Torben Hohn   time: Provide xti...
2253
  /**
1572fa037   Arnd Bergmann   timekeeping: Use ...
2254
   * timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
e0956dcc4   Arnd Bergmann   timekeeping: Cons...
2255
   */
ead25417f   Deepa Dinamani   timex: use __kern...
2256
  static int timekeeping_validate_timex(const struct __kernel_timex *txc)
e0956dcc4   Arnd Bergmann   timekeeping: Cons...
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
  {
  	if (txc->modes & ADJ_ADJTIME) {
  		/* singleshot must not be used with any other mode bits */
  		if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
  			return -EINVAL;
  		if (!(txc->modes & ADJ_OFFSET_READONLY) &&
  		    !capable(CAP_SYS_TIME))
  			return -EPERM;
  	} else {
  		/* In order to modify anything, you gotta be super-user! */
  		if (txc->modes && !capable(CAP_SYS_TIME))
  			return -EPERM;
  		/*
  		 * if the quartz is off by more than 10% then
  		 * something is VERY wrong!
  		 */
  		if (txc->modes & ADJ_TICK &&
  		    (txc->tick <  900000/USER_HZ ||
  		     txc->tick > 1100000/USER_HZ))
  			return -EINVAL;
  	}
  
  	if (txc->modes & ADJ_SETOFFSET) {
  		/* In order to inject time, you gotta be super-user! */
  		if (!capable(CAP_SYS_TIME))
  			return -EPERM;
1572fa037   Arnd Bergmann   timekeeping: Use ...
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
  		/*
  		 * Validate if a timespec/timeval used to inject a time
  		 * offset is valid.  Offsets can be postive or negative, so
  		 * we don't check tv_sec. The value of the timeval/timespec
  		 * is the sum of its fields,but *NOTE*:
  		 * The field tv_usec/tv_nsec must always be non-negative and
  		 * we can't have more nanoseconds/microseconds than a second.
  		 */
  		if (txc->time.tv_usec < 0)
  			return -EINVAL;
e0956dcc4   Arnd Bergmann   timekeeping: Cons...
2293

1572fa037   Arnd Bergmann   timekeeping: Use ...
2294
2295
  		if (txc->modes & ADJ_NANO) {
  			if (txc->time.tv_usec >= NSEC_PER_SEC)
e0956dcc4   Arnd Bergmann   timekeeping: Cons...
2296
  				return -EINVAL;
e0956dcc4   Arnd Bergmann   timekeeping: Cons...
2297
  		} else {
1572fa037   Arnd Bergmann   timekeeping: Use ...
2298
  			if (txc->time.tv_usec >= USEC_PER_SEC)
e0956dcc4   Arnd Bergmann   timekeeping: Cons...
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
  				return -EINVAL;
  		}
  	}
  
  	/*
  	 * Check for potential multiplication overflows that can
  	 * only happen on 64-bit systems:
  	 */
  	if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
  		if (LLONG_MIN / PPM_SCALE > txc->freq)
  			return -EINVAL;
  		if (LLONG_MAX / PPM_SCALE < txc->freq)
  			return -EINVAL;
  	}
  
  	return 0;
  }
  
  
  /**
aa6f9c595   John Stultz   ntp: Move do_adjt...
2319
2320
   * do_adjtimex() - Accessor function to NTP __do_adjtimex function
   */
ead25417f   Deepa Dinamani   timex: use __kern...
2321
  int do_adjtimex(struct __kernel_timex *txc)
aa6f9c595   John Stultz   ntp: Move do_adjt...
2322
  {
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
2323
  	struct timekeeper *tk = &tk_core.timekeeper;
7e8eda734   Ondrej Mosnacek   ntp: Audit NTP pa...
2324
  	struct audit_ntp_data ad;
06c017fdd   John Stultz   timekeeping: Hold...
2325
  	unsigned long flags;
7d489d15c   John Stultz   timekeeping: Conv...
2326
  	struct timespec64 ts;
4e8f8b34b   John Stultz   timekeeping: Make...
2327
  	s32 orig_tai, tai;
e4085693f   John Stultz   ntp: Move timex v...
2328
2329
2330
  	int ret;
  
  	/* Validate the data before disabling interrupts */
1572fa037   Arnd Bergmann   timekeeping: Use ...
2331
  	ret = timekeeping_validate_timex(txc);
e4085693f   John Stultz   ntp: Move timex v...
2332
2333
  	if (ret)
  		return ret;
cef90377f   John Stultz   timekeeping: Move...
2334
  	if (txc->modes & ADJ_SETOFFSET) {
1572fa037   Arnd Bergmann   timekeeping: Use ...
2335
  		struct timespec64 delta;
cef90377f   John Stultz   timekeeping: Move...
2336
2337
2338
2339
2340
2341
2342
  		delta.tv_sec  = txc->time.tv_sec;
  		delta.tv_nsec = txc->time.tv_usec;
  		if (!(txc->modes & ADJ_NANO))
  			delta.tv_nsec *= 1000;
  		ret = timekeeping_inject_offset(&delta);
  		if (ret)
  			return ret;
2d87a0674   Ondrej Mosnacek   timekeeping: Audi...
2343
2344
  
  		audit_tk_injoffset(delta);
cef90377f   John Stultz   timekeeping: Move...
2345
  	}
7e8eda734   Ondrej Mosnacek   ntp: Audit NTP pa...
2346
  	audit_ntp_init(&ad);
d30faff90   Arnd Bergmann   timekeeping: Use ...
2347
  	ktime_get_real_ts64(&ts);
87ace39b7   John Stultz   ntp: Rework do_ad...
2348

06c017fdd   John Stultz   timekeeping: Hold...
2349
  	raw_spin_lock_irqsave(&timekeeper_lock, flags);
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
2350
  	write_seqcount_begin(&tk_core.seq);
06c017fdd   John Stultz   timekeeping: Hold...
2351

4e8f8b34b   John Stultz   timekeeping: Make...
2352
  	orig_tai = tai = tk->tai_offset;
7e8eda734   Ondrej Mosnacek   ntp: Audit NTP pa...
2353
  	ret = __do_adjtimex(txc, &ts, &tai, &ad);
aa6f9c595   John Stultz   ntp: Move do_adjt...
2354

4e8f8b34b   John Stultz   timekeeping: Make...
2355
2356
  	if (tai != orig_tai) {
  		__timekeeping_set_tai_offset(tk, tai);
f55c07607   John Stultz   timekeeping: Fix ...
2357
  		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
4e8f8b34b   John Stultz   timekeeping: Make...
2358
  	}
833f32d76   John Stultz   time: Prevent ear...
2359
  	tk_update_leap_state(tk);
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
2360
  	write_seqcount_end(&tk_core.seq);
06c017fdd   John Stultz   timekeeping: Hold...
2361
  	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
7e8eda734   Ondrej Mosnacek   ntp: Audit NTP pa...
2362
  	audit_ntp_log(&ad);
b061c7a51   Miroslav Lichvar   timekeeping: Upda...
2363
2364
2365
  	/* Update the multiplier immediately if frequency was set directly */
  	if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK))
  		timekeeping_advance(TK_ADV_FREQ);
6fdda9a9c   John Stultz   timekeeping: Avoi...
2366
2367
  	if (tai != orig_tai)
  		clock_was_set();
7bd360144   John Stultz   timekeeping: Fix ...
2368
  	ntp_notify_cmos_timer();
87ace39b7   John Stultz   ntp: Rework do_ad...
2369
2370
  	return ret;
  }
aa6f9c595   John Stultz   ntp: Move do_adjt...
2371
2372
2373
2374
2375
  
  #ifdef CONFIG_NTP_PPS
  /**
   * hardpps() - Accessor function to NTP __hardpps function
   */
7ec88e4be   Arnd Bergmann   ntp/pps: use time...
2376
  void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
aa6f9c595   John Stultz   ntp: Move do_adjt...
2377
  {
06c017fdd   John Stultz   timekeeping: Hold...
2378
2379
2380
  	unsigned long flags;
  
  	raw_spin_lock_irqsave(&timekeeper_lock, flags);
3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
2381
  	write_seqcount_begin(&tk_core.seq);
06c017fdd   John Stultz   timekeeping: Hold...
2382

aa6f9c595   John Stultz   ntp: Move do_adjt...
2383
  	__hardpps(phase_ts, raw_ts);
06c017fdd   John Stultz   timekeeping: Hold...
2384

3fdb14fd1   Thomas Gleixner   timekeeping: Cach...
2385
  	write_seqcount_end(&tk_core.seq);
06c017fdd   John Stultz   timekeeping: Hold...
2386
  	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
aa6f9c595   John Stultz   ntp: Move do_adjt...
2387
2388
  }
  EXPORT_SYMBOL(hardpps);
a2d818030   Robert P. J. Day   drivers/pps: aest...
2389
  #endif /* CONFIG_NTP_PPS */
aa6f9c595   John Stultz   ntp: Move do_adjt...
2390
2391
  
  /**
f0af911a9   Torben Hohn   time: Provide xti...
2392
2393
2394
2395
2396
2397
2398
   * xtime_update() - advances the timekeeping infrastructure
   * @ticks:	number of ticks, that have elapsed since the last call.
   *
   * Must be called with interrupts disabled.
   */
  void xtime_update(unsigned long ticks)
  {
e5d4d1756   Thomas Gleixner   timekeeping: Spli...
2399
2400
  	raw_spin_lock(&jiffies_lock);
  	write_seqcount_begin(&jiffies_seq);
f0af911a9   Torben Hohn   time: Provide xti...
2401
  	do_timer(ticks);
e5d4d1756   Thomas Gleixner   timekeeping: Spli...
2402
2403
  	write_seqcount_end(&jiffies_seq);
  	raw_spin_unlock(&jiffies_lock);
47a1b7963   John Stultz   tick/timekeeping:...
2404
  	update_wall_time();
f0af911a9   Torben Hohn   time: Provide xti...
2405
  }