drivers/clocksource/hyperv_timer.c

  // SPDX-License-Identifier: GPL-2.0
  
  /*
   * Clocksource driver for the synthetic counter and timers
   * provided by the Hyper-V hypervisor to guest VMs, as described
   * in the Hyper-V Top Level Functional Spec (TLFS). This driver
   * is instruction set architecture independent.
   *
   * Copyright (C) 2019, Microsoft, Inc.
   *
   * Author:  Michael Kelley <mikelley@microsoft.com>
   */
  
  #include <linux/percpu.h>
  #include <linux/cpumask.h>
  #include <linux/clockchips.h>
  #include <linux/clocksource.h>
  #include <linux/sched_clock.h>
  #include <linux/mm.h>
  #include <linux/cpuhotplug.h>
  #include <clocksource/hyperv_timer.h>
  #include <asm/hyperv-tlfs.h>
  #include <asm/mshyperv.h>
  
  static struct clock_event_device __percpu *hv_clock_event;
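  /*
   * Reference counter value captured when sched_clock is registered;
   * it is subtracted so that sched_clock starts near zero.
   */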
  static u64 hv_sched_clock_offset __ro_after_init;
  
  /*
   * If false, we're using the old mechanism for stimer0 interrupts,
   * where stimer0 sends a VMbus message when it expires. The old
   * mechanism is used when running on older versions of Hyper-V
   * that don't support Direct Mode. While Hyper-V provides
   * four stimers per CPU, Linux uses only stimer0.
   *
   * Because Direct Mode does not require processing a VMbus
   * message, stimer interrupts can be enabled earlier in the
   * process of booting a CPU, consistent with when timer
   * interrupts are enabled for other clocksource drivers.
   * However, for legacy versions of Hyper-V when Direct Mode
   * is not enabled, setting up stimer interrupts must be
   * delayed until VMbus is initialized and can process the
   * interrupt message.
   */
  static bool direct_mode_enabled;
  
  static int stimer0_irq;
  static int stimer0_vector;
  static int stimer0_message_sint;
  
  /*
   * ISR for when stimer0 is operating in Direct Mode.  Direct Mode
   * does not use VMbus or any VMbus messages, so process here and not
   * in the VMbus driver code.
   */
  void hv_stimer0_isr(void)
  {
  	struct clock_event_device *ce;
  
  	ce = this_cpu_ptr(hv_clock_event);
  	ce->event_handler(ce);
  }
  EXPORT_SYMBOL_GPL(hv_stimer0_isr);
  
  static int hv_ce_set_next_event(unsigned long delta,
  				struct clock_event_device *evt)
  {
  	u64 current_tick;
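
  	/*
  	 * Both delta and the reference counter are in 100ns units, so the
  	 * absolute stimer0 expiration time is simply the current reference
  	 * time plus delta.
  	 */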
  	current_tick = hv_read_reference_counter();
  	current_tick += delta;
  	hv_init_timer(0, current_tick);
  	return 0;
  }
  
  static int hv_ce_shutdown(struct clock_event_device *evt)
  {
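  	/*
  	 * Writing zero to the stimer0 count and config registers stops
  	 * the timer. In Direct Mode, also disable the per-cpu interrupt
  	 * that stimer0 uses.
  	 */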
  	hv_init_timer(0, 0);
  	hv_init_timer_config(0, 0);
  	if (direct_mode_enabled)
  		hv_disable_stimer0_percpu_irq(stimer0_irq);
  
  	return 0;
  }
  
  static int hv_ce_set_oneshot(struct clock_event_device *evt)
  {
  	union hv_stimer_config timer_cfg;
  
  	timer_cfg.as_uint64 = 0;
  	timer_cfg.enable = 1;
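  	/*
  	 * auto_enable: writing a new count via hv_ce_set_next_event()
  	 * (re)arms the timer without requiring another config write.
  	 */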
  	timer_cfg.auto_enable = 1;
  	if (direct_mode_enabled) {
  		/*
  		 * When it expires, the timer will directly interrupt
  		 * on the specified hardware vector/IRQ.
  		 */
  		timer_cfg.direct_mode = 1;
  		timer_cfg.apic_vector = stimer0_vector;
  		hv_enable_stimer0_percpu_irq(stimer0_irq);
  	} else {
  		/*
  		 * When it expires, the timer will generate a VMbus message,
  		 * to be handled by the normal VMbus interrupt handler.
  		 */
  		timer_cfg.direct_mode = 0;
  		timer_cfg.sintx = stimer0_message_sint;
  	}
  	hv_init_timer_config(0, timer_cfg.as_uint64);
  	return 0;
  }
  
  /*
   * hv_stimer_init - Per-cpu initialization of the clockevent
   */
  static int hv_stimer_init(unsigned int cpu)
  {
  	struct clock_event_device *ce;
  	if (!hv_clock_event)
  		return 0;
  
  	ce = per_cpu_ptr(hv_clock_event, cpu);
  	ce->name = "Hyper-V clockevent";
  	ce->features = CLOCK_EVT_FEAT_ONESHOT;
  	ce->cpumask = cpumask_of(cpu);
  	ce->rating = 1000;
  	ce->set_state_shutdown = hv_ce_shutdown;
  	ce->set_state_oneshot = hv_ce_set_oneshot;
  	ce->set_next_event = hv_ce_set_next_event;
  
  	clockevents_config_and_register(ce,
  					HV_CLOCK_HZ,
  					HV_MIN_DELTA_TICKS,
  					HV_MAX_MAX_DELTA_TICKS);
  	return 0;
  }
  
  /*
   * hv_stimer_cleanup - Per-cpu cleanup of the clockevent
   */
  int hv_stimer_cleanup(unsigned int cpu)
  {
  	struct clock_event_device *ce;
  	if (!hv_clock_event)
  		return 0;
  
  	/*
  	 * In the legacy case where Direct Mode is not enabled
  	 * (which can only be on x86/64), stimer cleanup happens
  	 * relatively early in the CPU offlining process. We
  	 * must unbind the stimer-based clockevent device so
  	 * that the LAPIC timer can take over until clockevents
  	 * are no longer needed in the offlining process. Note
  	 * that clockevents_unbind_device() eventually calls
  	 * hv_ce_shutdown().
  	 *
  	 * The unbind should not be done when Direct Mode is
  	 * enabled because we may be on an architecture where
  	 * there are no other clockevent devices to fall back to.
  	 */
  	ce = per_cpu_ptr(hv_clock_event, cpu);
  	if (direct_mode_enabled)
  		hv_ce_shutdown(ce);
  	else
  		clockevents_unbind_device(ce, cpu);
  
  	return 0;
  }
  EXPORT_SYMBOL_GPL(hv_stimer_cleanup);
  
  /* hv_stimer_alloc - Global initialization of the clockevent and stimer0 */
  int hv_stimer_alloc(void)
  {
  	int ret = 0;
  
  	/*
  	 * Synthetic timers are always available except on old versions of
  	 * Hyper-V on x86.  In that case, return an error, and Linux will use a
  	 * clockevent based on the emulated LAPIC timer hardware.
  	 */
  	if (!(ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE))
  		return -EINVAL;
  
  	hv_clock_event = alloc_percpu(struct clock_event_device);
  	if (!hv_clock_event)
  		return -ENOMEM;
  
  	direct_mode_enabled = ms_hyperv.misc_features &
  			HV_STIMER_DIRECT_MODE_AVAILABLE;
  	if (direct_mode_enabled) {
  		ret = hv_setup_stimer0_irq(&stimer0_irq, &stimer0_vector,
  				hv_stimer0_isr);
  		if (ret)
  			goto free_percpu;
  
  		/*
  		 * Since we are in Direct Mode, stimer initialization
  		 * can be done now with a CPUHP value in the same range
  		 * as other clockevent devices.
  		 */
  		ret = cpuhp_setup_state(CPUHP_AP_HYPERV_TIMER_STARTING,
  				"clockevents/hyperv/stimer:starting",
  				hv_stimer_init, hv_stimer_cleanup);
  		if (ret < 0)
  			goto free_stimer0_irq;
  	}
  	return ret;

  free_stimer0_irq:
  	hv_remove_stimer0_irq(stimer0_irq);
  	stimer0_irq = 0;
  free_percpu:
  	free_percpu(hv_clock_event);
  	hv_clock_event = NULL;
  	return ret;
  }
  EXPORT_SYMBOL_GPL(hv_stimer_alloc);
  /*
   * hv_stimer_legacy_init -- Called from the VMbus driver to handle
   * the case when Direct Mode is not enabled, and the stimer
   * must be initialized late in the CPU onlining process.
   */
  void hv_stimer_legacy_init(unsigned int cpu, int sint)
  {
  	if (direct_mode_enabled)
  		return;
  
  	/*
  	 * This function gets called by each vCPU, so setting the
  	 * global stimer0_message_sint value each time is conceptually
  	 * not ideal, but the value passed in is always the same and
  	 * it avoids introducing yet another interface into this
  	 * clocksource driver just to set the sint in the legacy case.
  	 */
  	stimer0_message_sint = sint;
  	(void)hv_stimer_init(cpu);
  }
  EXPORT_SYMBOL_GPL(hv_stimer_legacy_init);
  
  /*
   * hv_stimer_legacy_cleanup -- Called from the VMbus driver to
   * handle the case when Direct Mode is not enabled, and the
   * stimer must be cleaned up early in the CPU offlining
   * process.
   */
  void hv_stimer_legacy_cleanup(unsigned int cpu)
  {
  	if (direct_mode_enabled)
  		return;
  	(void)hv_stimer_cleanup(cpu);
  }
  EXPORT_SYMBOL_GPL(hv_stimer_legacy_cleanup);
  /* hv_stimer_free - Free global resources allocated by hv_stimer_alloc() */
  void hv_stimer_free(void)
  {
  	if (!hv_clock_event)
  		return;
  
  	if (direct_mode_enabled) {
  		cpuhp_remove_state(CPUHP_AP_HYPERV_TIMER_STARTING);
  		hv_remove_stimer0_irq(stimer0_irq);
  		stimer0_irq = 0;
  	}
  	free_percpu(hv_clock_event);
  	hv_clock_event = NULL;
  }
  EXPORT_SYMBOL_GPL(hv_stimer_free);
  
  /*
   * Do a global cleanup of clockevents for the cases of kexec and
   * VMbus exit.
   */
  void hv_stimer_global_cleanup(void)
  {
  	int	cpu;

  	/*
  	 * hv_stimer_legacy_cleanup() will stop the stimer if Direct
  	 * Mode is not enabled, and fall back to the LAPIC timer.
  	 */
  	for_each_present_cpu(cpu) {
  		hv_stimer_legacy_cleanup(cpu);
  	}
  
  	/*
  	 * If Direct Mode is enabled, the cpuhp teardown callback
  	 * (hv_stimer_cleanup) will be run on all CPUs to stop the
  	 * stimers.
  	 */
  	hv_stimer_free();
  }
  EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
  
  /*
   * Code and definitions for the Hyper-V clocksources.  Two
   * clocksources are defined: one that reads the Hyper-V defined MSR, and
   * the other that uses the TSC reference page feature as defined in the
   * TLFS.  The MSR version is for compatibility with old versions of
   * Hyper-V and 32-bit x86.  The TSC reference page version is preferred.
   *
   * The Hyper-V clocksource ratings of 250 are chosen to be below the
   * TSC clocksource rating of 300.  In configurations where Hyper-V offers
   * an InvariantTSC, the TSC is not marked "unstable", so the TSC clocksource
   * is available and preferred.  With the higher rating, it will be the
   * default.  On older hardware and Hyper-V versions, the TSC is marked
   * "unstable", so no TSC clocksource is created and the selected Hyper-V
   * clocksource will be the default.
   */
  u64 (*hv_read_reference_counter)(void);
  EXPORT_SYMBOL_GPL(hv_read_reference_counter);

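  /*
   * The TSC page must be a full, page-aligned guest page: its guest
   * physical address is programmed into the Hyper-V Reference TSC MSR.
   */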
  static union {
  	struct ms_hyperv_tsc_page page;
  	u8 reserved[PAGE_SIZE];
  } tsc_pg __aligned(PAGE_SIZE);
  
  struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
  {
  	return &tsc_pg.page;
  }
  EXPORT_SYMBOL_GPL(hv_get_tsc_page);
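
  /*
   * Read the reference time from the TSC page. hv_read_tsc_page()
   * returns U64_MAX when the page contents are not valid, in which
   * case fall back to reading the reference count MSR directly.
   */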
  static u64 notrace read_hv_clock_tsc(void)
  {
  	u64 current_tick = hv_read_tsc_page(hv_get_tsc_page());
  
  	if (current_tick == U64_MAX)
  		hv_get_time_ref_count(current_tick);
  
  	return current_tick;
  }
  static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg)
  {
  	return read_hv_clock_tsc();
  }
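
  /*
   * sched_clock callback: the reference time is in 100ns units, so
   * scale by NSEC_PER_SEC / HV_CLOCK_HZ to get nanoseconds relative
   * to the offset captured when the clocksource was initialized.
   */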
  static u64 notrace read_hv_sched_clock_tsc(void)
  {
  	return (read_hv_clock_tsc() - hv_sched_clock_offset) *
  		(NSEC_PER_SEC / HV_CLOCK_HZ);
  }
  static void suspend_hv_clock_tsc(struct clocksource *arg)
  {
  	u64 tsc_msr;
  
  	/* Disable the TSC page */
  	hv_get_reference_tsc(tsc_msr);
  	tsc_msr &= ~BIT_ULL(0);
  	hv_set_reference_tsc(tsc_msr);
  }
  
  
  static void resume_hv_clock_tsc(struct clocksource *arg)
  {
  	phys_addr_t phys_addr = virt_to_phys(&tsc_pg);
  	u64 tsc_msr;
  
  	/* Re-enable the TSC page */
  	hv_get_reference_tsc(tsc_msr);
  	tsc_msr &= GENMASK_ULL(11, 0);
  	tsc_msr |= BIT_ULL(0) | (u64)phys_addr;
  	hv_set_reference_tsc(tsc_msr);
  }
  static int hv_cs_enable(struct clocksource *cs)
  {
  	hv_enable_vdso_clocksource();
  	return 0;
  }
  static struct clocksource hyperv_cs_tsc = {
  	.name	= "hyperv_clocksource_tsc_page",
  	.rating	= 250,
  	.read	= read_hv_clock_tsc_cs,
  	.mask	= CLOCKSOURCE_MASK(64),
  	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
  	.suspend= suspend_hv_clock_tsc,
  	.resume	= resume_hv_clock_tsc,
  	.enable = hv_cs_enable,
  };

  static u64 notrace read_hv_clock_msr(void)
  {
  	u64 current_tick;
  	/*
  	 * Read the partition counter to get the current tick count. This count
  	 * is set to 0 when the partition is created and is incremented in
  	 * 100 nanosecond units.
  	 */
  	hv_get_time_ref_count(current_tick);
  	return current_tick;
  }
  static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
  {
  	return read_hv_clock_msr();
  }
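
  /*
   * sched_clock callback for the MSR clocksource; same 100ns-to-ns
   * scaling as the TSC page variant above.
   */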
  static u64 notrace read_hv_sched_clock_msr(void)
  {
  	return (read_hv_clock_msr() - hv_sched_clock_offset) *
  		(NSEC_PER_SEC / HV_CLOCK_HZ);
  }
  
  static struct clocksource hyperv_cs_msr = {
  	.name	= "hyperv_clocksource_msr",
  	.rating	= 250,
  	.read	= read_hv_clock_msr_cs,
  	.mask	= CLOCKSOURCE_MASK(64),
  	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
  };
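
  /*
   * Set up the TSC page clocksource: point the reference counter read
   * function at the TSC page, program the Reference TSC MSR with the
   * page's guest physical address plus the enable bit, and register
   * the clocksource and the sched_clock callback. Returns false if
   * the Reference TSC MSR is not available.
   */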
  static bool __init hv_init_tsc_clocksource(void)
  {
  	u64		tsc_msr;
  	phys_addr_t	phys_addr;
  
  	if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
  		return false;
  	hv_read_reference_counter = read_hv_clock_tsc;
  	phys_addr = virt_to_phys(hv_get_tsc_page());
  
  	/*
  	 * The Hyper-V TLFS specifies to preserve the value of reserved
  	 * bits in registers. So read the existing value, preserve the
  	 * low order 12 bits, and add in the guest physical address
  	 * (which already has at least the low 12 bits set to zero since
  	 * it is page aligned). Also set the "enable" bit, which is bit 0.
  	 */
  	hv_get_reference_tsc(tsc_msr);
  	tsc_msr &= GENMASK_ULL(11, 0);
  	tsc_msr = tsc_msr | 0x1 | (u64)phys_addr;
  	hv_set_reference_tsc(tsc_msr);
  
  	hv_set_clocksource_vdso(hyperv_cs_tsc);
  	clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
  	hv_sched_clock_offset = hv_read_reference_counter();
  	hv_setup_sched_clock(read_hv_sched_clock_tsc);
  	return true;
  }
  
  void __init hv_init_clocksource(void)
  {
  	/*
  	 * Try to set up the TSC page clocksource. If it succeeds, we're
  	 * done. Otherwise, set up the MSR clocksource.  At least one of
  	 * these will always be available except on very old versions of
  	 * Hyper-V on x86.  In that case we won't have a Hyper-V
  	 * clocksource, but Linux will still run with a clocksource based
  	 * on the emulated PIT or LAPIC timer.
  	 */
  	if (hv_init_tsc_clocksource())
  		return;
  
  	if (!(ms_hyperv.features & HV_MSR_TIME_REF_COUNT_AVAILABLE))
  		return;
  	hv_read_reference_counter = read_hv_clock_msr;
  	clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
  	hv_sched_clock_offset = hv_read_reference_counter();
  	hv_setup_sched_clock(read_hv_sched_clock_msr);
  }
  EXPORT_SYMBOL_GPL(hv_init_clocksource);