arch/x86/kernel/tsc_sync.c

  /*
   * check TSC synchronization.
   *
   * Copyright (C) 2006, Red Hat, Inc., Ingo Molnar
   *
   * We check whether all boot CPUs have their TSCs synchronized,
   * print a warning if not and turn off the TSC clock-source.
   *
   * The warp-check is point-to-point between two CPUs, the CPU
   * initiating the bootup is the 'source CPU', the freshly booting
   * CPU is the 'target CPU'.
   *
   * Only two CPUs may participate - they can enter in any order.
   * ( The serial nature of the boot logic and the CPU hotplug lock
   *   protects against more than 2 CPUs entering this code. )
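   *
   * The handshake, roughly (source CPU on the left, target CPU
   * on the right):
   *
   *   check_tsc_sync_source(cpu)       check_tsc_sync_target()
   *     wait for start_count == 1  <--   atomic_inc(start_count)
   *     atomic_inc(start_count)    -->   wait for start_count == 2
   *     check_tsc_warp()                 check_tsc_warp()
   *     wait for stop_count == 1   <--   atomic_inc(stop_count)
   *     print results, reset state
   *     atomic_inc(stop_count)     -->   wait for stop_count == 2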
   */
  #include <linux/spinlock.h>
  #include <linux/kernel.h>
  #include <linux/init.h>
  #include <linux/smp.h>
  #include <linux/nmi.h>
  #include <asm/tsc.h>
  
  /*
   * Entry/exit counters that make sure that both CPUs
   * run the measurement code at once:
   */
  static __cpuinitdata atomic_t start_count;
  static __cpuinitdata atomic_t stop_count;
  
  /*
   * We use a raw spinlock in this exceptional case, because
   * we want to have the fastest, inlined, non-debug version
   * of a critical section, to be able to prove TSC time-warps:
   */
  static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;

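  /*
   * Warp-measurement state shared by the two CPUs and
   * updated under sync_lock:
   */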
  static __cpuinitdata cycles_t last_tsc;
  static __cpuinitdata cycles_t max_warp;
  static __cpuinitdata int nr_warps;
  
  /*
   * TSC-warp measurement loop running on both CPUs:
   */
  static __cpuinit void check_tsc_warp(void)
  {
  	cycles_t start, now, prev, end;
  	int i;
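
  	/*
  	 * RDTSC is not ordered with respect to surrounding
  	 * instructions, so fence the reads on both sides to get
  	 * meaningful timestamps:
  	 */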
  	rdtsc_barrier();
  	start = get_cycles();
  	rdtsc_barrier();
  	/*
  	 * The measurement runs for 20 msecs:
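  	 * (tsc_khz = TSC cycles per millisecond)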
  	 */
  	end = start + tsc_khz * 20ULL;
  	now = start;
  
  	for (i = 0; ; i++) {
  		/*
  		 * We take the global lock, measure TSC, save the
  		 * previous TSC that was measured (possibly on
  		 * another CPU) and update the previous TSC timestamp.
  		 */
  		arch_spin_lock(&sync_lock);
  		prev = last_tsc;
  		rdtsc_barrier();
  		now = get_cycles();
  		rdtsc_barrier();
  		last_tsc = now;
  		arch_spin_unlock(&sync_lock);
  
  		/*
  		 * Be nice every now and then (and also check whether
  		 * measurement is done [we also insert a 10 million
  		 * loops safety exit, so we don't lock up in case the
  		 * TSC readout is totally broken]):
  		 */
  		if (unlikely(!(i & 7))) {
  			if (now > end || i > 10000000)
  				break;
  			cpu_relax();
  			touch_nmi_watchdog();
  		}
  		/*
  		 * Outside the critical section we can now see whether
  		 * we saw a time-warp of the TSC going backwards:
  		 */
  		if (unlikely(prev > now)) {
  			arch_spin_lock(&sync_lock);
  			max_warp = max(max_warp, prev - now);
  			nr_warps++;
  			arch_spin_unlock(&sync_lock);
  		}
  	}
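  	/*
  	 * A zero delta means the TSC did not advance at all
  	 * during the measurement - complain loudly:
  	 */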
  	WARN(!(now-start),
  		"Warning: zero tsc calibration delta: %Ld [max: %Ld]
  ",
  			now-start, end-start);
  }
  
  /*
   * Source CPU calls into this - it waits for the freshly booted
   * target CPU to arrive and then starts the measurement:
   */
  void __cpuinit check_tsc_sync_source(int cpu)
  {
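  	/* Strictly two participants: this source CPU and the target. */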
  	int cpus = 2;
  
  	/*
  	 * No need to check if we already know that the TSC is not
  	 * synchronized:
  	 */
  	if (unsynchronized_tsc())
  		return;
  	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
  		if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING)
  			pr_info(
  			"Skipped synchronization checks as TSC is reliable.\n");
  		return;
  	}
  	/*
  	 * Reset it - in case this is a second bootup:
  	 */
  	atomic_set(&stop_count, 0);
  
  	/*
  	 * Wait for the target to arrive:
  	 */
  	while (atomic_read(&start_count) != cpus-1)
  		cpu_relax();
  	/*
  	 * Trigger the target to continue into the measurement too:
  	 */
  	atomic_inc(&start_count);
  
  	check_tsc_warp();
  
  	while (atomic_read(&stop_count) != cpus-1)
  		cpu_relax();
  	if (nr_warps) {
  		pr_warning("TSC synchronization [CPU#%d -> CPU#%d]:
  ",
  			smp_processor_id(), cpu);
  		pr_warning("Measured %Ld cycles TSC warp between CPUs, "
  			   "turning off TSC clock.
  ", max_warp);
  		mark_tsc_unstable("check_tsc_sync_source failed");
  	} else {
  		pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed
  ",
  			smp_processor_id(), cpu);
  	}
  
  	/*
  	 * Reset it - just in case we boot another CPU later:
  	 */
  	atomic_set(&start_count, 0);
  	nr_warps = 0;
  	max_warp = 0;
  	last_tsc = 0;
  
  	/*
  	 * Let the target continue with the bootup:
  	 */
  	atomic_inc(&stop_count);
  }
  
  /*
   * Freshly booted CPUs call into this:
   */
  void __cpuinit check_tsc_sync_target(void)
  {
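  	/* Two participants again: the source CPU and this target CPU. */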
  	int cpus = 2;
  	if (unsynchronized_tsc() || boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
  		return;
  
  	/*
  	 * Register this CPU's participation and wait for the
  	 * source CPU to start the measurement:
  	 */
  	atomic_inc(&start_count);
  	while (atomic_read(&start_count) != cpus)
  		cpu_relax();
  
  	check_tsc_warp();
  
  	/*
  	 * Ok, we are done:
  	 */
  	atomic_inc(&stop_count);
  
  	/*
  	 * Wait for the source CPU to print stuff:
  	 */
  	while (atomic_read(&stop_count) != cpus)
  		cpu_relax();
  }