Commit 3586e0a9a4a5f19110627d6ba81ada32a358467d

Authored by David S. Miller
1 parent a3bc1f11e9

clocksource/timecompare: Fix symbol exports to be GPL'd.

Noticed by Thomas Gleixner.

Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 2 changed files with 6 additions and 6 deletions.
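For context on the one-macro change: EXPORT_SYMBOL makes a symbol resolvable by any loadable module, while EXPORT_SYMBOL_GPL restricts it to modules that declare a GPL-compatible MODULE_LICENSE. Below is a minimal sketch of a consumer (a hypothetical module with invented demo_* names, not part of this commit) that keeps linking against the timecounter API after this change:

/*
 * Illustration only: a hypothetical GPL module consuming the now
 * GPL-only timecounter symbols. Without MODULE_LICENSE("GPL") (or
 * another GPL-compatible license string), loading it after this
 * commit fails with "Unknown symbol".
 */
#include <linux/module.h>
#include <linux/clocksource.h>

static cycle_t demo_read(const struct cyclecounter *cc)
{
        return 0;       /* a real driver would read a hardware counter */
}

static struct cyclecounter demo_cc = {
        .read   = demo_read,
        .mask   = CLOCKSOURCE_MASK(32), /* hypothetical 32-bit counter */
        .mult   = 1,                    /* ns = cycles * mult >> shift */
        .shift  = 0,
};

static struct timecounter demo_tc;

static int __init demo_init(void)
{
        /* both symbols below are GPL-only after this commit */
        timecounter_init(&demo_tc, &demo_cc, 0);
        pr_info("demo: %llu ns\n",
                (unsigned long long)timecounter_read(&demo_tc));
        return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL"); /* this line is what the new exports require */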

kernel/time/clocksource.c
/*
 * linux/kernel/time/clocksource.c
 *
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * TODO WishList:
 *   o Allow clocksource drivers to be unregistered
 */

#include <linux/clocksource.h>
#include <linux/sysdev.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>

void timecounter_init(struct timecounter *tc,
                      const struct cyclecounter *cc,
                      u64 start_tstamp)
{
        tc->cc = cc;
        tc->cycle_last = cc->read(cc);
        tc->nsec = start_tstamp;
}
-EXPORT_SYMBOL(timecounter_init);
+EXPORT_SYMBOL_GPL(timecounter_init);

/**
 * timecounter_read_delta - get nanoseconds since last call of this function
 * @tc: Pointer to time counter
 *
 * When the underlying cycle counter runs over, this will be handled
 * correctly as long as it does not run over more than once between
 * calls.
 *
 * The first call to this function for a new time counter initializes
 * the time tracking and returns an undefined result.
 */
static u64 timecounter_read_delta(struct timecounter *tc)
{
        cycle_t cycle_now, cycle_delta;
        u64 ns_offset;

        /* read cycle counter: */
        cycle_now = tc->cc->read(tc->cc);

        /* calculate the delta since the last timecounter_read_delta(): */
        cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;

        /* convert to nanoseconds: */
        ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta);

        /* update time stamp of timecounter_read_delta() call: */
        tc->cycle_last = cycle_now;

        return ns_offset;
}

u64 timecounter_read(struct timecounter *tc)
{
        u64 nsec;

        /* increment time by nanoseconds since last call */
        nsec = timecounter_read_delta(tc);
        nsec += tc->nsec;
        tc->nsec = nsec;

        return nsec;
}
-EXPORT_SYMBOL(timecounter_read);
+EXPORT_SYMBOL_GPL(timecounter_read);

u64 timecounter_cyc2time(struct timecounter *tc,
                         cycle_t cycle_tstamp)
{
        u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
        u64 nsec;

        /*
         * Instead of always treating cycle_tstamp as more recent
         * than tc->cycle_last, detect when it is too far in the
         * future and treat it as old time stamp instead.
         */
        if (cycle_delta > tc->cc->mask / 2) {
                cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
                nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta);
        } else {
                nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec;
        }

        return nsec;
}
-EXPORT_SYMBOL(timecounter_cyc2time);
+EXPORT_SYMBOL_GPL(timecounter_cyc2time);

/*[Clocksource internal variables]---------
 * curr_clocksource:
 *      currently selected clocksource.
 * clocksource_list:
 *      linked list with the registered clocksources
 * clocksource_mutex:
 *      protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *      Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[32];
static int finished_booting;

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static cycle_t watchdog_last;
static int watchdog_running;

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)

static void clocksource_watchdog_work(struct work_struct *work)
{
        /*
         * If kthread_run fails the next watchdog scan over the
         * watchdog_list will find the unstable clock again.
         */
        kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}

static void __clocksource_unstable(struct clocksource *cs)
{
        cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
        cs->flags |= CLOCK_SOURCE_UNSTABLE;
        if (finished_booting)
                schedule_work(&watchdog_work);
}

static void clocksource_unstable(struct clocksource *cs, int64_t delta)
{
        printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
               cs->name, delta);
        __clocksource_unstable(cs);
}

/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs: clocksource to be marked unstable
 *
 * This function is called instead of clocksource_change_rating from
 * cpu hotplug code to avoid a deadlock between the clocksource mutex
 * and the cpu hotplug mutex. It defers the update of the clocksource
 * to the watchdog thread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
        unsigned long flags;

        spin_lock_irqsave(&watchdog_lock, flags);
        if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
                if (list_empty(&cs->wd_list))
                        list_add(&cs->wd_list, &watchdog_list);
                __clocksource_unstable(cs);
        }
        spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_watchdog(unsigned long data)
{
        struct clocksource *cs;
        cycle_t csnow, wdnow;
        int64_t wd_nsec, cs_nsec;
        int next_cpu;

        spin_lock(&watchdog_lock);
        if (!watchdog_running)
                goto out;

        wdnow = watchdog->read(watchdog);
        wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask,
                                     watchdog->mult, watchdog->shift);
        watchdog_last = wdnow;

        list_for_each_entry(cs, &watchdog_list, wd_list) {

                /* Clocksource already marked unstable? */
                if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
                        if (finished_booting)
                                schedule_work(&watchdog_work);
                        continue;
                }

                csnow = cs->read(cs);

                /* Clocksource initialized ? */
                if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
                        cs->flags |= CLOCK_SOURCE_WATCHDOG;
                        cs->wd_last = csnow;
                        continue;
                }

                /* Check the deviation from the watchdog clocksource. */
                cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) &
                                             cs->mask, cs->mult, cs->shift);
                cs->wd_last = csnow;
                if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
                        clocksource_unstable(cs, cs_nsec - wd_nsec);
                        continue;
                }

                if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
                    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
                    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
                        cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
                        /*
                         * We just marked the clocksource as highres-capable,
                         * notify the rest of the system as well so that we
                         * transition into high-res mode:
                         */
                        tick_clock_notify();
                }
        }

        /*
         * Cycle through CPUs to check if the CPUs stay synchronized
         * to each other.
         */
        next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
        if (next_cpu >= nr_cpu_ids)
                next_cpu = cpumask_first(cpu_online_mask);
        watchdog_timer.expires += WATCHDOG_INTERVAL;
        add_timer_on(&watchdog_timer, next_cpu);
out:
        spin_unlock(&watchdog_lock);
}

static inline void clocksource_start_watchdog(void)
{
        if (watchdog_running || !watchdog || list_empty(&watchdog_list))
                return;
        init_timer(&watchdog_timer);
        watchdog_timer.function = clocksource_watchdog;
        watchdog_last = watchdog->read(watchdog);
        watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
        add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
        watchdog_running = 1;
}

static inline void clocksource_stop_watchdog(void)
{
        if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
                return;
        del_timer(&watchdog_timer);
        watchdog_running = 0;
}

static inline void clocksource_reset_watchdog(void)
{
        struct clocksource *cs;

        list_for_each_entry(cs, &watchdog_list, wd_list)
                cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}

static void clocksource_resume_watchdog(void)
{
        unsigned long flags;

        spin_lock_irqsave(&watchdog_lock, flags);
        clocksource_reset_watchdog();
        spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
        unsigned long flags;

        spin_lock_irqsave(&watchdog_lock, flags);
        if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
                /* cs is a clocksource to be watched. */
                list_add(&cs->wd_list, &watchdog_list);
                cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
        } else {
                /* cs is a watchdog. */
                if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
                        cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
                /* Pick the best watchdog. */
                if (!watchdog || cs->rating > watchdog->rating) {
                        watchdog = cs;
                        /* Reset watchdog cycles */
                        clocksource_reset_watchdog();
                }
        }
        /* Check if the watchdog timer needs to be started. */
        clocksource_start_watchdog();
        spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
        struct clocksource *tmp;
        unsigned long flags;

        spin_lock_irqsave(&watchdog_lock, flags);
        if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
                /* cs is a watched clocksource. */
                list_del_init(&cs->wd_list);
        } else if (cs == watchdog) {
                /* Reset watchdog cycles */
                clocksource_reset_watchdog();
                /* Current watchdog is removed. Find an alternative. */
                watchdog = NULL;
                list_for_each_entry(tmp, &clocksource_list, list) {
                        if (tmp == cs || tmp->flags & CLOCK_SOURCE_MUST_VERIFY)
                                continue;
                        if (!watchdog || tmp->rating > watchdog->rating)
                                watchdog = tmp;
                }
        }
        cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
        /* Check if the watchdog timer needs to be stopped. */
        clocksource_stop_watchdog();
        spin_unlock_irqrestore(&watchdog_lock, flags);
}

static int clocksource_watchdog_kthread(void *data)
{
        struct clocksource *cs, *tmp;
        unsigned long flags;
        LIST_HEAD(unstable);

        mutex_lock(&clocksource_mutex);
        spin_lock_irqsave(&watchdog_lock, flags);
        list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list)
                if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
                        list_del_init(&cs->wd_list);
                        list_add(&cs->wd_list, &unstable);
                }
        /* Check if the watchdog timer needs to be stopped. */
        clocksource_stop_watchdog();
        spin_unlock_irqrestore(&watchdog_lock, flags);

        /* Needs to be done outside of watchdog lock */
        list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
                list_del_init(&cs->wd_list);
                __clocksource_change_rating(cs, 0);
        }
        mutex_unlock(&clocksource_mutex);
        return 0;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
        if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
                cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int clocksource_watchdog_kthread(void *data) { return 0; }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
        struct clocksource *cs;

        list_for_each_entry(cs, &clocksource_list, list)
                if (cs->resume)
                        cs->resume();

        clocksource_resume_watchdog();
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog.
 *
 */
void clocksource_touch_watchdog(void)
{
        clocksource_resume_watchdog();
}

#ifdef CONFIG_GENERIC_TIME

/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource,
 * which is selected by userspace override.
 */
static void clocksource_select(void)
{
        struct clocksource *best, *cs;

        if (!finished_booting || list_empty(&clocksource_list))
                return;
        /* First clocksource on the list has the best rating. */
        best = list_first_entry(&clocksource_list, struct clocksource, list);
        /* Check for the override clocksource. */
        list_for_each_entry(cs, &clocksource_list, list) {
                if (strcmp(cs->name, override_name) != 0)
                        continue;
                /*
                 * Check to make sure we don't switch to a non-highres
                 * capable clocksource if the tick code is in oneshot
                 * mode (highres or nohz)
                 */
                if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
                    tick_oneshot_mode_active()) {
                        /* Override clocksource cannot be used. */
                        printk(KERN_WARNING "Override clocksource %s is not "
                               "HRT compatible. Cannot switch while in "
                               "HRT/NOHZ mode\n", cs->name);
                        override_name[0] = 0;
                } else
                        /* Override clocksource can be used. */
                        best = cs;
                break;
        }
        if (curr_clocksource != best) {
                printk(KERN_INFO "Switching to clocksource %s\n", best->name);
                curr_clocksource = best;
                timekeeping_notify(curr_clocksource);
        }
}

#else /* CONFIG_GENERIC_TIME */

static inline void clocksource_select(void) { }

#endif

/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
        finished_booting = 1;

        /*
         * Run the watchdog first to eliminate unstable clock sources
         */
        clocksource_watchdog_kthread(NULL);

        mutex_lock(&clocksource_mutex);
        clocksource_select();
        mutex_unlock(&clocksource_mutex);
        return 0;
}
fs_initcall(clocksource_done_booting);

/*
 * Enqueue the clocksource sorted by rating
 */
static void clocksource_enqueue(struct clocksource *cs)
{
        struct list_head *entry = &clocksource_list;
        struct clocksource *tmp;

        list_for_each_entry(tmp, &clocksource_list, list)
                /* Keep track of the place, where to insert */
                if (tmp->rating >= cs->rating)
                        entry = &tmp->list;
        list_add(&cs->list, entry);
}

/**
 * clocksource_register - Used to install new clocksources
 * @t: clocksource to be registered
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 */
int clocksource_register(struct clocksource *cs)
{
        mutex_lock(&clocksource_mutex);
        clocksource_enqueue(cs);
        clocksource_select();
        clocksource_enqueue_watchdog(cs);
        mutex_unlock(&clocksource_mutex);
        return 0;
}
EXPORT_SYMBOL(clocksource_register);

static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
        list_del(&cs->list);
        cs->rating = rating;
        clocksource_enqueue(cs);
        clocksource_select();
}

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
        mutex_lock(&clocksource_mutex);
        __clocksource_change_rating(cs, rating);
        mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);

/**
 * clocksource_unregister - remove a registered clocksource
 */
void clocksource_unregister(struct clocksource *cs)
{
        mutex_lock(&clocksource_mutex);
        clocksource_dequeue_watchdog(cs);
        list_del(&cs->list);
        clocksource_select();
        mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_unregister);

#ifdef CONFIG_SYSFS
/**
 * sysfs_show_current_clocksources - sysfs interface for current clocksource
 * @dev: unused
 * @buf: char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing current clocksource.
 */
static ssize_t
sysfs_show_current_clocksources(struct sys_device *dev,
                                struct sysdev_attribute *attr, char *buf)
{
        ssize_t count = 0;

        mutex_lock(&clocksource_mutex);
        count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
        mutex_unlock(&clocksource_mutex);

        return count;
}

/**
 * sysfs_override_clocksource - interface for manually overriding clocksource
 * @dev: unused
 * @buf: name of override clocksource
 * @count: length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selction.
 */
static ssize_t sysfs_override_clocksource(struct sys_device *dev,
                                          struct sysdev_attribute *attr,
                                          const char *buf, size_t count)
{
        size_t ret = count;

        /* strings from sysfs write are not 0 terminated! */
        if (count >= sizeof(override_name))
                return -EINVAL;

        /* strip of \n: */
        if (buf[count-1] == '\n')
                count--;

        mutex_lock(&clocksource_mutex);

        if (count > 0)
                memcpy(override_name, buf, count);
        override_name[count] = 0;
        clocksource_select();

        mutex_unlock(&clocksource_mutex);

        return ret;
}

/**
 * sysfs_show_available_clocksources - sysfs interface for listing clocksource
 * @dev: unused
 * @buf: char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t
sysfs_show_available_clocksources(struct sys_device *dev,
                                  struct sysdev_attribute *attr,
                                  char *buf)
{
        struct clocksource *src;
        ssize_t count = 0;

        mutex_lock(&clocksource_mutex);
        list_for_each_entry(src, &clocksource_list, list) {
                /*
                 * Don't show non-HRES clocksource if the tick code is
                 * in one shot mode (highres=on or nohz=on)
                 */
                if (!tick_oneshot_mode_active() ||
                    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
                        count += snprintf(buf + count,
                                  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
                                  "%s ", src->name);
        }
        mutex_unlock(&clocksource_mutex);

        count += snprintf(buf + count,
                          max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

        return count;
}

/*
 * Sysfs setup bits:
 */
static SYSDEV_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
                   sysfs_override_clocksource);

static SYSDEV_ATTR(available_clocksource, 0444,
                   sysfs_show_available_clocksources, NULL);

static struct sysdev_class clocksource_sysclass = {
        .name = "clocksource",
};

static struct sys_device device_clocksource = {
        .id     = 0,
        .cls    = &clocksource_sysclass,
};

static int __init init_clocksource_sysfs(void)
{
        int error = sysdev_class_register(&clocksource_sysclass);

        if (!error)
                error = sysdev_register(&device_clocksource);
        if (!error)
                error = sysdev_create_file(
                                &device_clocksource,
                                &attr_current_clocksource);
        if (!error)
                error = sysdev_create_file(
                                &device_clocksource,
                                &attr_available_clocksource);
        return error;
}

device_initcall(init_clocksource_sysfs);
#endif /* CONFIG_SYSFS */

/**
 * boot_override_clocksource - boot clock override
 * @str: override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char* str)
{
        mutex_lock(&clocksource_mutex);
        if (str)
                strlcpy(override_name, str, sizeof(override_name));
        mutex_unlock(&clocksource_mutex);
        return 1;
}

__setup("clocksource=", boot_override_clocksource);

/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str: override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name
 */
static int __init boot_override_clock(char* str)
{
        if (!strcmp(str, "pmtmr")) {
                printk("Warning: clock=pmtmr is deprecated. "
                       "Use clocksource=acpi_pm.\n");
                return boot_override_clocksource("acpi_pm");
        }
        printk("Warning! clock= boot option is deprecated. "
               "Use clocksource=xyz\n");
        return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);
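A side note worth spelling out, since both timecounter_read_delta() and timecounter_cyc2time() above rely on it: the masked subtraction stays correct across counter wrap-around. The sketch below is illustrative only (not from the commit), with a hypothetical 8-bit counter:

/*
 * With mask = 0xff, last = 0xf0, now = 0x05 (counter wrapped once):
 *
 *      (now - last) & mask = (0x05 - 0xf0) & 0xff = 0x15 = 21 cycles
 *
 * The unsigned underflow is cancelled by the mask, so the delta is
 * correct as long as the counter wraps at most once between reads.
 */
static inline u64 wrap_safe_delta(u64 now, u64 last, u64 mask)
{
        return (now - last) & mask;
}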
kernel/time/timecompare.c
/*
 * Copyright (C) 2009 Intel Corporation.
 * Author: Patrick Ohly <patrick.ohly@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/timecompare.h>
#include <linux/module.h>
#include <linux/math64.h>

/*
 * fixed point arithmetic scale factor for skew
 *
 * Usually one would measure skew in ppb (parts per billion, 1e9), but
 * using a factor of 2 simplifies the math.
 */
#define TIMECOMPARE_SKEW_RESOLUTION (((s64)1)<<30)

ktime_t timecompare_transform(struct timecompare *sync,
                              u64 source_tstamp)
{
        u64 nsec;

        nsec = source_tstamp + sync->offset;
        nsec += (s64)(source_tstamp - sync->last_update) * sync->skew /
                TIMECOMPARE_SKEW_RESOLUTION;

        return ns_to_ktime(nsec);
}
-EXPORT_SYMBOL(timecompare_transform);
+EXPORT_SYMBOL_GPL(timecompare_transform);

int timecompare_offset(struct timecompare *sync,
                       s64 *offset,
                       u64 *source_tstamp)
{
        u64 start_source = 0, end_source = 0;
        struct {
                s64 offset;
                s64 duration_target;
        } buffer[10], sample, *samples;
        int counter = 0, i;
        int used;
        int index;
        int num_samples = sync->num_samples;

        if (num_samples > sizeof(buffer)/sizeof(buffer[0])) {
                samples = kmalloc(sizeof(*samples) * num_samples, GFP_ATOMIC);
                if (!samples) {
                        samples = buffer;
                        num_samples = sizeof(buffer)/sizeof(buffer[0]);
                }
        } else {
                samples = buffer;
        }

        /* run until we have enough valid samples, but do not try forever */
        i = 0;
        counter = 0;
        while (1) {
                u64 ts;
                ktime_t start, end;

                start = sync->target();
                ts = timecounter_read(sync->source);
                end = sync->target();

                if (!i)
                        start_source = ts;

                /* ignore negative durations */
                sample.duration_target = ktime_to_ns(ktime_sub(end, start));
                if (sample.duration_target >= 0) {
                        /*
                         * assume symetric delay to and from source:
                         * average target time corresponds to measured
                         * source time
                         */
                        sample.offset =
                                ktime_to_ns(ktime_add(end, start)) / 2 -
                                ts;

                        /* simple insertion sort based on duration */
                        index = counter - 1;
                        while (index >= 0) {
                                if (samples[index].duration_target <
                                    sample.duration_target)
                                        break;
                                samples[index + 1] = samples[index];
                                index--;
                        }
                        samples[index + 1] = sample;
                        counter++;
                }

                i++;
                if (counter >= num_samples || i >= 100000) {
                        end_source = ts;
                        break;
                }
        }

        *source_tstamp = (end_source + start_source) / 2;

        /* remove outliers by only using 75% of the samples */
        used = counter * 3 / 4;
        if (!used)
                used = counter;
        if (used) {
                /* calculate average */
                s64 off = 0;
                for (index = 0; index < used; index++)
                        off += samples[index].offset;
                *offset = div_s64(off, used);
        }

        if (samples && samples != buffer)
                kfree(samples);

        return used;
}
-EXPORT_SYMBOL(timecompare_offset);
+EXPORT_SYMBOL_GPL(timecompare_offset);

void __timecompare_update(struct timecompare *sync,
                          u64 source_tstamp)
{
        s64 offset;
        u64 average_time;

        if (!timecompare_offset(sync, &offset, &average_time))
                return;

        if (!sync->last_update) {
                sync->last_update = average_time;
                sync->offset = offset;
                sync->skew = 0;
        } else {
                s64 delta_nsec = average_time - sync->last_update;

                /* avoid division by negative or small deltas */
                if (delta_nsec >= 10000) {
                        s64 delta_offset_nsec = offset - sync->offset;
                        s64 skew; /* delta_offset_nsec *
                                     TIMECOMPARE_SKEW_RESOLUTION /
                                     delta_nsec */
                        u64 divisor;

                        /* div_s64() is limited to 32 bit divisor */
                        skew = delta_offset_nsec * TIMECOMPARE_SKEW_RESOLUTION;
                        divisor = delta_nsec;
                        while (unlikely(divisor >= ((s64)1) << 32)) {
                                /* divide both by 2; beware, right shift
                                   of negative value has undefined
                                   behavior and can only be used for
                                   the positive divisor */
                                skew = div_s64(skew, 2);
                                divisor >>= 1;
                        }
                        skew = div_s64(skew, divisor);

                        /*
                         * Calculate new overall skew as 4/16 the
                         * old value and 12/16 the new one. This is
                         * a rather arbitrary tradeoff between
                         * only using the latest measurement (0/16 and
                         * 16/16) and even more weight on past measurements.
                         */
#define TIMECOMPARE_NEW_SKEW_PER_16 12
                        sync->skew =
                                div_s64((16 - TIMECOMPARE_NEW_SKEW_PER_16) *
                                        sync->skew +
                                        TIMECOMPARE_NEW_SKEW_PER_16 * skew,
                                        16);
                        sync->last_update = average_time;
                        sync->offset = offset;
                }
        }
}
-EXPORT_SYMBOL(__timecompare_update);
+EXPORT_SYMBOL_GPL(__timecompare_update);
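Finally, a hedged sketch of how a driver might tie the two files together. The struct timecompare fields below are inferred from their use in the code above, and all demo_* names are invented: a timecompare pairs a device timecounter with a target clock (here ktime_get_real), periodically re-measures offset and skew, and then maps raw hardware timestamps onto system time.

/* hypothetical driver-side glue, for illustration only */
#include <linux/timecompare.h>
#include <linux/ktime.h>

static struct timecounter demo_tc;      /* fed by the device's cyclecounter */

static struct timecompare demo_compare = {
        .source         = &demo_tc,
        .target         = ktime_get_real,      /* clock to sync against */
        .num_samples    = 10,   /* samples per timecompare_offset() run */
};

/* called periodically, e.g. from a service task */
static void demo_resync(void)
{
        __timecompare_update(&demo_compare, 0);
}

/* convert a raw hardware timestamp into system time */
static ktime_t demo_hwtstamp_to_ktime(u64 hw_tstamp)
{
        u64 ns = timecounter_cyc2time(&demo_tc, hw_tstamp);

        return timecompare_transform(&demo_compare, ns);
}

This mirrors the intended flow of the code above: timecompare_offset() samples both clocks back to back, __timecompare_update() smooths the skew estimate with the 12/16 weighting, and timecompare_transform() applies the offset plus skew extrapolation. All of these entry points are GPL-only exports after this commit.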