kernel/time/clockevents.c
  /*
   * linux/kernel/time/clockevents.c
   *
   * This file contains functions which manage clock event devices.
   *
   * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
   * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
   * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
   *
   * This code is licenced under the GPL version 2. For details see
   * kernel-base/COPYING.
   */
  
  #include <linux/clockchips.h>
  #include <linux/hrtimer.h>
  #include <linux/init.h>
  #include <linux/module.h>
  #include <linux/notifier.h>
#include <linux/smp.h>

#include "tick-internal.h"

/* The registered clock event devices */
  static LIST_HEAD(clockevent_devices);
  static LIST_HEAD(clockevents_released);
  
  /* Notification for clock events */
  static RAW_NOTIFIER_HEAD(clockevents_chain);
  
  /* Protection for the above */
  static DEFINE_RAW_SPINLOCK(clockevents_lock);
  
  /**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
   * @latch:	value to convert
   * @evt:	pointer to clock event device descriptor
   *
   * Math helper, returns latch value converted to nanoseconds (bound checked)
   */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
	u64 clc = (u64) latch << evt->shift;

	if (unlikely(!evt->mult)) {
		evt->mult = 1;
		WARN_ON(1);
	}

	do_div(clc, evt->mult);
	if (clc < 1000)
		clc = 1000;
	if (clc > KTIME_MAX)
		clc = KTIME_MAX;

	return clc;
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);
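/*
 * Worked example (illustrative numbers, not part of the original file):
 * for a device clocked at 1 MHz whose mult/shift pair came out as
 * shift = 32, mult ~= (1000000 << 32) / NSEC_PER_SEC ~= 4294967,
 * clockevent_delta2ns(1, evt) computes (1 << 32) / 4294967 ~= 1000,
 * i.e. one device tick corresponds to roughly 1000 ns, as expected for
 * a 1 MHz clock. Results are clamped to the [1000, KTIME_MAX] range above.
 */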
  
  /**
   * clockevents_set_mode - set the operating mode of a clock event device
   * @dev:	device to modify
   * @mode:	new mode
   *
   * Must be called with interrupts disabled !
   */
  void clockevents_set_mode(struct clock_event_device *dev,
  				 enum clock_event_mode mode)
  {
  	if (dev->mode != mode) {
  		dev->set_mode(mode, dev);
  		dev->mode = mode;
  
  		/*
  		 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
  		 * on it, so fix it up and emit a warning:
  		 */
  		if (mode == CLOCK_EVT_MODE_ONESHOT) {
  			if (unlikely(!dev->mult)) {
  				dev->mult = 1;
  				WARN_ON(1);
  			}
  		}
  	}
  }
  
  /**
   * clockevents_shutdown - shutdown the device and clear next_event
   * @dev:	device to shutdown
   */
  void clockevents_shutdown(struct clock_event_device *dev)
  {
  	clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
  	dev->next_event.tv64 = KTIME_MAX;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
  
  /* Limit min_delta to a jiffie */
  #define MIN_DELTA_LIMIT		(NSEC_PER_SEC / HZ)
  
  /**
   * clockevents_increase_min_delta - raise minimum delta of a clock event device
   * @dev:       device to increase the minimum delta
   *
   * Returns 0 on success, -ETIME when the minimum delta reached the limit.
   */
  static int clockevents_increase_min_delta(struct clock_event_device *dev)
  {
  	/* Nothing to do if we already reached the limit */
  	if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
  		printk(KERN_WARNING "CE: Reprogramming failure. Giving up
  ");
  		dev->next_event.tv64 = KTIME_MAX;
  		return -ETIME;
  	}
  
  	if (dev->min_delta_ns < 5000)
  		dev->min_delta_ns = 5000;
  	else
  		dev->min_delta_ns += dev->min_delta_ns >> 1;
  
  	if (dev->min_delta_ns > MIN_DELTA_LIMIT)
  		dev->min_delta_ns = MIN_DELTA_LIMIT;
  
  	printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec
  ",
  	       dev->name ? dev->name : "?",
  	       (unsigned long long) dev->min_delta_ns);
  	return 0;
  }
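/*
 * Worked example (HZ = 100 assumed purely for illustration): starting
 * from min_delta_ns = 1000, successive calls yield 5000, 7500, 11250, ...
 * each step growing by 50%, until the value is clamped to
 * MIN_DELTA_LIMIT = NSEC_PER_SEC / HZ = 10,000,000 ns; the call after
 * that prints the "Giving up" warning and returns -ETIME.
 */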
  
  /**
   * clockevents_program_min_delta - Set clock event device to the minimum delay.
   * @dev:	device to program
   *
   * Returns 0 on success, -ETIME when the retry loop failed.
   */
  static int clockevents_program_min_delta(struct clock_event_device *dev)
  {
  	unsigned long long clc;
  	int64_t delta;
  	int i;
  
  	for (i = 0;;) {
  		delta = dev->min_delta_ns;
  		dev->next_event = ktime_add_ns(ktime_get(), delta);
  
  		if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
  			return 0;
  
  		dev->retries++;
  		clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
  		if (dev->set_next_event((unsigned long) clc, dev) == 0)
  			return 0;
  
  		if (++i > 2) {
  			/*
  			 * We tried 3 times to program the device with the
  			 * given min_delta_ns. Try to increase the minimum
  			 * delta, if that fails as well get out of here.
  			 */
  			if (clockevents_increase_min_delta(dev))
  				return -ETIME;
  			i = 0;
  		}
  	}
  }
  
  #else  /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */
  
  /**
   * clockevents_program_min_delta - Set clock event device to the minimum delay.
   * @dev:	device to program
   *
   * Returns 0 on success, -ETIME when the retry loop failed.
   */
  static int clockevents_program_min_delta(struct clock_event_device *dev)
  {
  	unsigned long long clc;
  	int64_t delta;
  
  	delta = dev->min_delta_ns;
  	dev->next_event = ktime_add_ns(ktime_get(), delta);
  
  	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
  		return 0;
  
  	dev->retries++;
  	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
  	return dev->set_next_event((unsigned long) clc, dev);
  }
  
#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev:	device to program
 * @expires:	absolute expiry time (monotonic clock)
 * @force:	program minimum delay if expires can not be set
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
			      bool force)
{
	unsigned long long clc;
	int64_t delta;
	int rc;

	if (unlikely(expires.tv64 < 0)) {
		WARN_ON_ONCE(1);
		return -ETIME;
	}

	dev->next_event = expires;

	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
		return 0;

	/* Shortcut for clockevent devices that can deal with ktime. */
	if (dev->features & CLOCK_EVT_FEAT_KTIME)
		return dev->set_next_ktime(expires, dev);

	delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
	if (delta <= 0)
		return force ? clockevents_program_min_delta(dev) : -ETIME;

	delta = min(delta, (int64_t) dev->max_delta_ns);
	delta = max(delta, (int64_t) dev->min_delta_ns);

	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	rc = dev->set_next_event((unsigned long) clc, dev);

	return (rc && force) ? clockevents_program_min_delta(dev) : rc;
}
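/*
 * Illustrative numbers only: with a 1 MHz device (mult ~= 4294967,
 * shift = 32), asking for an event 500 us in the future gives
 * delta = 500000 ns and clc = (500000 * 4294967) >> 32 ~= 500 device
 * ticks, which is the value handed to dev->set_next_event() above.
 */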
  
  /**
   * clockevents_register_notifier - register a clock events change listener
   */
  int clockevents_register_notifier(struct notifier_block *nb)
  {
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&clockevents_lock, flags);
	ret = raw_notifier_chain_register(&clockevents_chain, nb);
	raw_spin_unlock_irqrestore(&clockevents_lock, flags);

	return ret;
}

/*
   * Notify about a clock event change. Called with clockevents_lock
   * held.
   */
  static void clockevents_do_notify(unsigned long reason, void *dev)
  {
  	raw_notifier_call_chain(&clockevents_chain, reason, dev);
  }
  
  /*
   * Called after a notify add to make devices available which were
   * released from the notifier call.
   */
  static void clockevents_notify_released(void)
  {
  	struct clock_event_device *dev;
  
  	while (!list_empty(&clockevents_released)) {
  		dev = list_entry(clockevents_released.next,
  				 struct clock_event_device, list);
  		list_del(&dev->list);
  		list_add(&dev->list, &clockevent_devices);
  		clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
  	}
  }
  
  /**
   * clockevents_register_device - register a clock event device
   * @dev:	device to register
   */
  void clockevents_register_device(struct clock_event_device *dev)
  {
	unsigned long flags;

	BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
	if (!dev->cpumask) {
		WARN_ON(num_possible_cpus() > 1);
		dev->cpumask = cpumask_of(smp_processor_id());
	}

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	list_add(&dev->list, &clockevent_devices);
	clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
	clockevents_notify_released();

	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

static void clockevents_config(struct clock_event_device *dev,
			       u32 freq)
{
	u64 sec;
  
  	if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
  		return;
  
  	/*
  	 * Calculate the maximum number of seconds we can sleep. Limit
  	 * to 10 minutes for hardware which can program more than
  	 * 32bit ticks so we still get reasonable conversion values.
  	 */
  	sec = dev->max_delta_ticks;
  	do_div(sec, freq);
  	if (!sec)
  		sec = 1;
  	else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
  		sec = 600;
  
  	clockevents_calc_mult_shift(dev, freq, sec);
  	dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev);
  	dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev);
  }
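/*
 * Worked example (illustrative values): a 32-bit counter running at
 * 13 MHz has max_delta_ticks = 0xffffffff, so sec = 0xffffffff / 13000000
 * ~= 330. As max_delta_ticks does not exceed UINT_MAX, the 600 second
 * cap is not applied and mult/shift are computed for a ~330 second range.
 */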
  
  /**
   * clockevents_config_and_register - Configure and register a clock event device
   * @dev:	device to register
   * @freq:	The clock frequency
   * @min_delta:	The minimum clock ticks to program in oneshot mode
   * @max_delta:	The maximum clock ticks to program in oneshot mode
   *
   * min/max_delta can be 0 for devices which do not support oneshot mode.
   */
  void clockevents_config_and_register(struct clock_event_device *dev,
  				     u32 freq, unsigned long min_delta,
  				     unsigned long max_delta)
  {
  	dev->min_delta_ticks = min_delta;
  	dev->max_delta_ticks = max_delta;
  	clockevents_config(dev, freq);
  	clockevents_register_device(dev);
  }
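/*
 * Hypothetical usage sketch (the "foo" names and rate below are made up
 * for illustration and do not exist in the tree): a timer driver would
 * typically fill in a struct clock_event_device and register it together
 * with the frequency and tick limits of its hardware counter:
 *
 *	static struct clock_event_device foo_clockevent = {
 *		.name		= "foo-timer",
 *		.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 *		.rating		= 300,
 *		.set_mode	= foo_set_mode,
 *		.set_next_event	= foo_set_next_event,
 *	};
 *
 *	foo_clockevent.cpumask = cpumask_of(smp_processor_id());
 *	clockevents_config_and_register(&foo_clockevent, rate, 0xf, 0xffffffff);
 */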
  /**
   * clockevents_update_freq - Update frequency and reprogram a clock event device.
   * @dev:	device to modify
   * @freq:	new device frequency
   *
   * Reconfigure and reprogram a clock event device in oneshot
   * mode. Must be called on the cpu for which the device delivers per
   * cpu timer events with interrupts disabled!  Returns 0 on success,
   * -ETIME when the event is in the past.
   */
  int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
  {
  	clockevents_config(dev, freq);
  
  	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
  		return 0;
  	return clockevents_program_event(dev, dev->next_event, false);
  }
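/*
 * Illustrative call pattern only (this_cpu_ced and new_rate are made-up
 * names): a driver whose input clock was just rescaled could update the
 * device it owns for the current CPU with interrupts disabled:
 *
 *	local_irq_save(flags);
 *	ret = clockevents_update_freq(this_cpu_ced, new_rate);
 *	local_irq_restore(flags);
 */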
  /*
   * Noop handler when we shut down an event device
   */
  void clockevents_handle_noop(struct clock_event_device *dev)
  {
  }
  
  /**
   * clockevents_exchange_device - release and request clock devices
   * @old:	device to release (can be NULL)
   * @new:	device to request (can be NULL)
   *
   * Called from the notifier chain. clockevents_lock is held already
   */
  void clockevents_exchange_device(struct clock_event_device *old,
  				 struct clock_event_device *new)
  {
  	unsigned long flags;
  
  	local_irq_save(flags);
  	/*
  	 * Caller releases a clock event device. We queue it into the
  	 * released list and do a notify add later.
  	 */
  	if (old) {
  		clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
  		list_del(&old->list);
  		list_add(&old->list, &clockevents_released);
  	}
  
  	if (new) {
  		BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
  		clockevents_shutdown(new);
  	}
  	local_irq_restore(flags);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS
  /**
   * clockevents_notify - notification about relevant events
   */
  void clockevents_notify(unsigned long reason, void *arg)
  {
	struct clock_event_device *dev, *tmp;
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&clockevents_lock, flags);
	clockevents_do_notify(reason, arg);

	switch (reason) {
	case CLOCK_EVT_NOTIFY_CPU_DEAD:
		/*
		 * Unregister the clock event devices which were
		 * released from the users in the notify chain.
		 */
		list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
			list_del(&dev->list);
		/*
		 * Now check whether the CPU has left unused per cpu devices
		 */
		cpu = *((int *)arg);
		list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
			if (cpumask_test_cpu(cpu, dev->cpumask) &&
			    cpumask_weight(dev->cpumask) == 1 &&
			    !tick_is_broadcast_device(dev)) {
				BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
				list_del(&dev->list);
			}
		}
		break;
	default:
		break;
	}
	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_notify);
  #endif