Blame view
kernel/time/clockevents.c
6.5 KB
d316c57ff [PATCH] clockeven... |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 |
/* * linux/kernel/time/clockevents.c * * This file contains functions which manage clock event devices. * * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de> * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner * * This code is licenced under the GPL version 2. For details see * kernel-base/COPYING. */ #include <linux/clockchips.h> #include <linux/hrtimer.h> #include <linux/init.h> #include <linux/module.h> #include <linux/notifier.h> #include <linux/smp.h> #include <linux/sysdev.h> |
eea08f32a timers: Logic to ... |
21 |
#include <linux/tick.h> |
d316c57ff [PATCH] clockeven... |
22 |
|
8e1a928a2 clockevents: Add ... |
23 |
#include "tick-internal.h" |
/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
/* Devices released during an exchange; re-announced on the next notify add */
static LIST_HEAD(clockevents_released);

/* Notification for clock events */
static RAW_NOTIFIER_HEAD(clockevents_chain);

/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);
/** * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds * @latch: value to convert * @evt: pointer to clock event device descriptor * * Math helper, returns latch value converted to nanoseconds (bound checked) */ |
97813f2fe nohz: Allow 32-bi... |
41 |
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt) |
d316c57ff [PATCH] clockeven... |
42 |
{ |
97813f2fe nohz: Allow 32-bi... |
43 |
u64 clc = (u64) latch << evt->shift; |
d316c57ff [PATCH] clockeven... |
44 |
|
45fe4fe19 x86: make clockev... |
45 46 47 48 |
if (unlikely(!evt->mult)) { evt->mult = 1; WARN_ON(1); } |
d316c57ff [PATCH] clockeven... |
49 50 51 |
do_div(clc, evt->mult); if (clc < 1000) clc = 1000; |
97813f2fe nohz: Allow 32-bi... |
52 53 |
if (clc > KTIME_MAX) clc = KTIME_MAX; |
d316c57ff [PATCH] clockeven... |
54 |
|
97813f2fe nohz: Allow 32-bi... |
55 |
return clc; |
d316c57ff [PATCH] clockeven... |
56 |
} |
c81fc2c33 clockevent: expor... |
57 |
EXPORT_SYMBOL_GPL(clockevent_delta2ns); |
d316c57ff [PATCH] clockeven... |
58 59 60 61 62 63 64 65 66 67 68 69 70 71 |
/** * clockevents_set_mode - set the operating mode of a clock event device * @dev: device to modify * @mode: new mode * * Must be called with interrupts disabled ! */ void clockevents_set_mode(struct clock_event_device *dev, enum clock_event_mode mode) { if (dev->mode != mode) { dev->set_mode(mode, dev); dev->mode = mode; |
2d68259db clockevents: let ... |
72 73 74 75 76 77 78 79 80 81 82 |
/* * A nsec2cyc multiplicator of 0 is invalid and we'd crash * on it, so fix it up and emit a warning: */ if (mode == CLOCK_EVT_MODE_ONESHOT) { if (unlikely(!dev->mult)) { dev->mult = 1; WARN_ON(1); } } |
d316c57ff [PATCH] clockeven... |
83 84 85 86 |
} } /** |
2344abbcb clockevents: make... |
87 88 89 90 91 92 93 94 95 96 |
* clockevents_shutdown - shutdown the device and clear next_event * @dev: device to shutdown */ void clockevents_shutdown(struct clock_event_device *dev) { clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); dev->next_event.tv64 = KTIME_MAX; } /** |
d316c57ff [PATCH] clockeven... |
97 98 99 100 101 102 103 104 105 106 |
* clockevents_program_event - Reprogram the clock event device. * @expires: absolute expiry time (monotonic clock) * * Returns 0 on success, -ETIME when the event is in the past. */ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires, ktime_t now) { unsigned long long clc; int64_t delta; |
167b1de3e clockevents: warn... |
107 108 109 110 |
if (unlikely(expires.tv64 < 0)) { WARN_ON_ONCE(1); return -ETIME; } |
d316c57ff [PATCH] clockeven... |
111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 |
delta = ktime_to_ns(ktime_sub(expires, now)); if (delta <= 0) return -ETIME; dev->next_event = expires; if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN) return 0; if (delta > dev->max_delta_ns) delta = dev->max_delta_ns; if (delta < dev->min_delta_ns) delta = dev->min_delta_ns; clc = delta * dev->mult; clc >>= dev->shift; return dev->set_next_event((unsigned long) clc, dev); } /** * clockevents_register_notifier - register a clock events change listener */ int clockevents_register_notifier(struct notifier_block *nb) { |
f833bab87 clockevent: Preve... |
137 |
unsigned long flags; |
d316c57ff [PATCH] clockeven... |
138 |
int ret; |
b5f91da0a clockevents: Conv... |
139 |
raw_spin_lock_irqsave(&clockevents_lock, flags); |
d316c57ff [PATCH] clockeven... |
140 |
ret = raw_notifier_chain_register(&clockevents_chain, nb); |
b5f91da0a clockevents: Conv... |
141 |
raw_spin_unlock_irqrestore(&clockevents_lock, flags); |
d316c57ff [PATCH] clockeven... |
142 143 144 |
return ret; } |
d316c57ff [PATCH] clockeven... |
145 146 147 148 149 150 151 152 153 154 |
/* * Notify about a clock event change. Called with clockevents_lock * held. */ static void clockevents_do_notify(unsigned long reason, void *dev) { raw_notifier_call_chain(&clockevents_chain, reason, dev); } /* |
3eb056764 time: fix typo in... |
155 |
* Called after a notify add to make devices available which were |
d316c57ff [PATCH] clockeven... |
156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 |
* released from the notifier call. */ static void clockevents_notify_released(void) { struct clock_event_device *dev; while (!list_empty(&clockevents_released)) { dev = list_entry(clockevents_released.next, struct clock_event_device, list); list_del(&dev->list); list_add(&dev->list, &clockevent_devices); clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev); } } /** * clockevents_register_device - register a clock event device * @dev: device to register */ void clockevents_register_device(struct clock_event_device *dev) { |
f833bab87 clockevent: Preve... |
177 |
unsigned long flags; |
d316c57ff [PATCH] clockeven... |
178 |
BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); |
320ab2b0b cpumask: convert ... |
179 |
BUG_ON(!dev->cpumask); |
b5f91da0a clockevents: Conv... |
180 |
raw_spin_lock_irqsave(&clockevents_lock, flags); |
d316c57ff [PATCH] clockeven... |
181 182 183 184 |
list_add(&dev->list, &clockevent_devices); clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev); clockevents_notify_released(); |
b5f91da0a clockevents: Conv... |
185 |
raw_spin_unlock_irqrestore(&clockevents_lock, flags); |
d316c57ff [PATCH] clockeven... |
186 |
} |
c81fc2c33 clockevent: expor... |
187 |
EXPORT_SYMBOL_GPL(clockevents_register_device); |
d316c57ff [PATCH] clockeven... |
188 189 190 191 |
/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old:	device to release (can be NULL)
 * @new:	device to request (can be NULL)
 *
 * Called from the notifier chain. clockevents_lock is held already
 */
void clockevents_exchange_device(struct clock_event_device *old,
				 struct clock_event_device *new)
{
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * Caller releases a clock event device. We queue it into the
	 * released list and do a notify add later.
	 */
	if (old) {
		clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
		list_move(&old->list, &clockevents_released);
	}

	/* The requested device must be unused; park it in shutdown mode */
	if (new) {
		BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
		clockevents_shutdown(new);
	}

	local_irq_restore(flags);
}
#ifdef CONFIG_GENERIC_CLOCKEVENTS
/**
 * clockevents_notify - notification about relevant events
 * @reason:	CLOCK_EVT_NOTIFY_* reason code
 * @arg:	reason dependent argument (pointer to the CPU number for
 *		CLOCK_EVT_NOTIFY_CPU_DEAD)
 */
void clockevents_notify(unsigned long reason, void *arg)
{
	struct clock_event_device *dev, *n;
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&clockevents_lock, flags);
	clockevents_do_notify(reason, arg);

	switch (reason) {
	case CLOCK_EVT_NOTIFY_CPU_DEAD:
		/*
		 * Unregister the clock event devices which were
		 * released from the users in the notify chain.
		 */
		list_for_each_entry_safe(dev, n, &clockevents_released, list)
			list_del(&dev->list);
		/*
		 * Now check whether the CPU has left unused per cpu devices
		 */
		cpu = *((int *)arg);
		list_for_each_entry_safe(dev, n, &clockevent_devices, list) {
			/* Skip devices not exclusively bound to the dead CPU */
			if (!cpumask_test_cpu(cpu, dev->cpumask) ||
			    cpumask_weight(dev->cpumask) != 1 ||
			    tick_is_broadcast_device(dev))
				continue;

			BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
			list_del(&dev->list);
		}
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_notify);
#endif