Commit 63070a79ba482c274bad10ac8c4b587a3e011f2c
1 parent
5a7780e725
Exists in
master
and in
4 other branches
hrtimer: catch expired CLOCK_REALTIME timers early
A CLOCK_REALTIME timer whose absolute expiry time is less than the clock realtime offset calls into the clock events code with a negative delta and triggers the WARN_ON() there. This is a false positive and needs to be prevented. Check the result of timer->expires - timer->base->offset right away and return -ETIME if it is negative. Thanks to Frans Pop, who reported the problem and tested the fixes. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Tested-by: Frans Pop <elendil@planet.nl>
Showing 1 changed file with 11 additions and 0 deletions Inline Diff
kernel/hrtimer.c
1 | /* | 1 | /* |
2 | * linux/kernel/hrtimer.c | 2 | * linux/kernel/hrtimer.c |
3 | * | 3 | * |
4 | * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de> | 4 | * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de> |
5 | * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar | 5 | * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar |
6 | * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner | 6 | * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner |
7 | * | 7 | * |
8 | * High-resolution kernel timers | 8 | * High-resolution kernel timers |
9 | * | 9 | * |
10 | * In contrast to the low-resolution timeout API implemented in | 10 | * In contrast to the low-resolution timeout API implemented in |
11 | * kernel/timer.c, hrtimers provide finer resolution and accuracy | 11 | * kernel/timer.c, hrtimers provide finer resolution and accuracy |
12 | * depending on system configuration and capabilities. | 12 | * depending on system configuration and capabilities. |
13 | * | 13 | * |
14 | * These timers are currently used for: | 14 | * These timers are currently used for: |
15 | * - itimers | 15 | * - itimers |
16 | * - POSIX timers | 16 | * - POSIX timers |
17 | * - nanosleep | 17 | * - nanosleep |
18 | * - precise in-kernel timing | 18 | * - precise in-kernel timing |
19 | * | 19 | * |
20 | * Started by: Thomas Gleixner and Ingo Molnar | 20 | * Started by: Thomas Gleixner and Ingo Molnar |
21 | * | 21 | * |
22 | * Credits: | 22 | * Credits: |
23 | * based on kernel/timer.c | 23 | * based on kernel/timer.c |
24 | * | 24 | * |
25 | * Help, testing, suggestions, bugfixes, improvements were | 25 | * Help, testing, suggestions, bugfixes, improvements were |
26 | * provided by: | 26 | * provided by: |
27 | * | 27 | * |
28 | * George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel | 28 | * George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel |
29 | * et. al. | 29 | * et. al. |
30 | * | 30 | * |
31 | * For licencing details see kernel-base/COPYING | 31 | * For licencing details see kernel-base/COPYING |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #include <linux/cpu.h> | 34 | #include <linux/cpu.h> |
35 | #include <linux/irq.h> | 35 | #include <linux/irq.h> |
36 | #include <linux/module.h> | 36 | #include <linux/module.h> |
37 | #include <linux/percpu.h> | 37 | #include <linux/percpu.h> |
38 | #include <linux/hrtimer.h> | 38 | #include <linux/hrtimer.h> |
39 | #include <linux/notifier.h> | 39 | #include <linux/notifier.h> |
40 | #include <linux/syscalls.h> | 40 | #include <linux/syscalls.h> |
41 | #include <linux/kallsyms.h> | 41 | #include <linux/kallsyms.h> |
42 | #include <linux/interrupt.h> | 42 | #include <linux/interrupt.h> |
43 | #include <linux/tick.h> | 43 | #include <linux/tick.h> |
44 | #include <linux/seq_file.h> | 44 | #include <linux/seq_file.h> |
45 | #include <linux/err.h> | 45 | #include <linux/err.h> |
46 | 46 | ||
47 | #include <asm/uaccess.h> | 47 | #include <asm/uaccess.h> |
48 | 48 | ||
49 | /** | 49 | /** |
50 | * ktime_get - get the monotonic time in ktime_t format | 50 | * ktime_get - get the monotonic time in ktime_t format |
51 | * | 51 | * |
52 | * returns the time in ktime_t format | 52 | * returns the time in ktime_t format |
53 | */ | 53 | */ |
54 | ktime_t ktime_get(void) | 54 | ktime_t ktime_get(void) |
55 | { | 55 | { |
56 | struct timespec now; | 56 | struct timespec now; |
57 | 57 | ||
58 | ktime_get_ts(&now); | 58 | ktime_get_ts(&now); |
59 | 59 | ||
60 | return timespec_to_ktime(now); | 60 | return timespec_to_ktime(now); |
61 | } | 61 | } |
62 | EXPORT_SYMBOL_GPL(ktime_get); | 62 | EXPORT_SYMBOL_GPL(ktime_get); |
63 | 63 | ||
64 | /** | 64 | /** |
65 | * ktime_get_real - get the real (wall-) time in ktime_t format | 65 | * ktime_get_real - get the real (wall-) time in ktime_t format |
66 | * | 66 | * |
67 | * returns the time in ktime_t format | 67 | * returns the time in ktime_t format |
68 | */ | 68 | */ |
69 | ktime_t ktime_get_real(void) | 69 | ktime_t ktime_get_real(void) |
70 | { | 70 | { |
71 | struct timespec now; | 71 | struct timespec now; |
72 | 72 | ||
73 | getnstimeofday(&now); | 73 | getnstimeofday(&now); |
74 | 74 | ||
75 | return timespec_to_ktime(now); | 75 | return timespec_to_ktime(now); |
76 | } | 76 | } |
77 | 77 | ||
78 | EXPORT_SYMBOL_GPL(ktime_get_real); | 78 | EXPORT_SYMBOL_GPL(ktime_get_real); |
79 | 79 | ||
80 | /* | 80 | /* |
81 | * The timer bases: | 81 | * The timer bases: |
82 | * | 82 | * |
83 | * Note: If we want to add new timer bases, we have to skip the two | 83 | * Note: If we want to add new timer bases, we have to skip the two |
84 | * clock ids captured by the cpu-timers. We do this by holding empty | 84 | * clock ids captured by the cpu-timers. We do this by holding empty |
85 | * entries rather than doing math adjustment of the clock ids. | 85 | * entries rather than doing math adjustment of the clock ids. |
86 | * This ensures that we capture erroneous accesses to these clock ids | 86 | * This ensures that we capture erroneous accesses to these clock ids |
87 | * rather than moving them into the range of valid clock id's. | 87 | * rather than moving them into the range of valid clock id's. |
88 | */ | 88 | */ |
89 | DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = | 89 | DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = |
90 | { | 90 | { |
91 | 91 | ||
92 | .clock_base = | 92 | .clock_base = |
93 | { | 93 | { |
94 | { | 94 | { |
95 | .index = CLOCK_REALTIME, | 95 | .index = CLOCK_REALTIME, |
96 | .get_time = &ktime_get_real, | 96 | .get_time = &ktime_get_real, |
97 | .resolution = KTIME_LOW_RES, | 97 | .resolution = KTIME_LOW_RES, |
98 | }, | 98 | }, |
99 | { | 99 | { |
100 | .index = CLOCK_MONOTONIC, | 100 | .index = CLOCK_MONOTONIC, |
101 | .get_time = &ktime_get, | 101 | .get_time = &ktime_get, |
102 | .resolution = KTIME_LOW_RES, | 102 | .resolution = KTIME_LOW_RES, |
103 | }, | 103 | }, |
104 | } | 104 | } |
105 | }; | 105 | }; |
106 | 106 | ||
107 | /** | 107 | /** |
108 | * ktime_get_ts - get the monotonic clock in timespec format | 108 | * ktime_get_ts - get the monotonic clock in timespec format |
109 | * @ts: pointer to timespec variable | 109 | * @ts: pointer to timespec variable |
110 | * | 110 | * |
111 | * The function calculates the monotonic clock from the realtime | 111 | * The function calculates the monotonic clock from the realtime |
112 | * clock and the wall_to_monotonic offset and stores the result | 112 | * clock and the wall_to_monotonic offset and stores the result |
113 | * in normalized timespec format in the variable pointed to by @ts. | 113 | * in normalized timespec format in the variable pointed to by @ts. |
114 | */ | 114 | */ |
115 | void ktime_get_ts(struct timespec *ts) | 115 | void ktime_get_ts(struct timespec *ts) |
116 | { | 116 | { |
117 | struct timespec tomono; | 117 | struct timespec tomono; |
118 | unsigned long seq; | 118 | unsigned long seq; |
119 | 119 | ||
120 | do { | 120 | do { |
121 | seq = read_seqbegin(&xtime_lock); | 121 | seq = read_seqbegin(&xtime_lock); |
122 | getnstimeofday(ts); | 122 | getnstimeofday(ts); |
123 | tomono = wall_to_monotonic; | 123 | tomono = wall_to_monotonic; |
124 | 124 | ||
125 | } while (read_seqretry(&xtime_lock, seq)); | 125 | } while (read_seqretry(&xtime_lock, seq)); |
126 | 126 | ||
127 | set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec, | 127 | set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec, |
128 | ts->tv_nsec + tomono.tv_nsec); | 128 | ts->tv_nsec + tomono.tv_nsec); |
129 | } | 129 | } |
130 | EXPORT_SYMBOL_GPL(ktime_get_ts); | 130 | EXPORT_SYMBOL_GPL(ktime_get_ts); |
131 | 131 | ||
132 | /* | 132 | /* |
133 | * Get the coarse grained time at the softirq based on xtime and | 133 | * Get the coarse grained time at the softirq based on xtime and |
134 | * wall_to_monotonic. | 134 | * wall_to_monotonic. |
135 | */ | 135 | */ |
136 | static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base) | 136 | static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base) |
137 | { | 137 | { |
138 | ktime_t xtim, tomono; | 138 | ktime_t xtim, tomono; |
139 | struct timespec xts, tom; | 139 | struct timespec xts, tom; |
140 | unsigned long seq; | 140 | unsigned long seq; |
141 | 141 | ||
142 | do { | 142 | do { |
143 | seq = read_seqbegin(&xtime_lock); | 143 | seq = read_seqbegin(&xtime_lock); |
144 | xts = current_kernel_time(); | 144 | xts = current_kernel_time(); |
145 | tom = wall_to_monotonic; | 145 | tom = wall_to_monotonic; |
146 | } while (read_seqretry(&xtime_lock, seq)); | 146 | } while (read_seqretry(&xtime_lock, seq)); |
147 | 147 | ||
148 | xtim = timespec_to_ktime(xts); | 148 | xtim = timespec_to_ktime(xts); |
149 | tomono = timespec_to_ktime(tom); | 149 | tomono = timespec_to_ktime(tom); |
150 | base->clock_base[CLOCK_REALTIME].softirq_time = xtim; | 150 | base->clock_base[CLOCK_REALTIME].softirq_time = xtim; |
151 | base->clock_base[CLOCK_MONOTONIC].softirq_time = | 151 | base->clock_base[CLOCK_MONOTONIC].softirq_time = |
152 | ktime_add(xtim, tomono); | 152 | ktime_add(xtim, tomono); |
153 | } | 153 | } |
154 | 154 | ||
155 | /* | 155 | /* |
156 | * Helper function to check, whether the timer is running the callback | 156 | * Helper function to check, whether the timer is running the callback |
157 | * function | 157 | * function |
158 | */ | 158 | */ |
159 | static inline int hrtimer_callback_running(struct hrtimer *timer) | 159 | static inline int hrtimer_callback_running(struct hrtimer *timer) |
160 | { | 160 | { |
161 | return timer->state & HRTIMER_STATE_CALLBACK; | 161 | return timer->state & HRTIMER_STATE_CALLBACK; |
162 | } | 162 | } |
163 | 163 | ||
164 | /* | 164 | /* |
165 | * Functions and macros which are different for UP/SMP systems are kept in a | 165 | * Functions and macros which are different for UP/SMP systems are kept in a |
166 | * single place | 166 | * single place |
167 | */ | 167 | */ |
168 | #ifdef CONFIG_SMP | 168 | #ifdef CONFIG_SMP |
169 | 169 | ||
170 | /* | 170 | /* |
171 | * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock | 171 | * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock |
172 | * means that all timers which are tied to this base via timer->base are | 172 | * means that all timers which are tied to this base via timer->base are |
173 | * locked, and the base itself is locked too. | 173 | * locked, and the base itself is locked too. |
174 | * | 174 | * |
175 | * So __run_timers/migrate_timers can safely modify all timers which could | 175 | * So __run_timers/migrate_timers can safely modify all timers which could |
176 | * be found on the lists/queues. | 176 | * be found on the lists/queues. |
177 | * | 177 | * |
178 | * When the timer's base is locked, and the timer removed from list, it is | 178 | * When the timer's base is locked, and the timer removed from list, it is |
179 | * possible to set timer->base = NULL and drop the lock: the timer remains | 179 | * possible to set timer->base = NULL and drop the lock: the timer remains |
180 | * locked. | 180 | * locked. |
181 | */ | 181 | */ |
182 | static | 182 | static |
183 | struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer, | 183 | struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer, |
184 | unsigned long *flags) | 184 | unsigned long *flags) |
185 | { | 185 | { |
186 | struct hrtimer_clock_base *base; | 186 | struct hrtimer_clock_base *base; |
187 | 187 | ||
188 | for (;;) { | 188 | for (;;) { |
189 | base = timer->base; | 189 | base = timer->base; |
190 | if (likely(base != NULL)) { | 190 | if (likely(base != NULL)) { |
191 | spin_lock_irqsave(&base->cpu_base->lock, *flags); | 191 | spin_lock_irqsave(&base->cpu_base->lock, *flags); |
192 | if (likely(base == timer->base)) | 192 | if (likely(base == timer->base)) |
193 | return base; | 193 | return base; |
194 | /* The timer has migrated to another CPU: */ | 194 | /* The timer has migrated to another CPU: */ |
195 | spin_unlock_irqrestore(&base->cpu_base->lock, *flags); | 195 | spin_unlock_irqrestore(&base->cpu_base->lock, *flags); |
196 | } | 196 | } |
197 | cpu_relax(); | 197 | cpu_relax(); |
198 | } | 198 | } |
199 | } | 199 | } |
200 | 200 | ||
201 | /* | 201 | /* |
202 | * Switch the timer base to the current CPU when possible. | 202 | * Switch the timer base to the current CPU when possible. |
203 | */ | 203 | */ |
204 | static inline struct hrtimer_clock_base * | 204 | static inline struct hrtimer_clock_base * |
205 | switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base) | 205 | switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base) |
206 | { | 206 | { |
207 | struct hrtimer_clock_base *new_base; | 207 | struct hrtimer_clock_base *new_base; |
208 | struct hrtimer_cpu_base *new_cpu_base; | 208 | struct hrtimer_cpu_base *new_cpu_base; |
209 | 209 | ||
210 | new_cpu_base = &__get_cpu_var(hrtimer_bases); | 210 | new_cpu_base = &__get_cpu_var(hrtimer_bases); |
211 | new_base = &new_cpu_base->clock_base[base->index]; | 211 | new_base = &new_cpu_base->clock_base[base->index]; |
212 | 212 | ||
213 | if (base != new_base) { | 213 | if (base != new_base) { |
214 | /* | 214 | /* |
215 | * We are trying to schedule the timer on the local CPU. | 215 | * We are trying to schedule the timer on the local CPU. |
216 | * However we can't change timer's base while it is running, | 216 | * However we can't change timer's base while it is running, |
217 | * so we keep it on the same CPU. No hassle vs. reprogramming | 217 | * so we keep it on the same CPU. No hassle vs. reprogramming |
218 | * the event source in the high resolution case. The softirq | 218 | * the event source in the high resolution case. The softirq |
219 | * code will take care of this when the timer function has | 219 | * code will take care of this when the timer function has |
220 | * completed. There is no conflict as we hold the lock until | 220 | * completed. There is no conflict as we hold the lock until |
221 | * the timer is enqueued. | 221 | * the timer is enqueued. |
222 | */ | 222 | */ |
223 | if (unlikely(hrtimer_callback_running(timer))) | 223 | if (unlikely(hrtimer_callback_running(timer))) |
224 | return base; | 224 | return base; |
225 | 225 | ||
226 | /* See the comment in lock_timer_base() */ | 226 | /* See the comment in lock_timer_base() */ |
227 | timer->base = NULL; | 227 | timer->base = NULL; |
228 | spin_unlock(&base->cpu_base->lock); | 228 | spin_unlock(&base->cpu_base->lock); |
229 | spin_lock(&new_base->cpu_base->lock); | 229 | spin_lock(&new_base->cpu_base->lock); |
230 | timer->base = new_base; | 230 | timer->base = new_base; |
231 | } | 231 | } |
232 | return new_base; | 232 | return new_base; |
233 | } | 233 | } |
234 | 234 | ||
235 | #else /* CONFIG_SMP */ | 235 | #else /* CONFIG_SMP */ |
236 | 236 | ||
237 | static inline struct hrtimer_clock_base * | 237 | static inline struct hrtimer_clock_base * |
238 | lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) | 238 | lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) |
239 | { | 239 | { |
240 | struct hrtimer_clock_base *base = timer->base; | 240 | struct hrtimer_clock_base *base = timer->base; |
241 | 241 | ||
242 | spin_lock_irqsave(&base->cpu_base->lock, *flags); | 242 | spin_lock_irqsave(&base->cpu_base->lock, *flags); |
243 | 243 | ||
244 | return base; | 244 | return base; |
245 | } | 245 | } |
246 | 246 | ||
247 | # define switch_hrtimer_base(t, b) (b) | 247 | # define switch_hrtimer_base(t, b) (b) |
248 | 248 | ||
249 | #endif /* !CONFIG_SMP */ | 249 | #endif /* !CONFIG_SMP */ |
250 | 250 | ||
251 | /* | 251 | /* |
252 | * Functions for the union type storage format of ktime_t which are | 252 | * Functions for the union type storage format of ktime_t which are |
253 | * too large for inlining: | 253 | * too large for inlining: |
254 | */ | 254 | */ |
255 | #if BITS_PER_LONG < 64 | 255 | #if BITS_PER_LONG < 64 |
256 | # ifndef CONFIG_KTIME_SCALAR | 256 | # ifndef CONFIG_KTIME_SCALAR |
257 | /** | 257 | /** |
258 | * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable | 258 | * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable |
259 | * @kt: addend | 259 | * @kt: addend |
260 | * @nsec: the scalar nsec value to add | 260 | * @nsec: the scalar nsec value to add |
261 | * | 261 | * |
262 | * Returns the sum of kt and nsec in ktime_t format | 262 | * Returns the sum of kt and nsec in ktime_t format |
263 | */ | 263 | */ |
264 | ktime_t ktime_add_ns(const ktime_t kt, u64 nsec) | 264 | ktime_t ktime_add_ns(const ktime_t kt, u64 nsec) |
265 | { | 265 | { |
266 | ktime_t tmp; | 266 | ktime_t tmp; |
267 | 267 | ||
268 | if (likely(nsec < NSEC_PER_SEC)) { | 268 | if (likely(nsec < NSEC_PER_SEC)) { |
269 | tmp.tv64 = nsec; | 269 | tmp.tv64 = nsec; |
270 | } else { | 270 | } else { |
271 | unsigned long rem = do_div(nsec, NSEC_PER_SEC); | 271 | unsigned long rem = do_div(nsec, NSEC_PER_SEC); |
272 | 272 | ||
273 | tmp = ktime_set((long)nsec, rem); | 273 | tmp = ktime_set((long)nsec, rem); |
274 | } | 274 | } |
275 | 275 | ||
276 | return ktime_add(kt, tmp); | 276 | return ktime_add(kt, tmp); |
277 | } | 277 | } |
278 | 278 | ||
279 | EXPORT_SYMBOL_GPL(ktime_add_ns); | 279 | EXPORT_SYMBOL_GPL(ktime_add_ns); |
280 | 280 | ||
281 | /** | 281 | /** |
282 | * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable | 282 | * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable |
283 | * @kt: minuend | 283 | * @kt: minuend |
284 | * @nsec: the scalar nsec value to subtract | 284 | * @nsec: the scalar nsec value to subtract |
285 | * | 285 | * |
286 | * Returns the subtraction of @nsec from @kt in ktime_t format | 286 | * Returns the subtraction of @nsec from @kt in ktime_t format |
287 | */ | 287 | */ |
288 | ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec) | 288 | ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec) |
289 | { | 289 | { |
290 | ktime_t tmp; | 290 | ktime_t tmp; |
291 | 291 | ||
292 | if (likely(nsec < NSEC_PER_SEC)) { | 292 | if (likely(nsec < NSEC_PER_SEC)) { |
293 | tmp.tv64 = nsec; | 293 | tmp.tv64 = nsec; |
294 | } else { | 294 | } else { |
295 | unsigned long rem = do_div(nsec, NSEC_PER_SEC); | 295 | unsigned long rem = do_div(nsec, NSEC_PER_SEC); |
296 | 296 | ||
297 | tmp = ktime_set((long)nsec, rem); | 297 | tmp = ktime_set((long)nsec, rem); |
298 | } | 298 | } |
299 | 299 | ||
300 | return ktime_sub(kt, tmp); | 300 | return ktime_sub(kt, tmp); |
301 | } | 301 | } |
302 | 302 | ||
303 | EXPORT_SYMBOL_GPL(ktime_sub_ns); | 303 | EXPORT_SYMBOL_GPL(ktime_sub_ns); |
304 | # endif /* !CONFIG_KTIME_SCALAR */ | 304 | # endif /* !CONFIG_KTIME_SCALAR */ |
305 | 305 | ||
306 | /* | 306 | /* |
307 | * Divide a ktime value by a nanosecond value | 307 | * Divide a ktime value by a nanosecond value |
308 | */ | 308 | */ |
309 | u64 ktime_divns(const ktime_t kt, s64 div) | 309 | u64 ktime_divns(const ktime_t kt, s64 div) |
310 | { | 310 | { |
311 | u64 dclc, inc, dns; | 311 | u64 dclc, inc, dns; |
312 | int sft = 0; | 312 | int sft = 0; |
313 | 313 | ||
314 | dclc = dns = ktime_to_ns(kt); | 314 | dclc = dns = ktime_to_ns(kt); |
315 | inc = div; | 315 | inc = div; |
316 | /* Make sure the divisor is less than 2^32: */ | 316 | /* Make sure the divisor is less than 2^32: */ |
317 | while (div >> 32) { | 317 | while (div >> 32) { |
318 | sft++; | 318 | sft++; |
319 | div >>= 1; | 319 | div >>= 1; |
320 | } | 320 | } |
321 | dclc >>= sft; | 321 | dclc >>= sft; |
322 | do_div(dclc, (unsigned long) div); | 322 | do_div(dclc, (unsigned long) div); |
323 | 323 | ||
324 | return dclc; | 324 | return dclc; |
325 | } | 325 | } |
326 | #endif /* BITS_PER_LONG >= 64 */ | 326 | #endif /* BITS_PER_LONG >= 64 */ |
327 | 327 | ||
328 | /* | 328 | /* |
329 | * Add two ktime values and do a safety check for overflow: | 329 | * Add two ktime values and do a safety check for overflow: |
330 | */ | 330 | */ |
331 | ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs) | 331 | ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs) |
332 | { | 332 | { |
333 | ktime_t res = ktime_add(lhs, rhs); | 333 | ktime_t res = ktime_add(lhs, rhs); |
334 | 334 | ||
335 | /* | 335 | /* |
336 | * We use KTIME_SEC_MAX here, the maximum timeout which we can | 336 | * We use KTIME_SEC_MAX here, the maximum timeout which we can |
337 | * return to user space in a timespec: | 337 | * return to user space in a timespec: |
338 | */ | 338 | */ |
339 | if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64) | 339 | if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64) |
340 | res = ktime_set(KTIME_SEC_MAX, 0); | 340 | res = ktime_set(KTIME_SEC_MAX, 0); |
341 | 341 | ||
342 | return res; | 342 | return res; |
343 | } | 343 | } |
344 | 344 | ||
345 | /* | 345 | /* |
346 | * Check, whether the timer is on the callback pending list | 346 | * Check, whether the timer is on the callback pending list |
347 | */ | 347 | */ |
348 | static inline int hrtimer_cb_pending(const struct hrtimer *timer) | 348 | static inline int hrtimer_cb_pending(const struct hrtimer *timer) |
349 | { | 349 | { |
350 | return timer->state & HRTIMER_STATE_PENDING; | 350 | return timer->state & HRTIMER_STATE_PENDING; |
351 | } | 351 | } |
352 | 352 | ||
353 | /* | 353 | /* |
354 | * Remove a timer from the callback pending list | 354 | * Remove a timer from the callback pending list |
355 | */ | 355 | */ |
356 | static inline void hrtimer_remove_cb_pending(struct hrtimer *timer) | 356 | static inline void hrtimer_remove_cb_pending(struct hrtimer *timer) |
357 | { | 357 | { |
358 | list_del_init(&timer->cb_entry); | 358 | list_del_init(&timer->cb_entry); |
359 | } | 359 | } |
360 | 360 | ||
361 | /* High resolution timer related functions */ | 361 | /* High resolution timer related functions */ |
362 | #ifdef CONFIG_HIGH_RES_TIMERS | 362 | #ifdef CONFIG_HIGH_RES_TIMERS |
363 | 363 | ||
364 | /* | 364 | /* |
365 | * High resolution timer enabled ? | 365 | * High resolution timer enabled ? |
366 | */ | 366 | */ |
367 | static int hrtimer_hres_enabled __read_mostly = 1; | 367 | static int hrtimer_hres_enabled __read_mostly = 1; |
368 | 368 | ||
369 | /* | 369 | /* |
370 | * Enable / Disable high resolution mode | 370 | * Enable / Disable high resolution mode |
371 | */ | 371 | */ |
372 | static int __init setup_hrtimer_hres(char *str) | 372 | static int __init setup_hrtimer_hres(char *str) |
373 | { | 373 | { |
374 | if (!strcmp(str, "off")) | 374 | if (!strcmp(str, "off")) |
375 | hrtimer_hres_enabled = 0; | 375 | hrtimer_hres_enabled = 0; |
376 | else if (!strcmp(str, "on")) | 376 | else if (!strcmp(str, "on")) |
377 | hrtimer_hres_enabled = 1; | 377 | hrtimer_hres_enabled = 1; |
378 | else | 378 | else |
379 | return 0; | 379 | return 0; |
380 | return 1; | 380 | return 1; |
381 | } | 381 | } |
382 | 382 | ||
383 | __setup("highres=", setup_hrtimer_hres); | 383 | __setup("highres=", setup_hrtimer_hres); |
384 | 384 | ||
385 | /* | 385 | /* |
386 | * hrtimer_high_res_enabled - query, if the highres mode is enabled | 386 | * hrtimer_high_res_enabled - query, if the highres mode is enabled |
387 | */ | 387 | */ |
388 | static inline int hrtimer_is_hres_enabled(void) | 388 | static inline int hrtimer_is_hres_enabled(void) |
389 | { | 389 | { |
390 | return hrtimer_hres_enabled; | 390 | return hrtimer_hres_enabled; |
391 | } | 391 | } |
392 | 392 | ||
393 | /* | 393 | /* |
394 | * Is the high resolution mode active ? | 394 | * Is the high resolution mode active ? |
395 | */ | 395 | */ |
396 | static inline int hrtimer_hres_active(void) | 396 | static inline int hrtimer_hres_active(void) |
397 | { | 397 | { |
398 | return __get_cpu_var(hrtimer_bases).hres_active; | 398 | return __get_cpu_var(hrtimer_bases).hres_active; |
399 | } | 399 | } |
400 | 400 | ||
401 | /* | 401 | /* |
402 | * Reprogram the event source with checking both queues for the | 402 | * Reprogram the event source with checking both queues for the |
403 | * next event | 403 | * next event |
404 | * Called with interrupts disabled and base->lock held | 404 | * Called with interrupts disabled and base->lock held |
405 | */ | 405 | */ |
406 | static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base) | 406 | static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base) |
407 | { | 407 | { |
408 | int i; | 408 | int i; |
409 | struct hrtimer_clock_base *base = cpu_base->clock_base; | 409 | struct hrtimer_clock_base *base = cpu_base->clock_base; |
410 | ktime_t expires; | 410 | ktime_t expires; |
411 | 411 | ||
412 | cpu_base->expires_next.tv64 = KTIME_MAX; | 412 | cpu_base->expires_next.tv64 = KTIME_MAX; |
413 | 413 | ||
414 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { | 414 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { |
415 | struct hrtimer *timer; | 415 | struct hrtimer *timer; |
416 | 416 | ||
417 | if (!base->first) | 417 | if (!base->first) |
418 | continue; | 418 | continue; |
419 | timer = rb_entry(base->first, struct hrtimer, node); | 419 | timer = rb_entry(base->first, struct hrtimer, node); |
420 | expires = ktime_sub(timer->expires, base->offset); | 420 | expires = ktime_sub(timer->expires, base->offset); |
421 | if (expires.tv64 < cpu_base->expires_next.tv64) | 421 | if (expires.tv64 < cpu_base->expires_next.tv64) |
422 | cpu_base->expires_next = expires; | 422 | cpu_base->expires_next = expires; |
423 | } | 423 | } |
424 | 424 | ||
425 | if (cpu_base->expires_next.tv64 != KTIME_MAX) | 425 | if (cpu_base->expires_next.tv64 != KTIME_MAX) |
426 | tick_program_event(cpu_base->expires_next, 1); | 426 | tick_program_event(cpu_base->expires_next, 1); |
427 | } | 427 | } |
428 | 428 | ||
429 | /* | 429 | /* |
430 | * Shared reprogramming for clock_realtime and clock_monotonic | 430 | * Shared reprogramming for clock_realtime and clock_monotonic |
431 | * | 431 | * |
432 | * When a timer is enqueued and expires earlier than the already enqueued | 432 | * When a timer is enqueued and expires earlier than the already enqueued |
433 | * timers, we have to check, whether it expires earlier than the timer for | 433 | * timers, we have to check, whether it expires earlier than the timer for |
434 | * which the clock event device was armed. | 434 | * which the clock event device was armed. |
435 | * | 435 | * |
436 | * Called with interrupts disabled and base->cpu_base.lock held | 436 | * Called with interrupts disabled and base->cpu_base.lock held |
437 | */ | 437 | */ |
438 | static int hrtimer_reprogram(struct hrtimer *timer, | 438 | static int hrtimer_reprogram(struct hrtimer *timer, |
439 | struct hrtimer_clock_base *base) | 439 | struct hrtimer_clock_base *base) |
440 | { | 440 | { |
441 | ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next; | 441 | ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next; |
442 | ktime_t expires = ktime_sub(timer->expires, base->offset); | 442 | ktime_t expires = ktime_sub(timer->expires, base->offset); |
443 | int res; | 443 | int res; |
444 | 444 | ||
445 | WARN_ON_ONCE(timer->expires.tv64 < 0); | ||
446 | |||
445 | /* | 447 | /* |
446 | * When the callback is running, we do not reprogram the clock event | 448 | * When the callback is running, we do not reprogram the clock event |
447 | * device. The timer callback is either running on a different CPU or | 449 | * device. The timer callback is either running on a different CPU or |
448 | * the callback is executed in the hrtimer_interrupt context. The | 450 | * the callback is executed in the hrtimer_interrupt context. The |
449 | * reprogramming is handled either by the softirq, which called the | 451 | * reprogramming is handled either by the softirq, which called the |
450 | * callback or at the end of the hrtimer_interrupt. | 452 | * callback or at the end of the hrtimer_interrupt. |
451 | */ | 453 | */ |
452 | if (hrtimer_callback_running(timer)) | 454 | if (hrtimer_callback_running(timer)) |
453 | return 0; | 455 | return 0; |
456 | |||
457 | /* | ||
458 | * CLOCK_REALTIME timer might be requested with an absolute | ||
459 | * expiry time which is less than base->offset. Nothing wrong | ||
460 | * about that, just avoid to call into the tick code, which | ||
461 | * has now objections against negative expiry values. | ||
462 | */ | ||
463 | if (expires.tv64 < 0) | ||
464 | return -ETIME; | ||
454 | 465 | ||
455 | if (expires.tv64 >= expires_next->tv64) | 466 | if (expires.tv64 >= expires_next->tv64) |
456 | return 0; | 467 | return 0; |
457 | 468 | ||
458 | /* | 469 | /* |
459 | * Clockevents returns -ETIME, when the event was in the past. | 470 | * Clockevents returns -ETIME, when the event was in the past. |
460 | */ | 471 | */ |
461 | res = tick_program_event(expires, 0); | 472 | res = tick_program_event(expires, 0); |
462 | if (!IS_ERR_VALUE(res)) | 473 | if (!IS_ERR_VALUE(res)) |
463 | *expires_next = expires; | 474 | *expires_next = expires; |
464 | return res; | 475 | return res; |
465 | } | 476 | } |
466 | 477 | ||
467 | 478 | ||
468 | /* | 479 | /* |
469 | * Retrigger next event is called after clock was set | 480 | * Retrigger next event is called after clock was set |
470 | * | 481 | * |
471 | * Called with interrupts disabled via on_each_cpu() | 482 | * Called with interrupts disabled via on_each_cpu() |
472 | */ | 483 | */ |
473 | static void retrigger_next_event(void *arg) | 484 | static void retrigger_next_event(void *arg) |
474 | { | 485 | { |
475 | struct hrtimer_cpu_base *base; | 486 | struct hrtimer_cpu_base *base; |
476 | struct timespec realtime_offset; | 487 | struct timespec realtime_offset; |
477 | unsigned long seq; | 488 | unsigned long seq; |
478 | 489 | ||
479 | if (!hrtimer_hres_active()) | 490 | if (!hrtimer_hres_active()) |
480 | return; | 491 | return; |
481 | 492 | ||
482 | do { | 493 | do { |
483 | seq = read_seqbegin(&xtime_lock); | 494 | seq = read_seqbegin(&xtime_lock); |
484 | set_normalized_timespec(&realtime_offset, | 495 | set_normalized_timespec(&realtime_offset, |
485 | -wall_to_monotonic.tv_sec, | 496 | -wall_to_monotonic.tv_sec, |
486 | -wall_to_monotonic.tv_nsec); | 497 | -wall_to_monotonic.tv_nsec); |
487 | } while (read_seqretry(&xtime_lock, seq)); | 498 | } while (read_seqretry(&xtime_lock, seq)); |
488 | 499 | ||
489 | base = &__get_cpu_var(hrtimer_bases); | 500 | base = &__get_cpu_var(hrtimer_bases); |
490 | 501 | ||
491 | /* Adjust CLOCK_REALTIME offset */ | 502 | /* Adjust CLOCK_REALTIME offset */ |
492 | spin_lock(&base->lock); | 503 | spin_lock(&base->lock); |
493 | base->clock_base[CLOCK_REALTIME].offset = | 504 | base->clock_base[CLOCK_REALTIME].offset = |
494 | timespec_to_ktime(realtime_offset); | 505 | timespec_to_ktime(realtime_offset); |
495 | 506 | ||
496 | hrtimer_force_reprogram(base); | 507 | hrtimer_force_reprogram(base); |
497 | spin_unlock(&base->lock); | 508 | spin_unlock(&base->lock); |
498 | } | 509 | } |
499 | 510 | ||
/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
	/*
	 * Retrigger the CPU local events everywhere. The trailing 0/1
	 * arguments are presumably the retry/wait flags of this kernel's
	 * on_each_cpu() — verify against its prototype.
	 */
	on_each_cpu(retrigger_next_event, NULL, 0, 1);
}
516 | 527 | ||
/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt (on the local CPU):
 */
void hres_timers_resume(void)
{
	/*
	 * NOTE(review): only expected to run while a single CPU is
	 * online — the WARN_ON catches calls from any other context.
	 */
	WARN_ON_ONCE(num_online_cpus() > 1);

	/* Retrigger the CPU local events: */
	retrigger_next_event(NULL);
}
528 | 539 | ||
/*
 * Initialize the high resolution related parts of cpu_base
 */
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
	/* No event armed yet; high resolution mode starts disabled */
	base->expires_next.tv64 = KTIME_MAX;
	base->hres_active = 0;
}
537 | 548 | ||
/*
 * Initialize the high resolution related parts of a hrtimer
 *
 * Intentionally empty: there is no per-timer high resolution state to
 * set up. The hook exists so hrtimer_init() can call it unconditionally.
 */
static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
{
}
544 | 555 | ||
/*
 * When High resolution timers are active, try to reprogram. Note, that in case
 * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
 * check happens. The timer gets enqueued into the rbtree. The reprogramming
 * and expiry check is done in the hrtimer_interrupt or in the softirq.
 *
 * Returns 1 when the expired timer was handled here (callback invoked,
 * handed to the softirq, or intentionally left to the call site) and the
 * caller must NOT enqueue it; returns 0 when normal enqueueing should
 * proceed.
 */
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base)
{
	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {

		/* Timer is expired, act upon the callback mode */
		switch(timer->cb_mode) {
		case HRTIMER_CB_IRQSAFE_NO_RESTART:
			/*
			 * We can call the callback from here. No restart
			 * happens, so no danger of recursion
			 */
			BUG_ON(timer->function(timer) != HRTIMER_NORESTART);
			return 1;
		case HRTIMER_CB_IRQSAFE_NO_SOFTIRQ:
			/*
			 * This is solely for the sched tick emulation with
			 * dynamic tick support to ensure that we do not
			 * restart the tick right on the edge and end up with
			 * the tick timer in the softirq ! The calling site
			 * takes care of this.
			 */
			return 1;
		case HRTIMER_CB_IRQSAFE:
		case HRTIMER_CB_SOFTIRQ:
			/*
			 * Move everything else into the softirq pending list !
			 */
			list_add_tail(&timer->cb_entry,
				      &base->cpu_base->cb_pending);
			timer->state = HRTIMER_STATE_PENDING;
			raise_softirq(HRTIMER_SOFTIRQ);
			return 1;
		default:
			/* Unknown callback mode: corrupted timer */
			BUG();
		}
	}
	return 0;
}
590 | 601 | ||
/*
 * Switch to high resolution mode
 *
 * Runs on the current CPU with interrupts disabled across the switch.
 * Returns 1 when high resolution mode is (already or newly) active,
 * 0 when the tick device could not be switched.
 */
static int hrtimer_switch_to_hres(void)
{
	int cpu = smp_processor_id();
	struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
	unsigned long flags;

	/* Already switched on this CPU: nothing to do */
	if (base->hres_active)
		return 1;

	local_irq_save(flags);

	/* Switch the per cpu tick device over to oneshot/highres mode */
	if (tick_init_highres()) {
		local_irq_restore(flags);
		printk(KERN_WARNING "Could not switch to high resolution "
		       "mode on CPU %d\n", cpu);
		return 0;
	}
	base->hres_active = 1;
	base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES;
	base->clock_base[CLOCK_MONOTONIC].resolution = KTIME_HIGH_RES;

	/* Hand the periodic tick duty over to a hrtimer based emulation */
	tick_setup_sched_timer();

	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
	local_irq_restore(flags);
	printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n",
	       smp_processor_id());
	return 1;
}
624 | 635 | ||
#else

/*
 * CONFIG_HIGH_RES_TIMERS=n: stub versions so the common hrtimer code
 * compiles unchanged. High resolution mode is never active here, so
 * every stub is a no-op or reports "inactive"/"not handled".
 */
static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base)
{
	return 0;
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }
static inline int hrtimer_reprogram(struct hrtimer *timer,
				    struct hrtimer_clock_base *base)
{
	return 0;
}

#endif /* CONFIG_HIGH_RES_TIMERS */
645 | 656 | ||
#ifdef CONFIG_TIMER_STATS
/*
 * Record which code path started this timer, for timer statistics.
 * First caller wins: once start_site is set, later starts of the same
 * timer do not overwrite the recorded origin.
 */
void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
{
	if (timer->start_site)
		return;

	/* Remember the call site, the task name and its pid */
	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}
#endif
657 | 668 | ||
/*
 * Counterpart to lock_hrtimer_base above:
 *
 * Drops the per-cpu base lock and restores the interrupt state that
 * lock_hrtimer_base() saved into *flags.
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}
666 | 677 | ||
/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns, i.e. how many whole intervals were
 * skipped; 0 when the timer has not expired yet.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, timer->expires);

	/* Not expired yet: nothing to forward */
	if (delta.tv64 < 0)
		return 0;

	/* Never advance by less than the resolution of the clock base */
	if (interval.tv64 < timer->base->resolution.tv64)
		interval.tv64 = timer->base->resolution.tv64;

	/*
	 * More than one interval behind: jump ahead by the whole number
	 * of elapsed intervals with one division instead of looping.
	 */
	if (unlikely(delta.tv64 >= interval.tv64)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		timer->expires = ktime_add_ns(timer->expires, incr * orun);
		if (timer->expires.tv64 > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	/* _safe variant: saturates instead of overflowing huge expiries */
	timer->expires = ktime_add_safe(timer->expires, interval);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
707 | 718 | ||
/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 *
 * @reprogram selects whether the clock event device may be reprogrammed
 * when the new timer becomes the earliest one (only valid on the
 * current CPU's base).
 */
static void enqueue_hrtimer(struct hrtimer *timer,
			    struct hrtimer_clock_base *base, int reprogram)
{
	struct rb_node **link = &base->active.rb_node;
	struct rb_node *parent = NULL;
	struct hrtimer *entry;
	/* Stays 1 only if we never descend to the right, i.e. the new
	 * timer expires before every timer already in the tree. */
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct hrtimer, node);
		/*
		 * We dont care about collisions. Nodes with
		 * the same expiry time stay together.
		 */
		if (timer->expires.tv64 < entry->expires.tv64) {
			link = &(*link)->rb_left;
		} else {
			link = &(*link)->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Insert the timer to the rbtree and check whether it
	 * replaces the first pending timer
	 */
	if (leftmost) {
		/*
		 * Reprogram the clock event device. When the timer is already
		 * expired hrtimer_enqueue_reprogram has either called the
		 * callback or added it to the pending list and raised the
		 * softirq.
		 *
		 * This is a NOP for !HIGHRES
		 */
		if (reprogram && hrtimer_enqueue_reprogram(timer, base))
			return;

		/* New earliest timer: update the cached leftmost pointer */
		base->first = &timer->node;
	}

	rb_link_node(&timer->node, parent, link);
	rb_insert_color(&timer->node, &base->active);
	/*
	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
	 * state of a possibly running callback.
	 */
	timer->state |= HRTIMER_STATE_ENQUEUED;
}
767 | 778 | ||
/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful, when the context does a reprogramming
 * anyway (e.g. timer interrupt)
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     unsigned long newstate, int reprogram)
{
	/* High res. callback list. NOP for !HIGHRES */
	if (hrtimer_cb_pending(timer))
		hrtimer_remove_cb_pending(timer);
	else {
		/*
		 * Remove the timer from the rbtree and replace the
		 * first entry pointer if necessary.
		 */
		if (base->first == &timer->node) {
			base->first = rb_next(&timer->node);
			/* Reprogram the clock event device. if enabled */
			if (reprogram && hrtimer_hres_active())
				hrtimer_force_reprogram(base->cpu_base);
		}
		rb_erase(&timer->node, &base->active);
	}
	/* Publish the caller supplied state, e.g. HRTIMER_STATE_INACTIVE */
	timer->state = newstate;
}
800 | 811 | ||
/*
 * remove hrtimer, called with base lock held
 *
 * Returns 1 when a queued timer was removed, 0 when the timer was not
 * queued (inactive, or only its callback is running).
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	if (hrtimer_is_queued(timer)) {
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
		timer_stats_hrtimer_clear_start_info(timer);
		reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
		__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
				 reprogram);
		return 1;
	}
	return 0;
}
826 | 837 | ||
/**
 * hrtimer_start - (re)start an relative timer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int ret;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	ret = remove_hrtimer(timer, base);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base);

	if (mode == HRTIMER_MODE_REL) {
		/* Relative expiry: make it absolute against the clock's
		 * current time. _safe saturates instead of overflowing. */
		tim = ktime_add_safe(tim, new_base->get_time());
		/*
		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
		 * to signal that they simply return xtime in
		 * do_gettimeoffset(). In this case we want to round up by
		 * resolution when starting a relative timer, to avoid short
		 * timeouts. This will go away with the GTOD framework.
		 */
#ifdef CONFIG_TIME_LOW_RES
		tim = ktime_add_safe(tim, base->resolution);
#endif
	}
	timer->expires = tim;

	/* Record the start origin for CONFIG_TIMER_STATS */
	timer_stats_hrtimer_set_start_info(timer);

	/*
	 * Only allow reprogramming if the new base is on this CPU.
	 * (it might still be on another CPU if the timer was pending)
	 */
	enqueue_hrtimer(timer, new_base,
			new_base->cpu_base == &__get_cpu_var(hrtimer_bases));

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_start);
881 | 892 | ||
/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer: hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret;

	base = lock_hrtimer_base(timer, &flags);

	/*
	 * A timer whose callback is running right now cannot be
	 * deactivated; report -1 so callers can retry (hrtimer_cancel()
	 * spins on exactly this case).
	 */
	if (hrtimer_callback_running(timer))
		ret = -1;
	else
		ret = remove_hrtimer(timer, base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
909 | 920 | ||
/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer: the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	int ret;

	/*
	 * hrtimer_try_to_cancel() returns -1 only while the callback is
	 * executing; spin until the callback has finished and the timer
	 * could be deactivated.
	 */
	while ((ret = hrtimer_try_to_cancel(timer)) < 0)
		cpu_relax();

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
929 | 940 | ||
/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 *
 * Returns the delta between the timer's expiry and the current time of
 * its clock base; negative when the timer has already expired.
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	ktime_t rem;

	/* Hold the base lock so expiry and base cannot change under us */
	base = lock_hrtimer_base(timer, &flags);
	rem = ktime_sub(timer->expires, base->get_time());
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
947 | 958 | ||
#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ)
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the delta to the next expiry event or KTIME_MAX if no timer
 * is pending.
 */
ktime_t hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cpu_base->lock, flags);

	/*
	 * Only scan the rbtrees in low resolution mode; in high
	 * resolution mode the next event is programmed by the hrtimer
	 * code itself, so mindelta stays KTIME_MAX here.
	 */
	if (!hrtimer_hres_active()) {
		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
			struct hrtimer *timer;

			/* No timer queued on this clock base */
			if (!base->first)
				continue;

			/* base->first caches the earliest-expiring timer */
			timer = rb_entry(base->first, struct hrtimer, node);
			delta.tv64 = timer->expires.tv64;
			delta = ktime_sub(delta, base->get_time());
			if (delta.tv64 < mindelta.tv64)
				mindelta.tv64 = delta.tv64;
		}
	}

	spin_unlock_irqrestore(&cpu_base->lock, flags);

	/* Clamp already expired timers to "expires now" */
	if (mindelta.tv64 < 0)
		mindelta.tv64 = 0;
	return mindelta;
}
#endif
987 | 998 | ||
/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	struct hrtimer_cpu_base *cpu_base;

	/* Start from a clean slate; the timer may be stack/heap garbage */
	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);

	/*
	 * Relative CLOCK_REALTIME timers are mapped to CLOCK_MONOTONIC —
	 * presumably so that relative timeouts are not affected by
	 * settimeofday() adjustments; verify against callers if relied on.
	 */
	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
		clock_id = CLOCK_MONOTONIC;

	timer->base = &cpu_base->clock_base[clock_id];
	INIT_LIST_HEAD(&timer->cb_entry);
	hrtimer_init_timer_hres(timer);

#ifdef CONFIG_TIMER_STATS
	/* No start origin recorded yet */
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}
EXPORT_SYMBOL_GPL(hrtimer_init);
1017 | 1028 | ||
1018 | /** | 1029 | /** |
1019 | * hrtimer_get_res - get the timer resolution for a clock | 1030 | * hrtimer_get_res - get the timer resolution for a clock |
1020 | * @which_clock: which clock to query | 1031 | * @which_clock: which clock to query |
1021 | * @tp: pointer to timespec variable to store the resolution | 1032 | * @tp: pointer to timespec variable to store the resolution |
1022 | * | 1033 | * |
1023 | * Store the resolution of the clock selected by @which_clock in the | 1034 | * Store the resolution of the clock selected by @which_clock in the |
1024 | * variable pointed to by @tp. | 1035 | * variable pointed to by @tp. |
1025 | */ | 1036 | */ |
1026 | int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp) | 1037 | int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp) |
1027 | { | 1038 | { |
1028 | struct hrtimer_cpu_base *cpu_base; | 1039 | struct hrtimer_cpu_base *cpu_base; |
1029 | 1040 | ||
1030 | cpu_base = &__raw_get_cpu_var(hrtimer_bases); | 1041 | cpu_base = &__raw_get_cpu_var(hrtimer_bases); |
1031 | *tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution); | 1042 | *tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution); |
1032 | 1043 | ||
1033 | return 0; | 1044 | return 0; |
1034 | } | 1045 | } |
1035 | EXPORT_SYMBOL_GPL(hrtimer_get_res); | 1046 | EXPORT_SYMBOL_GPL(hrtimer_get_res); |
1036 | 1047 | ||
1037 | static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base) | 1048 | static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base) |
1038 | { | 1049 | { |
1039 | spin_lock_irq(&cpu_base->lock); | 1050 | spin_lock_irq(&cpu_base->lock); |
1040 | 1051 | ||
1041 | while (!list_empty(&cpu_base->cb_pending)) { | 1052 | while (!list_empty(&cpu_base->cb_pending)) { |
1042 | enum hrtimer_restart (*fn)(struct hrtimer *); | 1053 | enum hrtimer_restart (*fn)(struct hrtimer *); |
1043 | struct hrtimer *timer; | 1054 | struct hrtimer *timer; |
1044 | int restart; | 1055 | int restart; |
1045 | 1056 | ||
1046 | timer = list_entry(cpu_base->cb_pending.next, | 1057 | timer = list_entry(cpu_base->cb_pending.next, |
1047 | struct hrtimer, cb_entry); | 1058 | struct hrtimer, cb_entry); |
1048 | 1059 | ||
1049 | timer_stats_account_hrtimer(timer); | 1060 | timer_stats_account_hrtimer(timer); |
1050 | 1061 | ||
1051 | fn = timer->function; | 1062 | fn = timer->function; |
1052 | __remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0); | 1063 | __remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0); |
1053 | spin_unlock_irq(&cpu_base->lock); | 1064 | spin_unlock_irq(&cpu_base->lock); |
1054 | 1065 | ||
1055 | restart = fn(timer); | 1066 | restart = fn(timer); |
1056 | 1067 | ||
1057 | spin_lock_irq(&cpu_base->lock); | 1068 | spin_lock_irq(&cpu_base->lock); |
1058 | 1069 | ||
1059 | timer->state &= ~HRTIMER_STATE_CALLBACK; | 1070 | timer->state &= ~HRTIMER_STATE_CALLBACK; |
1060 | if (restart == HRTIMER_RESTART) { | 1071 | if (restart == HRTIMER_RESTART) { |
1061 | BUG_ON(hrtimer_active(timer)); | 1072 | BUG_ON(hrtimer_active(timer)); |
1062 | /* | 1073 | /* |
1063 | * Enqueue the timer, allow reprogramming of the event | 1074 | * Enqueue the timer, allow reprogramming of the event |
1064 | * device | 1075 | * device |
1065 | */ | 1076 | */ |
1066 | enqueue_hrtimer(timer, timer->base, 1); | 1077 | enqueue_hrtimer(timer, timer->base, 1); |
1067 | } else if (hrtimer_active(timer)) { | 1078 | } else if (hrtimer_active(timer)) { |
1068 | /* | 1079 | /* |
1069 | * If the timer was rearmed on another CPU, reprogram | 1080 | * If the timer was rearmed on another CPU, reprogram |
1070 | * the event device. | 1081 | * the event device. |
1071 | */ | 1082 | */ |
1072 | if (timer->base->first == &timer->node) | 1083 | if (timer->base->first == &timer->node) |
1073 | hrtimer_reprogram(timer, timer->base); | 1084 | hrtimer_reprogram(timer, timer->base); |
1074 | } | 1085 | } |
1075 | } | 1086 | } |
1076 | spin_unlock_irq(&cpu_base->lock); | 1087 | spin_unlock_irq(&cpu_base->lock); |
1077 | } | 1088 | } |
1078 | 1089 | ||
1079 | static void __run_hrtimer(struct hrtimer *timer) | 1090 | static void __run_hrtimer(struct hrtimer *timer) |
1080 | { | 1091 | { |
1081 | struct hrtimer_clock_base *base = timer->base; | 1092 | struct hrtimer_clock_base *base = timer->base; |
1082 | struct hrtimer_cpu_base *cpu_base = base->cpu_base; | 1093 | struct hrtimer_cpu_base *cpu_base = base->cpu_base; |
1083 | enum hrtimer_restart (*fn)(struct hrtimer *); | 1094 | enum hrtimer_restart (*fn)(struct hrtimer *); |
1084 | int restart; | 1095 | int restart; |
1085 | 1096 | ||
1086 | __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0); | 1097 | __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0); |
1087 | timer_stats_account_hrtimer(timer); | 1098 | timer_stats_account_hrtimer(timer); |
1088 | 1099 | ||
1089 | fn = timer->function; | 1100 | fn = timer->function; |
1090 | if (timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ) { | 1101 | if (timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ) { |
1091 | /* | 1102 | /* |
1092 | * Used for scheduler timers, avoid lock inversion with | 1103 | * Used for scheduler timers, avoid lock inversion with |
1093 | * rq->lock and tasklist_lock. | 1104 | * rq->lock and tasklist_lock. |
1094 | * | 1105 | * |
1095 | * These timers are required to deal with enqueue expiry | 1106 | * These timers are required to deal with enqueue expiry |
1096 | * themselves and are not allowed to migrate. | 1107 | * themselves and are not allowed to migrate. |
1097 | */ | 1108 | */ |
1098 | spin_unlock(&cpu_base->lock); | 1109 | spin_unlock(&cpu_base->lock); |
1099 | restart = fn(timer); | 1110 | restart = fn(timer); |
1100 | spin_lock(&cpu_base->lock); | 1111 | spin_lock(&cpu_base->lock); |
1101 | } else | 1112 | } else |
1102 | restart = fn(timer); | 1113 | restart = fn(timer); |
1103 | 1114 | ||
1104 | /* | 1115 | /* |
1105 | * Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid | 1116 | * Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid |
1106 | * reprogramming of the event hardware. This happens at the end of this | 1117 | * reprogramming of the event hardware. This happens at the end of this |
1107 | * function anyway. | 1118 | * function anyway. |
1108 | */ | 1119 | */ |
1109 | if (restart != HRTIMER_NORESTART) { | 1120 | if (restart != HRTIMER_NORESTART) { |
1110 | BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); | 1121 | BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); |
1111 | enqueue_hrtimer(timer, base, 0); | 1122 | enqueue_hrtimer(timer, base, 0); |
1112 | } | 1123 | } |
1113 | timer->state &= ~HRTIMER_STATE_CALLBACK; | 1124 | timer->state &= ~HRTIMER_STATE_CALLBACK; |
1114 | } | 1125 | } |
1115 | 1126 | ||
1116 | #ifdef CONFIG_HIGH_RES_TIMERS | 1127 | #ifdef CONFIG_HIGH_RES_TIMERS |
1117 | 1128 | ||
1118 | /* | 1129 | /* |
1119 | * High resolution timer interrupt | 1130 | * High resolution timer interrupt |
1120 | * Called with interrupts disabled | 1131 | * Called with interrupts disabled |
1121 | */ | 1132 | */ |
1122 | void hrtimer_interrupt(struct clock_event_device *dev) | 1133 | void hrtimer_interrupt(struct clock_event_device *dev) |
1123 | { | 1134 | { |
1124 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); | 1135 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); |
1125 | struct hrtimer_clock_base *base; | 1136 | struct hrtimer_clock_base *base; |
1126 | ktime_t expires_next, now; | 1137 | ktime_t expires_next, now; |
1127 | int i, raise = 0; | 1138 | int i, raise = 0; |
1128 | 1139 | ||
1129 | BUG_ON(!cpu_base->hres_active); | 1140 | BUG_ON(!cpu_base->hres_active); |
1130 | cpu_base->nr_events++; | 1141 | cpu_base->nr_events++; |
1131 | dev->next_event.tv64 = KTIME_MAX; | 1142 | dev->next_event.tv64 = KTIME_MAX; |
1132 | 1143 | ||
1133 | retry: | 1144 | retry: |
1134 | now = ktime_get(); | 1145 | now = ktime_get(); |
1135 | 1146 | ||
1136 | expires_next.tv64 = KTIME_MAX; | 1147 | expires_next.tv64 = KTIME_MAX; |
1137 | 1148 | ||
1138 | base = cpu_base->clock_base; | 1149 | base = cpu_base->clock_base; |
1139 | 1150 | ||
1140 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { | 1151 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { |
1141 | ktime_t basenow; | 1152 | ktime_t basenow; |
1142 | struct rb_node *node; | 1153 | struct rb_node *node; |
1143 | 1154 | ||
1144 | spin_lock(&cpu_base->lock); | 1155 | spin_lock(&cpu_base->lock); |
1145 | 1156 | ||
1146 | basenow = ktime_add(now, base->offset); | 1157 | basenow = ktime_add(now, base->offset); |
1147 | 1158 | ||
1148 | while ((node = base->first)) { | 1159 | while ((node = base->first)) { |
1149 | struct hrtimer *timer; | 1160 | struct hrtimer *timer; |
1150 | 1161 | ||
1151 | timer = rb_entry(node, struct hrtimer, node); | 1162 | timer = rb_entry(node, struct hrtimer, node); |
1152 | 1163 | ||
1153 | if (basenow.tv64 < timer->expires.tv64) { | 1164 | if (basenow.tv64 < timer->expires.tv64) { |
1154 | ktime_t expires; | 1165 | ktime_t expires; |
1155 | 1166 | ||
1156 | expires = ktime_sub(timer->expires, | 1167 | expires = ktime_sub(timer->expires, |
1157 | base->offset); | 1168 | base->offset); |
1158 | if (expires.tv64 < expires_next.tv64) | 1169 | if (expires.tv64 < expires_next.tv64) |
1159 | expires_next = expires; | 1170 | expires_next = expires; |
1160 | break; | 1171 | break; |
1161 | } | 1172 | } |
1162 | 1173 | ||
1163 | /* Move softirq callbacks to the pending list */ | 1174 | /* Move softirq callbacks to the pending list */ |
1164 | if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) { | 1175 | if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) { |
1165 | __remove_hrtimer(timer, base, | 1176 | __remove_hrtimer(timer, base, |
1166 | HRTIMER_STATE_PENDING, 0); | 1177 | HRTIMER_STATE_PENDING, 0); |
1167 | list_add_tail(&timer->cb_entry, | 1178 | list_add_tail(&timer->cb_entry, |
1168 | &base->cpu_base->cb_pending); | 1179 | &base->cpu_base->cb_pending); |
1169 | raise = 1; | 1180 | raise = 1; |
1170 | continue; | 1181 | continue; |
1171 | } | 1182 | } |
1172 | 1183 | ||
1173 | __run_hrtimer(timer); | 1184 | __run_hrtimer(timer); |
1174 | } | 1185 | } |
1175 | spin_unlock(&cpu_base->lock); | 1186 | spin_unlock(&cpu_base->lock); |
1176 | base++; | 1187 | base++; |
1177 | } | 1188 | } |
1178 | 1189 | ||
1179 | cpu_base->expires_next = expires_next; | 1190 | cpu_base->expires_next = expires_next; |
1180 | 1191 | ||
1181 | /* Reprogramming necessary ? */ | 1192 | /* Reprogramming necessary ? */ |
1182 | if (expires_next.tv64 != KTIME_MAX) { | 1193 | if (expires_next.tv64 != KTIME_MAX) { |
1183 | if (tick_program_event(expires_next, 0)) | 1194 | if (tick_program_event(expires_next, 0)) |
1184 | goto retry; | 1195 | goto retry; |
1185 | } | 1196 | } |
1186 | 1197 | ||
1187 | /* Raise softirq ? */ | 1198 | /* Raise softirq ? */ |
1188 | if (raise) | 1199 | if (raise) |
1189 | raise_softirq(HRTIMER_SOFTIRQ); | 1200 | raise_softirq(HRTIMER_SOFTIRQ); |
1190 | } | 1201 | } |
1191 | 1202 | ||
1192 | static void run_hrtimer_softirq(struct softirq_action *h) | 1203 | static void run_hrtimer_softirq(struct softirq_action *h) |
1193 | { | 1204 | { |
1194 | run_hrtimer_pending(&__get_cpu_var(hrtimer_bases)); | 1205 | run_hrtimer_pending(&__get_cpu_var(hrtimer_bases)); |
1195 | } | 1206 | } |
1196 | 1207 | ||
1197 | #endif /* CONFIG_HIGH_RES_TIMERS */ | 1208 | #endif /* CONFIG_HIGH_RES_TIMERS */ |
1198 | 1209 | ||
1199 | /* | 1210 | /* |
1200 | * Called from timer softirq every jiffy, expire hrtimers: | 1211 | * Called from timer softirq every jiffy, expire hrtimers: |
1201 | * | 1212 | * |
1202 | * For HRT its the fall back code to run the softirq in the timer | 1213 | * For HRT its the fall back code to run the softirq in the timer |
1203 | * softirq context in case the hrtimer initialization failed or has | 1214 | * softirq context in case the hrtimer initialization failed or has |
1204 | * not been done yet. | 1215 | * not been done yet. |
1205 | */ | 1216 | */ |
1206 | void hrtimer_run_pending(void) | 1217 | void hrtimer_run_pending(void) |
1207 | { | 1218 | { |
1208 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); | 1219 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); |
1209 | 1220 | ||
1210 | if (hrtimer_hres_active()) | 1221 | if (hrtimer_hres_active()) |
1211 | return; | 1222 | return; |
1212 | 1223 | ||
1213 | /* | 1224 | /* |
1214 | * This _is_ ugly: We have to check in the softirq context, | 1225 | * This _is_ ugly: We have to check in the softirq context, |
1215 | * whether we can switch to highres and / or nohz mode. The | 1226 | * whether we can switch to highres and / or nohz mode. The |
1216 | * clocksource switch happens in the timer interrupt with | 1227 | * clocksource switch happens in the timer interrupt with |
1217 | * xtime_lock held. Notification from there only sets the | 1228 | * xtime_lock held. Notification from there only sets the |
1218 | * check bit in the tick_oneshot code, otherwise we might | 1229 | * check bit in the tick_oneshot code, otherwise we might |
1219 | * deadlock vs. xtime_lock. | 1230 | * deadlock vs. xtime_lock. |
1220 | */ | 1231 | */ |
1221 | if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) | 1232 | if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) |
1222 | hrtimer_switch_to_hres(); | 1233 | hrtimer_switch_to_hres(); |
1223 | 1234 | ||
1224 | run_hrtimer_pending(cpu_base); | 1235 | run_hrtimer_pending(cpu_base); |
1225 | } | 1236 | } |
1226 | 1237 | ||
1227 | /* | 1238 | /* |
1228 | * Called from hardirq context every jiffy | 1239 | * Called from hardirq context every jiffy |
1229 | */ | 1240 | */ |
1230 | static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base, | 1241 | static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base, |
1231 | int index) | 1242 | int index) |
1232 | { | 1243 | { |
1233 | struct rb_node *node; | 1244 | struct rb_node *node; |
1234 | struct hrtimer_clock_base *base = &cpu_base->clock_base[index]; | 1245 | struct hrtimer_clock_base *base = &cpu_base->clock_base[index]; |
1235 | 1246 | ||
1236 | if (!base->first) | 1247 | if (!base->first) |
1237 | return; | 1248 | return; |
1238 | 1249 | ||
1239 | if (base->get_softirq_time) | 1250 | if (base->get_softirq_time) |
1240 | base->softirq_time = base->get_softirq_time(); | 1251 | base->softirq_time = base->get_softirq_time(); |
1241 | 1252 | ||
1242 | spin_lock(&cpu_base->lock); | 1253 | spin_lock(&cpu_base->lock); |
1243 | 1254 | ||
1244 | while ((node = base->first)) { | 1255 | while ((node = base->first)) { |
1245 | struct hrtimer *timer; | 1256 | struct hrtimer *timer; |
1246 | 1257 | ||
1247 | timer = rb_entry(node, struct hrtimer, node); | 1258 | timer = rb_entry(node, struct hrtimer, node); |
1248 | if (base->softirq_time.tv64 <= timer->expires.tv64) | 1259 | if (base->softirq_time.tv64 <= timer->expires.tv64) |
1249 | break; | 1260 | break; |
1250 | 1261 | ||
1251 | if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) { | 1262 | if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) { |
1252 | __remove_hrtimer(timer, base, HRTIMER_STATE_PENDING, 0); | 1263 | __remove_hrtimer(timer, base, HRTIMER_STATE_PENDING, 0); |
1253 | list_add_tail(&timer->cb_entry, | 1264 | list_add_tail(&timer->cb_entry, |
1254 | &base->cpu_base->cb_pending); | 1265 | &base->cpu_base->cb_pending); |
1255 | continue; | 1266 | continue; |
1256 | } | 1267 | } |
1257 | 1268 | ||
1258 | __run_hrtimer(timer); | 1269 | __run_hrtimer(timer); |
1259 | } | 1270 | } |
1260 | spin_unlock(&cpu_base->lock); | 1271 | spin_unlock(&cpu_base->lock); |
1261 | } | 1272 | } |
1262 | 1273 | ||
1263 | void hrtimer_run_queues(void) | 1274 | void hrtimer_run_queues(void) |
1264 | { | 1275 | { |
1265 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); | 1276 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); |
1266 | int i; | 1277 | int i; |
1267 | 1278 | ||
1268 | if (hrtimer_hres_active()) | 1279 | if (hrtimer_hres_active()) |
1269 | return; | 1280 | return; |
1270 | 1281 | ||
1271 | hrtimer_get_softirq_time(cpu_base); | 1282 | hrtimer_get_softirq_time(cpu_base); |
1272 | 1283 | ||
1273 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) | 1284 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) |
1274 | run_hrtimer_queue(cpu_base, i); | 1285 | run_hrtimer_queue(cpu_base, i); |
1275 | } | 1286 | } |
1276 | 1287 | ||
1277 | /* | 1288 | /* |
1278 | * Sleep related functions: | 1289 | * Sleep related functions: |
1279 | */ | 1290 | */ |
1280 | static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer) | 1291 | static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer) |
1281 | { | 1292 | { |
1282 | struct hrtimer_sleeper *t = | 1293 | struct hrtimer_sleeper *t = |
1283 | container_of(timer, struct hrtimer_sleeper, timer); | 1294 | container_of(timer, struct hrtimer_sleeper, timer); |
1284 | struct task_struct *task = t->task; | 1295 | struct task_struct *task = t->task; |
1285 | 1296 | ||
1286 | t->task = NULL; | 1297 | t->task = NULL; |
1287 | if (task) | 1298 | if (task) |
1288 | wake_up_process(task); | 1299 | wake_up_process(task); |
1289 | 1300 | ||
1290 | return HRTIMER_NORESTART; | 1301 | return HRTIMER_NORESTART; |
1291 | } | 1302 | } |
1292 | 1303 | ||
1293 | void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) | 1304 | void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) |
1294 | { | 1305 | { |
1295 | sl->timer.function = hrtimer_wakeup; | 1306 | sl->timer.function = hrtimer_wakeup; |
1296 | sl->task = task; | 1307 | sl->task = task; |
1297 | #ifdef CONFIG_HIGH_RES_TIMERS | 1308 | #ifdef CONFIG_HIGH_RES_TIMERS |
1298 | sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; | 1309 | sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; |
1299 | #endif | 1310 | #endif |
1300 | } | 1311 | } |
1301 | 1312 | ||
1302 | static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode) | 1313 | static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode) |
1303 | { | 1314 | { |
1304 | hrtimer_init_sleeper(t, current); | 1315 | hrtimer_init_sleeper(t, current); |
1305 | 1316 | ||
1306 | do { | 1317 | do { |
1307 | set_current_state(TASK_INTERRUPTIBLE); | 1318 | set_current_state(TASK_INTERRUPTIBLE); |
1308 | hrtimer_start(&t->timer, t->timer.expires, mode); | 1319 | hrtimer_start(&t->timer, t->timer.expires, mode); |
1309 | if (!hrtimer_active(&t->timer)) | 1320 | if (!hrtimer_active(&t->timer)) |
1310 | t->task = NULL; | 1321 | t->task = NULL; |
1311 | 1322 | ||
1312 | if (likely(t->task)) | 1323 | if (likely(t->task)) |
1313 | schedule(); | 1324 | schedule(); |
1314 | 1325 | ||
1315 | hrtimer_cancel(&t->timer); | 1326 | hrtimer_cancel(&t->timer); |
1316 | mode = HRTIMER_MODE_ABS; | 1327 | mode = HRTIMER_MODE_ABS; |
1317 | 1328 | ||
1318 | } while (t->task && !signal_pending(current)); | 1329 | } while (t->task && !signal_pending(current)); |
1319 | 1330 | ||
1320 | __set_current_state(TASK_RUNNING); | 1331 | __set_current_state(TASK_RUNNING); |
1321 | 1332 | ||
1322 | return t->task == NULL; | 1333 | return t->task == NULL; |
1323 | } | 1334 | } |
1324 | 1335 | ||
1325 | static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp) | 1336 | static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp) |
1326 | { | 1337 | { |
1327 | struct timespec rmt; | 1338 | struct timespec rmt; |
1328 | ktime_t rem; | 1339 | ktime_t rem; |
1329 | 1340 | ||
1330 | rem = ktime_sub(timer->expires, timer->base->get_time()); | 1341 | rem = ktime_sub(timer->expires, timer->base->get_time()); |
1331 | if (rem.tv64 <= 0) | 1342 | if (rem.tv64 <= 0) |
1332 | return 0; | 1343 | return 0; |
1333 | rmt = ktime_to_timespec(rem); | 1344 | rmt = ktime_to_timespec(rem); |
1334 | 1345 | ||
1335 | if (copy_to_user(rmtp, &rmt, sizeof(*rmtp))) | 1346 | if (copy_to_user(rmtp, &rmt, sizeof(*rmtp))) |
1336 | return -EFAULT; | 1347 | return -EFAULT; |
1337 | 1348 | ||
1338 | return 1; | 1349 | return 1; |
1339 | } | 1350 | } |
1340 | 1351 | ||
1341 | long __sched hrtimer_nanosleep_restart(struct restart_block *restart) | 1352 | long __sched hrtimer_nanosleep_restart(struct restart_block *restart) |
1342 | { | 1353 | { |
1343 | struct hrtimer_sleeper t; | 1354 | struct hrtimer_sleeper t; |
1344 | struct timespec __user *rmtp; | 1355 | struct timespec __user *rmtp; |
1345 | 1356 | ||
1346 | hrtimer_init(&t.timer, restart->arg0, HRTIMER_MODE_ABS); | 1357 | hrtimer_init(&t.timer, restart->arg0, HRTIMER_MODE_ABS); |
1347 | t.timer.expires.tv64 = ((u64)restart->arg3 << 32) | (u64) restart->arg2; | 1358 | t.timer.expires.tv64 = ((u64)restart->arg3 << 32) | (u64) restart->arg2; |
1348 | 1359 | ||
1349 | if (do_nanosleep(&t, HRTIMER_MODE_ABS)) | 1360 | if (do_nanosleep(&t, HRTIMER_MODE_ABS)) |
1350 | return 0; | 1361 | return 0; |
1351 | 1362 | ||
1352 | rmtp = (struct timespec __user *)restart->arg1; | 1363 | rmtp = (struct timespec __user *)restart->arg1; |
1353 | if (rmtp) { | 1364 | if (rmtp) { |
1354 | int ret = update_rmtp(&t.timer, rmtp); | 1365 | int ret = update_rmtp(&t.timer, rmtp); |
1355 | if (ret <= 0) | 1366 | if (ret <= 0) |
1356 | return ret; | 1367 | return ret; |
1357 | } | 1368 | } |
1358 | 1369 | ||
1359 | /* The other values in restart are already filled in */ | 1370 | /* The other values in restart are already filled in */ |
1360 | return -ERESTART_RESTARTBLOCK; | 1371 | return -ERESTART_RESTARTBLOCK; |
1361 | } | 1372 | } |
1362 | 1373 | ||
1363 | long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, | 1374 | long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, |
1364 | const enum hrtimer_mode mode, const clockid_t clockid) | 1375 | const enum hrtimer_mode mode, const clockid_t clockid) |
1365 | { | 1376 | { |
1366 | struct restart_block *restart; | 1377 | struct restart_block *restart; |
1367 | struct hrtimer_sleeper t; | 1378 | struct hrtimer_sleeper t; |
1368 | 1379 | ||
1369 | hrtimer_init(&t.timer, clockid, mode); | 1380 | hrtimer_init(&t.timer, clockid, mode); |
1370 | t.timer.expires = timespec_to_ktime(*rqtp); | 1381 | t.timer.expires = timespec_to_ktime(*rqtp); |
1371 | if (do_nanosleep(&t, mode)) | 1382 | if (do_nanosleep(&t, mode)) |
1372 | return 0; | 1383 | return 0; |
1373 | 1384 | ||
1374 | /* Absolute timers do not update the rmtp value and restart: */ | 1385 | /* Absolute timers do not update the rmtp value and restart: */ |
1375 | if (mode == HRTIMER_MODE_ABS) | 1386 | if (mode == HRTIMER_MODE_ABS) |
1376 | return -ERESTARTNOHAND; | 1387 | return -ERESTARTNOHAND; |
1377 | 1388 | ||
1378 | if (rmtp) { | 1389 | if (rmtp) { |
1379 | int ret = update_rmtp(&t.timer, rmtp); | 1390 | int ret = update_rmtp(&t.timer, rmtp); |
1380 | if (ret <= 0) | 1391 | if (ret <= 0) |
1381 | return ret; | 1392 | return ret; |
1382 | } | 1393 | } |
1383 | 1394 | ||
1384 | restart = ¤t_thread_info()->restart_block; | 1395 | restart = ¤t_thread_info()->restart_block; |
1385 | restart->fn = hrtimer_nanosleep_restart; | 1396 | restart->fn = hrtimer_nanosleep_restart; |
1386 | restart->arg0 = (unsigned long) t.timer.base->index; | 1397 | restart->arg0 = (unsigned long) t.timer.base->index; |
1387 | restart->arg1 = (unsigned long) rmtp; | 1398 | restart->arg1 = (unsigned long) rmtp; |
1388 | restart->arg2 = t.timer.expires.tv64 & 0xFFFFFFFF; | 1399 | restart->arg2 = t.timer.expires.tv64 & 0xFFFFFFFF; |
1389 | restart->arg3 = t.timer.expires.tv64 >> 32; | 1400 | restart->arg3 = t.timer.expires.tv64 >> 32; |
1390 | 1401 | ||
1391 | return -ERESTART_RESTARTBLOCK; | 1402 | return -ERESTART_RESTARTBLOCK; |
1392 | } | 1403 | } |
1393 | 1404 | ||
1394 | asmlinkage long | 1405 | asmlinkage long |
1395 | sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp) | 1406 | sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp) |
1396 | { | 1407 | { |
1397 | struct timespec tu; | 1408 | struct timespec tu; |
1398 | 1409 | ||
1399 | if (copy_from_user(&tu, rqtp, sizeof(tu))) | 1410 | if (copy_from_user(&tu, rqtp, sizeof(tu))) |
1400 | return -EFAULT; | 1411 | return -EFAULT; |
1401 | 1412 | ||
1402 | if (!timespec_valid(&tu)) | 1413 | if (!timespec_valid(&tu)) |
1403 | return -EINVAL; | 1414 | return -EINVAL; |
1404 | 1415 | ||
1405 | return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC); | 1416 | return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC); |
1406 | } | 1417 | } |
1407 | 1418 | ||
1408 | /* | 1419 | /* |
1409 | * Functions related to boot-time initialization: | 1420 | * Functions related to boot-time initialization: |
1410 | */ | 1421 | */ |
1411 | static void __cpuinit init_hrtimers_cpu(int cpu) | 1422 | static void __cpuinit init_hrtimers_cpu(int cpu) |
1412 | { | 1423 | { |
1413 | struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); | 1424 | struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); |
1414 | int i; | 1425 | int i; |
1415 | 1426 | ||
1416 | spin_lock_init(&cpu_base->lock); | 1427 | spin_lock_init(&cpu_base->lock); |
1417 | lockdep_set_class(&cpu_base->lock, &cpu_base->lock_key); | 1428 | lockdep_set_class(&cpu_base->lock, &cpu_base->lock_key); |
1418 | 1429 | ||
1419 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) | 1430 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) |
1420 | cpu_base->clock_base[i].cpu_base = cpu_base; | 1431 | cpu_base->clock_base[i].cpu_base = cpu_base; |
1421 | 1432 | ||
1422 | INIT_LIST_HEAD(&cpu_base->cb_pending); | 1433 | INIT_LIST_HEAD(&cpu_base->cb_pending); |
1423 | hrtimer_init_hres(cpu_base); | 1434 | hrtimer_init_hres(cpu_base); |
1424 | } | 1435 | } |
1425 | 1436 | ||
1426 | #ifdef CONFIG_HOTPLUG_CPU | 1437 | #ifdef CONFIG_HOTPLUG_CPU |
1427 | 1438 | ||
1428 | static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base, | 1439 | static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base, |
1429 | struct hrtimer_clock_base *new_base) | 1440 | struct hrtimer_clock_base *new_base) |
1430 | { | 1441 | { |
1431 | struct hrtimer *timer; | 1442 | struct hrtimer *timer; |
1432 | struct rb_node *node; | 1443 | struct rb_node *node; |
1433 | 1444 | ||
1434 | while ((node = rb_first(&old_base->active))) { | 1445 | while ((node = rb_first(&old_base->active))) { |
1435 | timer = rb_entry(node, struct hrtimer, node); | 1446 | timer = rb_entry(node, struct hrtimer, node); |
1436 | BUG_ON(hrtimer_callback_running(timer)); | 1447 | BUG_ON(hrtimer_callback_running(timer)); |
1437 | __remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE, 0); | 1448 | __remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE, 0); |
1438 | timer->base = new_base; | 1449 | timer->base = new_base; |
1439 | /* | 1450 | /* |
1440 | * Enqueue the timer. Allow reprogramming of the event device | 1451 | * Enqueue the timer. Allow reprogramming of the event device |
1441 | */ | 1452 | */ |
1442 | enqueue_hrtimer(timer, new_base, 1); | 1453 | enqueue_hrtimer(timer, new_base, 1); |
1443 | } | 1454 | } |
1444 | } | 1455 | } |
1445 | 1456 | ||
1446 | static void migrate_hrtimers(int cpu) | 1457 | static void migrate_hrtimers(int cpu) |
1447 | { | 1458 | { |
1448 | struct hrtimer_cpu_base *old_base, *new_base; | 1459 | struct hrtimer_cpu_base *old_base, *new_base; |
1449 | int i; | 1460 | int i; |
1450 | 1461 | ||
1451 | BUG_ON(cpu_online(cpu)); | 1462 | BUG_ON(cpu_online(cpu)); |
1452 | old_base = &per_cpu(hrtimer_bases, cpu); | 1463 | old_base = &per_cpu(hrtimer_bases, cpu); |
1453 | new_base = &get_cpu_var(hrtimer_bases); | 1464 | new_base = &get_cpu_var(hrtimer_bases); |
1454 | 1465 | ||
1455 | tick_cancel_sched_timer(cpu); | 1466 | tick_cancel_sched_timer(cpu); |
1456 | 1467 | ||
1457 | local_irq_disable(); | 1468 | local_irq_disable(); |
1458 | double_spin_lock(&new_base->lock, &old_base->lock, | 1469 | double_spin_lock(&new_base->lock, &old_base->lock, |
1459 | smp_processor_id() < cpu); | 1470 | smp_processor_id() < cpu); |
1460 | 1471 | ||
1461 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { | 1472 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { |
1462 | migrate_hrtimer_list(&old_base->clock_base[i], | 1473 | migrate_hrtimer_list(&old_base->clock_base[i], |
1463 | &new_base->clock_base[i]); | 1474 | &new_base->clock_base[i]); |
1464 | } | 1475 | } |
1465 | 1476 | ||
1466 | double_spin_unlock(&new_base->lock, &old_base->lock, | 1477 | double_spin_unlock(&new_base->lock, &old_base->lock, |
1467 | smp_processor_id() < cpu); | 1478 | smp_processor_id() < cpu); |
1468 | local_irq_enable(); | 1479 | local_irq_enable(); |
1469 | put_cpu_var(hrtimer_bases); | 1480 | put_cpu_var(hrtimer_bases); |
1470 | } | 1481 | } |
1471 | #endif /* CONFIG_HOTPLUG_CPU */ | 1482 | #endif /* CONFIG_HOTPLUG_CPU */ |
1472 | 1483 | ||
1473 | static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self, | 1484 | static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self, |
1474 | unsigned long action, void *hcpu) | 1485 | unsigned long action, void *hcpu) |
1475 | { | 1486 | { |
1476 | unsigned int cpu = (long)hcpu; | 1487 | unsigned int cpu = (long)hcpu; |
1477 | 1488 | ||
1478 | switch (action) { | 1489 | switch (action) { |
1479 | 1490 | ||
1480 | case CPU_UP_PREPARE: | 1491 | case CPU_UP_PREPARE: |
1481 | case CPU_UP_PREPARE_FROZEN: | 1492 | case CPU_UP_PREPARE_FROZEN: |
1482 | init_hrtimers_cpu(cpu); | 1493 | init_hrtimers_cpu(cpu); |
1483 | break; | 1494 | break; |
1484 | 1495 | ||
1485 | #ifdef CONFIG_HOTPLUG_CPU | 1496 | #ifdef CONFIG_HOTPLUG_CPU |
1486 | case CPU_DEAD: | 1497 | case CPU_DEAD: |
1487 | case CPU_DEAD_FROZEN: | 1498 | case CPU_DEAD_FROZEN: |
1488 | clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu); | 1499 | clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu); |
1489 | migrate_hrtimers(cpu); | 1500 | migrate_hrtimers(cpu); |
1490 | break; | 1501 | break; |
1491 | #endif | 1502 | #endif |
1492 | 1503 | ||
1493 | default: | 1504 | default: |
1494 | break; | 1505 | break; |
1495 | } | 1506 | } |
1496 | 1507 | ||
1497 | return NOTIFY_OK; | 1508 | return NOTIFY_OK; |
1498 | } | 1509 | } |
1499 | 1510 | ||
1500 | static struct notifier_block __cpuinitdata hrtimers_nb = { | 1511 | static struct notifier_block __cpuinitdata hrtimers_nb = { |
1501 | .notifier_call = hrtimer_cpu_notify, | 1512 | .notifier_call = hrtimer_cpu_notify, |
1502 | }; | 1513 | }; |
1503 | 1514 | ||
1504 | void __init hrtimers_init(void) | 1515 | void __init hrtimers_init(void) |
1505 | { | 1516 | { |
1506 | hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE, | 1517 | hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE, |
1507 | (void *)(long)smp_processor_id()); | 1518 | (void *)(long)smp_processor_id()); |
1508 | register_cpu_notifier(&hrtimers_nb); | 1519 | register_cpu_notifier(&hrtimers_nb); |
1509 | #ifdef CONFIG_HIGH_RES_TIMERS | 1520 | #ifdef CONFIG_HIGH_RES_TIMERS |
1510 | open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq, NULL); | 1521 | open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq, NULL); |
1511 | #endif | 1522 | #endif |
1512 | } | 1523 | } |
1513 | 1524 | ||
1514 | 1525 |