kernel/timer.c

/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/slab.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))

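/*
 * Illustrative numbers (not in the original source): with CONFIG_BASE_SMALL=0,
 * TVR_BITS=8 and TVN_BITS=6, so tv1 has 256 one-jiffy slots and each of
 * tv2..tv5 has 64 slots, each level covering 64x the span of the one below:
 * tv1 covers jiffies 0..255, tv2 up to 2^14-1, tv3 up to 2^20-1, tv4 up to
 * 2^26-1 and tv5 up to MAX_TVAL = 2^(8+4*6) - 1 = 2^32 - 1 jiffies.
 */
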
struct tvec {
	struct list_head vec[TVN_SIZE];
};

struct tvec_root {
	struct list_head vec[TVR_SIZE];
};

struct tvec_base {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	unsigned long next_timer;
	unsigned long active_timers;
	struct tvec_root tv1;
	struct tvec tv2;
	struct tvec tv3;
	struct tvec tv4;
	struct tvec tv5;
} ____cacheline_aligned;

struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;

/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TIMER_DEFERRABLE);
}

static inline unsigned int tbase_get_irqsafe(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TIMER_IRQSAFE);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
	return ((struct tvec_base *)((unsigned long)base & ~TIMER_FLAG_MASK));
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
	unsigned long flags = (unsigned long)timer->base & TIMER_FLAG_MASK;

	timer->base = (struct tvec_base *)((unsigned long)(new_base) | flags);
}

static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then round, then subtract this
	 * extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffy is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	/*
	 * Make sure j is still in the future. Otherwise return the
	 * unmodified value.
	 */
	return time_is_after_jiffies(j) ? j : original;
}

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);

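/*
 * Example (illustrative, not part of the original file): a housekeeping
 * timer that should fire roughly once a second can batch its wakeup with
 * those of other timers by rounding to a whole second:
 *
 *	mod_timer(&my_timer, round_jiffies(jiffies + HZ));
 *
 * my_timer above is a hypothetical timer that was set up with its
 * callback beforehand, e.g. via setup_timer().
 */
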
/**
 * set_timer_slack - set the allowed slack for a timer
 * @timer: the timer to be modified
 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 *
 * Set the amount of time, in jiffies, that a certain timer has
 * in terms of slack. By setting this value, the timer subsystem
 * will schedule the actual timer somewhere between
 * the time mod_timer() asks for, and that time plus the slack.
 *
 * By setting the slack to -1, a percentage of the delay is used
 * instead.
 */
void set_timer_slack(struct timer_list *timer, int slack_hz)
{
	timer->slack = slack_hz;
}
EXPORT_SYMBOL_GPL(set_timer_slack);

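/*
 * Example (illustrative, not part of the original file): a timer that only
 * needs second-level accuracy can tell the subsystem it may fire up to a
 * whole second late, letting it be coalesced with other wakeups:
 *
 *	set_timer_slack(&my_timer, HZ);
 *	mod_timer(&my_timer, jiffies + 10 * HZ);
 *
 * my_timer is a hypothetical, already initialized struct timer_list.
 */
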
static void
__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than MAX_TVAL (on 64-bit
		 * architectures or with CONFIG_BASE_SMALL=1) then we
		 * use the maximum timeout.
		 */
		if (idx > MAX_TVAL) {
			idx = MAX_TVAL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}

static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	__internal_add_timer(base, timer);
	/*
	 * Update base->active_timers and base->next_timer
	 */
	if (!tbase_get_deferrable(timer->base)) {
		if (time_before(timer->expires, base->next_timer))
			base->next_timer = timer->expires;
		base->active_timers++;
	}
}

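/*
 * Worked example (illustrative, not in the original source): with
 * TVR_BITS=8, a timer armed for base->timer_jiffies + 100 has idx < 256
 * and lands in tv1 slot (expires & 255); one armed 10000 jiffies out has
 * idx < 2^14 and lands in tv2 slot ((expires >> 8) & 63), to be cascaded
 * back into tv1 when the wheel reaches that slot.
 */
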
#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	unsigned int flag = 0;

	if (likely(!timer->start_site))
		return;
	if (unlikely(tbase_get_deferrable(timer->base)))
		flag |= TIMER_STATS_FLAG_DEFERRABLE;

	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
	return ((struct timer_list *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(unsigned long data)
{
	WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The timer was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (timer->entry.next == NULL &&
		    timer->entry.prev == TIMER_ENTRY_STATIC) {
			debug_object_init(timer, &timer_debug_descr);
			debug_object_activate(timer, &timer_debug_descr);
			return 0;
		} else {
			setup_timer(timer, stub_timer, 0);
			return 1;
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (timer->entry.prev == TIMER_ENTRY_STATIC) {
			/*
			 * This is not really a fixup. The timer was
			 * statically initialized. We just make sure that it
			 * is tracked in the object tracker.
			 */
			debug_object_init(timer, &timer_debug_descr);
			return 0;
		} else {
			setup_timer(timer, stub_timer, 0);
			return 1;
		}
	default:
		return 0;
	}
}

static struct debug_obj_descr timer_debug_descr = {
	.name			= "timer_list",
	.debug_hint		= timer_debug_hint,
	.fixup_init		= timer_fixup_init,
	.fixup_activate		= timer_fixup_activate,
	.fixup_free		= timer_fixup_free,
	.fixup_assert_init	= timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
	debug_object_assert_init(timer, &timer_debug_descr);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
			     const char *name, struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif

static inline void
debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
	debug_timer_activate(timer);
	trace_timer_start(timer, expires);
}

static inline void
debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
	debug_timer_assert_init(timer);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key)
{
	struct tvec_base *base = __raw_get_cpu_var(tvec_bases);

	timer->entry.next = NULL;
	timer->base = (void *)((unsigned long)base | flags);
	timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer, unsigned int flags,
		    const char *name, struct lock_class_key *key)
{
	debug_init(timer);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);

static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
	struct list_head *entry = &timer->entry;

	debug_deactivate(timer);

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}

static inline void
detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
{
	detach_timer(timer, true);
	if (!tbase_get_deferrable(timer->base))
		base->active_timers--;
}

static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
			     bool clear_pending)
{
	if (!timer_pending(timer))
		return 0;

	detach_timer(timer, clear_pending);
	if (!tbase_get_deferrable(timer->base)) {
		base->active_timers--;
		if (timer->expires == base->next_timer)
			base->next_timer = base->timer_jiffies;
	}
	return 1;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
					unsigned long *flags)
	__acquires(timer->base->lock)
{
	struct tvec_base *base;

	for (;;) {
		struct tvec_base *prelock_base = timer->base;
		base = tbase_get_base(prelock_base);
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(prelock_base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
	    bool pending_only, int pinned)
{
	struct tvec_base *base, *new_base;
	unsigned long flags;
	int ret = 0, cpu;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	ret = detach_if_pending(timer, base, false);
	if (!ret && pending_only)
		goto out_unlock;

	debug_activate(timer, expires);

	cpu = smp_processor_id();

#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
		cpu = get_nohz_timer_target();
#endif
	new_base = per_cpu(tvec_bases, cpu);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not yet finished. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer_set_base(timer, NULL);
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer_set_base(timer, base);
		}
	}

	timer->expires = expires;
	internal_add_timer(base, timer);

out_unlock:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);

/*
 * Decide where to put the timer while taking the slack into
 * account.
 *
 * Algorithm:
 *   1) calculate the maximum (absolute) time
 *   2) calculate the highest bit where the expires and new max are different
 *   3) use this bit to make a mask
 *   4) use the bitmask to round down the maximum time, so that all last
 *      bits are zeros
 */
static inline
unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
{
	unsigned long expires_limit, mask;
	int bit;

	if (timer->slack >= 0) {
		expires_limit = expires + timer->slack;
	} else {
		long delta = expires - jiffies;

		if (delta < 256)
			return expires;

		expires_limit = expires + delta / 256;
	}
	mask = expires ^ expires_limit;
	if (mask == 0)
		return expires;

	bit = find_last_bit(&mask, BITS_PER_LONG);

	mask = (1 << bit) - 1;

	expires_limit = expires_limit & ~(mask);

	return expires_limit;
}

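/*
 * Worked example (illustrative, not in the original source): with the
 * default slack of -1 and a timer due delta = 1024 jiffies out, the
 * allowed window is delta/256 = 4 jiffies. The highest bit differing
 * between expires and expires + 4 is turned into a mask, and the limit
 * is rounded down to that power-of-two boundary, so timers with similar
 * expiries collapse onto the same bucket and can fire together.
 */
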
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	expires = apply_slack(timer, expires);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer_pending(timer) && timer->expires == expires)
		return 1;

	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);

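/*
 * Example (illustrative, not part of the original file): the common
 * watchdog pattern arms a callback and re-arms it from within itself:
 *
 *	static void my_watchdog_fn(unsigned long data)
 *	{
 *		// ... check for progress ...
 *		mod_timer(&my_watchdog, jiffies + 2 * HZ);
 *	}
 *
 *	setup_timer(&my_watchdog, my_watchdog_fn, 0);
 *	mod_timer(&my_watchdog, jiffies + 2 * HZ);
 *
 * my_watchdog and my_watchdog_fn are hypothetical names.
 */
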
/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 * and to ensure that the timer is scheduled on the current CPU.
 *
 * Note that this does not prevent the timer from being migrated
 * when the current CPU goes offline.  If this is a problem for
 * you, use CPU-hotplug notifiers to handle it correctly, for
 * example, cancelling the timer when the corresponding CPU goes
 * offline.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);

/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct tvec_base *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer_set_base(timer, base);
	debug_activate(timer, timer->expires);
	internal_add_timer(base, timer);
	/*
	 * Check whether the other CPU is in dynticks mode and needs
	 * to be triggered to reevaluate the timer wheel.
	 * We are protected against the other CPU fiddling
	 * with the timer by holding the timer base lock. This also
	 * makes sure that a CPU on the way to stop its tick can not
	 * evaluate the timer wheel.
	 */
	wake_up_nohz_cpu(cpu);
	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);

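/*
 * Example (illustrative, not part of the original file): arming a timer
 * on a specific CPU, e.g. to keep per-CPU statistics flushed locally:
 *
 *	my_timer.expires = jiffies + HZ;
 *	add_timer_on(&my_timer, cpu);
 *
 * my_timer is a hypothetical, already initialized and not-pending timer;
 * add_timer_on() queues it on that CPU's timer base instead of picking
 * a target the way add_timer()/mod_timer() may.
 */
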
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = 0;

	debug_assert_init(timer);

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		ret = detach_if_pending(timer, base, true);
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: timer to delete
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer != timer) {
		timer_stats_timer_clear_start_info(timer);
		ret = detach_if_pending(timer, base, true);
	}
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

#ifdef CONFIG_SMP
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts unless the timer is an irqsafe one. The caller must
 * not hold locks which would prevent completion of the timer's
 * handler. The timer's handler must not call add_timer_on(). Upon exit the
 * timer is not queued and the handler is not running on any CPU.
 *
 * Note: For !irqsafe timers, you must not hold locks that are held in
 *   interrupt context while calling this function. Even if the lock has
 *   nothing to do with the timer in question.  Here's why:
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                     <SOFTIRQ>
 *                                       call_timer_fn();
 *                                       base->running_timer = mytimer;
 *    spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                       spin_lock(somelock);
 *    del_timer_sync(mytimer);
 *      while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but
 * it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	/*
	 * If lockdep gives a backtrace here, please reference
	 * the synchronization rules above.
	 */
	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif
	/*
	 * don't use it in hardirq context, because it
	 * could lead to deadlock.
	 */
	WARN_ON(in_irq() && !tbase_get_irqsafe(timer->base));
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL(del_timer_sync);
#endif

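/*
 * Example (illustrative, not part of the original file): typical teardown
 * in a driver's remove path, called from process context with no locks
 * held that the timer handler might also take:
 *
 *	del_timer_sync(&my_timer);
 *	kfree(my_data);		// safe: handler finished and not queued
 *
 * my_timer and my_data are hypothetical names.
 */
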
static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer, *tmp;
	struct list_head tv_list;

	list_replace_init(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		BUG_ON(tbase_get_base(timer->base) != base);
		/* No accounting, while moving them */
		__internal_add_timer(base, timer);
	}

	return index;
}

static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
			  unsigned long data)
{
	int preempt_count = preempt_count();

#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the timer from inside the
	 * function that is called from it, this we need to take into
	 * account for lockdep too.  To avoid bogus "held lock freed"
	 * warnings as well as problems when looking into
	 * timer->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map;

	lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
#endif
	/*
	 * Couple the lock chain with the lock chain at
	 * del_timer_sync() by acquiring the lock_map around the fn()
	 * call here and in del_timer_sync().
	 */
	lock_map_acquire(&lockdep_map);

	trace_timer_expire_entry(timer);
	fn(data);
	trace_timer_expire_exit(timer);

	lock_map_release(&lockdep_map);

	if (preempt_count != preempt_count()) {
		WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
			  fn, preempt_count, preempt_count());
		/*
		 * Restore the preempt count. That gives us a decent
		 * chance to survive and extract information. If the
		 * callback kept a lock held, bad luck, but not worse
		 * than the BUG() we had.
		 */
		preempt_count() = preempt_count;
	}
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list;
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_replace_init(base->tv1.vec + index, &work_list);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;
			bool irqsafe;

			timer = list_first_entry(head, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;
			irqsafe = tbase_get_irqsafe(timer->base);

			timer_stats_account_timer(timer);

			base->running_timer = timer;
			detach_expired_timer(timer, base);

			if (irqsafe) {
				spin_unlock(&base->lock);
				call_timer_fn(timer, fn, data);
				spin_lock(&base->lock);
			} else {
				spin_unlock_irq(&base->lock);
				call_timer_fn(timer, fn, data);
				spin_lock_irq(&base->lock);
			}
		}
	}
	base->running_timer = NULL;
	spin_unlock_irq(&base->lock);
}

#ifdef CONFIG_NO_HZ_COMMON
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
	unsigned long timer_jiffies = base->timer_jiffies;
	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
	int index, slot, array, found = 0;
	struct timer_list *nte;
	struct tvec *varray[4];

	/* Look for timer events in tv1. */
	index = slot = timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + slot, entry) {
			if (tbase_get_deferrable(nte->base))
				continue;

			found = 1;
			expires = nte->expires;
			/* Look at the cascade bucket(s)? */
			if (!index || slot < index)
				goto cascade;
			return expires;
		}
		slot = (slot + 1) & TVR_MASK;
	} while (slot != index);

cascade:
	/* Calculate the next cascade event */
	if (index)
		timer_jiffies += TVR_SIZE - index;
	timer_jiffies >>= TVR_BITS;

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;

	for (array = 0; array < 4; array++) {
		struct tvec *varp = varray[array];

		index = slot = timer_jiffies & TVN_MASK;
		do {
			list_for_each_entry(nte, varp->vec + slot, entry) {
				if (tbase_get_deferrable(nte->base))
					continue;

				found = 1;
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			}
			/*
			 * Do we still search for the first timer or are
			 * we looking up the cascade buckets ?
			 */
			if (found) {
				/* Look at the cascade bucket(s)? */
				if (!index || slot < index)
					break;
				return expires;
			}
			slot = (slot + 1) & TVN_MASK;
		} while (slot != index);

		if (index)
			timer_jiffies += TVN_SIZE - index;
		timer_jiffies >>= TVN_BITS;
	}
	return expires;
}

/*
 * Check, if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
					    unsigned long expires)
{
	ktime_t hr_delta = hrtimer_get_next_event();
	struct timespec tsdelta;
	unsigned long delta;

	if (hr_delta.tv64 == KTIME_MAX)
		return expires;

	/*
	 * Expired timer available, let it expire in the next tick
	 */
	if (hr_delta.tv64 <= 0)
		return now + 1;

	tsdelta = ktime_to_timespec(hr_delta);
	delta = timespec_to_jiffies(&tsdelta);

	/*
	 * Limit the delta to the max value, which is checked in
	 * tick_nohz_stop_sched_tick():
	 */
	if (delta > NEXT_TIMER_MAX_DELTA)
		delta = NEXT_TIMER_MAX_DELTA;

	/*
	 * Take rounding errors into account and make sure that it
	 * expires in the next tick. Otherwise we go into an endless
	 * ping pong due to tick_nohz_stop_sched_tick() retriggering
	 * the timer softirq
	 */
	if (delta < 1)
		delta = 1;
	now += delta;
	if (time_before(now, expires))
		return now;
	return expires;
}

/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
	struct tvec_base *base = __this_cpu_read(tvec_bases);
	unsigned long expires = now + NEXT_TIMER_MAX_DELTA;

	/*
	 * Pretend that there is no timer pending if the cpu is offline.
	 * Possible pending timers will be migrated later to an active cpu.
	 */
	if (cpu_is_offline(smp_processor_id()))
		return expires;

	spin_lock(&base->lock);
	if (base->active_timers) {
		if (time_before_eq(base->next_timer, base->timer_jiffies))
			base->next_timer = __next_timer_interrupt(base);
		expires = base->next_timer;
	}
	spin_unlock(&base->lock);

	if (time_before_eq(expires, now))
		return now;

	return cmp_next_hrtimer_event(now, expires);
}
#endif

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
	run_local_timers();
	rcu_check_callbacks(cpu, user_tick);
#ifdef CONFIG_IRQ_WORK
	if (in_irq())
		irq_work_run();
#endif
	scheduler_tick();
	run_posix_cpu_timers(p);
}

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	struct tvec_base *base = __this_cpu_read(tvec_bases);

	hrtimer_run_pending();

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	hrtimer_run_queues();
	raise_softirq(TIMER_SOFTIRQ);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
	return alarm_setitimer(seconds);
}

#endif

static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout) {
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative value
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
	schedule();
	del_singleshot_timer_sync(&timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);

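/*
 * Example (illustrative, not part of the original file): waiting up to one
 * second, the canonical pattern around this routine:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);	// 0 if it timed out
 *
 * The task state must be set first; otherwise schedule_timeout() returns
 * immediately, as described above.
 */
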
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

static int init_timers_cpu(int cpu)
{
	int j;
	struct tvec_base *base;
	static char tvec_base_done[NR_CPUS];

	if (!tvec_base_done[cpu]) {
		static char boot_done;

		if (boot_done) {
			/*
			 * The APs use this path later in boot
			 */
			base = kmalloc_node(sizeof(*base),
					    GFP_KERNEL | __GFP_ZERO,
					    cpu_to_node(cpu));
			if (!base)
				return -ENOMEM;

			/* Make sure that tvec_base is 2 byte aligned */
			if (tbase_get_deferrable(base)) {
				WARN_ON(1);
				kfree(base);
				return -ENOMEM;
			}
			per_cpu(tvec_bases, cpu) = base;
		} else {
			/*
			 * This is for the boot CPU - we use compile-time
			 * static initialisation because per-cpu memory isn't
			 * ready yet and because the memory allocators are not
			 * initialised either.
			 */
			boot_done = 1;
			base = &boot_tvec_bases;
		}
		spin_lock_init(&base->lock);
		tvec_base_done[cpu] = 1;
	} else {
		base = per_cpu(tvec_bases, cpu);
	}

	for (j = 0; j < TVN_SIZE; j++) {
		INIT_LIST_HEAD(base->tv5.vec + j);
		INIT_LIST_HEAD(base->tv4.vec + j);
		INIT_LIST_HEAD(base->tv3.vec + j);
		INIT_LIST_HEAD(base->tv2.vec + j);
	}
	for (j = 0; j < TVR_SIZE; j++)
		INIT_LIST_HEAD(base->tv1.vec + j);

	base->timer_jiffies = jiffies;
	base->next_timer = base->timer_jiffies;
	base->active_timers = 0;
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_first_entry(head, struct timer_list, entry);
		/* We ignore the accounting on the dying cpu */
		detach_timer(timer, false);
		timer_set_base(timer, new_base);
		internal_add_timer(new_base, timer);
	}
}

static void migrate_timers(int cpu)
{
	struct tvec_base *old_base;
	struct tvec_base *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(tvec_bases, cpu);
	new_base = get_cpu_var(tvec_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	spin_lock_irq(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	BUG_ON(old_base->running_timer);

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	spin_unlock(&old_base->lock);
	spin_unlock_irq(&new_base->lock);
	put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	int err;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		err = init_timers_cpu(cpu);
		if (err < 0)
			return notifier_from_errno(err);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block timers_nb = {
	.notifier_call	= timer_cpu_notify,
};

void __init init_timers(void)
{
	int err;

	/* ensure there are enough low bits for flags in timer->base pointer */
	BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK);

	err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
			       (void *)(long)smp_processor_id());
	init_timer_stats();

	BUG_ON(err != NOTIFY_OK);
	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}
EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}
EXPORT_SYMBOL(msleep_interruptible);

static int __sched do_usleep_range(unsigned long min, unsigned long max)
{
	ktime_t kmin;
	unsigned long delta;

	kmin = ktime_set(0, min * NSEC_PER_USEC);
	delta = (max - min) * NSEC_PER_USEC;
	return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
}

/**
 * usleep_range - Drop in replacement for udelay where wakeup is flexible
 * @min: Minimum time in usecs to sleep
 * @max: Maximum time in usecs to sleep
 */
void usleep_range(unsigned long min, unsigned long max)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	do_usleep_range(min, max);
}
EXPORT_SYMBOL(usleep_range);
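
/*
 * Example (illustrative, not part of the original file): choosing between
 * the sleep primitives in process context:
 *
 *	usleep_range(50, 200);	// short sleep, flexible hrtimer wakeup
 *	msleep(20);		// >= ~20ms, jiffy granularity is fine
 *
 * usleep_range() is built on hrtimers, so it keeps sub-jiffy accuracy,
 * while the min/max window still lets the scheduler coalesce wakeups.
 */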