Commit 3c8aa39d7c445ae2612b6b626f76f077e7a7ab0d
Committed by: Linus Torvalds
1 parent: c9cb2e3d7c
Exists in: master and 4 other branches
[PATCH] hrtimers: cleanup locking
Improve kernel/hrtimers.c locking: use a per-CPU base with a lock to control locking of all clocks belonging to a CPU. This simplifies code that needs to lock all clocks at once. This makes life easier for high-res timers and dyntick. No functional changes.

[ optimization change from Andrew Morton <akpm@osdl.org> ]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
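As a rough illustration of the locking change described above (a sketch only, not the kernel code; the struct and function names here are invented for clarity), the old scheme kept a spinlock in every clock base, while the new scheme hoists a single lock into the per-CPU base that is shared by all of that CPU's clock bases:

    #include <linux/spinlock.h>

    /*
     * Illustrative stand-ins only -- the real structures are the ones
     * changed in include/linux/hrtimer.h below.
     */
    struct new_cpu_base;

    /* Old scheme: each clock base carried its own lock. */
    struct old_clock_base {
    	spinlock_t		lock;
    	/* ... rbtree of timers, resolution, get_time(), ... */
    };

    /* New scheme: the lock lives in the per-CPU base instead. */
    struct new_clock_base {
    	struct new_cpu_base	*cpu_base;	/* back pointer to the CPU base */
    	/* ... rbtree of timers, resolution, get_time(), ... */
    };

    struct new_cpu_base {
    	spinlock_t		lock;		/* protects every clock base below */
    	struct new_clock_base	clock_base[2];	/* CLOCK_REALTIME, CLOCK_MONOTONIC */
    };

    /* Walking all clocks of a CPU: one lock/unlock per base before ... */
    static void walk_all_clocks_old(struct old_clock_base *base, int nr_bases)
    {
    	int i;

    	for (i = 0; i < nr_bases; i++) {
    		spin_lock(&base[i].lock);
    		/* ... inspect base[i] ... */
    		spin_unlock(&base[i].lock);
    	}
    }

    /* ... and a single lock covering all of them afterwards. */
    static void walk_all_clocks_new(struct new_cpu_base *cpu_base)
    {
    	int i;

    	spin_lock(&cpu_base->lock);
    	for (i = 0; i < 2; i++) {
    		/* ... inspect cpu_base->clock_base[i] ... */
    	}
    	spin_unlock(&cpu_base->lock);
    }

The single per-CPU lock is what lets hrtimer_get_next_event() and the CPU-hotplug migration path in the diff below take the lock once instead of iterating over per-clock locks.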
Showing 2 changed files with 126 additions and 101 deletions
include/linux/hrtimer.h
... | ... | @@ -21,6 +21,9 @@ |
21 | 21 | #include <linux/list.h> |
22 | 22 | #include <linux/wait.h> |
23 | 23 | |
24 | +struct hrtimer_clock_base; | |
25 | +struct hrtimer_cpu_base; | |
26 | + | |
24 | 27 | /* |
25 | 28 | * Mode arguments of xxx_hrtimer functions: |
26 | 29 | */ |
... | ... | @@ -37,8 +40,6 @@ |
37 | 40 | HRTIMER_RESTART, /* Timer must be restarted */ |
38 | 41 | }; |
39 | 42 | |
40 | -struct hrtimer_base; | |
41 | - | |
42 | 43 | /** |
43 | 44 | * struct hrtimer - the basic hrtimer structure |
44 | 45 | * @node: red black tree node for time ordered insertion |
... | ... | @@ -51,10 +52,10 @@ |
51 | 52 | * The hrtimer structure must be initialized by init_hrtimer_#CLOCKTYPE() |
52 | 53 | */ |
53 | 54 | struct hrtimer { |
54 | - struct rb_node node; | |
55 | - ktime_t expires; | |
56 | - enum hrtimer_restart (*function)(struct hrtimer *); | |
57 | - struct hrtimer_base *base; | |
55 | + struct rb_node node; | |
56 | + ktime_t expires; | |
57 | + enum hrtimer_restart (*function)(struct hrtimer *); | |
58 | + struct hrtimer_clock_base *base; | |
58 | 59 | }; |
59 | 60 | |
60 | 61 | /** |
61 | 62 | |
62 | 63 | |
63 | 64 | |
64 | 65 | |
65 | 66 | |
66 | 67 | |
... | ... | @@ -71,29 +72,41 @@ |
71 | 72 | |
72 | 73 | /** |
73 | 74 | * struct hrtimer_base - the timer base for a specific clock |
74 | - * @index: clock type index for per_cpu support when moving a timer | |
75 | - * to a base on another cpu. | |
76 | - * @lock: lock protecting the base and associated timers | |
75 | + * @index: clock type index for per_cpu support when moving a | |
76 | + * timer to a base on another cpu. | |
77 | 77 | * @active: red black tree root node for the active timers |
78 | 78 | * @first: pointer to the timer node which expires first |
79 | 79 | * @resolution: the resolution of the clock, in nanoseconds |
80 | 80 | * @get_time: function to retrieve the current time of the clock |
81 | 81 | * @get_softirq_time: function to retrieve the current time from the softirq |
82 | - * @curr_timer: the timer which is executing a callback right now | |
83 | 82 | * @softirq_time: the time when running the hrtimer queue in the softirq |
84 | - * @lock_key: the lock_class_key for use with lockdep | |
85 | 83 | */ |
86 | -struct hrtimer_base { | |
84 | +struct hrtimer_clock_base { | |
85 | + struct hrtimer_cpu_base *cpu_base; | |
87 | 86 | clockid_t index; |
88 | - spinlock_t lock; | |
89 | 87 | struct rb_root active; |
90 | 88 | struct rb_node *first; |
91 | 89 | ktime_t resolution; |
92 | 90 | ktime_t (*get_time)(void); |
93 | 91 | ktime_t (*get_softirq_time)(void); |
94 | - struct hrtimer *curr_timer; | |
95 | 92 | ktime_t softirq_time; |
96 | - struct lock_class_key lock_key; | |
93 | +}; | |
94 | + | |
95 | +#define HRTIMER_MAX_CLOCK_BASES 2 | |
96 | + | |
97 | +/* | |
98 | + * struct hrtimer_cpu_base - the per cpu clock bases | |
99 | + * @lock: lock protecting the base and associated clock bases | |
100 | + * and timers | |
101 | + * @lock_key: the lock_class_key for use with lockdep | |
102 | + * @clock_base: array of clock bases for this cpu | |
103 | + * @curr_timer: the timer which is executing a callback right now | |
104 | + */ | |
105 | +struct hrtimer_cpu_base { | |
106 | + spinlock_t lock; | |
107 | + struct lock_class_key lock_key; | |
108 | + struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; | |
109 | + struct hrtimer *curr_timer; | |
97 | 110 | }; |
98 | 111 | |
99 | 112 | /* |
kernel/hrtimer.c
1 | 1 | /* |
2 | 2 | * linux/kernel/hrtimer.c |
3 | 3 | * |
4 | - * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de> | |
5 | - * Copyright(C) 2005, Red Hat, Inc., Ingo Molnar | |
4 | + * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de> | |
5 | + * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar | |
6 | + * Copyright(C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com> | |
6 | 7 | * |
7 | 8 | * High-resolution kernel timers |
8 | 9 | * |
9 | 10 | |
10 | 11 | |
... | ... | @@ -79,21 +80,22 @@ |
79 | 80 | * This ensures that we capture erroneous accesses to these clock ids |
80 | 81 | * rather than moving them into the range of valid clock id's. |
81 | 82 | */ |
82 | - | |
83 | -#define MAX_HRTIMER_BASES 2 | |
84 | - | |
85 | -static DEFINE_PER_CPU(struct hrtimer_base, hrtimer_bases[MAX_HRTIMER_BASES]) = | |
83 | +static DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = | |
86 | 84 | { |
85 | + | |
86 | + .clock_base = | |
87 | 87 | { |
88 | - .index = CLOCK_REALTIME, | |
89 | - .get_time = &ktime_get_real, | |
90 | - .resolution = KTIME_REALTIME_RES, | |
91 | - }, | |
92 | - { | |
93 | - .index = CLOCK_MONOTONIC, | |
94 | - .get_time = &ktime_get, | |
95 | - .resolution = KTIME_MONOTONIC_RES, | |
96 | - }, | |
88 | + { | |
89 | + .index = CLOCK_REALTIME, | |
90 | + .get_time = &ktime_get_real, | |
91 | + .resolution = KTIME_REALTIME_RES, | |
92 | + }, | |
93 | + { | |
94 | + .index = CLOCK_MONOTONIC, | |
95 | + .get_time = &ktime_get, | |
96 | + .resolution = KTIME_MONOTONIC_RES, | |
97 | + }, | |
98 | + } | |
97 | 99 | }; |
98 | 100 | |
99 | 101 | /** |
... | ... | @@ -125,7 +127,7 @@ |
125 | 127 | * Get the coarse grained time at the softirq based on xtime and |
126 | 128 | * wall_to_monotonic. |
127 | 129 | */ |
128 | -static void hrtimer_get_softirq_time(struct hrtimer_base *base) | |
130 | +static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base) | |
129 | 131 | { |
130 | 132 | ktime_t xtim, tomono; |
131 | 133 | struct timespec xts; |
... | ... | @@ -142,8 +144,9 @@ |
142 | 144 | |
143 | 145 | xtim = timespec_to_ktime(xts); |
144 | 146 | tomono = timespec_to_ktime(wall_to_monotonic); |
145 | - base[CLOCK_REALTIME].softirq_time = xtim; | |
146 | - base[CLOCK_MONOTONIC].softirq_time = ktime_add(xtim, tomono); | |
147 | + base->clock_base[CLOCK_REALTIME].softirq_time = xtim; | |
148 | + base->clock_base[CLOCK_MONOTONIC].softirq_time = | |
149 | + ktime_add(xtim, tomono); | |
147 | 150 | } |
148 | 151 | |
149 | 152 | /* |
150 | 153 | |
151 | 154 | |
152 | 155 | |
... | ... | @@ -166,19 +169,20 @@ |
166 | 169 | * possible to set timer->base = NULL and drop the lock: the timer remains |
167 | 170 | * locked. |
168 | 171 | */ |
169 | -static struct hrtimer_base *lock_hrtimer_base(const struct hrtimer *timer, | |
170 | - unsigned long *flags) | |
172 | +static | |
173 | +struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer, | |
174 | + unsigned long *flags) | |
171 | 175 | { |
172 | - struct hrtimer_base *base; | |
176 | + struct hrtimer_clock_base *base; | |
173 | 177 | |
174 | 178 | for (;;) { |
175 | 179 | base = timer->base; |
176 | 180 | if (likely(base != NULL)) { |
177 | - spin_lock_irqsave(&base->lock, *flags); | |
181 | + spin_lock_irqsave(&base->cpu_base->lock, *flags); | |
178 | 182 | if (likely(base == timer->base)) |
179 | 183 | return base; |
180 | 184 | /* The timer has migrated to another CPU: */ |
181 | - spin_unlock_irqrestore(&base->lock, *flags); | |
185 | + spin_unlock_irqrestore(&base->cpu_base->lock, *flags); | |
182 | 186 | } |
183 | 187 | cpu_relax(); |
184 | 188 | } |
185 | 189 | |
186 | 190 | |
... | ... | @@ -187,12 +191,14 @@ |
187 | 191 | /* |
188 | 192 | * Switch the timer base to the current CPU when possible. |
189 | 193 | */ |
190 | -static inline struct hrtimer_base * | |
191 | -switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_base *base) | |
194 | +static inline struct hrtimer_clock_base * | |
195 | +switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base) | |
192 | 196 | { |
193 | - struct hrtimer_base *new_base; | |
197 | + struct hrtimer_clock_base *new_base; | |
198 | + struct hrtimer_cpu_base *new_cpu_base; | |
194 | 199 | |
195 | - new_base = &__get_cpu_var(hrtimer_bases)[base->index]; | |
200 | + new_cpu_base = &__get_cpu_var(hrtimer_bases); | |
201 | + new_base = &new_cpu_base->clock_base[base->index]; | |
196 | 202 | |
197 | 203 | if (base != new_base) { |
198 | 204 | /* |
199 | 205 | |
... | ... | @@ -204,13 +210,13 @@ |
204 | 210 | * completed. There is no conflict as we hold the lock until |
205 | 211 | * the timer is enqueued. |
206 | 212 | */ |
207 | - if (unlikely(base->curr_timer == timer)) | |
213 | + if (unlikely(base->cpu_base->curr_timer == timer)) | |
208 | 214 | return base; |
209 | 215 | |
210 | 216 | /* See the comment in lock_timer_base() */ |
211 | 217 | timer->base = NULL; |
212 | - spin_unlock(&base->lock); | |
213 | - spin_lock(&new_base->lock); | |
218 | + spin_unlock(&base->cpu_base->lock); | |
219 | + spin_lock(&new_base->cpu_base->lock); | |
214 | 220 | timer->base = new_base; |
215 | 221 | } |
216 | 222 | return new_base; |
217 | 223 | |
218 | 224 | |
... | ... | @@ -220,12 +226,12 @@ |
220 | 226 | |
221 | 227 | #define set_curr_timer(b, t) do { } while (0) |
222 | 228 | |
223 | -static inline struct hrtimer_base * | |
229 | +static inline struct hrtimer_clock_base * | |
224 | 230 | lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) |
225 | 231 | { |
226 | - struct hrtimer_base *base = timer->base; | |
232 | + struct hrtimer_clock_base *base = timer->base; | |
227 | 233 | |
228 | - spin_lock_irqsave(&base->lock, *flags); | |
234 | + spin_lock_irqsave(&base->cpu_base->lock, *flags); | |
229 | 235 | |
230 | 236 | return base; |
231 | 237 | } |
... | ... | @@ -305,7 +311,7 @@ |
305 | 311 | static inline |
306 | 312 | void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) |
307 | 313 | { |
308 | - spin_unlock_irqrestore(&timer->base->lock, *flags); | |
314 | + spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags); | |
309 | 315 | } |
310 | 316 | |
311 | 317 | /** |
... | ... | @@ -355,7 +361,8 @@ |
355 | 361 | * The timer is inserted in expiry order. Insertion into the |
356 | 362 | * red black tree is O(log(n)). Must hold the base lock. |
357 | 363 | */ |
358 | -static void enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_base *base) | |
364 | +static void enqueue_hrtimer(struct hrtimer *timer, | |
365 | + struct hrtimer_clock_base *base) | |
359 | 366 | { |
360 | 367 | struct rb_node **link = &base->active.rb_node; |
361 | 368 | struct rb_node *parent = NULL; |
... | ... | @@ -394,7 +401,8 @@ |
394 | 401 | * |
395 | 402 | * Caller must hold the base lock. |
396 | 403 | */ |
397 | -static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base) | |
404 | +static void __remove_hrtimer(struct hrtimer *timer, | |
405 | + struct hrtimer_clock_base *base) | |
398 | 406 | { |
399 | 407 | /* |
400 | 408 | * Remove the timer from the rbtree and replace the |
... | ... | @@ -410,7 +418,7 @@ |
410 | 418 | * remove hrtimer, called with base lock held |
411 | 419 | */ |
412 | 420 | static inline int |
413 | -remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base) | |
421 | +remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base) | |
414 | 422 | { |
415 | 423 | if (hrtimer_active(timer)) { |
416 | 424 | __remove_hrtimer(timer, base); |
... | ... | @@ -432,7 +440,7 @@ |
432 | 440 | int |
433 | 441 | hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode) |
434 | 442 | { |
435 | - struct hrtimer_base *base, *new_base; | |
443 | + struct hrtimer_clock_base *base, *new_base; | |
436 | 444 | unsigned long flags; |
437 | 445 | int ret; |
438 | 446 | |
439 | 447 | |
... | ... | @@ -479,13 +487,13 @@ |
479 | 487 | */ |
480 | 488 | int hrtimer_try_to_cancel(struct hrtimer *timer) |
481 | 489 | { |
482 | - struct hrtimer_base *base; | |
490 | + struct hrtimer_clock_base *base; | |
483 | 491 | unsigned long flags; |
484 | 492 | int ret = -1; |
485 | 493 | |
486 | 494 | base = lock_hrtimer_base(timer, &flags); |
487 | 495 | |
488 | - if (base->curr_timer != timer) | |
496 | + if (base->cpu_base->curr_timer != timer) | |
489 | 497 | ret = remove_hrtimer(timer, base); |
490 | 498 | |
491 | 499 | unlock_hrtimer_base(timer, &flags); |
492 | 500 | |
... | ... | @@ -521,12 +529,12 @@ |
521 | 529 | */ |
522 | 530 | ktime_t hrtimer_get_remaining(const struct hrtimer *timer) |
523 | 531 | { |
524 | - struct hrtimer_base *base; | |
532 | + struct hrtimer_clock_base *base; | |
525 | 533 | unsigned long flags; |
526 | 534 | ktime_t rem; |
527 | 535 | |
528 | 536 | base = lock_hrtimer_base(timer, &flags); |
529 | - rem = ktime_sub(timer->expires, timer->base->get_time()); | |
537 | + rem = ktime_sub(timer->expires, base->get_time()); | |
530 | 538 | unlock_hrtimer_base(timer, &flags); |
531 | 539 | |
532 | 540 | return rem; |
533 | 541 | |
534 | 542 | |
535 | 543 | |
536 | 544 | |
537 | 545 | |
... | ... | @@ -542,26 +550,29 @@ |
542 | 550 | */ |
543 | 551 | ktime_t hrtimer_get_next_event(void) |
544 | 552 | { |
545 | - struct hrtimer_base *base = __get_cpu_var(hrtimer_bases); | |
553 | + struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); | |
554 | + struct hrtimer_clock_base *base = cpu_base->clock_base; | |
546 | 555 | ktime_t delta, mindelta = { .tv64 = KTIME_MAX }; |
547 | 556 | unsigned long flags; |
548 | 557 | int i; |
549 | 558 | |
550 | - for (i = 0; i < MAX_HRTIMER_BASES; i++, base++) { | |
559 | + spin_lock_irqsave(&cpu_base->lock, flags); | |
560 | + | |
561 | + for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { | |
551 | 562 | struct hrtimer *timer; |
552 | 563 | |
553 | - spin_lock_irqsave(&base->lock, flags); | |
554 | - if (!base->first) { | |
555 | - spin_unlock_irqrestore(&base->lock, flags); | |
564 | + if (!base->first) | |
556 | 565 | continue; |
557 | - } | |
566 | + | |
558 | 567 | timer = rb_entry(base->first, struct hrtimer, node); |
559 | 568 | delta.tv64 = timer->expires.tv64; |
560 | - spin_unlock_irqrestore(&base->lock, flags); | |
561 | 569 | delta = ktime_sub(delta, base->get_time()); |
562 | 570 | if (delta.tv64 < mindelta.tv64) |
563 | 571 | mindelta.tv64 = delta.tv64; |
564 | 572 | } |
573 | + | |
574 | + spin_unlock_irqrestore(&cpu_base->lock, flags); | |
575 | + | |
565 | 576 | if (mindelta.tv64 < 0) |
566 | 577 | mindelta.tv64 = 0; |
567 | 578 | return mindelta; |
568 | 579 | |
569 | 580 | |
... | ... | @@ -577,16 +588,16 @@ |
577 | 588 | void hrtimer_init(struct hrtimer *timer, clockid_t clock_id, |
578 | 589 | enum hrtimer_mode mode) |
579 | 590 | { |
580 | - struct hrtimer_base *bases; | |
591 | + struct hrtimer_cpu_base *cpu_base; | |
581 | 592 | |
582 | 593 | memset(timer, 0, sizeof(struct hrtimer)); |
583 | 594 | |
584 | - bases = __raw_get_cpu_var(hrtimer_bases); | |
595 | + cpu_base = &__raw_get_cpu_var(hrtimer_bases); | |
585 | 596 | |
586 | 597 | if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS) |
587 | 598 | clock_id = CLOCK_MONOTONIC; |
588 | 599 | |
589 | - timer->base = &bases[clock_id]; | |
600 | + timer->base = &cpu_base->clock_base[clock_id]; | |
590 | 601 | rb_set_parent(&timer->node, &timer->node); |
591 | 602 | } |
592 | 603 | EXPORT_SYMBOL_GPL(hrtimer_init); |
593 | 604 | |
... | ... | @@ -601,10 +612,10 @@ |
601 | 612 | */ |
602 | 613 | int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp) |
603 | 614 | { |
604 | - struct hrtimer_base *bases; | |
615 | + struct hrtimer_cpu_base *cpu_base; | |
605 | 616 | |
606 | - bases = __raw_get_cpu_var(hrtimer_bases); | |
607 | - *tp = ktime_to_timespec(bases[which_clock].resolution); | |
617 | + cpu_base = &__raw_get_cpu_var(hrtimer_bases); | |
618 | + *tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution); | |
608 | 619 | |
609 | 620 | return 0; |
610 | 621 | } |
611 | 622 | |
... | ... | @@ -613,9 +624,11 @@ |
613 | 624 | /* |
614 | 625 | * Expire the per base hrtimer-queue: |
615 | 626 | */ |
616 | -static inline void run_hrtimer_queue(struct hrtimer_base *base) | |
627 | +static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base, | |
628 | + int index) | |
617 | 629 | { |
618 | 630 | struct rb_node *node; |
631 | + struct hrtimer_clock_base *base = &cpu_base->clock_base[index]; | |
619 | 632 | |
620 | 633 | if (!base->first) |
621 | 634 | return; |
... | ... | @@ -623,7 +636,7 @@ |
623 | 636 | if (base->get_softirq_time) |
624 | 637 | base->softirq_time = base->get_softirq_time(); |
625 | 638 | |
626 | - spin_lock_irq(&base->lock); | |
639 | + spin_lock_irq(&cpu_base->lock); | |
627 | 640 | |
628 | 641 | while ((node = base->first)) { |
629 | 642 | struct hrtimer *timer; |
630 | 643 | |
631 | 644 | |
632 | 645 | |
... | ... | @@ -635,21 +648,21 @@ |
635 | 648 | break; |
636 | 649 | |
637 | 650 | fn = timer->function; |
638 | - set_curr_timer(base, timer); | |
651 | + set_curr_timer(cpu_base, timer); | |
639 | 652 | __remove_hrtimer(timer, base); |
640 | - spin_unlock_irq(&base->lock); | |
653 | + spin_unlock_irq(&cpu_base->lock); | |
641 | 654 | |
642 | 655 | restart = fn(timer); |
643 | 656 | |
644 | - spin_lock_irq(&base->lock); | |
657 | + spin_lock_irq(&cpu_base->lock); | |
645 | 658 | |
646 | 659 | if (restart != HRTIMER_NORESTART) { |
647 | 660 | BUG_ON(hrtimer_active(timer)); |
648 | 661 | enqueue_hrtimer(timer, base); |
649 | 662 | } |
650 | 663 | } |
651 | - set_curr_timer(base, NULL); | |
652 | - spin_unlock_irq(&base->lock); | |
664 | + set_curr_timer(cpu_base, NULL); | |
665 | + spin_unlock_irq(&cpu_base->lock); | |
653 | 666 | } |
654 | 667 | |
655 | 668 | /* |
656 | 669 | |
657 | 670 | |
... | ... | @@ -657,13 +670,13 @@ |
657 | 670 | */ |
658 | 671 | void hrtimer_run_queues(void) |
659 | 672 | { |
660 | - struct hrtimer_base *base = __get_cpu_var(hrtimer_bases); | |
673 | + struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); | |
661 | 674 | int i; |
662 | 675 | |
663 | - hrtimer_get_softirq_time(base); | |
676 | + hrtimer_get_softirq_time(cpu_base); | |
664 | 677 | |
665 | - for (i = 0; i < MAX_HRTIMER_BASES; i++) | |
666 | - run_hrtimer_queue(&base[i]); | |
678 | + for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) | |
679 | + run_hrtimer_queue(cpu_base, i); | |
667 | 680 | } |
668 | 681 | |
669 | 682 | /* |
670 | 683 | |
671 | 684 | |
... | ... | @@ -792,19 +805,21 @@ |
792 | 805 | */ |
793 | 806 | static void __devinit init_hrtimers_cpu(int cpu) |
794 | 807 | { |
795 | - struct hrtimer_base *base = per_cpu(hrtimer_bases, cpu); | |
808 | + struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); | |
796 | 809 | int i; |
797 | 810 | |
798 | - for (i = 0; i < MAX_HRTIMER_BASES; i++, base++) { | |
799 | - spin_lock_init(&base->lock); | |
800 | - lockdep_set_class(&base->lock, &base->lock_key); | |
801 | - } | |
811 | + spin_lock_init(&cpu_base->lock); | |
812 | + lockdep_set_class(&cpu_base->lock, &cpu_base->lock_key); | |
813 | + | |
814 | + for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) | |
815 | + cpu_base->clock_base[i].cpu_base = cpu_base; | |
816 | + | |
802 | 817 | } |
803 | 818 | |
804 | 819 | #ifdef CONFIG_HOTPLUG_CPU |
805 | 820 | |
806 | -static void migrate_hrtimer_list(struct hrtimer_base *old_base, | |
807 | - struct hrtimer_base *new_base) | |
821 | +static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base, | |
822 | + struct hrtimer_clock_base *new_base) | |
808 | 823 | { |
809 | 824 | struct hrtimer *timer; |
810 | 825 | struct rb_node *node; |
811 | 826 | |
812 | 827 | |
813 | 828 | |
814 | 829 | |
815 | 830 | |
... | ... | @@ -819,29 +834,26 @@ |
819 | 834 | |
820 | 835 | static void migrate_hrtimers(int cpu) |
821 | 836 | { |
822 | - struct hrtimer_base *old_base, *new_base; | |
837 | + struct hrtimer_cpu_base *old_base, *new_base; | |
823 | 838 | int i; |
824 | 839 | |
825 | 840 | BUG_ON(cpu_online(cpu)); |
826 | - old_base = per_cpu(hrtimer_bases, cpu); | |
827 | - new_base = get_cpu_var(hrtimer_bases); | |
841 | + old_base = &per_cpu(hrtimer_bases, cpu); | |
842 | + new_base = &get_cpu_var(hrtimer_bases); | |
828 | 843 | |
829 | 844 | local_irq_disable(); |
830 | 845 | |
831 | - for (i = 0; i < MAX_HRTIMER_BASES; i++) { | |
846 | + spin_lock(&new_base->lock); | |
847 | + spin_lock(&old_base->lock); | |
832 | 848 | |
833 | - spin_lock(&new_base->lock); | |
834 | - spin_lock(&old_base->lock); | |
835 | - | |
849 | + for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { | |
836 | 850 | BUG_ON(old_base->curr_timer); |
837 | 851 | |
838 | - migrate_hrtimer_list(old_base, new_base); | |
839 | - | |
840 | - spin_unlock(&old_base->lock); | |
841 | - spin_unlock(&new_base->lock); | |
842 | - old_base++; | |
843 | - new_base++; | |
852 | + migrate_hrtimer_list(&old_base->clock_base[i], | |
853 | + &new_base->clock_base[i]); | |
844 | 854 | } |
855 | + spin_unlock(&old_base->lock); | |
856 | + spin_unlock(&new_base->lock); | |
845 | 857 | |
846 | 858 | local_irq_enable(); |
847 | 859 | put_cpu_var(hrtimer_bases); |