Commit 74019224ac34b044b44a31dd89a54e3477db4896
1 parent
5955c7a2cf
Exists in
master
and in
39 other branches
timers: add mod_timer_pending()
Impact: new timer API

Based on an idea from Martin Josefsson with the help of Patrick McHardy and Stephen Hemminger:

introduce the mod_timer_pending() API which is a mod_timer() offspring that is an invariant on already removed timers.

(regular mod_timer() re-activates non-pending timers.)

This is useful for the networking code in that it can allow unserialized mod_timer_pending() timer-forwarding calls, but a single del_timer*() will stop the timer from being reactivated again.

Also while at it:

- optimize the regular mod_timer() path some more, the timer-stat and a debug check was needlessly duplicated in __mod_timer().

- make the exports come straight after the function, as most other exports in timer.c already did.

- eliminate __mod_timer() as an external API, change the users to mod_timer().

The regular mod_timer() code path is not impacted significantly, due to inlining optimizations and due to the simplifications.

Based-on-patch-from: Stephen Hemminger <shemminger@vyatta.com>
Acked-by: Stephen Hemminger <shemminger@vyatta.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Patrick McHardy <kaber@trash.net>
Cc: netdev@vger.kernel.org
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Showing 5 changed files with 80 additions and 62 deletions. (Side-by-side diff)
arch/powerpc/platforms/cell/spufs/sched.c
... | ... | @@ -508,7 +508,7 @@ |
508 | 508 | list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]); |
509 | 509 | set_bit(ctx->prio, spu_prio->bitmap); |
510 | 510 | if (!spu_prio->nr_waiting++) |
511 | - __mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK); | |
511 | + mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK); | |
512 | 512 | } |
513 | 513 | } |
514 | 514 |
drivers/infiniband/hw/ipath/ipath_driver.c
... | ... | @@ -2715,7 +2715,7 @@ |
2715 | 2715 | * to prevent HoL blocking, then start the HoL timer that |
2716 | 2716 | * periodically continues, then stop procs, so they can detect |
2717 | 2717 | * link down if they want, and do something about it. |
2718 | - * Timer may already be running, so use __mod_timer, not add_timer. | |
2718 | + * Timer may already be running, so use mod_timer, not add_timer. | |
2719 | 2719 | */ |
2720 | 2720 | void ipath_hol_down(struct ipath_devdata *dd) |
2721 | 2721 | { |
... | ... | @@ -2724,7 +2724,7 @@ |
2724 | 2724 | dd->ipath_hol_next = IPATH_HOL_DOWNCONT; |
2725 | 2725 | dd->ipath_hol_timer.expires = jiffies + |
2726 | 2726 | msecs_to_jiffies(ipath_hol_timeout_ms); |
2727 | - __mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires); | |
2727 | + mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires); | |
2728 | 2728 | } |
2729 | 2729 | |
2730 | 2730 | /* |
... | ... | @@ -2763,7 +2763,7 @@ |
2763 | 2763 | else { |
2764 | 2764 | dd->ipath_hol_timer.expires = jiffies + |
2765 | 2765 | msecs_to_jiffies(ipath_hol_timeout_ms); |
2766 | - __mod_timer(&dd->ipath_hol_timer, | |
2766 | + mod_timer(&dd->ipath_hol_timer, | |
2767 | 2767 | dd->ipath_hol_timer.expires); |
2768 | 2768 | } |
2769 | 2769 | } |
include/linux/timer.h
... | ... | @@ -86,8 +86,8 @@ |
86 | 86 | |
87 | 87 | extern void add_timer_on(struct timer_list *timer, int cpu); |
88 | 88 | extern int del_timer(struct timer_list * timer); |
89 | -extern int __mod_timer(struct timer_list *timer, unsigned long expires); | |
90 | 89 | extern int mod_timer(struct timer_list *timer, unsigned long expires); |
90 | +extern int mod_timer_pending(struct timer_list *timer, unsigned long expires); | |
91 | 91 | |
92 | 92 | /* |
93 | 93 | * The jiffies value which is added to now, when there is no timer |
... | ... | @@ -146,25 +146,7 @@ |
146 | 146 | } |
147 | 147 | #endif |
148 | 148 | |
149 | -/** | |
150 | - * add_timer - start a timer | |
151 | - * @timer: the timer to be added | |
152 | - * | |
153 | - * The kernel will do a ->function(->data) callback from the | |
154 | - * timer interrupt at the ->expires point in the future. The | |
155 | - * current time is 'jiffies'. | |
156 | - * | |
157 | - * The timer's ->expires, ->function (and if the handler uses it, ->data) | |
158 | - * fields must be set prior calling this function. | |
159 | - * | |
160 | - * Timers with an ->expires field in the past will be executed in the next | |
161 | - * timer tick. | |
162 | - */ | |
163 | -static inline void add_timer(struct timer_list *timer) | |
164 | -{ | |
165 | - BUG_ON(timer_pending(timer)); | |
166 | - __mod_timer(timer, timer->expires); | |
167 | -} | |
149 | +extern void add_timer(struct timer_list *timer); | |
168 | 150 | |
169 | 151 | #ifdef CONFIG_SMP |
170 | 152 | extern int try_to_del_timer_sync(struct timer_list *timer); |
kernel/relay.c
kernel/timer.c
... | ... | @@ -589,12 +589,15 @@ |
589 | 589 | } |
590 | 590 | } |
591 | 591 | |
592 | -int __mod_timer(struct timer_list *timer, unsigned long expires) | |
592 | +static inline int | |
593 | +__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only) | |
593 | 594 | { |
594 | 595 | struct tvec_base *base, *new_base; |
595 | 596 | unsigned long flags; |
596 | - int ret = 0; | |
597 | + int ret; | |
597 | 598 | |
599 | + ret = 0; | |
600 | + | |
598 | 601 | timer_stats_timer_set_start_info(timer); |
599 | 602 | BUG_ON(!timer->function); |
600 | 603 | |
... | ... | @@ -603,6 +606,9 @@ |
603 | 606 | if (timer_pending(timer)) { |
604 | 607 | detach_timer(timer, 0); |
605 | 608 | ret = 1; |
609 | + } else { | |
610 | + if (pending_only) | |
611 | + goto out_unlock; | |
606 | 612 | } |
607 | 613 | |
608 | 614 | debug_timer_activate(timer); |
609 | 615 | |
610 | 616 | |
611 | 617 | |
612 | 618 | |
613 | 619 | |
614 | 620 | |
... | ... | @@ -629,42 +635,28 @@ |
629 | 635 | |
630 | 636 | timer->expires = expires; |
631 | 637 | internal_add_timer(base, timer); |
638 | + | |
639 | +out_unlock: | |
632 | 640 | spin_unlock_irqrestore(&base->lock, flags); |
633 | 641 | |
634 | 642 | return ret; |
635 | 643 | } |
636 | 644 | |
637 | -EXPORT_SYMBOL(__mod_timer); | |
638 | - | |
639 | 645 | /** |
640 | - * add_timer_on - start a timer on a particular CPU | |
641 | - * @timer: the timer to be added | |
642 | - * @cpu: the CPU to start it on | |
646 | + * mod_timer_pending - modify a pending timer's timeout | |
647 | + * @timer: the pending timer to be modified | |
648 | + * @expires: new timeout in jiffies | |
643 | 649 | * |
644 | - * This is not very scalable on SMP. Double adds are not possible. | |
650 | + * mod_timer_pending() is the same for pending timers as mod_timer(), | |
651 | + * but will not re-activate and modify already deleted timers. | |
652 | + * | |
653 | + * It is useful for unserialized use of timers. | |
645 | 654 | */ |
646 | -void add_timer_on(struct timer_list *timer, int cpu) | |
655 | +int mod_timer_pending(struct timer_list *timer, unsigned long expires) | |
647 | 656 | { |
648 | - struct tvec_base *base = per_cpu(tvec_bases, cpu); | |
649 | - unsigned long flags; | |
650 | - | |
651 | - timer_stats_timer_set_start_info(timer); | |
652 | - BUG_ON(timer_pending(timer) || !timer->function); | |
653 | - spin_lock_irqsave(&base->lock, flags); | |
654 | - timer_set_base(timer, base); | |
655 | - debug_timer_activate(timer); | |
656 | - internal_add_timer(base, timer); | |
657 | - /* | |
658 | - * Check whether the other CPU is idle and needs to be | |
659 | - * triggered to reevaluate the timer wheel when nohz is | |
660 | - * active. We are protected against the other CPU fiddling | |
661 | - * with the timer by holding the timer base lock. This also | |
662 | - * makes sure that a CPU on the way to idle can not evaluate | |
663 | - * the timer wheel. | |
664 | - */ | |
665 | - wake_up_idle_cpu(cpu); | |
666 | - spin_unlock_irqrestore(&base->lock, flags); | |
657 | + return __mod_timer(timer, expires, true); | |
667 | 658 | } |
659 | +EXPORT_SYMBOL(mod_timer_pending); | |
668 | 660 | |
669 | 661 | /** |
670 | 662 | * mod_timer - modify a timer's timeout |
... | ... | @@ -688,9 +680,6 @@ |
688 | 680 | */ |
689 | 681 | int mod_timer(struct timer_list *timer, unsigned long expires) |
690 | 682 | { |
691 | - BUG_ON(!timer->function); | |
692 | - | |
693 | - timer_stats_timer_set_start_info(timer); | |
694 | 683 | /* |
695 | 684 | * This is a common optimization triggered by the |
696 | 685 | * networking code - if the timer is re-modified |
697 | 686 | |
698 | 687 | |
... | ... | @@ -699,12 +688,62 @@ |
699 | 688 | if (timer->expires == expires && timer_pending(timer)) |
700 | 689 | return 1; |
701 | 690 | |
702 | - return __mod_timer(timer, expires); | |
691 | + return __mod_timer(timer, expires, false); | |
703 | 692 | } |
704 | - | |
705 | 693 | EXPORT_SYMBOL(mod_timer); |
706 | 694 | |
707 | 695 | /** |
696 | + * add_timer - start a timer | |
697 | + * @timer: the timer to be added | |
698 | + * | |
699 | + * The kernel will do a ->function(->data) callback from the | |
700 | + * timer interrupt at the ->expires point in the future. The | |
701 | + * current time is 'jiffies'. | |
702 | + * | |
703 | + * The timer's ->expires, ->function (and if the handler uses it, ->data) | |
704 | + * fields must be set prior calling this function. | |
705 | + * | |
706 | + * Timers with an ->expires field in the past will be executed in the next | |
707 | + * timer tick. | |
708 | + */ | |
709 | +void add_timer(struct timer_list *timer) | |
710 | +{ | |
711 | + BUG_ON(timer_pending(timer)); | |
712 | + mod_timer(timer, timer->expires); | |
713 | +} | |
714 | +EXPORT_SYMBOL(add_timer); | |
715 | + | |
716 | +/** | |
717 | + * add_timer_on - start a timer on a particular CPU | |
718 | + * @timer: the timer to be added | |
719 | + * @cpu: the CPU to start it on | |
720 | + * | |
721 | + * This is not very scalable on SMP. Double adds are not possible. | |
722 | + */ | |
723 | +void add_timer_on(struct timer_list *timer, int cpu) | |
724 | +{ | |
725 | + struct tvec_base *base = per_cpu(tvec_bases, cpu); | |
726 | + unsigned long flags; | |
727 | + | |
728 | + timer_stats_timer_set_start_info(timer); | |
729 | + BUG_ON(timer_pending(timer) || !timer->function); | |
730 | + spin_lock_irqsave(&base->lock, flags); | |
731 | + timer_set_base(timer, base); | |
732 | + debug_timer_activate(timer); | |
733 | + internal_add_timer(base, timer); | |
734 | + /* | |
735 | + * Check whether the other CPU is idle and needs to be | |
736 | + * triggered to reevaluate the timer wheel when nohz is | |
737 | + * active. We are protected against the other CPU fiddling | |
738 | + * with the timer by holding the timer base lock. This also | |
739 | + * makes sure that a CPU on the way to idle can not evaluate | |
740 | + * the timer wheel. | |
741 | + */ | |
742 | + wake_up_idle_cpu(cpu); | |
743 | + spin_unlock_irqrestore(&base->lock, flags); | |
744 | +} | |
745 | + | |
746 | +/** | |
708 | 747 | * del_timer - deactive a timer. |
709 | 748 | * @timer: the timer to be deactivated |
710 | 749 | * |
... | ... | @@ -733,7 +772,6 @@ |
733 | 772 | |
734 | 773 | return ret; |
735 | 774 | } |
736 | - | |
737 | 775 | EXPORT_SYMBOL(del_timer); |
738 | 776 | |
739 | 777 | #ifdef CONFIG_SMP |
... | ... | @@ -767,7 +805,6 @@ |
767 | 805 | |
768 | 806 | return ret; |
769 | 807 | } |
770 | - | |
771 | 808 | EXPORT_SYMBOL(try_to_del_timer_sync); |
772 | 809 | |
773 | 810 | /** |
... | ... | @@ -796,7 +833,6 @@ |
796 | 833 | cpu_relax(); |
797 | 834 | } |
798 | 835 | } |
799 | - | |
800 | 836 | EXPORT_SYMBOL(del_timer_sync); |
801 | 837 | #endif |
802 | 838 | |
... | ... | @@ -1268,7 +1304,7 @@ |
1268 | 1304 | expire = timeout + jiffies; |
1269 | 1305 | |
1270 | 1306 | setup_timer_on_stack(&timer, process_timeout, (unsigned long)current); |
1271 | - __mod_timer(&timer, expire); | |
1307 | + __mod_timer(&timer, expire, false); | |
1272 | 1308 | schedule(); |
1273 | 1309 | del_singleshot_timer_sync(&timer); |
1274 | 1310 |