Commit bb758e9637e5ddcff84a97177415499ae1fed498

Authored by Linus Torvalds

Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  hrtimers: fix warning in kernel/hrtimer.c
  x86: make sure we really have an hpet mapping before using it
  x86: enable HPET on Fujitsu u9200
  linux/timex.h: cleanup for userspace
  posix-timers: simplify de_thread()->exit_itimers() path
  posix-timers: check ->it_signal instead of ->it_pid to validate the timer
  posix-timers: use "struct pid*" instead of "struct task_struct*"
  nohz: suppress needless timer reprogramming
  clocksource, acpi_pm.c: put acpi_pm_read_slow() under CONFIG_PCI
  nohz: no softirq pending warnings for offline cpus
  hrtimer: removing all ur callback modes, fix
  hrtimer: removing all ur callback modes, fix hotplug
  hrtimer: removing all ur callback modes
  x86: correct link to HPET timer specification
  rtc-cmos: export second NVRAM bank

Fixed up conflicts in sound/drivers/pcsp/pcsp.c and sound/core/hrtimer.c
manually.
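
For timer users, the practical upshot of the callback-mode removal in this merge is that drivers no longer set cb_mode at all; every hrtimer callback now runs in hardirq context, as the hunks below for kernel/sched.c, sound/core/hrtimer.c and sound/drivers/pcsp/pcsp.c show. A minimal sketch of the resulting usage (the timer, callback name and 10 ms period are illustrative only, not taken from this commit):

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    static struct hrtimer example_timer;    /* hypothetical example timer */

    static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
    {
            /* Runs in hardirq context with interrupts disabled; keep it short. */
            return HRTIMER_NORESTART;
    }

    static void example_timer_setup(void)
    {
            /* No cb_mode assignment any more; the field no longer exists. */
            hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            example_timer.function = example_timer_fn;
            hrtimer_start(&example_timer, ktime_set(0, 10 * NSEC_PER_MSEC),
                          HRTIMER_MODE_REL);
    }

Callbacks that need to do sleeping work must defer it themselves, for example to a workqueue, since there is no softirq-mode timer left to fall back on.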

Showing 19 changed files

arch/x86/Kconfig
... ... @@ -479,7 +479,7 @@
479 479 The HPET provides a stable time base on SMP
480 480 systems, unlike the TSC, but it is more expensive to access,
481 481 as it is off-chip. You can find the HPET spec at
482   - <http://www.intel.com/hardwaredesign/hpetspec.htm>.
  482 + <http://www.intel.com/hardwaredesign/hpetspec_1.pdf>.
483 483  
484 484 You can safely choose Y here. However, HPET will only be
485 485 activated if the platform and the BIOS support this feature.
arch/x86/kernel/hpet.c
... ... @@ -813,7 +813,7 @@
813 813  
814 814 out_nohpet:
815 815 hpet_clear_mapping();
816   - boot_hpet_disable = 1;
  816 + hpet_address = 0;
817 817 return 0;
818 818 }
819 819  
820 820  
... ... @@ -836,9 +836,10 @@
836 836  
837 837 hpet_address = force_hpet_address;
838 838 hpet_enable();
839   - if (!hpet_virt_address)
840   - return -ENODEV;
841 839 }
  840 +
  841 + if (!hpet_virt_address)
  842 + return -ENODEV;
842 843  
843 844 hpet_reserve_platform_timers(hpet_readl(HPET_ID));
844 845  
arch/x86/kernel/quirks.c
... ... @@ -168,6 +168,8 @@
168 168 ich_force_enable_hpet);
169 169 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
170 170 ich_force_enable_hpet);
  171 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4,
  172 + ich_force_enable_hpet);
171 173 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
172 174 ich_force_enable_hpet);
173 175  
drivers/char/hpet.c
... ... @@ -46,7 +46,7 @@
46 46 /*
47 47 * The High Precision Event Timer driver.
48 48 * This driver is closely modelled after the rtc.c driver.
49   - * http://www.intel.com/hardwaredesign/hpetspec.htm
  49 + * http://www.intel.com/hardwaredesign/hpetspec_1.pdf
50 50 */
51 51 #define HPET_USER_FREQ (64)
52 52 #define HPET_DRIFT (500)
drivers/clocksource/acpi_pm.c
... ... @@ -57,11 +57,6 @@
57 57 return v2;
58 58 }
59 59  
60   -static cycle_t acpi_pm_read_slow(void)
61   -{
62   - return (cycle_t)acpi_pm_read_verified();
63   -}
64   -
65 60 static cycle_t acpi_pm_read(void)
66 61 {
67 62 return (cycle_t)read_pmtmr();
... ... @@ -87,6 +82,11 @@
87 82 return 1;
88 83 }
89 84 __setup("acpi_pm_good", acpi_pm_good_setup);
  85 +
  86 +static cycle_t acpi_pm_read_slow(void)
  87 +{
  88 + return (cycle_t)acpi_pm_read_verified();
  89 +}
90 90  
91 91 static inline void acpi_pm_need_workaround(void)
92 92 {
drivers/input/touchscreen/ads7846.c
... ... @@ -697,7 +697,7 @@
697 697 struct ads7846 *ts = container_of(handle, struct ads7846, timer);
698 698 int status = 0;
699 699  
700   - spin_lock_irq(&ts->lock);
  700 + spin_lock(&ts->lock);
701 701  
702 702 if (unlikely(!get_pendown_state(ts) ||
703 703 device_suspended(&ts->spi->dev))) {
... ... @@ -728,7 +728,7 @@
728 728 dev_err(&ts->spi->dev, "spi_async --> %d\n", status);
729 729 }
730 730  
731   - spin_unlock_irq(&ts->lock);
  731 + spin_unlock(&ts->lock);
732 732 return HRTIMER_NORESTART;
733 733 }
734 734  
fs/exec.c
... ... @@ -773,7 +773,6 @@
773 773 struct signal_struct *sig = tsk->signal;
774 774 struct sighand_struct *oldsighand = tsk->sighand;
775 775 spinlock_t *lock = &oldsighand->siglock;
776   - struct task_struct *leader = NULL;
777 776 int count;
778 777  
779 778 if (thread_group_empty(tsk))
... ... @@ -811,7 +810,7 @@
811 810 * and to assume its PID:
812 811 */
813 812 if (!thread_group_leader(tsk)) {
814   - leader = tsk->group_leader;
  813 + struct task_struct *leader = tsk->group_leader;
815 814  
816 815 sig->notify_count = -1; /* for exit_notify() */
817 816 for (;;) {
818 817  
... ... @@ -863,8 +862,9 @@
863 862  
864 863 BUG_ON(leader->exit_state != EXIT_ZOMBIE);
865 864 leader->exit_state = EXIT_DEAD;
866   -
867 865 write_unlock_irq(&tasklist_lock);
  866 +
  867 + release_task(leader);
868 868 }
869 869  
870 870 sig->group_exit_task = NULL;
... ... @@ -873,8 +873,6 @@
873 873 no_thread_group:
874 874 exit_itimers(sig);
875 875 flush_itimer_signals();
876   - if (leader)
877   - release_task(leader);
878 876  
879 877 if (atomic_read(&oldsighand->count) != 1) {
880 878 struct sighand_struct *newsighand;
include/linux/hrtimer.h
... ... @@ -43,26 +43,6 @@
43 43 };
44 44  
45 45 /*
46   - * hrtimer callback modes:
47   - *
48   - * HRTIMER_CB_SOFTIRQ: Callback must run in softirq context
49   - * HRTIMER_CB_IRQSAFE_PERCPU: Callback must run in hardirq context
50   - * Special mode for tick emulation and
51   - * scheduler timer. Such timers are per
52   - * cpu and not allowed to be migrated on
53   - * cpu unplug.
54   - * HRTIMER_CB_IRQSAFE_UNLOCKED: Callback should run in hardirq context
55   - * with timer->base lock unlocked
56   - * used for timers which call wakeup to
57   - * avoid lock order problems with rq->lock
58   - */
59   -enum hrtimer_cb_mode {
60   - HRTIMER_CB_SOFTIRQ,
61   - HRTIMER_CB_IRQSAFE_PERCPU,
62   - HRTIMER_CB_IRQSAFE_UNLOCKED,
63   -};
64   -
65   -/*
66 46 * Values to track state of the timer
67 47 *
68 48 * Possible states:
... ... @@ -70,7 +50,6 @@
70 50 * 0x00 inactive
71 51 * 0x01 enqueued into rbtree
72 52 * 0x02 callback function running
73   - * 0x04 callback pending (high resolution mode)
74 53 *
75 54 * Special cases:
76 55 * 0x03 callback function running and enqueued
... ... @@ -92,8 +71,7 @@
92 71 #define HRTIMER_STATE_INACTIVE 0x00
93 72 #define HRTIMER_STATE_ENQUEUED 0x01
94 73 #define HRTIMER_STATE_CALLBACK 0x02
95   -#define HRTIMER_STATE_PENDING 0x04
96   -#define HRTIMER_STATE_MIGRATE 0x08
  74 +#define HRTIMER_STATE_MIGRATE 0x04
97 75  
98 76 /**
99 77 * struct hrtimer - the basic hrtimer structure
... ... @@ -109,8 +87,6 @@
109 87 * @function: timer expiry callback function
110 88 * @base: pointer to the timer base (per cpu and per clock)
111 89 * @state: state information (See bit values above)
112   - * @cb_mode: high resolution timer feature to select the callback execution
113   - * mode
114 90 * @cb_entry: list head to enqueue an expired timer into the callback list
115 91 * @start_site: timer statistics field to store the site where the timer
116 92 * was started
... ... @@ -129,7 +105,6 @@
129 105 struct hrtimer_clock_base *base;
130 106 unsigned long state;
131 107 struct list_head cb_entry;
132   - enum hrtimer_cb_mode cb_mode;
133 108 #ifdef CONFIG_TIMER_STATS
134 109 int start_pid;
135 110 void *start_site;
136 111  
... ... @@ -188,15 +163,11 @@
188 163 * @check_clocks: Indictator, when set evaluate time source and clock
189 164 * event devices whether high resolution mode can be
190 165 * activated.
191   - * @cb_pending: Expired timers are moved from the rbtree to this
192   - * list in the timer interrupt. The list is processed
193   - * in the softirq.
194 166 * @nr_events: Total number of timer interrupt events
195 167 */
196 168 struct hrtimer_cpu_base {
197 169 spinlock_t lock;
198 170 struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
199   - struct list_head cb_pending;
200 171 #ifdef CONFIG_HIGH_RES_TIMERS
201 172 ktime_t expires_next;
202 173 int hres_active;
... ... @@ -404,8 +375,7 @@
404 375 */
405 376 static inline int hrtimer_is_queued(struct hrtimer *timer)
406 377 {
407   - return timer->state &
408   - (HRTIMER_STATE_ENQUEUED | HRTIMER_STATE_PENDING);
  378 + return timer->state & HRTIMER_STATE_ENQUEUED;
409 379 }
410 380  
411 381 /*
include/linux/interrupt.h
... ... @@ -251,9 +251,6 @@
251 251 BLOCK_SOFTIRQ,
252 252 TASKLET_SOFTIRQ,
253 253 SCHED_SOFTIRQ,
254   -#ifdef CONFIG_HIGH_RES_TIMERS
255   - HRTIMER_SOFTIRQ,
256   -#endif
257 254 RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */
258 255  
259 256 NR_SOFTIRQS
include/linux/posix-timers.h
... ... @@ -45,7 +45,11 @@
45 45 int it_requeue_pending; /* waiting to requeue this timer */
46 46 #define REQUEUE_PENDING 1
47 47 int it_sigev_notify; /* notify word of sigevent struct */
48   - struct task_struct *it_process; /* process to send signal to */
  48 + struct signal_struct *it_signal;
  49 + union {
  50 + struct pid *it_pid; /* pid of process to send signal to */
  51 + struct task_struct *it_process; /* for clock_nanosleep */
  52 + };
49 53 struct sigqueue *sigq; /* signal queue entry. */
50 54 union {
51 55 struct {
include/linux/timex.h
... ... @@ -53,47 +53,11 @@
53 53 #ifndef _LINUX_TIMEX_H
54 54 #define _LINUX_TIMEX_H
55 55  
56   -#include <linux/compiler.h>
57 56 #include <linux/time.h>
58 57  
59   -#include <asm/param.h>
60   -
61 58 #define NTP_API 4 /* NTP API version */
62 59  
63 60 /*
64   - * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
65   - * for a slightly underdamped convergence characteristic. SHIFT_KH
66   - * establishes the damping of the FLL and is chosen by wisdom and black
67   - * art.
68   - *
69   - * MAXTC establishes the maximum time constant of the PLL. With the
70   - * SHIFT_KG and SHIFT_KF values given and a time constant range from
71   - * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours,
72   - * respectively.
73   - */
74   -#define SHIFT_PLL 4 /* PLL frequency factor (shift) */
75   -#define SHIFT_FLL 2 /* FLL frequency factor (shift) */
76   -#define MAXTC 10 /* maximum time constant (shift) */
77   -
78   -/*
79   - * SHIFT_USEC defines the scaling (shift) of the time_freq and
80   - * time_tolerance variables, which represent the current frequency
81   - * offset and maximum frequency tolerance.
82   - */
83   -#define SHIFT_USEC 16 /* frequency offset scale (shift) */
84   -#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
85   -#define PPM_SCALE_INV_SHIFT 19
86   -#define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
87   - PPM_SCALE + 1)
88   -
89   -#define MAXPHASE 500000000l /* max phase error (ns) */
90   -#define MAXFREQ 500000 /* max frequency error (ns/s) */
91   -#define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT)
92   -#define MINSEC 256 /* min interval between updates (s) */
93   -#define MAXSEC 2048 /* max interval between updates (s) */
94   -#define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */
95   -
96   -/*
97 61 * syscall interface - used (mainly by NTP daemon)
98 62 * to discipline kernel clock oscillator
99 63 */
100 64  
... ... @@ -199,7 +163,44 @@
199 163 #define TIME_BAD TIME_ERROR /* bw compat */
200 164  
201 165 #ifdef __KERNEL__
  166 +#include <linux/compiler.h>
  167 +#include <linux/types.h>
  168 +#include <linux/param.h>
  169 +
202 170 #include <asm/timex.h>
  171 +
  172 +/*
  173 + * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
  174 + * for a slightly underdamped convergence characteristic. SHIFT_KH
  175 + * establishes the damping of the FLL and is chosen by wisdom and black
  176 + * art.
  177 + *
  178 + * MAXTC establishes the maximum time constant of the PLL. With the
  179 + * SHIFT_KG and SHIFT_KF values given and a time constant range from
  180 + * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours,
  181 + * respectively.
  182 + */
  183 +#define SHIFT_PLL 4 /* PLL frequency factor (shift) */
  184 +#define SHIFT_FLL 2 /* FLL frequency factor (shift) */
  185 +#define MAXTC 10 /* maximum time constant (shift) */
  186 +
  187 +/*
  188 + * SHIFT_USEC defines the scaling (shift) of the time_freq and
  189 + * time_tolerance variables, which represent the current frequency
  190 + * offset and maximum frequency tolerance.
  191 + */
  192 +#define SHIFT_USEC 16 /* frequency offset scale (shift) */
  193 +#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
  194 +#define PPM_SCALE_INV_SHIFT 19
  195 +#define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
  196 + PPM_SCALE + 1)
  197 +
  198 +#define MAXPHASE 500000000l /* max phase error (ns) */
  199 +#define MAXFREQ 500000 /* max frequency error (ns/s) */
  200 +#define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT)
  201 +#define MINSEC 256 /* min interval between updates (s) */
  202 +#define MAXSEC 2048 /* max interval between updates (s) */
  203 +#define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */
203 204  
204 205 /*
205 206 * kernel variables
kernel/hrtimer.c
... ... @@ -442,22 +442,6 @@
442 442 static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
443 443 #endif
444 444  
445   -/*
446   - * Check, whether the timer is on the callback pending list
447   - */
448   -static inline int hrtimer_cb_pending(const struct hrtimer *timer)
449   -{
450   - return timer->state & HRTIMER_STATE_PENDING;
451   -}
452   -
453   -/*
454   - * Remove a timer from the callback pending list
455   - */
456   -static inline void hrtimer_remove_cb_pending(struct hrtimer *timer)
457   -{
458   - list_del_init(&timer->cb_entry);
459   -}
460   -
461 445 /* High resolution timer related functions */
462 446 #ifdef CONFIG_HIGH_RES_TIMERS
463 447  
... ... @@ -651,6 +635,8 @@
651 635 {
652 636 }
653 637  
  638 +static void __run_hrtimer(struct hrtimer *timer);
  639 +
654 640 /*
655 641 * When High resolution timers are active, try to reprogram. Note, that in case
656 642 * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
... ... @@ -661,31 +647,14 @@
661 647 struct hrtimer_clock_base *base)
662 648 {
663 649 if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
664   -
665   - /* Timer is expired, act upon the callback mode */
666   - switch(timer->cb_mode) {
667   - case HRTIMER_CB_IRQSAFE_PERCPU:
668   - case HRTIMER_CB_IRQSAFE_UNLOCKED:
669   - /*
670   - * This is solely for the sched tick emulation with
671   - * dynamic tick support to ensure that we do not
672   - * restart the tick right on the edge and end up with
673   - * the tick timer in the softirq ! The calling site
674   - * takes care of this. Also used for hrtimer sleeper !
675   - */
676   - debug_hrtimer_deactivate(timer);
677   - return 1;
678   - case HRTIMER_CB_SOFTIRQ:
679   - /*
680   - * Move everything else into the softirq pending list !
681   - */
682   - list_add_tail(&timer->cb_entry,
683   - &base->cpu_base->cb_pending);
684   - timer->state = HRTIMER_STATE_PENDING;
685   - return 1;
686   - default:
687   - BUG();
688   - }
  650 + /*
  651 + * XXX: recursion check?
  652 + * hrtimer_forward() should round up with timer granularity
  653 + * so that we never get into inf recursion here,
  654 + * it doesn't do that though
  655 + */
  656 + __run_hrtimer(timer);
  657 + return 1;
689 658 }
690 659 return 0;
691 660 }
... ... @@ -724,11 +693,6 @@
724 693 return 1;
725 694 }
726 695  
727   -static inline void hrtimer_raise_softirq(void)
728   -{
729   - raise_softirq(HRTIMER_SOFTIRQ);
730   -}
731   -
732 696 #else
733 697  
734 698 static inline int hrtimer_hres_active(void) { return 0; }
... ... @@ -747,7 +711,6 @@
747 711 {
748 712 return 0;
749 713 }
750   -static inline void hrtimer_raise_softirq(void) { }
751 714  
752 715 #endif /* CONFIG_HIGH_RES_TIMERS */
753 716  
... ... @@ -890,10 +853,7 @@
890 853 struct hrtimer_clock_base *base,
891 854 unsigned long newstate, int reprogram)
892 855 {
893   - /* High res. callback list. NOP for !HIGHRES */
894   - if (hrtimer_cb_pending(timer))
895   - hrtimer_remove_cb_pending(timer);
896   - else {
  856 + if (timer->state & HRTIMER_STATE_ENQUEUED) {
897 857 /*
898 858 * Remove the timer from the rbtree and replace the
899 859 * first entry pointer if necessary.
... ... @@ -953,7 +913,7 @@
953 913 {
954 914 struct hrtimer_clock_base *base, *new_base;
955 915 unsigned long flags;
956   - int ret, raise;
  916 + int ret;
957 917  
958 918 base = lock_hrtimer_base(timer, &flags);
959 919  
960 920  
... ... @@ -988,26 +948,8 @@
988 948 enqueue_hrtimer(timer, new_base,
989 949 new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
990 950  
991   - /*
992   - * The timer may be expired and moved to the cb_pending
993   - * list. We can not raise the softirq with base lock held due
994   - * to a possible deadlock with runqueue lock.
995   - */
996   - raise = timer->state == HRTIMER_STATE_PENDING;
997   -
998   - /*
999   - * We use preempt_disable to prevent this task from migrating after
1000   - * setting up the softirq and raising it. Otherwise, if me migrate
1001   - * we will raise the softirq on the wrong CPU.
1002   - */
1003   - preempt_disable();
1004   -
1005 951 unlock_hrtimer_base(timer, &flags);
1006 952  
1007   - if (raise)
1008   - hrtimer_raise_softirq();
1009   - preempt_enable();
1010   -
1011 953 return ret;
1012 954 }
1013 955 EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
... ... @@ -1192,75 +1134,6 @@
1192 1134 }
1193 1135 EXPORT_SYMBOL_GPL(hrtimer_get_res);
1194 1136  
1195   -static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
1196   -{
1197   - spin_lock_irq(&cpu_base->lock);
1198   -
1199   - while (!list_empty(&cpu_base->cb_pending)) {
1200   - enum hrtimer_restart (*fn)(struct hrtimer *);
1201   - struct hrtimer *timer;
1202   - int restart;
1203   - int emulate_hardirq_ctx = 0;
1204   -
1205   - timer = list_entry(cpu_base->cb_pending.next,
1206   - struct hrtimer, cb_entry);
1207   -
1208   - debug_hrtimer_deactivate(timer);
1209   - timer_stats_account_hrtimer(timer);
1210   -
1211   - fn = timer->function;
1212   - /*
1213   - * A timer might have been added to the cb_pending list
1214   - * when it was migrated during a cpu-offline operation.
1215   - * Emulate hardirq context for such timers.
1216   - */
1217   - if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
1218   - timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED)
1219   - emulate_hardirq_ctx = 1;
1220   -
1221   - __remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
1222   - spin_unlock_irq(&cpu_base->lock);
1223   -
1224   - if (unlikely(emulate_hardirq_ctx)) {
1225   - local_irq_disable();
1226   - restart = fn(timer);
1227   - local_irq_enable();
1228   - } else
1229   - restart = fn(timer);
1230   -
1231   - spin_lock_irq(&cpu_base->lock);
1232   -
1233   - timer->state &= ~HRTIMER_STATE_CALLBACK;
1234   - if (restart == HRTIMER_RESTART) {
1235   - BUG_ON(hrtimer_active(timer));
1236   - /*
1237   - * Enqueue the timer, allow reprogramming of the event
1238   - * device
1239   - */
1240   - enqueue_hrtimer(timer, timer->base, 1);
1241   - } else if (hrtimer_active(timer)) {
1242   - /*
1243   - * If the timer was rearmed on another CPU, reprogram
1244   - * the event device.
1245   - */
1246   - struct hrtimer_clock_base *base = timer->base;
1247   -
1248   - if (base->first == &timer->node &&
1249   - hrtimer_reprogram(timer, base)) {
1250   - /*
1251   - * Timer is expired. Thus move it from tree to
1252   - * pending list again.
1253   - */
1254   - __remove_hrtimer(timer, base,
1255   - HRTIMER_STATE_PENDING, 0);
1256   - list_add_tail(&timer->cb_entry,
1257   - &base->cpu_base->cb_pending);
1258   - }
1259   - }
1260   - }
1261   - spin_unlock_irq(&cpu_base->lock);
1262   -}
1263   -
1264 1137 static void __run_hrtimer(struct hrtimer *timer)
1265 1138 {
1266 1139 struct hrtimer_clock_base *base = timer->base;
1267 1140  
1268 1141  
1269 1142  
... ... @@ -1268,27 +1141,23 @@
1268 1141 enum hrtimer_restart (*fn)(struct hrtimer *);
1269 1142 int restart;
1270 1143  
  1144 + WARN_ON(!irqs_disabled());
  1145 +
1271 1146 debug_hrtimer_deactivate(timer);
1272 1147 __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
1273 1148 timer_stats_account_hrtimer(timer);
1274   -
1275 1149 fn = timer->function;
1276   - if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
1277   - timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) {
1278   - /*
1279   - * Used for scheduler timers, avoid lock inversion with
1280   - * rq->lock and tasklist_lock.
1281   - *
1282   - * These timers are required to deal with enqueue expiry
1283   - * themselves and are not allowed to migrate.
1284   - */
1285   - spin_unlock(&cpu_base->lock);
1286   - restart = fn(timer);
1287   - spin_lock(&cpu_base->lock);
1288   - } else
1289   - restart = fn(timer);
1290 1150  
1291 1151 /*
  1152 + * Because we run timers from hardirq context, there is no chance
  1153 + * they get migrated to another cpu, therefore its safe to unlock
  1154 + * the timer base.
  1155 + */
  1156 + spin_unlock(&cpu_base->lock);
  1157 + restart = fn(timer);
  1158 + spin_lock(&cpu_base->lock);
  1159 +
  1160 + /*
1292 1161 * Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid
1293 1162 * reprogramming of the event hardware. This happens at the end of this
1294 1163 * function anyway.
... ... @@ -1311,7 +1180,7 @@
1311 1180 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
1312 1181 struct hrtimer_clock_base *base;
1313 1182 ktime_t expires_next, now;
1314   - int i, raise = 0;
  1183 + int i;
1315 1184  
1316 1185 BUG_ON(!cpu_base->hres_active);
1317 1186 cpu_base->nr_events++;
... ... @@ -1360,16 +1229,6 @@
1360 1229 break;
1361 1230 }
1362 1231  
1363   - /* Move softirq callbacks to the pending list */
1364   - if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
1365   - __remove_hrtimer(timer, base,
1366   - HRTIMER_STATE_PENDING, 0);
1367   - list_add_tail(&timer->cb_entry,
1368   - &base->cpu_base->cb_pending);
1369   - raise = 1;
1370   - continue;
1371   - }
1372   -
1373 1232 __run_hrtimer(timer);
1374 1233 }
1375 1234 spin_unlock(&cpu_base->lock);
... ... @@ -1383,10 +1242,6 @@
1383 1242 if (tick_program_event(expires_next, 0))
1384 1243 goto retry;
1385 1244 }
1386   -
1387   - /* Raise softirq ? */
1388   - if (raise)
1389   - raise_softirq(HRTIMER_SOFTIRQ);
1390 1245 }
1391 1246  
1392 1247 /**
... ... @@ -1413,11 +1268,6 @@
1413 1268 local_irq_restore(flags);
1414 1269 }
1415 1270  
1416   -static void run_hrtimer_softirq(struct softirq_action *h)
1417   -{
1418   - run_hrtimer_pending(&__get_cpu_var(hrtimer_bases));
1419   -}
1420   -
1421 1271 #endif /* CONFIG_HIGH_RES_TIMERS */
1422 1272  
1423 1273 /*
... ... @@ -1429,8 +1279,6 @@
1429 1279 */
1430 1280 void hrtimer_run_pending(void)
1431 1281 {
1432   - struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
1433   -
1434 1282 if (hrtimer_hres_active())
1435 1283 return;
1436 1284  
... ... @@ -1444,8 +1292,6 @@
1444 1292 */
1445 1293 if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
1446 1294 hrtimer_switch_to_hres();
1447   -
1448   - run_hrtimer_pending(cpu_base);
1449 1295 }
1450 1296  
1451 1297 /*
... ... @@ -1482,14 +1328,6 @@
1482 1328 hrtimer_get_expires_tv64(timer))
1483 1329 break;
1484 1330  
1485   - if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
1486   - __remove_hrtimer(timer, base,
1487   - HRTIMER_STATE_PENDING, 0);
1488   - list_add_tail(&timer->cb_entry,
1489   - &base->cpu_base->cb_pending);
1490   - continue;
1491   - }
1492   -
1493 1331 __run_hrtimer(timer);
1494 1332 }
1495 1333 spin_unlock(&cpu_base->lock);
... ... @@ -1516,9 +1354,6 @@
1516 1354 {
1517 1355 sl->timer.function = hrtimer_wakeup;
1518 1356 sl->task = task;
1519   -#ifdef CONFIG_HIGH_RES_TIMERS
1520   - sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
1521   -#endif
1522 1357 }
1523 1358  
1524 1359 static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
1525 1360  
1526 1361  
... ... @@ -1655,18 +1490,16 @@
1655 1490 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
1656 1491 cpu_base->clock_base[i].cpu_base = cpu_base;
1657 1492  
1658   - INIT_LIST_HEAD(&cpu_base->cb_pending);
1659 1493 hrtimer_init_hres(cpu_base);
1660 1494 }
1661 1495  
1662 1496 #ifdef CONFIG_HOTPLUG_CPU
1663 1497  
1664   -static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
1665   - struct hrtimer_clock_base *new_base, int dcpu)
  1498 +static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
  1499 + struct hrtimer_clock_base *new_base)
1666 1500 {
1667 1501 struct hrtimer *timer;
1668 1502 struct rb_node *node;
1669   - int raise = 0;
1670 1503  
1671 1504 while ((node = rb_first(&old_base->active))) {
1672 1505 timer = rb_entry(node, struct hrtimer, node);
... ... @@ -1674,18 +1507,6 @@
1674 1507 debug_hrtimer_deactivate(timer);
1675 1508  
1676 1509 /*
1677   - * Should not happen. Per CPU timers should be
1678   - * canceled _before_ the migration code is called
1679   - */
1680   - if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) {
1681   - __remove_hrtimer(timer, old_base,
1682   - HRTIMER_STATE_INACTIVE, 0);
1683   - WARN(1, "hrtimer (%p %p)active but cpu %d dead\n",
1684   - timer, timer->function, dcpu);
1685   - continue;
1686   - }
1687   -
1688   - /*
1689 1510 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
1690 1511 * timer could be seen as !active and just vanish away
1691 1512 * under us on another CPU
... ... @@ -1693,69 +1514,34 @@
1693 1514 __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
1694 1515 timer->base = new_base;
1695 1516 /*
1696   - * Enqueue the timer. Allow reprogramming of the event device
  1517 + * Enqueue the timers on the new cpu, but do not reprogram
  1518 + * the timer as that would enable a deadlock between
  1519 + * hrtimer_enqueue_reprogramm() running the timer and us still
  1520 + * holding a nested base lock.
  1521 + *
  1522 + * Instead we tickle the hrtimer interrupt after the migration
  1523 + * is done, which will run all expired timers and re-programm
  1524 + * the timer device.
1697 1525 */
1698   - enqueue_hrtimer(timer, new_base, 1);
  1526 + enqueue_hrtimer(timer, new_base, 0);
1699 1527  
1700   -#ifdef CONFIG_HIGH_RES_TIMERS
1701   - /*
1702   - * Happens with high res enabled when the timer was
1703   - * already expired and the callback mode is
1704   - * HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). The
1705   - * enqueue code does not move them to the soft irq
1706   - * pending list for performance/latency reasons, but
1707   - * in the migration state, we need to do that
1708   - * otherwise we end up with a stale timer.
1709   - */
1710   - if (timer->state == HRTIMER_STATE_MIGRATE) {
1711   - timer->state = HRTIMER_STATE_PENDING;
1712   - list_add_tail(&timer->cb_entry,
1713   - &new_base->cpu_base->cb_pending);
1714   - raise = 1;
1715   - }
1716   -#endif
1717 1528 /* Clear the migration state bit */
1718 1529 timer->state &= ~HRTIMER_STATE_MIGRATE;
1719 1530 }
1720   - return raise;
1721 1531 }
1722 1532  
1723   -#ifdef CONFIG_HIGH_RES_TIMERS
1724   -static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
1725   - struct hrtimer_cpu_base *new_base)
  1533 +static int migrate_hrtimers(int scpu)
1726 1534 {
1727   - struct hrtimer *timer;
1728   - int raise = 0;
1729   -
1730   - while (!list_empty(&old_base->cb_pending)) {
1731   - timer = list_entry(old_base->cb_pending.next,
1732   - struct hrtimer, cb_entry);
1733   -
1734   - __remove_hrtimer(timer, timer->base, HRTIMER_STATE_PENDING, 0);
1735   - timer->base = &new_base->clock_base[timer->base->index];
1736   - list_add_tail(&timer->cb_entry, &new_base->cb_pending);
1737   - raise = 1;
1738   - }
1739   - return raise;
1740   -}
1741   -#else
1742   -static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
1743   - struct hrtimer_cpu_base *new_base)
1744   -{
1745   - return 0;
1746   -}
1747   -#endif
1748   -
1749   -static void migrate_hrtimers(int cpu)
1750   -{
1751 1535 struct hrtimer_cpu_base *old_base, *new_base;
1752   - int i, raise = 0;
  1536 + int dcpu, i;
1753 1537  
1754   - BUG_ON(cpu_online(cpu));
1755   - old_base = &per_cpu(hrtimer_bases, cpu);
  1538 + BUG_ON(cpu_online(scpu));
  1539 + old_base = &per_cpu(hrtimer_bases, scpu);
1756 1540 new_base = &get_cpu_var(hrtimer_bases);
1757 1541  
1758   - tick_cancel_sched_timer(cpu);
  1542 + dcpu = smp_processor_id();
  1543 +
  1544 + tick_cancel_sched_timer(scpu);
1759 1545 /*
1760 1546 * The caller is globally serialized and nobody else
1761 1547 * takes two locks at once, deadlock is not possible.
... ... @@ -1764,41 +1550,47 @@
1764 1550 spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1765 1551  
1766 1552 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
1767   - if (migrate_hrtimer_list(&old_base->clock_base[i],
1768   - &new_base->clock_base[i], cpu))
1769   - raise = 1;
  1553 + migrate_hrtimer_list(&old_base->clock_base[i],
  1554 + &new_base->clock_base[i]);
1770 1555 }
1771 1556  
1772   - if (migrate_hrtimer_pending(old_base, new_base))
1773   - raise = 1;
1774   -
1775 1557 spin_unlock(&old_base->lock);
1776 1558 spin_unlock_irq(&new_base->lock);
1777 1559 put_cpu_var(hrtimer_bases);
1778 1560  
1779   - if (raise)
1780   - hrtimer_raise_softirq();
  1561 + return dcpu;
1781 1562 }
  1563 +
  1564 +static void tickle_timers(void *arg)
  1565 +{
  1566 + hrtimer_peek_ahead_timers();
  1567 +}
  1568 +
1782 1569 #endif /* CONFIG_HOTPLUG_CPU */
1783 1570  
1784 1571 static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
1785 1572 unsigned long action, void *hcpu)
1786 1573 {
1787   - unsigned int cpu = (long)hcpu;
  1574 + int scpu = (long)hcpu;
1788 1575  
1789 1576 switch (action) {
1790 1577  
1791 1578 case CPU_UP_PREPARE:
1792 1579 case CPU_UP_PREPARE_FROZEN:
1793   - init_hrtimers_cpu(cpu);
  1580 + init_hrtimers_cpu(scpu);
1794 1581 break;
1795 1582  
1796 1583 #ifdef CONFIG_HOTPLUG_CPU
1797 1584 case CPU_DEAD:
1798 1585 case CPU_DEAD_FROZEN:
1799   - clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
1800   - migrate_hrtimers(cpu);
  1586 + {
  1587 + int dcpu;
  1588 +
  1589 + clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
  1590 + dcpu = migrate_hrtimers(scpu);
  1591 + smp_call_function_single(dcpu, tickle_timers, NULL, 0);
1801 1592 break;
  1593 + }
1802 1594 #endif
1803 1595  
1804 1596 default:
... ... @@ -1817,9 +1609,6 @@
1817 1609 hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
1818 1610 (void *)(long)smp_processor_id());
1819 1611 register_cpu_notifier(&hrtimers_nb);
1820   -#ifdef CONFIG_HIGH_RES_TIMERS
1821   - open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
1822   -#endif
1823 1612 }
1824 1613  
1825 1614 /**
kernel/posix-timers.c
... ... @@ -116,7 +116,7 @@
116 116 * must supply functions here, even if the function just returns
117 117 * ENOSYS. The standard POSIX timer management code assumes the
118 118 * following: 1.) The k_itimer struct (sched.h) is used for the
119   - * timer. 2.) The list, it_lock, it_clock, it_id and it_process
  119 + * timer. 2.) The list, it_lock, it_clock, it_id and it_pid
120 120 * fields are not modified by timer code.
121 121 *
122 122 * At this time all functions EXCEPT clock_nanosleep can be
... ... @@ -319,7 +319,8 @@
319 319  
320 320 int posix_timer_event(struct k_itimer *timr, int si_private)
321 321 {
322   - int shared, ret;
  322 + struct task_struct *task;
  323 + int shared, ret = -1;
323 324 /*
324 325 * FIXME: if ->sigq is queued we can race with
325 326 * dequeue_signal()->do_schedule_next_timer().
... ... @@ -333,8 +334,13 @@
333 334 */
334 335 timr->sigq->info.si_sys_private = si_private;
335 336  
336   - shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
337   - ret = send_sigqueue(timr->sigq, timr->it_process, shared);
  337 + rcu_read_lock();
  338 + task = pid_task(timr->it_pid, PIDTYPE_PID);
  339 + if (task) {
  340 + shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
  341 + ret = send_sigqueue(timr->sigq, task, shared);
  342 + }
  343 + rcu_read_unlock();
338 344 /* If we failed to send the signal the timer stops. */
339 345 return ret > 0;
340 346 }
... ... @@ -411,7 +417,7 @@
411 417 return ret;
412 418 }
413 419  
414   -static struct task_struct * good_sigevent(sigevent_t * event)
  420 +static struct pid *good_sigevent(sigevent_t * event)
415 421 {
416 422 struct task_struct *rtn = current->group_leader;
417 423  
... ... @@ -425,7 +431,7 @@
425 431 ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
426 432 return NULL;
427 433  
428   - return rtn;
  434 + return task_pid(rtn);
429 435 }
430 436  
431 437 void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
... ... @@ -464,6 +470,7 @@
464 470 idr_remove(&posix_timers_id, tmr->it_id);
465 471 spin_unlock_irqrestore(&idr_lock, flags);
466 472 }
  473 + put_pid(tmr->it_pid);
467 474 sigqueue_free(tmr->sigq);
468 475 kmem_cache_free(posix_timers_cache, tmr);
469 476 }
... ... @@ -477,7 +484,6 @@
477 484 {
478 485 struct k_itimer *new_timer;
479 486 int error, new_timer_id;
480   - struct task_struct *process;
481 487 sigevent_t event;
482 488 int it_id_set = IT_ID_NOT_SET;
483 489  
484 490  
... ... @@ -531,11 +537,9 @@
531 537 goto out;
532 538 }
533 539 rcu_read_lock();
534   - process = good_sigevent(&event);
535   - if (process)
536   - get_task_struct(process);
  540 + new_timer->it_pid = get_pid(good_sigevent(&event));
537 541 rcu_read_unlock();
538   - if (!process) {
  542 + if (!new_timer->it_pid) {
539 543 error = -EINVAL;
540 544 goto out;
541 545 }
... ... @@ -543,8 +547,7 @@
543 547 event.sigev_notify = SIGEV_SIGNAL;
544 548 event.sigev_signo = SIGALRM;
545 549 event.sigev_value.sival_int = new_timer->it_id;
546   - process = current->group_leader;
547   - get_task_struct(process);
  550 + new_timer->it_pid = get_pid(task_tgid(current));
548 551 }
549 552  
550 553 new_timer->it_sigev_notify = event.sigev_notify;
... ... @@ -554,7 +557,7 @@
554 557 new_timer->sigq->info.si_code = SI_TIMER;
555 558  
556 559 spin_lock_irq(&current->sighand->siglock);
557   - new_timer->it_process = process;
  560 + new_timer->it_signal = current->signal;
558 561 list_add(&new_timer->list, &current->signal->posix_timers);
559 562 spin_unlock_irq(&current->sighand->siglock);
560 563  
... ... @@ -589,8 +592,7 @@
589 592 timr = idr_find(&posix_timers_id, (int)timer_id);
590 593 if (timr) {
591 594 spin_lock(&timr->it_lock);
592   - if (timr->it_process &&
593   - same_thread_group(timr->it_process, current)) {
  595 + if (timr->it_signal == current->signal) {
594 596 spin_unlock(&idr_lock);
595 597 return timr;
596 598 }
... ... @@ -837,8 +839,7 @@
837 839 * This keeps any tasks waiting on the spin lock from thinking
838 840 * they got something (see the lock code above).
839 841 */
840   - put_task_struct(timer->it_process);
841   - timer->it_process = NULL;
  842 + timer->it_signal = NULL;
842 843  
843 844 unlock_timer(timer, flags);
844 845 release_posix_timer(timer, IT_ID_SET);
... ... @@ -864,8 +865,7 @@
864 865 * This keeps any tasks waiting on the spin lock from thinking
865 866 * they got something (see the lock code above).
866 867 */
867   - put_task_struct(timer->it_process);
868   - timer->it_process = NULL;
  868 + timer->it_signal = NULL;
869 869  
870 870 unlock_timer(timer, flags);
871 871 release_posix_timer(timer, IT_ID_SET);
kernel/sched.c
... ... @@ -209,7 +209,6 @@
209 209 hrtimer_init(&rt_b->rt_period_timer,
210 210 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
211 211 rt_b->rt_period_timer.function = sched_rt_period_timer;
212   - rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
213 212 }
214 213  
215 214 static inline int rt_bandwidth_enabled(void)
... ... @@ -1139,7 +1138,6 @@
1139 1138  
1140 1139 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1141 1140 rq->hrtick_timer.function = hrtick;
1142   - rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
1143 1141 }
1144 1142 #else /* CONFIG_SCHED_HRTICK */
1145 1143 static inline void hrtick_clear(struct rq *rq)
kernel/time/ntp.c
... ... @@ -131,7 +131,7 @@
131 131 {
132 132 enum hrtimer_restart res = HRTIMER_NORESTART;
133 133  
134   - write_seqlock_irq(&xtime_lock);
  134 + write_seqlock(&xtime_lock);
135 135  
136 136 switch (time_state) {
137 137 case TIME_OK:
... ... @@ -164,7 +164,7 @@
164 164 }
165 165 update_vsyscall(&xtime, clock);
166 166  
167   - write_sequnlock_irq(&xtime_lock);
  167 + write_sequnlock(&xtime_lock);
168 168  
169 169 return res;
170 170 }
kernel/time/tick-sched.c
... ... @@ -247,7 +247,7 @@
247 247 if (need_resched())
248 248 goto end;
249 249  
250   - if (unlikely(local_softirq_pending())) {
  250 + if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
251 251 static int ratelimit;
252 252  
253 253 if (ratelimit < 10) {
254 254  
... ... @@ -282,8 +282,31 @@
282 282 /* Schedule the tick, if we are at least one jiffie off */
283 283 if ((long)delta_jiffies >= 1) {
284 284  
  285 + /*
  286 + * calculate the expiry time for the next timer wheel
  287 + * timer
  288 + */
  289 + expires = ktime_add_ns(last_update, tick_period.tv64 *
  290 + delta_jiffies);
  291 +
  292 + /*
  293 + * If this cpu is the one which updates jiffies, then
  294 + * give up the assignment and let it be taken by the
  295 + * cpu which runs the tick timer next, which might be
  296 + * this cpu as well. If we don't drop this here the
  297 + * jiffies might be stale and do_timer() never
  298 + * invoked.
  299 + */
  300 + if (cpu == tick_do_timer_cpu)
  301 + tick_do_timer_cpu = TICK_DO_TIMER_NONE;
  302 +
285 303 if (delta_jiffies > 1)
286 304 cpu_set(cpu, nohz_cpu_mask);
  305 +
  306 + /* Skip reprogram of event if its not changed */
  307 + if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
  308 + goto out;
  309 +
287 310 /*
288 311 * nohz_stop_sched_tick can be called several times before
289 312 * the nohz_restart_sched_tick is called. This happens when
... ... @@ -306,17 +329,6 @@
306 329 rcu_enter_nohz();
307 330 }
308 331  
309   - /*
310   - * If this cpu is the one which updates jiffies, then
311   - * give up the assignment and let it be taken by the
312   - * cpu which runs the tick timer next, which might be
313   - * this cpu as well. If we don't drop this here the
314   - * jiffies might be stale and do_timer() never
315   - * invoked.
316   - */
317   - if (cpu == tick_do_timer_cpu)
318   - tick_do_timer_cpu = TICK_DO_TIMER_NONE;
319   -
320 332 ts->idle_sleeps++;
321 333  
322 334 /*
... ... @@ -332,12 +344,7 @@
332 344 goto out;
333 345 }
334 346  
335   - /*
336   - * calculate the expiry time for the next timer wheel
337   - * timer
338   - */
339   - expires = ktime_add_ns(last_update, tick_period.tv64 *
340   - delta_jiffies);
  347 + /* Mark expiries */
341 348 ts->idle_expires = expires;
342 349  
343 350 if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
... ... @@ -681,7 +688,6 @@
681 688 */
682 689 hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
683 690 ts->sched_timer.function = tick_sched_timer;
684   - ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
685 691  
686 692 /* Get the next period (per cpu) */
687 693 hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
kernel/trace/trace_sysprof.c
... ... @@ -202,7 +202,6 @@
202 202  
203 203 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
204 204 hrtimer->function = stack_trace_timer_fn;
205   - hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
206 205  
207 206 hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
208 207 }
sound/core/hrtimer.c
... ... @@ -57,7 +57,6 @@
57 57 return -ENOMEM;
58 58 hrtimer_init(&stime->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
59 59 stime->timer = t;
60   - stime->hrt.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
61 60 stime->hrt.function = snd_hrtimer_callback;
62 61 t->private_data = stime;
63 62 return 0;
sound/drivers/pcsp/pcsp.c
... ... @@ -96,7 +96,6 @@
96 96 return -EINVAL;
97 97  
98 98 hrtimer_init(&pcsp_chip.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
99   - pcsp_chip.timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
100 99 pcsp_chip.timer.function = pcsp_do_timer;
101 100  
102 101 card = snd_card_new(index, id, THIS_MODULE, 0);