Commit ca109491f612aab5c8152207631c0444f63da97f

Authored by Peter Zijlstra
Committed by Ingo Molnar
1 parent ed313489ba

hrtimer: removing all ur callback modes

Impact: cleanup, move all hrtimer processing into hardirq context

This is an attempt at removing some of the hrtimer complexity by
reducing the number of callback modes to 1.

This means that all hrtimer callback functions will be run from HARD-irq
context.
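
For illustration - this snippet is not part of the patch and the names
are made up - a callback that used to take spin_lock_irq() can now take
the plain spin_lock(), because IRQs are already disabled in hardirq
context; this is exactly the change made to ads7846.c below:

  #include <linux/hrtimer.h>
  #include <linux/spinlock.h>

  struct my_dev {
          spinlock_t      lock;
          struct hrtimer  timer;
  };

  static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
  {
          struct my_dev *dev = container_of(t, struct my_dev, timer);

          /* hardirq context: IRQs are off, no _irq variant needed */
          spin_lock(&dev->lock);
          /* ... short, non-sleeping work only ... */
          spin_unlock(&dev->lock);

          return HRTIMER_NORESTART;
  }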

I went through all 30-odd hrtimer callback functions in the kernel and
saw only one that I'm not quite sure of, the one in net/can/bcm.c -
hence I'm CC-ing the folks responsible for that code.

Furthermore, the hrtimer core now calls callbacks directly with IRQs
disabled when you try to enqueue an already-expired timer. If that
timer is a periodic timer (which should use hrtimer_forward() to
advance its expiry time), it can end up in an infinite recursion,
because hrtimer_forward() doesn't round up to the next timer
granularity and therefore keeps invoking the callback - obviously this
needs a fix.
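
For reference, the usual shape of a periodic callback (again with
made-up names, not from this patch) - it only stays clear of the
recursion described above if hrtimer_forward() actually pushes the
expiry into the future:

  #include <linux/hrtimer.h>
  #include <linux/ktime.h>

  static enum hrtimer_restart my_periodic_fn(struct hrtimer *t)
  {
          /*
           * Advance the expiry by whole 1ms periods past 'now'. If
           * the new expiry were still in the past, re-enqueueing the
           * timer would expire - and run - it again immediately,
           * which is the recursion worried about above.
           */
          hrtimer_forward(t, hrtimer_cb_get_time(t),
                          ktime_set(0, NSEC_PER_MSEC));
          return HRTIMER_RESTART;
  }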

Aside from that, this seems to compile and actually boot on my
dual-core test box - although I'm sure there are some bugs in it;
me not hitting any makes me certain :-)

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Showing 9 changed files with 37 additions and 293 deletions

drivers/input/touchscreen/ads7846.c
... ... @@ -697,7 +697,7 @@
697 697 struct ads7846 *ts = container_of(handle, struct ads7846, timer);
698 698 int status = 0;
699 699  
700   - spin_lock_irq(&ts->lock);
  700 + spin_lock(&ts->lock);
701 701  
702 702 if (unlikely(!get_pendown_state(ts) ||
703 703 device_suspended(&ts->spi->dev))) {
... ... @@ -728,7 +728,7 @@
728 728 dev_err(&ts->spi->dev, "spi_async --> %d\n", status);
729 729 }
730 730  
731   - spin_unlock_irq(&ts->lock);
  731 + spin_unlock(&ts->lock);
732 732 return HRTIMER_NORESTART;
733 733 }
734 734  
include/linux/hrtimer.h
... ... @@ -43,26 +43,6 @@
43 43 };
44 44  
45 45 /*
46   - * hrtimer callback modes:
47   - *
48   - * HRTIMER_CB_SOFTIRQ: Callback must run in softirq context
49   - * HRTIMER_CB_IRQSAFE_PERCPU: Callback must run in hardirq context
50   - * Special mode for tick emulation and
51   - * scheduler timer. Such timers are per
52   - * cpu and not allowed to be migrated on
53   - * cpu unplug.
54   - * HRTIMER_CB_IRQSAFE_UNLOCKED: Callback should run in hardirq context
55   - * with timer->base lock unlocked
56   - * used for timers which call wakeup to
57   - * avoid lock order problems with rq->lock
58   - */
59   -enum hrtimer_cb_mode {
60   - HRTIMER_CB_SOFTIRQ,
61   - HRTIMER_CB_IRQSAFE_PERCPU,
62   - HRTIMER_CB_IRQSAFE_UNLOCKED,
63   -};
64   -
65   -/*
66 46 * Values to track state of the timer
67 47 *
68 48 * Possible states:
... ... @@ -70,7 +50,6 @@
70 50 * 0x00 inactive
71 51 * 0x01 enqueued into rbtree
72 52 * 0x02 callback function running
73   - * 0x04 callback pending (high resolution mode)
74 53 *
75 54 * Special cases:
76 55 * 0x03 callback function running and enqueued
... ... @@ -92,8 +71,7 @@
92 71 #define HRTIMER_STATE_INACTIVE 0x00
93 72 #define HRTIMER_STATE_ENQUEUED 0x01
94 73 #define HRTIMER_STATE_CALLBACK 0x02
95   -#define HRTIMER_STATE_PENDING 0x04
96   -#define HRTIMER_STATE_MIGRATE 0x08
  74 +#define HRTIMER_STATE_MIGRATE 0x04
97 75  
98 76 /**
99 77 * struct hrtimer - the basic hrtimer structure
... ... @@ -109,8 +87,6 @@
109 87 * @function: timer expiry callback function
110 88 * @base: pointer to the timer base (per cpu and per clock)
111 89 * @state: state information (See bit values above)
112   - * @cb_mode: high resolution timer feature to select the callback execution
113   - * mode
114 90 * @cb_entry: list head to enqueue an expired timer into the callback list
115 91 * @start_site: timer statistics field to store the site where the timer
116 92 * was started
... ... @@ -129,7 +105,6 @@
129 105 struct hrtimer_clock_base *base;
130 106 unsigned long state;
131 107 struct list_head cb_entry;
132   - enum hrtimer_cb_mode cb_mode;
133 108 #ifdef CONFIG_TIMER_STATS
134 109 int start_pid;
135 110 void *start_site;
136 111  
... ... @@ -188,15 +163,11 @@
188 163 * @check_clocks: Indicator, when set evaluate time source and clock
189 164 * event devices whether high resolution mode can be
190 165 * activated.
191   - * @cb_pending: Expired timers are moved from the rbtree to this
192   - * list in the timer interrupt. The list is processed
193   - * in the softirq.
194 166 * @nr_events: Total number of timer interrupt events
195 167 */
196 168 struct hrtimer_cpu_base {
197 169 spinlock_t lock;
198 170 struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
199   - struct list_head cb_pending;
200 171 #ifdef CONFIG_HIGH_RES_TIMERS
201 172 ktime_t expires_next;
202 173 int hres_active;
... ... @@ -404,8 +375,7 @@
404 375 */
405 376 static inline int hrtimer_is_queued(struct hrtimer *timer)
406 377 {
407   - return timer->state &
408   - (HRTIMER_STATE_ENQUEUED | HRTIMER_STATE_PENDING);
  378 + return timer->state & HRTIMER_STATE_ENQUEUED;
409 379 }
410 380  
411 381 /*
include/linux/interrupt.h
... ... @@ -251,9 +251,6 @@
251 251 BLOCK_SOFTIRQ,
252 252 TASKLET_SOFTIRQ,
253 253 SCHED_SOFTIRQ,
254   -#ifdef CONFIG_HIGH_RES_TIMERS
255   - HRTIMER_SOFTIRQ,
256   -#endif
257 254 RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */
258 255  
259 256 NR_SOFTIRQS
kernel/hrtimer.c
... ... @@ -442,22 +442,6 @@
442 442 static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
443 443 #endif
444 444  
445   -/*
446   - * Check, whether the timer is on the callback pending list
447   - */
448   -static inline int hrtimer_cb_pending(const struct hrtimer *timer)
449   -{
450   - return timer->state & HRTIMER_STATE_PENDING;
451   -}
452   -
453   -/*
454   - * Remove a timer from the callback pending list
455   - */
456   -static inline void hrtimer_remove_cb_pending(struct hrtimer *timer)
457   -{
458   - list_del_init(&timer->cb_entry);
459   -}
460   -
461 445 /* High resolution timer related functions */
462 446 #ifdef CONFIG_HIGH_RES_TIMERS
463 447  
... ... @@ -651,6 +635,8 @@
651 635 {
652 636 }
653 637  
  638 +static void __run_hrtimer(struct hrtimer *timer);
  639 +
654 640 /*
655 641 * When High resolution timers are active, try to reprogram. Note, that in case
656 642 * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
... ... @@ -661,31 +647,14 @@
661 647 struct hrtimer_clock_base *base)
662 648 {
663 649 if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
664   -
665   - /* Timer is expired, act upon the callback mode */
666   - switch(timer->cb_mode) {
667   - case HRTIMER_CB_IRQSAFE_PERCPU:
668   - case HRTIMER_CB_IRQSAFE_UNLOCKED:
669   - /*
670   - * This is solely for the sched tick emulation with
671   - * dynamic tick support to ensure that we do not
672   - * restart the tick right on the edge and end up with
673   - * the tick timer in the softirq ! The calling site
674   - * takes care of this. Also used for hrtimer sleeper !
675   - */
676   - debug_hrtimer_deactivate(timer);
677   - return 1;
678   - case HRTIMER_CB_SOFTIRQ:
679   - /*
680   - * Move everything else into the softirq pending list !
681   - */
682   - list_add_tail(&timer->cb_entry,
683   - &base->cpu_base->cb_pending);
684   - timer->state = HRTIMER_STATE_PENDING;
685   - return 1;
686   - default:
687   - BUG();
688   - }
  650 + /*
  651 + * XXX: recursion check?
  652 + * hrtimer_forward() should round up with timer granularity
  653 + * so that we never get into inf recursion here,
  654 + * it doesn't do that though
  655 + */
  656 + __run_hrtimer(timer);
  657 + return 1;
689 658 }
690 659 return 0;
691 660 }
... ... @@ -724,11 +693,6 @@
724 693 return 1;
725 694 }
726 695  
727   -static inline void hrtimer_raise_softirq(void)
728   -{
729   - raise_softirq(HRTIMER_SOFTIRQ);
730   -}
731   -
732 696 #else
733 697  
734 698 static inline int hrtimer_hres_active(void) { return 0; }
... ... @@ -747,7 +711,6 @@
747 711 {
748 712 return 0;
749 713 }
750   -static inline void hrtimer_raise_softirq(void) { }
751 714  
752 715 #endif /* CONFIG_HIGH_RES_TIMERS */
753 716  
... ... @@ -890,10 +853,7 @@
890 853 struct hrtimer_clock_base *base,
891 854 unsigned long newstate, int reprogram)
892 855 {
893   - /* High res. callback list. NOP for !HIGHRES */
894   - if (hrtimer_cb_pending(timer))
895   - hrtimer_remove_cb_pending(timer);
896   - else {
  856 + if (timer->state & HRTIMER_STATE_ENQUEUED) {
897 857 /*
898 858 * Remove the timer from the rbtree and replace the
899 859 * first entry pointer if necessary.
... ... @@ -953,7 +913,7 @@
953 913 {
954 914 struct hrtimer_clock_base *base, *new_base;
955 915 unsigned long flags;
956   - int ret, raise;
  916 + int ret;
957 917  
958 918 base = lock_hrtimer_base(timer, &flags);
959 919  
960 920  
... ... @@ -988,26 +948,8 @@
988 948 enqueue_hrtimer(timer, new_base,
989 949 new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
990 950  
991   - /*
992   - * The timer may be expired and moved to the cb_pending
993   - * list. We can not raise the softirq with base lock held due
994   - * to a possible deadlock with runqueue lock.
995   - */
996   - raise = timer->state == HRTIMER_STATE_PENDING;
997   -
998   - /*
999   - * We use preempt_disable to prevent this task from migrating after
1000   - * setting up the softirq and raising it. Otherwise, if me migrate
1001   - * we will raise the softirq on the wrong CPU.
1002   - */
1003   - preempt_disable();
1004   -
1005 951 unlock_hrtimer_base(timer, &flags);
1006 952  
1007   - if (raise)
1008   - hrtimer_raise_softirq();
1009   - preempt_enable();
1010   -
1011 953 return ret;
1012 954 }
1013 955 EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
... ... @@ -1192,75 +1134,6 @@
1192 1134 }
1193 1135 EXPORT_SYMBOL_GPL(hrtimer_get_res);
1194 1136  
1195   -static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
1196   -{
1197   - spin_lock_irq(&cpu_base->lock);
1198   -
1199   - while (!list_empty(&cpu_base->cb_pending)) {
1200   - enum hrtimer_restart (*fn)(struct hrtimer *);
1201   - struct hrtimer *timer;
1202   - int restart;
1203   - int emulate_hardirq_ctx = 0;
1204   -
1205   - timer = list_entry(cpu_base->cb_pending.next,
1206   - struct hrtimer, cb_entry);
1207   -
1208   - debug_hrtimer_deactivate(timer);
1209   - timer_stats_account_hrtimer(timer);
1210   -
1211   - fn = timer->function;
1212   - /*
1213   - * A timer might have been added to the cb_pending list
1214   - * when it was migrated during a cpu-offline operation.
1215   - * Emulate hardirq context for such timers.
1216   - */
1217   - if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
1218   - timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED)
1219   - emulate_hardirq_ctx = 1;
1220   -
1221   - __remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
1222   - spin_unlock_irq(&cpu_base->lock);
1223   -
1224   - if (unlikely(emulate_hardirq_ctx)) {
1225   - local_irq_disable();
1226   - restart = fn(timer);
1227   - local_irq_enable();
1228   - } else
1229   - restart = fn(timer);
1230   -
1231   - spin_lock_irq(&cpu_base->lock);
1232   -
1233   - timer->state &= ~HRTIMER_STATE_CALLBACK;
1234   - if (restart == HRTIMER_RESTART) {
1235   - BUG_ON(hrtimer_active(timer));
1236   - /*
1237   - * Enqueue the timer, allow reprogramming of the event
1238   - * device
1239   - */
1240   - enqueue_hrtimer(timer, timer->base, 1);
1241   - } else if (hrtimer_active(timer)) {
1242   - /*
1243   - * If the timer was rearmed on another CPU, reprogram
1244   - * the event device.
1245   - */
1246   - struct hrtimer_clock_base *base = timer->base;
1247   -
1248   - if (base->first == &timer->node &&
1249   - hrtimer_reprogram(timer, base)) {
1250   - /*
1251   - * Timer is expired. Thus move it from tree to
1252   - * pending list again.
1253   - */
1254   - __remove_hrtimer(timer, base,
1255   - HRTIMER_STATE_PENDING, 0);
1256   - list_add_tail(&timer->cb_entry,
1257   - &base->cpu_base->cb_pending);
1258   - }
1259   - }
1260   - }
1261   - spin_unlock_irq(&cpu_base->lock);
1262   -}
1263   -
1264 1137 static void __run_hrtimer(struct hrtimer *timer)
1265 1138 {
1266 1139 struct hrtimer_clock_base *base = timer->base;
1267 1140  
1268 1141  
1269 1142  
... ... @@ -1268,27 +1141,23 @@
1268 1141 enum hrtimer_restart (*fn)(struct hrtimer *);
1269 1142 int restart;
1270 1143  
  1144 + WARN_ON(!irqs_disabled());
  1145 +
1271 1146 debug_hrtimer_deactivate(timer);
1272 1147 __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
1273 1148 timer_stats_account_hrtimer(timer);
1274   -
1275 1149 fn = timer->function;
1276   - if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
1277   - timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) {
1278   - /*
1279   - * Used for scheduler timers, avoid lock inversion with
1280   - * rq->lock and tasklist_lock.
1281   - *
1282   - * These timers are required to deal with enqueue expiry
1283   - * themselves and are not allowed to migrate.
1284   - */
1285   - spin_unlock(&cpu_base->lock);
1286   - restart = fn(timer);
1287   - spin_lock(&cpu_base->lock);
1288   - } else
1289   - restart = fn(timer);
1290 1150  
1291 1151 /*
  1152 + * Because we run timers from hardirq context, there is no chance
  1153 + * they get migrated to another cpu, therefore its safe to unlock
  1154 + * the timer base.
  1155 + */
  1156 + spin_unlock(&cpu_base->lock);
  1157 + restart = fn(timer);
  1158 + spin_lock(&cpu_base->lock);
  1159 +
  1160 + /*
1292 1161 * Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid
1293 1162 * reprogramming of the event hardware. This happens at the end of this
1294 1163 * function anyway.
... ... @@ -1311,7 +1180,7 @@
1311 1180 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
1312 1181 struct hrtimer_clock_base *base;
1313 1182 ktime_t expires_next, now;
1314   - int i, raise = 0;
  1183 + int i;
1315 1184  
1316 1185 BUG_ON(!cpu_base->hres_active);
1317 1186 cpu_base->nr_events++;
... ... @@ -1360,16 +1229,6 @@
1360 1229 break;
1361 1230 }
1362 1231  
1363   - /* Move softirq callbacks to the pending list */
1364   - if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
1365   - __remove_hrtimer(timer, base,
1366   - HRTIMER_STATE_PENDING, 0);
1367   - list_add_tail(&timer->cb_entry,
1368   - &base->cpu_base->cb_pending);
1369   - raise = 1;
1370   - continue;
1371   - }
1372   -
1373 1232 __run_hrtimer(timer);
1374 1233 }
1375 1234 spin_unlock(&cpu_base->lock);
... ... @@ -1383,10 +1242,6 @@
1383 1242 if (tick_program_event(expires_next, 0))
1384 1243 goto retry;
1385 1244 }
1386   -
1387   - /* Raise softirq ? */
1388   - if (raise)
1389   - raise_softirq(HRTIMER_SOFTIRQ);
1390 1245 }
1391 1246  
1392 1247 /**
... ... @@ -1413,11 +1268,6 @@
1413 1268 local_irq_restore(flags);
1414 1269 }
1415 1270  
1416   -static void run_hrtimer_softirq(struct softirq_action *h)
1417   -{
1418   - run_hrtimer_pending(&__get_cpu_var(hrtimer_bases));
1419   -}
1420   -
1421 1271 #endif /* CONFIG_HIGH_RES_TIMERS */
1422 1272  
1423 1273 /*
... ... @@ -1429,8 +1279,6 @@
1429 1279 */
1430 1280 void hrtimer_run_pending(void)
1431 1281 {
1432   - struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
1433   -
1434 1282 if (hrtimer_hres_active())
1435 1283 return;
1436 1284  
... ... @@ -1444,8 +1292,6 @@
1444 1292 */
1445 1293 if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
1446 1294 hrtimer_switch_to_hres();
1447   -
1448   - run_hrtimer_pending(cpu_base);
1449 1295 }
1450 1296  
1451 1297 /*
... ... @@ -1482,14 +1328,6 @@
1482 1328 hrtimer_get_expires_tv64(timer))
1483 1329 break;
1484 1330  
1485   - if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
1486   - __remove_hrtimer(timer, base,
1487   - HRTIMER_STATE_PENDING, 0);
1488   - list_add_tail(&timer->cb_entry,
1489   - &base->cpu_base->cb_pending);
1490   - continue;
1491   - }
1492   -
1493 1331 __run_hrtimer(timer);
1494 1332 }
1495 1333 spin_unlock(&cpu_base->lock);
... ... @@ -1516,9 +1354,6 @@
1516 1354 {
1517 1355 sl->timer.function = hrtimer_wakeup;
1518 1356 sl->task = task;
1519   -#ifdef CONFIG_HIGH_RES_TIMERS
1520   - sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
1521   -#endif
1522 1357 }
1523 1358  
1524 1359 static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
1525 1360  
1526 1361  
... ... @@ -1655,18 +1490,16 @@
1655 1490 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
1656 1491 cpu_base->clock_base[i].cpu_base = cpu_base;
1657 1492  
1658   - INIT_LIST_HEAD(&cpu_base->cb_pending);
1659 1493 hrtimer_init_hres(cpu_base);
1660 1494 }
1661 1495  
1662 1496 #ifdef CONFIG_HOTPLUG_CPU
1663 1497  
1664   -static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
1665   - struct hrtimer_clock_base *new_base, int dcpu)
  1498 +static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
  1499 + struct hrtimer_clock_base *new_base, int dcpu)
1666 1500 {
1667 1501 struct hrtimer *timer;
1668 1502 struct rb_node *node;
1669   - int raise = 0;
1670 1503  
1671 1504 while ((node = rb_first(&old_base->active))) {
1672 1505 timer = rb_entry(node, struct hrtimer, node);
... ... @@ -1674,18 +1507,6 @@
1674 1507 debug_hrtimer_deactivate(timer);
1675 1508  
1676 1509 /*
1677   - * Should not happen. Per CPU timers should be
1678   - * canceled _before_ the migration code is called
1679   - */
1680   - if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) {
1681   - __remove_hrtimer(timer, old_base,
1682   - HRTIMER_STATE_INACTIVE, 0);
1683   - WARN(1, "hrtimer (%p %p)active but cpu %d dead\n",
1684   - timer, timer->function, dcpu);
1685   - continue;
1686   - }
1687   -
1688   - /*
1689 1510 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
1690 1511 * timer could be seen as !active and just vanish away
1691 1512 * under us on another CPU
1692 1513  
1693 1514  
1694 1515  
... ... @@ -1708,48 +1529,19 @@
1708 1529 * otherwise we end up with a stale timer.
1709 1530 */
1710 1531 if (timer->state == HRTIMER_STATE_MIGRATE) {
1711   - timer->state = HRTIMER_STATE_PENDING;
1712   - list_add_tail(&timer->cb_entry,
1713   - &new_base->cpu_base->cb_pending);
1714   - raise = 1;
  1532 + /* XXX: running on offline cpu */
  1533 + __run_hrtimer(timer);
1715 1534 }
1716 1535 #endif
1717 1536 /* Clear the migration state bit */
1718 1537 timer->state &= ~HRTIMER_STATE_MIGRATE;
1719 1538 }
1720   - return raise;
1721 1539 }
1722 1540  
1723   -#ifdef CONFIG_HIGH_RES_TIMERS
1724   -static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
1725   - struct hrtimer_cpu_base *new_base)
1726   -{
1727   - struct hrtimer *timer;
1728   - int raise = 0;
1729   -
1730   - while (!list_empty(&old_base->cb_pending)) {
1731   - timer = list_entry(old_base->cb_pending.next,
1732   - struct hrtimer, cb_entry);
1733   -
1734   - __remove_hrtimer(timer, timer->base, HRTIMER_STATE_PENDING, 0);
1735   - timer->base = &new_base->clock_base[timer->base->index];
1736   - list_add_tail(&timer->cb_entry, &new_base->cb_pending);
1737   - raise = 1;
1738   - }
1739   - return raise;
1740   -}
1741   -#else
1742   -static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
1743   - struct hrtimer_cpu_base *new_base)
1744   -{
1745   - return 0;
1746   -}
1747   -#endif
1748   -
1749 1541 static void migrate_hrtimers(int cpu)
1750 1542 {
1751 1543 struct hrtimer_cpu_base *old_base, *new_base;
1752   - int i, raise = 0;
  1544 + int i;
1753 1545  
1754 1546 BUG_ON(cpu_online(cpu));
1755 1547 old_base = &per_cpu(hrtimer_bases, cpu);
1756 1548  
1757 1549  
... ... @@ -1764,20 +1556,13 @@
1764 1556 spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1765 1557  
1766 1558 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
1767   - if (migrate_hrtimer_list(&old_base->clock_base[i],
1768   - &new_base->clock_base[i], cpu))
1769   - raise = 1;
  1559 + migrate_hrtimer_list(&old_base->clock_base[i],
  1560 + &new_base->clock_base[i], cpu);
1770 1561 }
1771 1562  
1772   - if (migrate_hrtimer_pending(old_base, new_base))
1773   - raise = 1;
1774   -
1775 1563 spin_unlock(&old_base->lock);
1776 1564 spin_unlock_irq(&new_base->lock);
1777 1565 put_cpu_var(hrtimer_bases);
1778   -
1779   - if (raise)
1780   - hrtimer_raise_softirq();
1781 1566 }
1782 1567 #endif /* CONFIG_HOTPLUG_CPU */
1783 1568  
... ... @@ -1817,9 +1602,6 @@
1817 1602 hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
1818 1603 (void *)(long)smp_processor_id());
1819 1604 register_cpu_notifier(&hrtimers_nb);
1820   -#ifdef CONFIG_HIGH_RES_TIMERS
1821   - open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
1822   -#endif
1823 1605 }
1824 1606  
1825 1607 /**
kernel/sched.c
... ... @@ -203,7 +203,6 @@
203 203 hrtimer_init(&rt_b->rt_period_timer,
204 204 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
205 205 rt_b->rt_period_timer.function = sched_rt_period_timer;
206   - rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
207 206 }
208 207  
209 208 static inline int rt_bandwidth_enabled(void)
... ... @@ -1139,7 +1138,6 @@
1139 1138  
1140 1139 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1141 1140 rq->hrtick_timer.function = hrtick;
1142   - rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
1143 1141 }
1144 1142 #else /* CONFIG_SCHED_HRTICK */
1145 1143 static inline void hrtick_clear(struct rq *rq)
kernel/time/ntp.c
... ... @@ -131,7 +131,7 @@
131 131 {
132 132 enum hrtimer_restart res = HRTIMER_NORESTART;
133 133  
134   - write_seqlock_irq(&xtime_lock);
  134 + write_seqlock(&xtime_lock);
135 135  
136 136 switch (time_state) {
137 137 case TIME_OK:
... ... @@ -164,7 +164,7 @@
164 164 }
165 165 update_vsyscall(&xtime, clock);
166 166  
167   - write_sequnlock_irq(&xtime_lock);
  167 + write_sequnlock(&xtime_lock);
168 168  
169 169 return res;
170 170 }
kernel/time/tick-sched.c
... ... @@ -681,7 +681,6 @@
681 681 */
682 682 hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
683 683 ts->sched_timer.function = tick_sched_timer;
684   - ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
685 684  
686 685 /* Get the next period (per cpu) */
687 686 hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
kernel/trace/trace_sysprof.c
... ... @@ -202,7 +202,6 @@
202 202  
203 203 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
204 204 hrtimer->function = stack_trace_timer_fn;
205   - hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
206 205  
207 206 hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
208 207 }
sound/drivers/pcsp/pcsp.c
... ... @@ -96,7 +96,6 @@
96 96 return -EINVAL;
97 97  
98 98 hrtimer_init(&pcsp_chip.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
99   - pcsp_chip.timer.cb_mode = HRTIMER_CB_SOFTIRQ;
100 99 pcsp_chip.timer.function = pcsp_do_timer;
101 100  
102 101 card = snd_card_new(index, id, THIS_MODULE, 0);