Commit 1217ed1ba5c67393293dfb0f03c353b118dadeb4
Committed by: Paul E. McKenney
1 parent: 29ce831000
Exists in: master and in 7 other branches
rcu: permit rcu_read_unlock() to be called while holding runqueue locks
Avoid calling into the scheduler while holding core RCU locks. This allows rcu_read_unlock() to be called while holding the runqueue locks, but only as long as there was no chance of the RCU read-side critical section having been preempted. (Otherwise, if RCU priority boosting is enabled, rcu_read_unlock() might call into the scheduler in order to unboost itself, which might allow self-deadlock on the runqueue locks within the scheduler.)

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
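As a hedged illustration (not code from this commit's diff), the pattern that becomes legal looks roughly like the following, where rq stands in for a scheduler runqueue. Because interrupts are disabled for the whole read-side critical section, the section cannot have been preempted, so rcu_read_unlock() never needs to unboost and never re-enters the scheduler:

	raw_spin_lock_irqsave(&rq->lock, flags);	/* runqueue lock held, irqs off */
	rcu_read_lock();
	/*
	 * Read-side accesses go here.  With irqs disabled, this section
	 * cannot be preempted, so no RCU priority boosting can occur.
	 */
	rcu_read_unlock();	/* safe: no unboost, hence no scheduler re-entry */
	raw_spin_unlock_irqrestore(&rq->lock, flags);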
Showing 3 changed files with 34 additions and 79 deletions
kernel/rcutree.c
@@ -1133,22 +1133,7 @@
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	if (need_report & RCU_OFL_TASKS_EXP_GP)
 		rcu_report_exp_rnp(rsp, rnp);
-
-	/*
-	 * If there are no more online CPUs for this rcu_node structure,
-	 * kill the rcu_node structure's kthread.  Otherwise, adjust its
-	 * affinity.
-	 */
-	t = rnp->node_kthread_task;
-	if (t != NULL &&
-	    rnp->qsmaskinit == 0) {
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		rnp->node_kthread_task = NULL;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
-		kthread_stop(t);
-		rcu_stop_boost_kthread(rnp);
-	} else
-		rcu_node_kthread_setaffinity(rnp, -1);
+	rcu_node_kthread_setaffinity(rnp, -1);
 }
 
 /*
@@ -1320,8 +1305,7 @@
 			return;
 		}
 		if (rnp->qsmask == 0) {
-			rcu_initiate_boost(rnp);
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
 			continue;
 		}
 		cpu = rnp->grplo;
@@ -1340,10 +1324,10 @@
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	}
 	rnp = rcu_get_root(rsp);
-	raw_spin_lock_irqsave(&rnp->lock, flags);
-	if (rnp->qsmask == 0)
-		rcu_initiate_boost(rnp);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	if (rnp->qsmask == 0) {
+		raw_spin_lock_irqsave(&rnp->lock, flags);
+		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
+	}
 }
 
 /*
@@ -1497,7 +1481,8 @@
 
 /*
  * Wake up the specified per-rcu_node-structure kthread.
- * The caller must hold ->lock.
+ * Because the per-rcu_node kthreads are immortal, we don't need
+ * to do anything to keep them alive.
  */
 static void invoke_rcu_node_kthread(struct rcu_node *rnp)
 {
 
@@ -1546,8 +1531,8 @@
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	rnp->wakemask |= rdp->grpmask;
-	invoke_rcu_node_kthread(rnp);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	invoke_rcu_node_kthread(rnp);
 }
 
 /*
@@ -1694,16 +1679,12 @@
 
 	for (;;) {
 		rnp->node_kthread_status = RCU_KTHREAD_WAITING;
-		wait_event_interruptible(rnp->node_wq, rnp->wakemask != 0 ||
-						       kthread_should_stop());
-		if (kthread_should_stop())
-			break;
+		wait_event_interruptible(rnp->node_wq, rnp->wakemask != 0);
 		rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
 		raw_spin_lock_irqsave(&rnp->lock, flags);
 		mask = rnp->wakemask;
 		rnp->wakemask = 0;
-		rcu_initiate_boost(rnp);
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
 		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
 			if ((mask & 0x1) == 0)
 				continue;
@@ -1719,6 +1700,7 @@
 			preempt_enable();
 		}
 	}
+	/* NOTREACHED */
 	rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
 	return 0;
 }
@@ -1738,7 +1720,7 @@
 	int cpu;
 	unsigned long mask = rnp->qsmaskinit;
 
-	if (rnp->node_kthread_task == NULL || mask == 0)
+	if (rnp->node_kthread_task == NULL)
 		return;
 	if (!alloc_cpumask_var(&cm, GFP_KERNEL))
 		return;
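The recurring change in the hunks above is that rcu_initiate_boost() now takes the caller's saved interrupt flags and drops rnp->lock itself on every return path, so the eventual wake_up_process() of the boost kthread runs without any core RCU lock held. A minimal sketch of this hand-off idiom, assuming a hypothetical boost_needed() predicate in place of the real checks:

	/* Callee: owns rnp->lock on entry, releases it on every path. */
	static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	{
		struct task_struct *t;

		if (!boost_needed(rnp)) {	/* hypothetical stand-in check */
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			return;
		}
		/* Drop the lock first: the wakeup below enters the scheduler,
		 * which must not happen while a core RCU lock is held. */
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		t = rnp->boost_kthread_task;
		if (t != NULL)
			wake_up_process(t);
	}

	/* Caller: acquires the lock, then hands ownership to the callee. */
	raw_spin_lock_irqsave(&rnp->lock, flags);
	rcu_initiate_boost(rnp, flags);	/* returns with rnp->lock released */

The same inversion appears in the per-CPU wakeup path above: invoke_rcu_node_kthread() is now called after raw_spin_unlock_irqrestore(), for the same reason.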
kernel/rcutree.h
@@ -444,16 +444,13 @@
 static void __init __rcu_init_preempt(void);
 static void rcu_needs_cpu_flush(void);
 static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp);
-static void rcu_initiate_boost(struct rcu_node *rnp);
+static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
 					  cpumask_var_t cm);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 						 struct rcu_node *rnp,
 						 int rnp_index);
-#ifdef CONFIG_HOTPLUG_CPU
-static void rcu_stop_boost_kthread(struct rcu_node *rnp);
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
 #endif /* #ifndef RCU_TREE_NONCORE */
kernel/rcutree_plugin.h
@@ -711,15 +711,17 @@
 static void
 sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
 {
+	unsigned long flags;
 	int must_wait = 0;
 
-	raw_spin_lock(&rnp->lock); /* irqs already disabled */
-	if (!list_empty(&rnp->blkd_tasks)) {
+	raw_spin_lock_irqsave(&rnp->lock, flags);
+	if (list_empty(&rnp->blkd_tasks))
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	else {
 		rnp->exp_tasks = rnp->blkd_tasks.next;
-		rcu_initiate_boost(rnp);
+		rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
 		must_wait = 1;
 	}
-	raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
 	if (!must_wait)
 		rcu_report_exp_rnp(rsp, rnp);
 }
@@ -1179,12 +1181,7 @@
  */
 static void rcu_boost_kthread_timer(unsigned long arg)
 {
-	unsigned long flags;
-	struct rcu_node *rnp = (struct rcu_node *)arg;
-
-	raw_spin_lock_irqsave(&rnp->lock, flags);
-	invoke_rcu_node_kthread(rnp);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	invoke_rcu_node_kthread((struct rcu_node *)arg);
 }
 
 /*
@@ -1200,10 +1197,7 @@
 	for (;;) {
 		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
 		wait_event_interruptible(rnp->boost_wq, rnp->boost_tasks ||
-							rnp->exp_tasks ||
-							kthread_should_stop());
-		if (kthread_should_stop())
-			break;
+							rnp->exp_tasks);
 		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
 		more2boost = rcu_boost(rnp);
 		if (more2boost)
@@ -1215,7 +1209,7 @@
 			spincnt = 0;
 		}
 	}
-	rnp->boost_kthread_status = RCU_KTHREAD_STOPPED;
+	/* NOTREACHED */
 	return 0;
 }
 
@@ -1225,14 +1219,17 @@
  * kthread to start boosting them.  If there is an expedited grace
  * period in progress, it is always time to boost.
  *
- * The caller must hold rnp->lock.
+ * The caller must hold rnp->lock, which this function releases,
+ * but irqs remain disabled.  The ->boost_kthread_task is immortal,
+ * so we don't need to worry about it going away.
  */
-static void rcu_initiate_boost(struct rcu_node *rnp)
+static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 {
 	struct task_struct *t;
 
 	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
 		rnp->n_balk_exp_gp_tasks++;
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		return;
 	}
 	if (rnp->exp_tasks != NULL ||
@@ -1242,11 +1239,14 @@
 	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
 		if (rnp->exp_tasks == NULL)
 			rnp->boost_tasks = rnp->gp_tasks;
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		t = rnp->boost_kthread_task;
 		if (t != NULL)
 			wake_up_process(t);
-	} else
+	} else {
 		rcu_initiate_boost_trace(rnp);
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	}
 }
 
 /*
@@ -1312,29 +1312,13 @@
 	return 0;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
+#else /* #ifdef CONFIG_RCU_BOOST */
 
-static void rcu_stop_boost_kthread(struct rcu_node *rnp)
+static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 {
-	unsigned long flags;
-	struct task_struct *t;
-
-	raw_spin_lock_irqsave(&rnp->lock, flags);
-	t = rnp->boost_kthread_task;
-	rnp->boost_kthread_task = NULL;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
-	if (t != NULL)
-		kthread_stop(t);
 }
 
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
-#else /* #ifdef CONFIG_RCU_BOOST */
-
-static void rcu_initiate_boost(struct rcu_node *rnp)
-{
-}
-
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
 					  cpumask_var_t cm)
 {
@@ -1354,14 +1338,6 @@
 {
 	return 0;
 }
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-static void rcu_stop_boost_kthread(struct rcu_node *rnp)
-{
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
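A design note on the last two hunks: because rcu_initiate_boost() now releases rnp->lock on behalf of its caller, even the !CONFIG_RCU_BOOST stub can no longer be empty; its sole job is to drop the lock, so every call site sees identical locking behavior in both configurations. Likewise, with the per-rcu_node and boost kthreads made immortal (no more kthread_should_stop()/kthread_stop() pairs, and rcu_stop_boost_kthread() deleted outright), the CPU-hotplug path now merely adjusts kthread affinity instead of tearing kthreads down.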