Commit 2e597558086dec36d5c33521a36e0f6b1bc3f3a7
Committed by
Ingo Molnar
1 parent
799e64f05f
rcu: Simplify RCU CPU-hotplug notification
Use the new cpu_notifier() API to simplify RCU's CPU-hotplug notifiers, collapsing down to a single such notifier. This makes it trivial to provide the notifier-ordering guarantee that rcu_barrier() depends on. Also remove redundant open_softirq() calls from Hierarchical RCU notifier.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: josht@linux.vnet.ibm.com
Cc: akpm@linux-foundation.org
Cc: mathieu.desnoyers@polymtl.ca
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: hugh.dickins@tiscali.co.uk
Cc: benh@kernel.crashing.org
LKML-Reference: <12503552312510-git-send-email-> (reference appears truncated in this extract)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Showing 3 changed files with 22 additions and 36 deletions Side-by-side Diff
kernel/rcupdate.c
... | ... | @@ -217,9 +217,13 @@ |
217 | 217 | wake_up(&rcu_migrate_wq); |
218 | 218 | } |
219 | 219 | |
220 | +extern int rcu_cpu_notify(struct notifier_block *self, | |
221 | + unsigned long action, void *hcpu); | |
222 | + | |
220 | 223 | static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self, |
221 | 224 | unsigned long action, void *hcpu) |
222 | 225 | { |
226 | + rcu_cpu_notify(self, action, hcpu); | |
223 | 227 | if (action == CPU_DYING) { |
224 | 228 | /* |
225 | 229 | * preempt_disable() in on_each_cpu() prevents stop_machine(), |
226 | 230 | |
... | ... | @@ -244,8 +248,18 @@ |
244 | 248 | |
245 | 249 | void __init rcu_init(void) |
246 | 250 | { |
251 | + int i; | |
252 | + | |
247 | 253 | __rcu_init(); |
248 | - hotcpu_notifier(rcu_barrier_cpu_hotplug, 0); | |
254 | + cpu_notifier(rcu_barrier_cpu_hotplug, 0); | |
255 | + | |
256 | + /* | |
257 | + * We don't need protection against CPU-hotplug here because | |
258 | + * this is called early in boot, before either interrupts | |
259 | + * or the scheduler are operational. | |
260 | + */ | |
261 | + for_each_online_cpu(i) | |
262 | + rcu_barrier_cpu_hotplug(NULL, CPU_UP_PREPARE, (void *)(long)i); | |
249 | 263 | } |
250 | 264 | |
251 | 265 | void rcu_scheduler_starting(void) |
kernel/rcupreempt.c
... | ... | @@ -1417,8 +1417,8 @@ |
1417 | 1417 | return 0; |
1418 | 1418 | } |
1419 | 1419 | |
1420 | -static int __cpuinit rcu_cpu_notify(struct notifier_block *self, | |
1421 | - unsigned long action, void *hcpu) | |
1420 | +int __cpuinit rcu_cpu_notify(struct notifier_block *self, | |
1421 | + unsigned long action, void *hcpu) | |
1422 | 1422 | { |
1423 | 1423 | long cpu = (long)hcpu; |
1424 | 1424 | |
... | ... | @@ -1439,10 +1439,6 @@ |
1439 | 1439 | return NOTIFY_OK; |
1440 | 1440 | } |
1441 | 1441 | |
1442 | -static struct notifier_block __cpuinitdata rcu_nb = { | |
1443 | - .notifier_call = rcu_cpu_notify, | |
1444 | -}; | |
1445 | - | |
1446 | 1442 | void __init __rcu_init(void) |
1447 | 1443 | { |
1448 | 1444 | int cpu; |
... | ... | @@ -1471,23 +1467,6 @@ |
1471 | 1467 | rdp->waitschedtail = &rdp->waitschedlist; |
1472 | 1468 | rdp->rcu_sched_sleeping = 0; |
1473 | 1469 | } |
1474 | - register_cpu_notifier(&rcu_nb); | |
1475 | - | |
1476 | - /* | |
1477 | - * We don't need protection against CPU-Hotplug here | |
1478 | - * since | |
1479 | - * a) If a CPU comes online while we are iterating over the | |
1480 | - * cpu_online_mask below, we would only end up making a | |
1481 | - * duplicate call to rcu_online_cpu() which sets the corresponding | |
1482 | - * CPU's mask in the rcu_cpu_online_map. | |
1483 | - * | |
1484 | - * b) A CPU cannot go offline at this point in time since the user | |
1485 | - * does not have access to the sysfs interface, nor do we | |
1486 | - * suspend the system. | |
1487 | - */ | |
1488 | - for_each_online_cpu(cpu) | |
1489 | - rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long) cpu); | |
1490 | - | |
1491 | 1470 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); |
1492 | 1471 | } |
1493 | 1472 |
kernel/rcutree.c
... | ... | @@ -1132,6 +1132,8 @@ |
1132 | 1132 | { |
1133 | 1133 | unsigned long flags; |
1134 | 1134 | |
1135 | + WARN_ON_ONCE(rdp->beenonline == 0); | |
1136 | + | |
1135 | 1137 | /* |
1136 | 1138 | * If an RCU GP has gone long enough, go check for dyntick |
1137 | 1139 | * idle CPUs and, if needed, send resched IPIs. |
1138 | 1140 | |
... | ... | @@ -1416,14 +1418,13 @@ |
1416 | 1418 | { |
1417 | 1419 | rcu_init_percpu_data(cpu, &rcu_state); |
1418 | 1420 | rcu_init_percpu_data(cpu, &rcu_bh_state); |
1419 | - open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); | |
1420 | 1421 | } |
1421 | 1422 | |
1422 | 1423 | /* |
1423 | 1424 | * Handle CPU online/offline notifcation events. |
1424 | 1425 | */ |
1425 | -static int __cpuinit rcu_cpu_notify(struct notifier_block *self, | |
1426 | - unsigned long action, void *hcpu) | |
1426 | +int __cpuinit rcu_cpu_notify(struct notifier_block *self, | |
1427 | + unsigned long action, void *hcpu) | |
1427 | 1428 | { |
1428 | 1429 | long cpu = (long)hcpu; |
1429 | 1430 | |
... | ... | @@ -1532,10 +1533,6 @@ |
1532 | 1533 | } \ |
1533 | 1534 | } while (0) |
1534 | 1535 | |
1535 | -static struct notifier_block __cpuinitdata rcu_nb = { | |
1536 | - .notifier_call = rcu_cpu_notify, | |
1537 | -}; | |
1538 | - | |
1539 | 1536 | void __init __rcu_init(void) |
1540 | 1537 | { |
1541 | 1538 | int i; /* All used by RCU_DATA_PTR_INIT(). */ |
... | ... | @@ -1554,11 +1551,7 @@ |
1554 | 1551 | RCU_DATA_PTR_INIT(&rcu_bh_state, rcu_bh_data); |
1555 | 1552 | for_each_possible_cpu(i) |
1556 | 1553 | rcu_boot_init_percpu_data(i, &rcu_bh_state); |
1557 | - | |
1558 | - for_each_online_cpu(i) | |
1559 | - rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long)i); | |
1560 | - /* Register notifier for non-boot CPUs */ | |
1561 | - register_cpu_notifier(&rcu_nb); | |
1554 | + open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); | |
1562 | 1555 | } |
1563 | 1556 | |
1564 | 1557 | module_param(blimit, int, 0); |