Commit 467a9e1633043810259a7f5368fbcc1e84746137

Authored by Linus Torvalds

Merge tag 'cpu-hotplug-3.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull CPU hotplug notifiers registration fixes from Rafael Wysocki:
 "The purpose of this single series of commits from Srivatsa S Bhat
  (with a small piece from Gautham R Shenoy) touching multiple
  subsystems that use CPU hotplug notifiers is to provide a way to
  register them that will not lead to deadlocks with CPU online/offline
  operations as described in the changelog of commit 93ae4f978ca7f ("CPU
  hotplug: Provide lockless versions of callback registration
  functions").

  The first three commits in the series introduce the API and document
  it and the rest simply goes through the users of CPU hotplug notifiers
  and converts them to using the new method"

* tag 'cpu-hotplug-3.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (52 commits)
  net/iucv/iucv.c: Fix CPU hotplug callback registration
  net/core/flow.c: Fix CPU hotplug callback registration
  mm, zswap: Fix CPU hotplug callback registration
  mm, vmstat: Fix CPU hotplug callback registration
  profile: Fix CPU hotplug callback registration
  trace, ring-buffer: Fix CPU hotplug callback registration
  xen, balloon: Fix CPU hotplug callback registration
  hwmon, via-cputemp: Fix CPU hotplug callback registration
  hwmon, coretemp: Fix CPU hotplug callback registration
  thermal, x86-pkg-temp: Fix CPU hotplug callback registration
  octeon, watchdog: Fix CPU hotplug callback registration
  oprofile, nmi-timer: Fix CPU hotplug callback registration
  intel-idle: Fix CPU hotplug callback registration
  clocksource, dummy-timer: Fix CPU hotplug callback registration
  drivers/base/topology.c: Fix CPU hotplug callback registration
  acpi-cpufreq: Fix CPU hotplug callback registration
  zsmalloc: Fix CPU hotplug callback registration
  scsi, fcoe: Fix CPU hotplug callback registration
  scsi, bnx2fc: Fix CPU hotplug callback registration
  scsi, bnx2i: Fix CPU hotplug callback registration
  ...

Showing 51 changed files (side-by-side diff)

Documentation/cpu-hotplug.txt
... ... @@ -312,11 +312,56 @@
312 312 Q: I don't see my action being called for all CPUs already up and running?
313 313 A: Yes, CPU notifiers are called only when new CPUs are on-lined or offlined.
314 314 If you need to perform some action for each cpu already in the system, then
  315 + do this:
315 316  
316 317 for_each_online_cpu(i) {
317 318 foobar_cpu_callback(&foobar_cpu_notifier, CPU_UP_PREPARE, i);
318 319 foobar_cpu_callback(&foobar_cpu_notifier, CPU_ONLINE, i);
319 320 }
  321 +
  322 + However, if you want to register a hotplug callback, as well as perform
  323 + some initialization for CPUs that are already online, then do this:
  324 +
  325 + Version 1: (Correct)
  326 + ---------
  327 +
  328 + cpu_notifier_register_begin();
  329 +
  330 + for_each_online_cpu(i) {
  331 + foobar_cpu_callback(&foobar_cpu_notifier,
  332 + CPU_UP_PREPARE, i);
  333 + foobar_cpu_callback(&foobar_cpu_notifier,
  334 + CPU_ONLINE, i);
  335 + }
  336 +
  337 + /* Note the use of the double underscored version of the API */
  338 + __register_cpu_notifier(&foobar_cpu_notifier);
  339 +
  340 + cpu_notifier_register_done();
  341 +
  342 + Note that the following code is *NOT* the right way to achieve this,
  343 + because it is prone to an ABBA deadlock between the cpu_add_remove_lock
  344 + and the cpu_hotplug.lock.
  345 +
  346 + Version 2: (Wrong!)
  347 + ---------
  348 +
  349 + get_online_cpus();
  350 +
  351 + for_each_online_cpu(i) {
  352 + foobar_cpu_callback(&foobar_cpu_notifier,
  353 + CPU_UP_PREPARE, i);
  354 + foobar_cpu_callback(&foobar_cpu_notifier,
  355 + CPU_ONLINE, i);
  356 + }
  357 +
  358 + register_cpu_notifier(&foobar_cpu_notifier);
  359 +
  360 + put_online_cpus();
  361 +
  362 + So always use the first version shown above when you want to register
  363 + callbacks as well as initialize the already online CPUs.
  364 +
320 365  
321 366 Q: If i would like to develop cpu hotplug support for a new architecture,
322 367 what do i need at a minimum?
arch/arm/kernel/hw_breakpoint.c
... ... @@ -1073,6 +1073,8 @@
1073 1073 core_num_brps = get_num_brps();
1074 1074 core_num_wrps = get_num_wrps();
1075 1075  
  1076 + cpu_notifier_register_begin();
  1077 +
1076 1078 /*
1077 1079 * We need to tread carefully here because DBGSWENABLE may be
1078 1080 * driven low on this core and there isn't an architected way to
... ... @@ -1089,6 +1091,7 @@
1089 1091 if (!cpumask_empty(&debug_err_mask)) {
1090 1092 core_num_brps = 0;
1091 1093 core_num_wrps = 0;
  1094 + cpu_notifier_register_done();
1092 1095 return 0;
1093 1096 }
1094 1097  
... ... @@ -1108,7 +1111,10 @@
1108 1111 TRAP_HWBKPT, "breakpoint debug exception");
1109 1112  
1110 1113 /* Register hotplug and PM notifiers. */
1111   - register_cpu_notifier(&dbg_reset_nb);
  1114 + __register_cpu_notifier(&dbg_reset_nb);
  1115 +
  1116 + cpu_notifier_register_done();
  1117 +
1112 1118 pm_init();
1113 1119 return 0;
1114 1120 }
... ... @@ -1051,21 +1051,26 @@
1051 1051 }
1052 1052 }
1053 1053  
  1054 + cpu_notifier_register_begin();
  1055 +
1054 1056 err = init_hyp_mode();
1055 1057 if (err)
1056 1058 goto out_err;
1057 1059  
1058   - err = register_cpu_notifier(&hyp_init_cpu_nb);
  1060 + err = __register_cpu_notifier(&hyp_init_cpu_nb);
1059 1061 if (err) {
1060 1062 kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
1061 1063 goto out_err;
1062 1064 }
1063 1065  
  1066 + cpu_notifier_register_done();
  1067 +
1064 1068 hyp_cpu_pm_init();
1065 1069  
1066 1070 kvm_coproc_table_init();
1067 1071 return 0;
1068 1072 out_err:
  1073 + cpu_notifier_register_done();
1069 1074 return err;
1070 1075 }
1071 1076  
arch/arm64/kernel/debug-monitors.c
... ... @@ -154,13 +154,17 @@
154 154  
155 155 static int debug_monitors_init(void)
156 156 {
  157 + cpu_notifier_register_begin();
  158 +
157 159 /* Clear the OS lock. */
158 160 on_each_cpu(clear_os_lock, NULL, 1);
159 161 isb();
160 162 local_dbg_enable();
161 163  
162 164 /* Register hotplug handler. */
163   - register_cpu_notifier(&os_lock_nb);
  165 + __register_cpu_notifier(&os_lock_nb);
  166 +
  167 + cpu_notifier_register_done();
164 168 return 0;
165 169 }
166 170 postcore_initcall(debug_monitors_init);
arch/arm64/kernel/hw_breakpoint.c
... ... @@ -913,6 +913,8 @@
913 913 pr_info("found %d breakpoint and %d watchpoint registers.\n",
914 914 core_num_brps, core_num_wrps);
915 915  
  916 + cpu_notifier_register_begin();
  917 +
916 918 /*
917 919 * Reset the breakpoint resources. We assume that a halting
918 920 * debugger will leave the world in a nice state for us.
... ... @@ -927,7 +929,10 @@
927 929 TRAP_HWBKPT, "hw-watchpoint handler");
928 930  
929 931 /* Register hotplug notifier. */
930   - register_cpu_notifier(&hw_breakpoint_reset_nb);
  932 + __register_cpu_notifier(&hw_breakpoint_reset_nb);
  933 +
  934 + cpu_notifier_register_done();
  935 +
931 936 /* Register cpu_suspend hw breakpoint restore hook */
932 937 cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);
933 938  
arch/ia64/kernel/err_inject.c
... ... @@ -269,13 +269,18 @@
269 269 #ifdef ERR_INJ_DEBUG
270 270 printk(KERN_INFO "Enter error injection driver.\n");
271 271 #endif
  272 +
  273 + cpu_notifier_register_begin();
  274 +
272 275 for_each_online_cpu(i) {
273 276 err_inject_cpu_callback(&err_inject_cpu_notifier, CPU_ONLINE,
274 277 (void *)(long)i);
275 278 }
276 279  
277   - register_hotcpu_notifier(&err_inject_cpu_notifier);
  280 + __register_hotcpu_notifier(&err_inject_cpu_notifier);
278 281  
  282 + cpu_notifier_register_done();
  283 +
279 284 return 0;
280 285 }
281 286  
282 287  
... ... @@ -288,11 +293,17 @@
288 293 #ifdef ERR_INJ_DEBUG
289 294 printk(KERN_INFO "Exit error injection driver.\n");
290 295 #endif
  296 +
  297 + cpu_notifier_register_begin();
  298 +
291 299 for_each_online_cpu(i) {
292 300 sys_dev = get_cpu_device(i);
293 301 sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
294 302 }
295   - unregister_hotcpu_notifier(&err_inject_cpu_notifier);
  303 +
  304 + __unregister_hotcpu_notifier(&err_inject_cpu_notifier);
  305 +
  306 + cpu_notifier_register_done();
296 307 }
297 308  
298 309 module_init(err_inject_init);
arch/ia64/kernel/palinfo.c
... ... @@ -996,13 +996,17 @@
996 996 if (!palinfo_dir)
997 997 return -ENOMEM;
998 998  
  999 + cpu_notifier_register_begin();
  1000 +
999 1001 /* Create palinfo dirs in /proc for all online cpus */
1000 1002 for_each_online_cpu(i) {
1001 1003 create_palinfo_proc_entries(i);
1002 1004 }
1003 1005  
1004 1006 /* Register for future delivery via notify registration */
1005   - register_hotcpu_notifier(&palinfo_cpu_notifier);
  1007 + __register_hotcpu_notifier(&palinfo_cpu_notifier);
  1008 +
  1009 + cpu_notifier_register_done();
1006 1010  
1007 1011 return 0;
1008 1012 }
arch/ia64/kernel/salinfo.c
... ... @@ -635,6 +635,8 @@
635 635 (void *)salinfo_entries[i].feature);
636 636 }
637 637  
  638 + cpu_notifier_register_begin();
  639 +
638 640 for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) {
639 641 data = salinfo_data + i;
640 642 data->type = i;
... ... @@ -669,7 +671,9 @@
669 671 salinfo_timer.function = &salinfo_timeout;
670 672 add_timer(&salinfo_timer);
671 673  
672   - register_hotcpu_notifier(&salinfo_cpu_notifier);
  674 + __register_hotcpu_notifier(&salinfo_cpu_notifier);
  675 +
  676 + cpu_notifier_register_done();
673 677  
674 678 return 0;
675 679 }
arch/ia64/kernel/topology.c
... ... @@ -454,12 +454,16 @@
454 454 {
455 455 int i;
456 456  
  457 + cpu_notifier_register_begin();
  458 +
457 459 for_each_online_cpu(i) {
458 460 struct device *sys_dev = get_cpu_device((unsigned int)i);
459 461 cache_add_dev(sys_dev);
460 462 }
461 463  
462   - register_hotcpu_notifier(&cache_cpu_notifier);
  464 + __register_hotcpu_notifier(&cache_cpu_notifier);
  465 +
  466 + cpu_notifier_register_done();
463 467  
464 468 return 0;
465 469 }
arch/powerpc/kernel/sysfs.c
... ... @@ -975,8 +975,9 @@
975 975 int cpu;
976 976  
977 977 register_nodes();
978   - register_cpu_notifier(&sysfs_cpu_nb);
979 978  
  979 + cpu_notifier_register_begin();
  980 +
980 981 for_each_possible_cpu(cpu) {
981 982 struct cpu *c = &per_cpu(cpu_devices, cpu);
982 983  
... ... @@ -999,6 +1000,11 @@
999 1000 if (cpu_online(cpu))
1000 1001 register_cpu_online(cpu);
1001 1002 }
  1003 +
  1004 + __register_cpu_notifier(&sysfs_cpu_nb);
  1005 +
  1006 + cpu_notifier_register_done();
  1007 +
1002 1008 #ifdef CONFIG_PPC64
1003 1009 sysfs_create_dscr_default();
1004 1010 #endif /* CONFIG_PPC64 */
arch/s390/kernel/cache.c
... ... @@ -378,9 +378,12 @@
378 378 if (!test_facility(34))
379 379 return 0;
380 380 cache_build_info();
  381 +
  382 + cpu_notifier_register_begin();
381 383 for_each_online_cpu(cpu)
382 384 cache_add_cpu(cpu);
383   - hotcpu_notifier(cache_hotplug, 0);
  385 + __hotcpu_notifier(cache_hotplug, 0);
  386 + cpu_notifier_register_done();
384 387 return 0;
385 388 }
386 389 device_initcall(cache_init);
arch/s390/kernel/smp.c
... ... @@ -1057,20 +1057,25 @@
1057 1057  
1058 1058 static int __init s390_smp_init(void)
1059 1059 {
1060   - int cpu, rc;
  1060 + int cpu, rc = 0;
1061 1061  
1062   - hotcpu_notifier(smp_cpu_notify, 0);
1063 1062 #ifdef CONFIG_HOTPLUG_CPU
1064 1063 rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
1065 1064 if (rc)
1066 1065 return rc;
1067 1066 #endif
  1067 + cpu_notifier_register_begin();
1068 1068 for_each_present_cpu(cpu) {
1069 1069 rc = smp_add_present_cpu(cpu);
1070 1070 if (rc)
1071   - return rc;
  1071 + goto out;
1072 1072 }
1073   - return 0;
  1073 +
  1074 + __hotcpu_notifier(smp_cpu_notify, 0);
  1075 +
  1076 +out:
  1077 + cpu_notifier_register_done();
  1078 + return rc;
1074 1079 }
1075 1080 subsys_initcall(s390_smp_init);
arch/sparc/kernel/sysfs.c
... ... @@ -300,7 +300,7 @@
300 300  
301 301 check_mmu_stats();
302 302  
303   - register_cpu_notifier(&sysfs_cpu_nb);
  303 + cpu_notifier_register_begin();
304 304  
305 305 for_each_possible_cpu(cpu) {
306 306 struct cpu *c = &per_cpu(cpu_devices, cpu);
... ... @@ -309,6 +309,10 @@
309 309 if (cpu_online(cpu))
310 310 register_cpu_online(cpu);
311 311 }
  312 +
  313 + __register_cpu_notifier(&sysfs_cpu_nb);
  314 +
  315 + cpu_notifier_register_done();
312 316  
313 317 return 0;
314 318 }
arch/x86/kernel/cpu/intel_cacheinfo.c
... ... @@ -1225,21 +1225,24 @@
1225 1225  
1226 1226 static int __init cache_sysfs_init(void)
1227 1227 {
1228   - int i;
  1228 + int i, err = 0;
1229 1229  
1230 1230 if (num_cache_leaves == 0)
1231 1231 return 0;
1232 1232  
  1233 + cpu_notifier_register_begin();
1233 1234 for_each_online_cpu(i) {
1234   - int err;
1235 1235 struct device *dev = get_cpu_device(i);
1236 1236  
1237 1237 err = cache_add_dev(dev);
1238 1238 if (err)
1239   - return err;
  1239 + goto out;
1240 1240 }
1241   - register_hotcpu_notifier(&cacheinfo_cpu_notifier);
1242   - return 0;
  1241 + __register_hotcpu_notifier(&cacheinfo_cpu_notifier);
  1242 +
  1243 +out:
  1244 + cpu_notifier_register_done();
  1245 + return err;
1243 1246 }
1244 1247  
1245 1248 device_initcall(cache_sysfs_init);
arch/x86/kernel/cpu/mcheck/mce.c
... ... @@ -2434,14 +2434,18 @@
2434 2434 if (err)
2435 2435 return err;
2436 2436  
  2437 + cpu_notifier_register_begin();
2437 2438 for_each_online_cpu(i) {
2438 2439 err = mce_device_create(i);
2439   - if (err)
  2440 + if (err) {
  2441 + cpu_notifier_register_done();
2440 2442 return err;
  2443 + }
2441 2444 }
2442 2445  
2443 2446 register_syscore_ops(&mce_syscore_ops);
2444   - register_hotcpu_notifier(&mce_cpu_notifier);
  2447 + __register_hotcpu_notifier(&mce_cpu_notifier);
  2448 + cpu_notifier_register_done();
2445 2449  
2446 2450 /* register character device /dev/mcelog */
2447 2451 misc_register(&mce_chrdev_device);
arch/x86/kernel/cpu/mcheck/therm_throt.c
... ... @@ -271,9 +271,6 @@
271 271 sysfs_remove_group(&dev->kobj, &thermal_attr_group);
272 272 }
273 273  
274   -/* Mutex protecting device creation against CPU hotplug: */
275   -static DEFINE_MUTEX(therm_cpu_lock);
276   -
277 274 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
278 275 static int
279 276 thermal_throttle_cpu_callback(struct notifier_block *nfb,
280 277  
281 278  
282 279  
... ... @@ -289,18 +286,14 @@
289 286 switch (action) {
290 287 case CPU_UP_PREPARE:
291 288 case CPU_UP_PREPARE_FROZEN:
292   - mutex_lock(&therm_cpu_lock);
293 289 err = thermal_throttle_add_dev(dev, cpu);
294   - mutex_unlock(&therm_cpu_lock);
295 290 WARN_ON(err);
296 291 break;
297 292 case CPU_UP_CANCELED:
298 293 case CPU_UP_CANCELED_FROZEN:
299 294 case CPU_DEAD:
300 295 case CPU_DEAD_FROZEN:
301   - mutex_lock(&therm_cpu_lock);
302 296 thermal_throttle_remove_dev(dev);
303   - mutex_unlock(&therm_cpu_lock);
304 297 break;
305 298 }
306 299 return notifier_from_errno(err);
307 300  
308 301  
... ... @@ -319,19 +312,16 @@
319 312 if (!atomic_read(&therm_throt_en))
320 313 return 0;
321 314  
322   - register_hotcpu_notifier(&thermal_throttle_cpu_notifier);
  315 + cpu_notifier_register_begin();
323 316  
324   -#ifdef CONFIG_HOTPLUG_CPU
325   - mutex_lock(&therm_cpu_lock);
326   -#endif
327 317 /* connect live CPUs to sysfs */
328 318 for_each_online_cpu(cpu) {
329 319 err = thermal_throttle_add_dev(get_cpu_device(cpu), cpu);
330 320 WARN_ON(err);
331 321 }
332   -#ifdef CONFIG_HOTPLUG_CPU
333   - mutex_unlock(&therm_cpu_lock);
334   -#endif
  322 +
  323 + __register_hotcpu_notifier(&thermal_throttle_cpu_notifier);
  324 + cpu_notifier_register_done();
335 325  
336 326 return 0;
337 327 }
arch/x86/kernel/cpu/perf_event_amd_ibs.c
... ... @@ -926,13 +926,13 @@
926 926 goto out;
927 927  
928 928 perf_ibs_pm_init();
929   - get_online_cpus();
  929 + cpu_notifier_register_begin();
930 930 ibs_caps = caps;
931 931 /* make ibs_caps visible to other cpus: */
932 932 smp_mb();
933   - perf_cpu_notifier(perf_ibs_cpu_notifier);
934 933 smp_call_function(setup_APIC_ibs, NULL, 1);
935   - put_online_cpus();
  934 + __perf_cpu_notifier(perf_ibs_cpu_notifier);
  935 + cpu_notifier_register_done();
936 936  
937 937 ret = perf_event_ibs_init();
938 938 out:
arch/x86/kernel/cpu/perf_event_amd_uncore.c
... ... @@ -531,15 +531,16 @@
531 531 if (ret)
532 532 return -ENODEV;
533 533  
534   - get_online_cpus();
  534 + cpu_notifier_register_begin();
  535 +
535 536 /* init cpus already online before registering for hotplug notifier */
536 537 for_each_online_cpu(cpu) {
537 538 amd_uncore_cpu_up_prepare(cpu);
538 539 smp_call_function_single(cpu, init_cpu_already_online, NULL, 1);
539 540 }
540 541  
541   - register_cpu_notifier(&amd_uncore_cpu_notifier_block);
542   - put_online_cpus();
  542 + __register_cpu_notifier(&amd_uncore_cpu_notifier_block);
  543 + cpu_notifier_register_done();
543 544  
544 545 return 0;
545 546 }
arch/x86/kernel/cpu/perf_event_intel_rapl.c
... ... @@ -646,19 +646,20 @@
646 646 /* unsupported */
647 647 return 0;
648 648 }
649   - get_online_cpus();
650 649  
  650 + cpu_notifier_register_begin();
  651 +
651 652 for_each_online_cpu(cpu) {
652 653 rapl_cpu_prepare(cpu);
653 654 rapl_cpu_init(cpu);
654 655 }
655 656  
656   - perf_cpu_notifier(rapl_cpu_notifier);
  657 + __perf_cpu_notifier(rapl_cpu_notifier);
657 658  
658 659 ret = perf_pmu_register(&rapl_pmu_class, "power", -1);
659 660 if (WARN_ON(ret)) {
660 661 pr_info("RAPL PMU detected, registration failed (%d), RAPL PMU disabled\n", ret);
661   - put_online_cpus();
  662 + cpu_notifier_register_done();
662 663 return -1;
663 664 }
664 665  
... ... @@ -672,7 +673,7 @@
672 673 hweight32(rapl_cntr_mask),
673 674 ktime_to_ms(pmu->timer_interval));
674 675  
675   - put_online_cpus();
  676 + cpu_notifier_register_done();
676 677  
677 678 return 0;
678 679 }
arch/x86/kernel/cpu/perf_event_intel_uncore.c
... ... @@ -4244,7 +4244,7 @@
4244 4244 if (!cpumask_empty(&uncore_cpu_mask))
4245 4245 return;
4246 4246  
4247   - get_online_cpus();
  4247 + cpu_notifier_register_begin();
4248 4248  
4249 4249 for_each_online_cpu(cpu) {
4250 4250 int i, phys_id = topology_physical_package_id(cpu);
4251 4251  
... ... @@ -4263,9 +4263,9 @@
4263 4263 }
4264 4264 on_each_cpu(uncore_cpu_setup, NULL, 1);
4265 4265  
4266   - register_cpu_notifier(&uncore_cpu_nb);
  4266 + __register_cpu_notifier(&uncore_cpu_nb);
4267 4267  
4268   - put_online_cpus();
  4268 + cpu_notifier_register_done();
4269 4269 }
4270 4270  
4271 4271  
arch/x86/kernel/cpuid.c
... ... @@ -198,14 +198,15 @@
198 198 goto out_chrdev;
199 199 }
200 200 cpuid_class->devnode = cpuid_devnode;
201   - get_online_cpus();
  201 +
  202 + cpu_notifier_register_begin();
202 203 for_each_online_cpu(i) {
203 204 err = cpuid_device_create(i);
204 205 if (err != 0)
205 206 goto out_class;
206 207 }
207   - register_hotcpu_notifier(&cpuid_class_cpu_notifier);
208   - put_online_cpus();
  208 + __register_hotcpu_notifier(&cpuid_class_cpu_notifier);
  209 + cpu_notifier_register_done();
209 210  
210 211 err = 0;
211 212 goto out;
... ... @@ -215,7 +216,7 @@
215 216 for_each_online_cpu(i) {
216 217 cpuid_device_destroy(i);
217 218 }
218   - put_online_cpus();
  219 + cpu_notifier_register_done();
219 220 class_destroy(cpuid_class);
220 221 out_chrdev:
221 222 __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
222 223  
... ... @@ -227,13 +228,13 @@
227 228 {
228 229 int cpu = 0;
229 230  
230   - get_online_cpus();
  231 + cpu_notifier_register_begin();
231 232 for_each_online_cpu(cpu)
232 233 cpuid_device_destroy(cpu);
233 234 class_destroy(cpuid_class);
234 235 __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
235   - unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
236   - put_online_cpus();
  236 + __unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
  237 + cpu_notifier_register_done();
237 238 }
238 239  
239 240 module_init(cpuid_init);
arch/x86/kernel/hpet.c
... ... @@ -941,12 +941,14 @@
941 941 if (boot_cpu_has(X86_FEATURE_ARAT))
942 942 return 0;
943 943  
  944 + cpu_notifier_register_begin();
944 945 for_each_online_cpu(cpu) {
945 946 hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
946 947 }
947 948  
948 949 /* This notifier should be called after workqueue is ready */
949   - hotcpu_notifier(hpet_cpuhp_notify, -20);
  950 + __hotcpu_notifier(hpet_cpuhp_notify, -20);
  951 + cpu_notifier_register_done();
950 952  
951 953 return 0;
952 954 }
arch/x86/kernel/msr.c
... ... @@ -259,14 +259,15 @@
259 259 goto out_chrdev;
260 260 }
261 261 msr_class->devnode = msr_devnode;
262   - get_online_cpus();
  262 +
  263 + cpu_notifier_register_begin();
263 264 for_each_online_cpu(i) {
264 265 err = msr_device_create(i);
265 266 if (err != 0)
266 267 goto out_class;
267 268 }
268   - register_hotcpu_notifier(&msr_class_cpu_notifier);
269   - put_online_cpus();
  269 + __register_hotcpu_notifier(&msr_class_cpu_notifier);
  270 + cpu_notifier_register_done();
270 271  
271 272 err = 0;
272 273 goto out;
... ... @@ -275,7 +276,7 @@
275 276 i = 0;
276 277 for_each_online_cpu(i)
277 278 msr_device_destroy(i);
278   - put_online_cpus();
  279 + cpu_notifier_register_done();
279 280 class_destroy(msr_class);
280 281 out_chrdev:
281 282 __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
282 283  
... ... @@ -286,13 +287,14 @@
286 287 static void __exit msr_exit(void)
287 288 {
288 289 int cpu = 0;
289   - get_online_cpus();
  290 +
  291 + cpu_notifier_register_begin();
290 292 for_each_online_cpu(cpu)
291 293 msr_device_destroy(cpu);
292 294 class_destroy(msr_class);
293 295 __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
294   - unregister_hotcpu_notifier(&msr_class_cpu_notifier);
295   - put_online_cpus();
  296 + __unregister_hotcpu_notifier(&msr_class_cpu_notifier);
  297 + cpu_notifier_register_done();
296 298 }
297 299  
298 300 module_init(msr_init);
arch/x86/kernel/vsyscall_64.c
... ... @@ -348,9 +348,13 @@
348 348 {
349 349 BUG_ON(VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE));
350 350  
  351 + cpu_notifier_register_begin();
  352 +
351 353 on_each_cpu(cpu_vsyscall_init, NULL, 1);
352 354 /* notifier priority > KVM */
353   - hotcpu_notifier(cpu_vsyscall_notifier, 30);
  355 + __hotcpu_notifier(cpu_vsyscall_notifier, 30);
  356 +
  357 + cpu_notifier_register_done();
354 358  
355 359 return 0;
356 360 }
... ... @@ -5422,7 +5422,8 @@
5422 5422 int cpu;
5423 5423  
5424 5424 max_tsc_khz = tsc_khz;
5425   - register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
  5425 +
  5426 + cpu_notifier_register_begin();
5426 5427 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
5427 5428 #ifdef CONFIG_CPU_FREQ
5428 5429 struct cpufreq_policy policy;
... ... @@ -5439,6 +5440,10 @@
5439 5440 pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
5440 5441 for_each_online_cpu(cpu)
5441 5442 smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
  5443 +
  5444 + __register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
  5445 + cpu_notifier_register_done();
  5446 +
5442 5447 }
5443 5448  
5444 5449 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
arch/x86/oprofile/nmi_int.c
... ... @@ -494,14 +494,19 @@
494 494 if (err)
495 495 goto fail;
496 496  
  497 + cpu_notifier_register_begin();
  498 +
  499 + /* Use get/put_online_cpus() to protect 'nmi_enabled' */
497 500 get_online_cpus();
498   - register_cpu_notifier(&oprofile_cpu_nb);
499 501 nmi_enabled = 1;
500 502 /* make nmi_enabled visible to the nmi handler: */
501 503 smp_mb();
502 504 on_each_cpu(nmi_cpu_setup, NULL, 1);
  505 + __register_cpu_notifier(&oprofile_cpu_nb);
503 506 put_online_cpus();
504 507  
  508 + cpu_notifier_register_done();
  509 +
505 510 return 0;
506 511 fail:
507 512 free_msrs();
508 513  
509 514  
510 515  
... ... @@ -512,12 +517,18 @@
512 517 {
513 518 struct op_msrs *msrs;
514 519  
  520 + cpu_notifier_register_begin();
  521 +
  522 + /* Use get/put_online_cpus() to protect 'nmi_enabled' & 'ctr_running' */
515 523 get_online_cpus();
516   - unregister_cpu_notifier(&oprofile_cpu_nb);
517 524 on_each_cpu(nmi_cpu_shutdown, NULL, 1);
518 525 nmi_enabled = 0;
519 526 ctr_running = 0;
  527 + __unregister_cpu_notifier(&oprofile_cpu_nb);
520 528 put_online_cpus();
  529 +
  530 + cpu_notifier_register_done();
  531 +
521 532 /* make variables visible to the nmi handler: */
522 533 smp_mb();
523 534 unregister_nmi_handler(NMI_LOCAL, "oprofile");
arch/x86/pci/amd_bus.c
... ... @@ -370,10 +370,13 @@
370 370 if (early_pci_allowed())
371 371 pci_enable_pci_io_ecs();
372 372  
373   - register_cpu_notifier(&amd_cpu_notifier);
  373 + cpu_notifier_register_begin();
374 374 for_each_online_cpu(cpu)
375 375 amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE,
376 376 (void *)(long)cpu);
  377 + __register_cpu_notifier(&amd_cpu_notifier);
  378 + cpu_notifier_register_done();
  379 +
377 380 pci_probe |= PCI_HAS_IO_ECS;
378 381  
379 382 return 0;
drivers/base/topology.c
... ... @@ -160,16 +160,20 @@
160 160 static int topology_sysfs_init(void)
161 161 {
162 162 int cpu;
163   - int rc;
  163 + int rc = 0;
164 164  
  165 + cpu_notifier_register_begin();
  166 +
165 167 for_each_online_cpu(cpu) {
166 168 rc = topology_add_dev(cpu);
167 169 if (rc)
168   - return rc;
  170 + goto out;
169 171 }
170   - hotcpu_notifier(topology_cpu_callback, 0);
  172 + __hotcpu_notifier(topology_cpu_callback, 0);
171 173  
172   - return 0;
  174 +out:
  175 + cpu_notifier_register_done();
  176 + return rc;
173 177 }
174 178  
175 179 device_initcall(topology_sysfs_init);
drivers/clocksource/dummy_timer.c
... ... @@ -56,15 +56,20 @@
56 56  
57 57 static int __init dummy_timer_register(void)
58 58 {
59   - int err = register_cpu_notifier(&dummy_timer_cpu_nb);
  59 + int err = 0;
  60 +
  61 + cpu_notifier_register_begin();
  62 + err = __register_cpu_notifier(&dummy_timer_cpu_nb);
60 63 if (err)
61   - return err;
  64 + goto out;
62 65  
63 66 /* We won't get a call on the boot CPU, so register immediately */
64 67 if (num_possible_cpus() > 1)
65 68 dummy_timer_setup();
66 69  
67   - return 0;
  70 +out:
  71 + cpu_notifier_register_done();
  72 + return err;
68 73 }
69 74 early_initcall(dummy_timer_register);
drivers/cpufreq/acpi-cpufreq.c
... ... @@ -906,15 +906,16 @@
906 906  
907 907 acpi_cpufreq_driver.boost_supported = true;
908 908 acpi_cpufreq_driver.boost_enabled = boost_state(0);
909   - get_online_cpus();
910 909  
  910 + cpu_notifier_register_begin();
  911 +
911 912 /* Force all MSRs to the same value */
912 913 boost_set_msrs(acpi_cpufreq_driver.boost_enabled,
913 914 cpu_online_mask);
914 915  
915   - register_cpu_notifier(&boost_nb);
  916 + __register_cpu_notifier(&boost_nb);
916 917  
917   - put_online_cpus();
  918 + cpu_notifier_register_done();
918 919 }
919 920 }
920 921  
drivers/hwmon/coretemp.c
... ... @@ -810,20 +810,20 @@
810 810 if (err)
811 811 goto exit;
812 812  
813   - get_online_cpus();
  813 + cpu_notifier_register_begin();
814 814 for_each_online_cpu(i)
815 815 get_core_online(i);
816 816  
817 817 #ifndef CONFIG_HOTPLUG_CPU
818 818 if (list_empty(&pdev_list)) {
819   - put_online_cpus();
  819 + cpu_notifier_register_done();
820 820 err = -ENODEV;
821 821 goto exit_driver_unreg;
822 822 }
823 823 #endif
824 824  
825   - register_hotcpu_notifier(&coretemp_cpu_notifier);
826   - put_online_cpus();
  825 + __register_hotcpu_notifier(&coretemp_cpu_notifier);
  826 + cpu_notifier_register_done();
827 827 return 0;
828 828  
829 829 #ifndef CONFIG_HOTPLUG_CPU
... ... @@ -838,8 +838,8 @@
838 838 {
839 839 struct pdev_entry *p, *n;
840 840  
841   - get_online_cpus();
842   - unregister_hotcpu_notifier(&coretemp_cpu_notifier);
  841 + cpu_notifier_register_begin();
  842 + __unregister_hotcpu_notifier(&coretemp_cpu_notifier);
843 843 mutex_lock(&pdev_list_mutex);
844 844 list_for_each_entry_safe(p, n, &pdev_list, list) {
845 845 platform_device_unregister(p->pdev);
... ... @@ -847,7 +847,7 @@
847 847 kfree(p);
848 848 }
849 849 mutex_unlock(&pdev_list_mutex);
850   - put_online_cpus();
  850 + cpu_notifier_register_done();
851 851 platform_driver_unregister(&coretemp_driver);
852 852 }
853 853  
drivers/hwmon/via-cputemp.c
... ... @@ -319,7 +319,7 @@
319 319 if (err)
320 320 goto exit;
321 321  
322   - get_online_cpus();
  322 + cpu_notifier_register_begin();
323 323 for_each_online_cpu(i) {
324 324 struct cpuinfo_x86 *c = &cpu_data(i);
325 325  
326 326  
... ... @@ -339,14 +339,14 @@
339 339  
340 340 #ifndef CONFIG_HOTPLUG_CPU
341 341 if (list_empty(&pdev_list)) {
342   - put_online_cpus();
  342 + cpu_notifier_register_done();
343 343 err = -ENODEV;
344 344 goto exit_driver_unreg;
345 345 }
346 346 #endif
347 347  
348   - register_hotcpu_notifier(&via_cputemp_cpu_notifier);
349   - put_online_cpus();
  348 + __register_hotcpu_notifier(&via_cputemp_cpu_notifier);
  349 + cpu_notifier_register_done();
350 350 return 0;
351 351  
352 352 #ifndef CONFIG_HOTPLUG_CPU
... ... @@ -361,8 +361,8 @@
361 361 {
362 362 struct pdev_entry *p, *n;
363 363  
364   - get_online_cpus();
365   - unregister_hotcpu_notifier(&via_cputemp_cpu_notifier);
  364 + cpu_notifier_register_begin();
  365 + __unregister_hotcpu_notifier(&via_cputemp_cpu_notifier);
366 366 mutex_lock(&pdev_list_mutex);
367 367 list_for_each_entry_safe(p, n, &pdev_list, list) {
368 368 platform_device_unregister(p->pdev);
... ... @@ -370,7 +370,7 @@
370 370 kfree(p);
371 371 }
372 372 mutex_unlock(&pdev_list_mutex);
373   - put_online_cpus();
  373 + cpu_notifier_register_done();
374 374 platform_driver_unregister(&via_cputemp_driver);
375 375 }
376 376  
drivers/idle/intel_idle.c
... ... @@ -681,15 +681,20 @@
681 681 if (intel_idle_cpuidle_devices == NULL)
682 682 return -ENOMEM;
683 683  
  684 + cpu_notifier_register_begin();
  685 +
684 686 for_each_online_cpu(i) {
685 687 retval = intel_idle_cpu_init(i);
686 688 if (retval) {
  689 + cpu_notifier_register_done();
687 690 cpuidle_unregister_driver(&intel_idle_driver);
688 691 return retval;
689 692 }
690 693 }
691   - register_cpu_notifier(&cpu_hotplug_notifier);
  694 + __register_cpu_notifier(&cpu_hotplug_notifier);
692 695  
  696 + cpu_notifier_register_done();
  697 +
693 698 return 0;
694 699 }
695 700  
696 701  
... ... @@ -698,10 +703,13 @@
698 703 intel_idle_cpuidle_devices_uninit();
699 704 cpuidle_unregister_driver(&intel_idle_driver);
700 705  
  706 + cpu_notifier_register_begin();
701 707  
702 708 if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
703 709 on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
704   - unregister_cpu_notifier(&cpu_hotplug_notifier);
  710 + __unregister_cpu_notifier(&cpu_hotplug_notifier);
  711 +
  712 + cpu_notifier_register_done();
705 713  
706 714 return;
707 715 }
drivers/oprofile/nmi_timer_int.c
... ... @@ -108,8 +108,8 @@
108 108 struct perf_event *event;
109 109 int cpu;
110 110  
111   - get_online_cpus();
112   - unregister_cpu_notifier(&nmi_timer_cpu_nb);
  111 + cpu_notifier_register_begin();
  112 + __unregister_cpu_notifier(&nmi_timer_cpu_nb);
113 113 for_each_possible_cpu(cpu) {
114 114 event = per_cpu(nmi_timer_events, cpu);
115 115 if (!event)
... ... @@ -119,7 +119,7 @@
119 119 perf_event_release_kernel(event);
120 120 }
121 121  
122   - put_online_cpus();
  122 + cpu_notifier_register_done();
123 123 }
124 124  
125 125 static int nmi_timer_setup(void)
126 126  
127 127  
128 128  
129 129  
... ... @@ -132,20 +132,23 @@
132 132 do_div(period, HZ);
133 133 nmi_timer_attr.sample_period = period;
134 134  
135   - get_online_cpus();
136   - err = register_cpu_notifier(&nmi_timer_cpu_nb);
  135 + cpu_notifier_register_begin();
  136 + err = __register_cpu_notifier(&nmi_timer_cpu_nb);
137 137 if (err)
138 138 goto out;
  139 +
139 140 /* can't attach events to offline cpus: */
140 141 for_each_online_cpu(cpu) {
141 142 err = nmi_timer_start_cpu(cpu);
142   - if (err)
143   - break;
  143 + if (err) {
  144 + cpu_notifier_register_done();
  145 + nmi_timer_shutdown();
  146 + return err;
  147 + }
144 148 }
145   - if (err)
146   - nmi_timer_shutdown();
  149 +
147 150 out:
148   - put_online_cpus();
  151 + cpu_notifier_register_done();
149 152 return err;
150 153 }
151 154  
drivers/powercap/intel_rapl.c
... ... @@ -1374,6 +1374,9 @@
1374 1374  
1375 1375 return -ENODEV;
1376 1376 }
  1377 +
  1378 + cpu_notifier_register_begin();
  1379 +
1377 1380 /* prevent CPU hotplug during detection */
1378 1381 get_online_cpus();
1379 1382 ret = rapl_detect_topology();
1380 1383  
1381 1384  
1382 1385  
1383 1386  
... ... @@ -1385,20 +1388,23 @@
1385 1388 ret = -ENODEV;
1386 1389 goto done;
1387 1390 }
1388   - register_hotcpu_notifier(&rapl_cpu_notifier);
  1391 + __register_hotcpu_notifier(&rapl_cpu_notifier);
1389 1392 done:
1390 1393 put_online_cpus();
  1394 + cpu_notifier_register_done();
1391 1395  
1392 1396 return ret;
1393 1397 }
1394 1398  
1395 1399 static void __exit rapl_exit(void)
1396 1400 {
  1401 + cpu_notifier_register_begin();
1397 1402 get_online_cpus();
1398   - unregister_hotcpu_notifier(&rapl_cpu_notifier);
  1403 + __unregister_hotcpu_notifier(&rapl_cpu_notifier);
1399 1404 rapl_unregister_powercap();
1400 1405 rapl_cleanup_data();
1401 1406 put_online_cpus();
  1407 + cpu_notifier_register_done();
1402 1408 }
1403 1409  
1404 1410 module_init(rapl_init);
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
... ... @@ -2592,13 +2592,17 @@
2592 2592 spin_lock_init(&p->fp_work_lock);
2593 2593 }
2594 2594  
  2595 + cpu_notifier_register_begin();
  2596 +
2595 2597 for_each_online_cpu(cpu) {
2596 2598 bnx2fc_percpu_thread_create(cpu);
2597 2599 }
2598 2600  
2599 2601 /* Initialize per CPU interrupt thread */
2600   - register_hotcpu_notifier(&bnx2fc_cpu_notifier);
  2602 + __register_hotcpu_notifier(&bnx2fc_cpu_notifier);
2601 2603  
  2604 + cpu_notifier_register_done();
  2605 +
2602 2606 cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
2603 2607  
2604 2608 return 0;
2605 2609  
... ... @@ -2662,12 +2666,16 @@
2662 2666 if (l2_thread)
2663 2667 kthread_stop(l2_thread);
2664 2668  
2665   - unregister_hotcpu_notifier(&bnx2fc_cpu_notifier);
  2669 + cpu_notifier_register_begin();
2666 2670  
2667 2671 /* Destroy per cpu threads */
2668 2672 for_each_online_cpu(cpu) {
2669 2673 bnx2fc_percpu_thread_destroy(cpu);
2670 2674 }
  2675 +
  2676 + __unregister_hotcpu_notifier(&bnx2fc_cpu_notifier);
  2677 +
  2678 + cpu_notifier_register_done();
2671 2679  
2672 2680 destroy_workqueue(bnx2fc_wq);
2673 2681 /*
drivers/scsi/bnx2i/bnx2i_init.c
... ... @@ -537,12 +537,16 @@
537 537 p->iothread = NULL;
538 538 }
539 539  
  540 + cpu_notifier_register_begin();
  541 +
540 542 for_each_online_cpu(cpu)
541 543 bnx2i_percpu_thread_create(cpu);
542 544  
543 545 /* Initialize per CPU interrupt thread */
544   - register_hotcpu_notifier(&bnx2i_cpu_notifier);
  546 + __register_hotcpu_notifier(&bnx2i_cpu_notifier);
545 547  
  548 + cpu_notifier_register_done();
  549 +
546 550 return 0;
547 551  
548 552 unreg_xport:
549 553  
... ... @@ -581,10 +585,14 @@
581 585 }
582 586 mutex_unlock(&bnx2i_dev_lock);
583 587  
584   - unregister_hotcpu_notifier(&bnx2i_cpu_notifier);
  588 + cpu_notifier_register_begin();
585 589  
586 590 for_each_online_cpu(cpu)
587 591 bnx2i_percpu_thread_destroy(cpu);
  592 +
  593 + __unregister_hotcpu_notifier(&bnx2i_cpu_notifier);
  594 +
  595 + cpu_notifier_register_done();
588 596  
589 597 iscsi_unregister_transport(&bnx2i_iscsi_transport);
590 598 cnic_unregister_driver(CNIC_ULP_ISCSI);
drivers/scsi/fcoe/fcoe.c
... ... @@ -2633,14 +2633,18 @@
2633 2633 skb_queue_head_init(&p->fcoe_rx_list);
2634 2634 }
2635 2635  
  2636 + cpu_notifier_register_begin();
  2637 +
2636 2638 for_each_online_cpu(cpu)
2637 2639 fcoe_percpu_thread_create(cpu);
2638 2640  
2639 2641 /* Initialize per CPU interrupt thread */
2640   - rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
  2642 + rc = __register_hotcpu_notifier(&fcoe_cpu_notifier);
2641 2643 if (rc)
2642 2644 goto out_free;
2643 2645  
  2646 + cpu_notifier_register_done();
  2647 +
2644 2648 /* Setup link change notification */
2645 2649 fcoe_dev_setup();
2646 2650  
... ... @@ -2655,6 +2659,9 @@
2655 2659 for_each_online_cpu(cpu) {
2656 2660 fcoe_percpu_thread_destroy(cpu);
2657 2661 }
  2662 +
  2663 + cpu_notifier_register_done();
  2664 +
2658 2665 mutex_unlock(&fcoe_config_mutex);
2659 2666 destroy_workqueue(fcoe_wq);
2660 2667 return rc;
2661 2668  
... ... @@ -2687,10 +2694,14 @@
2687 2694 }
2688 2695 rtnl_unlock();
2689 2696  
2690   - unregister_hotcpu_notifier(&fcoe_cpu_notifier);
  2697 + cpu_notifier_register_begin();
2691 2698  
2692 2699 for_each_online_cpu(cpu)
2693 2700 fcoe_percpu_thread_destroy(cpu);
  2701 +
  2702 + __unregister_hotcpu_notifier(&fcoe_cpu_notifier);
  2703 +
  2704 + cpu_notifier_register_done();
2694 2705  
2695 2706 mutex_unlock(&fcoe_config_mutex);
2696 2707  
drivers/thermal/x86_pkg_temp_thermal.c
... ... @@ -590,12 +590,12 @@
590 590 platform_thermal_package_rate_control =
591 591 pkg_temp_thermal_platform_thermal_rate_control;
592 592  
593   - get_online_cpus();
  593 + cpu_notifier_register_begin();
594 594 for_each_online_cpu(i)
595 595 if (get_core_online(i))
596 596 goto err_ret;
597   - register_hotcpu_notifier(&pkg_temp_thermal_notifier);
598   - put_online_cpus();
  597 + __register_hotcpu_notifier(&pkg_temp_thermal_notifier);
  598 + cpu_notifier_register_done();
599 599  
600 600 pkg_temp_debugfs_init(); /* Don't care if fails */
601 601  
... ... @@ -604,7 +604,7 @@
604 604 err_ret:
605 605 for_each_online_cpu(i)
606 606 put_core_offline(i);
607   - put_online_cpus();
  607 + cpu_notifier_register_done();
608 608 kfree(pkg_work_scheduled);
609 609 platform_thermal_package_notify = NULL;
610 610 platform_thermal_package_rate_control = NULL;
... ... @@ -617,8 +617,8 @@
617 617 struct phy_dev_entry *phdev, *n;
618 618 int i;
619 619  
620   - get_online_cpus();
621   - unregister_hotcpu_notifier(&pkg_temp_thermal_notifier);
  620 + cpu_notifier_register_begin();
  621 + __unregister_hotcpu_notifier(&pkg_temp_thermal_notifier);
622 622 mutex_lock(&phy_dev_list_mutex);
623 623 list_for_each_entry_safe(phdev, n, &phy_dev_list, list) {
624 624 /* Retore old MSR value for package thermal interrupt */
... ... @@ -636,7 +636,7 @@
636 636 for_each_online_cpu(i)
637 637 cancel_delayed_work_sync(
638 638 &per_cpu(pkg_temp_thermal_threshold_work, i));
639   - put_online_cpus();
  639 + cpu_notifier_register_done();
640 640  
641 641 kfree(pkg_work_scheduled);
642 642  
drivers/watchdog/octeon-wdt-main.c
... ... @@ -708,10 +708,13 @@
708 708  
709 709 cpumask_clear(&irq_enabled_cpus);
710 710  
  711 + cpu_notifier_register_begin();
711 712 for_each_online_cpu(cpu)
712 713 octeon_wdt_setup_interrupt(cpu);
713 714  
714   - register_hotcpu_notifier(&octeon_wdt_cpu_notifier);
  715 + __register_hotcpu_notifier(&octeon_wdt_cpu_notifier);
  716 + cpu_notifier_register_done();
  717 +
715 718 out:
716 719 return ret;
717 720 }
... ... @@ -725,7 +728,8 @@
725 728  
726 729 misc_deregister(&octeon_wdt_miscdev);
727 730  
728   - unregister_hotcpu_notifier(&octeon_wdt_cpu_notifier);
  731 + cpu_notifier_register_begin();
  732 + __unregister_hotcpu_notifier(&octeon_wdt_cpu_notifier);
729 733  
730 734 for_each_online_cpu(cpu) {
731 735 int core = cpu2core(cpu);
... ... @@ -734,6 +738,9 @@
734 738 /* Free the interrupt handler */
735 739 free_irq(OCTEON_IRQ_WDOG0 + core, octeon_wdt_poke_irq);
736 740 }
  741 +
  742 + cpu_notifier_register_done();
  743 +
737 744 /*
738 745 * Disable the boot-bus memory, the code it points to is soon
739 746 * to go missing.
drivers/xen/balloon.c
... ... @@ -604,19 +604,29 @@
604 604 }
605 605 }
606 606  
  607 +static int alloc_balloon_scratch_page(int cpu)
  608 +{
  609 + if (per_cpu(balloon_scratch_page, cpu) != NULL)
  610 + return 0;
  611 +
  612 + per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
  613 + if (per_cpu(balloon_scratch_page, cpu) == NULL) {
  614 + pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
  615 + return -ENOMEM;
  616 + }
  617 +
  618 + return 0;
  619 +}
  620 +
  621 +
607 622 static int balloon_cpu_notify(struct notifier_block *self,
608 623 unsigned long action, void *hcpu)
609 624 {
610 625 int cpu = (long)hcpu;
611 626 switch (action) {
612 627 case CPU_UP_PREPARE:
613   - if (per_cpu(balloon_scratch_page, cpu) != NULL)
614   - break;
615   - per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
616   - if (per_cpu(balloon_scratch_page, cpu) == NULL) {
617   - pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
  628 + if (alloc_balloon_scratch_page(cpu))
618 629 return NOTIFY_BAD;
619   - }
620 630 break;
621 631 default:
622 632 break;
623 633  
... ... @@ -636,15 +646,17 @@
636 646 return -ENODEV;
637 647  
638 648 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
639   - for_each_online_cpu(cpu)
640   - {
641   - per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
642   - if (per_cpu(balloon_scratch_page, cpu) == NULL) {
643   - pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
  649 + register_cpu_notifier(&balloon_cpu_notifier);
  650 +
  651 + get_online_cpus();
  652 + for_each_online_cpu(cpu) {
  653 + if (alloc_balloon_scratch_page(cpu)) {
  654 + put_online_cpus();
  655 + unregister_cpu_notifier(&balloon_cpu_notifier);
644 656 return -ENOMEM;
645 657 }
646 658 }
647   - register_cpu_notifier(&balloon_cpu_notifier);
  659 + put_online_cpus();
648 660 }
649 661  
650 662 pr_info("Initialising balloon driver\n");
... ... @@ -115,26 +115,46 @@
115 115 { .notifier_call = fn, .priority = pri }; \
116 116 register_cpu_notifier(&fn##_nb); \
117 117 }
  118 +
  119 +#define __cpu_notifier(fn, pri) { \
  120 + static struct notifier_block fn##_nb = \
  121 + { .notifier_call = fn, .priority = pri }; \
  122 + __register_cpu_notifier(&fn##_nb); \
  123 +}
118 124 #else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
119 125 #define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
  126 +#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
120 127 #endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
  128 +
121 129 #ifdef CONFIG_HOTPLUG_CPU
122 130 extern int register_cpu_notifier(struct notifier_block *nb);
  131 +extern int __register_cpu_notifier(struct notifier_block *nb);
123 132 extern void unregister_cpu_notifier(struct notifier_block *nb);
  133 +extern void __unregister_cpu_notifier(struct notifier_block *nb);
124 134 #else
125 135  
126 136 #ifndef MODULE
127 137 extern int register_cpu_notifier(struct notifier_block *nb);
  138 +extern int __register_cpu_notifier(struct notifier_block *nb);
128 139 #else
129 140 static inline int register_cpu_notifier(struct notifier_block *nb)
130 141 {
131 142 return 0;
132 143 }
  144 +
  145 +static inline int __register_cpu_notifier(struct notifier_block *nb)
  146 +{
  147 + return 0;
  148 +}
133 149 #endif
134 150  
135 151 static inline void unregister_cpu_notifier(struct notifier_block *nb)
136 152 {
137 153 }
  154 +
  155 +static inline void __unregister_cpu_notifier(struct notifier_block *nb)
  156 +{
  157 +}
138 158 #endif
139 159  
140 160 int cpu_up(unsigned int cpu);
141 161  
142 162  
143 163  
... ... @@ -142,19 +162,32 @@
142 162 extern void cpu_maps_update_begin(void);
143 163 extern void cpu_maps_update_done(void);
144 164  
  165 +#define cpu_notifier_register_begin cpu_maps_update_begin
  166 +#define cpu_notifier_register_done cpu_maps_update_done
  167 +
145 168 #else /* CONFIG_SMP */
146 169  
147 170 #define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
  171 +#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
148 172  
149 173 static inline int register_cpu_notifier(struct notifier_block *nb)
150 174 {
151 175 return 0;
152 176 }
153 177  
  178 +static inline int __register_cpu_notifier(struct notifier_block *nb)
  179 +{
  180 + return 0;
  181 +}
  182 +
154 183 static inline void unregister_cpu_notifier(struct notifier_block *nb)
155 184 {
156 185 }
157 186  
  187 +static inline void __unregister_cpu_notifier(struct notifier_block *nb)
  188 +{
  189 +}
  190 +
158 191 static inline void cpu_maps_update_begin(void)
159 192 {
160 193 }
... ... @@ -163,6 +196,14 @@
163 196 {
164 197 }
165 198  
  199 +static inline void cpu_notifier_register_begin(void)
  200 +{
  201 +}
  202 +
  203 +static inline void cpu_notifier_register_done(void)
  204 +{
  205 +}
  206 +
166 207 #endif /* CONFIG_SMP */
167 208 extern struct bus_type cpu_subsys;
168 209  
169 210  
170 211  
... ... @@ -176,8 +217,11 @@
176 217 extern void cpu_hotplug_disable(void);
177 218 extern void cpu_hotplug_enable(void);
178 219 #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
  220 +#define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri)
179 221 #define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
  222 +#define __register_hotcpu_notifier(nb) __register_cpu_notifier(nb)
180 223 #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
  224 +#define __unregister_hotcpu_notifier(nb) __unregister_cpu_notifier(nb)
181 225 void clear_tasks_mm_cpumask(int cpu);
182 226 int cpu_down(unsigned int cpu);
183 227  
184 228  
185 229  
... ... @@ -190,9 +234,12 @@
190 234 #define cpu_hotplug_disable() do { } while (0)
191 235 #define cpu_hotplug_enable() do { } while (0)
192 236 #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
  237 +#define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
193 238 /* These aren't inline functions due to a GCC bug. */
194 239 #define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
  240 +#define __register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
195 241 #define unregister_hotcpu_notifier(nb) ({ (void)(nb); })
  242 +#define __unregister_hotcpu_notifier(nb) ({ (void)(nb); })
196 243 #endif /* CONFIG_HOTPLUG_CPU */
197 244  
198 245 #ifdef CONFIG_PM_SLEEP_SMP
include/linux/perf_event.h
... ... @@ -835,6 +835,8 @@
835 835 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
836 836 unsigned long cpu = smp_processor_id(); \
837 837 unsigned long flags; \
  838 + \
  839 + cpu_notifier_register_begin(); \
838 840 fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
839 841 (void *)(unsigned long)cpu); \
840 842 local_irq_save(flags); \
841 843  
... ... @@ -843,9 +845,21 @@
843 845 local_irq_restore(flags); \
844 846 fn(&fn##_nb, (unsigned long)CPU_ONLINE, \
845 847 (void *)(unsigned long)cpu); \
846   - register_cpu_notifier(&fn##_nb); \
  848 + __register_cpu_notifier(&fn##_nb); \
  849 + cpu_notifier_register_done(); \
847 850 } while (0)
848 851  
  852 +/*
  853 + * Bare-bones version of perf_cpu_notifier(), which doesn't invoke the
  854 + * callback for already online CPUs.
  855 + */
  856 +#define __perf_cpu_notifier(fn) \
  857 +do { \
  858 + static struct notifier_block fn##_nb = \
  859 + { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
  860 + \
  861 + __register_cpu_notifier(&fn##_nb); \
  862 +} while (0)
849 863  
850 864 struct perf_pmu_events_attr {
851 865 struct device_attribute attr;
... ... @@ -19,6 +19,7 @@
19 19 #include <linux/mutex.h>
20 20 #include <linux/gfp.h>
21 21 #include <linux/suspend.h>
  22 +#include <linux/lockdep.h>
22 23  
23 24 #include "smpboot.h"
24 25  
25 26  
26 27  
... ... @@ -27,18 +28,23 @@
27 28 static DEFINE_MUTEX(cpu_add_remove_lock);
28 29  
29 30 /*
30   - * The following two API's must be used when attempting
31   - * to serialize the updates to cpu_online_mask, cpu_present_mask.
  31 + * The following two APIs (cpu_maps_update_begin/done) must be used when
  32 + * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
  33 + * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
  34 + * hotplug callback (un)registration performed using __register_cpu_notifier()
  35 + * or __unregister_cpu_notifier().
32 36 */
33 37 void cpu_maps_update_begin(void)
34 38 {
35 39 mutex_lock(&cpu_add_remove_lock);
36 40 }
  41 +EXPORT_SYMBOL(cpu_notifier_register_begin);
37 42  
38 43 void cpu_maps_update_done(void)
39 44 {
40 45 mutex_unlock(&cpu_add_remove_lock);
41 46 }
  47 +EXPORT_SYMBOL(cpu_notifier_register_done);
42 48  
43 49 static RAW_NOTIFIER_HEAD(cpu_chain);
44 50  
45 51  
46 52  
47 53  
... ... @@ -57,17 +63,30 @@
57 63 * an ongoing cpu hotplug operation.
58 64 */
59 65 int refcount;
  66 +
  67 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
  68 + struct lockdep_map dep_map;
  69 +#endif
60 70 } cpu_hotplug = {
61 71 .active_writer = NULL,
62 72 .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
63 73 .refcount = 0,
  74 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
  75 + .dep_map = {.name = "cpu_hotplug.lock" },
  76 +#endif
64 77 };
65 78  
  79 +/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
  80 +#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
  81 +#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
  82 +#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
  83 +
66 84 void get_online_cpus(void)
67 85 {
68 86 might_sleep();
69 87 if (cpu_hotplug.active_writer == current)
70 88 return;
  89 + cpuhp_lock_acquire_read();
71 90 mutex_lock(&cpu_hotplug.lock);
72 91 cpu_hotplug.refcount++;
73 92 mutex_unlock(&cpu_hotplug.lock);
... ... @@ -87,6 +106,7 @@
87 106 if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
88 107 wake_up_process(cpu_hotplug.active_writer);
89 108 mutex_unlock(&cpu_hotplug.lock);
  109 + cpuhp_lock_release();
90 110  
91 111 }
92 112 EXPORT_SYMBOL_GPL(put_online_cpus);
... ... @@ -117,6 +137,7 @@
117 137 {
118 138 cpu_hotplug.active_writer = current;
119 139  
  140 + cpuhp_lock_acquire();
120 141 for (;;) {
121 142 mutex_lock(&cpu_hotplug.lock);
122 143 if (likely(!cpu_hotplug.refcount))
... ... @@ -131,6 +152,7 @@
131 152 {
132 153 cpu_hotplug.active_writer = NULL;
133 154 mutex_unlock(&cpu_hotplug.lock);
  155 + cpuhp_lock_release();
134 156 }
135 157  
136 158 /*
... ... @@ -166,6 +188,11 @@
166 188 return ret;
167 189 }
168 190  
  191 +int __ref __register_cpu_notifier(struct notifier_block *nb)
  192 +{
  193 + return raw_notifier_chain_register(&cpu_chain, nb);
  194 +}
  195 +
169 196 static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
170 197 int *nr_calls)
171 198 {
... ... @@ -189,6 +216,7 @@
189 216 BUG_ON(cpu_notify(val, v));
190 217 }
191 218 EXPORT_SYMBOL(register_cpu_notifier);
  219 +EXPORT_SYMBOL(__register_cpu_notifier);
192 220  
193 221 void __ref unregister_cpu_notifier(struct notifier_block *nb)
194 222 {
... ... @@ -197,6 +225,12 @@
197 225 cpu_maps_update_done();
198 226 }
199 227 EXPORT_SYMBOL(unregister_cpu_notifier);
  228 +
  229 +void __ref __unregister_cpu_notifier(struct notifier_block *nb)
  230 +{
  231 + raw_notifier_chain_unregister(&cpu_chain, nb);
  232 +}
  233 +EXPORT_SYMBOL(__unregister_cpu_notifier);
200 234  
201 235 /**
202 236 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
... ... @@ -591,18 +591,28 @@
591 591 int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */
592 592 {
593 593 struct proc_dir_entry *entry;
  594 + int err = 0;
594 595  
595 596 if (!prof_on)
596 597 return 0;
597   - if (create_hash_tables())
598   - return -ENOMEM;
  598 +
  599 + cpu_notifier_register_begin();
  600 +
  601 + if (create_hash_tables()) {
  602 + err = -ENOMEM;
  603 + goto out;
  604 + }
  605 +
599 606 entry = proc_create("profile", S_IWUSR | S_IRUGO,
600 607 NULL, &proc_profile_operations);
601 608 if (!entry)
602   - return 0;
  609 + goto out;
603 610 proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));
604   - hotcpu_notifier(profile_cpu_callback, 0);
605   - return 0;
  611 + __hotcpu_notifier(profile_cpu_callback, 0);
  612 +
  613 +out:
  614 + cpu_notifier_register_done();
  615 + return err;
606 616 }
607 617 subsys_initcall(create_proc_profile);
608 618 #endif /* CONFIG_PROC_FS */
kernel/trace/ring_buffer.c
... ... @@ -1301,7 +1301,7 @@
1301 1301 * In that off case, we need to allocate for all possible cpus.
1302 1302 */
1303 1303 #ifdef CONFIG_HOTPLUG_CPU
1304   - get_online_cpus();
  1304 + cpu_notifier_register_begin();
1305 1305 cpumask_copy(buffer->cpumask, cpu_online_mask);
1306 1306 #else
1307 1307 cpumask_copy(buffer->cpumask, cpu_possible_mask);
1308 1308  
... ... @@ -1324,10 +1324,10 @@
1324 1324 #ifdef CONFIG_HOTPLUG_CPU
1325 1325 buffer->cpu_notify.notifier_call = rb_cpu_notify;
1326 1326 buffer->cpu_notify.priority = 0;
1327   - register_cpu_notifier(&buffer->cpu_notify);
  1327 + __register_cpu_notifier(&buffer->cpu_notify);
  1328 + cpu_notifier_register_done();
1328 1329 #endif
1329 1330  
1330   - put_online_cpus();
1331 1331 mutex_init(&buffer->mutex);
1332 1332  
1333 1333 return buffer;
... ... @@ -1341,7 +1341,9 @@
1341 1341  
1342 1342 fail_free_cpumask:
1343 1343 free_cpumask_var(buffer->cpumask);
1344   - put_online_cpus();
  1344 +#ifdef CONFIG_HOTPLUG_CPU
  1345 + cpu_notifier_register_done();
  1346 +#endif
1345 1347  
1346 1348 fail_free_buffer:
1347 1349 kfree(buffer);
1348 1350  
1349 1351  
... ... @@ -1358,16 +1360,17 @@
1358 1360 {
1359 1361 int cpu;
1360 1362  
1361   - get_online_cpus();
1362   -
1363 1363 #ifdef CONFIG_HOTPLUG_CPU
1364   - unregister_cpu_notifier(&buffer->cpu_notify);
  1364 + cpu_notifier_register_begin();
  1365 + __unregister_cpu_notifier(&buffer->cpu_notify);
1365 1366 #endif
1366 1367  
1367 1368 for_each_buffer_cpu(buffer, cpu)
1368 1369 rb_free_cpu_buffer(buffer->buffers[cpu]);
1369 1370  
1370   - put_online_cpus();
  1371 +#ifdef CONFIG_HOTPLUG_CPU
  1372 + cpu_notifier_register_done();
  1373 +#endif
1371 1374  
1372 1375 kfree(buffer->buffers);
1373 1376 free_cpumask_var(buffer->cpumask);
... ... @@ -1298,14 +1298,14 @@
1298 1298 #ifdef CONFIG_SMP
1299 1299 int cpu;
1300 1300  
1301   - register_cpu_notifier(&vmstat_notifier);
  1301 + cpu_notifier_register_begin();
  1302 + __register_cpu_notifier(&vmstat_notifier);
1302 1303  
1303   - get_online_cpus();
1304 1304 for_each_online_cpu(cpu) {
1305 1305 start_cpu_timer(cpu);
1306 1306 node_set_state(cpu_to_node(cpu), N_CPU);
1307 1307 }
1308   - put_online_cpus();
  1308 + cpu_notifier_register_done();
1309 1309 #endif
1310 1310 #ifdef CONFIG_PROC_FS
1311 1311 proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
... ... @@ -814,21 +814,32 @@
814 814 {
815 815 int cpu;
816 816  
  817 + cpu_notifier_register_begin();
  818 +
817 819 for_each_online_cpu(cpu)
818 820 zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
819   - unregister_cpu_notifier(&zs_cpu_nb);
  821 + __unregister_cpu_notifier(&zs_cpu_nb);
  822 +
  823 + cpu_notifier_register_done();
820 824 }
821 825  
822 826 static int zs_init(void)
823 827 {
824 828 int cpu, ret;
825 829  
826   - register_cpu_notifier(&zs_cpu_nb);
  830 + cpu_notifier_register_begin();
  831 +
  832 + __register_cpu_notifier(&zs_cpu_nb);
827 833 for_each_online_cpu(cpu) {
828 834 ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
829   - if (notifier_to_errno(ret))
  835 + if (notifier_to_errno(ret)) {
  836 + cpu_notifier_register_done();
830 837 goto fail;
  838 + }
831 839 }
  840 +
  841 + cpu_notifier_register_done();
  842 +
832 843 return 0;
833 844 fail:
834 845 zs_exit();
... ... @@ -387,18 +387,18 @@
387 387 {
388 388 unsigned long cpu;
389 389  
390   - get_online_cpus();
  390 + cpu_notifier_register_begin();
391 391 for_each_online_cpu(cpu)
392 392 if (__zswap_cpu_notifier(CPU_UP_PREPARE, cpu) != NOTIFY_OK)
393 393 goto cleanup;
394   - register_cpu_notifier(&zswap_cpu_notifier_block);
395   - put_online_cpus();
  394 + __register_cpu_notifier(&zswap_cpu_notifier_block);
  395 + cpu_notifier_register_done();
396 396 return 0;
397 397  
398 398 cleanup:
399 399 for_each_online_cpu(cpu)
400 400 __zswap_cpu_notifier(CPU_UP_CANCELED, cpu);
401   - put_online_cpus();
  401 + cpu_notifier_register_done();
402 402 return -ENOMEM;
403 403 }
404 404  
... ... @@ -455,6 +455,8 @@
455 455 if (!fc->percpu)
456 456 return -ENOMEM;
457 457  
  458 + cpu_notifier_register_begin();
  459 +
458 460 for_each_online_cpu(i) {
459 461 if (flow_cache_cpu_prepare(fc, i))
460 462 goto err;
461 463  
... ... @@ -462,8 +464,10 @@
462 464 fc->hotcpu_notifier = (struct notifier_block){
463 465 .notifier_call = flow_cache_cpu,
464 466 };
465   - register_hotcpu_notifier(&fc->hotcpu_notifier);
  467 + __register_hotcpu_notifier(&fc->hotcpu_notifier);
466 468  
  469 + cpu_notifier_register_done();
  470 +
467 471 setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
468 472 (unsigned long) fc);
469 473 fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
... ... @@ -477,6 +481,8 @@
477 481 kfree(fcp->hash_table);
478 482 fcp->hash_table = NULL;
479 483 }
  484 +
  485 + cpu_notifier_register_done();
480 486  
481 487 free_percpu(fc->percpu);
482 488 fc->percpu = NULL;
... ... @@ -621,6 +621,42 @@
621 621 put_online_cpus();
622 622 }
623 623  
  624 +static void free_iucv_data(int cpu)
  625 +{
  626 + kfree(iucv_param_irq[cpu]);
  627 + iucv_param_irq[cpu] = NULL;
  628 + kfree(iucv_param[cpu]);
  629 + iucv_param[cpu] = NULL;
  630 + kfree(iucv_irq_data[cpu]);
  631 + iucv_irq_data[cpu] = NULL;
  632 +}
  633 +
  634 +static int alloc_iucv_data(int cpu)
  635 +{
  636 + /* Note: GFP_DMA used to get memory below 2G */
  637 + iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
  638 + GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
  639 + if (!iucv_irq_data[cpu])
  640 + goto out_free;
  641 +
  642 + /* Allocate parameter blocks. */
  643 + iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
  644 + GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
  645 + if (!iucv_param[cpu])
  646 + goto out_free;
  647 +
  648 + iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
  649 + GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
  650 + if (!iucv_param_irq[cpu])
  651 + goto out_free;
  652 +
  653 + return 0;
  654 +
  655 +out_free:
  656 + free_iucv_data(cpu);
  657 + return -ENOMEM;
  658 +}
  659 +
624 660 static int iucv_cpu_notify(struct notifier_block *self,
625 661 unsigned long action, void *hcpu)
626 662 {
627 663  
628 664  
... ... @@ -630,38 +666,14 @@
630 666 switch (action) {
631 667 case CPU_UP_PREPARE:
632 668 case CPU_UP_PREPARE_FROZEN:
633   - iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
634   - GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
635   - if (!iucv_irq_data[cpu])
  669 + if (alloc_iucv_data(cpu))
636 670 return notifier_from_errno(-ENOMEM);
637   -
638   - iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
639   - GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
640   - if (!iucv_param[cpu]) {
641   - kfree(iucv_irq_data[cpu]);
642   - iucv_irq_data[cpu] = NULL;
643   - return notifier_from_errno(-ENOMEM);
644   - }
645   - iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
646   - GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
647   - if (!iucv_param_irq[cpu]) {
648   - kfree(iucv_param[cpu]);
649   - iucv_param[cpu] = NULL;
650   - kfree(iucv_irq_data[cpu]);
651   - iucv_irq_data[cpu] = NULL;
652   - return notifier_from_errno(-ENOMEM);
653   - }
654 671 break;
655 672 case CPU_UP_CANCELED:
656 673 case CPU_UP_CANCELED_FROZEN:
657 674 case CPU_DEAD:
658 675 case CPU_DEAD_FROZEN:
659   - kfree(iucv_param_irq[cpu]);
660   - iucv_param_irq[cpu] = NULL;
661   - kfree(iucv_param[cpu]);
662   - iucv_param[cpu] = NULL;
663   - kfree(iucv_irq_data[cpu]);
664   - iucv_irq_data[cpu] = NULL;
  676 + free_iucv_data(cpu);
665 677 break;
666 678 case CPU_ONLINE:
667 679 case CPU_ONLINE_FROZEN:
668 680  
669 681  
670 682  
671 683  
... ... @@ -2025,33 +2037,20 @@
2025 2037 goto out_int;
2026 2038 }
2027 2039  
  2040 + cpu_notifier_register_begin();
  2041 +
2028 2042 for_each_online_cpu(cpu) {
2029   - /* Note: GFP_DMA used to get memory below 2G */
2030   - iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
2031   - GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
2032   - if (!iucv_irq_data[cpu]) {
  2043 + if (alloc_iucv_data(cpu)) {
2033 2044 rc = -ENOMEM;
2034 2045 goto out_free;
2035 2046 }
2036   -
2037   - /* Allocate parameter blocks. */
2038   - iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
2039   - GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
2040   - if (!iucv_param[cpu]) {
2041   - rc = -ENOMEM;
2042   - goto out_free;
2043   - }
2044   - iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
2045   - GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
2046   - if (!iucv_param_irq[cpu]) {
2047   - rc = -ENOMEM;
2048   - goto out_free;
2049   - }
2050   -
2051 2047 }
2052   - rc = register_hotcpu_notifier(&iucv_cpu_notifier);
  2048 + rc = __register_hotcpu_notifier(&iucv_cpu_notifier);
2053 2049 if (rc)
2054 2050 goto out_free;
  2051 +
  2052 + cpu_notifier_register_done();
  2053 +
2055 2054 rc = register_reboot_notifier(&iucv_reboot_notifier);
2056 2055 if (rc)
2057 2056 goto out_cpu;
2058 2057  
... ... @@ -2069,16 +2068,14 @@
2069 2068 out_reboot:
2070 2069 unregister_reboot_notifier(&iucv_reboot_notifier);
2071 2070 out_cpu:
2072   - unregister_hotcpu_notifier(&iucv_cpu_notifier);
  2071 + cpu_notifier_register_begin();
  2072 + __unregister_hotcpu_notifier(&iucv_cpu_notifier);
2073 2073 out_free:
2074   - for_each_possible_cpu(cpu) {
2075   - kfree(iucv_param_irq[cpu]);
2076   - iucv_param_irq[cpu] = NULL;
2077   - kfree(iucv_param[cpu]);
2078   - iucv_param[cpu] = NULL;
2079   - kfree(iucv_irq_data[cpu]);
2080   - iucv_irq_data[cpu] = NULL;
2081   - }
  2074 + for_each_possible_cpu(cpu)
  2075 + free_iucv_data(cpu);
  2076 +
  2077 + cpu_notifier_register_done();
  2078 +
2082 2079 root_device_unregister(iucv_root);
2083 2080 out_int:
2084 2081 unregister_external_interrupt(0x4000, iucv_external_interrupt);
... ... @@ -2105,15 +2102,11 @@
2105 2102 kfree(p);
2106 2103 spin_unlock_irq(&iucv_queue_lock);
2107 2104 unregister_reboot_notifier(&iucv_reboot_notifier);
2108   - unregister_hotcpu_notifier(&iucv_cpu_notifier);
2109   - for_each_possible_cpu(cpu) {
2110   - kfree(iucv_param_irq[cpu]);
2111   - iucv_param_irq[cpu] = NULL;
2112   - kfree(iucv_param[cpu]);
2113   - iucv_param[cpu] = NULL;
2114   - kfree(iucv_irq_data[cpu]);
2115   - iucv_irq_data[cpu] = NULL;
2116   - }
  2105 + cpu_notifier_register_begin();
  2106 + __unregister_hotcpu_notifier(&iucv_cpu_notifier);
  2107 + for_each_possible_cpu(cpu)
  2108 + free_iucv_data(cpu);
  2109 + cpu_notifier_register_done();
2117 2110 root_device_unregister(iucv_root);
2118 2111 bus_unregister(&iucv_bus);
2119 2112 unregister_external_interrupt(0x4000, iucv_external_interrupt);