Commit 9b41ea7289a589993d3daabc61f999b4147872c4
Committed by Greg Kroah-Hartman
1 parent: 2b25742556
Exists in master and in 7 other branches
[PATCH] workqueue: remove lock_cpu_hotplug()
Use a private lock instead. It protects all per-cpu data structures in
workqueue.c, including the workqueues list.

Fix a bug in schedule_on_each_cpu(): it was forgetting to lock down the
per-cpu resources.

Unfixed long-standing bug: if someone unplugs the CPU identified by
`singlethread_cpu' the kernel will get very sick.

Cc: Dave Jones <davej@codemonkey.org.uk>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
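Two points in the patch are easy to miss. The lock had to become a mutex because the critical sections now sleep (create_workqueue_thread() creates kernel threads), which a spinlock cannot tolerate. And the hotplug notifier holds the mutex across an entire CPU unplug: it is taken in CPU_DOWN_PREPARE and released only in CPU_DOWN_FAILED or CPU_DEAD. A minimal sketch of that hand-off, assuming a notifier callback of the usual shape (my_mutex and my_hotplug_callback are hypothetical names, not from the patch):

#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/notifier.h>

static DEFINE_MUTEX(my_mutex);

static int my_hotplug_callback(struct notifier_block *nb,
                               unsigned long action, void *hcpu)
{
        switch (action) {
        case CPU_DOWN_PREPARE:
                /* Unplug is starting: pin the per-cpu state... */
                mutex_lock(&my_mutex);
                break;
        case CPU_DOWN_FAILED:   /* unplug aborted */
        case CPU_DEAD:          /* unplug completed */
                /* ...and release it once the unplug ends either way. */
                mutex_unlock(&my_mutex);
                break;
        }
        return NOTIFY_OK;
}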
Showing 1 changed file with 21 additions and 12 deletions
kernel/workqueue.c
@@ -68,7 +68,7 @@
 
 /* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
    threads to each one as cpus come/go. */
-static DEFINE_SPINLOCK(workqueue_lock);
+static DEFINE_MUTEX(workqueue_mutex);
 static LIST_HEAD(workqueues);
 
 static int singlethread_cpu;
@@ -320,10 +320,10 @@
         } else {
                 int cpu;
 
-                lock_cpu_hotplug();
+                mutex_lock(&workqueue_mutex);
                 for_each_online_cpu(cpu)
                         flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
-                unlock_cpu_hotplug();
+                mutex_unlock(&workqueue_mutex);
         }
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -371,8 +371,7 @@
         }
 
         wq->name = name;
-        /* We don't need the distraction of CPUs appearing and vanishing. */
-        lock_cpu_hotplug();
+        mutex_lock(&workqueue_mutex);
         if (singlethread) {
                 INIT_LIST_HEAD(&wq->list);
                 p = create_workqueue_thread(wq, singlethread_cpu);
@@ -381,9 +380,7 @@
                 else
                         wake_up_process(p);
         } else {
-                spin_lock(&workqueue_lock);
                 list_add(&wq->list, &workqueues);
-                spin_unlock(&workqueue_lock);
                 for_each_online_cpu(cpu) {
                         p = create_workqueue_thread(wq, cpu);
                         if (p) {
@@ -393,7 +390,7 @@
                                 destroy = 1;
                 }
         }
-        unlock_cpu_hotplug();
+        mutex_unlock(&workqueue_mutex);
 
         /*
          * Was there any error during startup? If yes then clean up:
@@ -434,17 +431,15 @@
         flush_workqueue(wq);
 
         /* We don't need the distraction of CPUs appearing and vanishing. */
-        lock_cpu_hotplug();
+        mutex_lock(&workqueue_mutex);
         if (is_single_threaded(wq))
                 cleanup_workqueue_thread(wq, singlethread_cpu);
         else {
                 for_each_online_cpu(cpu)
                         cleanup_workqueue_thread(wq, cpu);
-                spin_lock(&workqueue_lock);
                 list_del(&wq->list);
-                spin_unlock(&workqueue_lock);
         }
-        unlock_cpu_hotplug();
+        mutex_unlock(&workqueue_mutex);
         free_percpu(wq->cpu_wq);
         kfree(wq);
 }
@@ -515,11 +510,13 @@
         if (!works)
                 return -ENOMEM;
 
+        mutex_lock(&workqueue_mutex);
         for_each_online_cpu(cpu) {
                 INIT_WORK(per_cpu_ptr(works, cpu), func, info);
                 __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
                                 per_cpu_ptr(works, cpu));
         }
+        mutex_unlock(&workqueue_mutex);
         flush_workqueue(keventd_wq);
         free_percpu(works);
         return 0;
@@ -635,6 +632,7 @@
 
         switch (action) {
         case CPU_UP_PREPARE:
+                mutex_lock(&workqueue_mutex);
                 /* Create a new workqueue thread for it. */
                 list_for_each_entry(wq, &workqueues, list) {
                         if (!create_workqueue_thread(wq, hotcpu)) {
@@ -653,6 +651,7 @@
                         kthread_bind(cwq->thread, hotcpu);
                         wake_up_process(cwq->thread);
                 }
+                mutex_unlock(&workqueue_mutex);
                 break;
 
         case CPU_UP_CANCELED:
@@ -664,13 +663,23 @@
                                      any_online_cpu(cpu_online_map));
                         cleanup_workqueue_thread(wq, hotcpu);
                 }
+                mutex_unlock(&workqueue_mutex);
                 break;
 
+        case CPU_DOWN_PREPARE:
+                mutex_lock(&workqueue_mutex);
+                break;
+
+        case CPU_DOWN_FAILED:
+                mutex_unlock(&workqueue_mutex);
+                break;
+
         case CPU_DEAD:
                 list_for_each_entry(wq, &workqueues, list)
                         cleanup_workqueue_thread(wq, hotcpu);
                 list_for_each_entry(wq, &workqueues, list)
                         take_over_work(wq, hotcpu);
+                mutex_unlock(&workqueue_mutex);
                 break;
         }
 
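For context on the schedule_on_each_cpu() fix above: the function queues one work item per online CPU and then flushes keventd, so the online map must stay stable while the items are queued. A hedged usage sketch, assuming the 2.6.18-era interface in which work functions take a void * argument (drain_local_caches and drain_all_caches are hypothetical callers, not part of the patch):

#include <linux/workqueue.h>

/* Runs once on every online CPU, in keventd context. */
static void drain_local_caches(void *info)
{
        /* per-cpu work goes here */
}

static int drain_all_caches(void)
{
        /* With the fix, workqueue_mutex pins the set of online CPUs
         * between INIT_WORK() and __queue_work(), so no CPU can come
         * or go while the per-cpu work items are being queued. */
        return schedule_on_each_cpu(drain_local_caches, NULL);
}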