Commit 9398180097e359646d46083c3e079a54e20bee82
Committed by: Linus Torvalds
Parent: e13193319d
Exists in: master and 7 other branches
workqueue: fix race condition in schedule_on_each_cpu()
Commit 65a64464349883891e21e74af16c05d6e1eeb4e9 ("HWPOISON: Allow schedule_on_each_cpu() from keventd"), which allows schedule_on_each_cpu() to be called from keventd, added a race condition: schedule_on_each_cpu() may race with cpu hotplug and end up executing the function twice on a cpu.

Fix it by moving direct execution into the section protected with get/put_online_cpus(). While at it, update the code so that direct execution is done after works have been scheduled for all other cpus, and drop the unnecessary cpu != orig test from the flush loop.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Andi Kleen <ak@linux.intel.com>
Acked-by: Oleg Nesterov <oleg@redhat.com>
Cc: Ingo Molnar <mingo@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
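For context, a minimal caller sketch (not part of this commit) showing how schedule_on_each_cpu() is typically used. The names note_cpu() and run_everywhere() are hypothetical; only schedule_on_each_cpu(), the work_func_t callback signature, and raw_smp_processor_id() are real kernel interfaces referenced by this patch:

#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/smp.h>

/* Hypothetical callback (illustration only): runs once on each online CPU. */
static void note_cpu(struct work_struct *unused)
{
        pr_info("schedule_on_each_cpu() callback on cpu %d\n",
                raw_smp_processor_id());
}

/* Hypothetical caller: blocks until note_cpu() has run on every online CPU. */
static int run_everywhere(void)
{
        /* Returns 0 on success, -ENOMEM if the per-cpu work items cannot be allocated. */
        return schedule_on_each_cpu(note_cpu);
}

After commit 65a64464 such a call may also originate from keventd itself, which is the path whose cpu-hotplug race this commit fixes.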
1 changed file with 13 additions and 15 deletions
kernel/workqueue.c
@@ -692,31 +692,29 @@
         if (!works)
                 return -ENOMEM;
 
+        get_online_cpus();
+
         /*
-         * when running in keventd don't schedule a work item on itself.
-         * Can just call directly because the work queue is already bound.
-         * This also is faster.
-         * Make this a generic parameter for other workqueues?
+         * When running in keventd don't schedule a work item on
+         * itself. Can just call directly because the work queue is
+         * already bound. This also is faster.
          */
-        if (current_is_keventd()) {
+        if (current_is_keventd())
                 orig = raw_smp_processor_id();
-                INIT_WORK(per_cpu_ptr(works, orig), func);
-                func(per_cpu_ptr(works, orig));
-        }
 
-        get_online_cpus();
         for_each_online_cpu(cpu) {
                 struct work_struct *work = per_cpu_ptr(works, cpu);
 
-                if (cpu == orig)
-                        continue;
                 INIT_WORK(work, func);
-                schedule_work_on(cpu, work);
-        }
-        for_each_online_cpu(cpu) {
                 if (cpu != orig)
-                        flush_work(per_cpu_ptr(works, cpu));
+                        schedule_work_on(cpu, work);
         }
+        if (orig >= 0)
+                func(per_cpu_ptr(works, orig));
+
+        for_each_online_cpu(cpu)
+                flush_work(per_cpu_ptr(works, cpu));
+
         put_online_cpus();
         free_percpu(works);
         return 0;
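Pieced together from the hunk above, schedule_on_each_cpu() after this patch reads roughly as follows. The declarations and the alloc_percpu() call at the top of the function sit above the hunk and are not shown in the diff; they are reproduced here as assumptions (in particular, orig is assumed to start at -1, which the new "if (orig >= 0)" test relies on):

int schedule_on_each_cpu(work_func_t func)
{
        int cpu;
        int orig = -1;                  /* assumed initializer, above the hunk */
        struct work_struct *works;

        works = alloc_percpu(struct work_struct);       /* assumed, above the hunk */
        if (!works)
                return -ENOMEM;

        get_online_cpus();

        /*
         * When running in keventd don't schedule a work item on
         * itself. Can just call directly because the work queue is
         * already bound. This also is faster.
         */
        if (current_is_keventd())
                orig = raw_smp_processor_id();

        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);

                INIT_WORK(work, func);
                if (cpu != orig)
                        schedule_work_on(cpu, work);
        }
        if (orig >= 0)
                func(per_cpu_ptr(works, orig));

        for_each_online_cpu(cpu)
                flush_work(per_cpu_ptr(works, cpu));

        put_online_cpus();
        free_percpu(works);
        return 0;
}

The direct execution on orig now happens inside the get/put_online_cpus() section and only after works have been scheduled on all other cpus, which is the reordering described in the commit message.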