Commit 371cbb387e33651b4c1326457116568ff01ac422
Committed by
Linus Torvalds
1 parent
63706172f3
Exists in
master
and in
20 other branches
kthreads: simplify migration_thread() exit path
Now that kthread_stop() can be used even if the task has already exited, we can kill the "wait_to_die:" loop in migration_thread(). But we must pin rq->migration_thread after creation. Actually, I don't think CPU_UP_CANCELED or CPU_DEAD should wait for ->migration_thread exit. Perhaps we can simplify this code a bit more. migration_call() can set ->should_stop and forget about this thread. But we need a new helper in kthread.c for that. Signed-off-by: Oleg Nesterov <oleg@redhat.com> Cc: Christoph Hellwig <hch@lst.de> Cc: "Eric W. Biederman" <ebiederm@xmission.com> Cc: Ingo Molnar <mingo@elte.hu> Cc: Pavel Emelyanov <xemul@openvz.org> Cc: Rusty Russell <rusty@rustcorp.com.au> Cc: Vitaliy Gusev <vgusev@openvz.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 1 changed file with 4 additions and 10 deletions Side-by-side Diff
kernel/sched.c
... | ... | @@ -7045,7 +7045,7 @@ |
7045 | 7045 | |
7046 | 7046 | if (cpu_is_offline(cpu)) { |
7047 | 7047 | spin_unlock_irq(&rq->lock); |
7048 | - goto wait_to_die; | |
7048 | + break; | |
7049 | 7049 | } |
7050 | 7050 | |
7051 | 7051 | if (rq->active_balance) { |
7052 | 7052 | |
... | ... | @@ -7071,16 +7071,7 @@ |
7071 | 7071 | complete(&req->done); |
7072 | 7072 | } |
7073 | 7073 | __set_current_state(TASK_RUNNING); |
7074 | - return 0; | |
7075 | 7074 | |
7076 | -wait_to_die: | |
7077 | - /* Wait for kthread_stop */ | |
7078 | - set_current_state(TASK_INTERRUPTIBLE); | |
7079 | - while (!kthread_should_stop()) { | |
7080 | - schedule(); | |
7081 | - set_current_state(TASK_INTERRUPTIBLE); | |
7082 | - } | |
7083 | - __set_current_state(TASK_RUNNING); | |
7084 | 7075 | return 0; |
7085 | 7076 | } |
7086 | 7077 | |
... | ... | @@ -7494,6 +7485,7 @@ |
7494 | 7485 | rq = task_rq_lock(p, &flags); |
7495 | 7486 | __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); |
7496 | 7487 | task_rq_unlock(rq, &flags); |
7488 | + get_task_struct(p); | |
7497 | 7489 | cpu_rq(cpu)->migration_thread = p; |
7498 | 7490 | break; |
7499 | 7491 | |
... | ... | @@ -7524,6 +7516,7 @@ |
7524 | 7516 | kthread_bind(cpu_rq(cpu)->migration_thread, |
7525 | 7517 | cpumask_any(cpu_online_mask)); |
7526 | 7518 | kthread_stop(cpu_rq(cpu)->migration_thread); |
7519 | + put_task_struct(cpu_rq(cpu)->migration_thread); | |
7527 | 7520 | cpu_rq(cpu)->migration_thread = NULL; |
7528 | 7521 | break; |
7529 | 7522 | |
... | ... | @@ -7533,6 +7526,7 @@ |
7533 | 7526 | migrate_live_tasks(cpu); |
7534 | 7527 | rq = cpu_rq(cpu); |
7535 | 7528 | kthread_stop(rq->migration_thread); |
7529 | + put_task_struct(rq->migration_thread); | |
7536 | 7530 | rq->migration_thread = NULL; |
7537 | 7531 | /* Idle task back to normal (off runqueue, low prio) */ |
7538 | 7532 | spin_lock_irq(&rq->lock); |