Commit 013fdb8086acaae5f8eb96f9ad48fcd98882ac46
Committed by Ingo Molnar
1 parent: fd2f4419b4
Exists in master and in 7 other branches
sched: Serialize p->cpus_allowed and ttwu() using p->pi_lock
Currently p->pi_lock already serializes p->sched_class; also put p->cpus_allowed and try_to_wake_up() under it. This prepares the way to do the first part of ttwu() without holding rq->lock.

By having p->sched_class and p->cpus_allowed serialized by p->pi_lock, we prepare the way to call select_task_rq() without holding rq->lock.

Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110405152728.990364093@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
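The pattern the patch introduces at each converted call site, distilled into a minimal C sketch (function bodies elided; only the acquire/release ordering is shown, as taken from the hunks below):

	unsigned long flags;
	struct rq *rq;

	raw_spin_lock_irqsave(&p->pi_lock, flags);	/* outer lock: makes ->cpus_allowed and ->sched_class stable */
	rq = __task_rq_lock(p);				/* inner lock: rq->lock now nests inside p->pi_lock */

	/* ... work that relies on a stable ->cpus_allowed ... */

	__task_rq_unlock(rq);				/* release in reverse nesting order */
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);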
Showing 1 changed file with 16 additions and 21 deletions
kernel/sched.c
@@ -2340,7 +2340,7 @@
 
 #ifdef CONFIG_SMP
 /*
- * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
+ * ->cpus_allowed is protected by both rq->lock and p->pi_lock
  */
 static int select_fallback_rq(int cpu, struct task_struct *p)
 {
@@ -2373,7 +2373,7 @@
 }
 
 /*
- * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable.
+ * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
  */
 static inline
 int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
@@ -2499,7 +2499,8 @@
 	this_cpu = get_cpu();
 
 	smp_wmb();
-	rq = task_rq_lock(p, &flags);
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
+	rq = __task_rq_lock(p);
 	if (!(p->state & state))
 		goto out;
 
@@ -2557,7 +2558,8 @@
 	ttwu_stat(rq, p, cpu, wake_flags);
 	success = 1;
 out:
-	task_rq_unlock(rq, &flags);
+	__task_rq_unlock(rq);
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 	put_cpu();
 
 	return success;
@@ -4694,6 +4696,8 @@
 
 	BUG_ON(prio < 0 || prio > MAX_PRIO);
 
+	lockdep_assert_held(&p->pi_lock);
+
 	rq = task_rq_lock(p, &flags);
 
 	trace_sched_pi_setprio(p, prio);
@@ -5317,7 +5321,6 @@
 {
 	struct task_struct *p;
 	unsigned long flags;
-	struct rq *rq;
 	int retval;
 
 	get_online_cpus();
@@ -5332,9 +5335,9 @@
 	if (retval)
 		goto out_unlock;
 
-	rq = task_rq_lock(p, &flags);
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
-	task_rq_unlock(rq, &flags);
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
 out_unlock:
 	rcu_read_unlock();
@@ -5882,18 +5885,8 @@
 	unsigned int dest_cpu;
 	int ret = 0;
 
-	/*
-	 * Serialize against TASK_WAKING so that ttwu() and wunt() can
-	 * drop the rq->lock and still rely on ->cpus_allowed.
-	 */
-again:
-	while (task_is_waking(p))
-		cpu_relax();
-	rq = task_rq_lock(p, &flags);
-	if (task_is_waking(p)) {
-		task_rq_unlock(rq, &flags);
-		goto again;
-	}
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
+	rq = __task_rq_lock(p);
 
 	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
 		ret = -EINVAL;
 
@@ -5921,13 +5914,15 @@
 	if (migrate_task(p, rq)) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
-		task_rq_unlock(rq, &flags);
+		__task_rq_unlock(rq);
+		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
 		tlb_migrate_finish(p->mm);
 		return 0;
 	}
 out:
-	task_rq_unlock(rq, &flags);
+	__task_rq_unlock(rq);
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
 	return ret;
 }
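Note that the pi_lock/rq->lock pair is open-coded at each converted call site above. A hypothetical helper pair capturing the same pattern might look as follows; the names task_rq_lock_both()/task_rq_unlock_both() are invented here for illustration and are not part of this patch (a follow-up patch in this series folds p->pi_lock into task_rq_lock() itself):

	/* Hypothetical helpers; not part of this commit. */
	static struct rq *task_rq_lock_both(struct task_struct *p, unsigned long *flags)
	{
		struct rq *rq;

		raw_spin_lock_irqsave(&p->pi_lock, *flags);	/* outer */
		rq = __task_rq_lock(p);				/* inner */
		return rq;
	}

	static void task_rq_unlock_both(struct rq *rq, struct task_struct *p,
					unsigned long *flags)
	{
		__task_rq_unlock(rq);				/* inner first */
		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
	}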