Commit 7608dec2ce2004c234339bef8c8074e5e601d0e9
Committed by: Ingo Molnar
Parent: 013fdb8086
sched: Drop the rq argument to sched_class::select_task_rq()
In preparation of calling select_task_rq() without rq->lock held, drop
the dependency on the rq argument.

Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110405152729.031077745@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
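The shape of the change, condensed from the hunks below: the hook loses its rq parameter, and an implementation that still wants the runqueue (only sched_rt here) must re-derive it from the task and treat it as unlocked:

    /* Before: the caller passed in a runqueue it had locked. */
    int (*select_task_rq)(struct rq *rq, struct task_struct *p,
                          int sd_flag, int flags);

    /* After: only the task is passed; as the sched.c comment notes, the
     * caller (fork, wakeup) owns p->pi_lock, so ->cpus_allowed is stable. */
    int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);

    /* An implementation that still needs the runqueue re-derives it,
     * knowing rq->lock may not be held (see the sched_rt.c hunk):      */
    struct rq *rq = cpu_rq(task_cpu(p));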
Showing 6 changed files with 41 additions and 27 deletions.
include/linux/sched.h
@@ -1067,8 +1067,7 @@
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-	int (*select_task_rq)(struct rq *rq, struct task_struct *p,
-			      int sd_flag, int flags);
+	int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
 
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
kernel/sched.c
@@ -2195,13 +2195,15 @@
  * The task's runqueue lock must be held.
  * Returns true if you have to wait for migration thread.
  */
-static bool migrate_task(struct task_struct *p, struct rq *rq)
+static bool need_migrate_task(struct task_struct *p)
 {
 	/*
 	 * If the task is not on a runqueue (and not running), then
 	 * the next wake-up will properly place the task.
 	 */
-	return p->on_rq || task_running(rq, p);
+	bool running = p->on_rq || p->on_cpu;
+	smp_rmb(); /* finish_lock_switch() */
+	return running;
 }
 
 /*
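A note on the new pair of loads plus smp_rmb(): finish_lock_switch() clears ->on_cpu only after an smp_wmb(), so a reader that sees ->on_cpu == 0 is ordered against the writes of the completed context switch. Below is a stand-alone user-space sketch of that barrier pairing using C11 fences; the names (finish_switch, need_migrate) and the toy struct are hypothetical stand-ins, not kernel code:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of the two flags need_migrate_task() now reads. */
    struct task {
        atomic_bool on_rq;  /* queued on some runqueue */
        atomic_bool on_cpu; /* still executing on its old CPU */
    };

    /* Models finish_lock_switch(): publish "off the CPU" only after every
     * prior write of the context switch is visible (~ smp_wmb()). */
    static void finish_switch(struct task *prev)
    {
        atomic_thread_fence(memory_order_release);
        atomic_store_explicit(&prev->on_cpu, false, memory_order_relaxed);
    }

    /* Models need_migrate_task(): snapshot both flags, then fence so any
     * later reads of task state are ordered after the snapshot (~ smp_rmb()). */
    static bool need_migrate(struct task *p)
    {
        bool running = atomic_load_explicit(&p->on_rq, memory_order_relaxed) ||
                       atomic_load_explicit(&p->on_cpu, memory_order_relaxed);
        atomic_thread_fence(memory_order_acquire);
        return running;
    }

    int main(void)
    {
        struct task t = { .on_rq = false, .on_cpu = true };
        printf("wait for migration helper: %d\n", need_migrate(&t)); /* 1 */
        finish_switch(&t);
        printf("wait for migration helper: %d\n", need_migrate(&t)); /* 0 */
        return 0;
    }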
@@ -2376,9 +2378,9 @@
  * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
  */
 static inline
-int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
+int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
 {
-	int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags);
+	int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
 
 	/*
 	 * In order not to call set_task_cpu() on a blocking task we need
@@ -2533,7 +2535,7 @@
 		en_flags |= ENQUEUE_WAKING;
 	}
 
-	cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
+	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
 	if (cpu != orig_cpu)
 		set_task_cpu(p, cpu);
 	__task_rq_unlock(rq);
@@ -2744,7 +2746,7 @@
 	 * We set TASK_WAKING so that select_task_rq() can drop rq->lock
	 * without people poking at ->cpus_allowed.
	 */
-	cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
+	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
 	set_task_cpu(p, cpu);
 
 	p->state = TASK_RUNNING;
@@ -3474,7 +3476,7 @@
 	int dest_cpu;
 
 	rq = task_rq_lock(p, &flags);
-	dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
+	dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
 	if (dest_cpu == smp_processor_id())
 		goto unlock;
 
@@ -3482,7 +3484,7 @@
 	 * select_task_rq() can race against ->cpus_allowed
	 */
 	if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
-	    likely(cpu_active(dest_cpu)) && migrate_task(p, rq)) {
+	    likely(cpu_active(dest_cpu)) && need_migrate_task(p)) {
 		struct migration_arg arg = { p, dest_cpu };
 
 		task_rq_unlock(rq, &flags);
@@ -5911,7 +5913,7 @@
 		goto out;
 
 	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-	if (migrate_task(p, rq)) {
+	if (need_migrate_task(p)) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
 		__task_rq_unlock(rq);
kernel/sched_fair.c
@@ -1657,7 +1657,7 @@
  * preempt must be disabled.
  */
 static int
-select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags)
+select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 {
 	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
 	int cpu = smp_processor_id();
kernel/sched_idletask.c (hunk not expanded in this view)
kernel/sched_rt.c
@@ -977,13 +977,23 @@
 static int find_lowest_rq(struct task_struct *task);
 
 static int
-select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
+select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 {
+	struct task_struct *curr;
+	struct rq *rq;
+	int cpu;
+
 	if (sd_flag != SD_BALANCE_WAKE)
 		return smp_processor_id();
 
+	cpu = task_cpu(p);
+	rq = cpu_rq(cpu);
+
+	rcu_read_lock();
+	curr = ACCESS_ONCE(rq->curr); /* unlocked access */
+
 	/*
-	 * If the current task is an RT task, then
+	 * If the current task on @p's runqueue is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
@@ -997,21 +1007,25 @@
 	 * lock?
	 *
	 * For equal prio tasks, we just let the scheduler sort it out.
+	 *
+	 * Otherwise, just let it ride on the affined RQ and the
+	 * post-schedule router will push the preempted task away
+	 *
+	 * This test is optimistic, if we get it wrong the load-balancer
+	 * will have to sort it out.
	 */
-	if (unlikely(rt_task(rq->curr)) &&
-	    (rq->curr->rt.nr_cpus_allowed < 2 ||
-	     rq->curr->prio < p->prio) &&
+	if (curr && unlikely(rt_task(curr)) &&
+	    (curr->rt.nr_cpus_allowed < 2 ||
+	     curr->prio < p->prio) &&
 	    (p->rt.nr_cpus_allowed > 1)) {
-		int cpu = find_lowest_rq(p);
+		int target = find_lowest_rq(p);
 
-		return (cpu == -1) ? task_cpu(p) : cpu;
+		if (target != -1)
+			cpu = target;
 	}
+	rcu_read_unlock();
 
-	/*
-	 * Otherwise, just let it ride on the affined RQ and the
-	 * post-schedule router will push the preempted task away
-	 */
-	return task_cpu(p);
+	return cpu;
 }
 
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
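The interesting idiom above is curr = ACCESS_ONCE(rq->curr) under rcu_read_lock(): with no rq->lock held, rq->curr can change at any instant, so the function reads it exactly once and makes every decision against that single snapshot, while the RCU read-side section keeps the snapshotted task from being freed. A minimal user-space sketch of the snapshot-once pattern (C11 relaxed atomics stand in for ACCESS_ONCE, the RCU section is only indicated in comments, and all names here are illustrative and simplified, e.g. the rt_task() check is dropped):

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Toy task: in the kernel, a LOWER prio value means HIGHER priority. */
    struct task { int prio; int nr_cpus_allowed; };

    /* Models rq->curr: other CPUs may swap it at any time. */
    static _Atomic(struct task *) rq_curr;

    static int pick_cpu(const struct task *p, int task_cpu, int lowest_cpu)
    {
        /* rcu_read_lock() would open here, pinning *curr's lifetime. */
        struct task *curr = atomic_load_explicit(&rq_curr, memory_order_relaxed);

        /* Every test uses the same snapshot; re-reading rq_curr could mix
         * fields from two different tasks that occupied the CPU.         */
        if (curr && (curr->nr_cpus_allowed < 2 || curr->prio < p->prio) &&
            p->nr_cpus_allowed > 1 && lowest_cpu != -1)
            return lowest_cpu;  /* wake p somewhere less contended */
        /* rcu_read_unlock() would close here. */

        return task_cpu;        /* ride on the affined runqueue */
    }

    int main(void)
    {
        struct task pinned_rt = { .prio = 10, .nr_cpus_allowed = 1 };
        struct task waking    = { .prio = 20, .nr_cpus_allowed = 4 };

        atomic_store(&rq_curr, &pinned_rt);
        /* curr outranks the waker and cannot move: pick the lowest rq (3). */
        printf("cpu = %d\n", pick_cpu(&waking, 0, 3));
        return 0;
    }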
kernel/sched_stoptask.c
@@ -9,8 +9,7 @@
 
 #ifdef CONFIG_SMP
 static int
-select_task_rq_stop(struct rq *rq, struct task_struct *p,
-		    int sd_flag, int flags)
+select_task_rq_stop(struct task_struct *p, int sd_flag, int flags)
 {
 	return task_cpu(p); /* stop tasks as never migrate */
 }
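For context on how this hook is consumed: each scheduling class (fair, idle, rt, stop) exposes its selector through its struct sched_class instance, which is why every implementation has to change in lock-step with the core wrapper. Sketched from the stop-task class, heavily abbreviated, with the surrounding members elided from memory rather than from this diff:

    static const struct sched_class stop_sched_class = {
        /* ... */
    #ifdef CONFIG_SMP
        .select_task_rq = select_task_rq_stop, /* the hook updated above */
    #endif
        /* ... */
    };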