Commit fcc1d2a9cea4ba97c9800e1de0748003bba07335
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Fixes and two late cleanups"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/cleanups: Add load balance cpumask pointer to 'struct lb_env'
  sched: Fix comment about PREEMPT_ACTIVE bit location
  sched: Fix minor code style issues
  sched: Use task_rq_unlock() in __sched_setscheduler()
  sched/numa: Add SD_PERFER_SIBLING to CPU domain
Showing 5 changed files
include/linux/hardirq.h
@@ -22,7 +22,7 @@
  *
  * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
  * - bit 26 is the NMI_MASK
- * - bit 28 is the PREEMPT_ACTIVE flag
+ * - bit 27 is the PREEMPT_ACTIVE flag
  *
  * PREEMPT_MASK: 0x000000ff
  * SOFTIRQ_MASK: 0x0000ff00
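For context, the masks listed in this comment follow directly from the bit counts it gives. A minimal sketch of the arithmetic (macro names as in hardirq.h of this era; the PREEMPT_ACTIVE default is an assumption, since architectures may override its value):

#define PREEMPT_BITS	8	/* bits  0-7  : preemption count */
#define SOFTIRQ_BITS	8	/* bits  8-15 : softirq count */
#define HARDIRQ_BITS	10	/* bits 16-25 : hardirq count (max 1024 nested) */
#define NMI_BITS	1	/* bit  26    : NMI_MASK */

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)	/*  8 */
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)	/* 16 */
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)	/* 26 */

/* The next free bit is 27, not 28 -- hence the comment fix above. */
#define PREEMPT_ACTIVE	(1UL << (NMI_SHIFT + NMI_BITS))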
include/linux/topology.h
kernel/sched/core.c
@@ -4340,9 +4340,7 @@
 	 */
 	if (unlikely(policy == p->policy && (!rt_policy(policy) ||
 			param->sched_priority == p->rt_priority))) {
-
-		__task_rq_unlock(rq);
-		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+		task_rq_unlock(rq, p, &flags);
 		return 0;
 	}
 
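The helper must pair exactly the two unlocks the open-coded path performed, in the same order. A sketch of task_rq_unlock() consistent with the replaced lines (reconstructed, not copied from the tree):

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	__task_rq_unlock(rq);				/* drop rq->lock */
	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);	/* drop pi_lock, restore IRQ flags */
}

So the change is behavior-preserving; it only replaces the open-coded unlock pair with the existing helper.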
kernel/sched/cpupri.c
(Both hunks below are whitespace-only style fixes: column-aligned declarations replaced with the plain single-space form. The alignment padding in the removed lines is shown approximately, as the side-by-side view collapsed it.)

@@ -65,8 +65,8 @@
 int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		struct cpumask *lowest_mask)
 {
-	int             idx      = 0;
-	int             task_pri = convert_prio(p->prio);
+	int idx = 0;
+	int task_pri = convert_prio(p->prio);
 
 	if (task_pri >= MAX_RT_PRIO)
 		return 0;
@@ -137,9 +137,9 @@
  */
 void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 {
-	int            *currpri = &cp->cpu_to_pri[cpu];
-	int             oldpri  = *currpri;
-	int             do_mb = 0;
+	int *currpri = &cp->cpu_to_pri[cpu];
+	int oldpri = *currpri;
+	int do_mb = 0;
 
 	newpri = convert_prio(newpri);
 
kernel/sched/fair.c
@@ -3069,6 +3069,9 @@
 	int			new_dst_cpu;
 	enum cpu_idle_type	idle;
 	long			imbalance;
+	/* The set of CPUs under consideration for load-balancing */
+	struct cpumask		*cpus;
+
 	unsigned int		flags;
 
 	unsigned int		loop;
@@ -3653,8 +3656,7 @@
  */
 static inline void update_sg_lb_stats(struct lb_env *env,
 			struct sched_group *group, int load_idx,
-			int local_group, const struct cpumask *cpus,
-			int *balance, struct sg_lb_stats *sgs)
+			int local_group, int *balance, struct sg_lb_stats *sgs)
 {
 	unsigned long nr_running, max_nr_running, min_nr_running;
 	unsigned long load, max_cpu_load, min_cpu_load;
@@ -3671,7 +3673,7 @@
 	max_nr_running = 0;
 	min_nr_running = ~0UL;
 
-	for_each_cpu_and(i, sched_group_cpus(group), cpus) {
+	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);
 
 		nr_running = rq->nr_running;
@@ -3800,8 +3802,7 @@
  * @sds: variable to hold the statistics for this sched_domain.
  */
 static inline void update_sd_lb_stats(struct lb_env *env,
-					const struct cpumask *cpus,
-					int *balance, struct sd_lb_stats *sds)
+					int *balance, struct sd_lb_stats *sds)
 {
 	struct sched_domain *child = env->sd->child;
 	struct sched_group *sg = env->sd->groups;
@@ -3818,8 +3819,7 @@
 
 		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
 		memset(&sgs, 0, sizeof(sgs));
-		update_sg_lb_stats(env, sg, load_idx, local_group,
-				   cpus, balance, &sgs);
+		update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
 
 		if (local_group && !(*balance))
 			return;
@@ -4055,7 +4055,6 @@
  * to restore balance.
  *
  * @env: The load balancing environment.
- * @cpus: The set of CPUs under consideration for load-balancing.
 * @balance: Pointer to a variable indicating if this_cpu
 *	is the appropriate cpu to perform load balancing at this_level.
 *
@@ -4065,7 +4064,7 @@
  * put to idle by rebalancing its tasks onto our group.
  */
 static struct sched_group *
-find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
+find_busiest_group(struct lb_env *env, int *balance)
 {
 	struct sd_lb_stats sds;
 
@@ -4075,7 +4074,7 @@
 	 * Compute the various statistics relavent for load balancing at
 	 * this level.
 	 */
-	update_sd_lb_stats(env, cpus, balance, &sds);
+	update_sd_lb_stats(env, balance, &sds);
 
 	/*
 	 * this_cpu is not the appropriate cpu to perform load balancing at
@@ -4155,8 +4154,7 @@
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
 static struct rq *find_busiest_queue(struct lb_env *env,
-				     struct sched_group *group,
-				     const struct cpumask *cpus)
+				     struct sched_group *group)
 {
 	struct rq *busiest = NULL, *rq;
 	unsigned long max_load = 0;
@@ -4171,7 +4169,7 @@
 		if (!capacity)
 			capacity = fix_small_capacity(env->sd, group);
 
-		if (!cpumask_test_cpu(i, cpus))
+		if (!cpumask_test_cpu(i, env->cpus))
 			continue;
 
 		rq = cpu_rq(i);
@@ -4252,6 +4250,7 @@
 		.dst_grpmask	= sched_group_cpus(sd->groups),
 		.idle		= idle,
 		.loop_break	= sched_nr_migrate_break,
+		.cpus		= cpus,
 	};
 
 	cpumask_copy(cpus, cpu_active_mask);
@@ -4260,7 +4259,7 @@
 	schedstat_inc(sd, lb_count[idle]);
 
 redo:
-	group = find_busiest_group(&env, cpus, balance);
+	group = find_busiest_group(&env, balance);
 
 	if (*balance == 0)
 		goto out_balanced;
@@ -4270,7 +4269,7 @@
 		goto out_balanced;
 	}
 
-	busiest = find_busiest_queue(&env, group, cpus);
+	busiest = find_busiest_queue(&env, group);
 	if (!busiest) {
 		schedstat_inc(sd, lb_nobusyq[idle]);
 		goto out_balanced;
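Taken together, the fair.c hunks are one mechanical refactor: the load-balance cpumask, previously threaded as an extra parameter through update_sg_lb_stats(), update_sd_lb_stats(), find_busiest_group() and find_busiest_queue(), now rides along in 'struct lb_env'. A minimal standalone sketch of the pattern, with hypothetical stand-in types and names rather than the kernel's own:

#include <stdio.h>

/* Stand-in for the kernel's cpumask; one bit per CPU. */
struct cpumask { unsigned long bits; };

/* Stand-in for struct lb_env: state shared across the balance pass. */
struct lb_env_sketch {
	int dst_cpu;
	/* The set of CPUs under consideration for load-balancing */
	struct cpumask *cpus;
};

/* Before the refactor this took (env, cpus); after it, callers pass
 * only env and the helper reads env->cpus. */
static int count_candidates(const struct lb_env_sketch *env)
{
	return __builtin_popcountl(env->cpus->bits);
}

int main(void)
{
	struct cpumask active = { .bits = 0x0fUL };	/* CPUs 0-3 active */
	struct lb_env_sketch env = { .dst_cpu = 0, .cpus = &active };

	printf("%d candidate CPUs\n", count_candidates(&env));
	return 0;
}

The payoff is visible in the diff itself: four function signatures shrink, and every callee reads the same authoritative env->cpus instead of trusting callers to pass the right mask at each level.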