Commit 64b9e0294d24a4204232e13e01630b0690e48d61

Authored by Amit K. Arora
Committed by Ingo Molnar
1 parent b87f17242d

sched: minor optimizations in wake_affine and select_task_rq_fair

This patch does the following:
o Removes unused variable and argument "rq".
o Optimizes one of the "if" conditions in wake_affine() - i.e. if
  "balanced" is true, we need not do the rest of the calculations in
  the condition.
o If this cpu is same as the previous cpu (on which woken up task
  was running when it went to sleep), no need to call wake_affine at all.

Signed-off-by: Amit K Arora <aarora@linux.vnet.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Showing 1 changed file with 7 additions and 9 deletions Side-by-side Diff

... ... @@ -1088,7 +1088,7 @@
1088 1088 #endif
1089 1089  
1090 1090 static int
1091   -wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
  1091 +wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
1092 1092 struct task_struct *p, int prev_cpu, int this_cpu, int sync,
1093 1093 int idx, unsigned long load, unsigned long this_load,
1094 1094 unsigned int imbalance)
... ... @@ -1136,8 +1136,8 @@
1136 1136 schedstat_inc(p, se.nr_wakeups_affine_attempts);
1137 1137 tl_per_task = cpu_avg_load_per_task(this_cpu);
1138 1138  
1139   - if ((tl <= load && tl + target_load(prev_cpu, idx) <= tl_per_task) ||
1140   - balanced) {
  1139 + if (balanced || (tl <= load && tl + target_load(prev_cpu, idx) <=
  1140 + tl_per_task)) {
1141 1141 /*
1142 1142 * This domain has SD_WAKE_AFFINE and
1143 1143 * p is cache cold in this domain, and
1144 1144  
1145 1145  
... ... @@ -1156,16 +1156,17 @@
1156 1156 struct sched_domain *sd, *this_sd = NULL;
1157 1157 int prev_cpu, this_cpu, new_cpu;
1158 1158 unsigned long load, this_load;
1159   - struct rq *rq, *this_rq;
  1159 + struct rq *this_rq;
1160 1160 unsigned int imbalance;
1161 1161 int idx;
1162 1162  
1163 1163 prev_cpu = task_cpu(p);
1164   - rq = task_rq(p);
1165 1164 this_cpu = smp_processor_id();
1166 1165 this_rq = cpu_rq(this_cpu);
1167 1166 new_cpu = prev_cpu;
1168 1167  
  1168 + if (prev_cpu == this_cpu)
  1169 + goto out;
1169 1170 /*
1170 1171 * 'this_sd' is the first domain that both
1171 1172 * this_cpu and prev_cpu are present in:
1172 1173  
... ... @@ -1193,12 +1194,9 @@
1193 1194 load = source_load(prev_cpu, idx);
1194 1195 this_load = target_load(this_cpu, idx);
1195 1196  
1196   - if (wake_affine(rq, this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx,
  1197 + if (wake_affine(this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx,
1197 1198 load, this_load, imbalance))
1198 1199 return this_cpu;
1199   -
1200   - if (prev_cpu == this_cpu)
1201   - goto out;
1202 1200  
1203 1201 /*
1204 1202 * Start passive balancing when half the imbalance_pct