Commit 6d5ab2932a21ea54406ab95c43ecff90a3eddfda

Authored by Paul Turner
Committed by Ingo Molnar
1 parent 05ca62c6ca

sched: Simplify update_cfs_shares parameters

Re-visiting this: since update_cfs_shares will now only ever re-weight an
entity that is a relative parent of the current entity in enqueue_entity, we
can safely issue account_entity_enqueue against that cfs_rq first and avoid
the requirement for special handling of the enqueue case in update_cfs_shares.

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20110122044851.915214637@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Showing 2 changed files with 15 additions and 17 deletions Side-by-side Diff

... ... @@ -8510,7 +8510,7 @@
8510 8510 /* Propagate contribution to hierarchy */
8511 8511 raw_spin_lock_irqsave(&rq->lock, flags);
8512 8512 for_each_sched_entity(se)
8513   - update_cfs_shares(group_cfs_rq(se), 0);
  8513 + update_cfs_shares(group_cfs_rq(se));
8514 8514 raw_spin_unlock_irqrestore(&rq->lock, flags);
8515 8515 }
8516 8516  
... ... @@ -540,7 +540,7 @@
540 540 }
541 541  
542 542 static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
543   -static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta);
  543 +static void update_cfs_shares(struct cfs_rq *cfs_rq);
544 544  
545 545 /*
546 546 * Update the current task's runtime statistics. Skip current tasks that
547 547  
548 548  
549 549  
... ... @@ -763,16 +763,15 @@
763 763 list_del_leaf_cfs_rq(cfs_rq);
764 764 }
765 765  
766   -static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
767   - long weight_delta)
  766 +static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
768 767 {
769 768 long load_weight, load, shares;
770 769  
771   - load = cfs_rq->load.weight + weight_delta;
  770 + load = cfs_rq->load.weight;
772 771  
773 772 load_weight = atomic_read(&tg->load_weight);
774   - load_weight -= cfs_rq->load_contribution;
775 773 load_weight += load;
  774 + load_weight -= cfs_rq->load_contribution;
776 775  
777 776 shares = (tg->shares * load);
778 777 if (load_weight)
... ... @@ -790,7 +789,7 @@
790 789 {
791 790 if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
792 791 update_cfs_load(cfs_rq, 0);
793   - update_cfs_shares(cfs_rq, 0);
  792 + update_cfs_shares(cfs_rq);
794 793 }
795 794 }
796 795 # else /* CONFIG_SMP */
... ... @@ -798,8 +797,7 @@
798 797 {
799 798 }
800 799  
801   -static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
802   - long weight_delta)
  800 +static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
803 801 {
804 802 return tg->shares;
805 803 }
... ... @@ -824,7 +822,7 @@
824 822 account_entity_enqueue(cfs_rq, se);
825 823 }
826 824  
827   -static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
  825 +static void update_cfs_shares(struct cfs_rq *cfs_rq)
828 826 {
829 827 struct task_group *tg;
830 828 struct sched_entity *se;
... ... @@ -838,7 +836,7 @@
838 836 if (likely(se->load.weight == tg->shares))
839 837 return;
840 838 #endif
841   - shares = calc_cfs_shares(cfs_rq, tg, weight_delta);
  839 + shares = calc_cfs_shares(cfs_rq, tg);
842 840  
843 841 reweight_entity(cfs_rq_of(se), se, shares);
844 842 }
... ... @@ -847,7 +845,7 @@
847 845 {
848 846 }
849 847  
850   -static inline void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
  848 +static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
851 849 {
852 850 }
853 851  
854 852  
... ... @@ -978,8 +976,8 @@
978 976 */
979 977 update_curr(cfs_rq);
980 978 update_cfs_load(cfs_rq, 0);
981   - update_cfs_shares(cfs_rq, se->load.weight);
982 979 account_entity_enqueue(cfs_rq, se);
  980 + update_cfs_shares(cfs_rq);
983 981  
984 982 if (flags & ENQUEUE_WAKEUP) {
985 983 place_entity(cfs_rq, se, 0);
... ... @@ -1041,7 +1039,7 @@
1041 1039 update_cfs_load(cfs_rq, 0);
1042 1040 account_entity_dequeue(cfs_rq, se);
1043 1041 update_min_vruntime(cfs_rq);
1044   - update_cfs_shares(cfs_rq, 0);
  1042 + update_cfs_shares(cfs_rq);
1045 1043  
1046 1044 /*
1047 1045 * Normalize the entity after updating the min_vruntime because the
... ... @@ -1282,7 +1280,7 @@
1282 1280 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1283 1281  
1284 1282 update_cfs_load(cfs_rq, 0);
1285   - update_cfs_shares(cfs_rq, 0);
  1283 + update_cfs_shares(cfs_rq);
1286 1284 }
1287 1285  
1288 1286 hrtick_update(rq);
... ... @@ -1312,7 +1310,7 @@
1312 1310 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1313 1311  
1314 1312 update_cfs_load(cfs_rq, 0);
1315   - update_cfs_shares(cfs_rq, 0);
  1313 + update_cfs_shares(cfs_rq);
1316 1314 }
1317 1315  
1318 1316 hrtick_update(rq);
... ... @@ -2123,7 +2121,7 @@
2123 2121 * We need to update shares after updating tg->load_weight in
2124 2122 * order to adjust the weight of groups with long running tasks.
2125 2123 */
2126   - update_cfs_shares(cfs_rq, 0);
  2124 + update_cfs_shares(cfs_rq);
2127 2125  
2128 2126 raw_spin_unlock_irqrestore(&rq->lock, flags);
2129 2127