kernel/sched/pelt.h

  #ifdef CONFIG_SMP
  #include "sched-pelt.h"

  int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
  int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
  int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
  int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
  int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);

  #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
  int update_irq_load_avg(struct rq *rq, u64 running);
  #else
  static inline int
  update_irq_load_avg(struct rq *rq, u64 running)
  {
  	return 0;
  }
#endif

/*
 * When a task is dequeued, its estimated utilization should not be updated
 * if its util_avg has not been updated at least once.
 * This flag is used to synchronize util_avg updates with util_est updates.
 * We map this information into the LSB bit of the utilization saved at
 * dequeue time (i.e. util_est.dequeued).
 */
#define UTIL_AVG_UNCHANGED 0x1
  
  static inline void cfs_se_util_change(struct sched_avg *avg)
  {
  	unsigned int enqueued;
  
  	if (!sched_feat(UTIL_EST))
  		return;
  
	/* Avoid the store if the flag has already been set */
  	enqueued = avg->util_est.enqueued;
  	if (!(enqueued & UTIL_AVG_UNCHANGED))
  		return;
  
  	/* Reset flag to report util_avg has been updated */
  	enqueued &= ~UTIL_AVG_UNCHANGED;
  	WRITE_ONCE(avg->util_est.enqueued, enqueued);
  }
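
/*
 * For illustration (a sketch of the flag handshake, not code from this file):
 * utilization is tracked at SCHED_CAPACITY_SCALE resolution (0..1024), so
 * borrowing the LSB costs at most one unit of precision. The dequeue path is
 * expected to tag the value it saves, e.g. "util | UTIL_AVG_UNCHANGED",
 * yielding an odd value such as 417. Once util_avg has been updated again,
 * cfs_se_util_change() clears the flag (417 becomes 416), signalling to the
 * next dequeue that util_est may be refreshed.
 */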
/*
 * The clock_pelt scales the time to reflect the effective amount of
 * computation done during the running delta time and then syncs back to
 * clock_task when the rq is idle.
 *
 * absolute time   | 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|16
 * @ max capacity  ------******---------------******---------------
 * @ half capacity ------************---------************---------
 * clock pelt      | 1| 2|    3|    4| 7| 8| 9|   10|   11|14|15|16
 *
 */
  static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
  {
  	if (unlikely(is_idle_task(rq->curr))) {
  		/* The rq is idle, we can sync to clock_task */
		rq->clock_pelt = rq_clock_task(rq);
  		return;
  	}
  
	/*
	 * When a rq runs at a lower compute capacity, it will need
	 * more time to do the same amount of work than it would at
	 * max capacity. In order to be invariant, we scale the delta
	 * to reflect how much work has really been done.
	 * Running longer results in stealing idle time that will
	 * disturb the load signal compared to max capacity. This
	 * stolen idle time will be automatically reflected when the
	 * rq becomes idle and the clock is synced with
	 * rq_clock_task.
	 */
  
  	/*
  	 * Scale the elapsed time to reflect the real amount of
  	 * computation
  	 */
  	delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
  	delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));
  
  	rq->clock_pelt += delta;
  }
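
/*
 * Worked example (illustrative numbers, assuming cap_scale(v, s) behaves as
 * (v * s) >> SCHED_CAPACITY_SHIFT with SCHED_CAPACITY_SCALE == 1024): on a CPU
 * whose arch_scale_cpu_capacity() is 512 and which currently runs at half of
 * its maximum frequency (arch_scale_freq_capacity() == 512), a 4ms wall-clock
 * delta contributes 4ms * 512/1024 * 512/1024 = 1ms to clock_pelt. The
 * remaining 3ms is the "stolen" idle time that is caught up in one go when
 * the rq goes idle and clock_pelt is resynced to rq_clock_task().
 */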
  
/*
 * When the rq becomes idle, we have to check whether it has lost idle time
 * because it was fully busy. A rq is fully used when the /Sum util_sum
 * is greater than or equal to:
 * (LOAD_AVG_MAX - 1024 + rq->cfs.avg.period_contrib) << SCHED_CAPACITY_SHIFT;
 * For optimization and rounding purposes, we don't take into account
 * the position in the current window (period_contrib) and we use the higher
 * bound of util_sum to decide.
 */
  static inline void update_idle_rq_clock_pelt(struct rq *rq)
  {
  	u32 divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX;
  	u32 util_sum = rq->cfs.avg.util_sum;
  	util_sum += rq->avg_rt.util_sum;
  	util_sum += rq->avg_dl.util_sum;
  
	/*
	 * Reflecting stolen time makes sense only if the idle
	 * phase would be present at max capacity. As soon as the
	 * utilization of a rq has reached the maximum value, it is
	 * considered as an always running rq without idle time to
	 * steal. This potential idle time is considered as lost in
	 * this case. We keep track of this lost idle time compared to
	 * rq's clock_task.
	 */
  	if (util_sum >= divider)
  		rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;
  }
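
/*
 * Worked example (illustrative numbers, assuming the default LOAD_AVG_MAX of
 * 47742 from sched-pelt.h and SCHED_CAPACITY_SHIFT == 10): the threshold is
 * divider = ((47742 - 1024) << 10) - 47742 = 47791490. A rq that never goes
 * idle saturates its summed util_sum at roughly (LOAD_AVG_MAX - 1024) << 10,
 * so the comparison above triggers and the gap between rq_clock_task() and
 * clock_pelt is accounted as lost_idle_time rather than being handed back to
 * PELT as idle time.
 */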
  
  static inline u64 rq_clock_pelt(struct rq *rq)
  {
  	lockdep_assert_held(&rq->lock);
  	assert_clock_updated(rq);
  
  	return rq->clock_pelt - rq->lost_idle_time;
  }
  
  #ifdef CONFIG_CFS_BANDWIDTH
/* rq->clock_task normalized against any time this cfs_rq has spent throttled */
  static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
  {
  	if (unlikely(cfs_rq->throttle_count))
  		return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
  
  	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
  }
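
/*
 * Note: while throttle_count is non-zero the value returned above is frozen
 * at the snapshot taken when the cfs_rq was throttled, minus the throttled
 * time already accumulated in throttled_clock_task_time; once the group runs
 * again the same accumulated offset is subtracted from rq_clock_pelt(), so
 * time spent throttled does not advance this cfs_rq's PELT clock.
 */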
  #else
  static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
  {
  	return rq_clock_pelt(rq_of(cfs_rq));
  }
  #endif
  #else
  
  static inline int
  update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
  {
  	return 0;
}

static inline int
  update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
  {
  	return 0;
}

static inline int
  update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
  {
  	return 0;
  }
  
  static inline int
  update_irq_load_avg(struct rq *rq, u64 running)
  {
  	return 0;
  }
  
  static inline u64 rq_clock_pelt(struct rq *rq)
  {
  	return rq_clock_task(rq);
  }
  
  static inline void
  update_rq_clock_pelt(struct rq *rq, s64 delta) { }
  
  static inline void
  update_idle_rq_clock_pelt(struct rq *rq) { }
  #endif