Commit e1f8450854d69f0291882804406ea1bab3ca44b4

Authored by Ingo Molnar
1 parent 3f2aa307c4

sched: Fix sched::sched_stat_wait tracepoint field

This weird perf trace output:

  cc1-9943  [001]  2802.059479616: sched_stat_wait: task: as:9944 wait: 2801938766276 [ns]

is caused by zeroing se->wait_start, one component of the reported delta,
a bit too early: the tracepoint then computes the delta against zero and
reports the raw runqueue clock instead of the wait time. Move the zeroing
to after the tracepoint has fired.

( Note, this does not affect the NEW_FAIR_SLEEPERS interactivity bug,
  it's just a reporting bug in essence. )
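
( For illustration, a minimal user-space sketch of the ordering bug; the
  helper names and the wait_start value are made up, and this is plain C,
  not the kernel code itself. Because se->wait_start is zeroed before
  trace_sched_stat_wait() reads it, the reported delta collapses to the
  raw runqueue clock, which is why the bogus wait above lands within
  ~120 msecs of the 2802.059 secs timestamp: )

  #include <stdio.h>

  typedef unsigned long long u64;

  /* illustrative values: the rq clock when the wait ends, and a
   * hypothetical wait_start stamped at enqueue time */
  static u64 rq_clock   = 2801938766276ULL;
  static u64 wait_start = 2801938000000ULL;

  static void trace_stat_wait(u64 delta)
  {
          printf("sched_stat_wait: wait: %llu [ns]\n", delta);
  }

  static void buggy_order(void)
  {
          u64 start = wait_start;

          start = 0;                         /* zeroed too early...     */
          trace_stat_wait(rq_clock - start); /* ...delta == raw clock   */
  }

  static void fixed_order(void)
  {
          u64 start = wait_start;

          trace_stat_wait(rq_clock - start); /* real delta: 766276 [ns] */
          start = 0;                         /* zeroed after tracing    */
  }

  int main(void)
  {
          buggy_order();  /* prints 2801938766276, the bogus value above */
          fixed_order();  /* prints 766276                               */
          return 0;
  }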

Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nikos Chantziaras <realnc@arcor.de>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Mike Galbraith <efault@gmx.de>
LKML-Reference: <4AA93D34.8040500@arcor.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Showing 1 changed file with 1 addition and 2 deletions

/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 5ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 5000000ULL;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 1000000ULL;

/*
 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 5;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * sys_sched_yield() compat mode
 *
 * This option switches the aggressive yield implementation of the
 * old scheduler back on.
 */
unsigned int __read_mostly sysctl_sched_compat_yield;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

static const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)      (!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
        WARN_ON_ONCE(!entity_is_task(se));
#endif
        return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
                for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return grp->my_q;
}

/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu')
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
        return cfs_rq->tg->cfs_rq[this_cpu];
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
        list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
        if (se->cfs_rq == pse->cfs_rq)
                return 1;

        return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
        return se->parent;
}

/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
        int depth = 0;

        for_each_sched_entity(se)
                depth++;

        return depth;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
        int se_depth, pse_depth;

        /*
         * preemption test can be made between sibling entities who are in the
         * same cfs_rq, i.e. who have a common parent. Walk up the hierarchy of
         * both tasks until we find their ancestors who are siblings of a
         * common parent.
         */

        /* First walk up until both entities are at same depth */
        se_depth = depth_se(*se);
        pse_depth = depth_se(*pse);

        while (se_depth > pse_depth) {
                se_depth--;
                *se = parent_entity(*se);
        }

        while (pse_depth > se_depth) {
                pse_depth--;
                *pse = parent_entity(*pse);
        }

        while (!is_same_group(*se, *pse)) {
                *se = parent_entity(*se);
                *pse = parent_entity(*pse);
        }
}

#else   /* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
        return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)      1

#define for_each_sched_entity(se) \
                for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        struct task_struct *p = task_of(se);
        struct rq *rq = task_rq(p);

        return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
        return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
                for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
        return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
        return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif  /* CONFIG_FAIR_GROUP_SCHED */


/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
        s64 delta = (s64)(vruntime - min_vruntime);
        if (delta > 0)
                min_vruntime = vruntime;

        return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
        s64 delta = (s64)(vruntime - min_vruntime);
        if (delta < 0)
                min_vruntime = vruntime;

        return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
                                struct sched_entity *b)
{
        return (s64)(a->vruntime - b->vruntime) < 0;
}

static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        return se->vruntime - cfs_rq->min_vruntime;
}

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
        u64 vruntime = cfs_rq->min_vruntime;

        if (cfs_rq->curr)
                vruntime = cfs_rq->curr->vruntime;

        if (cfs_rq->rb_leftmost) {
                struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
                                                   struct sched_entity,
                                                   run_node);

                if (!cfs_rq->curr)
                        vruntime = se->vruntime;
                else
                        vruntime = min_vruntime(vruntime, se->vruntime);
        }

        cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
        struct rb_node *parent = NULL;
        struct sched_entity *entry;
        s64 key = entity_key(cfs_rq, se);
        int leftmost = 1;

        /*
         * Find the right place in the rbtree:
         */
        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct sched_entity, run_node);
                /*
                 * We don't care about collisions. Nodes with
                 * the same key stay together.
                 */
                if (key < entity_key(cfs_rq, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        /*
         * Maintain a cache of leftmost tree entries (it is frequently
         * used):
         */
        if (leftmost)
                cfs_rq->rb_leftmost = &se->run_node;

        rb_link_node(&se->run_node, parent, link);
        rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        if (cfs_rq->rb_leftmost == &se->run_node) {
                struct rb_node *next_node;

                next_node = rb_next(&se->run_node);
                cfs_rq->rb_leftmost = next_node;
        }

        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
        struct rb_node *left = cfs_rq->rb_leftmost;

        if (!left)
                return NULL;

        return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
        struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

        if (!last)
                return NULL;

        return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

#ifdef CONFIG_SCHED_DEBUG
int sched_nr_latency_handler(struct ctl_table *table, int write,
                struct file *filp, void __user *buffer, size_t *lenp,
                loff_t *ppos)
{
        int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);

        if (ret || !write)
                return ret;

        sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
                                        sysctl_sched_min_granularity);

        return 0;
}
#endif

/*
 * delta /= w
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
        if (unlikely(se->load.weight != NICE_0_LOAD))
                delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

        return delta;
}

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
        u64 period = sysctl_sched_latency;
        unsigned long nr_latency = sched_nr_latency;

        if (unlikely(nr_running > nr_latency)) {
                period = sysctl_sched_min_granularity;
                period *= nr_running;
        }

        return period;
}

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

        for_each_sched_entity(se) {
                struct load_weight *load;
                struct load_weight lw;

                cfs_rq = cfs_rq_of(se);
                load = &cfs_rq->load;

                if (unlikely(!se->on_rq)) {
                        lw = cfs_rq->load;

                        update_load_add(&lw, se->load.weight);
                        load = &lw;
                }
                slice = calc_delta_mine(slice, se->load.weight, load);
        }
        return slice;
}

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
              unsigned long delta_exec)
{
        unsigned long delta_exec_weighted;

        schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

        curr->sum_exec_runtime += delta_exec;
        schedstat_add(cfs_rq, exec_clock, delta_exec);
        delta_exec_weighted = calc_delta_fair(delta_exec, curr);
        curr->vruntime += delta_exec_weighted;
        update_min_vruntime(cfs_rq);
}

static void update_curr(struct cfs_rq *cfs_rq)
{
        struct sched_entity *curr = cfs_rq->curr;
        u64 now = rq_of(cfs_rq)->clock;
        unsigned long delta_exec;

        if (unlikely(!curr))
                return;

        /*
         * Get the amount of time the current task was running
         * since the last time we changed load (this cannot
         * overflow on 32 bits):
         */
        delta_exec = (unsigned long)(now - curr->exec_start);
        if (!delta_exec)
                return;

        __update_curr(cfs_rq, curr, delta_exec);
        curr->exec_start = now;

        if (entity_is_task(curr)) {
                struct task_struct *curtask = task_of(curr);

                cpuacct_charge(curtask, delta_exec);
                account_group_exec_runtime(curtask, delta_exec);
        }
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * Are we enqueueing a waiting task? (for current tasks
         * a dequeue/enqueue event is a NOP)
         */
        if (se != cfs_rq->curr)
                update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        schedstat_set(se->wait_max, max(se->wait_max,
                        rq_of(cfs_rq)->clock - se->wait_start));
        schedstat_set(se->wait_count, se->wait_count + 1);
        schedstat_set(se->wait_sum, se->wait_sum +
                        rq_of(cfs_rq)->clock - se->wait_start);
-       schedstat_set(se->wait_start, 0);
-
#ifdef CONFIG_SCHEDSTATS
        if (entity_is_task(se)) {
                trace_sched_stat_wait(task_of(se),
                        rq_of(cfs_rq)->clock - se->wait_start);
        }
#endif
+       schedstat_set(se->wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * Mark the end of the wait period if dequeueing a
         * waiting task:
         */
        if (se != cfs_rq->curr)
                update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * We are starting a new run period:
         */
        se->exec_start = rq_of(cfs_rq)->clock;
}

/**************************************************
 * Scheduling class queueing methods:
 */

#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
static void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
        cfs_rq->task_weight += weight;
}
#else
static inline void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
}
#endif

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        update_load_add(&cfs_rq->load, se->load.weight);
        if (!parent_entity(se))
                inc_cpu_load(rq_of(cfs_rq), se->load.weight);
        if (entity_is_task(se)) {
                add_cfs_task_weight(cfs_rq, se->load.weight);
                list_add(&se->group_node, &cfs_rq->tasks);
        }
        cfs_rq->nr_running++;
        se->on_rq = 1;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        update_load_sub(&cfs_rq->load, se->load.weight);
        if (!parent_entity(se))
                dec_cpu_load(rq_of(cfs_rq), se->load.weight);
        if (entity_is_task(se)) {
                add_cfs_task_weight(cfs_rq, -se->load.weight);
                list_del_init(&se->group_node);
        }
        cfs_rq->nr_running--;
        se->on_rq = 0;
}

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
        struct task_struct *tsk = NULL;

        if (entity_is_task(se))
                tsk = task_of(se);

        if (se->sleep_start) {
                u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > se->sleep_max))
                        se->sleep_max = delta;

                se->sleep_start = 0;
                se->sum_sleep_runtime += delta;

                if (tsk) {
                        account_scheduler_latency(tsk, delta >> 10, 1);
                        trace_sched_stat_sleep(tsk, delta);
                }
        }
        if (se->block_start) {
                u64 delta = rq_of(cfs_rq)->clock - se->block_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > se->block_max))
                        se->block_max = delta;

                se->block_start = 0;
                se->sum_sleep_runtime += delta;

                if (tsk) {
                        if (tsk->in_iowait) {
                                se->iowait_sum += delta;
                                se->iowait_count++;
                                trace_sched_stat_iowait(tsk, delta);
                        }

                        /*
                         * Blocking time is in units of nanosecs, so shift by
                         * 20 to get a milliseconds-range estimation of the
                         * amount of time that the task spent sleeping:
                         */
                        if (unlikely(prof_on == SLEEP_PROFILING)) {
                                profile_hits(SLEEP_PROFILING,
                                                (void *)get_wchan(tsk),
                                                delta >> 20);
                        }
                        account_scheduler_latency(tsk, delta >> 10, 0);
                }
        }
#endif
}

static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
        s64 d = se->vruntime - cfs_rq->min_vruntime;

        if (d < 0)
                d = -d;

        if (d > 3*sysctl_sched_latency)
                schedstat_inc(cfs_rq, nr_spread_over);
#endif
}

static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
        u64 vruntime = cfs_rq->min_vruntime;

        /*
         * The 'current' period is already promised to the current tasks,
         * however the extra weight of the new task will slow them down a
         * little, place the new task so that it fits in the slot that
         * stays open at the end.
         */
        if (initial && sched_feat(START_DEBIT))
                vruntime += sched_vslice(cfs_rq, se);

        if (!initial) {
                /* sleeps up to a single latency don't count. */
                if (sched_feat(NEW_FAIR_SLEEPERS)) {
                        unsigned long thresh = sysctl_sched_latency;

                        /*
                         * Convert the sleeper threshold into virtual time.
                         * SCHED_IDLE is a special sub-class. We care about
                         * fairness only relative to other SCHED_IDLE tasks,
                         * all of which have the same weight.
                         */
                        if (sched_feat(NORMALIZED_SLEEPER) &&
                                        (!entity_is_task(se) ||
                                         task_of(se)->policy != SCHED_IDLE))
                                thresh = calc_delta_fair(thresh, se);

                        vruntime -= thresh;
                }
        }

        /* ensure we never gain time by being placed backwards. */
        vruntime = max_vruntime(se->vruntime, vruntime);

        se->vruntime = vruntime;
}

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
        /*
         * Update run-time statistics of the 'current'.
         */
        update_curr(cfs_rq);
        account_entity_enqueue(cfs_rq, se);

        if (wakeup) {
                place_entity(cfs_rq, se, 0);
                enqueue_sleeper(cfs_rq, se);
        }

        update_stats_enqueue(cfs_rq, se);
        check_spread(cfs_rq, se);
        if (se != cfs_rq->curr)
                __enqueue_entity(cfs_rq, se);
}

static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        if (cfs_rq->last == se)
                cfs_rq->last = NULL;

        if (cfs_rq->next == se)
                cfs_rq->next = NULL;
}

static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        for_each_sched_entity(se)
                __clear_buddies(cfs_rq_of(se), se);
}

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
        /*
         * Update run-time statistics of the 'current'.
         */
        update_curr(cfs_rq);

        update_stats_dequeue(cfs_rq, se);
        if (sleep) {
#ifdef CONFIG_SCHEDSTATS
                if (entity_is_task(se)) {
                        struct task_struct *tsk = task_of(se);

                        if (tsk->state & TASK_INTERRUPTIBLE)
                                se->sleep_start = rq_of(cfs_rq)->clock;
                        if (tsk->state & TASK_UNINTERRUPTIBLE)
                                se->block_start = rq_of(cfs_rq)->clock;
                }
#endif
        }

        clear_buddies(cfs_rq, se);

        if (se != cfs_rq->curr)
                __dequeue_entity(cfs_rq, se);
        account_entity_dequeue(cfs_rq, se);
        update_min_vruntime(cfs_rq);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
        unsigned long ideal_runtime, delta_exec;

        ideal_runtime = sched_slice(cfs_rq, curr);
        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
        if (delta_exec > ideal_runtime) {
                resched_task(rq_of(cfs_rq)->curr);
                /*
                 * The current task ran long enough, ensure it doesn't get
                 * re-elected due to buddy favours.
                 */
                clear_buddies(cfs_rq, curr);
        }
}

static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /* 'current' is not kept within the tree. */
        if (se->on_rq) {
                /*
                 * Any task has to be enqueued before it gets to execute on
                 * a CPU. So account for the time it spent waiting on the
                 * runqueue.
                 */
                update_stats_wait_end(cfs_rq, se);
                __dequeue_entity(cfs_rq, se);
        }

        update_stats_curr_start(cfs_rq, se);
        cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
        /*
         * Track our maximum slice length, if the CPU's load is at
         * least twice that of our own weight (i.e. don't track it
         * when there are only lesser-weight tasks around):
         */
        if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
                se->slice_max = max(se->slice_max,
                        se->sum_exec_runtime - se->prev_sum_exec_runtime);
        }
#endif
        se->prev_sum_exec_runtime = se->sum_exec_runtime;
}

static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
        struct sched_entity *se = __pick_next_entity(cfs_rq);

        if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1)
                return cfs_rq->next;

        if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1)
                return cfs_rq->last;

        return se;
}

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
        /*
         * If still on the runqueue then deactivate_task()
         * was not called and update_curr() has to be done:
         */
        if (prev->on_rq)
                update_curr(cfs_rq);

        check_spread(cfs_rq, prev);
        if (prev->on_rq) {
                update_stats_wait_start(cfs_rq, prev);
                /* Put 'current' back into the tree. */
                __enqueue_entity(cfs_rq, prev);
        }
        cfs_rq->curr = NULL;
}

static void
entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
{
        /*
         * Update run-time statistics of the 'current'.
         */
        update_curr(cfs_rq);

#ifdef CONFIG_SCHED_HRTICK
        /*
         * queued ticks are scheduled to match the slice, so don't bother
         * validating it and just reschedule.
         */
        if (queued) {
                resched_task(rq_of(cfs_rq)->curr);
                return;
        }
        /*
         * don't let the period tick interfere with the hrtick preemption
         */
        if (!sched_feat(DOUBLE_TICK) &&
                        hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
                return;
#endif

        if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
                check_preempt_tick(cfs_rq, curr);
}

/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_SCHED_HRTICK
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
        struct sched_entity *se = &p->se;
        struct cfs_rq *cfs_rq = cfs_rq_of(se);

        WARN_ON(task_rq(p) != rq);

        if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
                u64 slice = sched_slice(cfs_rq, se);
                u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
                s64 delta = slice - ran;

                if (delta < 0) {
                        if (rq->curr == p)
                                resched_task(p);
                        return;
                }

                /*
                 * Don't schedule slices shorter than 10000ns, that just
                 * doesn't make sense. Rely on vruntime for fairness.
                 */
                if (rq->curr != p)
                        delta = max_t(s64, 10000LL, delta);

                hrtick_start(rq, delta);
        }
}

/*
 * called from enqueue/dequeue and updates the hrtick when the
 * current task is from our class and nr_running is low enough
 * to matter.
 */
static void hrtick_update(struct rq *rq)
{
        struct task_struct *curr = rq->curr;

        if (curr->sched_class != &fair_sched_class)
                return;

        if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
                hrtick_start_fair(rq, curr);
}
#else /* !CONFIG_SCHED_HRTICK */
static inline void
hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
}

static inline void hrtick_update(struct rq *rq)
{
}
#endif

/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;

        for_each_sched_entity(se) {
                if (se->on_rq)
                        break;
                cfs_rq = cfs_rq_of(se);
                enqueue_entity(cfs_rq, se, wakeup);
                wakeup = 1;
        }

        hrtick_update(rq);
}

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
{
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;

        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                dequeue_entity(cfs_rq, se, sleep);
                /* Don't dequeue parent if it has other entities besides us */
                if (cfs_rq->load.weight)
                        break;
                sleep = 1;
        }

        hrtick_update(rq);
}
1019 1018
1020 /* 1019 /*
1021 * sched_yield() support is very simple - we dequeue and enqueue. 1020 * sched_yield() support is very simple - we dequeue and enqueue.
1022 * 1021 *
1023 * If compat_yield is turned on then we requeue to the end of the tree. 1022 * If compat_yield is turned on then we requeue to the end of the tree.
1024 */ 1023 */
1025 static void yield_task_fair(struct rq *rq) 1024 static void yield_task_fair(struct rq *rq)
1026 { 1025 {
1027 struct task_struct *curr = rq->curr; 1026 struct task_struct *curr = rq->curr;
1028 struct cfs_rq *cfs_rq = task_cfs_rq(curr); 1027 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
1029 struct sched_entity *rightmost, *se = &curr->se; 1028 struct sched_entity *rightmost, *se = &curr->se;
1030 1029
1031 /* 1030 /*
1032 * Are we the only task in the tree? 1031 * Are we the only task in the tree?
1033 */ 1032 */
1034 if (unlikely(cfs_rq->nr_running == 1)) 1033 if (unlikely(cfs_rq->nr_running == 1))
1035 return; 1034 return;
1036 1035
1037 clear_buddies(cfs_rq, se); 1036 clear_buddies(cfs_rq, se);
1038 1037
1039 if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) { 1038 if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
1040 update_rq_clock(rq); 1039 update_rq_clock(rq);
1041 /* 1040 /*
1042 * Update run-time statistics of the 'current'. 1041 * Update run-time statistics of the 'current'.
1043 */ 1042 */
1044 update_curr(cfs_rq); 1043 update_curr(cfs_rq);
1045 1044
1046 return; 1045 return;
1047 } 1046 }
1048 /* 1047 /*
1049 * Find the rightmost entry in the rbtree: 1048 * Find the rightmost entry in the rbtree:
1050 */ 1049 */
1051 rightmost = __pick_last_entity(cfs_rq); 1050 rightmost = __pick_last_entity(cfs_rq);
1052 /* 1051 /*
1053 * Already in the rightmost position? 1052 * Already in the rightmost position?
1054 */ 1053 */
1055 if (unlikely(!rightmost || entity_before(rightmost, se))) 1054 if (unlikely(!rightmost || entity_before(rightmost, se)))
1056 return; 1055 return;
1057 1056
1058 /* 1057 /*
1059 * Minimally necessary key value to be last in the tree: 1058 * Minimally necessary key value to be last in the tree:
1060 * Upon rescheduling, sched_class::put_prev_task() will place 1059 * Upon rescheduling, sched_class::put_prev_task() will place
1061 * 'current' within the tree based on its new key value. 1060 * 'current' within the tree based on its new key value.
1062 */ 1061 */
1063 se->vruntime = rightmost->vruntime + 1; 1062 se->vruntime = rightmost->vruntime + 1;
1064 } 1063 }
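
/*
 * Worked example with assumed numbers: if the rightmost entity sits at
 * vruntime = 1000000 and the yielding task at vruntime = 400000, the
 * assignment above moves the yielding task to 1000001, so the
 * subsequent put_prev_task() re-inserts it to the right of every
 * queued entity and it is picked last.
 */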

/*
 * wake_idle() will wake a task on an idle cpu if task->cpu is
 * not idle and an idle cpu is available. The span of cpus to
 * search starts with cpus closest then further out as needed,
 * so we always favor a closer, idle cpu.
 * Domains may include CPUs that are not usable for migration,
 * hence we need to mask them out (rq->rd->online)
 *
 * Returns the CPU we should wake onto.
 */
#if defined(ARCH_HAS_SCHED_WAKE_IDLE)

#define cpu_rd_active(cpu, rq) cpumask_test_cpu(cpu, rq->rd->online)

static int wake_idle(int cpu, struct task_struct *p)
{
	struct sched_domain *sd;
	int i;
	unsigned int chosen_wakeup_cpu;
	int this_cpu;
	struct rq *task_rq = task_rq(p);

	/*
	 * At POWERSAVINGS_BALANCE_WAKEUP level, if both this_cpu and prev_cpu
	 * are idle and this is not a kernel thread and this task's affinity
	 * allows it to be moved to preferred cpu, then just move!
	 */

	this_cpu = smp_processor_id();
	chosen_wakeup_cpu =
		cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu;

	if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP &&
	    idle_cpu(cpu) && idle_cpu(this_cpu) &&
	    p->mm && !(p->flags & PF_KTHREAD) &&
	    cpu_isset(chosen_wakeup_cpu, p->cpus_allowed))
		return chosen_wakeup_cpu;

	/*
	 * If it is idle, then it is the best cpu to run this task.
	 *
	 * This cpu is also the best, if it has more than one task already.
	 * Siblings must also be busy (in most cases), as they didn't already
	 * pick up the extra load from this cpu and hence we need not check
	 * sibling runqueue info. This avoids the checks and the cache-miss
	 * penalties associated with doing so.
	 */
	if (idle_cpu(cpu) || cpu_rq(cpu)->cfs.nr_running > 1)
		return cpu;

	for_each_domain(cpu, sd) {
		if ((sd->flags & SD_WAKE_IDLE)
		    || ((sd->flags & SD_WAKE_IDLE_FAR)
			&& !task_hot(p, task_rq->clock, sd))) {
			for_each_cpu_and(i, sched_domain_span(sd),
					 &p->cpus_allowed) {
				if (cpu_rd_active(i, task_rq) && idle_cpu(i)) {
					if (i != task_cpu(p)) {
						schedstat_inc(p,
							se.nr_wakeups_idle);
					}
					return i;
				}
			}
		} else {
			break;
		}
	}
	return cpu;
}
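
/*
 * Sketch of the domain walk above, under an assumed topology: a task
 * woken on a busy cpu 2 whose SMT sibling cpu 3 is idle. The first
 * SD_WAKE_IDLE domain spans {2, 3}, the inner loop finds i = 3 idle
 * and allowed by p->cpus_allowed, so we return 3 rather than queueing
 * the task behind cpu 2's current task.
 */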
#else /* !ARCH_HAS_SCHED_WAKE_IDLE */
static inline int wake_idle(int cpu, struct task_struct *p)
{
	return cpu;
}
#endif

#ifdef CONFIG_SMP

#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * effective_load() calculates the load change as seen from the root_task_group
 *
 * Adding load to a group doesn't make a group heavier, but can cause movement
 * of group shares between cpus. Assuming the shares were perfectly aligned,
 * one can calculate the shift in shares.
 *
 * The problem is that perfectly aligning the shares is rather expensive, hence
 * we try to avoid doing that too often - see update_shares(), which ratelimits
 * this change.
 *
 * We compensate for this by not only taking the current delta into account,
 * but also considering the delta between when the shares were last adjusted
 * and now.
 *
 * We still saw a performance dip, and some tracing taught us that when
 * balancing between cgroup:/ and cgroup:/foo the number of affine wakeups
 * increased significantly. Therefore try to bias the error in the direction
 * of failing the affine wakeup.
 */
static long effective_load(struct task_group *tg, int cpu,
			   long wl, long wg)
{
	struct sched_entity *se = tg->se[cpu];

	if (!tg->parent)
		return wl;

	/*
	 * By not taking the decrease of shares on the other cpu into
	 * account our error leans towards reducing the affine wakeups.
	 */
	if (!wl && sched_feat(ASYM_EFF_LOAD))
		return wl;

	for_each_sched_entity(se) {
		long S, rw, s, a, b;
		long more_w;

		/*
		 * Instead of using this increment, also add the difference
		 * between when the shares were last updated and now.
		 */
		more_w = se->my_q->load.weight - se->my_q->rq_weight;
		wl += more_w;
		wg += more_w;

		S = se->my_q->tg->shares;
		s = se->my_q->shares;
		rw = se->my_q->rq_weight;

		a = S*(rw + wl);
		b = S*rw + s*wg;

		wl = s*(a-b);

		if (likely(b))
			wl /= b;

		/*
		 * Assume the group is already running and will
		 * thus already be accounted for in the weight.
		 *
		 * That is, moving shares between CPUs does not
		 * alter the group weight.
		 */
		wg = 0;
	}

	return wl;
}
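
/*
 * Worked example with assumed numbers: S = 1024, s = 512, rw = 2048,
 * more_w = 0, and a nice-0 task being added, so wl = wg = 1024. Then
 * a = 1024*(2048 + 1024), b = 1024*2048 + 512*1024 = 2621440 and
 * wl = 512*(a - b)/b = 102: the 1024 units of task weight appear as
 * only ~102 units of load one level up, and the next iteration runs
 * with wg = 0.
 */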

#else

static inline unsigned long effective_load(struct task_group *tg, int cpu,
					   unsigned long wl, unsigned long wg)
{
	return wl;
}

#endif

static int
wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
	    struct task_struct *p, int prev_cpu, int this_cpu, int sync,
	    int idx, unsigned long load, unsigned long this_load,
	    unsigned int imbalance)
{
	struct task_struct *curr = this_rq->curr;
	struct task_group *tg;
	unsigned long tl = this_load;
	unsigned long tl_per_task;
	unsigned long weight;
	int balanced;

	if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
		return 0;

	if (sync && (curr->se.avg_overlap > sysctl_sched_migration_cost ||
		     p->se.avg_overlap > sysctl_sched_migration_cost))
		sync = 0;

	/*
	 * If sync wakeup then subtract the (maximum possible)
	 * effect of the currently running task from the load
	 * of the current CPU:
	 */
	if (sync) {
		tg = task_group(current);
		weight = current->se.load.weight;

		tl += effective_load(tg, this_cpu, -weight, -weight);
		load += effective_load(tg, prev_cpu, 0, -weight);
	}

	tg = task_group(p);
	weight = p->se.load.weight;

	/*
	 * In low-load situations, where prev_cpu is idle and this_cpu is idle
	 * due to the sync case above having dropped tl to 0, we'll always have
	 * an imbalance, but there's really nothing you can do about that, so
	 * that's good too.
	 *
	 * Otherwise check whether either cpu is close enough in load to allow
	 * this task to be woken on this_cpu.
	 */
	balanced = !tl ||
		100*(tl + effective_load(tg, this_cpu, weight, weight)) <=
		imbalance*(load + effective_load(tg, prev_cpu, 0, weight));

	/*
	 * If the currently running task will sleep within
	 * a reasonable amount of time then attract this newly
	 * woken task:
	 */
	if (sync && balanced)
		return 1;

	schedstat_inc(p, se.nr_wakeups_affine_attempts);
	tl_per_task = cpu_avg_load_per_task(this_cpu);

	if (balanced || (tl <= load && tl + target_load(prev_cpu, idx) <=
			 tl_per_task)) {
		/*
		 * This domain has SD_WAKE_AFFINE and
		 * p is cache cold in this domain, and
		 * there is no bad imbalance.
		 */
		schedstat_inc(this_sd, ttwu_move_affine);
		schedstat_inc(p, se.nr_wakeups_affine);

		return 1;
	}
	return 0;
}
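
/*
 * Worked example with assumed numbers and no group scheduling (the
 * effective_load() stub just returns wl): tl = 1024, load = 2048,
 * weight = 1024, imbalance = 112. Then 100*(1024 + 1024) = 204800 is
 * <= 112*(2048 + 0) = 229376, so balanced = 1 and a sync wakeup would
 * be pulled to this_cpu.
 */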

static int select_task_rq_fair(struct task_struct *p, int sync)
{
	struct sched_domain *sd, *this_sd = NULL;
	int prev_cpu, this_cpu, new_cpu;
	unsigned long load, this_load;
	struct rq *this_rq;
	unsigned int imbalance;
	int idx;

	prev_cpu = task_cpu(p);
	this_cpu = smp_processor_id();
	this_rq = cpu_rq(this_cpu);
	new_cpu = prev_cpu;

	/*
	 * 'this_sd' is the first domain that both
	 * this_cpu and prev_cpu are present in:
	 */
	for_each_domain(this_cpu, sd) {
		if (cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) {
			this_sd = sd;
			break;
		}
	}

	if (unlikely(!cpumask_test_cpu(this_cpu, &p->cpus_allowed)))
		goto out;

	/*
	 * Check for affine wakeup and passive balancing possibilities.
	 */
	if (!this_sd)
		goto out;

	idx = this_sd->wake_idx;

	imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;

	load = source_load(prev_cpu, idx);
	this_load = target_load(this_cpu, idx);

	if (wake_affine(this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx,
			load, this_load, imbalance))
		return this_cpu;

	/*
	 * Start passive balancing when half the imbalance_pct
	 * limit is reached.
	 */
	if (this_sd->flags & SD_WAKE_BALANCE) {
		if (imbalance*this_load <= 100*load) {
			schedstat_inc(this_sd, ttwu_move_balance);
			schedstat_inc(p, se.nr_wakeups_passive);
			return this_cpu;
		}
	}

out:
	return wake_idle(new_cpu, p);
}
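
/*
 * Example with an assumed imbalance_pct of 125: the code above yields
 * imbalance = 100 + (125 - 100)/2 = 112, i.e. half the domain's usual
 * threshold. Passive balancing then fires when, say, this_load = 1000
 * and load = 1200, since 112*1000 <= 100*1200.
 */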
#endif /* CONFIG_SMP */

/*
 * Adaptive granularity
 *
 * se->avg_wakeup gives the average time a task runs until it does a wakeup,
 * with the limit of wakeup_gran -- for the case when it never does a wakeup.
 *
 * So the smaller avg_wakeup is, the sooner we want this task to preempt,
 * but we don't want to treat the preemptee unfairly and therefore allow it
 * to run for at least the amount of time we'd like to run.
 *
 * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
 *
 * NOTE: we use *nr_running to scale with load; this nicely matches the
 * degrading latency under load.
 */
static unsigned long
adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
{
	u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
	u64 gran = 0;

	if (this_run < expected_wakeup)
		gran = expected_wakeup - this_run;

	return min_t(s64, gran, sysctl_sched_wakeup_granularity);
}
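
/*
 * Worked example with assumed values: avg_wakeup = 1ms, nr_running = 2
 * and curr has run 1ms since last being scheduled. Then
 * expected_wakeup = 2 * 1ms * 2 = 4ms and gran = 4ms - 1ms = 3ms,
 * still subject to the sysctl_sched_wakeup_granularity clamp above.
 */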

static unsigned long
wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
{
	unsigned long gran = sysctl_sched_wakeup_granularity;

	if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
		gran = adaptive_gran(curr, se);

	/*
	 * Since it's curr that is running now, convert the gran from
	 * real-time to virtual-time in its units.
	 */
	if (sched_feat(ASYM_GRAN)) {
		/*
		 * By using 'se' instead of 'curr' we penalize light tasks, so
		 * they get preempted more easily. That is, if 'se' < 'curr'
		 * then the resulting gran will be larger, therefore penalizing
		 * the lighter task; if, on the other hand, 'se' > 'curr' then
		 * the resulting gran will be smaller, again penalizing the
		 * lighter task.
		 *
		 * This is especially important for buddies when the leftmost
		 * task is higher priority than the buddy.
		 */
		if (unlikely(se->load.weight != NICE_0_LOAD))
			gran = calc_delta_fair(gran, se);
	} else {
		if (unlikely(curr->load.weight != NICE_0_LOAD))
			gran = calc_delta_fair(gran, curr);
	}

	return gran;
}

/*
 * Should 'se' preempt 'curr'?
 *
 *             |s1
 *        |s2
 *   |s3
 *         g
 *      |<--->|c
 *
 *  w(c, s1) = -1
 *  w(c, s2) =  0
 *  w(c, s3) =  1
 *
 */
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
{
	s64 gran, vdiff = curr->vruntime - se->vruntime;

	if (vdiff <= 0)
		return -1;

	gran = wakeup_gran(curr, se);
	if (vdiff > gran)
		return 1;

	return 0;
}
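
/*
 * Concrete mapping of the diagram above, values assumed: with
 * curr->vruntime = 1000 and gran = 100, a waking se at vruntime 1050
 * gives vdiff = -50 and returns -1 (no preemption), one at 950 gives
 * vdiff = 50 <= gran and returns 0, and one at 850 gives
 * vdiff = 150 > gran and returns 1 (preempt).
 */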

static void set_last_buddy(struct sched_entity *se)
{
	if (likely(task_of(se)->policy != SCHED_IDLE)) {
		for_each_sched_entity(se)
			cfs_rq_of(se)->last = se;
	}
}

static void set_next_buddy(struct sched_entity *se)
{
	if (likely(task_of(se)->policy != SCHED_IDLE)) {
		for_each_sched_entity(se)
			cfs_rq_of(se)->next = se;
	}
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
{
	struct task_struct *curr = rq->curr;
	struct sched_entity *se = &curr->se, *pse = &p->se;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);

	update_curr(cfs_rq);

	if (unlikely(rt_prio(p->prio))) {
		resched_task(curr);
		return;
	}

	if (unlikely(p->sched_class != &fair_sched_class))
		return;

	if (unlikely(se == pse))
		return;

	/*
	 * Only set the backward buddy when the current task is still on the
	 * rq. This can happen when a wakeup gets interleaved with schedule at
	 * the ->pre_schedule() or idle_balance() point, either of which can
	 * drop the rq lock.
	 *
	 * Also, during early boot the idle thread is in the fair class; for
	 * obvious reasons it's a bad idea to schedule back to the idle thread.
	 */
	if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle))
		set_last_buddy(se);
	set_next_buddy(pse);

	/*
	 * We can come here with TIF_NEED_RESCHED already set from the new
	 * task wakeup path.
	 */
	if (test_tsk_need_resched(curr))
		return;

	/*
	 * Batch and idle tasks do not preempt (their preemption is driven by
	 * the tick):
	 */
	if (unlikely(p->policy != SCHED_NORMAL))
		return;

	/* Idle tasks are by definition preempted by everybody. */
	if (unlikely(curr->policy == SCHED_IDLE)) {
		resched_task(curr);
		return;
	}

	if (!sched_feat(WAKEUP_PREEMPT))
		return;

	if (sched_feat(WAKEUP_OVERLAP) && (sync ||
			(se->avg_overlap < sysctl_sched_migration_cost &&
			 pse->avg_overlap < sysctl_sched_migration_cost))) {
		resched_task(curr);
		return;
	}

	find_matching_se(&se, &pse);

	BUG_ON(!pse);

	if (wakeup_preempt_entity(se, pse) == 1)
		resched_task(curr);
}

static struct task_struct *pick_next_task_fair(struct rq *rq)
{
	struct task_struct *p;
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct sched_entity *se;

	if (unlikely(!cfs_rq->nr_running))
		return NULL;

	do {
		se = pick_next_entity(cfs_rq);
		/*
		 * If se was a buddy, clear it so that it will have to earn
		 * the favour again.
		 */
		__clear_buddies(cfs_rq, se);
		set_next_entity(cfs_rq, se);
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);

	p = task_of(se);
	hrtick_start_fair(rq, p);

	return p;
}

/*
 * Account for a descheduled task:
 */
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
	struct sched_entity *se = &prev->se;
	struct cfs_rq *cfs_rq;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		put_prev_entity(cfs_rq, se);
	}
}

#ifdef CONFIG_SMP
/**************************************************
 * Fair scheduling class load-balancing methods:
 */

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static struct task_struct *
__load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
{
	struct task_struct *p = NULL;
	struct sched_entity *se;

	if (next == &cfs_rq->tasks)
		return NULL;

	se = list_entry(next, struct sched_entity, group_node);
	p = task_of(se);
	cfs_rq->balance_iterator = next->next;

	return p;
}
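
/*
 * The pre-iteration is what makes this dequeue-safe: balance_iterator
 * is advanced to next->next before p is handed to the balancer, so
 * even if the balancer migrates p off this runqueue, the following
 * load_balance_next_fair() call resumes from a node that is still on
 * cfs_rq->tasks.
 */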

static struct task_struct *load_balance_start_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
}

static struct task_struct *load_balance_next_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
}

static unsigned long
__load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move, struct sched_domain *sd,
		enum cpu_idle_type idle, int *all_pinned, int *this_best_prio,
		struct cfs_rq *cfs_rq)
{
	struct rq_iterator cfs_rq_iterator;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;
	cfs_rq_iterator.arg = cfs_rq;

	return balance_tasks(this_rq, this_cpu, busiest,
			max_load_move, sd, idle, all_pinned,
			this_best_prio, &cfs_rq_iterator);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	long rem_load_move = max_load_move;
	int busiest_cpu = cpu_of(busiest);
	struct task_group *tg;

	rcu_read_lock();
	update_h_load(busiest_cpu);

	list_for_each_entry_rcu(tg, &task_groups, list) {
		struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
		unsigned long busiest_h_load = busiest_cfs_rq->h_load;
		unsigned long busiest_weight = busiest_cfs_rq->load.weight;
		u64 rem_load, moved_load;

		/*
		 * empty group
		 */
		if (!busiest_cfs_rq->task_weight)
			continue;

		rem_load = (u64)rem_load_move * busiest_weight;
		rem_load = div_u64(rem_load, busiest_h_load + 1);

		moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
				rem_load, sd, idle, all_pinned, this_best_prio,
				tg->cfs_rq[busiest_cpu]);

		if (!moved_load)
			continue;

		moved_load *= busiest_h_load;
		moved_load = div_u64(moved_load, busiest_weight + 1);

		rem_load_move -= moved_load;
		if (rem_load_move < 0)
			break;
	}
	rcu_read_unlock();

	return max_load_move - rem_load_move;
}
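
/*
 * Worked example with assumed numbers: rem_load_move = 1024 in root
 * units, busiest_weight = 2048 and busiest_h_load = 512. The group's
 * local budget becomes rem_load = 1024*2048/513 ~= 4088; if the
 * balancer then moves 2048 units of group-local weight, that converts
 * back as moved_load = 2048*512/2049 ~= 511 root units, which is what
 * gets subtracted from rem_load_move.
 */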
#else
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	return __load_balance_fair(this_rq, this_cpu, busiest,
			max_load_move, sd, idle, all_pinned,
			this_best_prio, &busiest->cfs);
}
#endif

static int
move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		   struct sched_domain *sd, enum cpu_idle_type idle)
{
	struct cfs_rq *busy_cfs_rq;
	struct rq_iterator cfs_rq_iterator;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;

	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
		/*
		 * pass busy_cfs_rq argument into
		 * load_balance_[start|next]_fair iterators
		 */
		cfs_rq_iterator.arg = busy_cfs_rq;
		if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
				       &cfs_rq_iterator))
			return 1;
	}

	return 0;
}
#endif /* CONFIG_SMP */

/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se, queued);
	}
}

/*
 * Share the fairness runtime between parent and child, so that the
 * total amount of pressure on the CPU stays equal - new tasks
 * get a chance to run, but frequent forkers are not allowed to
 * monopolize the CPU. Note: the parent's runqueue is locked and
 * the child is not running yet.
 */
static void task_new_fair(struct rq *rq, struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);
	struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
	int this_cpu = smp_processor_id();

	sched_info_queued(p);

	update_curr(cfs_rq);
	if (curr)
		se->vruntime = curr->vruntime;
	place_entity(cfs_rq, se, 1);

	/* 'curr' will be NULL if the child belongs to a different group */
	if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
			curr && entity_before(curr, se)) {
		/*
		 * Upon rescheduling, sched_class::put_prev_task() will place
		 * 'current' within the tree based on its new key value.
		 */
		swap(curr->vruntime, se->vruntime);
		resched_task(rq->curr);
	}

	enqueue_task_fair(rq, p, 0);
}
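
/*
 * Example with assumed values: with sysctl_sched_child_runs_first set,
 * curr->vruntime = 1000 and the child placed at vruntime = 1200,
 * entity_before(curr, se) holds, so the swap() leaves the child at
 * 1000 and the parent at 1200, and the resched lets the child run
 * first.
 */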

/*
 * Priority of the task has changed. Check to see if we preempt
 * the current task.
 */
static void prio_changed_fair(struct rq *rq, struct task_struct *p,
			      int oldprio, int running)
{
	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current's.
	 */
	if (running) {
		if (p->prio > oldprio)
			resched_task(rq->curr);
	} else
		check_preempt_curr(rq, p, 0);
}

/*
 * We switched to the sched_fair class.
 */
static void switched_to_fair(struct rq *rq, struct task_struct *p,
			     int running)
{
	/*
	 * We were most likely switched from sched_rt, so
	 * kick off the schedule if running, otherwise just see
	 * if we can still preempt the current task.
	 */
	if (running)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p, 0);
}

/*
 * Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;

	for_each_sched_entity(se)
		set_next_entity(cfs_rq_of(se), se);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static void moved_group_fair(struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);

	update_curr(cfs_rq);
	place_entity(cfs_rq, &p->se, 1);
}
#endif

/*
 * All the scheduling class methods:
 */
static const struct sched_class fair_sched_class = {
	.next			= &idle_sched_class,
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_fair,

	.load_balance		= load_balance_fair,
	.move_one_task		= move_one_task_fair,
#endif

	.set_curr_task		= set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_new		= task_new_fair,

	.prio_changed		= prio_changed_fair,
	.switched_to		= switched_to_fair,

#ifdef CONFIG_FAIR_GROUP_SCHED
	.moved_group		= moved_group_fair,
#endif
};

#ifdef CONFIG_SCHED_DEBUG
static void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

	rcu_read_lock();
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
	rcu_read_unlock();
}
#endif