kernel/sched_rt.c
  /*
   * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
   * policies)
   */
  static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
  {
  	return container_of(rt_se, struct task_struct, rt);
  }
  
  #ifdef CONFIG_RT_GROUP_SCHED
  
  static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
  {
  	return rt_rq->rq;
  }
  
  static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
  {
  	return rt_se->rt_rq;
  }
  
  #else /* CONFIG_RT_GROUP_SCHED */
  
  static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
  {
  	return container_of(rt_rq, struct rq, rt);
  }
  
  static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
  {
  	struct task_struct *p = rt_task_of(rt_se);
  	struct rq *rq = task_rq(p);
  
  	return &rq->rt;
  }
  
  #endif /* CONFIG_RT_GROUP_SCHED */
  #ifdef CONFIG_SMP
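  /*
   * RT overload tracking: rto_count and the rto_mask cpumask live in the
   * root domain and record which runqueues currently have RT tasks that
   * could be pushed to another CPU.
   */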

  static inline int rt_overloaded(struct rq *rq)
  {
  	return atomic_read(&rq->rd->rto_count);
  }

  static inline void rt_set_overload(struct rq *rq)
  {
  	if (!rq->online)
  		return;
  	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
  	/*
  	 * Make sure the mask is visible before we set
  	 * the overload count. That is checked to determine
  	 * if we should look at the mask. It would be a shame
  	 * if we looked at the mask, but the mask was not
  	 * updated yet.
  	 */
  	wmb();
  	atomic_inc(&rq->rd->rto_count);
  }

  static inline void rt_clear_overload(struct rq *rq)
  {
  	if (!rq->online)
  		return;
  	/* the order here really doesn't matter */
  	atomic_dec(&rq->rd->rto_count);
  	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
  }
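  /*
   * A runqueue counts as RT-overloaded when it has more than one runnable
   * RT task and at least one of them can migrate; keep the per-rq flag and
   * the root-domain overload state in sync with that condition.
   */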

  static void update_rt_migration(struct rt_rq *rt_rq)
  {
  	if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
  		if (!rt_rq->overloaded) {
  			rt_set_overload(rq_of_rt_rq(rt_rq));
  			rt_rq->overloaded = 1;
  		}
  	} else if (rt_rq->overloaded) {
  		rt_clear_overload(rq_of_rt_rq(rt_rq));
  		rt_rq->overloaded = 0;
  	}
  }

  static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	if (rt_se->nr_cpus_allowed > 1)
  		rt_rq->rt_nr_migratory++;
  
  	update_rt_migration(rt_rq);
  }
  
  static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	if (rt_se->nr_cpus_allowed > 1)
  		rt_rq->rt_nr_migratory--;
  
  	update_rt_migration(rt_rq);
  }
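  /*
   * Tasks that may run on more than one CPU are kept on a per-rq plist of
   * "pushable" tasks, ordered by priority, so the push logic can find a
   * candidate without scanning the whole runqueue.
   */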
  static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
  {
  	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
  	plist_node_init(&p->pushable_tasks, p->prio);
  	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
  }
  
  static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
  {
  	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
  }
  
  #else
  static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
  {
  }
  static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
  {
  }
  static inline
  void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  }
  static inline
  void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  }

  #endif /* CONFIG_SMP */
  static inline int on_rt_rq(struct sched_rt_entity *rt_se)
  {
  	return !list_empty(&rt_se->run_list);
  }
  #ifdef CONFIG_RT_GROUP_SCHED
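  /*
   * With group scheduling each task_group has its own per-CPU rt_rq and
   * draws runtime from tg->rt_bandwidth; the !CONFIG_RT_GROUP_SCHED
   * variants further down fall back to the single per-CPU rt_rq and
   * def_rt_bandwidth.
   */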

  static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
  {
  	if (!rt_rq->tg)
  		return RUNTIME_INF;

  	return rt_rq->rt_runtime;
  }
  
  static inline u64 sched_rt_period(struct rt_rq *rt_rq)
  {
  	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
  }
  
  #define for_each_leaf_rt_rq(rt_rq, rq) \
  	list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

  #define for_each_sched_rt_entity(rt_se) \
  	for (; rt_se; rt_se = rt_se->parent)
  
  static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
  {
  	return rt_se->my_q;
  }
  
  static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
  static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
  static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
  {
  	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
  	struct sched_rt_entity *rt_se = rt_rq->rt_se;
  	if (rt_rq->rt_nr_running) {
  		if (rt_se && !on_rt_rq(rt_se))
  			enqueue_rt_entity(rt_se);
  		if (rt_rq->highest_prio.curr < curr->prio)
  			resched_task(curr);
  	}
  }
  static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
  {
  	struct sched_rt_entity *rt_se = rt_rq->rt_se;
  
  	if (rt_se && on_rt_rq(rt_se))
  		dequeue_rt_entity(rt_se);
  }
  static inline int rt_rq_throttled(struct rt_rq *rt_rq)
  {
  	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
  }
  
  static int rt_se_boosted(struct sched_rt_entity *rt_se)
  {
  	struct rt_rq *rt_rq = group_rt_rq(rt_se);
  	struct task_struct *p;
  
  	if (rt_rq)
  		return !!rt_rq->rt_nr_boosted;
  
  	p = rt_task_of(rt_se);
  	return p->prio != p->normal_prio;
  }
  #ifdef CONFIG_SMP
  static inline const struct cpumask *sched_rt_period_mask(void)
  {
  	return cpu_rq(smp_processor_id())->rd->span;
  }
  #else
  static inline const struct cpumask *sched_rt_period_mask(void)
  {
  	return cpu_online_mask;
  }
  #endif

  static inline
  struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
  {
  	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
  }

  static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
  {
  	return &rt_rq->tg->rt_bandwidth;
  }
  #else /* !CONFIG_RT_GROUP_SCHED */
  
  static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
  {
  	return rt_rq->rt_runtime;
  }
  
  static inline u64 sched_rt_period(struct rt_rq *rt_rq)
  {
  	return ktime_to_ns(def_rt_bandwidth.rt_period);
  }
  
  #define for_each_leaf_rt_rq(rt_rq, rq) \
  	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
  #define for_each_sched_rt_entity(rt_se) \
  	for (; rt_se; rt_se = NULL)
  
  static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
  {
  	return NULL;
  }
  static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
  {
  	if (rt_rq->rt_nr_running)
  		resched_task(rq_of_rt_rq(rt_rq)->curr);
  }
  static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
  {
  }
  static inline int rt_rq_throttled(struct rt_rq *rt_rq)
  {
  	return rt_rq->rt_throttled;
  }

  static inline const struct cpumask *sched_rt_period_mask(void)
  {
  	return cpu_online_mask;
  }
  
  static inline
  struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
  {
  	return &cpu_rq(cpu)->rt;
  }
  static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
  {
  	return &def_rt_bandwidth;
  }
  #endif /* CONFIG_RT_GROUP_SCHED */

  #ifdef CONFIG_SMP
  /*
   * We ran out of runtime, see if we can borrow some from our neighbours.
   */
  static int do_balance_runtime(struct rt_rq *rt_rq)
  {
  	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
  	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
  	int i, weight, more = 0;
  	u64 rt_period;
  	weight = cpumask_weight(rd->span);
  
  	spin_lock(&rt_b->rt_runtime_lock);
  	rt_period = ktime_to_ns(rt_b->rt_period);
  	for_each_cpu(i, rd->span) {
  		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
  		s64 diff;
  
  		if (iter == rt_rq)
  			continue;
  
  		spin_lock(&iter->rt_runtime_lock);
  		/*
  		 * Either all rqs have inf runtime and there's nothing to steal
  		 * or __disable_runtime() below sets a specific rq to inf to
  		 * indicate it's been disabled and disallow stealing.
  		 */
  		if (iter->rt_runtime == RUNTIME_INF)
  			goto next;
  		/*
  		 * From runqueues with spare time, take 1/n part of their
  		 * spare time, but no more than our period.
  		 */
  		diff = iter->rt_runtime - iter->rt_time;
  		if (diff > 0) {
  			diff = div_u64((u64)diff, weight);
  			if (rt_rq->rt_runtime + diff > rt_period)
  				diff = rt_period - rt_rq->rt_runtime;
  			iter->rt_runtime -= diff;
  			rt_rq->rt_runtime += diff;
  			more = 1;
  			if (rt_rq->rt_runtime == rt_period) {
  				spin_unlock(&iter->rt_runtime_lock);
  				break;
  			}
  		}
  next:
  		spin_unlock(&iter->rt_runtime_lock);
  	}
  	spin_unlock(&rt_b->rt_runtime_lock);
  
  	return more;
  }

  /*
   * Ensure this RQ takes back all the runtime it lent to its neighbours.
   */
  static void __disable_runtime(struct rq *rq)
  {
  	struct root_domain *rd = rq->rd;
  	struct rt_rq *rt_rq;
  
  	if (unlikely(!scheduler_running))
  		return;
  
  	for_each_leaf_rt_rq(rt_rq, rq) {
  		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
  		s64 want;
  		int i;
  
  		spin_lock(&rt_b->rt_runtime_lock);
  		spin_lock(&rt_rq->rt_runtime_lock);
  		/*
  		 * Either we're all inf and nobody needs to borrow, or we're
  		 * already disabled and thus have nothing to do, or we have
  		 * exactly the right amount of runtime to take out.
  		 */
  		if (rt_rq->rt_runtime == RUNTIME_INF ||
  				rt_rq->rt_runtime == rt_b->rt_runtime)
  			goto balanced;
  		spin_unlock(&rt_rq->rt_runtime_lock);
  		/*
  		 * Calculate the difference between what we started out with
  		 * and what we currently have; that's the amount of runtime
  		 * we lent and now have to reclaim.
  		 */
  		want = rt_b->rt_runtime - rt_rq->rt_runtime;
  		/*
  		 * Greedy reclaim, take back as much as we can.
  		 */
  		for_each_cpu(i, rd->span) {
  			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
  			s64 diff;
  			/*
  			 * Can't reclaim from ourselves or disabled runqueues.
  			 */
  			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
  				continue;
  
  			spin_lock(&iter->rt_runtime_lock);
  			if (want > 0) {
  				diff = min_t(s64, iter->rt_runtime, want);
  				iter->rt_runtime -= diff;
  				want -= diff;
  			} else {
  				iter->rt_runtime -= want;
  				want -= want;
  			}
  			spin_unlock(&iter->rt_runtime_lock);
  
  			if (!want)
  				break;
  		}
  
  		spin_lock(&rt_rq->rt_runtime_lock);
  		/*
  		 * We cannot be left wanting - that would mean some runtime
  		 * leaked out of the system.
  		 */
  		BUG_ON(want);
  balanced:
  		/*
  		 * Disable all the borrow logic by pretending we have inf
  		 * runtime - in which case borrowing doesn't make sense.
  		 */
  		rt_rq->rt_runtime = RUNTIME_INF;
  		spin_unlock(&rt_rq->rt_runtime_lock);
  		spin_unlock(&rt_b->rt_runtime_lock);
  	}
  }
  
  static void disable_runtime(struct rq *rq)
  {
  	unsigned long flags;
  
  	spin_lock_irqsave(&rq->lock, flags);
  	__disable_runtime(rq);
  	spin_unlock_irqrestore(&rq->lock, flags);
  }
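  /*
   * Undo __disable_runtime(): give every rt_rq its full bandwidth back and
   * clear any accumulated time and throttling.
   */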
  
  static void __enable_runtime(struct rq *rq)
  {
  	struct rt_rq *rt_rq;
  
  	if (unlikely(!scheduler_running))
  		return;
  	/*
  	 * Reset each runqueue's bandwidth settings
  	 */
  	for_each_leaf_rt_rq(rt_rq, rq) {
  		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
  
  		spin_lock(&rt_b->rt_runtime_lock);
  		spin_lock(&rt_rq->rt_runtime_lock);
  		rt_rq->rt_runtime = rt_b->rt_runtime;
  		rt_rq->rt_time = 0;
  		rt_rq->rt_throttled = 0;
  		spin_unlock(&rt_rq->rt_runtime_lock);
  		spin_unlock(&rt_b->rt_runtime_lock);
  	}
  }
  
  static void enable_runtime(struct rq *rq)
  {
  	unsigned long flags;
  
  	spin_lock_irqsave(&rq->lock, flags);
  	__enable_runtime(rq);
  	spin_unlock_irqrestore(&rq->lock, flags);
  }
  static int balance_runtime(struct rt_rq *rt_rq)
  {
  	int more = 0;
  
  	if (rt_rq->rt_time > rt_rq->rt_runtime) {
  		spin_unlock(&rt_rq->rt_runtime_lock);
  		more = do_balance_runtime(rt_rq);
  		spin_lock(&rt_rq->rt_runtime_lock);
  	}
  
  	return more;
  }
  #else /* !CONFIG_SMP */
  static inline int balance_runtime(struct rt_rq *rt_rq)
  {
  	return 0;
  }
  #endif /* CONFIG_SMP */
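  /*
   * Periodic RT bandwidth refresh: decay the accumulated rt_time on every
   * rt_rq in the period mask, unthrottle queues that fit their budget
   * again, and return whether the period timer may go idle.
   */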

  static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
  {
  	int i, idle = 1;
  	const struct cpumask *span;

  	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
  		return 1;
  
  	span = sched_rt_period_mask();
  	for_each_cpu(i, span) {
  		int enqueue = 0;
  		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
  		struct rq *rq = rq_of_rt_rq(rt_rq);
  
  		spin_lock(&rq->lock);
  		if (rt_rq->rt_time) {
  			u64 runtime;
  
  			spin_lock(&rt_rq->rt_runtime_lock);
  			if (rt_rq->rt_throttled)
  				balance_runtime(rt_rq);
  			runtime = rt_rq->rt_runtime;
  			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
  			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
  				rt_rq->rt_throttled = 0;
  				enqueue = 1;
  			}
  			if (rt_rq->rt_time || rt_rq->rt_nr_running)
  				idle = 0;
  			spin_unlock(&rt_rq->rt_runtime_lock);
  		} else if (rt_rq->rt_nr_running)
  			idle = 0;
  
  		if (enqueue)
  			sched_rt_rq_enqueue(rt_rq);
  		spin_unlock(&rq->lock);
  	}
  
  	return idle;
  }

  static inline int rt_se_prio(struct sched_rt_entity *rt_se)
  {
  #ifdef CONFIG_RT_GROUP_SCHED
  	struct rt_rq *rt_rq = group_rt_rq(rt_se);
  
  	if (rt_rq)
  		return rt_rq->highest_prio.curr;
  #endif
  
  	return rt_task_of(rt_se)->prio;
  }
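  /*
   * Charge accumulated rt_time against this rt_rq's runtime budget; when
   * the budget for the current period is exhausted, mark the rt_rq
   * throttled, dequeue it and return 1 so the caller can reschedule.
   */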
  static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
  {
  	u64 runtime = sched_rt_runtime(rt_rq);

  	if (rt_rq->rt_throttled)
  		return rt_rq_throttled(rt_rq);

  	if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
  		return 0;
  	balance_runtime(rt_rq);
  	runtime = sched_rt_runtime(rt_rq);
  	if (runtime == RUNTIME_INF)
  		return 0;

  	if (rt_rq->rt_time > runtime) {
  		rt_rq->rt_throttled = 1;
  		if (rt_rq_throttled(rt_rq)) {
  			sched_rt_rq_dequeue(rt_rq);
  			return 1;
  		}
  	}
  
  	return 0;
  }
  /*
   * Update the current task's runtime statistics. Skip current tasks that
   * are not in our scheduling class.
   */
  static void update_curr_rt(struct rq *rq)
  {
  	struct task_struct *curr = rq->curr;
  	struct sched_rt_entity *rt_se = &curr->rt;
  	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
  	u64 delta_exec;
  
  	if (!task_has_rt_policy(curr))
  		return;
  	delta_exec = rq->clock - curr->se.exec_start;
  	if (unlikely((s64)delta_exec < 0))
  		delta_exec = 0;
  
  	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
  
  	curr->se.sum_exec_runtime += delta_exec;
  	account_group_exec_runtime(curr, delta_exec);
  	curr->se.exec_start = rq->clock;
  	cpuacct_charge(curr, delta_exec);

  	if (!rt_bandwidth_enabled())
  		return;
  	for_each_sched_rt_entity(rt_se) {
  		rt_rq = rt_rq_of_se(rt_se);
  		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
  			spin_lock(&rt_rq->rt_runtime_lock);
  			rt_rq->rt_time += delta_exec;
  			if (sched_rt_runtime_exceeded(rt_rq))
  				resched_task(curr);
  			spin_unlock(&rt_rq->rt_runtime_lock);
  		}
  	}
  }
  #if defined CONFIG_SMP
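  /*
   * On SMP we also track the second-highest priority on each rt_rq
   * (highest_prio.next) and publish priority changes to cpupri, which the
   * push/pull logic uses to pick target CPUs cheaply.
   */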
  
  static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
  
  static inline int next_prio(struct rq *rq)
  {
  	struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
  
  	if (next && rt_prio(next->prio))
  		return next->prio;
  	else
  		return MAX_RT_PRIO;
  }

  static void
  inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
  {
  	struct rq *rq = rq_of_rt_rq(rt_rq);

  	if (prio < prev_prio) {

  		/*
  		 * If the new task is higher in priority than anything on the
  		 * run-queue, we know that the previous high becomes our
  		 * next-highest.
  		 */
  		rt_rq->highest_prio.next = prev_prio;
  
  		if (rq->online)
  			cpupri_set(&rq->rd->cpupri, rq->cpu, prio);

  	} else if (prio == rt_rq->highest_prio.curr)
  		/*
  		 * If the next task is equal in priority to the highest on
  		 * the run-queue, then we implicitly know that the next highest
  		 * task cannot be any lower than current
  		 */
  		rt_rq->highest_prio.next = prio;
  	else if (prio < rt_rq->highest_prio.next)
  		/*
  		 * Otherwise, we need to recompute next-highest
  		 */
  		rt_rq->highest_prio.next = next_prio(rq);
  }

  static void
  dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
  {
  	struct rq *rq = rq_of_rt_rq(rt_rq);

  	if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
  		rt_rq->highest_prio.next = next_prio(rq);
  
  	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
  		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
  }
  #else /* CONFIG_SMP */
  static inline
  void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
  static inline
  void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
  
  #endif /* CONFIG_SMP */

  #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
  static void
  inc_rt_prio(struct rt_rq *rt_rq, int prio)
  {
  	int prev_prio = rt_rq->highest_prio.curr;
  
  	if (prio < prev_prio)
  		rt_rq->highest_prio.curr = prio;
  
  	inc_rt_prio_smp(rt_rq, prio, prev_prio);
  }
  
  static void
  dec_rt_prio(struct rt_rq *rt_rq, int prio)
  {
  	int prev_prio = rt_rq->highest_prio.curr;
  	if (rt_rq->rt_nr_running) {

  		WARN_ON(prio < prev_prio);

  		/*
  		 * This may have been our highest task, and therefore
  		 * we may have some recomputation to do
  		 */
  		if (prio == prev_prio) {
  			struct rt_prio_array *array = &rt_rq->active;
  
  			rt_rq->highest_prio.curr =
  				sched_find_first_bit(array->bitmap);
  		}
  	} else
  		rt_rq->highest_prio.curr = MAX_RT_PRIO;

  	dec_rt_prio_smp(rt_rq, prio, prev_prio);
  }

  #else
  
  static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
  static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
  
  #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

  #ifdef CONFIG_RT_GROUP_SCHED
  
  static void
  inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	if (rt_se_boosted(rt_se))
  		rt_rq->rt_nr_boosted++;
  
  	if (rt_rq->tg)
  		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
  }
  
  static void
  dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	if (rt_se_boosted(rt_se))
  		rt_rq->rt_nr_boosted--;
  
  	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
  }
  
  #else /* CONFIG_RT_GROUP_SCHED */
  
  static void
  inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	start_rt_bandwidth(&def_rt_bandwidth);
  }
  
  static inline
  void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
  
  #endif /* CONFIG_RT_GROUP_SCHED */
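  /*
   * Common bookkeeping when an entity is added to or removed from an
   * rt_rq: nr_running, highest priority, migration and group/bandwidth
   * state.
   */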
  
  static inline
  void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	int prio = rt_se_prio(rt_se);
  
  	WARN_ON(!rt_prio(prio));
  	rt_rq->rt_nr_running++;
  
  	inc_rt_prio(rt_rq, prio);
  	inc_rt_migration(rt_se, rt_rq);
  	inc_rt_group(rt_se, rt_rq);
  }
  
  static inline
  void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
  	WARN_ON(!rt_rq->rt_nr_running);
  	rt_rq->rt_nr_running--;
  
  	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
  	dec_rt_migration(rt_se, rt_rq);
  	dec_rt_group(rt_se, rt_rq);
  }
  static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
  {
  	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
  	struct rt_prio_array *array = &rt_rq->active;
  	struct rt_rq *group_rq = group_rt_rq(rt_se);
  	struct list_head *queue = array->queue + rt_se_prio(rt_se);

  	/*
  	 * Don't enqueue the group if it's throttled, or when empty.
  	 * The latter is a consequence of the former when a child group
  	 * gets throttled and the current group doesn't have any other
  	 * active members.
  	 */
  	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
  		return;

  	list_add_tail(&rt_se->run_list, queue);
  	__set_bit(rt_se_prio(rt_se), array->bitmap);

  	inc_rt_tasks(rt_se, rt_rq);
  }
  static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
  {
  	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
  	struct rt_prio_array *array = &rt_rq->active;
  
  	list_del_init(&rt_se->run_list);
  	if (list_empty(array->queue + rt_se_prio(rt_se)))
  		__clear_bit(rt_se_prio(rt_se), array->bitmap);
  
  	dec_rt_tasks(rt_se, rt_rq);
  }
  
  /*
   * Because the prio of an upper entry depends on the lower
   * entries, we must remove entries top-down.
   */
  static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
  {
  	struct sched_rt_entity *back = NULL;

  	for_each_sched_rt_entity(rt_se) {
  		rt_se->back = back;
  		back = rt_se;
  	}
  
  	for (rt_se = back; rt_se; rt_se = rt_se->back) {
  		if (on_rt_rq(rt_se))
  			__dequeue_rt_entity(rt_se);
  	}
  }
  
  static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
  {
  	dequeue_rt_stack(rt_se);
  	for_each_sched_rt_entity(rt_se)
  		__enqueue_rt_entity(rt_se);
  }
  
  static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
  {
  	dequeue_rt_stack(rt_se);
  
  	for_each_sched_rt_entity(rt_se) {
  		struct rt_rq *rt_rq = group_rt_rq(rt_se);
  
  		if (rt_rq && rt_rq->rt_nr_running)
  			__enqueue_rt_entity(rt_se);
  	}
  }
  
  /*
   * Adding/removing a task to/from a priority array:
   */
  static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
  {
  	struct sched_rt_entity *rt_se = &p->rt;
  
  	if (wakeup)
  		rt_se->timeout = 0;
  	enqueue_rt_entity(rt_se);

  	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
  		enqueue_pushable_task(rq, p);
  	inc_cpu_load(rq, p->se.load.weight);
  }
  static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
  {
  	struct sched_rt_entity *rt_se = &p->rt;

  	update_curr_rt(rq);
  	dequeue_rt_entity(rt_se);

  	dequeue_pushable_task(rq, p);
  	dec_cpu_load(rq, p->se.load.weight);
  }
  
  /*
   * Put task to the end of the run list without the overhead of dequeue
   * followed by enqueue.
   */
  static void
  requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
  {
  	if (on_rt_rq(rt_se)) {
  		struct rt_prio_array *array = &rt_rq->active;
  		struct list_head *queue = array->queue + rt_se_prio(rt_se);
  
  		if (head)
  			list_move(&rt_se->run_list, queue);
  		else
  			list_move_tail(&rt_se->run_list, queue);
  	}
  }
  static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
  {
  	struct sched_rt_entity *rt_se = &p->rt;
  	struct rt_rq *rt_rq;

  	for_each_sched_rt_entity(rt_se) {
  		rt_rq = rt_rq_of_se(rt_se);
  		requeue_rt_entity(rt_rq, rt_se, head);
  	}
  }
  static void yield_task_rt(struct rq *rq)
  {
  	requeue_task_rt(rq, rq->curr, 0);
  }
  #ifdef CONFIG_SMP
  static int find_lowest_rq(struct task_struct *task);
  static int select_task_rq_rt(struct task_struct *p, int sync)
  {
  	struct rq *rq = task_rq(p);
  
  	/*
  	 * If the current task is an RT task, then
  	 * try to see if we can wake this RT task up on another
  	 * runqueue. Otherwise simply start this RT task
  	 * on its current runqueue.
  	 *
  	 * We want to avoid overloading runqueues. Even if
  	 * the RT task is of higher priority than the current RT task.
  	 * RT tasks behave differently than other tasks. If
  	 * one gets preempted, we try to push it off to another queue.
  	 * So trying to keep a preempting RT task on the same
  	 * cache hot CPU will force the running RT task to
  	 * a cold CPU. So we waste all the cache for the lower
  	 * RT task in hopes of saving some of an RT task
  	 * that is just being woken and probably will have
  	 * cold cache anyway.
  	 */
  	if (unlikely(rt_task(rq->curr)) &&
  	    (p->rt.nr_cpus_allowed > 1)) {
  		int cpu = find_lowest_rq(p);
  
  		return (cpu == -1) ? task_cpu(p) : cpu;
  	}
  
  	/*
  	 * Otherwise, just let it ride on the affined RQ and the
  	 * post-schedule router will push the preempted task away
  	 */
  	return task_cpu(p);
  }
  
  static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
  {
  	if (rq->curr->rt.nr_cpus_allowed == 1)
  		return;
  	if (p->rt.nr_cpus_allowed != 1
  	    && cpupri_find(&rq->rd->cpupri, p, NULL))
  		return;

  	if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
  		return;
  
  	/*
  	 * There appears to be other cpus that can accept
  	 * current and none to run 'p', so lets reschedule
  	 * to try and push current away:
  	 */
  	requeue_task_rt(rq, p, 1);
  	resched_task(rq->curr);
  }
  #endif /* CONFIG_SMP */
  /*
   * Preempt the current task with a newly woken task if needed:
   */
  static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync)
  {
  	if (p->prio < rq->curr->prio) {
  		resched_task(rq->curr);
  		return;
  	}
  
  #ifdef CONFIG_SMP
  	/*
  	 * If:
  	 *
  	 * - the newly woken task is of equal priority to the current task
  	 * - the newly woken task is non-migratable while current is migratable
  	 * - current will be preempted on the next reschedule
  	 *
  	 * we should check to see if current can readily move to a different
  	 * cpu.  If so, we will reschedule to allow the push logic to try
  	 * to move current somewhere else, making room for our non-migratable
  	 * task.
  	 */
  	if (p->prio == rq->curr->prio && !need_resched())
  		check_preempt_equal_prio(rq, p);
  #endif
  }
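  /*
   * Pick the first entity on the highest-priority non-empty queue of this
   * rt_rq.
   */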
  static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
  						   struct rt_rq *rt_rq)
  {
  	struct rt_prio_array *array = &rt_rq->active;
  	struct sched_rt_entity *next = NULL;
  	struct list_head *queue;
  	int idx;
  
  	idx = sched_find_first_bit(array->bitmap);
  	BUG_ON(idx >= MAX_RT_PRIO);
  
  	queue = array->queue + idx;
  	next = list_entry(queue->next, struct sched_rt_entity, run_list);

  	return next;
  }

  static struct task_struct *_pick_next_task_rt(struct rq *rq)
  {
  	struct sched_rt_entity *rt_se;
  	struct task_struct *p;
  	struct rt_rq *rt_rq;

  	rt_rq = &rq->rt;
  
  	if (unlikely(!rt_rq->rt_nr_running))
  		return NULL;
  	if (rt_rq_throttled(rt_rq))
  		return NULL;
  
  	do {
  		rt_se = pick_next_rt_entity(rq, rt_rq);
  		BUG_ON(!rt_se);
  		rt_rq = group_rt_rq(rt_se);
  	} while (rt_rq);
  
  	p = rt_task_of(rt_se);
  	p->se.exec_start = rq->clock;
  
  	return p;
  }
  
  static struct task_struct *pick_next_task_rt(struct rq *rq)
  {
  	struct task_struct *p = _pick_next_task_rt(rq);
  
  	/* The running task is never eligible for pushing */
  	if (p)
  		dequeue_pushable_task(rq, p);
  	return p;
  }
  static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
  {
  	update_curr_rt(rq);
  	p->se.exec_start = 0;
  
  	/*
  	 * The previous task needs to be made eligible for pushing
  	 * if it is still active
  	 */
  	if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
  		enqueue_pushable_task(rq, p);
  }
  #ifdef CONFIG_SMP

  /* Only try algorithms three times */
  #define RT_MAX_TRIES 3
  static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
  static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
  {
  	if (!task_running(rq, p) &&
  	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
  	    (p->rt.nr_cpus_allowed > 1))
  		return 1;
  	return 0;
  }
  /* Return the second highest RT task, NULL otherwise */
  static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
  {
  	struct task_struct *next = NULL;
  	struct sched_rt_entity *rt_se;
  	struct rt_prio_array *array;
  	struct rt_rq *rt_rq;
  	int idx;
  	for_each_leaf_rt_rq(rt_rq, rq) {
  		array = &rt_rq->active;
  		idx = sched_find_first_bit(array->bitmap);
   next_idx:
  		if (idx >= MAX_RT_PRIO)
  			continue;
  		if (next && next->prio < idx)
  			continue;
  		list_for_each_entry(rt_se, array->queue + idx, run_list) {
  			struct task_struct *p = rt_task_of(rt_se);
  			if (pick_rt_task(rq, p, cpu)) {
  				next = p;
  				break;
  			}
  		}
  		if (!next) {
  			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
  			goto next_idx;
  		}
  	}
  	return next;
  }
  static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

  static inline int pick_optimal_cpu(int this_cpu,
  				   const struct cpumask *mask)
  {
  	int first;
  
  	/* "this_cpu" is cheaper to preempt than a remote processor */
  	if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
  		return this_cpu;
  	first = cpumask_first(mask);
  	if (first < nr_cpu_ids)
  		return first;
  
  	return -1;
  }
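  /*
   * Find a CPU running lower-priority work that this task is allowed to
   * run on, preferring the task's current CPU and then CPUs that are
   * topologically close to it.
   */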
  
  static int find_lowest_rq(struct task_struct *task)
  {
  	struct sched_domain *sd;
  	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
  	int this_cpu = smp_processor_id();
  	int cpu      = task_cpu(task);
  	cpumask_var_t domain_mask;

  	if (task->rt.nr_cpus_allowed == 1)
  		return -1; /* No other targets possible */

  	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
  		return -1; /* No targets found */
  
  	/*
  	 * Only consider CPUs that are usable for migration.
  	 * I guess we might want to change cpupri_find() to ignore those
  	 * in the first place.
  	 */
  	cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
  
  	/*
  	 * At this point we have built a mask of cpus representing the
  	 * lowest priority tasks in the system.  Now we want to elect
  	 * the best one based on our affinity and topology.
  	 *
  	 * We prioritize the last cpu that the task executed on since
  	 * it is most likely cache-hot in that location.
  	 */
  	if (cpumask_test_cpu(cpu, lowest_mask))
  		return cpu;
  
  	/*
  	 * Otherwise, we consult the sched_domains span maps to figure
  	 * out which cpu is logically closest to our hot cache data.
  	 */
  	if (this_cpu == cpu)
  		this_cpu = -1; /* Skip this_cpu opt if the same */
  	if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
  		for_each_domain(cpu, sd) {
  			if (sd->flags & SD_WAKE_AFFINE) {
  				int best_cpu;

  				cpumask_and(domain_mask,
  					    sched_domain_span(sd),
  					    lowest_mask);

  				best_cpu = pick_optimal_cpu(this_cpu,
  							    domain_mask);

  				if (best_cpu != -1) {
  					free_cpumask_var(domain_mask);
  					return best_cpu;
  				}
  			}
  		}
  		free_cpumask_var(domain_mask);
  	}
  
  	/*
  	 * And finally, if there were no matches within the domains
  	 * just give the caller *something* to work with from the compatible
  	 * locations.
  	 */
  	return pick_optimal_cpu(this_cpu, lowest_mask);
  }
  
  /* Will lock the rq it finds */
  static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
  {
  	struct rq *lowest_rq = NULL;
  	int tries;
  	int cpu;

  	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
  		cpu = find_lowest_rq(task);
  		if ((cpu == -1) || (cpu == rq->cpu))
  			break;
  		lowest_rq = cpu_rq(cpu);
  		/* if the prio of this runqueue changed, try again */
  		if (double_lock_balance(rq, lowest_rq)) {
  			/*
  			 * We had to unlock the run queue. In
  			 * the meantime, the task could have
  			 * migrated already or had its affinity changed.
  			 * Also make sure that it wasn't scheduled on its rq.
  			 */
  			if (unlikely(task_rq(task) != rq ||
  				     !cpumask_test_cpu(lowest_rq->cpu,
  						       &task->cpus_allowed) ||
  				     task_running(rq, task) ||
  				     !task->se.on_rq)) {

  				spin_unlock(&lowest_rq->lock);
  				lowest_rq = NULL;
  				break;
  			}
  		}
  
  		/* If this rq is still suitable use it. */
e864c499d   Gregory Haskins   sched: track the ...
1167
  		if (lowest_rq->rt.highest_prio.curr > task->prio)
e8fa13626   Steven Rostedt   sched: add RT tas...
1168
1169
1170
  			break;
  
  		/* try again */
1b12bbc74   Peter Zijlstra   lockdep: re-annot...
1171
  		double_unlock_balance(rq, lowest_rq);
e8fa13626   Steven Rostedt   sched: add RT tas...
1172
1173
1174
1175
1176
  		lowest_rq = NULL;
  	}
  
  	return lowest_rq;
  }
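
/*
 * Editorial note -- illustrative sketch, not part of the original file.
 * The loop above is the canonical pattern for taking a second runqueue
 * lock: double_lock_balance() may drop rq->lock to respect lock ordering,
 * so every fact established before the call has to be re-checked after it.
 * Schematically, with still_valid() as a hypothetical stand-in for the
 * four checks above:
 *
 *	if (double_lock_balance(rq, other_rq)) {
 *		if (!still_valid(rq, task)) {
 *			spin_unlock(&other_rq->lock);
 *			other_rq = NULL;
 *			break;
 *		}
 *	}
 *
 * Only past this point are both rq->lock and other_rq->lock known to be
 * held with the original assumptions still true.
 */
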
  static inline int has_pushable_tasks(struct rq *rq)
  {
  	return !plist_head_empty(&rq->rt.pushable_tasks);
  }
  
  static struct task_struct *pick_next_pushable_task(struct rq *rq)
  {
  	struct task_struct *p;
  
  	if (!has_pushable_tasks(rq))
  		return NULL;
  
  	p = plist_first_entry(&rq->rt.pushable_tasks,
  			      struct task_struct, pushable_tasks);
  
  	BUG_ON(rq->cpu != task_cpu(p));
  	BUG_ON(task_current(rq, p));
  	BUG_ON(p->rt.nr_cpus_allowed <= 1);
  
  	BUG_ON(!p->se.on_rq);
  	BUG_ON(!rt_task(p));
  
  	return p;
  }
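
/*
 * Editorial note -- illustrative sketch, not part of the original file.
 * rq->rt.pushable_tasks is a plist (priority-sorted list), so
 * plist_first_entry() above always yields the highest-priority pushable
 * task (lowest numeric prio).  Presumably enqueue_pushable_task(), defined
 * earlier in this file, does something along these lines with the generic
 * <linux/plist.h> API:
 *
 *	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
 *	plist_node_init(&p->pushable_tasks, p->prio);
 *	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
 */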

/*
 * If the current CPU has more than one RT task, see if the non-running
 * task can migrate over to a CPU that is running a task of lesser
 * priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_pushable_task(rq);
	if (!next_task)
		return 0;

 retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that the next_task slipped in with a higher
	 * priority than current. If that's the case, just reschedule
	 * current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has migrated.
		 *
		 * We need to make sure that the task is still on the same
		 * run-queue and is also still the next task eligible for
		 * pushing.
		 */
		task = pick_next_pushable_task(rq);
		if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * If we get here, the task hasn't moved at all, but
			 * it has failed to push.  We will not try again,
			 * since the other cpus will pull from us when they
			 * are ready.
			 */
			dequeue_pushable_task(rq, next_task);
			goto out;
		}

		if (!task)
			/* No more tasks, just exit */
			goto out;

		/*
		 * Something has shifted, try again.
		 */
		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);

	resched_task(lowest_rq->curr);

	double_unlock_balance(rq, lowest_rq);

 out:
	put_task_struct(next_task);

	return 1;
}
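
/*
 * Editorial note -- illustrative sketch, not part of the original file.
 * push_rt_task() above shows the reference-counting discipline needed when
 * a task pointer is used across a point where rq->lock can be dropped:
 * pin the task first, revalidate after the lock may have been released,
 * and drop the reference on every exit path.  In outline:
 *
 *	get_task_struct(next_task);
 *	lowest_rq = find_lock_lowest_rq(next_task, rq);	(may drop rq->lock)
 *	...revalidate next_task against pick_next_pushable_task()...
 *	put_task_struct(next_task);			(on all exit paths)
 *
 * Without the get/put pair, next_task could be freed by another CPU while
 * this one is still dereferencing it.
 */
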
  static void push_rt_tasks(struct rq *rq)
  {
  	/* push_rt_task will return true if it moved an RT */
  	while (push_rt_task(rq))
  		;
  }

static int pull_rt_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p;
	struct rq *src_rq;

	if (likely(!rt_overloaded(this_rq)))
		return 0;

	for_each_cpu(cpu, this_rq->rd->rto_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * Don't bother taking the src_rq->lock if the next highest
		 * task is known to be lower-priority than our current task.
		 * This may look racy, but if this value is about to go
		 * logically higher, the src_rq will push this task away.
		 * And if it's going logically lower, we do not care.
		 */
		if (src_rq->rt.highest_prio.next >=
		    this_rq->rt.highest_prio.curr)
			continue;

		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * alter this_rq
		 */
		double_lock_balance(this_rq, src_rq);

		/*
		 * Are there still pullable RT tasks?
		 */
		if (src_rq->rt.rt_nr_running <= 1)
			goto skip;

		p = pick_next_highest_task_rt(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->se.on_rq);

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue
			 */
			if (p->prio < src_rq->curr->prio)
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 */
		}
 skip:
		double_unlock_balance(this_rq, src_rq);
	}

	return ret;
}
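
/*
 * Editorial note -- illustrative sketch, not part of the original file.
 * The comparisons above rely on the kernel's numeric priority convention:
 * a *lower* p->prio value means a *higher* scheduling priority (0..99 are
 * RT priorities, 100..139 are normal tasks).  So
 *
 *	p->prio < this_rq->rt.highest_prio.curr
 *
 * reads as "p is strictly more important than anything queued here", which
 * is exactly the condition that makes pulling p worthwhile.  For example, a
 * SCHED_FIFO task at user rt_priority 90 has p->prio == 9 and would be
 * pulled onto a runqueue whose highest queued RT task sits at p->prio 50.
 */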

static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
		pull_rt_task(rq);
}

/*
 * assumes rq->lock is held
 */
static int needs_post_schedule_rt(struct rq *rq)
{
	return has_pushable_tasks(rq);
}

static void post_schedule_rt(struct rq *rq)
{
	/*
	 * This is only called if needs_post_schedule_rt() indicates
	 * that we need to push tasks away.
	 */
	spin_lock_irq(&rq->lock);
	push_rt_tasks(rq);
	spin_unlock_irq(&rq->lock);
}

/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now
 */
static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    has_pushable_tasks(rq) &&
	    p->rt.nr_cpus_allowed > 1)
		push_rt_tasks(rq);
}

static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move,
		struct sched_domain *sd, enum cpu_idle_type idle,
		int *all_pinned, int *this_best_prio)
{
	/* don't touch RT tasks */
	return 0;
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		 struct sched_domain *sd, enum cpu_idle_type idle)
{
	/* don't touch RT tasks */
	return 0;
}

static void set_cpus_allowed_rt(struct task_struct *p,
				const struct cpumask *new_mask)
{
	int weight = cpumask_weight(new_mask);

	BUG_ON(!rt_task(p));

	/*
	 * Update the migration status of the RQ if we have an RT task
	 * which is running AND changing its weight value.
	 */
	if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
		struct rq *rq = task_rq(p);

		if (!task_current(rq, p)) {
			/*
			 * Make sure we dequeue this task from the pushable
			 * list before going further.  It will either remain
			 * off the list because it is no longer pushable, or
			 * it will be requeued.
			 */
			if (p->rt.nr_cpus_allowed > 1)
				dequeue_pushable_task(rq, p);

			/*
			 * Requeue if our weight is changing and still > 1
			 */
			if (weight > 1)
				enqueue_pushable_task(rq, p);

		}

		if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
			rq->rt.rt_nr_migratory++;
		} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
			BUG_ON(!rq->rt.rt_nr_migratory);
			rq->rt.rt_nr_migratory--;
		}

		update_rt_migration(&rq->rt);
	}

	cpumask_copy(&p->cpus_allowed, new_mask);
	p->rt.nr_cpus_allowed = weight;
}
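
/*
 * Editorial note -- illustrative sketch, not part of the original file.
 * This hook runs (under rq->lock) when a task's allowed-CPU mask changes,
 * e.g. via the sched_setaffinity() system call.  A minimal userspace
 * example that would funnel an RT task through this path:
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(0, &set);
 *	CPU_SET(1, &set);
 *	if (sched_setaffinity(pid, sizeof(set), &set))
 *		perror("sched_setaffinity");
 *
 * Going from a single-CPU mask to a two-CPU mask makes the task migratory,
 * which is what the rt_nr_migratory accounting above tracks.
 */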

/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_set_overload(rq);

	__enable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
}

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_clear_overload(rq);

	__disable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}
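
/*
 * Editorial note -- illustrative sketch, not part of the original file.
 * cpupri is the root-domain-wide "CPU priority" map consulted when looking
 * for a push target: each online CPU advertises the priority of what it is
 * currently running, and rq_offline_rt() withdraws the CPU by publishing
 * CPUPRI_INVALID.  The lookup side is expected to be a single call of
 * roughly this shape (a sketch, not a verbatim quote of sched_cpupri.c):
 *
 *	if (cpupri_find(&task_rq(p)->rd->cpupri, p, lowest_mask))
 *		lowest_mask now holds CPUs running lower-priority work;
 */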

/*
 * When switching from the rt queue, we bring ourselves to a position
 * where we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p,
			   int running)
{
	/*
	 * If there are other RT tasks then we will reschedule
	 * and the scheduling of the other RT tasks will handle
	 * the balancing. But if we are the last RT task
	 * we may need to handle the pulling of RT tasks
	 * now.
	 */
	if (!rq->rt.rt_nr_running)
		pull_rt_task(rq);
}

static inline void init_sched_rt_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
					GFP_KERNEL, cpu_to_node(i));
}

  #endif /* CONFIG_SMP */
  
  /*
   * When switching a task to RT, we may overload the runqueue
   * with RT tasks. In this case we try to push them off to
   * other runqueues.
   */
  static void switched_to_rt(struct rq *rq, struct task_struct *p,
  			   int running)
  {
  	int check_resched = 1;
  
  	/*
	 * If we are already running, then there's nothing
	 * that needs to be done. But if we are not running,
	 * we may need to preempt the current running task.
	 * If that current running task is also an RT task,
	 * then see if we can move to another run queue.
  	 */
  	if (!running) {
  #ifdef CONFIG_SMP
  		if (rq->rt.overloaded && push_rt_task(rq) &&
  		    /* Don't resched if we changed runqueues */
  		    rq != task_rq(p))
  			check_resched = 0;
  #endif /* CONFIG_SMP */
  		if (check_resched && p->prio < rq->curr->prio)
  			resched_task(rq->curr);
  	}
  }
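
/*
 * Editorial note -- illustrative sketch, not part of the original file.
 * switched_to_rt() fires when a task's policy becomes SCHED_FIFO/SCHED_RR,
 * typically via the sched_setscheduler() system call.  A minimal userspace
 * trigger looks like:
 *
 *	struct sched_param sp = { .sched_priority = 10 };
 *
 *	if (sched_setscheduler(0, SCHED_FIFO, &sp))
 *		perror("sched_setscheduler");
 *
 * If the converted task is not running and outranks rq->curr, the code
 * above either pushes it to another runqueue or reschedules the current
 * task.
 */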
  
  /*
   * Priority of the task has changed. This may cause
   * us to initiate a push or pull.
   */
  static void prio_changed_rt(struct rq *rq, struct task_struct *p,
  			    int oldprio, int running)
  {
  	if (running) {
  #ifdef CONFIG_SMP
  		/*
  		 * If our priority decreases while running, we
  		 * may need to pull tasks to this runqueue.
  		 */
  		if (oldprio < p->prio)
  			pull_rt_task(rq);
  		/*
  		 * If there's a higher priority task waiting to run
		 * then reschedule. Note, the above pull_rt_task
		 * can release the rq lock and p could migrate.
		 * Only reschedule if p is still on the same runqueue.
		 */
		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
			resched_task(p);
#else
		/* For UP simply resched on drop of prio */
		if (oldprio < p->prio)
			resched_task(p);
#endif /* CONFIG_SMP */
	} else {
		/*
		 * This task is not running, but if it is
		 * greater than the current running task
		 * then reschedule.
		 */
		if (p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

static void watchdog(struct rq *rq, struct task_struct *p)
{
	unsigned long soft, hard;

	if (!p->signal)
		return;

	soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
	hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;

	if (soft != RLIM_INFINITY) {
		unsigned long next;

		p->rt.timeout++;
		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (p->rt.timeout > next)
			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
	}
}
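
/*
 * Editorial note -- illustrative sketch, not part of the original file.
 * RLIMIT_RTTIME is the per-process budget, in microseconds of CPU time
 * consumed without blocking, that the watchdog above enforces for realtime
 * tasks.  From userspace it is configured with setrlimit(), for example:
 *
 *	struct rlimit rl = {
 *		.rlim_cur = 500000,
 *		.rlim_max = 1000000,
 *	};
 *
 *	if (setrlimit(RLIMIT_RTTIME, &rl))
 *		perror("setrlimit");
 *
 * The values above are only an example (0.5 s soft, 1 s hard); the actual
 * signal delivery (SIGXCPU at the soft limit, SIGKILL at the hard limit)
 * happens in the POSIX CPU timer code once sched_exp is reached, not in
 * this file.
 */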

static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_rt(rq);

	watchdog(rq, p);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of the queue if we are not the only element
	 * on the queue:
	 */
	if (p->rt.run_list.prev != p->rt.run_list.next) {
		requeue_task_rt(rq, p, 0);
		set_tsk_need_resched(p);
	}
}
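
/*
 * Editorial note -- illustrative sketch, not part of the original file.
 * Only SCHED_RR tasks round-robin; DEF_TIMESLICE is the fixed quantum they
 * are refilled with above.  Userspace can query the effective quantum with
 * sched_rr_get_interval(), e.g.:
 *
 *	struct timespec ts;
 *
 *	if (sched_rr_get_interval(0, &ts) == 0)
 *		printf("RR quantum: %ld.%09ld s\n", ts.tv_sec, ts.tv_nsec);
 *
 * SCHED_FIFO tasks report a zero interval and are never requeued on tick.
 */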

static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock;

	/* The running task is never eligible for pushing */
	dequeue_pushable_task(rq, p);
}

static const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_rt,

	.load_balance		= load_balance_rt,
	.move_one_task		= move_one_task_rt,
	.set_cpus_allowed       = set_cpus_allowed_rt,
	.rq_online              = rq_online_rt,
	.rq_offline             = rq_offline_rt,
	.pre_schedule		= pre_schedule_rt,
	.needs_post_schedule	= needs_post_schedule_rt,
	.post_schedule		= post_schedule_rt,
	.task_wake_up		= task_wake_up_rt,
	.switched_from		= switched_from_rt,
#endif

	.set_curr_task          = set_curr_task_rt,
	.task_tick		= task_tick_rt,

	.prio_changed		= prio_changed_rt,
	.switched_to		= switched_to_rt,
};
  
  #ifdef CONFIG_SCHED_DEBUG
  extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
  
  static void print_rt_stats(struct seq_file *m, int cpu)
  {
  	struct rt_rq *rt_rq;
  
  	rcu_read_lock();
  	for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
  		print_rt_rq(m, cpu, rt_rq);
  	rcu_read_unlock();
  }
  #endif /* CONFIG_SCHED_DEBUG */