kernel/sched_rt.c

  /*
   * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
   * policies)
   */
  #ifdef CONFIG_RT_GROUP_SCHED
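/*
 * With RT group scheduling an rt_se can represent either a single task
 * or a whole group; group entities carry their group's run-queue in
 * ->my_q, task entities do not.
 */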
  
  #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
  static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
  {
  #ifdef CONFIG_SCHED_DEBUG
  	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
  #endif
  	return container_of(rt_se, struct task_struct, rt);
  }
  static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
  {
  	return rt_rq->rq;
  }
  
  static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
  {
  	return rt_se->rt_rq;
  }
  
  #else /* CONFIG_RT_GROUP_SCHED */
  #define rt_entity_is_task(rt_se) (1)
  static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
  {
  	return container_of(rt_se, struct task_struct, rt);
  }
  static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
  {
  	return container_of(rt_rq, struct rq, rt);
  }
  
  static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
  {
  	struct task_struct *p = rt_task_of(rt_se);
  	struct rq *rq = task_rq(p);
  
  	return &rq->rt;
  }
  
  #endif /* CONFIG_RT_GROUP_SCHED */
  #ifdef CONFIG_SMP
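/*
 * RT overload state is kept per root domain: rto_mask records the CPUs
 * whose runqueues have more RT tasks queued than they can run (with at
 * least one of them able to migrate), and rto_count caches how many
 * such CPUs there are, so the common case only reads one atomic counter.
 */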

  static inline int rt_overloaded(struct rq *rq)
  {
  	return atomic_read(&rq->rd->rto_count);
  }

  static inline void rt_set_overload(struct rq *rq)
  {
  	if (!rq->online)
  		return;
  	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
  	/*
  	 * Make sure the mask is visible before we set
  	 * the overload count. That is checked to determine
  	 * if we should look at the mask. It would be a shame
  	 * if we looked at the mask, but the mask was not
  	 * updated yet.
  	 */
  	wmb();
  	atomic_inc(&rq->rd->rto_count);
  }

  static inline void rt_clear_overload(struct rq *rq)
  {
  	if (!rq->online)
  		return;
  	/* the order here really doesn't matter */
  	atomic_dec(&rq->rd->rto_count);
  	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
  }
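
/*
 * An rt_rq counts as overloaded when it has more than one task queued
 * and at least one of them may migrate; keep the root-domain overload
 * state in sync whenever those counts change.
 */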

  static void update_rt_migration(struct rt_rq *rt_rq)
  {
  	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
  		if (!rt_rq->overloaded) {
  			rt_set_overload(rq_of_rt_rq(rt_rq));
  			rt_rq->overloaded = 1;
  		}
  	} else if (rt_rq->overloaded) {
  		rt_clear_overload(rq_of_rt_rq(rt_rq));
  		rt_rq->overloaded = 0;
  	}
  }

  static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	if (!rt_entity_is_task(rt_se))
  		return;
  
  	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
  
  	rt_rq->rt_nr_total++;
  	if (rt_se->nr_cpus_allowed > 1)
  		rt_rq->rt_nr_migratory++;
  
  	update_rt_migration(rt_rq);
  }
  
  static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	if (!rt_entity_is_task(rt_se))
  		return;
  
  	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
  
  	rt_rq->rt_nr_total--;
  	if (rt_se->nr_cpus_allowed > 1)
  		rt_rq->rt_nr_migratory--;
  
  	update_rt_migration(rt_rq);
  }
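
/*
 * The pushable_tasks plist holds, ordered by priority, the queued RT
 * tasks that are not currently running and are allowed on more than
 * one CPU, i.e. the candidates for being pushed to another runqueue.
 */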
  static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
  {
  	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
  	plist_node_init(&p->pushable_tasks, p->prio);
  	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
  }
  
  static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
  {
  	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
  }
  static inline int has_pushable_tasks(struct rq *rq)
  {
  	return !plist_head_empty(&rq->rt.pushable_tasks);
  }
  #else
  static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
  {
  }
  static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
  {
  }
  static inline
  void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  }
  static inline
  void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  }

  #endif /* CONFIG_SMP */
  static inline int on_rt_rq(struct sched_rt_entity *rt_se)
  {
  	return !list_empty(&rt_se->run_list);
  }
  #ifdef CONFIG_RT_GROUP_SCHED
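/*
 * With RT group scheduling the bandwidth of an rt_rq comes from its
 * task group: sched_rt_runtime() is how long it may run per period and
 * sched_rt_period() is the length of that period.
 */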

  static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
  {
  	if (!rt_rq->tg)
  		return RUNTIME_INF;

  	return rt_rq->rt_runtime;
  }
  
  static inline u64 sched_rt_period(struct rt_rq *rt_rq)
  {
  	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
  }
  
  #define for_each_leaf_rt_rq(rt_rq, rq) \
  	list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

  #define for_each_sched_rt_entity(rt_se) \
  	for (; rt_se; rt_se = rt_se->parent)
  
  static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
  {
  	return rt_se->my_q;
  }
  static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
  static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
  static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
  {
  	int this_cpu = smp_processor_id();
  	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
  	struct sched_rt_entity *rt_se;
  
  	rt_se = rt_rq->tg->rt_se[this_cpu];

  	if (rt_rq->rt_nr_running) {
  		if (rt_se && !on_rt_rq(rt_se))
  			enqueue_rt_entity(rt_se, false);
  		if (rt_rq->highest_prio.curr < curr->prio)
  			resched_task(curr);
  	}
  }
  static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
  {
  	int this_cpu = smp_processor_id();
  	struct sched_rt_entity *rt_se;
  
  	rt_se = rt_rq->tg->rt_se[this_cpu];
  
  	if (rt_se && on_rt_rq(rt_se))
  		dequeue_rt_entity(rt_se);
  }
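
/*
 * A group rt_rq is throttled once it has consumed its runtime for the
 * current period.  An entity boosted by priority inheritance
 * (prio != normal_prio) keeps the group runnable even while throttled.
 */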
  static inline int rt_rq_throttled(struct rt_rq *rt_rq)
  {
  	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
  }
  
  static int rt_se_boosted(struct sched_rt_entity *rt_se)
  {
  	struct rt_rq *rt_rq = group_rt_rq(rt_se);
  	struct task_struct *p;
  
  	if (rt_rq)
  		return !!rt_rq->rt_nr_boosted;
  
  	p = rt_task_of(rt_se);
  	return p->prio != p->normal_prio;
  }
  #ifdef CONFIG_SMP
  static inline const struct cpumask *sched_rt_period_mask(void)
  {
  	return cpu_rq(smp_processor_id())->rd->span;
  }
  #else
  static inline const struct cpumask *sched_rt_period_mask(void)
  {
  	return cpu_online_mask;
  }
  #endif

  static inline
  struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
  {
  	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
  }

  static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
  {
  	return &rt_rq->tg->rt_bandwidth;
  }
  #else /* !CONFIG_RT_GROUP_SCHED */
  
  static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
  {
  	return rt_rq->rt_runtime;
  }
  
  static inline u64 sched_rt_period(struct rt_rq *rt_rq)
  {
  	return ktime_to_ns(def_rt_bandwidth.rt_period);
  }
  
  #define for_each_leaf_rt_rq(rt_rq, rq) \
  	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
  #define for_each_sched_rt_entity(rt_se) \
  	for (; rt_se; rt_se = NULL)
  
  static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
  {
  	return NULL;
  }
  static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
  {
  	if (rt_rq->rt_nr_running)
  		resched_task(rq_of_rt_rq(rt_rq)->curr);
  }
  static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
  {
  }
  static inline int rt_rq_throttled(struct rt_rq *rt_rq)
  {
  	return rt_rq->rt_throttled;
  }

  static inline const struct cpumask *sched_rt_period_mask(void)
  {
  	return cpu_online_mask;
  }
  
  static inline
  struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
  {
  	return &cpu_rq(cpu)->rt;
  }
  static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
  {
  	return &def_rt_bandwidth;
  }
  #endif /* CONFIG_RT_GROUP_SCHED */

  #ifdef CONFIG_SMP
  /*
   * We ran out of runtime, see if we can borrow some from our neighbours.
   */
  static int do_balance_runtime(struct rt_rq *rt_rq)
  {
  	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
  	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
  	int i, weight, more = 0;
  	u64 rt_period;
  	weight = cpumask_weight(rd->span);

  	raw_spin_lock(&rt_b->rt_runtime_lock);
  	rt_period = ktime_to_ns(rt_b->rt_period);
  	for_each_cpu(i, rd->span) {
  		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
  		s64 diff;
  
  		if (iter == rt_rq)
  			continue;
  		raw_spin_lock(&iter->rt_runtime_lock);
  		/*
  		 * Either all rqs have inf runtime and there's nothing to steal
  		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
  		 */
  		if (iter->rt_runtime == RUNTIME_INF)
  			goto next;
  		/*
  		 * From runqueues with spare time, take 1/n part of their
  		 * spare time, but no more than our period.
  		 */
  		diff = iter->rt_runtime - iter->rt_time;
  		if (diff > 0) {
  			diff = div_u64((u64)diff, weight);
  			if (rt_rq->rt_runtime + diff > rt_period)
  				diff = rt_period - rt_rq->rt_runtime;
  			iter->rt_runtime -= diff;
  			rt_rq->rt_runtime += diff;
  			more = 1;
  			if (rt_rq->rt_runtime == rt_period) {
  				raw_spin_unlock(&iter->rt_runtime_lock);
  				break;
  			}
  		}
  next:
  		raw_spin_unlock(&iter->rt_runtime_lock);
  	}
  	raw_spin_unlock(&rt_b->rt_runtime_lock);
  
  	return more;
  }

  /*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
   */
  static void __disable_runtime(struct rq *rq)
  {
  	struct root_domain *rd = rq->rd;
  	struct rt_rq *rt_rq;
  
  	if (unlikely(!scheduler_running))
  		return;
  
  	for_each_leaf_rt_rq(rt_rq, rq) {
  		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
  		s64 want;
  		int i;
  		raw_spin_lock(&rt_b->rt_runtime_lock);
  		raw_spin_lock(&rt_rq->rt_runtime_lock);
  		/*
  		 * Either we're all inf and nobody needs to borrow, or we're
  		 * already disabled and thus have nothing to do, or we have
  		 * exactly the right amount of runtime to take out.
  		 */
  		if (rt_rq->rt_runtime == RUNTIME_INF ||
  				rt_rq->rt_runtime == rt_b->rt_runtime)
  			goto balanced;
  		raw_spin_unlock(&rt_rq->rt_runtime_lock);

  		/*
  		 * Calculate the difference between what we started out with
		 * and what we currently have, that's the amount of runtime
		 * we lent out and now have to reclaim.
  		 */
  		want = rt_b->rt_runtime - rt_rq->rt_runtime;
  		/*
  		 * Greedy reclaim, take back as much as we can.
  		 */
  		for_each_cpu(i, rd->span) {
  			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
  			s64 diff;
  			/*
  			 * Can't reclaim from ourselves or disabled runqueues.
  			 */
  			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
  				continue;
  			raw_spin_lock(&iter->rt_runtime_lock);
  			if (want > 0) {
  				diff = min_t(s64, iter->rt_runtime, want);
  				iter->rt_runtime -= diff;
  				want -= diff;
  			} else {
  				iter->rt_runtime -= want;
  				want -= want;
  			}
  			raw_spin_unlock(&iter->rt_runtime_lock);
  
  			if (!want)
  				break;
  		}
  		raw_spin_lock(&rt_rq->rt_runtime_lock);
  		/*
  		 * We cannot be left wanting - that would mean some runtime
  		 * leaked out of the system.
  		 */
  		BUG_ON(want);
  balanced:
  		/*
  		 * Disable all the borrow logic by pretending we have inf
  		 * runtime - in which case borrowing doesn't make sense.
  		 */
  		rt_rq->rt_runtime = RUNTIME_INF;
  		raw_spin_unlock(&rt_rq->rt_runtime_lock);
  		raw_spin_unlock(&rt_b->rt_runtime_lock);
  	}
  }
  
  static void disable_runtime(struct rq *rq)
  {
  	unsigned long flags;
  	raw_spin_lock_irqsave(&rq->lock, flags);
  	__disable_runtime(rq);
  	raw_spin_unlock_irqrestore(&rq->lock, flags);
  }
  
  static void __enable_runtime(struct rq *rq)
  {
  	struct rt_rq *rt_rq;
  
  	if (unlikely(!scheduler_running))
  		return;
  	/*
  	 * Reset each runqueue's bandwidth settings
  	 */
  	for_each_leaf_rt_rq(rt_rq, rq) {
  		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
  		raw_spin_lock(&rt_b->rt_runtime_lock);
  		raw_spin_lock(&rt_rq->rt_runtime_lock);
  		rt_rq->rt_runtime = rt_b->rt_runtime;
  		rt_rq->rt_time = 0;
  		rt_rq->rt_throttled = 0;
  		raw_spin_unlock(&rt_rq->rt_runtime_lock);
  		raw_spin_unlock(&rt_b->rt_runtime_lock);
  	}
  }
  
  static void enable_runtime(struct rq *rq)
  {
  	unsigned long flags;
  	raw_spin_lock_irqsave(&rq->lock, flags);
  	__enable_runtime(rq);
  	raw_spin_unlock_irqrestore(&rq->lock, flags);
  }
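
/*
 * Called with rt_rq->rt_runtime_lock held; the lock is dropped around
 * do_balance_runtime() so that runtime can be borrowed from the other
 * runqueues in the root domain.
 */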
  static int balance_runtime(struct rt_rq *rt_rq)
  {
  	int more = 0;
  
  	if (rt_rq->rt_time > rt_rq->rt_runtime) {
  		raw_spin_unlock(&rt_rq->rt_runtime_lock);
  		more = do_balance_runtime(rt_rq);
  		raw_spin_lock(&rt_rq->rt_runtime_lock);
  	}
  
  	return more;
  }
  #else /* !CONFIG_SMP */
  static inline int balance_runtime(struct rt_rq *rt_rq)
  {
  	return 0;
  }
  #endif /* CONFIG_SMP */
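
/*
 * Called from the rt_bandwidth period timer: refresh rt_time for every
 * rt_rq in the period mask, unthrottle the ones that are back under
 * their runtime and re-enqueue them.  Returns 1 if all of them were idle.
 */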

  static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
  {
  	int i, idle = 1;
  	const struct cpumask *span;

  	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
  		return 1;
  
  	span = sched_rt_period_mask();
  	for_each_cpu(i, span) {
  		int enqueue = 0;
  		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
  		struct rq *rq = rq_of_rt_rq(rt_rq);
  		raw_spin_lock(&rq->lock);
  		if (rt_rq->rt_time) {
  			u64 runtime;
  			raw_spin_lock(&rt_rq->rt_runtime_lock);
  			if (rt_rq->rt_throttled)
  				balance_runtime(rt_rq);
  			runtime = rt_rq->rt_runtime;
  			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
  			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
  				rt_rq->rt_throttled = 0;
  				enqueue = 1;
  			}
  			if (rt_rq->rt_time || rt_rq->rt_nr_running)
  				idle = 0;
  			raw_spin_unlock(&rt_rq->rt_runtime_lock);
  		} else if (rt_rq->rt_nr_running)
  			idle = 0;
  
  		if (enqueue)
  			sched_rt_rq_enqueue(rt_rq);
  		raw_spin_unlock(&rq->lock);
  	}
  
  	return idle;
  }

  static inline int rt_se_prio(struct sched_rt_entity *rt_se)
  {
  #ifdef CONFIG_RT_GROUP_SCHED
  	struct rt_rq *rt_rq = group_rt_rq(rt_se);
  
  	if (rt_rq)
  		return rt_rq->highest_prio.curr;
  #endif
  
  	return rt_task_of(rt_se)->prio;
  }
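
/*
 * Check whether this rt_rq has used more than its runtime for the
 * current period, trying to borrow runtime from other CPUs first.
 * Returns 1 when it had to be throttled and the running task should
 * be rescheduled.
 */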
  static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
  {
  	u64 runtime = sched_rt_runtime(rt_rq);

  	if (rt_rq->rt_throttled)
  		return rt_rq_throttled(rt_rq);

  	if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
  		return 0;
  	balance_runtime(rt_rq);
  	runtime = sched_rt_runtime(rt_rq);
  	if (runtime == RUNTIME_INF)
  		return 0;

  	if (rt_rq->rt_time > runtime) {
  		rt_rq->rt_throttled = 1;
  		if (rt_rq_throttled(rt_rq)) {
  			sched_rt_rq_dequeue(rt_rq);
  			return 1;
  		}
  	}
  
  	return 0;
  }
  /*
   * Update the current task's runtime statistics. Skip current tasks that
   * are not in our scheduling class.
   */
  static void update_curr_rt(struct rq *rq)
  {
  	struct task_struct *curr = rq->curr;
  	struct sched_rt_entity *rt_se = &curr->rt;
  	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
  	u64 delta_exec;
  
  	if (!task_has_rt_policy(curr))
  		return;
  	delta_exec = rq->clock - curr->se.exec_start;
  	if (unlikely((s64)delta_exec < 0))
  		delta_exec = 0;
  
  	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
  
  	curr->se.sum_exec_runtime += delta_exec;
  	account_group_exec_runtime(curr, delta_exec);
  	curr->se.exec_start = rq->clock;
  	cpuacct_charge(curr, delta_exec);

  	sched_rt_avg_update(rq, delta_exec);
  	if (!rt_bandwidth_enabled())
  		return;
  	for_each_sched_rt_entity(rt_se) {
  		rt_rq = rt_rq_of_se(rt_se);
  		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
  			raw_spin_lock(&rt_rq->rt_runtime_lock);
  			rt_rq->rt_time += delta_exec;
  			if (sched_rt_runtime_exceeded(rt_rq))
  				resched_task(curr);
  			raw_spin_unlock(&rt_rq->rt_runtime_lock);
  		}
  	}
  }
  #if defined CONFIG_SMP
  
  static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
  
  static inline int next_prio(struct rq *rq)
  {
  	struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
  
  	if (next && rt_prio(next->prio))
  		return next->prio;
  	else
  		return MAX_RT_PRIO;
  }

  static void
  inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
  {
  	struct rq *rq = rq_of_rt_rq(rt_rq);

  	if (prio < prev_prio) {

  		/*
  		 * If the new task is higher in priority than anything on the
  		 * run-queue, we know that the previous high becomes our
  		 * next-highest.
  		 */
  		rt_rq->highest_prio.next = prev_prio;
  
  		if (rq->online)
  			cpupri_set(&rq->rd->cpupri, rq->cpu, prio);

  	} else if (prio == rt_rq->highest_prio.curr)
  		/*
  		 * If the next task is equal in priority to the highest on
  		 * the run-queue, then we implicitly know that the next highest
  		 * task cannot be any lower than current
  		 */
  		rt_rq->highest_prio.next = prio;
  	else if (prio < rt_rq->highest_prio.next)
  		/*
  		 * Otherwise, we need to recompute next-highest
  		 */
  		rt_rq->highest_prio.next = next_prio(rq);
  }

  static void
  dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
  {
  	struct rq *rq = rq_of_rt_rq(rt_rq);

  	if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
  		rt_rq->highest_prio.next = next_prio(rq);
  
  	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
  		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
  }
  #else /* CONFIG_SMP */
  static inline
  void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
  static inline
  void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
  
  #endif /* CONFIG_SMP */

  #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
  static void
  inc_rt_prio(struct rt_rq *rt_rq, int prio)
  {
  	int prev_prio = rt_rq->highest_prio.curr;
  
  	if (prio < prev_prio)
  		rt_rq->highest_prio.curr = prio;
  
  	inc_rt_prio_smp(rt_rq, prio, prev_prio);
  }
  
  static void
  dec_rt_prio(struct rt_rq *rt_rq, int prio)
  {
  	int prev_prio = rt_rq->highest_prio.curr;
  	if (rt_rq->rt_nr_running) {

  		WARN_ON(prio < prev_prio);

  		/*
  		 * This may have been our highest task, and therefore
  		 * we may have some recomputation to do
  		 */
  		if (prio == prev_prio) {
  			struct rt_prio_array *array = &rt_rq->active;
  
  			rt_rq->highest_prio.curr =
  				sched_find_first_bit(array->bitmap);
  		}
  	} else
  		rt_rq->highest_prio.curr = MAX_RT_PRIO;

  	dec_rt_prio_smp(rt_rq, prio, prev_prio);
  }

  #else
  
  static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
  static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
  
  #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

  #ifdef CONFIG_RT_GROUP_SCHED
  
  static void
  inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	if (rt_se_boosted(rt_se))
  		rt_rq->rt_nr_boosted++;
  
  	if (rt_rq->tg)
  		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
  }
  
  static void
  dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	if (rt_se_boosted(rt_se))
  		rt_rq->rt_nr_boosted--;
  
  	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
  }
  
  #else /* CONFIG_RT_GROUP_SCHED */
  
  static void
  inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	start_rt_bandwidth(&def_rt_bandwidth);
  }
  
  static inline
  void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
  
  #endif /* CONFIG_RT_GROUP_SCHED */
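
/*
 * Bookkeeping for adding/removing an entity to/from an rt_rq: keep
 * rt_nr_running, the highest-prio tracking, the migration counts and
 * the group/bandwidth state up to date.
 */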
  
  static inline
  void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	int prio = rt_se_prio(rt_se);
  
  	WARN_ON(!rt_prio(prio));
  	rt_rq->rt_nr_running++;
  
  	inc_rt_prio(rt_rq, prio);
  	inc_rt_migration(rt_se, rt_rq);
  	inc_rt_group(rt_se, rt_rq);
  }
  
  static inline
  void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
  	WARN_ON(!rt_rq->rt_nr_running);
  	rt_rq->rt_nr_running--;
  
  	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
  	dec_rt_migration(rt_se, rt_rq);
  	dec_rt_group(rt_se, rt_rq);
  }
  static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
  {
  	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
  	struct rt_prio_array *array = &rt_rq->active;
  	struct rt_rq *group_rq = group_rt_rq(rt_se);
  	struct list_head *queue = array->queue + rt_se_prio(rt_se);

  	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
  	 * active members.
  	 */
  	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
  		return;

  	if (head)
  		list_add(&rt_se->run_list, queue);
  	else
  		list_add_tail(&rt_se->run_list, queue);
  	__set_bit(rt_se_prio(rt_se), array->bitmap);

  	inc_rt_tasks(rt_se, rt_rq);
  }
  static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
  {
  	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
  	struct rt_prio_array *array = &rt_rq->active;
  
  	list_del_init(&rt_se->run_list);
  	if (list_empty(array->queue + rt_se_prio(rt_se)))
  		__clear_bit(rt_se_prio(rt_se), array->bitmap);
  
  	dec_rt_tasks(rt_se, rt_rq);
  }
  
  /*
   * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
   */
  static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
  {
  	struct sched_rt_entity *back = NULL;

  	for_each_sched_rt_entity(rt_se) {
  		rt_se->back = back;
  		back = rt_se;
  	}
  
  	for (rt_se = back; rt_se; rt_se = rt_se->back) {
  		if (on_rt_rq(rt_se))
  			__dequeue_rt_entity(rt_se);
  	}
  }
  static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
  {
  	dequeue_rt_stack(rt_se);
  	for_each_sched_rt_entity(rt_se)
  		__enqueue_rt_entity(rt_se, head);
  }
  
  static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
  {
  	dequeue_rt_stack(rt_se);
  
  	for_each_sched_rt_entity(rt_se) {
  		struct rt_rq *rt_rq = group_rt_rq(rt_se);
  
  		if (rt_rq && rt_rq->rt_nr_running)
  			__enqueue_rt_entity(rt_se, false);
  	}
  }
  
  /*
   * Adding/removing a task to/from a priority array:
   */
  static void
  enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, bool head)
  {
  	struct sched_rt_entity *rt_se = &p->rt;
  
  	if (wakeup)
  		rt_se->timeout = 0;
  	enqueue_rt_entity(rt_se, head);

  	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
  		enqueue_pushable_task(rq, p);
  }
  static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
  {
  	struct sched_rt_entity *rt_se = &p->rt;

  	update_curr_rt(rq);
  	dequeue_rt_entity(rt_se);

  	dequeue_pushable_task(rq, p);
  }
  
  /*
   * Put task to the end of the run list without the overhead of dequeue
   * followed by enqueue.
   */
  static void
  requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
  {
  	if (on_rt_rq(rt_se)) {
  		struct rt_prio_array *array = &rt_rq->active;
  		struct list_head *queue = array->queue + rt_se_prio(rt_se);
  
  		if (head)
  			list_move(&rt_se->run_list, queue);
  		else
  			list_move_tail(&rt_se->run_list, queue);
  	}
  }
  static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
  {
  	struct sched_rt_entity *rt_se = &p->rt;
  	struct rt_rq *rt_rq;

  	for_each_sched_rt_entity(rt_se) {
  		rt_rq = rt_rq_of_se(rt_se);
  		requeue_rt_entity(rt_rq, rt_se, head);
  	}
  }
  static void yield_task_rt(struct rq *rq)
  {
  	requeue_task_rt(rq, rq->curr, 0);
  }
  #ifdef CONFIG_SMP
  static int find_lowest_rq(struct task_struct *task);
  static int select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
  {
  	struct rq *rq = task_rq(p);
  	if (sd_flag != SD_BALANCE_WAKE)
  		return smp_processor_id();
  	/*
  	 * If the current task is an RT task, then
  	 * try to see if we can wake this RT task up on another
  	 * runqueue. Otherwise simply start this RT task
  	 * on its current runqueue.
  	 *
  	 * We want to avoid overloading runqueues. Even if
  	 * the RT task is of higher priority than the current RT task.
  	 * RT tasks behave differently than other tasks. If
  	 * one gets preempted, we try to push it off to another queue.
  	 * So trying to keep a preempting RT task on the same
  	 * cache hot CPU will force the running RT task to
  	 * a cold CPU. So we waste all the cache for the lower
  	 * RT task in hopes of saving some of a RT task
  	 * that is just being woken and probably will have
  	 * cold cache anyway.
  	 */
  	if (unlikely(rt_task(rq->curr)) &&
  	    (p->rt.nr_cpus_allowed > 1)) {
  		int cpu = find_lowest_rq(p);
  
  		return (cpu == -1) ? task_cpu(p) : cpu;
  	}
  
  	/*
  	 * Otherwise, just let it ride on the affined RQ and the
  	 * post-schedule router will push the preempted task away
  	 */
  	return task_cpu(p);
  }
  
  static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
  {
  	if (rq->curr->rt.nr_cpus_allowed == 1)
  		return;
  	if (p->rt.nr_cpus_allowed != 1
  	    && cpupri_find(&rq->rd->cpupri, p, NULL))
  		return;

  	if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
  		return;
  
  	/*
	 * There appear to be other cpus that can accept
	 * current and none to run 'p', so let's reschedule
  	 * to try and push current away:
  	 */
  	requeue_task_rt(rq, p, 1);
  	resched_task(rq->curr);
  }
  #endif /* CONFIG_SMP */
  /*
   * Preempt the current task with a newly woken task if needed:
   */
  static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
  {
  	if (p->prio < rq->curr->prio) {
  		resched_task(rq->curr);
  		return;
  	}
  
  #ifdef CONFIG_SMP
  	/*
  	 * If:
  	 *
  	 * - the newly woken task is of equal priority to the current task
  	 * - the newly woken task is non-migratable while current is migratable
  	 * - current will be preempted on the next reschedule
  	 *
  	 * we should check to see if current can readily move to a different
  	 * cpu.  If so, we will reschedule to allow the push logic to try
  	 * to move current somewhere else, making room for our non-migratable
  	 * task.
  	 */
  	if (p->prio == rq->curr->prio && !need_resched())
  		check_preempt_equal_prio(rq, p);
  #endif
  }
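
/*
 * Pick the first entity on the highest-priority non-empty queue of
 * this rt_rq's priority array.
 */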
  static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
  						   struct rt_rq *rt_rq)
  {
  	struct rt_prio_array *array = &rt_rq->active;
  	struct sched_rt_entity *next = NULL;
  	struct list_head *queue;
  	int idx;
  
  	idx = sched_find_first_bit(array->bitmap);
  	BUG_ON(idx >= MAX_RT_PRIO);
  
  	queue = array->queue + idx;
  	next = list_entry(queue->next, struct sched_rt_entity, run_list);

  	return next;
  }

  static struct task_struct *_pick_next_task_rt(struct rq *rq)
  {
  	struct sched_rt_entity *rt_se;
  	struct task_struct *p;
  	struct rt_rq *rt_rq;

  	rt_rq = &rq->rt;
  
  	if (unlikely(!rt_rq->rt_nr_running))
  		return NULL;
  	if (rt_rq_throttled(rt_rq))
  		return NULL;
  
  	do {
  		rt_se = pick_next_rt_entity(rq, rt_rq);
  		BUG_ON(!rt_se);
  		rt_rq = group_rt_rq(rt_se);
  	} while (rt_rq);
  
  	p = rt_task_of(rt_se);
  	p->se.exec_start = rq->clock;
  
  	return p;
  }
  
  static struct task_struct *pick_next_task_rt(struct rq *rq)
  {
  	struct task_struct *p = _pick_next_task_rt(rq);
  
  	/* The running task is never eligible for pushing */
  	if (p)
  		dequeue_pushable_task(rq, p);
  #ifdef CONFIG_SMP
  	/*
  	 * We detect this state here so that we can avoid taking the RQ
  	 * lock again later if there is no need to push
  	 */
  	rq->post_schedule = has_pushable_tasks(rq);
  #endif

  	return p;
  }
  static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
  {
  	update_curr_rt(rq);
  	p->se.exec_start = 0;
  
  	/*
  	 * The previous task needs to be made eligible for pushing
  	 * if it is still active
  	 */
  	if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
  		enqueue_pushable_task(rq, p);
  }
  #ifdef CONFIG_SMP

  /* Only try algorithms three times */
  #define RT_MAX_TRIES 3
  static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
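
/*
 * A task is a candidate for migration if it is not currently running,
 * is allowed on the given CPU (or cpu < 0) and may run on more than
 * one CPU.
 */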
  static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
  {
  	if (!task_running(rq, p) &&
  	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
  	    (p->rt.nr_cpus_allowed > 1))
  		return 1;
  	return 0;
  }
  /* Return the second highest RT task, NULL otherwise */
  static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
  {
  	struct task_struct *next = NULL;
  	struct sched_rt_entity *rt_se;
  	struct rt_prio_array *array;
  	struct rt_rq *rt_rq;
  	int idx;
  	for_each_leaf_rt_rq(rt_rq, rq) {
  		array = &rt_rq->active;
  		idx = sched_find_first_bit(array->bitmap);
   next_idx:
  		if (idx >= MAX_RT_PRIO)
  			continue;
  		if (next && next->prio < idx)
  			continue;
  		list_for_each_entry(rt_se, array->queue + idx, run_list) {
  			struct task_struct *p;
  
  			if (!rt_entity_is_task(rt_se))
  				continue;
  
  			p = rt_task_of(rt_se);
  			if (pick_rt_task(rq, p, cpu)) {
  				next = p;
  				break;
  			}
  		}
  		if (!next) {
  			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
  			goto next_idx;
  		}
  	}
  	return next;
  }
  static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

  static int find_lowest_rq(struct task_struct *task)
  {
  	struct sched_domain *sd;
  	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
  	int this_cpu = smp_processor_id();
  	int cpu      = task_cpu(task);

  	if (task->rt.nr_cpus_allowed == 1)
  		return -1; /* No other targets possible */

  	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
  		return -1; /* No targets found */
  
  	/*
  	 * At this point we have built a mask of cpus representing the
  	 * lowest priority tasks in the system.  Now we want to elect
  	 * the best one based on our affinity and topology.
  	 *
  	 * We prioritize the last cpu that the task executed on since
  	 * it is most likely cache-hot in that location.
  	 */
96f874e26   Rusty Russell   sched: convert re...
1109
  	if (cpumask_test_cpu(cpu, lowest_mask))
6e1254d2c   Gregory Haskins   sched: optimize R...
1110
1111
1112
1113
1114
1115
  		return cpu;
  
  	/*
  	 * Otherwise, we consult the sched_domains span maps to figure
  	 * out which cpu is logically closest to our hot cache data.
  	 */
e2c880630   Rusty Russell   cpumask: Simplify...
1116
1117
  	if (!cpumask_test_cpu(this_cpu, lowest_mask))
  		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
6e1254d2c   Gregory Haskins   sched: optimize R...
1118

e2c880630   Rusty Russell   cpumask: Simplify...
1119
1120
1121
  	for_each_domain(cpu, sd) {
  		if (sd->flags & SD_WAKE_AFFINE) {
  			int best_cpu;
6e1254d2c   Gregory Haskins   sched: optimize R...
1122

e2c880630   Rusty Russell   cpumask: Simplify...
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
  			/*
  			 * "this_cpu" is cheaper to preempt than a
  			 * remote processor.
  			 */
  			if (this_cpu != -1 &&
  			    cpumask_test_cpu(this_cpu, sched_domain_span(sd)))
  				return this_cpu;
  
  			best_cpu = cpumask_first_and(lowest_mask,
  						     sched_domain_span(sd));
  			if (best_cpu < nr_cpu_ids)
  				return best_cpu;
6e1254d2c   Gregory Haskins   sched: optimize R...
1135
1136
1137
1138
1139
1140
1141
1142
  		}
  	}
  
  	/*
  	 * And finally, if there were no matches within the domains
  	 * just give the caller *something* to work with from the compatible
  	 * locations.
  	 */
e2c880630   Rusty Russell   cpumask: Simplify...
1143
1144
1145
1146
1147
1148
1149
  	if (this_cpu != -1)
  		return this_cpu;
  
  	cpu = cpumask_any(lowest_mask);
  	if (cpu < nr_cpu_ids)
  		return cpu;
  	return -1;
07b4032c9   Gregory Haskins   sched: break out ...
1150
1151
1152
  }
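
  /*
   * Selection order used above, summarized (illustrative): cpupri_find()
   * fills 'lowest_mask' with the CPUs currently running the lowest-priority
   * work that 'task' may run on.  We then prefer, in order:
   *
   *  1) the task's previous CPU, if it is in the mask (likely cache-hot);
   *  2) this_cpu, if it is in the mask and shares an SD_WAKE_AFFINE domain
   *     with the previous CPU;
   *  3) the first mask CPU inside such a domain;
   *  4) this_cpu, then any CPU left in the mask, as a last resort.
   */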
  
  /* Will lock the rq it finds */
4df64c0bf   Ingo Molnar   sched: clean up f...
1153
  static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
07b4032c9   Gregory Haskins   sched: break out ...
1154
1155
  {
  	struct rq *lowest_rq = NULL;
07b4032c9   Gregory Haskins   sched: break out ...
1156
  	int tries;
4df64c0bf   Ingo Molnar   sched: clean up f...
1157
  	int cpu;
e8fa13626   Steven Rostedt   sched: add RT tas...
1158

07b4032c9   Gregory Haskins   sched: break out ...
1159
1160
  	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
  		cpu = find_lowest_rq(task);
2de0b4639   Gregory Haskins   sched: RT balanci...
1161
  		if ((cpu == -1) || (cpu == rq->cpu))
e8fa13626   Steven Rostedt   sched: add RT tas...
1162
  			break;
07b4032c9   Gregory Haskins   sched: break out ...
1163
  		lowest_rq = cpu_rq(cpu);
e8fa13626   Steven Rostedt   sched: add RT tas...
1164
  		/* if the prio of this runqueue changed, try again */
07b4032c9   Gregory Haskins   sched: break out ...
1165
  		if (double_lock_balance(rq, lowest_rq)) {
e8fa13626   Steven Rostedt   sched: add RT tas...
1166
1167
1168
1169
1170
1171
  			/*
  			 * We had to unlock the run queue. In
  			 * the meantime, the task could have
  			 * migrated already or had its affinity changed.
  			 * Also make sure that it wasn't scheduled on its rq.
  			 */
07b4032c9   Gregory Haskins   sched: break out ...
1172
  			if (unlikely(task_rq(task) != rq ||
96f874e26   Rusty Russell   sched: convert re...
1173
1174
  				     !cpumask_test_cpu(lowest_rq->cpu,
  						       &task->cpus_allowed) ||
07b4032c9   Gregory Haskins   sched: break out ...
1175
  				     task_running(rq, task) ||
e8fa13626   Steven Rostedt   sched: add RT tas...
1176
  				     !task->se.on_rq)) {
4df64c0bf   Ingo Molnar   sched: clean up f...
1177

05fa785cf   Thomas Gleixner   sched: Convert rq...
1178
  				raw_spin_unlock(&lowest_rq->lock);
e8fa13626   Steven Rostedt   sched: add RT tas...
1179
1180
1181
1182
1183
1184
  				lowest_rq = NULL;
  				break;
  			}
  		}
  
  		/* If this rq is still suitable use it. */
e864c499d   Gregory Haskins   sched: track the ...
1185
  		if (lowest_rq->rt.highest_prio.curr > task->prio)
e8fa13626   Steven Rostedt   sched: add RT tas...
1186
1187
1188
  			break;
  
  		/* try again */
1b12bbc74   Peter Zijlstra   lockdep: re-annot...
1189
  		double_unlock_balance(rq, lowest_rq);
e8fa13626   Steven Rostedt   sched: add RT tas...
1190
1191
1192
1193
1194
  		lowest_rq = NULL;
  	}
  
  	return lowest_rq;
  }
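
  /*
   * Note on the retry bound (illustrative): find_lowest_rq() runs without
   * holding the target's lock, so by the time both runqueue locks are taken
   * the chosen CPU may already be busy with equal or higher priority work.
   * Rather than chase a moving target forever, we give up after
   * RT_MAX_TRIES attempts and rely on a later push or pull to try again.
   */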
917b627d4   Gregory Haskins   sched: create "pu...
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
  static struct task_struct *pick_next_pushable_task(struct rq *rq)
  {
  	struct task_struct *p;
  
  	if (!has_pushable_tasks(rq))
  		return NULL;
  
  	p = plist_first_entry(&rq->rt.pushable_tasks,
  			      struct task_struct, pushable_tasks);
  
  	BUG_ON(rq->cpu != task_cpu(p));
  	BUG_ON(task_current(rq, p));
  	BUG_ON(p->rt.nr_cpus_allowed <= 1);
  
  	BUG_ON(!p->se.on_rq);
  	BUG_ON(!rt_task(p));
  
  	return p;
  }
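
  /*
   * rq->rt.pushable_tasks is a plist kept sorted by task priority (lowest
   * numeric prio, i.e. highest RT priority, first), so plist_first_entry()
   * above returns the most urgent pushable task without any scanning.
   */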
e8fa13626   Steven Rostedt   sched: add RT tas...
1214
1215
1216
1217
1218
  /*
   * If the current CPU has more than one RT task, see if the non-running
   * task can migrate over to a CPU that is running a task
   * of lesser priority.
   */
697f0a487   Gregory Haskins   sched: clean up t...
1219
  static int push_rt_task(struct rq *rq)
e8fa13626   Steven Rostedt   sched: add RT tas...
1220
1221
1222
  {
  	struct task_struct *next_task;
  	struct rq *lowest_rq;
e8fa13626   Steven Rostedt   sched: add RT tas...
1223

a22d7fc18   Gregory Haskins   sched: wake-balan...
1224
1225
  	if (!rq->rt.overloaded)
  		return 0;
917b627d4   Gregory Haskins   sched: create "pu...
1226
  	next_task = pick_next_pushable_task(rq);
e8fa13626   Steven Rostedt   sched: add RT tas...
1227
1228
1229
1230
  	if (!next_task)
  		return 0;
  
   retry:
697f0a487   Gregory Haskins   sched: clean up t...
1231
  	if (unlikely(next_task == rq->curr)) {
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1232
  		WARN_ON(1);
e8fa13626   Steven Rostedt   sched: add RT tas...
1233
  		return 0;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1234
  	}
e8fa13626   Steven Rostedt   sched: add RT tas...
1235
1236
1237
1238
1239
1240
  
  	/*
  	 * It's possible that the next_task slipped in with a
  	 * higher priority than current. If that's the case,
  	 * just reschedule current.
  	 */
697f0a487   Gregory Haskins   sched: clean up t...
1241
1242
  	if (unlikely(next_task->prio < rq->curr->prio)) {
  		resched_task(rq->curr);
e8fa13626   Steven Rostedt   sched: add RT tas...
1243
1244
  		return 0;
  	}
697f0a487   Gregory Haskins   sched: clean up t...
1245
  	/* We might release rq lock */
e8fa13626   Steven Rostedt   sched: add RT tas...
1246
1247
1248
  	get_task_struct(next_task);
  
  	/* find_lock_lowest_rq locks the rq if found */
697f0a487   Gregory Haskins   sched: clean up t...
1249
  	lowest_rq = find_lock_lowest_rq(next_task, rq);
e8fa13626   Steven Rostedt   sched: add RT tas...
1250
1251
1252
  	if (!lowest_rq) {
  		struct task_struct *task;
  		/*
697f0a487   Gregory Haskins   sched: clean up t...
1253
  		 * find_lock_lowest_rq releases rq->lock
1563513d3   Gregory Haskins   RT: fix push_rt_t...
1254
1255
1256
1257
1258
  		 * so it is possible that next_task has migrated.
  		 *
  		 * We need to make sure that the task is still on the same
  		 * run-queue and is also still the next task eligible for
  		 * pushing.
e8fa13626   Steven Rostedt   sched: add RT tas...
1259
  		 */
917b627d4   Gregory Haskins   sched: create "pu...
1260
  		task = pick_next_pushable_task(rq);
1563513d3   Gregory Haskins   RT: fix push_rt_t...
1261
1262
1263
1264
1265
1266
1267
1268
1269
  		if (task_cpu(next_task) == rq->cpu && task == next_task) {
  			/*
  			 * If we get here, the task hasn't moved at all, but
  			 * it has failed to push.  We will not try again,
  			 * since the other cpus will pull from us when they
  			 * are ready.
  			 */
  			dequeue_pushable_task(rq, next_task);
  			goto out;
e8fa13626   Steven Rostedt   sched: add RT tas...
1270
  		}
917b627d4   Gregory Haskins   sched: create "pu...
1271

1563513d3   Gregory Haskins   RT: fix push_rt_t...
1272
1273
1274
  		if (!task)
  			/* No more tasks, just exit */
  			goto out;
917b627d4   Gregory Haskins   sched: create "pu...
1275
  		/*
1563513d3   Gregory Haskins   RT: fix push_rt_t...
1276
  		 * Something has shifted, try again.
917b627d4   Gregory Haskins   sched: create "pu...
1277
  		 */
1563513d3   Gregory Haskins   RT: fix push_rt_t...
1278
1279
1280
  		put_task_struct(next_task);
  		next_task = task;
  		goto retry;
e8fa13626   Steven Rostedt   sched: add RT tas...
1281
  	}
697f0a487   Gregory Haskins   sched: clean up t...
1282
  	deactivate_task(rq, next_task, 0);
e8fa13626   Steven Rostedt   sched: add RT tas...
1283
1284
1285
1286
  	set_task_cpu(next_task, lowest_rq->cpu);
  	activate_task(lowest_rq, next_task, 0);
  
  	resched_task(lowest_rq->curr);
1b12bbc74   Peter Zijlstra   lockdep: re-annot...
1287
  	double_unlock_balance(rq, lowest_rq);
e8fa13626   Steven Rostedt   sched: add RT tas...
1288

e8fa13626   Steven Rostedt   sched: add RT tas...
1289
1290
  out:
  	put_task_struct(next_task);
917b627d4   Gregory Haskins   sched: create "pu...
1291
  	return 1;
e8fa13626   Steven Rostedt   sched: add RT tas...
1292
  }
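
  /*
   * Example scenario (illustrative): CPU0 runs a prio-10 RT task with a
   * prio-20 RT task queued behind it, while CPU1 runs only SCHED_OTHER
   * work.  push_rt_task() on CPU0 picks the queued prio-20 task, finds
   * CPU1 via find_lock_lowest_rq(), migrates it there and reschedules
   * CPU1, so neither RT task waits behind the other.
   */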
e8fa13626   Steven Rostedt   sched: add RT tas...
1293
1294
1295
1296
1297
1298
  static void push_rt_tasks(struct rq *rq)
  {
  	/* push_rt_task() will return true if it moved an RT task */
  	while (push_rt_task(rq))
  		;
  }
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1299
1300
  static int pull_rt_task(struct rq *this_rq)
  {
80bf3171d   Ingo Molnar   sched: clean up p...
1301
  	int this_cpu = this_rq->cpu, ret = 0, cpu;
a8728944e   Gregory Haskins   sched: use highes...
1302
  	struct task_struct *p;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1303
  	struct rq *src_rq;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1304

637f50851   Gregory Haskins   sched: only balan...
1305
  	if (likely(!rt_overloaded(this_rq)))
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1306
  		return 0;
c6c4927b2   Rusty Russell   sched: convert st...
1307
  	for_each_cpu(cpu, this_rq->rd->rto_mask) {
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1308
1309
1310
1311
  		if (this_cpu == cpu)
  			continue;
  
  		src_rq = cpu_rq(cpu);
74ab8e4f6   Gregory Haskins   sched: use highes...
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
  
  		/*
  		 * Don't bother taking the src_rq->lock if the next highest
  		 * task is known to be lower-priority than our current task.
  		 * This may look racy, but if this value is about to go
  		 * logically higher, the src_rq will push this task away.
  		 * And if it's going logically lower, we do not care.
  		 */
  		if (src_rq->rt.highest_prio.next >=
  		    this_rq->rt.highest_prio.curr)
  			continue;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1323
1324
1325
  		/*
  		 * We can potentially drop this_rq's lock in
  		 * double_lock_balance, and another CPU could
a8728944e   Gregory Haskins   sched: use highes...
1326
  		 * alter this_rq
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1327
  		 */
a8728944e   Gregory Haskins   sched: use highes...
1328
  		double_lock_balance(this_rq, src_rq);
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1329
1330
1331
1332
  
  		/*
  		 * Are there still pullable RT tasks?
  		 */
614ee1f61   Mike Galbraith   sched: pull_rt_ta...
1333
1334
  		if (src_rq->rt.rt_nr_running <= 1)
  			goto skip;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1335

f65eda4f7   Steven Rostedt   sched: pull RT ta...
1336
1337
1338
1339
1340
1341
  		p = pick_next_highest_task_rt(src_rq, this_cpu);
  
  		/*
  		 * Do we have an RT task that preempts
  		 * the to-be-scheduled task?
  		 */
a8728944e   Gregory Haskins   sched: use highes...
1342
  		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1343
1344
1345
1346
1347
1348
1349
1350
1351
  			WARN_ON(p == src_rq->curr);
  			WARN_ON(!p->se.on_rq);
  
  			/*
  			 * There's a chance that p is higher in priority
  			 * than what's currently running on its cpu.
  			 * This is just that p is waking up and hasn't
  			 * had a chance to schedule. We only pull
  			 * p if it is lower in priority than the
a8728944e   Gregory Haskins   sched: use highes...
1352
  			 * current task on the run queue
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1353
  			 */
a8728944e   Gregory Haskins   sched: use highes...
1354
  			if (p->prio < src_rq->curr->prio)
614ee1f61   Mike Galbraith   sched: pull_rt_ta...
1355
  				goto skip;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
  
  			ret = 1;
  
  			deactivate_task(src_rq, p, 0);
  			set_task_cpu(p, this_cpu);
  			activate_task(this_rq, p, 0);
  			/*
  			 * We continue with the search, just in
  			 * case there's an even higher prio task
  			 * in another runqueue. (low likelihood
  			 * but possible)
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1367
  			 */
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1368
  		}
614ee1f61   Mike Galbraith   sched: pull_rt_ta...
1369
   skip:
1b12bbc74   Peter Zijlstra   lockdep: re-annot...
1370
  		double_unlock_balance(this_rq, src_rq);
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1371
1372
1373
1374
  	}
  
  	return ret;
  }
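
  /*
   * Pulling is the mirror image of pushing (illustrative summary): it runs
   * when this CPU is about to switch to lower-priority work, and it takes
   * the best pullable RT task from each overloaded peer whose queued
   * priority beats what we are about to run.
   */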
9a897c5a6   Steven Rostedt   sched: RT-balance...
1375
  static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1376
1377
  {
  	/* Try to pull RT tasks here if we lower this rq's prio */
e864c499d   Gregory Haskins   sched: track the ...
1378
  	if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1379
1380
  		pull_rt_task(rq);
  }
9a897c5a6   Steven Rostedt   sched: RT-balance...
1381
  static void post_schedule_rt(struct rq *rq)
e8fa13626   Steven Rostedt   sched: add RT tas...
1382
  {
967fc0467   Gregory Haskins   sched: add sched_...
1383
  	push_rt_tasks(rq);
e8fa13626   Steven Rostedt   sched: add RT tas...
1384
  }
8ae121ac8   Gregory Haskins   sched: fix RT tas...
1385
1386
1387
1388
  /*
   * If we are not running and we are not going to reschedule soon, we should
   * try to push tasks away now.
   */
efbbd05a5   Peter Zijlstra   sched: Add pre an...
1389
  static void task_woken_rt(struct rq *rq, struct task_struct *p)
4642dafdf   Steven Rostedt   sched: push RT ta...
1390
  {
9a897c5a6   Steven Rostedt   sched: RT-balance...
1391
  	if (!task_running(rq, p) &&
8ae121ac8   Gregory Haskins   sched: fix RT tas...
1392
  	    !test_tsk_need_resched(rq->curr) &&
917b627d4   Gregory Haskins   sched: create "pu...
1393
  	    has_pushable_tasks(rq) &&
777c2f389   Gregory Haskins   sched: only try t...
1394
  	    p->rt.nr_cpus_allowed > 1)
4642dafdf   Steven Rostedt   sched: push RT ta...
1395
1396
  		push_rt_tasks(rq);
  }
cd8ba7cd9   Mike Travis   sched: add new se...
1397
  static void set_cpus_allowed_rt(struct task_struct *p,
96f874e26   Rusty Russell   sched: convert re...
1398
  				const struct cpumask *new_mask)
73fe6aae8   Gregory Haskins   sched: add RT-bal...
1399
  {
96f874e26   Rusty Russell   sched: convert re...
1400
  	int weight = cpumask_weight(new_mask);
73fe6aae8   Gregory Haskins   sched: add RT-bal...
1401
1402
1403
1404
1405
1406
1407
  
  	BUG_ON(!rt_task(p));
  
  	/*
  	 * Update the migration status of the RQ if we have an RT task
  	 * which is running AND changing its weight value.
  	 */
6f505b164   Peter Zijlstra   sched: rt group s...
1408
  	if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
73fe6aae8   Gregory Haskins   sched: add RT-bal...
1409
  		struct rq *rq = task_rq(p);
917b627d4   Gregory Haskins   sched: create "pu...
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
  		if (!task_current(rq, p)) {
  			/*
  			 * Make sure we dequeue this task from the pushable list
  			 * before going further.  It will either remain off of
  			 * the list because we are no longer pushable, or it
  			 * will be requeued.
  			 */
  			if (p->rt.nr_cpus_allowed > 1)
  				dequeue_pushable_task(rq, p);
  
  			/*
  			 * Requeue if our weight is changing and still > 1
  			 */
  			if (weight > 1)
  				enqueue_pushable_task(rq, p);
  
  		}
6f505b164   Peter Zijlstra   sched: rt group s...
1427
  		if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
73fe6aae8   Gregory Haskins   sched: add RT-bal...
1428
  			rq->rt.rt_nr_migratory++;
6f505b164   Peter Zijlstra   sched: rt group s...
1429
  		} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
73fe6aae8   Gregory Haskins   sched: add RT-bal...
1430
1431
1432
  			BUG_ON(!rq->rt.rt_nr_migratory);
  			rq->rt.rt_nr_migratory--;
  		}
398a153b1   Gregory Haskins   sched: fix build ...
1433
  		update_rt_migration(&rq->rt);
73fe6aae8   Gregory Haskins   sched: add RT-bal...
1434
  	}
96f874e26   Rusty Russell   sched: convert re...
1435
  	cpumask_copy(&p->cpus_allowed, new_mask);
6f505b164   Peter Zijlstra   sched: rt group s...
1436
  	p->rt.nr_cpus_allowed = weight;
73fe6aae8   Gregory Haskins   sched: add RT-bal...
1437
  }
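
  /*
   * Illustrative userspace sketch (not part of this file): this hook is
   * typically reached when an RT task's affinity mask is changed, e.g.
   * via sched_setaffinity(2):
   *
   *	#define _GNU_SOURCE
   *	#include <sched.h>
   *
   *	cpu_set_t set;
   *	CPU_ZERO(&set);
   *	CPU_SET(2, &set);
   *	CPU_SET(3, &set);
   *	sched_setaffinity(0, sizeof(set), &set);
   *
   * With two allowed CPUs the task counts towards rt_nr_migratory and can
   * stay on the pushable list; shrinking the mask to a single CPU removes
   * it from both.
   */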
deeeccd41   Ingo Molnar   sched: clean up o...
1438

bdd7c81b4   Ingo Molnar   sched: fix sched_...
1439
  /* Assumes rq->lock is held */
1f11eb6a8   Gregory Haskins   sched: fix cpupri...
1440
  static void rq_online_rt(struct rq *rq)
bdd7c81b4   Ingo Molnar   sched: fix sched_...
1441
1442
1443
  {
  	if (rq->rt.overloaded)
  		rt_set_overload(rq);
6e0534f27   Gregory Haskins   sched: use a 2-d ...
1444

7def2be1d   Peter Zijlstra   sched: fix hotplu...
1445
  	__enable_runtime(rq);
e864c499d   Gregory Haskins   sched: track the ...
1446
  	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
bdd7c81b4   Ingo Molnar   sched: fix sched_...
1447
1448
1449
  }
  
  /* Assumes rq->lock is held */
1f11eb6a8   Gregory Haskins   sched: fix cpupri...
1450
  static void rq_offline_rt(struct rq *rq)
bdd7c81b4   Ingo Molnar   sched: fix sched_...
1451
1452
1453
  {
  	if (rq->rt.overloaded)
  		rt_clear_overload(rq);
6e0534f27   Gregory Haskins   sched: use a 2-d ...
1454

7def2be1d   Peter Zijlstra   sched: fix hotplu...
1455
  	__disable_runtime(rq);
6e0534f27   Gregory Haskins   sched: use a 2-d ...
1456
  	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
bdd7c81b4   Ingo Molnar   sched: fix sched_...
1457
  }
cb4698450   Steven Rostedt   sched: RT-balance...
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
  
  /*
   * When we switch away from the rt queue, we bring ourselves to a position
   * where we might want to pull RT tasks from other runqueues.
   */
  static void switched_from_rt(struct rq *rq, struct task_struct *p,
  			   int running)
  {
  	/*
  	 * If there are other RT tasks then we will reschedule
  	 * and the scheduling of the other RT tasks will handle
  	 * the balancing. But if we are the last RT task
  	 * we may need to handle the pulling of RT tasks
  	 * now.
  	 */
  	if (!rq->rt.rt_nr_running)
  		pull_rt_task(rq);
  }
3d8cbdf86   Rusty Russell   sched: convert lo...
1476
1477
1478
1479
1480
1481
  
  static inline void init_sched_rt_class(void)
  {
  	unsigned int i;
  
  	for_each_possible_cpu(i)
eaa958402   Yinghai Lu   cpumask: alloc ze...
1482
  		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
6ca09dfc9   Mike Travis   sched: put back s...
1483
  					GFP_KERNEL, cpu_to_node(i));
3d8cbdf86   Rusty Russell   sched: convert lo...
1484
  }
cb4698450   Steven Rostedt   sched: RT-balance...
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
  #endif /* CONFIG_SMP */
  
  /*
   * When switching a task to RT, we may overload the runqueue
   * with RT tasks. In this case we try to push them off to
   * other runqueues.
   */
  static void switched_to_rt(struct rq *rq, struct task_struct *p,
  			   int running)
  {
  	int check_resched = 1;
  
  	/*
  	 * If we are already running, then there's nothing
  	 * that needs to be done. But if we are not running
  	 * we may need to preempt the current running task.
  	 * If that current running task is also an RT task
  	 * then see if we can move to another run queue.
  	 */
  	if (!running) {
  #ifdef CONFIG_SMP
  		if (rq->rt.overloaded && push_rt_task(rq) &&
  		    /* Don't resched if we changed runqueues */
  		    rq != task_rq(p))
  			check_resched = 0;
  #endif /* CONFIG_SMP */
  		if (check_resched && p->prio < rq->curr->prio)
  			resched_task(rq->curr);
  	}
  }
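
  /*
   * Illustrative userspace sketch (not part of this file): a task normally
   * enters this class through sched_setscheduler(2), which is what ends up
   * invoking switched_to_rt() above:
   *
   *	#include <sched.h>
   *
   *	struct sched_param sp = { .sched_priority = 50 };
   *	sched_setscheduler(0, SCHED_FIFO, &sp);
   *
   * If the task is not running, it either preempts rq->curr or, on an
   * already overloaded runqueue, may be pushed straight to another CPU by
   * push_rt_task().
   */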
  
  /*
   * Priority of the task has changed. This may cause
   * us to initiate a push or pull.
   */
  static void prio_changed_rt(struct rq *rq, struct task_struct *p,
  			    int oldprio, int running)
  {
  	if (running) {
  #ifdef CONFIG_SMP
  		/*
  		 * If our priority decreases while running, we
  		 * may need to pull tasks to this runqueue.
  		 */
  		if (oldprio < p->prio)
  			pull_rt_task(rq);
  		/*
  		 * If there's a higher priority task waiting to run
6fa46fa52   Steven Rostedt   sched: balance RT...
1533
1534
1535
  		 * then reschedule. Note, the above pull_rt_task
  		 * can release the rq lock and p could migrate.
  		 * Only reschedule if p is still on the same runqueue.
cb4698450   Steven Rostedt   sched: RT-balance...
1536
  		 */
e864c499d   Gregory Haskins   sched: track the ...
1537
  		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
cb4698450   Steven Rostedt   sched: RT-balance...
1538
1539
1540
1541
1542
  			resched_task(p);
  #else
  		/* For UP simply resched on drop of prio */
  		if (oldprio < p->prio)
  			resched_task(p);
e8fa13626   Steven Rostedt   sched: add RT tas...
1543
  #endif /* CONFIG_SMP */
cb4698450   Steven Rostedt   sched: RT-balance...
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
  	} else {
  		/*
  		 * This task is not running, but if it is
  		 * greater than the current running task
  		 * then reschedule.
  		 */
  		if (p->prio < rq->curr->prio)
  			resched_task(rq->curr);
  	}
  }
78f2c7db6   Peter Zijlstra   sched: SCHED_FIFO...
1554
1555
1556
1557
1558
1559
  static void watchdog(struct rq *rq, struct task_struct *p)
  {
  	unsigned long soft, hard;
  
  	if (!p->signal)
  		return;
78d7d407b   Jiri Slaby   kernel core: use ...
1560
1561
1562
  	/* max may change after cur was read; this will be fixed next tick */
  	soft = task_rlimit(p, RLIMIT_RTTIME);
  	hard = task_rlimit_max(p, RLIMIT_RTTIME);
78f2c7db6   Peter Zijlstra   sched: SCHED_FIFO...
1563
1564
1565
1566
1567
1568
  
  	if (soft != RLIM_INFINITY) {
  		unsigned long next;
  
  		p->rt.timeout++;
  		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
5a52dd500   Peter Zijlstra   sched: rt-watchdo...
1569
  		if (p->rt.timeout > next)
f06febc96   Frank Mayhar   timers: fix itime...
1570
  			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
78f2c7db6   Peter Zijlstra   sched: SCHED_FIFO...
1571
1572
  	}
  }
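
  /*
   * Illustrative userspace sketch (not part of this file): the budget the
   * watchdog above enforces is RLIMIT_RTTIME, expressed in microseconds of
   * CPU time an RT task may consume without blocking.  Exceeding the soft
   * limit delivers SIGXCPU; reaching the hard limit delivers SIGKILL:
   *
   *	#include <sys/resource.h>
   *
   *	struct rlimit rl = { .rlim_cur = 500000, .rlim_max = 2000000 };
   *	setrlimit(RLIMIT_RTTIME, &rl);
   *
   * The DIV_ROUND_UP() above converts that microsecond budget into
   * scheduler ticks before comparing it against p->rt.timeout.
   */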
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1573

8f4d37ec0   Peter Zijlstra   sched: high-res p...
1574
  static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1575
  {
67e2be023   Peter Zijlstra   sched: rt: accoun...
1576
  	update_curr_rt(rq);
78f2c7db6   Peter Zijlstra   sched: SCHED_FIFO...
1577
  	watchdog(rq, p);
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1578
1579
1580
1581
1582
1583
  	/*
  	 * RR tasks need a special form of timeslice management.
  	 * FIFO tasks have no timeslices.
  	 */
  	if (p->policy != SCHED_RR)
  		return;
fa717060f   Peter Zijlstra   sched: sched_rt_e...
1584
  	if (--p->rt.time_slice)
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1585
  		return;
fa717060f   Peter Zijlstra   sched: sched_rt_e...
1586
  	p->rt.time_slice = DEF_TIMESLICE;
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1587

98fbc7985   Dmitry Adamushko   sched: optimize t...
1588
1589
1590
1591
  	/*
  	 * Requeue to the end of the queue if we are not the only element
  	 * on the queue:
  	 */
fa717060f   Peter Zijlstra   sched: sched_rt_e...
1592
  	if (p->rt.run_list.prev != p->rt.run_list.next) {
7ebefa8ce   Dmitry Adamushko   sched: rework of ...
1593
  		requeue_task_rt(rq, p, 0);
98fbc7985   Dmitry Adamushko   sched: optimize t...
1594
1595
  		set_tsk_need_resched(p);
  	}
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1596
  }
83b699ed2   Srivatsa Vaddagiri   sched: revert rec...
1597
1598
1599
1600
1601
  static void set_curr_task_rt(struct rq *rq)
  {
  	struct task_struct *p = rq->curr;
  
  	p->se.exec_start = rq->clock;
917b627d4   Gregory Haskins   sched: create "pu...
1602
1603
1604
  
  	/* The running task is never eligible for pushing */
  	dequeue_pushable_task(rq, p);
83b699ed2   Srivatsa Vaddagiri   sched: revert rec...
1605
  }
6d686f456   H Hartley Sweeten   sched: Don't expo...
1606
  static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
0d721cead   Peter Williams   sched: Simplify s...
1607
1608
1609
1610
1611
1612
1613
1614
1615
  {
  	/*
  	 * Time slice is 0 for SCHED_FIFO tasks
  	 */
  	if (task->policy == SCHED_RR)
  		return DEF_TIMESLICE;
  	else
  		return 0;
  }
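
  /*
   * Illustrative userspace sketch (not part of this file): the value
   * returned above (DEF_TIMESLICE ticks for SCHED_RR, 0 for SCHED_FIFO)
   * is what sched_rr_get_interval(2) reports, converted to a timespec:
   *
   *	#include <sched.h>
   *	#include <stdio.h>
   *
   *	struct timespec ts;
   *	sched_rr_get_interval(0, &ts);
   *	printf("RR slice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
   */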
2abdad0a4   Harvey Harrison   sched: make rt_sc...
1616
  static const struct sched_class rt_sched_class = {
5522d5d5f   Ingo Molnar   sched: mark sched...
1617
  	.next			= &fair_sched_class,
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1618
1619
1620
1621
1622
1623
1624
1625
  	.enqueue_task		= enqueue_task_rt,
  	.dequeue_task		= dequeue_task_rt,
  	.yield_task		= yield_task_rt,
  
  	.check_preempt_curr	= check_preempt_curr_rt,
  
  	.pick_next_task		= pick_next_task_rt,
  	.put_prev_task		= put_prev_task_rt,
681f3e685   Peter Williams   sched: isolate SM...
1626
  #ifdef CONFIG_SMP
4ce72a2c0   Li Zefan   sched: add CONFIG...
1627
  	.select_task_rq		= select_task_rq_rt,
73fe6aae8   Gregory Haskins   sched: add RT-bal...
1628
  	.set_cpus_allowed       = set_cpus_allowed_rt,
1f11eb6a8   Gregory Haskins   sched: fix cpupri...
1629
1630
  	.rq_online              = rq_online_rt,
  	.rq_offline             = rq_offline_rt,
9a897c5a6   Steven Rostedt   sched: RT-balance...
1631
1632
  	.pre_schedule		= pre_schedule_rt,
  	.post_schedule		= post_schedule_rt,
efbbd05a5   Peter Zijlstra   sched: Add pre an...
1633
  	.task_woken		= task_woken_rt,
cb4698450   Steven Rostedt   sched: RT-balance...
1634
  	.switched_from		= switched_from_rt,
681f3e685   Peter Williams   sched: isolate SM...
1635
  #endif
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1636

83b699ed2   Srivatsa Vaddagiri   sched: revert rec...
1637
  	.set_curr_task          = set_curr_task_rt,
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1638
  	.task_tick		= task_tick_rt,
cb4698450   Steven Rostedt   sched: RT-balance...
1639

0d721cead   Peter Williams   sched: Simplify s...
1640
  	.get_rr_interval	= get_rr_interval_rt,
cb4698450   Steven Rostedt   sched: RT-balance...
1641
1642
  	.prio_changed		= prio_changed_rt,
  	.switched_to		= switched_to_rt,
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1643
  };
ada18de2e   Peter Zijlstra   sched: debug: add...
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
  
  #ifdef CONFIG_SCHED_DEBUG
  extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
  
  static void print_rt_stats(struct seq_file *m, int cpu)
  {
  	struct rt_rq *rt_rq;
  
  	rcu_read_lock();
  	for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
  		print_rt_rq(m, cpu, rt_rq);
  	rcu_read_unlock();
  }
55e12e5e7   Dhaval Giani   sched: make sched...
1657
  #endif /* CONFIG_SCHED_DEBUG */
0e3900e6d   Rusty Russell   sched: convert lo...
1658