kernel/sched_rt.c

  /*
   * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
   * policies)
   */
#ifdef CONFIG_RT_GROUP_SCHED

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

  static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
  {
  	return rt_rq->rq;
  }
  
  static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
  {
  	return rt_se->rt_rq;
  }
  
  #else /* CONFIG_RT_GROUP_SCHED */
#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

  static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
  {
  	return container_of(rt_rq, struct rq, rt);
  }
  
  static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
  {
  	struct task_struct *p = rt_task_of(rt_se);
  	struct rq *rq = task_rq(p);
  
  	return &rq->rt;
  }
  
  #endif /* CONFIG_RT_GROUP_SCHED */
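
/*
 * SMP support: a runqueue is considered "RT overloaded" once it has more
 * than one runnable RT task and at least one of them can migrate.  The
 * root domain keeps a count (rd->rto_count) and a mask (rd->rto_mask) of
 * such runqueues so that other CPUs know where pulling an RT task is
 * worthwhile; see update_rt_migration() below.
 */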
#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
  	/*
  	 * Make sure the mask is visible before we set
  	 * the overload count. That is checked to determine
  	 * if we should look at the mask. It would be a shame
  	 * if we looked at the mask, but the mask was not
  	 * updated yet.
  	 */
  	wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
  	if (!rt_entity_is_task(rt_se))
  		return;
  
  	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
  
  	rt_rq->rt_nr_total++;
  	if (rt_se->nr_cpus_allowed > 1)
  		rt_rq->rt_nr_migratory++;
  
  	update_rt_migration(rt_rq);
  }
  
  static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	if (!rt_entity_is_task(rt_se))
  		return;
  
  	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
  
  	rt_rq->rt_nr_total--;
  	if (rt_se->nr_cpus_allowed > 1)
  		rt_rq->rt_nr_migratory--;
  
  	update_rt_migration(rt_rq);
  }
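
/*
 * rq->rt.pushable_tasks is a plist, ordered by task priority, of the RT
 * tasks on this runqueue that are allowed to run on more than one CPU.
 * The push side of the push/pull balancer (further down in this file)
 * takes its candidates from this list.
 */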
  static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
  {
  	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
  	plist_node_init(&p->pushable_tasks, p->prio);
  	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
  }
  
  static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
  {
  	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
  }
  static inline int has_pushable_tasks(struct rq *rq)
  {
  	return !plist_head_empty(&rq->rt.pushable_tasks);
  }
  #else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

  #endif /* CONFIG_SMP */
  static inline int on_rt_rq(struct sched_rt_entity *rt_se)
  {
  	return !list_empty(&rt_se->run_list);
  }

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

  	return rt_rq->rt_runtime;
  }
  
  static inline u64 sched_rt_period(struct rt_rq *rt_rq)
  {
  	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

  static inline struct task_group *next_task_group(struct task_group *tg)
  {
  	do {
  		tg = list_entry_rcu(tg->list.next,
  			typeof(struct task_group), list);
  	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));
  
  	if (&tg->list == &task_groups)
  		tg = NULL;
  
  	return tg;
  }
  
  #define for_each_rt_rq(rt_rq, iter, rq)					\
  	for (iter = container_of(&task_groups, typeof(*iter), list);	\
  		(iter = next_task_group(iter)) &&			\
  		(rt_rq = iter->rt_rq[cpu_of(rq)]);)

  static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
  {
  	list_add_rcu(&rt_rq->leaf_rt_rq_list,
  			&rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
  }
  
  static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
  {
  	list_del_rcu(&rt_rq->leaf_rt_rq_list);
  }

#define for_each_leaf_rt_rq(rt_rq, rq) \
	list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

  #define for_each_sched_rt_entity(rt_se) \
  	for (; rt_se; rt_se = rt_se->parent)
  
  static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
  {
  	return rt_se->my_q;
  }

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_rq->rt_nr_running) {
		if (rt_se && !on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, false);
		if (rt_rq->highest_prio.curr < curr->prio)
			resched_task(curr);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_se && on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se);
}

  static inline int rt_rq_throttled(struct rt_rq *rt_rq)
  {
  	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
  }
  
  static int rt_se_boosted(struct sched_rt_entity *rt_se)
  {
  	struct rt_rq *rt_rq = group_rt_rq(rt_se);
  	struct task_struct *p;
  
  	if (rt_rq)
  		return !!rt_rq->rt_nr_boosted;
  
  	p = rt_task_of(rt_se);
  	return p->prio != p->normal_prio;
  }

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_rq(smp_processor_id())->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

  typedef struct rt_rq *rt_rq_iter_t;
  
  #define for_each_rt_rq(rt_rq, iter, rq) \
  	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
  static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
  {
  }
  
  static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
  {
  }

#define for_each_leaf_rt_rq(rt_rq, rq) \
	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

  #define for_each_sched_rt_entity(rt_se) \
  	for (; rt_se; rt_se = NULL)
  
  static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
  {
  	return NULL;
  }

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_running)
		resched_task(rq_of_rt_rq(rt_rq)->curr);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
  }
  
  static inline
  struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
  {
  	return &cpu_rq(cpu)->rt;
  }

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

  /*
   * We ran out of runtime, see if we can borrow some from our neighbours.
   */
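/*
 * Illustrative numbers: with 4 CPUs in the root domain (weight == 4) and a
 * neighbour that has 40ms of unused runtime (rt_runtime - rt_time), we take
 * at most 40ms / 4 = 10ms from it, and we stop borrowing as soon as our own
 * rt_runtime reaches the period length.
 */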
static int do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
	int i, weight, more = 0;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
  		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
  		s64 diff;
  
  		if (iter == rt_rq)
  			continue;
		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
  			if (rt_rq->rt_runtime + diff > rt_period)
  				diff = rt_period - rt_rq->rt_runtime;
  			iter->rt_runtime -= diff;
  			rt_rq->rt_runtime += diff;
  			more = 1;
  			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return more;
}

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
  		/*
  		 * Either we're all inf and nobody needs to borrow, or we're
  		 * already disabled and thus have nothing to do, or we have
  		 * exactly the right amount of runtime to take out.
  		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have, that's the amount of runtime
		 * we lent out and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;
			raw_spin_lock(&iter->rt_runtime_lock);
  			if (want > 0) {
  				diff = min_t(s64, iter->rt_runtime, want);
  				iter->rt_runtime -= diff;
  				want -= diff;
  			} else {
  				iter->rt_runtime -= want;
  				want -= want;
  			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		BUG_ON(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
  	}
  }
  
  static void disable_runtime(struct rq *rq)
  {
  	unsigned long flags;
	raw_spin_lock_irqsave(&rq->lock, flags);
	__disable_runtime(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

static void __enable_runtime(struct rq *rq)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
  	}
  }
  
  static void enable_runtime(struct rq *rq)
  {
  	unsigned long flags;
	raw_spin_lock_irqsave(&rq->lock, flags);
	__enable_runtime(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

  static int balance_runtime(struct rt_rq *rt_rq)
  {
  	int more = 0;
  
  	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		more = do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
	}

	return more;
}

#else /* !CONFIG_SMP */

  static inline int balance_runtime(struct rt_rq *rt_rq)
  {
  	return 0;
  }
#endif /* CONFIG_SMP */

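/*
 * Called when the rt_bandwidth period timer fires.  For every rt_rq in the
 * period mask the budget is replenished: rt_time is decreased by
 * overrun * runtime, and throttled runqueues whose rt_time dropped below
 * the runtime are unthrottled and re-enqueued.  Returns 1 when everything
 * is idle again, so the caller can let the timer stop.
 */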
  static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
  {
  	int i, idle = 1;
	const struct cpumask *span;

	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return 1;

	span = sched_rt_period_mask();
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);

		raw_spin_lock(&rq->lock);
		if (rt_rq->rt_time) {
			u64 runtime;

			raw_spin_lock(&rt_rq->rt_runtime_lock);
  			if (rt_rq->rt_throttled)
  				balance_runtime(rt_rq);
  			runtime = rt_rq->rt_runtime;
  			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
  			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
  				rt_rq->rt_throttled = 0;
  				enqueue = 1;
  
  				/*
  				 * Force a clock update if the CPU was idle,
  				 * lest wakeup -> unthrottle time accumulate.
  				 */
  				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
  					rq->skip_clock_update = -1;
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running) {
			idle = 0;
			if (!rt_rq_throttled(rt_rq))
				enqueue = 1;
		}

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		raw_spin_unlock(&rq->lock);
  	}
  
  	return idle;
  }

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}

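/*
 * Check whether this rt_rq has used up its budget for the current period
 * and throttle it if so (after trying to borrow some extra runtime from
 * other CPUs via balance_runtime()).  With the usual defaults of
 * rt_runtime = 950ms per rt_period = 1s, an rt_rq gets throttled once its
 * tasks have consumed 950ms of CPU inside the period, leaving the rest of
 * the period to other scheduling classes.
 */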
  static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		rt_rq->rt_throttled = 1;
		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
  	}
  
  	return 0;
  }
  /*
   * Update the current task's runtime statistics. Skip current tasks that
   * are not in our scheduling class.
   */
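/*
 * Besides the usual exec-runtime bookkeeping, the second half below charges
 * delta_exec to every rt_rq up the group hierarchy, so that group throttling
 * (sched_rt_runtime_exceeded()) sees the time consumed by its children.
 */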
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	u64 delta_exec;

	if (curr->sched_class != &rt_sched_class)
		return;

	delta_exec = rq->clock_task - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq->clock_task;
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))
				resched_task(curr);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		}
	}
}

  #if defined CONFIG_SMP

static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);

static inline int next_prio(struct rq *rq)
{
  	struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
  
  	if (next && rt_prio(next->prio))
  		return next->prio;
  	else
  		return MAX_RT_PRIO;
  }

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (prio < prev_prio) {

		/*
		 * If the new task is higher in priority than anything on the
		 * run-queue, we know that the previous high becomes our
		 * next-highest.
		 */
		rt_rq->highest_prio.next = prev_prio;

		if (rq->online)
			cpupri_set(&rq->rd->cpupri, rq->cpu, prio);

  	} else if (prio == rt_rq->highest_prio.curr)
  		/*
  		 * If the next task is equal in priority to the highest on
  		 * the run-queue, then we implicitly know that the next highest
  		 * task cannot be any lower than current
  		 */
  		rt_rq->highest_prio.next = prio;
  	else if (prio < rt_rq->highest_prio.next)
  		/*
  		 * Otherwise, we need to recompute next-highest
  		 */
  		rt_rq->highest_prio.next = next_prio(rq);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

  	if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
  		rt_rq->highest_prio.next = next_prio(rq);
  
  	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
  		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
  void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
  static inline
  void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
  
  #endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED

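/*
 * rt_rq->highest_prio caches the best (lowest numeric) priority queued on
 * this rt_rq and, on SMP, also the next-best one, so that cpupri and the
 * push/pull logic can be updated in O(1) on enqueue; only dequeueing the
 * current highest task requires rescanning the priority bitmap.
 */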
  static void
  inc_rt_prio(struct rt_rq *rt_rq, int prio)
  {
  	int prev_prio = rt_rq->highest_prio.curr;
  
  	if (prio < prev_prio)
  		rt_rq->highest_prio.curr = prio;
  
  	inc_rt_prio_smp(rt_rq, prio, prev_prio);
  }
  
  static void
  dec_rt_prio(struct rt_rq *rt_rq, int prio)
  {
  	int prev_prio = rt_rq->highest_prio.curr;
	if (rt_rq->rt_nr_running) {

		WARN_ON(prio < prev_prio);

		/*
		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do
		 */
		if (prio == prev_prio) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);
		}

	} else
		rt_rq->highest_prio.curr = MAX_RT_PRIO;

	dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

  #else
  
  static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
  static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
  
  #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
  
  static void
  inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	if (rt_se_boosted(rt_se))
  		rt_rq->rt_nr_boosted++;
  
  	if (rt_rq->tg)
  		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
  }
  
  static void
  dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	if (rt_se_boosted(rt_se))
  		rt_rq->rt_nr_boosted--;
  
  	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
  }
  
  #else /* CONFIG_RT_GROUP_SCHED */
  
  static void
  inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	start_rt_bandwidth(&def_rt_bandwidth);
  }
  
  static inline
  void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
  
  #endif /* CONFIG_RT_GROUP_SCHED */
  
  static inline
  void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	int prio = rt_se_prio(rt_se);
  
  	WARN_ON(!rt_prio(prio));
  	rt_rq->rt_nr_running++;
  
  	inc_rt_prio(rt_rq, prio);
  	inc_rt_migration(rt_se, rt_rq);
  	inc_rt_group(rt_se, rt_rq);
  }
  
  static inline
  void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
  	WARN_ON(!rt_rq->rt_nr_running);
  	rt_rq->rt_nr_running--;
  
  	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
  	dec_rt_migration(rt_se, rt_rq);
  	dec_rt_group(rt_se, rt_rq);
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
  	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
  	struct rt_prio_array *array = &rt_rq->active;
  	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
		return;

	if (!rt_rq->rt_nr_running)
		list_add_leaf_rt_rq(rt_rq);

  	if (head)
  		list_add(&rt_se->run_list, queue);
  	else
  		list_add_tail(&rt_se->run_list, queue);
	__set_bit(rt_se_prio(rt_se), array->bitmap);

	inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
  {
  	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
  	struct rt_prio_array *array = &rt_rq->active;
  
  	list_del_init(&rt_se->run_list);
  	if (list_empty(array->queue + rt_se_prio(rt_se)))
  		__clear_bit(rt_se_prio(rt_se), array->bitmap);
  
  	dec_rt_tasks(rt_se, rt_rq);

	if (!rt_rq->rt_nr_running)
		list_del_leaf_rt_rq(rt_rq);
  }
  
  /*
   * Because the prio of an upper entry depends on the lower
   * entries, we must remove entries top - down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
	struct sched_rt_entity *back = NULL;

  	for_each_sched_rt_entity(rt_se) {
  		rt_se->back = back;
  		back = rt_se;
  	}
  
  	for (rt_se = back; rt_se; rt_se = rt_se->back) {
  		if (on_rt_rq(rt_se))
			__dequeue_rt_entity(rt_se);
	}
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	dequeue_rt_stack(rt_se);
	for_each_sched_rt_entity(rt_se)
		__enqueue_rt_entity(rt_se, head);
  }
  
  static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
  {
  	dequeue_rt_stack(rt_se);
  
  	for_each_sched_rt_entity(rt_se) {
  		struct rt_rq *rt_rq = group_rt_rq(rt_se);
  
  		if (rt_rq && rt_rq->rt_nr_running)
			__enqueue_rt_entity(rt_se, false);
	}
  }
  
  /*
   * Adding/removing a task to/from a priority array:
   */
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	if (flags & ENQUEUE_WAKEUP)
		rt_se->timeout = 0;

	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);

	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	dequeue_rt_entity(rt_se);

	dequeue_pushable_task(rq, p);
  }
  
  /*
   * Put task to the end of the run list without the overhead of dequeue
   * followed by enqueue.
   */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
	if (on_rt_rq(rt_se)) {
  		struct rt_prio_array *array = &rt_rq->active;
  		struct list_head *queue = array->queue + rt_se_prio(rt_se);
  
  		if (head)
  			list_move(&rt_se->run_list, queue);
  		else
  			list_move_tail(&rt_se->run_list, queue);
	}
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se, head);
	}
}

static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr, 0);
}

  #ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int
select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
{
	struct task_struct *curr;
	struct rq *rq;
	int cpu;

	if (sd_flag != SD_BALANCE_WAKE)
		return smp_processor_id();

  	cpu = task_cpu(p);
  	rq = cpu_rq(cpu);
  
  	rcu_read_lock();
  	curr = ACCESS_ONCE(rq->curr); /* unlocked access */
	/*
	 * If the current task on @p's runqueue is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
  	 * We want to avoid overloading runqueues. If the woken
  	 * task is a higher priority, then it will stay on this CPU
  	 * and the lower prio task should be moved to another CPU.
  	 * Even though this will probably make the lower prio task
  	 * lose its cache, we do not want to bounce a higher task
  	 * around just because it gave up its CPU, perhaps for a
  	 * lock?
  	 *
  	 * For equal prio tasks, we just let the scheduler sort it out.
7608dec2c   Peter Zijlstra   sched: Drop the r...
963
964
965
966
967
968
  	 *
  	 * Otherwise, just let it ride on the affined RQ and the
  	 * post-schedule router will push the preempted task away
  	 *
  	 * This test is optimistic, if we get it wrong the load-balancer
  	 * will have to sort it out.
	 */
	if (curr && unlikely(rt_task(curr)) &&
	    (curr->rt.nr_cpus_allowed < 2 ||
	     curr->prio <= p->prio) &&
	    (p->rt.nr_cpus_allowed > 1)) {
		int target = find_lowest_rq(p);

		if (target != -1)
			cpu = target;
	}
	rcu_read_unlock();

	return cpu;
  }
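
/*
 * When a woken task and the current task have the same priority neither
 * preempts the other.  If the woken task cannot readily run elsewhere but
 * current can (according to cpupri), requeue the woken task at the head
 * and reschedule, so the push logic can move current out of the way.
 */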
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
	if (rq->curr->rt.nr_cpus_allowed == 1)
		return;

	if (p->rt.nr_cpus_allowed != 1
	    && cpupri_find(&rq->rd->cpupri, p, NULL))
		return;

	if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
		return;
  
	/*
	 * There appear to be other CPUs that can accept
	 * current and none to run 'p', so let's reschedule
	 * to try and push current away:
	 */
  	requeue_task_rt(rq, p, 1);
  	resched_task(rq->curr);
  }
#endif /* CONFIG_SMP */

  /*
   * Preempt the current task with a newly woken task if needed:
   */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
	if (p->prio < rq->curr->prio) {
		resched_task(rq->curr);
  		return;
  	}
  
  #ifdef CONFIG_SMP
  	/*
  	 * If:
  	 *
  	 * - the newly woken task is of equal priority to the current task
  	 * - the newly woken task is non-migratable while current is migratable
  	 * - current will be preempted on the next reschedule
  	 *
  	 * we should check to see if current can readily move to a different
  	 * cpu.  If so, we will reschedule to allow the push logic to try
  	 * to move current somewhere else, making room for our non-migratable
  	 * task.
  	 */
	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
		check_preempt_equal_prio(rq, p);
#endif
}

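/*
 * Task selection: find the first set bit in the rt_rq's priority bitmap and
 * take the head of that queue.  With group scheduling the chosen entity may
 * itself be a group; _pick_next_task_rt() keeps descending until it reaches
 * a task.
 */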
  static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
  						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
  	struct list_head *queue;
  	int idx;
  
  	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->queue + idx;
	next = list_entry(queue->next, struct sched_rt_entity, run_list);

	return next;
}

  static struct task_struct *_pick_next_task_rt(struct rq *rq)
  {
  	struct sched_rt_entity *rt_se;
  	struct task_struct *p;
  	struct rt_rq *rt_rq;

	rt_rq = &rq->rt;

	if (!rt_rq->rt_nr_running)
		return NULL;

	if (rt_rq_throttled(rt_rq))
  		return NULL;
  
  	do {
  		rt_se = pick_next_rt_entity(rq, rt_rq);
		BUG_ON(!rt_se);
  		rt_rq = group_rt_rq(rt_se);
  	} while (rt_rq);
  
  	p = rt_task_of(rt_se);
	p->se.exec_start = rq->clock_task;
  
  	return p;
  }
  
  static struct task_struct *pick_next_task_rt(struct rq *rq)
  {
  	struct task_struct *p = _pick_next_task_rt(rq);
  
  	/* The running task is never eligible for pushing */
  	if (p)
  		dequeue_pushable_task(rq, p);
#ifdef CONFIG_SMP
  	/*
  	 * We detect this state here so that we can avoid taking the RQ
  	 * lock again later if there is no need to push
  	 */
  	rq->post_schedule = has_pushable_tasks(rq);
bcf08df3b   Ingo Molnar   sched: Fix cpupri...
1085
  #endif
3f029d3c6   Gregory Haskins   sched: Enhance th...
1086

6f505b164   Peter Zijlstra   sched: rt group s...
1087
  	return p;
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1088
  }
31ee529cc   Ingo Molnar   sched: remove the...
1089
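  /*
   * Account the outgoing task's runtime; if it is still queued and can
   * run on more than one CPU, it goes back on the pushable list below.
   */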
  static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1090
  {
f1e14ef64   Ingo Molnar   sched: remove the...
1091
  	update_curr_rt(rq);
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1092
  	p->se.exec_start = 0;
917b627d4   Gregory Haskins   sched: create "pu...
1093
1094
1095
1096
1097
  
  	/*
  	 * The previous task needs to be made eligible for pushing
  	 * if it is still active
  	 */
fd2f4419b   Peter Zijlstra   sched: Provide p-...
1098
  	if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
917b627d4   Gregory Haskins   sched: create "pu...
1099
  		enqueue_pushable_task(rq, p);
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1100
  }
681f3e685   Peter Williams   sched: isolate SM...
1101
  #ifdef CONFIG_SMP
6f505b164   Peter Zijlstra   sched: rt group s...
1102

e8fa13626   Steven Rostedt   sched: add RT tas...
1103
1104
  /* Only try algorithms three times */
  #define RT_MAX_TRIES 3
e8fa13626   Steven Rostedt   sched: add RT tas...
1105
  static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1106
1107
1108
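  /*
   * A task is a push/pull candidate only if it is not currently running,
   * is allowed on the given cpu (a negative cpu means "any"), and is not
   * pinned to a single CPU.
   */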
  static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
  {
  	if (!task_running(rq, p) &&
96f874e26   Rusty Russell   sched: convert re...
1109
  	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
6f505b164   Peter Zijlstra   sched: rt group s...
1110
  	    (p->rt.nr_cpus_allowed > 1))
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1111
1112
1113
  		return 1;
  	return 0;
  }
e8fa13626   Steven Rostedt   sched: add RT tas...
1114
  /* Return the second highest RT task, NULL otherwise */
79064fbf7   Ingo Molnar   sched: clean up p...
1115
  static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
e8fa13626   Steven Rostedt   sched: add RT tas...
1116
  {
6f505b164   Peter Zijlstra   sched: rt group s...
1117
1118
1119
1120
  	struct task_struct *next = NULL;
  	struct sched_rt_entity *rt_se;
  	struct rt_prio_array *array;
  	struct rt_rq *rt_rq;
e8fa13626   Steven Rostedt   sched: add RT tas...
1121
  	int idx;
6f505b164   Peter Zijlstra   sched: rt group s...
1122
1123
1124
  	for_each_leaf_rt_rq(rt_rq, rq) {
  		array = &rt_rq->active;
  		idx = sched_find_first_bit(array->bitmap);
492462742   Peter Zijlstra   sched: Unindent l...
1125
  next_idx:
6f505b164   Peter Zijlstra   sched: rt group s...
1126
1127
1128
1129
1130
  		if (idx >= MAX_RT_PRIO)
  			continue;
  		if (next && next->prio < idx)
  			continue;
  		list_for_each_entry(rt_se, array->queue + idx, run_list) {
3d07467b7   Peter Zijlstra   sched: Fix pick_n...
1131
1132
1133
1134
1135
1136
  			struct task_struct *p;
  
  			if (!rt_entity_is_task(rt_se))
  				continue;
  
  			p = rt_task_of(rt_se);
6f505b164   Peter Zijlstra   sched: rt group s...
1137
1138
1139
1140
1141
1142
1143
1144
1145
  			if (pick_rt_task(rq, p, cpu)) {
  				next = p;
  				break;
  			}
  		}
  		if (!next) {
  			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
  			goto next_idx;
  		}
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1146
  	}
e8fa13626   Steven Rostedt   sched: add RT tas...
1147
1148
  	return next;
  }
0e3900e6d   Rusty Russell   sched: convert lo...
1149
  static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
e8fa13626   Steven Rostedt   sched: add RT tas...
1150

6e1254d2c   Gregory Haskins   sched: optimize R...
1151
1152
1153
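  /*
   * Find the best CPU to push @task to: cpupri yields the mask of CPUs
   * running lower-priority work, then we prefer the task's last CPU,
   * a topologically close CPU within the wake-affine domains, and
   * finally any CPU in the mask. Returns -1 if there is no candidate.
   */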
  static int find_lowest_rq(struct task_struct *task)
  {
  	struct sched_domain *sd;
96f874e26   Rusty Russell   sched: convert re...
1154
  	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
6e1254d2c   Gregory Haskins   sched: optimize R...
1155
1156
  	int this_cpu = smp_processor_id();
  	int cpu      = task_cpu(task);
06f90dbd7   Gregory Haskins   sched: RT-balance...
1157

0da938c44   Steven Rostedt   sched: Check if l...
1158
1159
1160
  	/* Make sure the mask is initialized first */
  	if (unlikely(!lowest_mask))
  		return -1;
6e0534f27   Gregory Haskins   sched: use a 2-d ...
1161
1162
  	if (task->rt.nr_cpus_allowed == 1)
  		return -1; /* No other targets possible */
6e1254d2c   Gregory Haskins   sched: optimize R...
1163

6e0534f27   Gregory Haskins   sched: use a 2-d ...
1164
1165
  	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
  		return -1; /* No targets found */
6e1254d2c   Gregory Haskins   sched: optimize R...
1166
1167
1168
1169
1170
1171
1172
1173
1174
  
  	/*
  	 * At this point we have built a mask of cpus representing the
  	 * lowest priority tasks in the system.  Now we want to elect
  	 * the best one based on our affinity and topology.
  	 *
  	 * We prioritize the last cpu that the task executed on since
  	 * it is most likely cache-hot in that location.
  	 */
96f874e26   Rusty Russell   sched: convert re...
1175
  	if (cpumask_test_cpu(cpu, lowest_mask))
6e1254d2c   Gregory Haskins   sched: optimize R...
1176
1177
1178
1179
1180
1181
  		return cpu;
  
  	/*
  	 * Otherwise, we consult the sched_domains span maps to figure
  	 * out which cpu is logically closest to our hot cache data.
  	 */
e2c880630   Rusty Russell   cpumask: Simplify...
1182
1183
  	if (!cpumask_test_cpu(this_cpu, lowest_mask))
  		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
6e1254d2c   Gregory Haskins   sched: optimize R...
1184

cd4ae6adf   Xiaotian Feng   sched: More sched...
1185
  	rcu_read_lock();
e2c880630   Rusty Russell   cpumask: Simplify...
1186
1187
1188
  	for_each_domain(cpu, sd) {
  		if (sd->flags & SD_WAKE_AFFINE) {
  			int best_cpu;
6e1254d2c   Gregory Haskins   sched: optimize R...
1189

e2c880630   Rusty Russell   cpumask: Simplify...
1190
1191
1192
1193
1194
  			/*
  			 * "this_cpu" is cheaper to preempt than a
  			 * remote processor.
  			 */
  			if (this_cpu != -1 &&
cd4ae6adf   Xiaotian Feng   sched: More sched...
1195
1196
  			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
  				rcu_read_unlock();
e2c880630   Rusty Russell   cpumask: Simplify...
1197
  				return this_cpu;
cd4ae6adf   Xiaotian Feng   sched: More sched...
1198
  			}
e2c880630   Rusty Russell   cpumask: Simplify...
1199
1200
1201
  
  			best_cpu = cpumask_first_and(lowest_mask,
  						     sched_domain_span(sd));
cd4ae6adf   Xiaotian Feng   sched: More sched...
1202
1203
  			if (best_cpu < nr_cpu_ids) {
  				rcu_read_unlock();
e2c880630   Rusty Russell   cpumask: Simplify...
1204
  				return best_cpu;
cd4ae6adf   Xiaotian Feng   sched: More sched...
1205
  			}
6e1254d2c   Gregory Haskins   sched: optimize R...
1206
1207
  		}
  	}
cd4ae6adf   Xiaotian Feng   sched: More sched...
1208
  	rcu_read_unlock();
6e1254d2c   Gregory Haskins   sched: optimize R...
1209
1210
1211
1212
1213
1214
  
  	/*
  	 * And finally, if there were no matches within the domains
  	 * just give the caller *something* to work with from the compatible
  	 * locations.
  	 */
e2c880630   Rusty Russell   cpumask: Simplify...
1215
1216
1217
1218
1219
1220
1221
  	if (this_cpu != -1)
  		return this_cpu;
  
  	cpu = cpumask_any(lowest_mask);
  	if (cpu < nr_cpu_ids)
  		return cpu;
  	return -1;
07b4032c9   Gregory Haskins   sched: break out ...
1222
1223
1224
  }
  
  /* Will lock the rq it finds */
4df64c0bf   Ingo Molnar   sched: clean up f...
1225
  static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
07b4032c9   Gregory Haskins   sched: break out ...
1226
1227
  {
  	struct rq *lowest_rq = NULL;
07b4032c9   Gregory Haskins   sched: break out ...
1228
  	int tries;
4df64c0bf   Ingo Molnar   sched: clean up f...
1229
  	int cpu;
e8fa13626   Steven Rostedt   sched: add RT tas...
1230

07b4032c9   Gregory Haskins   sched: break out ...
1231
1232
  	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
  		cpu = find_lowest_rq(task);
2de0b4639   Gregory Haskins   sched: RT balanci...
1233
  		if ((cpu == -1) || (cpu == rq->cpu))
e8fa13626   Steven Rostedt   sched: add RT tas...
1234
  			break;
07b4032c9   Gregory Haskins   sched: break out ...
1235
  		lowest_rq = cpu_rq(cpu);
e8fa13626   Steven Rostedt   sched: add RT tas...
1236
  		/* if the prio of this runqueue changed, try again */
07b4032c9   Gregory Haskins   sched: break out ...
1237
  		if (double_lock_balance(rq, lowest_rq)) {
e8fa13626   Steven Rostedt   sched: add RT tas...
1238
1239
1240
1241
1242
1243
  			/*
  			 * We had to unlock the run queue. In
  			 * the meantime, the task could have
  			 * migrated already or had its affinity changed.
  			 * Also make sure that it wasn't scheduled on its rq.
  			 */
07b4032c9   Gregory Haskins   sched: break out ...
1244
  			if (unlikely(task_rq(task) != rq ||
96f874e26   Rusty Russell   sched: convert re...
1245
1246
  				     !cpumask_test_cpu(lowest_rq->cpu,
  						       &task->cpus_allowed) ||
07b4032c9   Gregory Haskins   sched: break out ...
1247
  				     task_running(rq, task) ||
fd2f4419b   Peter Zijlstra   sched: Provide p-...
1248
  				     !task->on_rq)) {
4df64c0bf   Ingo Molnar   sched: clean up f...
1249

05fa785cf   Thomas Gleixner   sched: Convert rq...
1250
  				raw_spin_unlock(&lowest_rq->lock);
e8fa13626   Steven Rostedt   sched: add RT tas...
1251
1252
1253
1254
1255
1256
  				lowest_rq = NULL;
  				break;
  			}
  		}
  
  		/* If this rq is still suitable use it. */
e864c499d   Gregory Haskins   sched: track the ...
1257
  		if (lowest_rq->rt.highest_prio.curr > task->prio)
e8fa13626   Steven Rostedt   sched: add RT tas...
1258
1259
1260
  			break;
  
  		/* try again */
1b12bbc74   Peter Zijlstra   lockdep: re-annot...
1261
  		double_unlock_balance(rq, lowest_rq);
e8fa13626   Steven Rostedt   sched: add RT tas...
1262
1263
1264
1265
1266
  		lowest_rq = NULL;
  	}
  
  	return lowest_rq;
  }
917b627d4   Gregory Haskins   sched: create "pu...
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
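  /*
   * Return the highest-priority task on this rq's pushable list, or NULL
   * if the list is empty. The BUG_ONs check the invariants: the task
   * belongs to this rq, is not currently running, can migrate and is
   * still queued.
   */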
  static struct task_struct *pick_next_pushable_task(struct rq *rq)
  {
  	struct task_struct *p;
  
  	if (!has_pushable_tasks(rq))
  		return NULL;
  
  	p = plist_first_entry(&rq->rt.pushable_tasks,
  			      struct task_struct, pushable_tasks);
  
  	BUG_ON(rq->cpu != task_cpu(p));
  	BUG_ON(task_current(rq, p));
  	BUG_ON(p->rt.nr_cpus_allowed <= 1);
fd2f4419b   Peter Zijlstra   sched: Provide p-...
1280
  	BUG_ON(!p->on_rq);
917b627d4   Gregory Haskins   sched: create "pu...
1281
1282
1283
1284
  	BUG_ON(!rt_task(p));
  
  	return p;
  }
e8fa13626   Steven Rostedt   sched: add RT tas...
1285
1286
1287
1288
1289
  /*
   * If the current CPU has more than one RT task, see if the
   * non-running task can migrate over to a CPU that is running a
   * task of lesser priority.
   */
697f0a487   Gregory Haskins   sched: clean up t...
1290
  static int push_rt_task(struct rq *rq)
e8fa13626   Steven Rostedt   sched: add RT tas...
1291
1292
1293
  {
  	struct task_struct *next_task;
  	struct rq *lowest_rq;
e8fa13626   Steven Rostedt   sched: add RT tas...
1294

a22d7fc18   Gregory Haskins   sched: wake-balan...
1295
1296
  	if (!rq->rt.overloaded)
  		return 0;
917b627d4   Gregory Haskins   sched: create "pu...
1297
  	next_task = pick_next_pushable_task(rq);
e8fa13626   Steven Rostedt   sched: add RT tas...
1298
1299
  	if (!next_task)
  		return 0;
492462742   Peter Zijlstra   sched: Unindent l...
1300
  retry:
697f0a487   Gregory Haskins   sched: clean up t...
1301
  	if (unlikely(next_task == rq->curr)) {
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1302
  		WARN_ON(1);
e8fa13626   Steven Rostedt   sched: add RT tas...
1303
  		return 0;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1304
  	}
e8fa13626   Steven Rostedt   sched: add RT tas...
1305
1306
1307
1308
1309
1310
  
  	/*
  	 * It's possible that next_task slipped in with a
  	 * higher priority than current. If that's the case,
  	 * just reschedule current.
  	 */
697f0a487   Gregory Haskins   sched: clean up t...
1311
1312
  	if (unlikely(next_task->prio < rq->curr->prio)) {
  		resched_task(rq->curr);
e8fa13626   Steven Rostedt   sched: add RT tas...
1313
1314
  		return 0;
  	}
697f0a487   Gregory Haskins   sched: clean up t...
1315
  	/* We might release rq lock */
e8fa13626   Steven Rostedt   sched: add RT tas...
1316
1317
1318
  	get_task_struct(next_task);
  
  	/* find_lock_lowest_rq locks the rq if found */
697f0a487   Gregory Haskins   sched: clean up t...
1319
  	lowest_rq = find_lock_lowest_rq(next_task, rq);
e8fa13626   Steven Rostedt   sched: add RT tas...
1320
1321
1322
  	if (!lowest_rq) {
  		struct task_struct *task;
  		/*
697f0a487   Gregory Haskins   sched: clean up t...
1323
  		 * find_lock_lowest_rq releases rq->lock
1563513d3   Gregory Haskins   RT: fix push_rt_t...
1324
1325
1326
1327
1328
  		 * so it is possible that next_task has migrated.
  		 *
  		 * We need to make sure that the task is still on the same
  		 * run-queue and is also still the next task eligible for
  		 * pushing.
e8fa13626   Steven Rostedt   sched: add RT tas...
1329
  		 */
917b627d4   Gregory Haskins   sched: create "pu...
1330
  		task = pick_next_pushable_task(rq);
1563513d3   Gregory Haskins   RT: fix push_rt_t...
1331
1332
  		if (task_cpu(next_task) == rq->cpu && task == next_task) {
  			/*
25985edce   Lucas De Marchi   Fix common misspe...
1333
  			 * If we get here, the task hasn't moved at all, but
1563513d3   Gregory Haskins   RT: fix push_rt_t...
1334
1335
1336
1337
1338
1339
  			 * it has failed to push.  We will not try again,
  			 * since the other cpus will pull from us when they
  			 * are ready.
  			 */
  			dequeue_pushable_task(rq, next_task);
  			goto out;
e8fa13626   Steven Rostedt   sched: add RT tas...
1340
  		}
917b627d4   Gregory Haskins   sched: create "pu...
1341

1563513d3   Gregory Haskins   RT: fix push_rt_t...
1342
1343
1344
  		if (!task)
  			/* No more tasks, just exit */
  			goto out;
917b627d4   Gregory Haskins   sched: create "pu...
1345
  		/*
1563513d3   Gregory Haskins   RT: fix push_rt_t...
1346
  		 * Something has shifted, try again.
917b627d4   Gregory Haskins   sched: create "pu...
1347
  		 */
1563513d3   Gregory Haskins   RT: fix push_rt_t...
1348
1349
1350
  		put_task_struct(next_task);
  		next_task = task;
  		goto retry;
e8fa13626   Steven Rostedt   sched: add RT tas...
1351
  	}
697f0a487   Gregory Haskins   sched: clean up t...
1352
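  	/*
  	 * Both rq->lock and lowest_rq->lock are held here: move next_task
  	 * over to the target CPU and make that CPU reschedule.
  	 */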
  	deactivate_task(rq, next_task, 0);
e8fa13626   Steven Rostedt   sched: add RT tas...
1353
1354
1355
1356
  	set_task_cpu(next_task, lowest_rq->cpu);
  	activate_task(lowest_rq, next_task, 0);
  
  	resched_task(lowest_rq->curr);
1b12bbc74   Peter Zijlstra   lockdep: re-annot...
1357
  	double_unlock_balance(rq, lowest_rq);
e8fa13626   Steven Rostedt   sched: add RT tas...
1358

e8fa13626   Steven Rostedt   sched: add RT tas...
1359
1360
  out:
  	put_task_struct(next_task);
917b627d4   Gregory Haskins   sched: create "pu...
1361
  	return 1;
e8fa13626   Steven Rostedt   sched: add RT tas...
1362
  }
e8fa13626   Steven Rostedt   sched: add RT tas...
1363
1364
1365
1366
1367
1368
  static void push_rt_tasks(struct rq *rq)
  {
  	/* push_rt_task will return true if it moved an RT task */
  	while (push_rt_task(rq))
  		;
  }
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1369
1370
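  /*
   * Scan the CPUs flagged as RT-overloaded and pull over any queued
   * (but not running) RT task whose priority beats what this runqueue
   * would run next. Returns 1 if at least one task was pulled.
   */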
  static int pull_rt_task(struct rq *this_rq)
  {
80bf3171d   Ingo Molnar   sched: clean up p...
1371
  	int this_cpu = this_rq->cpu, ret = 0, cpu;
a8728944e   Gregory Haskins   sched: use highes...
1372
  	struct task_struct *p;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1373
  	struct rq *src_rq;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1374

637f50851   Gregory Haskins   sched: only balan...
1375
  	if (likely(!rt_overloaded(this_rq)))
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1376
  		return 0;
c6c4927b2   Rusty Russell   sched: convert st...
1377
  	for_each_cpu(cpu, this_rq->rd->rto_mask) {
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1378
1379
1380
1381
  		if (this_cpu == cpu)
  			continue;
  
  		src_rq = cpu_rq(cpu);
74ab8e4f6   Gregory Haskins   sched: use highes...
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
  
  		/*
  		 * Don't bother taking the src_rq->lock if the next highest
  		 * task is known to be lower-priority than our current task.
  		 * This may look racy, but if this value is about to go
  		 * logically higher, the src_rq will push this task away.
  		 * And if it's going logically lower, we do not care.
  		 */
  		if (src_rq->rt.highest_prio.next >=
  		    this_rq->rt.highest_prio.curr)
  			continue;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1393
1394
1395
  		/*
  		 * We can potentially drop this_rq's lock in
  		 * double_lock_balance, and another CPU could
a8728944e   Gregory Haskins   sched: use highes...
1396
  		 * alter this_rq
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1397
  		 */
a8728944e   Gregory Haskins   sched: use highes...
1398
  		double_lock_balance(this_rq, src_rq);
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1399
1400
1401
1402
  
  		/*
  		 * Are there still pullable RT tasks?
  		 */
614ee1f61   Mike Galbraith   sched: pull_rt_ta...
1403
1404
  		if (src_rq->rt.rt_nr_running <= 1)
  			goto skip;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1405

f65eda4f7   Steven Rostedt   sched: pull RT ta...
1406
1407
1408
1409
1410
1411
  		p = pick_next_highest_task_rt(src_rq, this_cpu);
  
  		/*
  		 * Do we have an RT task that preempts
  		 * the to-be-scheduled task?
  		 */
a8728944e   Gregory Haskins   sched: use highes...
1412
  		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1413
  			WARN_ON(p == src_rq->curr);
fd2f4419b   Peter Zijlstra   sched: Provide p-...
1414
  			WARN_ON(!p->on_rq);
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1415
1416
1417
1418
1419
1420
1421
  
  			/*
  			 * There's a chance that p is higher in priority
  			 * than what's currently running on its cpu.
  			 * This is just that p is waking up and hasn't
  			 * had a chance to schedule. We only pull
  			 * p if it is lower in priority than the
a8728944e   Gregory Haskins   sched: use highes...
1422
  			 * current task on the run queue
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1423
  			 */
a8728944e   Gregory Haskins   sched: use highes...
1424
  			if (p->prio < src_rq->curr->prio)
614ee1f61   Mike Galbraith   sched: pull_rt_ta...
1425
  				goto skip;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1426
1427
1428
1429
1430
1431
1432
1433
1434
  
  			ret = 1;
  
  			deactivate_task(src_rq, p, 0);
  			set_task_cpu(p, this_cpu);
  			activate_task(this_rq, p, 0);
  			/*
  			 * We continue with the search, just in
  			 * case there's an even higher prio task
25985edce   Lucas De Marchi   Fix common misspe...
1435
  			 * in another runqueue. (low likelihood
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1436
  			 * but possible)
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1437
  			 */
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1438
  		}
492462742   Peter Zijlstra   sched: Unindent l...
1439
  skip:
1b12bbc74   Peter Zijlstra   lockdep: re-annot...
1440
  		double_unlock_balance(this_rq, src_rq);
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1441
1442
1443
1444
  	}
  
  	return ret;
  }
9a897c5a6   Steven Rostedt   sched: RT-balance...
1445
  static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1446
1447
  {
  	/* Try to pull RT tasks here if we lower this rq's prio */
33c3d6c61   Yong Zhang   sched: Cleanup pr...
1448
  	if (rq->rt.highest_prio.curr > prev->prio)
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1449
1450
  		pull_rt_task(rq);
  }
9a897c5a6   Steven Rostedt   sched: RT-balance...
1451
  static void post_schedule_rt(struct rq *rq)
e8fa13626   Steven Rostedt   sched: add RT tas...
1452
  {
967fc0467   Gregory Haskins   sched: add sched_...
1453
  	push_rt_tasks(rq);
e8fa13626   Steven Rostedt   sched: add RT tas...
1454
  }
8ae121ac8   Gregory Haskins   sched: fix RT tas...
1455
1456
1457
1458
  /*
   * If we are not running and we are not going to reschedule soon, we should
   * try to push tasks away now
   */
efbbd05a5   Peter Zijlstra   sched: Add pre an...
1459
  static void task_woken_rt(struct rq *rq, struct task_struct *p)
4642dafdf   Steven Rostedt   sched: push RT ta...
1460
  {
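  	/*
  	 * Pushing only makes sense if the woken task is not the one now
  	 * running, current is not already marked to reschedule, there are
  	 * pushable tasks, the woken task can migrate, and current is an
  	 * RT task that is either pinned or at least as high in priority
  	 * as p (so p will not simply preempt it).
  	 */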
9a897c5a6   Steven Rostedt   sched: RT-balance...
1461
  	if (!task_running(rq, p) &&
8ae121ac8   Gregory Haskins   sched: fix RT tas...
1462
  	    !test_tsk_need_resched(rq->curr) &&
917b627d4   Gregory Haskins   sched: create "pu...
1463
  	    has_pushable_tasks(rq) &&
b3bc211cf   Steven Rostedt   sched: Give CPU b...
1464
  	    p->rt.nr_cpus_allowed > 1 &&
43fa5460f   Steven Rostedt   sched: Try not to...
1465
  	    rt_task(rq->curr) &&
b3bc211cf   Steven Rostedt   sched: Give CPU b...
1466
  	    (rq->curr->rt.nr_cpus_allowed < 2 ||
3be209a8e   Shawn Bohrer   sched/rt: Migrate...
1467
  	     rq->curr->prio <= p->prio))
4642dafdf   Steven Rostedt   sched: push RT ta...
1468
1469
  		push_rt_tasks(rq);
  }
cd8ba7cd9   Mike Travis   sched: add new se...
1470
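  /*
   * Affinity change for an RT task: keep the rq's migratory-task count
   * and the pushable list in sync with the new mask, then record the
   * mask and its weight in the task itself.
   */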
  static void set_cpus_allowed_rt(struct task_struct *p,
96f874e26   Rusty Russell   sched: convert re...
1471
  				const struct cpumask *new_mask)
73fe6aae8   Gregory Haskins   sched: add RT-bal...
1472
  {
96f874e26   Rusty Russell   sched: convert re...
1473
  	int weight = cpumask_weight(new_mask);
73fe6aae8   Gregory Haskins   sched: add RT-bal...
1474
1475
1476
1477
1478
1479
1480
  
  	BUG_ON(!rt_task(p));
  
  	/*
  	 * Update the migration status of the RQ if we have an RT task
  	 * which is running AND changing its weight value.
  	 */
fd2f4419b   Peter Zijlstra   sched: Provide p-...
1481
  	if (p->on_rq && (weight != p->rt.nr_cpus_allowed)) {
73fe6aae8   Gregory Haskins   sched: add RT-bal...
1482
  		struct rq *rq = task_rq(p);
917b627d4   Gregory Haskins   sched: create "pu...
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
  		if (!task_current(rq, p)) {
  			/*
  			 * Make sure we dequeue this task from the pushable list
  			 * before going further.  It will either remain off of
  			 * the list because we are no longer pushable, or it
  			 * will be requeued.
  			 */
  			if (p->rt.nr_cpus_allowed > 1)
  				dequeue_pushable_task(rq, p);
  
  			/*
  			 * Requeue if our weight is changing and still > 1
  			 */
  			if (weight > 1)
  				enqueue_pushable_task(rq, p);
  
  		}
6f505b164   Peter Zijlstra   sched: rt group s...
1500
  		if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
73fe6aae8   Gregory Haskins   sched: add RT-bal...
1501
  			rq->rt.rt_nr_migratory++;
6f505b164   Peter Zijlstra   sched: rt group s...
1502
  		} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
73fe6aae8   Gregory Haskins   sched: add RT-bal...
1503
1504
1505
  			BUG_ON(!rq->rt.rt_nr_migratory);
  			rq->rt.rt_nr_migratory--;
  		}
398a153b1   Gregory Haskins   sched: fix build ...
1506
  		update_rt_migration(&rq->rt);
73fe6aae8   Gregory Haskins   sched: add RT-bal...
1507
  	}
96f874e26   Rusty Russell   sched: convert re...
1508
  	cpumask_copy(&p->cpus_allowed, new_mask);
6f505b164   Peter Zijlstra   sched: rt group s...
1509
  	p->rt.nr_cpus_allowed = weight;
73fe6aae8   Gregory Haskins   sched: add RT-bal...
1510
  }
deeeccd41   Ingo Molnar   sched: clean up o...
1511

bdd7c81b4   Ingo Molnar   sched: fix sched_...
1512
  /* Assumes rq->lock is held */
1f11eb6a8   Gregory Haskins   sched: fix cpupri...
1513
  static void rq_online_rt(struct rq *rq)
bdd7c81b4   Ingo Molnar   sched: fix sched_...
1514
1515
1516
  {
  	if (rq->rt.overloaded)
  		rt_set_overload(rq);
6e0534f27   Gregory Haskins   sched: use a 2-d ...
1517

7def2be1d   Peter Zijlstra   sched: fix hotplu...
1518
  	__enable_runtime(rq);
e864c499d   Gregory Haskins   sched: track the ...
1519
  	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
bdd7c81b4   Ingo Molnar   sched: fix sched_...
1520
1521
1522
  }
  
  /* Assumes rq->lock is held */
1f11eb6a8   Gregory Haskins   sched: fix cpupri...
1523
  static void rq_offline_rt(struct rq *rq)
bdd7c81b4   Ingo Molnar   sched: fix sched_...
1524
1525
1526
  {
  	if (rq->rt.overloaded)
  		rt_clear_overload(rq);
6e0534f27   Gregory Haskins   sched: use a 2-d ...
1527

7def2be1d   Peter Zijlstra   sched: fix hotplu...
1528
  	__disable_runtime(rq);
6e0534f27   Gregory Haskins   sched: use a 2-d ...
1529
  	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
bdd7c81b4   Ingo Molnar   sched: fix sched_...
1530
  }
cb4698450   Steven Rostedt   sched: RT-balance...
1531
1532
1533
1534
1535
  
  /*
   * When switching away from the RT queue, we bring ourselves to a
   * position where we might want to pull RT tasks from other runqueues.
   */
da7a735e5   Peter Zijlstra   sched: Fix switch...
1536
  static void switched_from_rt(struct rq *rq, struct task_struct *p)
cb4698450   Steven Rostedt   sched: RT-balance...
1537
1538
1539
1540
1541
1542
1543
1544
  {
  	/*
  	 * If there are other RT tasks then we will reschedule
  	 * and the scheduling of the other RT tasks will handle
  	 * the balancing. But if we are the last RT task
  	 * we may need to handle the pulling of RT tasks
  	 * now.
  	 */
fd2f4419b   Peter Zijlstra   sched: Provide p-...
1545
  	if (p->on_rq && !rq->rt.rt_nr_running)
cb4698450   Steven Rostedt   sched: RT-balance...
1546
1547
  		pull_rt_task(rq);
  }
3d8cbdf86   Rusty Russell   sched: convert lo...
1548
1549
1550
1551
1552
1553
  
  static inline void init_sched_rt_class(void)
  {
  	unsigned int i;
  
  	for_each_possible_cpu(i)
eaa958402   Yinghai Lu   cpumask: alloc ze...
1554
  		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
6ca09dfc9   Mike Travis   sched: put back s...
1555
  					GFP_KERNEL, cpu_to_node(i));
3d8cbdf86   Rusty Russell   sched: convert lo...
1556
  }
cb4698450   Steven Rostedt   sched: RT-balance...
1557
1558
1559
1560
1561
1562
1563
  #endif /* CONFIG_SMP */
  
  /*
   * When switching a task to RT, we may overload the runqueue
   * with RT tasks. In this case we try to push them off to
   * other runqueues.
   */
da7a735e5   Peter Zijlstra   sched: Fix switch...
1564
  static void switched_to_rt(struct rq *rq, struct task_struct *p)
cb4698450   Steven Rostedt   sched: RT-balance...
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
  {
  	int check_resched = 1;
  
  	/*
  	 * If we are already running, then there's nothing
  	 * that needs to be done. But if we are not running,
  	 * we may need to preempt the current running task.
  	 * If that current running task is also an RT task,
  	 * then see if we can move to another run queue.
  	 */
fd2f4419b   Peter Zijlstra   sched: Provide p-...
1575
  	if (p->on_rq && rq->curr != p) {
cb4698450   Steven Rostedt   sched: RT-balance...
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
  #ifdef CONFIG_SMP
  		if (rq->rt.overloaded && push_rt_task(rq) &&
  		    /* Don't resched if we changed runqueues */
  		    rq != task_rq(p))
  			check_resched = 0;
  #endif /* CONFIG_SMP */
  		if (check_resched && p->prio < rq->curr->prio)
  			resched_task(rq->curr);
  	}
  }
  
  /*
   * Priority of the task has changed. This may cause
   * us to initiate a push or pull.
   */
da7a735e5   Peter Zijlstra   sched: Fix switch...
1591
1592
  static void
  prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
cb4698450   Steven Rostedt   sched: RT-balance...
1593
  {
fd2f4419b   Peter Zijlstra   sched: Provide p-...
1594
  	if (!p->on_rq)
da7a735e5   Peter Zijlstra   sched: Fix switch...
1595
1596
1597
  		return;
  
  	if (rq->curr == p) {
cb4698450   Steven Rostedt   sched: RT-balance...
1598
1599
1600
1601
1602
1603
1604
1605
1606
  #ifdef CONFIG_SMP
  		/*
  		 * If our priority decreases while running, we
  		 * may need to pull tasks to this runqueue.
  		 */
  		if (oldprio < p->prio)
  			pull_rt_task(rq);
  		/*
  		 * If there's a higher priority task waiting to run
6fa46fa52   Steven Rostedt   sched: balance RT...
1607
1608
1609
  		 * then reschedule. Note, the above pull_rt_task
  		 * can release the rq lock and p could migrate.
  		 * Only reschedule if p is still on the same runqueue.
cb4698450   Steven Rostedt   sched: RT-balance...
1610
  		 */
e864c499d   Gregory Haskins   sched: track the ...
1611
  		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
cb4698450   Steven Rostedt   sched: RT-balance...
1612
1613
1614
1615
1616
  			resched_task(p);
  #else
  		/* For UP simply resched on drop of prio */
  		if (oldprio < p->prio)
  			resched_task(p);
e8fa13626   Steven Rostedt   sched: add RT tas...
1617
  #endif /* CONFIG_SMP */
cb4698450   Steven Rostedt   sched: RT-balance...
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
  	} else {
  		/*
  		 * This task is not running, but if its priority is
  		 * higher than that of the current running task,
  		 * then reschedule.
  		 */
  		if (p->prio < rq->curr->prio)
  			resched_task(rq->curr);
  	}
  }
78f2c7db6   Peter Zijlstra   sched: SCHED_FIFO...
1628
1629
1630
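  /*
   * RLIMIT_RTTIME enforcement: count the ticks this task has spent
   * running as RT and, once the (microsecond) limit is exceeded, set the
   * thread's sched_exp expiry so the posix-cpu-timers code notices and
   * delivers the signal.
   */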
  static void watchdog(struct rq *rq, struct task_struct *p)
  {
  	unsigned long soft, hard;
78d7d407b   Jiri Slaby   kernel core: use ...
1631
1632
1633
  	/* max may change after cur was read; this will be fixed next tick */
  	soft = task_rlimit(p, RLIMIT_RTTIME);
  	hard = task_rlimit_max(p, RLIMIT_RTTIME);
78f2c7db6   Peter Zijlstra   sched: SCHED_FIFO...
1634
1635
1636
1637
1638
1639
  
  	if (soft != RLIM_INFINITY) {
  		unsigned long next;
  
  		p->rt.timeout++;
  		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
5a52dd500   Peter Zijlstra   sched: rt-watchdo...
1640
  		if (p->rt.timeout > next)
f06febc96   Frank Mayhar   timers: fix itime...
1641
  			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
78f2c7db6   Peter Zijlstra   sched: SCHED_FIFO...
1642
1643
  	}
  }
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1644

8f4d37ec0   Peter Zijlstra   sched: high-res p...
1645
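  /*
   * Per-tick work for an RT task: update runtime accounting, run the
   * RLIMIT_RTTIME watchdog, and handle SCHED_RR timeslice expiry by
   * refilling the slice and rotating the task to the tail of its queue.
   */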
  static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1646
  {
67e2be023   Peter Zijlstra   sched: rt: accoun...
1647
  	update_curr_rt(rq);
78f2c7db6   Peter Zijlstra   sched: SCHED_FIFO...
1648
  	watchdog(rq, p);
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1649
1650
1651
1652
1653
1654
  	/*
  	 * RR tasks need a special form of timeslice management.
  	 * FIFO tasks have no timeslices.
  	 */
  	if (p->policy != SCHED_RR)
  		return;
fa717060f   Peter Zijlstra   sched: sched_rt_e...
1655
  	if (--p->rt.time_slice)
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1656
  		return;
fa717060f   Peter Zijlstra   sched: sched_rt_e...
1657
  	p->rt.time_slice = DEF_TIMESLICE;
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1658

98fbc7985   Dmitry Adamushko   sched: optimize t...
1659
1660
1661
1662
  	/*
  	 * Requeue to the end of the queue if we are not the only
  	 * element on the queue:
  	 */
fa717060f   Peter Zijlstra   sched: sched_rt_e...
1663
  	if (p->rt.run_list.prev != p->rt.run_list.next) {
7ebefa8ce   Dmitry Adamushko   sched: rework of ...
1664
  		requeue_task_rt(rq, p, 0);
98fbc7985   Dmitry Adamushko   sched: optimize t...
1665
1666
  		set_tsk_need_resched(p);
  	}
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1667
  }
83b699ed2   Srivatsa Vaddagiri   sched: revert rec...
1668
1669
1670
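  /*
   * Called when a task becomes rq->curr without going through
   * pick_next_task_rt (e.g. after a policy or priority change): stamp
   * its exec_start and take it off the pushable list, since the running
   * task is never pushable.
   */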
  static void set_curr_task_rt(struct rq *rq)
  {
  	struct task_struct *p = rq->curr;
305e6835e   Venkatesh Pallipadi   sched: Do not acc...
1671
  	p->se.exec_start = rq->clock_task;
917b627d4   Gregory Haskins   sched: create "pu...
1672
1673
1674
  
  	/* The running task is never eligible for pushing */
  	dequeue_pushable_task(rq, p);
83b699ed2   Srivatsa Vaddagiri   sched: revert rec...
1675
  }
6d686f456   H Hartley Sweeten   sched: Don't expo...
1676
  static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
0d721cead   Peter Williams   sched: Simplify s...
1677
1678
1679
1680
1681
1682
1683
1684
1685
  {
  	/*
  	 * Time slice is 0 for SCHED_FIFO tasks
  	 */
  	if (task->policy == SCHED_RR)
  		return DEF_TIMESLICE;
  	else
  		return 0;
  }
2abdad0a4   Harvey Harrison   sched: make rt_sc...
1686
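  /*
   * The sched_class methods for SCHED_FIFO/SCHED_RR. ->next chains to
   * the fair class, so the core falls back to CFS only when no RT task
   * is runnable.
   */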
  static const struct sched_class rt_sched_class = {
5522d5d5f   Ingo Molnar   sched: mark sched...
1687
  	.next			= &fair_sched_class,
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1688
1689
1690
1691
1692
1693
1694
1695
  	.enqueue_task		= enqueue_task_rt,
  	.dequeue_task		= dequeue_task_rt,
  	.yield_task		= yield_task_rt,
  
  	.check_preempt_curr	= check_preempt_curr_rt,
  
  	.pick_next_task		= pick_next_task_rt,
  	.put_prev_task		= put_prev_task_rt,
681f3e685   Peter Williams   sched: isolate SM...
1696
  #ifdef CONFIG_SMP
4ce72a2c0   Li Zefan   sched: add CONFIG...
1697
  	.select_task_rq		= select_task_rq_rt,
73fe6aae8   Gregory Haskins   sched: add RT-bal...
1698
  	.set_cpus_allowed       = set_cpus_allowed_rt,
1f11eb6a8   Gregory Haskins   sched: fix cpupri...
1699
1700
  	.rq_online              = rq_online_rt,
  	.rq_offline             = rq_offline_rt,
9a897c5a6   Steven Rostedt   sched: RT-balance...
1701
1702
  	.pre_schedule		= pre_schedule_rt,
  	.post_schedule		= post_schedule_rt,
efbbd05a5   Peter Zijlstra   sched: Add pre an...
1703
  	.task_woken		= task_woken_rt,
cb4698450   Steven Rostedt   sched: RT-balance...
1704
  	.switched_from		= switched_from_rt,
681f3e685   Peter Williams   sched: isolate SM...
1705
  #endif
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1706

83b699ed2   Srivatsa Vaddagiri   sched: revert rec...
1707
  	.set_curr_task          = set_curr_task_rt,
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1708
  	.task_tick		= task_tick_rt,
cb4698450   Steven Rostedt   sched: RT-balance...
1709

0d721cead   Peter Williams   sched: Simplify s...
1710
  	.get_rr_interval	= get_rr_interval_rt,
cb4698450   Steven Rostedt   sched: RT-balance...
1711
1712
  	.prio_changed		= prio_changed_rt,
  	.switched_to		= switched_to_rt,
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1713
  };
ada18de2e   Peter Zijlstra   sched: debug: add...
1714
1715
1716
1717
1718
1719
  
  #ifdef CONFIG_SCHED_DEBUG
  extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
  
  static void print_rt_stats(struct seq_file *m, int cpu)
  {
ec514c487   Cheng Xu   sched: Fix rt_rq ...
1720
  	rt_rq_iter_t iter;
ada18de2e   Peter Zijlstra   sched: debug: add...
1721
1722
1723
  	struct rt_rq *rt_rq;
  
  	rcu_read_lock();
ec514c487   Cheng Xu   sched: Fix rt_rq ...
1724
  	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
ada18de2e   Peter Zijlstra   sched: debug: add...
1725
1726
1727
  		print_rt_rq(m, cpu, rt_rq);
  	rcu_read_unlock();
  }
55e12e5e7   Dhaval Giani   sched: make sched...
1728
  #endif /* CONFIG_SCHED_DEBUG */
0e3900e6d   Rusty Russell   sched: convert lo...
1729