Blame view

kernel/sched_rt.c 41.9 KB
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1
2
3
4
  /*
   * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
   * policies)
   */
8f48894fc   Peter Zijlstra   sched: Add debug ...
5
6
7
  #ifdef CONFIG_RT_GROUP_SCHED
  
  #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
398a153b1   Gregory Haskins   sched: fix build ...
8
9
  static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
  {
8f48894fc   Peter Zijlstra   sched: Add debug ...
10
11
12
  #ifdef CONFIG_SCHED_DEBUG
  	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
  #endif
398a153b1   Gregory Haskins   sched: fix build ...
13
14
  	return container_of(rt_se, struct task_struct, rt);
  }
398a153b1   Gregory Haskins   sched: fix build ...
15
16
17
18
19
20
21
22
23
24
25
  static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
  {
  	return rt_rq->rq;
  }
  
  static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
  {
  	return rt_se->rt_rq;
  }
  
  #else /* CONFIG_RT_GROUP_SCHED */
a1ba4d8ba   Peter Zijlstra   sched_rt: Fix ove...
26
  #define rt_entity_is_task(rt_se) (1)
8f48894fc   Peter Zijlstra   sched: Add debug ...
27
28
29
30
  static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
  {
  	return container_of(rt_se, struct task_struct, rt);
  }
398a153b1   Gregory Haskins   sched: fix build ...
31
32
33
34
35
36
37
38
39
40
41
42
43
44
  static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
  {
  	return container_of(rt_rq, struct rq, rt);
  }
  
  static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
  {
  	struct task_struct *p = rt_task_of(rt_se);
  	struct rq *rq = task_rq(p);
  
  	return &rq->rt;
  }
  
  #endif /* CONFIG_RT_GROUP_SCHED */
4fd29176b   Steven Rostedt   sched: add rt-ove...
45
  #ifdef CONFIG_SMP
84de42748   Ingo Molnar   sched: clean up k...
46

637f50851   Gregory Haskins   sched: only balan...
47
  static inline int rt_overloaded(struct rq *rq)
4fd29176b   Steven Rostedt   sched: add rt-ove...
48
  {
637f50851   Gregory Haskins   sched: only balan...
49
  	return atomic_read(&rq->rd->rto_count);
4fd29176b   Steven Rostedt   sched: add rt-ove...
50
  }
84de42748   Ingo Molnar   sched: clean up k...
51

4fd29176b   Steven Rostedt   sched: add rt-ove...
52
53
  static inline void rt_set_overload(struct rq *rq)
  {
1f11eb6a8   Gregory Haskins   sched: fix cpupri...
54
55
  	if (!rq->online)
  		return;
c6c4927b2   Rusty Russell   sched: convert st...
56
  	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
4fd29176b   Steven Rostedt   sched: add rt-ove...
57
58
59
60
61
62
63
64
  	/*
  	 * Make sure the mask is visible before we set
  	 * the overload count. That is checked to determine
  	 * if we should look at the mask. It would be a shame
  	 * if we looked at the mask, but the mask was not
  	 * updated yet.
  	 */
  	wmb();
637f50851   Gregory Haskins   sched: only balan...
65
  	atomic_inc(&rq->rd->rto_count);
4fd29176b   Steven Rostedt   sched: add rt-ove...
66
  }
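/*
 * rt_set_overload()/rt_clear_overload() keep rd->rto_mask and rd->rto_count
 * in sync for this CPU.  The wmb() above publishes the mask bit before the
 * count is bumped, so a CPU that sees rto_count != 0 via rt_overloaded()
 * will also find this CPU set in rto_mask.
 */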
84de42748   Ingo Molnar   sched: clean up k...
67

4fd29176b   Steven Rostedt   sched: add rt-ove...
68
69
  static inline void rt_clear_overload(struct rq *rq)
  {
1f11eb6a8   Gregory Haskins   sched: fix cpupri...
70
71
  	if (!rq->online)
  		return;
4fd29176b   Steven Rostedt   sched: add rt-ove...
72
  	/* the order here really doesn't matter */
637f50851   Gregory Haskins   sched: only balan...
73
  	atomic_dec(&rq->rd->rto_count);
c6c4927b2   Rusty Russell   sched: convert st...
74
  	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
4fd29176b   Steven Rostedt   sched: add rt-ove...
75
  }
73fe6aae8   Gregory Haskins   sched: add RT-bal...
76

398a153b1   Gregory Haskins   sched: fix build ...
77
  static void update_rt_migration(struct rt_rq *rt_rq)
73fe6aae8   Gregory Haskins   sched: add RT-bal...
78
  {
a1ba4d8ba   Peter Zijlstra   sched_rt: Fix ove...
79
  	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
398a153b1   Gregory Haskins   sched: fix build ...
80
81
82
  		if (!rt_rq->overloaded) {
  			rt_set_overload(rq_of_rt_rq(rt_rq));
  			rt_rq->overloaded = 1;
cdc8eb984   Gregory Haskins   sched: RT-balance...
83
  		}
398a153b1   Gregory Haskins   sched: fix build ...
84
85
86
  	} else if (rt_rq->overloaded) {
  		rt_clear_overload(rq_of_rt_rq(rt_rq));
  		rt_rq->overloaded = 0;
637f50851   Gregory Haskins   sched: only balan...
87
  	}
73fe6aae8   Gregory Haskins   sched: add RT-bal...
88
  }
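/*
 * An rt_rq is advertised as overloaded only while it has more than one
 * RT task queued (rt_nr_total > 1) and at least one of them is allowed to
 * run elsewhere (rt_nr_migratory); with a single task, or only pinned
 * tasks, there is nothing that could usefully be pushed to another CPU.
 */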
4fd29176b   Steven Rostedt   sched: add rt-ove...
89

398a153b1   Gregory Haskins   sched: fix build ...
90
91
  static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
a1ba4d8ba   Peter Zijlstra   sched_rt: Fix ove...
92
93
94
95
96
97
  	if (!rt_entity_is_task(rt_se))
  		return;
  
  	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
  
  	rt_rq->rt_nr_total++;
398a153b1   Gregory Haskins   sched: fix build ...
98
99
100
101
102
103
104
105
  	if (rt_se->nr_cpus_allowed > 1)
  		rt_rq->rt_nr_migratory++;
  
  	update_rt_migration(rt_rq);
  }
  
  static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
a1ba4d8ba   Peter Zijlstra   sched_rt: Fix ove...
106
107
108
109
110
111
  	if (!rt_entity_is_task(rt_se))
  		return;
  
  	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
  
  	rt_rq->rt_nr_total--;
398a153b1   Gregory Haskins   sched: fix build ...
112
113
114
115
116
  	if (rt_se->nr_cpus_allowed > 1)
  		rt_rq->rt_nr_migratory--;
  
  	update_rt_migration(rt_rq);
  }
5181f4a46   Steven Rostedt   sched: Use pushab...
117
118
119
120
  static inline int has_pushable_tasks(struct rq *rq)
  {
  	return !plist_head_empty(&rq->rt.pushable_tasks);
  }
917b627d4   Gregory Haskins   sched: create "pu...
121
122
123
124
125
  static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
  {
  	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
  	plist_node_init(&p->pushable_tasks, p->prio);
  	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
5181f4a46   Steven Rostedt   sched: Use pushab...
126
127
128
129
  
  	/* Update the highest prio pushable task */
  	if (p->prio < rq->rt.highest_prio.next)
  		rq->rt.highest_prio.next = p->prio;
917b627d4   Gregory Haskins   sched: create "pu...
130
131
132
133
134
  }
  
  static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
  {
  	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
917b627d4   Gregory Haskins   sched: create "pu...
135

5181f4a46   Steven Rostedt   sched: Use pushab...
136
137
138
139
140
141
142
  	/* Update the new highest prio pushable task */
  	if (has_pushable_tasks(rq)) {
  		p = plist_first_entry(&rq->rt.pushable_tasks,
  				      struct task_struct, pushable_tasks);
  		rq->rt.highest_prio.next = p->prio;
  	} else
  		rq->rt.highest_prio.next = MAX_RT_PRIO;
bcf08df3b   Ingo Molnar   sched: Fix cpupri...
143
  }
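/*
 * rq->rt.pushable_tasks is a priority-ordered plist of queued RT tasks
 * that may run on more than one CPU.  The enqueue/dequeue helpers above
 * also keep rq->rt.highest_prio.next pointing at the best such candidate,
 * which the push/pull balancing code consults before doing any real work.
 */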
917b627d4   Gregory Haskins   sched: create "pu...
144
  #else
ceacc2c1c   Peter Zijlstra   sched: make plist...
145
  static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
fa85ae241   Peter Zijlstra   sched: rt time limit
146
  {
6f505b164   Peter Zijlstra   sched: rt group s...
147
  }
ceacc2c1c   Peter Zijlstra   sched: make plist...
148
149
150
  static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
  {
  }
b07430ac3   Gregory Haskins   sched: de CPP-ify...
151
  static inline
ceacc2c1c   Peter Zijlstra   sched: make plist...
152
153
154
  void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  }
398a153b1   Gregory Haskins   sched: fix build ...
155
  static inline
ceacc2c1c   Peter Zijlstra   sched: make plist...
156
157
158
  void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  }
917b627d4   Gregory Haskins   sched: create "pu...
159

4fd29176b   Steven Rostedt   sched: add rt-ove...
160
  #endif /* CONFIG_SMP */
6f505b164   Peter Zijlstra   sched: rt group s...
161
162
163
164
  static inline int on_rt_rq(struct sched_rt_entity *rt_se)
  {
  	return !list_empty(&rt_se->run_list);
  }
052f1dc7e   Peter Zijlstra   sched: rt-group: ...
165
  #ifdef CONFIG_RT_GROUP_SCHED
6f505b164   Peter Zijlstra   sched: rt group s...
166

9f0c1e560   Peter Zijlstra   sched: rt-group: ...
167
  static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
6f505b164   Peter Zijlstra   sched: rt group s...
168
169
  {
  	if (!rt_rq->tg)
9f0c1e560   Peter Zijlstra   sched: rt-group: ...
170
  		return RUNTIME_INF;
6f505b164   Peter Zijlstra   sched: rt group s...
171

ac086bc22   Peter Zijlstra   sched: rt-group: ...
172
173
174
175
176
177
  	return rt_rq->rt_runtime;
  }
  
  static inline u64 sched_rt_period(struct rt_rq *rt_rq)
  {
  	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
6f505b164   Peter Zijlstra   sched: rt group s...
178
  }
ec514c487   Cheng Xu   sched: Fix rt_rq ...
179
  typedef struct task_group *rt_rq_iter_t;
1c09ab0d2   Yong Zhang   sched: Skip autog...
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
  static inline struct task_group *next_task_group(struct task_group *tg)
  {
  	do {
  		tg = list_entry_rcu(tg->list.next,
  			typeof(struct task_group), list);
  	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));
  
  	if (&tg->list == &task_groups)
  		tg = NULL;
  
  	return tg;
  }
  
  #define for_each_rt_rq(rt_rq, iter, rq)					\
  	for (iter = container_of(&task_groups, typeof(*iter), list);	\
  		(iter = next_task_group(iter)) &&			\
  		(rt_rq = iter->rt_rq[cpu_of(rq)]);)
ec514c487   Cheng Xu   sched: Fix rt_rq ...
197

3d4b47b4b   Peter Zijlstra   sched: Implement ...
198
199
200
201
202
203
204
205
206
207
  static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
  {
  	list_add_rcu(&rt_rq->leaf_rt_rq_list,
  			&rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
  }
  
  static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
  {
  	list_del_rcu(&rt_rq->leaf_rt_rq_list);
  }
6f505b164   Peter Zijlstra   sched: rt group s...
208
  #define for_each_leaf_rt_rq(rt_rq, rq) \
80f40ee4a   Bharata B Rao   sched: use RCU va...
209
  	list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
6f505b164   Peter Zijlstra   sched: rt group s...
210

6f505b164   Peter Zijlstra   sched: rt group s...
211
212
213
214
215
216
217
  #define for_each_sched_rt_entity(rt_se) \
  	for (; rt_se; rt_se = rt_se->parent)
  
  static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
  {
  	return rt_se->my_q;
  }
37dad3fce   Thomas Gleixner   sched: Implement ...
218
  static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
6f505b164   Peter Zijlstra   sched: rt group s...
219
  static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
9f0c1e560   Peter Zijlstra   sched: rt-group: ...
220
  static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
6f505b164   Peter Zijlstra   sched: rt group s...
221
  {
f6121f4f8   Dario Faggioli   sched_rt.c: resch...
222
  	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
74b7eb588   Yong Zhang   sched: Change usa...
223
  	struct sched_rt_entity *rt_se;
0c3b91680   Balbir Singh   sched: Fix sched ...
224
225
226
  	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
  
  	rt_se = rt_rq->tg->rt_se[cpu];
6f505b164   Peter Zijlstra   sched: rt group s...
227

f6121f4f8   Dario Faggioli   sched_rt.c: resch...
228
229
  	if (rt_rq->rt_nr_running) {
  		if (rt_se && !on_rt_rq(rt_se))
37dad3fce   Thomas Gleixner   sched: Implement ...
230
  			enqueue_rt_entity(rt_se, false);
e864c499d   Gregory Haskins   sched: track the ...
231
  		if (rt_rq->highest_prio.curr < curr->prio)
1020387f5   Peter Zijlstra   sched: rt-group: ...
232
  			resched_task(curr);
6f505b164   Peter Zijlstra   sched: rt group s...
233
234
  	}
  }
9f0c1e560   Peter Zijlstra   sched: rt-group: ...
235
  static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
6f505b164   Peter Zijlstra   sched: rt group s...
236
  {
74b7eb588   Yong Zhang   sched: Change usa...
237
  	struct sched_rt_entity *rt_se;
0c3b91680   Balbir Singh   sched: Fix sched ...
238
  	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
74b7eb588   Yong Zhang   sched: Change usa...
239

0c3b91680   Balbir Singh   sched: Fix sched ...
240
  	rt_se = rt_rq->tg->rt_se[cpu];
6f505b164   Peter Zijlstra   sched: rt group s...
241
242
243
244
  
  	if (rt_se && on_rt_rq(rt_se))
  		dequeue_rt_entity(rt_se);
  }
23b0fdfc9   Peter Zijlstra   sched: rt-group: ...
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
  static inline int rt_rq_throttled(struct rt_rq *rt_rq)
  {
  	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
  }
  
  static int rt_se_boosted(struct sched_rt_entity *rt_se)
  {
  	struct rt_rq *rt_rq = group_rt_rq(rt_se);
  	struct task_struct *p;
  
  	if (rt_rq)
  		return !!rt_rq->rt_nr_boosted;
  
  	p = rt_task_of(rt_se);
  	return p->prio != p->normal_prio;
  }
d0b27fa77   Peter Zijlstra   sched: rt-group: ...
261
  #ifdef CONFIG_SMP
c6c4927b2   Rusty Russell   sched: convert st...
262
  static inline const struct cpumask *sched_rt_period_mask(void)
d0b27fa77   Peter Zijlstra   sched: rt-group: ...
263
264
265
  {
  	return cpu_rq(smp_processor_id())->rd->span;
  }
6f505b164   Peter Zijlstra   sched: rt group s...
266
  #else
c6c4927b2   Rusty Russell   sched: convert st...
267
  static inline const struct cpumask *sched_rt_period_mask(void)
d0b27fa77   Peter Zijlstra   sched: rt-group: ...
268
  {
c6c4927b2   Rusty Russell   sched: convert st...
269
  	return cpu_online_mask;
d0b27fa77   Peter Zijlstra   sched: rt-group: ...
270
271
  }
  #endif
6f505b164   Peter Zijlstra   sched: rt group s...
272

d0b27fa77   Peter Zijlstra   sched: rt-group: ...
273
274
  static inline
  struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
6f505b164   Peter Zijlstra   sched: rt group s...
275
  {
d0b27fa77   Peter Zijlstra   sched: rt-group: ...
276
277
  	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
  }
9f0c1e560   Peter Zijlstra   sched: rt-group: ...
278

ac086bc22   Peter Zijlstra   sched: rt-group: ...
279
280
281
282
  static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
  {
  	return &rt_rq->tg->rt_bandwidth;
  }
55e12e5e7   Dhaval Giani   sched: make sched...
283
  #else /* !CONFIG_RT_GROUP_SCHED */
d0b27fa77   Peter Zijlstra   sched: rt-group: ...
284
285
286
  
  static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
  {
ac086bc22   Peter Zijlstra   sched: rt-group: ...
287
288
289
290
291
292
  	return rt_rq->rt_runtime;
  }
  
  static inline u64 sched_rt_period(struct rt_rq *rt_rq)
  {
  	return ktime_to_ns(def_rt_bandwidth.rt_period);
6f505b164   Peter Zijlstra   sched: rt group s...
293
  }
ec514c487   Cheng Xu   sched: Fix rt_rq ...
294
295
296
297
  typedef struct rt_rq *rt_rq_iter_t;
  
  #define for_each_rt_rq(rt_rq, iter, rq) \
  	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
3d4b47b4b   Peter Zijlstra   sched: Implement ...
298
299
300
301
302
303
304
  static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
  {
  }
  
  static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
  {
  }
6f505b164   Peter Zijlstra   sched: rt group s...
305
306
  #define for_each_leaf_rt_rq(rt_rq, rq) \
  	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
6f505b164   Peter Zijlstra   sched: rt group s...
307
308
309
310
311
312
313
  #define for_each_sched_rt_entity(rt_se) \
  	for (; rt_se; rt_se = NULL)
  
  static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
  {
  	return NULL;
  }
9f0c1e560   Peter Zijlstra   sched: rt-group: ...
314
  static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
6f505b164   Peter Zijlstra   sched: rt group s...
315
  {
f3ade8378   John Blackwood   sched: fix sched_...
316
317
  	if (rt_rq->rt_nr_running)
  		resched_task(rq_of_rt_rq(rt_rq)->curr);
6f505b164   Peter Zijlstra   sched: rt group s...
318
  }
9f0c1e560   Peter Zijlstra   sched: rt-group: ...
319
  static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
6f505b164   Peter Zijlstra   sched: rt group s...
320
321
  {
  }
23b0fdfc9   Peter Zijlstra   sched: rt-group: ...
322
323
324
325
  static inline int rt_rq_throttled(struct rt_rq *rt_rq)
  {
  	return rt_rq->rt_throttled;
  }
d0b27fa77   Peter Zijlstra   sched: rt-group: ...
326

c6c4927b2   Rusty Russell   sched: convert st...
327
  static inline const struct cpumask *sched_rt_period_mask(void)
d0b27fa77   Peter Zijlstra   sched: rt-group: ...
328
  {
c6c4927b2   Rusty Russell   sched: convert st...
329
  	return cpu_online_mask;
d0b27fa77   Peter Zijlstra   sched: rt-group: ...
330
331
332
333
334
335
336
  }
  
  static inline
  struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
  {
  	return &cpu_rq(cpu)->rt;
  }
ac086bc22   Peter Zijlstra   sched: rt-group: ...
337
338
339
340
  static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
  {
  	return &def_rt_bandwidth;
  }
55e12e5e7   Dhaval Giani   sched: make sched...
341
  #endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa77   Peter Zijlstra   sched: rt-group: ...
342

ac086bc22   Peter Zijlstra   sched: rt-group: ...
343
  #ifdef CONFIG_SMP
78333cdd0   Peter Zijlstra   sched: add some c...
344
345
346
  /*
   * We ran out of runtime, see if we can borrow some from our neighbours.
   */
b79f3833d   Peter Zijlstra   sched: rt: fix SM...
347
  static int do_balance_runtime(struct rt_rq *rt_rq)
ac086bc22   Peter Zijlstra   sched: rt-group: ...
348
349
350
351
352
  {
  	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
  	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
  	int i, weight, more = 0;
  	u64 rt_period;
c6c4927b2   Rusty Russell   sched: convert st...
353
  	weight = cpumask_weight(rd->span);
ac086bc22   Peter Zijlstra   sched: rt-group: ...
354

0986b11b1   Thomas Gleixner   sched: Convert rt...
355
  	raw_spin_lock(&rt_b->rt_runtime_lock);
ac086bc22   Peter Zijlstra   sched: rt-group: ...
356
  	rt_period = ktime_to_ns(rt_b->rt_period);
c6c4927b2   Rusty Russell   sched: convert st...
357
  	for_each_cpu(i, rd->span) {
ac086bc22   Peter Zijlstra   sched: rt-group: ...
358
359
360
361
362
  		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
  		s64 diff;
  
  		if (iter == rt_rq)
  			continue;
0986b11b1   Thomas Gleixner   sched: Convert rt...
363
  		raw_spin_lock(&iter->rt_runtime_lock);
78333cdd0   Peter Zijlstra   sched: add some c...
364
365
366
367
368
  		/*
  		 * Either all rqs have inf runtime and there's nothing to steal
  		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
  		 */
7def2be1d   Peter Zijlstra   sched: fix hotplu...
369
370
  		if (iter->rt_runtime == RUNTIME_INF)
  			goto next;
78333cdd0   Peter Zijlstra   sched: add some c...
371
372
373
374
  		/*
  		 * From runqueues with spare time, take 1/n part of their
  		 * spare time, but no more than our period.
  		 */
ac086bc22   Peter Zijlstra   sched: rt-group: ...
375
376
  		diff = iter->rt_runtime - iter->rt_time;
  		if (diff > 0) {
58838cf3c   Peter Zijlstra   sched: clean up c...
377
  			diff = div_u64((u64)diff, weight);
ac086bc22   Peter Zijlstra   sched: rt-group: ...
378
379
380
381
382
383
  			if (rt_rq->rt_runtime + diff > rt_period)
  				diff = rt_period - rt_rq->rt_runtime;
  			iter->rt_runtime -= diff;
  			rt_rq->rt_runtime += diff;
  			more = 1;
  			if (rt_rq->rt_runtime == rt_period) {
0986b11b1   Thomas Gleixner   sched: Convert rt...
384
  				raw_spin_unlock(&iter->rt_runtime_lock);
ac086bc22   Peter Zijlstra   sched: rt-group: ...
385
386
387
  				break;
  			}
  		}
7def2be1d   Peter Zijlstra   sched: fix hotplu...
388
  next:
0986b11b1   Thomas Gleixner   sched: Convert rt...
389
  		raw_spin_unlock(&iter->rt_runtime_lock);
ac086bc22   Peter Zijlstra   sched: rt-group: ...
390
  	}
0986b11b1   Thomas Gleixner   sched: Convert rt...
391
  	raw_spin_unlock(&rt_b->rt_runtime_lock);
ac086bc22   Peter Zijlstra   sched: rt-group: ...
392
393
394
  
  	return more;
  }
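/*
 * Example of the split above: in a root domain spanning 4 CPUs
 * (weight == 4), a neighbour with 40ms of unused budget
 * (rt_runtime - rt_time) donates 40ms / 4 = 10ms to us, and the loop
 * stops early once our rt_runtime reaches rt_period, since more than a
 * full period of budget can never be used anyway.
 */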
7def2be1d   Peter Zijlstra   sched: fix hotplu...
395

78333cdd0   Peter Zijlstra   sched: add some c...
396
397
398
  /*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
   */
7def2be1d   Peter Zijlstra   sched: fix hotplu...
399
400
401
  static void __disable_runtime(struct rq *rq)
  {
  	struct root_domain *rd = rq->rd;
ec514c487   Cheng Xu   sched: Fix rt_rq ...
402
  	rt_rq_iter_t iter;
7def2be1d   Peter Zijlstra   sched: fix hotplu...
403
404
405
406
  	struct rt_rq *rt_rq;
  
  	if (unlikely(!scheduler_running))
  		return;
ec514c487   Cheng Xu   sched: Fix rt_rq ...
407
  	for_each_rt_rq(rt_rq, iter, rq) {
7def2be1d   Peter Zijlstra   sched: fix hotplu...
408
409
410
  		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
  		s64 want;
  		int i;
0986b11b1   Thomas Gleixner   sched: Convert rt...
411
412
  		raw_spin_lock(&rt_b->rt_runtime_lock);
  		raw_spin_lock(&rt_rq->rt_runtime_lock);
78333cdd0   Peter Zijlstra   sched: add some c...
413
414
415
416
417
  		/*
  		 * Either we're all inf and nobody needs to borrow, or we're
  		 * already disabled and thus have nothing to do, or we have
  		 * exactly the right amount of runtime to take out.
  		 */
7def2be1d   Peter Zijlstra   sched: fix hotplu...
418
419
420
  		if (rt_rq->rt_runtime == RUNTIME_INF ||
  				rt_rq->rt_runtime == rt_b->rt_runtime)
  			goto balanced;
0986b11b1   Thomas Gleixner   sched: Convert rt...
421
  		raw_spin_unlock(&rt_rq->rt_runtime_lock);
7def2be1d   Peter Zijlstra   sched: fix hotplu...
422

78333cdd0   Peter Zijlstra   sched: add some c...
423
424
425
426
427
  		/*
  		 * Calculate the difference between what we started out with
		 * and what we currently have; that's the amount of runtime
		 * we lent out and now have to reclaim.
  		 */
7def2be1d   Peter Zijlstra   sched: fix hotplu...
428
  		want = rt_b->rt_runtime - rt_rq->rt_runtime;
78333cdd0   Peter Zijlstra   sched: add some c...
429
430
431
  		/*
  		 * Greedy reclaim, take back as much as we can.
  		 */
c6c4927b2   Rusty Russell   sched: convert st...
432
  		for_each_cpu(i, rd->span) {
7def2be1d   Peter Zijlstra   sched: fix hotplu...
433
434
  			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
  			s64 diff;
78333cdd0   Peter Zijlstra   sched: add some c...
435
436
437
  			/*
  			 * Can't reclaim from ourselves or disabled runqueues.
  			 */
f1679d084   Peter Zijlstra   sched: fix rt-ban...
438
  			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
7def2be1d   Peter Zijlstra   sched: fix hotplu...
439
  				continue;
0986b11b1   Thomas Gleixner   sched: Convert rt...
440
  			raw_spin_lock(&iter->rt_runtime_lock);
7def2be1d   Peter Zijlstra   sched: fix hotplu...
441
442
443
444
445
446
447
448
  			if (want > 0) {
  				diff = min_t(s64, iter->rt_runtime, want);
  				iter->rt_runtime -= diff;
  				want -= diff;
  			} else {
  				iter->rt_runtime -= want;
  				want -= want;
  			}
0986b11b1   Thomas Gleixner   sched: Convert rt...
449
  			raw_spin_unlock(&iter->rt_runtime_lock);
7def2be1d   Peter Zijlstra   sched: fix hotplu...
450
451
452
453
  
  			if (!want)
  				break;
  		}
0986b11b1   Thomas Gleixner   sched: Convert rt...
454
  		raw_spin_lock(&rt_rq->rt_runtime_lock);
78333cdd0   Peter Zijlstra   sched: add some c...
455
456
457
458
  		/*
  		 * We cannot be left wanting - that would mean some runtime
  		 * leaked out of the system.
  		 */
7def2be1d   Peter Zijlstra   sched: fix hotplu...
459
460
  		BUG_ON(want);
  balanced:
78333cdd0   Peter Zijlstra   sched: add some c...
461
462
463
464
  		/*
  		 * Disable all the borrow logic by pretending we have inf
  		 * runtime - in which case borrowing doesn't make sense.
  		 */
7def2be1d   Peter Zijlstra   sched: fix hotplu...
465
  		rt_rq->rt_runtime = RUNTIME_INF;
0986b11b1   Thomas Gleixner   sched: Convert rt...
466
467
  		raw_spin_unlock(&rt_rq->rt_runtime_lock);
  		raw_spin_unlock(&rt_b->rt_runtime_lock);
7def2be1d   Peter Zijlstra   sched: fix hotplu...
468
469
470
471
472
473
  	}
  }
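/*
 * Once __disable_runtime() is done every rt_rq on this rq reports
 * RUNTIME_INF, which both disables throttling locally and tells
 * do_balance_runtime() on other CPUs that there is nothing to steal here.
 * The BUG_ON(want) checks that all runtime we had lent out was recovered.
 */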
  
  static void disable_runtime(struct rq *rq)
  {
  	unsigned long flags;
05fa785cf   Thomas Gleixner   sched: Convert rq...
474
  	raw_spin_lock_irqsave(&rq->lock, flags);
7def2be1d   Peter Zijlstra   sched: fix hotplu...
475
  	__disable_runtime(rq);
05fa785cf   Thomas Gleixner   sched: Convert rq...
476
  	raw_spin_unlock_irqrestore(&rq->lock, flags);
7def2be1d   Peter Zijlstra   sched: fix hotplu...
477
478
479
480
  }
  
  static void __enable_runtime(struct rq *rq)
  {
ec514c487   Cheng Xu   sched: Fix rt_rq ...
481
  	rt_rq_iter_t iter;
7def2be1d   Peter Zijlstra   sched: fix hotplu...
482
483
484
485
  	struct rt_rq *rt_rq;
  
  	if (unlikely(!scheduler_running))
  		return;
78333cdd0   Peter Zijlstra   sched: add some c...
486
487
488
  	/*
  	 * Reset each runqueue's bandwidth settings
  	 */
ec514c487   Cheng Xu   sched: Fix rt_rq ...
489
  	for_each_rt_rq(rt_rq, iter, rq) {
7def2be1d   Peter Zijlstra   sched: fix hotplu...
490
  		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
0986b11b1   Thomas Gleixner   sched: Convert rt...
491
492
  		raw_spin_lock(&rt_b->rt_runtime_lock);
  		raw_spin_lock(&rt_rq->rt_runtime_lock);
7def2be1d   Peter Zijlstra   sched: fix hotplu...
493
494
  		rt_rq->rt_runtime = rt_b->rt_runtime;
  		rt_rq->rt_time = 0;
baf25731e   Zhang, Yanmin   sched: fix 2.6.27...
495
  		rt_rq->rt_throttled = 0;
0986b11b1   Thomas Gleixner   sched: Convert rt...
496
497
  		raw_spin_unlock(&rt_rq->rt_runtime_lock);
  		raw_spin_unlock(&rt_b->rt_runtime_lock);
7def2be1d   Peter Zijlstra   sched: fix hotplu...
498
499
500
501
502
503
  	}
  }
  
  static void enable_runtime(struct rq *rq)
  {
  	unsigned long flags;
05fa785cf   Thomas Gleixner   sched: Convert rq...
504
  	raw_spin_lock_irqsave(&rq->lock, flags);
7def2be1d   Peter Zijlstra   sched: fix hotplu...
505
  	__enable_runtime(rq);
05fa785cf   Thomas Gleixner   sched: Convert rq...
506
  	raw_spin_unlock_irqrestore(&rq->lock, flags);
7def2be1d   Peter Zijlstra   sched: fix hotplu...
507
  }
eff6549b9   Peter Zijlstra   sched: rt: move s...
508
509
510
511
512
  static int balance_runtime(struct rt_rq *rt_rq)
  {
  	int more = 0;
  
  	if (rt_rq->rt_time > rt_rq->rt_runtime) {
0986b11b1   Thomas Gleixner   sched: Convert rt...
513
  		raw_spin_unlock(&rt_rq->rt_runtime_lock);
eff6549b9   Peter Zijlstra   sched: rt: move s...
514
  		more = do_balance_runtime(rt_rq);
0986b11b1   Thomas Gleixner   sched: Convert rt...
515
  		raw_spin_lock(&rt_rq->rt_runtime_lock);
eff6549b9   Peter Zijlstra   sched: rt: move s...
516
517
518
519
  	}
  
  	return more;
  }
55e12e5e7   Dhaval Giani   sched: make sched...
520
  #else /* !CONFIG_SMP */
eff6549b9   Peter Zijlstra   sched: rt: move s...
521
522
523
524
  static inline int balance_runtime(struct rt_rq *rt_rq)
  {
  	return 0;
  }
55e12e5e7   Dhaval Giani   sched: make sched...
525
  #endif /* CONFIG_SMP */
ac086bc22   Peter Zijlstra   sched: rt-group: ...
526

eff6549b9   Peter Zijlstra   sched: rt: move s...
527
528
529
  static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
  {
  	int i, idle = 1;
c6c4927b2   Rusty Russell   sched: convert st...
530
  	const struct cpumask *span;
eff6549b9   Peter Zijlstra   sched: rt: move s...
531

0b148fa04   Peter Zijlstra   sched: rt-bandwid...
532
  	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
eff6549b9   Peter Zijlstra   sched: rt: move s...
533
534
535
  		return 1;
  
  	span = sched_rt_period_mask();
c6c4927b2   Rusty Russell   sched: convert st...
536
  	for_each_cpu(i, span) {
eff6549b9   Peter Zijlstra   sched: rt: move s...
537
538
539
  		int enqueue = 0;
  		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
  		struct rq *rq = rq_of_rt_rq(rt_rq);
05fa785cf   Thomas Gleixner   sched: Convert rq...
540
  		raw_spin_lock(&rq->lock);
eff6549b9   Peter Zijlstra   sched: rt: move s...
541
542
  		if (rt_rq->rt_time) {
  			u64 runtime;
0986b11b1   Thomas Gleixner   sched: Convert rt...
543
  			raw_spin_lock(&rt_rq->rt_runtime_lock);
eff6549b9   Peter Zijlstra   sched: rt: move s...
544
545
546
547
548
549
550
  			if (rt_rq->rt_throttled)
  				balance_runtime(rt_rq);
  			runtime = rt_rq->rt_runtime;
  			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
  			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
  				rt_rq->rt_throttled = 0;
  				enqueue = 1;
61eadef6a   Mike Galbraith   sched, rt: Update...
551
552
553
554
555
556
557
  
  				/*
  				 * Force a clock update if the CPU was idle,
  				 * lest wakeup -> unthrottle time accumulate.
  				 */
  				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
  					rq->skip_clock_update = -1;
eff6549b9   Peter Zijlstra   sched: rt: move s...
558
559
560
  			}
  			if (rt_rq->rt_time || rt_rq->rt_nr_running)
  				idle = 0;
0986b11b1   Thomas Gleixner   sched: Convert rt...
561
  			raw_spin_unlock(&rt_rq->rt_runtime_lock);
0c3b91680   Balbir Singh   sched: Fix sched ...
562
  		} else if (rt_rq->rt_nr_running) {
6c3df2551   Peter Zijlstra   sched: rt: dont s...
563
  			idle = 0;
0c3b91680   Balbir Singh   sched: Fix sched ...
564
565
566
  			if (!rt_rq_throttled(rt_rq))
  				enqueue = 1;
  		}
eff6549b9   Peter Zijlstra   sched: rt: move s...
567
568
569
  
  		if (enqueue)
  			sched_rt_rq_enqueue(rt_rq);
05fa785cf   Thomas Gleixner   sched: Convert rq...
570
  		raw_spin_unlock(&rq->lock);
eff6549b9   Peter Zijlstra   sched: rt: move s...
571
572
573
574
  	}
  
  	return idle;
  }
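/*
 * Called from the rt_bandwidth period timer: for every rt_rq in the span
 * the accumulated rt_time is reduced by up to overrun * runtime, queues
 * whose debt dropped below the budget are unthrottled and re-enqueued,
 * and the return value tells the caller whether the timer may go idle
 * (no rt_rq left with pending rt_time or runnable tasks).
 */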
ac086bc22   Peter Zijlstra   sched: rt-group: ...
575

6f505b164   Peter Zijlstra   sched: rt group s...
576
577
  static inline int rt_se_prio(struct sched_rt_entity *rt_se)
  {
052f1dc7e   Peter Zijlstra   sched: rt-group: ...
578
  #ifdef CONFIG_RT_GROUP_SCHED
6f505b164   Peter Zijlstra   sched: rt group s...
579
580
581
  	struct rt_rq *rt_rq = group_rt_rq(rt_se);
  
  	if (rt_rq)
e864c499d   Gregory Haskins   sched: track the ...
582
  		return rt_rq->highest_prio.curr;
6f505b164   Peter Zijlstra   sched: rt group s...
583
584
585
586
  #endif
  
  	return rt_task_of(rt_se)->prio;
  }
9f0c1e560   Peter Zijlstra   sched: rt-group: ...
587
  static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
6f505b164   Peter Zijlstra   sched: rt group s...
588
  {
9f0c1e560   Peter Zijlstra   sched: rt-group: ...
589
  	u64 runtime = sched_rt_runtime(rt_rq);
fa85ae241   Peter Zijlstra   sched: rt time limit
590

fa85ae241   Peter Zijlstra   sched: rt time limit
591
  	if (rt_rq->rt_throttled)
23b0fdfc9   Peter Zijlstra   sched: rt-group: ...
592
  		return rt_rq_throttled(rt_rq);
fa85ae241   Peter Zijlstra   sched: rt time limit
593

ac086bc22   Peter Zijlstra   sched: rt-group: ...
594
595
  	if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
  		return 0;
b79f3833d   Peter Zijlstra   sched: rt: fix SM...
596
597
598
599
  	balance_runtime(rt_rq);
  	runtime = sched_rt_runtime(rt_rq);
  	if (runtime == RUNTIME_INF)
  		return 0;
ac086bc22   Peter Zijlstra   sched: rt-group: ...
600

9f0c1e560   Peter Zijlstra   sched: rt-group: ...
601
  	if (rt_rq->rt_time > runtime) {
6f505b164   Peter Zijlstra   sched: rt group s...
602
  		rt_rq->rt_throttled = 1;
1c83437e8   Thomas Gleixner   sched: Warn on rt...
603
604
		printk_once(KERN_WARNING "sched: RT throttling activated\n");
23b0fdfc9   Peter Zijlstra   sched: rt-group: ...
605
  		if (rt_rq_throttled(rt_rq)) {
9f0c1e560   Peter Zijlstra   sched: rt-group: ...
606
  			sched_rt_rq_dequeue(rt_rq);
23b0fdfc9   Peter Zijlstra   sched: rt-group: ...
607
608
  			return 1;
  		}
fa85ae241   Peter Zijlstra   sched: rt time limit
609
610
611
612
  	}
  
  	return 0;
  }
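/*
 * Returns 1 when this rt_rq has used more than its (possibly just
 * re-balanced) budget for the current period; the caller reschedules so
 * the throttled queue stops running until the period timer refills it.
 */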
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
613
614
615
616
  /*
   * Update the current task's runtime statistics. Skip current tasks that
   * are not in our scheduling class.
   */
a9957449b   Alexey Dobriyan   sched: uninline s...
617
  static void update_curr_rt(struct rq *rq)
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
618
619
  {
  	struct task_struct *curr = rq->curr;
6f505b164   Peter Zijlstra   sched: rt group s...
620
621
  	struct sched_rt_entity *rt_se = &curr->rt;
  	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
622
  	u64 delta_exec;
06c3bc655   Peter Zijlstra   sched: Fix update...
623
  	if (curr->sched_class != &rt_sched_class)
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
624
  		return;
305e6835e   Venkatesh Pallipadi   sched: Do not acc...
625
  	delta_exec = rq->clock_task - curr->se.exec_start;
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
626
627
  	if (unlikely((s64)delta_exec < 0))
  		delta_exec = 0;
6cfb0d5d0   Ingo Molnar   [PATCH] sched: re...
628

41acab885   Lucas De Marchi   sched: Implement ...
629
  	schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
630
631
  
  	curr->se.sum_exec_runtime += delta_exec;
f06febc96   Frank Mayhar   timers: fix itime...
632
  	account_group_exec_runtime(curr, delta_exec);
305e6835e   Venkatesh Pallipadi   sched: Do not acc...
633
  	curr->se.exec_start = rq->clock_task;
d842de871   Srivatsa Vaddagiri   sched: cpu accoun...
634
  	cpuacct_charge(curr, delta_exec);
fa85ae241   Peter Zijlstra   sched: rt time limit
635

e9e9250bc   Peter Zijlstra   sched: Scale down...
636
  	sched_rt_avg_update(rq, delta_exec);
0b148fa04   Peter Zijlstra   sched: rt-bandwid...
637
638
  	if (!rt_bandwidth_enabled())
  		return;
354d60c2f   Dhaval Giani   sched: mix tasks ...
639
640
  	for_each_sched_rt_entity(rt_se) {
  		rt_rq = rt_rq_of_se(rt_se);
cc2991cf1   Peter Zijlstra   sched: rt-bandwid...
641
  		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
0986b11b1   Thomas Gleixner   sched: Convert rt...
642
  			raw_spin_lock(&rt_rq->rt_runtime_lock);
cc2991cf1   Peter Zijlstra   sched: rt-bandwid...
643
644
645
  			rt_rq->rt_time += delta_exec;
  			if (sched_rt_runtime_exceeded(rt_rq))
  				resched_task(curr);
0986b11b1   Thomas Gleixner   sched: Convert rt...
646
  			raw_spin_unlock(&rt_rq->rt_runtime_lock);
cc2991cf1   Peter Zijlstra   sched: rt-bandwid...
647
  		}
354d60c2f   Dhaval Giani   sched: mix tasks ...
648
  	}
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
649
  }
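/*
 * RT runtime accounting: delta_exec is measured against rq->clock_task,
 * credited to the usual scheduler statistics and, when RT bandwidth
 * control is enabled, charged to every rt_rq in the entity's hierarchy
 * so that throttling is enforced at each level.
 */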
398a153b1   Gregory Haskins   sched: fix build ...
650
  #if defined CONFIG_SMP
e864c499d   Gregory Haskins   sched: track the ...
651

398a153b1   Gregory Haskins   sched: fix build ...
652
653
  static void
  inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
63489e45e   Steven Rostedt   sched: count # of...
654
  {
4d9842776   Gregory Haskins   sched: cleanup in...
655
  	struct rq *rq = rq_of_rt_rq(rt_rq);
1f11eb6a8   Gregory Haskins   sched: fix cpupri...
656

5181f4a46   Steven Rostedt   sched: Use pushab...
657
658
  	if (rq->online && prio < prev_prio)
  		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
398a153b1   Gregory Haskins   sched: fix build ...
659
  }
73fe6aae8   Gregory Haskins   sched: add RT-bal...
660

398a153b1   Gregory Haskins   sched: fix build ...
661
662
663
664
  static void
  dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
  {
  	struct rq *rq = rq_of_rt_rq(rt_rq);
d0b27fa77   Peter Zijlstra   sched: rt-group: ...
665

398a153b1   Gregory Haskins   sched: fix build ...
666
667
  	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
  		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
63489e45e   Steven Rostedt   sched: count # of...
668
  }
398a153b1   Gregory Haskins   sched: fix build ...
669
  #else /* CONFIG_SMP */
6f505b164   Peter Zijlstra   sched: rt group s...
670
  static inline
398a153b1   Gregory Haskins   sched: fix build ...
671
672
673
674
675
  void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
  static inline
  void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
  
  #endif /* CONFIG_SMP */
6e0534f27   Gregory Haskins   sched: use a 2-d ...
676

052f1dc7e   Peter Zijlstra   sched: rt-group: ...
677
  #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
398a153b1   Gregory Haskins   sched: fix build ...
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
  static void
  inc_rt_prio(struct rt_rq *rt_rq, int prio)
  {
  	int prev_prio = rt_rq->highest_prio.curr;
  
  	if (prio < prev_prio)
  		rt_rq->highest_prio.curr = prio;
  
  	inc_rt_prio_smp(rt_rq, prio, prev_prio);
  }
  
  static void
  dec_rt_prio(struct rt_rq *rt_rq, int prio)
  {
  	int prev_prio = rt_rq->highest_prio.curr;
6f505b164   Peter Zijlstra   sched: rt group s...
693
  	if (rt_rq->rt_nr_running) {
764a9d6fe   Steven Rostedt   sched: track high...
694

398a153b1   Gregory Haskins   sched: fix build ...
695
  		WARN_ON(prio < prev_prio);
764a9d6fe   Steven Rostedt   sched: track high...
696

e864c499d   Gregory Haskins   sched: track the ...
697
  		/*
398a153b1   Gregory Haskins   sched: fix build ...
698
699
  		 * This may have been our highest task, and therefore
  		 * we may have some recomputation to do
e864c499d   Gregory Haskins   sched: track the ...
700
  		 */
398a153b1   Gregory Haskins   sched: fix build ...
701
  		if (prio == prev_prio) {
e864c499d   Gregory Haskins   sched: track the ...
702
703
704
  			struct rt_prio_array *array = &rt_rq->active;
  
  			rt_rq->highest_prio.curr =
764a9d6fe   Steven Rostedt   sched: track high...
705
  				sched_find_first_bit(array->bitmap);
e864c499d   Gregory Haskins   sched: track the ...
706
  		}
764a9d6fe   Steven Rostedt   sched: track high...
707
  	} else
e864c499d   Gregory Haskins   sched: track the ...
708
  		rt_rq->highest_prio.curr = MAX_RT_PRIO;
73fe6aae8   Gregory Haskins   sched: add RT-bal...
709

398a153b1   Gregory Haskins   sched: fix build ...
710
711
  	dec_rt_prio_smp(rt_rq, prio, prev_prio);
  }
1f11eb6a8   Gregory Haskins   sched: fix cpupri...
712

398a153b1   Gregory Haskins   sched: fix build ...
713
714
715
716
717
718
  #else
  
  static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
  static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
  
  #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
6e0534f27   Gregory Haskins   sched: use a 2-d ...
719

052f1dc7e   Peter Zijlstra   sched: rt-group: ...
720
  #ifdef CONFIG_RT_GROUP_SCHED
398a153b1   Gregory Haskins   sched: fix build ...
721
722
723
724
725
726
727
728
729
730
731
732
733
734
  
  static void
  inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	if (rt_se_boosted(rt_se))
  		rt_rq->rt_nr_boosted++;
  
  	if (rt_rq->tg)
  		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
  }
  
  static void
  dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
23b0fdfc9   Peter Zijlstra   sched: rt-group: ...
735
736
737
738
  	if (rt_se_boosted(rt_se))
  		rt_rq->rt_nr_boosted--;
  
  	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
398a153b1   Gregory Haskins   sched: fix build ...
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
  }
  
  #else /* CONFIG_RT_GROUP_SCHED */
  
  static void
  inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	start_rt_bandwidth(&def_rt_bandwidth);
  }
  
  static inline
  void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
  
  #endif /* CONFIG_RT_GROUP_SCHED */
  
  static inline
  void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	int prio = rt_se_prio(rt_se);
  
  	WARN_ON(!rt_prio(prio));
  	rt_rq->rt_nr_running++;
  
  	inc_rt_prio(rt_rq, prio);
  	inc_rt_migration(rt_se, rt_rq);
  	inc_rt_group(rt_se, rt_rq);
  }
  
  static inline
  void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
  	WARN_ON(!rt_rq->rt_nr_running);
  	rt_rq->rt_nr_running--;
  
  	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
  	dec_rt_migration(rt_se, rt_rq);
  	dec_rt_group(rt_se, rt_rq);
63489e45e   Steven Rostedt   sched: count # of...
777
  }
37dad3fce   Thomas Gleixner   sched: Implement ...
778
  static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
779
  {
6f505b164   Peter Zijlstra   sched: rt group s...
780
781
782
  	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
  	struct rt_prio_array *array = &rt_rq->active;
  	struct rt_rq *group_rq = group_rt_rq(rt_se);
20b6331bf   Dmitry Adamushko   sched: rework of ...
783
  	struct list_head *queue = array->queue + rt_se_prio(rt_se);
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
784

ad2a3f13b   Peter Zijlstra   sched: rt-group: ...
785
786
787
788
789
790
791
  	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
  	 * active members.
  	 */
  	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
6f505b164   Peter Zijlstra   sched: rt group s...
792
  		return;
63489e45e   Steven Rostedt   sched: count # of...
793

3d4b47b4b   Peter Zijlstra   sched: Implement ...
794
795
  	if (!rt_rq->rt_nr_running)
  		list_add_leaf_rt_rq(rt_rq);
37dad3fce   Thomas Gleixner   sched: Implement ...
796
797
798
799
  	if (head)
  		list_add(&rt_se->run_list, queue);
  	else
  		list_add_tail(&rt_se->run_list, queue);
6f505b164   Peter Zijlstra   sched: rt group s...
800
  	__set_bit(rt_se_prio(rt_se), array->bitmap);
78f2c7db6   Peter Zijlstra   sched: SCHED_FIFO...
801

6f505b164   Peter Zijlstra   sched: rt group s...
802
803
  	inc_rt_tasks(rt_se, rt_rq);
  }
ad2a3f13b   Peter Zijlstra   sched: rt-group: ...
804
  static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
6f505b164   Peter Zijlstra   sched: rt group s...
805
806
807
808
809
810
811
812
813
  {
  	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
  	struct rt_prio_array *array = &rt_rq->active;
  
  	list_del_init(&rt_se->run_list);
  	if (list_empty(array->queue + rt_se_prio(rt_se)))
  		__clear_bit(rt_se_prio(rt_se), array->bitmap);
  
  	dec_rt_tasks(rt_se, rt_rq);
3d4b47b4b   Peter Zijlstra   sched: Implement ...
814
815
  	if (!rt_rq->rt_nr_running)
  		list_del_leaf_rt_rq(rt_rq);
6f505b164   Peter Zijlstra   sched: rt group s...
816
817
818
819
820
  }
  
  /*
   * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
6f505b164   Peter Zijlstra   sched: rt group s...
821
   */
ad2a3f13b   Peter Zijlstra   sched: rt-group: ...
822
  static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
6f505b164   Peter Zijlstra   sched: rt group s...
823
  {
ad2a3f13b   Peter Zijlstra   sched: rt-group: ...
824
  	struct sched_rt_entity *back = NULL;
6f505b164   Peter Zijlstra   sched: rt group s...
825

58d6c2d72   Peter Zijlstra   sched: rt-group: ...
826
827
828
829
830
831
832
  	for_each_sched_rt_entity(rt_se) {
  		rt_se->back = back;
  		back = rt_se;
  	}
  
  	for (rt_se = back; rt_se; rt_se = rt_se->back) {
  		if (on_rt_rq(rt_se))
ad2a3f13b   Peter Zijlstra   sched: rt-group: ...
833
834
835
  			__dequeue_rt_entity(rt_se);
  	}
  }
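/*
 * dequeue_rt_stack() first records the path from the entity up to the
 * root via ->back and then dequeues from the top of the hierarchy down:
 * a group entity is queued at a priority derived from its children, so
 * it has to be removed while those children are still queued.
 */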
37dad3fce   Thomas Gleixner   sched: Implement ...
836
  static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
ad2a3f13b   Peter Zijlstra   sched: rt-group: ...
837
838
839
  {
  	dequeue_rt_stack(rt_se);
  	for_each_sched_rt_entity(rt_se)
37dad3fce   Thomas Gleixner   sched: Implement ...
840
  		__enqueue_rt_entity(rt_se, head);
ad2a3f13b   Peter Zijlstra   sched: rt-group: ...
841
842
843
844
845
846
847
848
849
850
  }
  
  static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
  {
  	dequeue_rt_stack(rt_se);
  
  	for_each_sched_rt_entity(rt_se) {
  		struct rt_rq *rt_rq = group_rt_rq(rt_se);
  
  		if (rt_rq && rt_rq->rt_nr_running)
37dad3fce   Thomas Gleixner   sched: Implement ...
851
  			__enqueue_rt_entity(rt_se, false);
58d6c2d72   Peter Zijlstra   sched: rt-group: ...
852
  	}
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
853
854
855
856
857
  }
  
  /*
   * Adding/removing a task to/from a priority array:
   */
ea87bb785   Thomas Gleixner   sched: Extend enq...
858
  static void
371fd7e7a   Peter Zijlstra   sched: Add enqueu...
859
  enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
6f505b164   Peter Zijlstra   sched: rt group s...
860
861
  {
  	struct sched_rt_entity *rt_se = &p->rt;
371fd7e7a   Peter Zijlstra   sched: Add enqueu...
862
  	if (flags & ENQUEUE_WAKEUP)
6f505b164   Peter Zijlstra   sched: rt group s...
863
  		rt_se->timeout = 0;
371fd7e7a   Peter Zijlstra   sched: Add enqueu...
864
  	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
c09595f63   Peter Zijlstra   sched: revert rev...
865

917b627d4   Gregory Haskins   sched: create "pu...
866
867
  	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
  		enqueue_pushable_task(rq, p);
953bfcd10   Paul Turner   sched: Implement ...
868
869
  
  	inc_nr_running(rq);
6f505b164   Peter Zijlstra   sched: rt group s...
870
  }
371fd7e7a   Peter Zijlstra   sched: Add enqueu...
871
  static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
872
  {
6f505b164   Peter Zijlstra   sched: rt group s...
873
  	struct sched_rt_entity *rt_se = &p->rt;
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
874

f1e14ef64   Ingo Molnar   sched: remove the...
875
  	update_curr_rt(rq);
ad2a3f13b   Peter Zijlstra   sched: rt-group: ...
876
  	dequeue_rt_entity(rt_se);
c09595f63   Peter Zijlstra   sched: revert rev...
877

917b627d4   Gregory Haskins   sched: create "pu...
878
  	dequeue_pushable_task(rq, p);
953bfcd10   Paul Turner   sched: Implement ...
879
880
  
  	dec_nr_running(rq);
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
881
882
883
884
885
886
  }
  
  /*
   * Put task to the end of the run list without the overhead of dequeue
   * followed by enqueue.
   */
7ebefa8ce   Dmitry Adamushko   sched: rework of ...
887
888
  static void
  requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
6f505b164   Peter Zijlstra   sched: rt group s...
889
  {
1cdad7153   Ingo Molnar   Merge branch 'sch...
890
  	if (on_rt_rq(rt_se)) {
7ebefa8ce   Dmitry Adamushko   sched: rework of ...
891
892
893
894
895
896
897
  		struct rt_prio_array *array = &rt_rq->active;
  		struct list_head *queue = array->queue + rt_se_prio(rt_se);
  
  		if (head)
  			list_move(&rt_se->run_list, queue);
  		else
  			list_move_tail(&rt_se->run_list, queue);
1cdad7153   Ingo Molnar   Merge branch 'sch...
898
  	}
6f505b164   Peter Zijlstra   sched: rt group s...
899
  }
7ebefa8ce   Dmitry Adamushko   sched: rework of ...
900
  static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
901
  {
6f505b164   Peter Zijlstra   sched: rt group s...
902
903
  	struct sched_rt_entity *rt_se = &p->rt;
  	struct rt_rq *rt_rq;
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
904

6f505b164   Peter Zijlstra   sched: rt group s...
905
906
  	for_each_sched_rt_entity(rt_se) {
  		rt_rq = rt_rq_of_se(rt_se);
7ebefa8ce   Dmitry Adamushko   sched: rework of ...
907
  		requeue_rt_entity(rt_rq, rt_se, head);
6f505b164   Peter Zijlstra   sched: rt group s...
908
  	}
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
909
  }
6f505b164   Peter Zijlstra   sched: rt group s...
910
  static void yield_task_rt(struct rq *rq)
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
911
  {
7ebefa8ce   Dmitry Adamushko   sched: rework of ...
912
  	requeue_task_rt(rq, rq->curr, 0);
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
913
  }
e7693a362   Gregory Haskins   sched: de-SCHED_O...
914
  #ifdef CONFIG_SMP
318e0893c   Gregory Haskins   sched: pre-route ...
915
  static int find_lowest_rq(struct task_struct *task);
0017d7350   Peter Zijlstra   sched: Fix TASK_W...
916
  static int
7608dec2c   Peter Zijlstra   sched: Drop the r...
917
  select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
e7693a362   Gregory Haskins   sched: de-SCHED_O...
918
  {
7608dec2c   Peter Zijlstra   sched: Drop the r...
919
920
921
  	struct task_struct *curr;
  	struct rq *rq;
  	int cpu;
7608dec2c   Peter Zijlstra   sched: Drop the r...
922
  	cpu = task_cpu(p);
c37495fd0   Steven Rostedt   sched: Balance RT...
923
924
925
926
  
  	/* For anything but wake ups, just return the task_cpu */
  	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
  		goto out;
7608dec2c   Peter Zijlstra   sched: Drop the r...
927
928
929
930
  	rq = cpu_rq(cpu);
  
  	rcu_read_lock();
  	curr = ACCESS_ONCE(rq->curr); /* unlocked access */
318e0893c   Gregory Haskins   sched: pre-route ...
931
  	/*
7608dec2c   Peter Zijlstra   sched: Drop the r...
932
  	 * If the current task on @p's runqueue is an RT task, then
e1f47d891   Steven Rostedt   sched: RT-balance...
933
934
935
936
  	 * try to see if we can wake this RT task up on another
  	 * runqueue. Otherwise simply start this RT task
  	 * on its current runqueue.
  	 *
43fa5460f   Steven Rostedt   sched: Try not to...
937
938
939
940
941
942
943
944
945
  	 * We want to avoid overloading runqueues. If the woken
  	 * task is a higher priority, then it will stay on this CPU
  	 * and the lower prio task should be moved to another CPU.
  	 * Even though this will probably make the lower prio task
  	 * lose its cache, we do not want to bounce a higher task
  	 * around just because it gave up its CPU, perhaps for a
  	 * lock?
  	 *
  	 * For equal prio tasks, we just let the scheduler sort it out.
7608dec2c   Peter Zijlstra   sched: Drop the r...
946
947
948
949
950
951
  	 *
  	 * Otherwise, just let it ride on the affined RQ and the
  	 * post-schedule router will push the preempted task away
  	 *
  	 * This test is optimistic, if we get it wrong the load-balancer
  	 * will have to sort it out.
318e0893c   Gregory Haskins   sched: pre-route ...
952
  	 */
7608dec2c   Peter Zijlstra   sched: Drop the r...
953
954
  	if (curr && unlikely(rt_task(curr)) &&
  	    (curr->rt.nr_cpus_allowed < 2 ||
3be209a8e   Shawn Bohrer   sched/rt: Migrate...
955
  	     curr->prio <= p->prio) &&
6f505b164   Peter Zijlstra   sched: rt group s...
956
  	    (p->rt.nr_cpus_allowed > 1)) {
7608dec2c   Peter Zijlstra   sched: Drop the r...
957
  		int target = find_lowest_rq(p);
318e0893c   Gregory Haskins   sched: pre-route ...
958

7608dec2c   Peter Zijlstra   sched: Drop the r...
959
960
  		if (target != -1)
  			cpu = target;
318e0893c   Gregory Haskins   sched: pre-route ...
961
  	}
7608dec2c   Peter Zijlstra   sched: Drop the r...
962
  	rcu_read_unlock();
318e0893c   Gregory Haskins   sched: pre-route ...
963

c37495fd0   Steven Rostedt   sched: Balance RT...
964
  out:
7608dec2c   Peter Zijlstra   sched: Drop the r...
965
  	return cpu;
e7693a362   Gregory Haskins   sched: de-SCHED_O...
966
  }
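/*
 * Wakeup placement in short: only SD_BALANCE_WAKE/SD_BALANCE_FORK are
 * redirected.  If the destination CPU currently runs an RT task that is
 * effectively pinned or of equal/higher priority, and the waking task
 * itself may migrate, find_lowest_rq() is used to wake it elsewhere
 * instead of stacking RT tasks on one runqueue.
 */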
7ebefa8ce   Dmitry Adamushko   sched: rework of ...
967
968
969
  
  static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
  {
7ebefa8ce   Dmitry Adamushko   sched: rework of ...
970
971
  	if (rq->curr->rt.nr_cpus_allowed == 1)
  		return;
24600ce89   Rusty Russell   sched: convert ch...
972
  	if (p->rt.nr_cpus_allowed != 1
13b8bd0a5   Rusty Russell   sched_rt: don't a...
973
974
  	    && cpupri_find(&rq->rd->cpupri, p, NULL))
  		return;
24600ce89   Rusty Russell   sched: convert ch...
975

13b8bd0a5   Rusty Russell   sched_rt: don't a...
976
977
  	if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
  		return;
7ebefa8ce   Dmitry Adamushko   sched: rework of ...
978
979
980
981
982
983
984
985
986
  
  	/*
	 * There appear to be other CPUs that can accept
	 * current and none to run 'p', so let's reschedule
	 * to try and push current away:
  	 */
  	requeue_task_rt(rq, p, 1);
  	resched_task(rq->curr);
  }
e7693a362   Gregory Haskins   sched: de-SCHED_O...
987
  #endif /* CONFIG_SMP */
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
988
989
990
  /*
   * Preempt the current task with a newly woken task if needed:
   */
7d4787214   Peter Zijlstra   sched: Rename syn...
991
  static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
992
  {
45c01e824   Gregory Haskins   sched: prioritize...
993
  	if (p->prio < rq->curr->prio) {
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
994
  		resched_task(rq->curr);
45c01e824   Gregory Haskins   sched: prioritize...
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
  		return;
  	}
  
  #ifdef CONFIG_SMP
  	/*
  	 * If:
  	 *
  	 * - the newly woken task is of equal priority to the current task
  	 * - the newly woken task is non-migratable while current is migratable
  	 * - current will be preempted on the next reschedule
  	 *
  	 * we should check to see if current can readily move to a different
  	 * cpu.  If so, we will reschedule to allow the push logic to try
  	 * to move current somewhere else, making room for our non-migratable
  	 * task.
  	 */
8dd0de8be   Hillf Danton   sched: Fix need_r...
1011
  	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
7ebefa8ce   Dmitry Adamushko   sched: rework of ...
1012
  		check_preempt_equal_prio(rq, p);
45c01e824   Gregory Haskins   sched: prioritize...
1013
  #endif
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1014
  }
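/*
 * Preemption rule: a strictly higher-priority waking task always preempts
 * current.  An equal-priority wakeup does not preempt, but on SMP
 * check_preempt_equal_prio() may still reschedule so that a migratable
 * current can be pushed away in favour of a non-migratable waker.
 */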
6f505b164   Peter Zijlstra   sched: rt group s...
1015
1016
  static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
  						   struct rt_rq *rt_rq)
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1017
  {
6f505b164   Peter Zijlstra   sched: rt group s...
1018
1019
  	struct rt_prio_array *array = &rt_rq->active;
  	struct sched_rt_entity *next = NULL;
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1020
1021
1022
1023
  	struct list_head *queue;
  	int idx;
  
  	idx = sched_find_first_bit(array->bitmap);
6f505b164   Peter Zijlstra   sched: rt group s...
1024
  	BUG_ON(idx >= MAX_RT_PRIO);
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1025
1026
  
  	queue = array->queue + idx;
6f505b164   Peter Zijlstra   sched: rt group s...
1027
  	next = list_entry(queue->next, struct sched_rt_entity, run_list);
326587b84   Dmitry Adamushko   sched: fix goto r...
1028

6f505b164   Peter Zijlstra   sched: rt group s...
1029
1030
  	return next;
  }
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1031

917b627d4   Gregory Haskins   sched: create "pu...
1032
  static struct task_struct *_pick_next_task_rt(struct rq *rq)
6f505b164   Peter Zijlstra   sched: rt group s...
1033
1034
1035
1036
  {
  	struct sched_rt_entity *rt_se;
  	struct task_struct *p;
  	struct rt_rq *rt_rq;
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1037

6f505b164   Peter Zijlstra   sched: rt group s...
1038
  	rt_rq = &rq->rt;
8e54a2c03   Steven Rostedt   sched: Change pic...
1039
  	if (!rt_rq->rt_nr_running)
6f505b164   Peter Zijlstra   sched: rt group s...
1040
  		return NULL;
23b0fdfc9   Peter Zijlstra   sched: rt-group: ...
1041
  	if (rt_rq_throttled(rt_rq))
6f505b164   Peter Zijlstra   sched: rt group s...
1042
1043
1044
1045
  		return NULL;
  
  	do {
  		rt_se = pick_next_rt_entity(rq, rt_rq);
326587b84   Dmitry Adamushko   sched: fix goto r...
1046
  		BUG_ON(!rt_se);
6f505b164   Peter Zijlstra   sched: rt group s...
1047
1048
1049
1050
  		rt_rq = group_rt_rq(rt_se);
  	} while (rt_rq);
  
  	p = rt_task_of(rt_se);
305e6835e   Venkatesh Pallipadi   sched: Do not acc...
1051
  	p->se.exec_start = rq->clock_task;
917b627d4   Gregory Haskins   sched: create "pu...
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
  
  	return p;
  }
  
  static struct task_struct *pick_next_task_rt(struct rq *rq)
  {
  	struct task_struct *p = _pick_next_task_rt(rq);
  
  	/* The running task is never eligible for pushing */
  	if (p)
  		dequeue_pushable_task(rq, p);
bcf08df3b   Ingo Molnar   sched: Fix cpupri...
1063
  #ifdef CONFIG_SMP
3f029d3c6   Gregory Haskins   sched: Enhance th...
1064
1065
1066
1067
1068
  	/*
  	 * We detect this state here so that we can avoid taking the RQ
  	 * lock again later if there is no need to push
  	 */
  	rq->post_schedule = has_pushable_tasks(rq);
bcf08df3b   Ingo Molnar   sched: Fix cpupri...
1069
  #endif
3f029d3c6   Gregory Haskins   sched: Enhance th...
1070

6f505b164   Peter Zijlstra   sched: rt group s...
1071
  	return p;
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1072
  }
31ee529cc   Ingo Molnar   sched: remove the...
1073
  static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1074
  {
f1e14ef64   Ingo Molnar   sched: remove the...
1075
  	update_curr_rt(rq);
917b627d4   Gregory Haskins   sched: create "pu...
1076
1077
1078
1079
1080
  
  	/*
  	 * The previous task needs to be made eligible for pushing
  	 * if it is still active
  	 */
fd2f4419b   Peter Zijlstra   sched: Provide p-...
1081
  	if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
917b627d4   Gregory Haskins   sched: create "pu...
1082
  		enqueue_pushable_task(rq, p);
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1083
  }
681f3e685   Peter Williams   sched: isolate SM...
1084
  #ifdef CONFIG_SMP
6f505b164   Peter Zijlstra   sched: rt group s...
1085

e8fa13626   Steven Rostedt   sched: add RT tas...
1086
1087
  /* Only try algorithms three times */
  #define RT_MAX_TRIES 3
e8fa13626   Steven Rostedt   sched: add RT tas...
1088
  static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1089
1090
1091
  static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
  {
  	if (!task_running(rq, p) &&
fa17b507f   Peter Zijlstra   sched: Wrap sched...
1092
  	    (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
6f505b164   Peter Zijlstra   sched: rt group s...
1093
  	    (p->rt.nr_cpus_allowed > 1))
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1094
1095
1096
  		return 1;
  	return 0;
  }
e8fa13626   Steven Rostedt   sched: add RT tas...
1097
  /* Return the second highest RT task, NULL otherwise */
79064fbf7   Ingo Molnar   sched: clean up p...
1098
  static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
e8fa13626   Steven Rostedt   sched: add RT tas...
1099
  {
6f505b164   Peter Zijlstra   sched: rt group s...
1100
1101
1102
1103
  	struct task_struct *next = NULL;
  	struct sched_rt_entity *rt_se;
  	struct rt_prio_array *array;
  	struct rt_rq *rt_rq;
e8fa13626   Steven Rostedt   sched: add RT tas...
1104
  	int idx;
6f505b164   Peter Zijlstra   sched: rt group s...
1105
1106
1107
  	for_each_leaf_rt_rq(rt_rq, rq) {
  		array = &rt_rq->active;
  		idx = sched_find_first_bit(array->bitmap);
492462742   Peter Zijlstra   sched: Unindent l...
1108
  next_idx:
6f505b164   Peter Zijlstra   sched: rt group s...
1109
1110
1111
1112
1113
  		if (idx >= MAX_RT_PRIO)
  			continue;
  		if (next && next->prio < idx)
  			continue;
  		list_for_each_entry(rt_se, array->queue + idx, run_list) {
3d07467b7   Peter Zijlstra   sched: Fix pick_n...
1114
1115
1116
1117
1118
1119
  			struct task_struct *p;
  
  			if (!rt_entity_is_task(rt_se))
  				continue;
  
  			p = rt_task_of(rt_se);
6f505b164   Peter Zijlstra   sched: rt group s...
1120
1121
1122
1123
1124
1125
1126
1127
1128
  			if (pick_rt_task(rq, p, cpu)) {
  				next = p;
  				break;
  			}
  		}
  		if (!next) {
  			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
  			goto next_idx;
  		}
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1129
  	}
e8fa13626   Steven Rostedt   sched: add RT tas...
1130
1131
  	return next;
  }
0e3900e6d   Rusty Russell   sched: convert lo...
1132
  static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
e8fa13626   Steven Rostedt   sched: add RT tas...
1133

6e1254d2c   Gregory Haskins   sched: optimize R...
1134
1135
1136
  static int find_lowest_rq(struct task_struct *task)
  {
  	struct sched_domain *sd;
96f874e26   Rusty Russell   sched: convert re...
1137
  	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
6e1254d2c   Gregory Haskins   sched: optimize R...
1138
1139
  	int this_cpu = smp_processor_id();
  	int cpu      = task_cpu(task);
06f90dbd7   Gregory Haskins   sched: RT-balance...
1140

0da938c44   Steven Rostedt   sched: Check if l...
1141
1142
1143
  	/* Make sure the mask is initialized first */
  	if (unlikely(!lowest_mask))
  		return -1;
6e0534f27   Gregory Haskins   sched: use a 2-d ...
1144
1145
  	if (task->rt.nr_cpus_allowed == 1)
  		return -1; /* No other targets possible */
6e1254d2c   Gregory Haskins   sched: optimize R...
1146

6e0534f27   Gregory Haskins   sched: use a 2-d ...
1147
1148
  	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
  		return -1; /* No targets found */
6e1254d2c   Gregory Haskins   sched: optimize R...
1149
1150
1151
1152
1153
1154
1155
1156
1157
  
  	/*
  	 * At this point we have built a mask of cpus representing the
  	 * lowest priority tasks in the system.  Now we want to elect
  	 * the best one based on our affinity and topology.
  	 *
  	 * We prioritize the last cpu that the task executed on since
  	 * it is most likely cache-hot in that location.
  	 */
96f874e26   Rusty Russell   sched: convert re...
1158
  	if (cpumask_test_cpu(cpu, lowest_mask))
6e1254d2c   Gregory Haskins   sched: optimize R...
1159
1160
1161
1162
1163
1164
  		return cpu;
  
  	/*
  	 * Otherwise, we consult the sched_domains span maps to figure
  	 * out which cpu is logically closest to our hot cache data.
  	 */
e2c880630   Rusty Russell   cpumask: Simplify...
1165
1166
  	if (!cpumask_test_cpu(this_cpu, lowest_mask))
  		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
6e1254d2c   Gregory Haskins   sched: optimize R...
1167

cd4ae6adf   Xiaotian Feng   sched: More sched...
1168
  	rcu_read_lock();
e2c880630   Rusty Russell   cpumask: Simplify...
1169
1170
1171
  	for_each_domain(cpu, sd) {
  		if (sd->flags & SD_WAKE_AFFINE) {
  			int best_cpu;
6e1254d2c   Gregory Haskins   sched: optimize R...
1172

e2c880630   Rusty Russell   cpumask: Simplify...
1173
1174
1175
1176
1177
  			/*
  			 * "this_cpu" is cheaper to preempt than a
  			 * remote processor.
  			 */
  			if (this_cpu != -1 &&
cd4ae6adf   Xiaotian Feng   sched: More sched...
1178
1179
  			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
  				rcu_read_unlock();
e2c880630   Rusty Russell   cpumask: Simplify...
1180
  				return this_cpu;
cd4ae6adf   Xiaotian Feng   sched: More sched...
1181
  			}
e2c880630   Rusty Russell   cpumask: Simplify...
1182
1183
1184
  
  			best_cpu = cpumask_first_and(lowest_mask,
  						     sched_domain_span(sd));
cd4ae6adf   Xiaotian Feng   sched: More sched...
1185
1186
  			if (best_cpu < nr_cpu_ids) {
  				rcu_read_unlock();
e2c880630   Rusty Russell   cpumask: Simplify...
1187
  				return best_cpu;
cd4ae6adf   Xiaotian Feng   sched: More sched...
1188
  			}
6e1254d2c   Gregory Haskins   sched: optimize R...
1189
1190
  		}
  	}
cd4ae6adf   Xiaotian Feng   sched: More sched...
1191
  	rcu_read_unlock();
6e1254d2c   Gregory Haskins   sched: optimize R...
1192
1193
1194
1195
1196
1197
  
  	/*
  	 * And finally, if there were no matches within the domains
  	 * just give the caller *something* to work with from the compatible
  	 * locations.
  	 */
e2c880630   Rusty Russell   cpumask: Simplify...
1198
1199
1200
1201
1202
1203
1204
  	if (this_cpu != -1)
  		return this_cpu;
  
  	cpu = cpumask_any(lowest_mask);
  	if (cpu < nr_cpu_ids)
  		return cpu;
  	return -1;
07b4032c9   Gregory Haskins   sched: break out ...
1205
1206
1207
  }
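  /*
   * (Editor's sketch, not part of sched_rt.c: a stand-alone illustration
   * of the CPU preference order used above - the CPU the task last ran on
   * first, then the current CPU, then any CPU left in the candidate mask.
   * Plain bitmask arithmetic stands in for cpumask_t and the sched_domain
   * walk is omitted; the toy_* names are invented for this example.)
   */
  #include <stdio.h>
  
  static int toy_pick_lowest(unsigned long lowest_mask, int task_cpu,
  			   int this_cpu)
  {
  	int cpu;
  
  	if (lowest_mask & (1UL << task_cpu))
  		return task_cpu;		/* likely cache-hot */
  	if (this_cpu >= 0 && (lowest_mask & (1UL << this_cpu)))
  		return this_cpu;		/* cheap local preemption */
  	for (cpu = 0; cpu < (int)(8 * sizeof(unsigned long)); cpu++)
  		if (lowest_mask & (1UL << cpu))
  			return cpu;		/* any compatible CPU */
  	return -1;				/* no target */
  }
  
  int main(void)
  {
  	/* CPUs 1 and 3 currently run the lowest-priority work (mask 0xa). */
  	printf("%d\n", toy_pick_lowest(0xaUL, 3, 0));	/* prints 3 */
  	printf("%d\n", toy_pick_lowest(0xaUL, 2, 1));	/* prints 1 */
  	printf("%d\n", toy_pick_lowest(0xaUL, 2, 0));	/* prints 1 */
  	return 0;
  }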
  
  /* Will lock the rq it finds */
4df64c0bf   Ingo Molnar   sched: clean up f...
1208
  static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
07b4032c9   Gregory Haskins   sched: break out ...
1209
1210
  {
  	struct rq *lowest_rq = NULL;
07b4032c9   Gregory Haskins   sched: break out ...
1211
  	int tries;
4df64c0bf   Ingo Molnar   sched: clean up f...
1212
  	int cpu;
e8fa13626   Steven Rostedt   sched: add RT tas...
1213

07b4032c9   Gregory Haskins   sched: break out ...
1214
1215
  	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
  		cpu = find_lowest_rq(task);
2de0b4639   Gregory Haskins   sched: RT balanci...
1216
  		if ((cpu == -1) || (cpu == rq->cpu))
e8fa13626   Steven Rostedt   sched: add RT tas...
1217
  			break;
07b4032c9   Gregory Haskins   sched: break out ...
1218
  		lowest_rq = cpu_rq(cpu);
e8fa13626   Steven Rostedt   sched: add RT tas...
1219
  		/* if the prio of this runqueue changed, try again */
07b4032c9   Gregory Haskins   sched: break out ...
1220
  		if (double_lock_balance(rq, lowest_rq)) {
e8fa13626   Steven Rostedt   sched: add RT tas...
1221
1222
1223
1224
1225
1226
  			/*
  			 * We had to unlock the run queue. In
  			 * the meantime, the task could have
  			 * migrated already or had its affinity changed.
  			 * Also make sure that it wasn't scheduled on its rq.
  			 */
07b4032c9   Gregory Haskins   sched: break out ...
1227
  			if (unlikely(task_rq(task) != rq ||
96f874e26   Rusty Russell   sched: convert re...
1228
  				     !cpumask_test_cpu(lowest_rq->cpu,
fa17b507f   Peter Zijlstra   sched: Wrap sched...
1229
  						       tsk_cpus_allowed(task)) ||
07b4032c9   Gregory Haskins   sched: break out ...
1230
  				     task_running(rq, task) ||
fd2f4419b   Peter Zijlstra   sched: Provide p-...
1231
  				     !task->on_rq)) {
4df64c0bf   Ingo Molnar   sched: clean up f...
1232

05fa785cf   Thomas Gleixner   sched: Convert rq...
1233
  				raw_spin_unlock(&lowest_rq->lock);
e8fa13626   Steven Rostedt   sched: add RT tas...
1234
1235
1236
1237
1238
1239
  				lowest_rq = NULL;
  				break;
  			}
  		}
  
  		/* If this rq is still suitable, use it. */
e864c499d   Gregory Haskins   sched: track the ...
1240
  		if (lowest_rq->rt.highest_prio.curr > task->prio)
e8fa13626   Steven Rostedt   sched: add RT tas...
1241
1242
1243
  			break;
  
  		/* try again */
1b12bbc74   Peter Zijlstra   lockdep: re-annot...
1244
  		double_unlock_balance(rq, lowest_rq);
e8fa13626   Steven Rostedt   sched: add RT tas...
1245
1246
1247
1248
1249
  		lowest_rq = NULL;
  	}
  
  	return lowest_rq;
  }
917b627d4   Gregory Haskins   sched: create "pu...
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
  static struct task_struct *pick_next_pushable_task(struct rq *rq)
  {
  	struct task_struct *p;
  
  	if (!has_pushable_tasks(rq))
  		return NULL;
  
  	p = plist_first_entry(&rq->rt.pushable_tasks,
  			      struct task_struct, pushable_tasks);
  
  	BUG_ON(rq->cpu != task_cpu(p));
  	BUG_ON(task_current(rq, p));
  	BUG_ON(p->rt.nr_cpus_allowed <= 1);
fd2f4419b   Peter Zijlstra   sched: Provide p-...
1263
  	BUG_ON(!p->on_rq);
917b627d4   Gregory Haskins   sched: create "pu...
1264
1265
1266
1267
  	BUG_ON(!rt_task(p));
  
  	return p;
  }
e8fa13626   Steven Rostedt   sched: add RT tas...
1268
1269
1270
1271
1272
  /*
   * If the current CPU has more than one RT task, see if the
   * non-running task can migrate over to a CPU that is running a task
   * of lesser priority.
   */
697f0a487   Gregory Haskins   sched: clean up t...
1273
  static int push_rt_task(struct rq *rq)
e8fa13626   Steven Rostedt   sched: add RT tas...
1274
1275
1276
  {
  	struct task_struct *next_task;
  	struct rq *lowest_rq;
311e800e1   Hillf Danton   sched, rt: Fix rq...
1277
  	int ret = 0;
e8fa13626   Steven Rostedt   sched: add RT tas...
1278

a22d7fc18   Gregory Haskins   sched: wake-balan...
1279
1280
  	if (!rq->rt.overloaded)
  		return 0;
917b627d4   Gregory Haskins   sched: create "pu...
1281
  	next_task = pick_next_pushable_task(rq);
e8fa13626   Steven Rostedt   sched: add RT tas...
1282
1283
  	if (!next_task)
  		return 0;
492462742   Peter Zijlstra   sched: Unindent l...
1284
  retry:
697f0a487   Gregory Haskins   sched: clean up t...
1285
  	if (unlikely(next_task == rq->curr)) {
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1286
  		WARN_ON(1);
e8fa13626   Steven Rostedt   sched: add RT tas...
1287
  		return 0;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1288
  	}
e8fa13626   Steven Rostedt   sched: add RT tas...
1289
1290
1291
1292
1293
1294
  
  	/*
  	 * It's possible that next_task slipped in with a
  	 * higher priority than current. If that's the case,
  	 * just reschedule current.
  	 */
697f0a487   Gregory Haskins   sched: clean up t...
1295
1296
  	if (unlikely(next_task->prio < rq->curr->prio)) {
  		resched_task(rq->curr);
e8fa13626   Steven Rostedt   sched: add RT tas...
1297
1298
  		return 0;
  	}
697f0a487   Gregory Haskins   sched: clean up t...
1299
  	/* We might release rq lock */
e8fa13626   Steven Rostedt   sched: add RT tas...
1300
1301
1302
  	get_task_struct(next_task);
  
  	/* find_lock_lowest_rq locks the rq if found */
697f0a487   Gregory Haskins   sched: clean up t...
1303
  	lowest_rq = find_lock_lowest_rq(next_task, rq);
e8fa13626   Steven Rostedt   sched: add RT tas...
1304
1305
1306
  	if (!lowest_rq) {
  		struct task_struct *task;
  		/*
311e800e1   Hillf Danton   sched, rt: Fix rq...
1307
  		 * find_lock_lowest_rq releases rq->lock
1563513d3   Gregory Haskins   RT: fix push_rt_t...
1308
1309
1310
1311
1312
  		 * so it is possible that next_task has migrated.
  		 *
  		 * We need to make sure that the task is still on the same
  		 * run-queue and is also still the next task eligible for
  		 * pushing.
e8fa13626   Steven Rostedt   sched: add RT tas...
1313
  		 */
917b627d4   Gregory Haskins   sched: create "pu...
1314
  		task = pick_next_pushable_task(rq);
1563513d3   Gregory Haskins   RT: fix push_rt_t...
1315
1316
  		if (task_cpu(next_task) == rq->cpu && task == next_task) {
  			/*
311e800e1   Hillf Danton   sched, rt: Fix rq...
1317
1318
1319
1320
  			 * The task hasn't migrated, and is still the next
  			 * eligible task, but we failed to find a run-queue
  			 * to push it to.  Do not retry in this case, since
  			 * other cpus will pull from us when ready.
1563513d3   Gregory Haskins   RT: fix push_rt_t...
1321
  			 */
1563513d3   Gregory Haskins   RT: fix push_rt_t...
1322
  			goto out;
e8fa13626   Steven Rostedt   sched: add RT tas...
1323
  		}
917b627d4   Gregory Haskins   sched: create "pu...
1324

1563513d3   Gregory Haskins   RT: fix push_rt_t...
1325
1326
1327
  		if (!task)
  			/* No more tasks, just exit */
  			goto out;
917b627d4   Gregory Haskins   sched: create "pu...
1328
  		/*
1563513d3   Gregory Haskins   RT: fix push_rt_t...
1329
  		 * Something has shifted, try again.
917b627d4   Gregory Haskins   sched: create "pu...
1330
  		 */
1563513d3   Gregory Haskins   RT: fix push_rt_t...
1331
1332
1333
  		put_task_struct(next_task);
  		next_task = task;
  		goto retry;
e8fa13626   Steven Rostedt   sched: add RT tas...
1334
  	}
697f0a487   Gregory Haskins   sched: clean up t...
1335
  	deactivate_task(rq, next_task, 0);
e8fa13626   Steven Rostedt   sched: add RT tas...
1336
1337
  	set_task_cpu(next_task, lowest_rq->cpu);
  	activate_task(lowest_rq, next_task, 0);
311e800e1   Hillf Danton   sched, rt: Fix rq...
1338
  	ret = 1;
e8fa13626   Steven Rostedt   sched: add RT tas...
1339
1340
  
  	resched_task(lowest_rq->curr);
1b12bbc74   Peter Zijlstra   lockdep: re-annot...
1341
  	double_unlock_balance(rq, lowest_rq);
e8fa13626   Steven Rostedt   sched: add RT tas...
1342

e8fa13626   Steven Rostedt   sched: add RT tas...
1343
1344
  out:
  	put_task_struct(next_task);
311e800e1   Hillf Danton   sched, rt: Fix rq...
1345
  	return ret;
e8fa13626   Steven Rostedt   sched: add RT tas...
1346
  }
e8fa13626   Steven Rostedt   sched: add RT tas...
1347
1348
1349
1350
1351
1352
  static void push_rt_tasks(struct rq *rq)
  {
  	/* push_rt_task will return true if it moved an RT task */
  	while (push_rt_task(rq))
  		;
  }
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1353
1354
  static int pull_rt_task(struct rq *this_rq)
  {
80bf3171d   Ingo Molnar   sched: clean up p...
1355
  	int this_cpu = this_rq->cpu, ret = 0, cpu;
a8728944e   Gregory Haskins   sched: use highes...
1356
  	struct task_struct *p;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1357
  	struct rq *src_rq;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1358

637f50851   Gregory Haskins   sched: only balan...
1359
  	if (likely(!rt_overloaded(this_rq)))
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1360
  		return 0;
c6c4927b2   Rusty Russell   sched: convert st...
1361
  	for_each_cpu(cpu, this_rq->rd->rto_mask) {
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1362
1363
1364
1365
  		if (this_cpu == cpu)
  			continue;
  
  		src_rq = cpu_rq(cpu);
74ab8e4f6   Gregory Haskins   sched: use highes...
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
  
  		/*
  		 * Don't bother taking the src_rq->lock if the next highest
  		 * task is known to be lower-priority than our current task.
  		 * This may look racy, but if this value is about to go
  		 * logically higher, the src_rq will push this task away.
  		 * And if it's going logically lower, we do not care.
  		 */
  		if (src_rq->rt.highest_prio.next >=
  		    this_rq->rt.highest_prio.curr)
  			continue;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1377
1378
1379
  		/*
  		 * We can potentially drop this_rq's lock in
  		 * double_lock_balance, and another CPU could
a8728944e   Gregory Haskins   sched: use highes...
1380
  		 * alter this_rq
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1381
  		 */
a8728944e   Gregory Haskins   sched: use highes...
1382
  		double_lock_balance(this_rq, src_rq);
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1383
1384
1385
1386
  
  		/*
  		 * Are there still pullable RT tasks?
  		 */
614ee1f61   Mike Galbraith   sched: pull_rt_ta...
1387
1388
  		if (src_rq->rt.rt_nr_running <= 1)
  			goto skip;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1389

f65eda4f7   Steven Rostedt   sched: pull RT ta...
1390
1391
1392
1393
1394
1395
  		p = pick_next_highest_task_rt(src_rq, this_cpu);
  
  		/*
  		 * Do we have an RT task that preempts
  		 * the to-be-scheduled task?
  		 */
a8728944e   Gregory Haskins   sched: use highes...
1396
  		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1397
  			WARN_ON(p == src_rq->curr);
fd2f4419b   Peter Zijlstra   sched: Provide p-...
1398
  			WARN_ON(!p->on_rq);
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1399
1400
1401
1402
1403
1404
1405
  
  			/*
  			 * There's a chance that p is higher in priority
  			 * than what's currently running on its cpu.
  			 * This just means that p is waking up and hasn't
  			 * had a chance to schedule. We only pull
  			 * p if it is lower in priority than the
a8728944e   Gregory Haskins   sched: use highes...
1406
  			 * current task on the run queue
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1407
  			 */
a8728944e   Gregory Haskins   sched: use highes...
1408
  			if (p->prio < src_rq->curr->prio)
614ee1f61   Mike Galbraith   sched: pull_rt_ta...
1409
  				goto skip;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1410
1411
1412
1413
1414
1415
1416
1417
1418
  
  			ret = 1;
  
  			deactivate_task(src_rq, p, 0);
  			set_task_cpu(p, this_cpu);
  			activate_task(this_rq, p, 0);
  			/*
  			 * We continue with the search, just in
  			 * case there's an even higher prio task
25985edce   Lucas De Marchi   Fix common misspe...
1419
  			 * in another runqueue. (low likelihood
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1420
  			 * but possible)
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1421
  			 */
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1422
  		}
492462742   Peter Zijlstra   sched: Unindent l...
1423
  skip:
1b12bbc74   Peter Zijlstra   lockdep: re-annot...
1424
  		double_unlock_balance(this_rq, src_rq);
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1425
1426
1427
1428
  	}
  
  	return ret;
  }
9a897c5a6   Steven Rostedt   sched: RT-balance...
1429
  static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1430
1431
  {
  	/* Try to pull RT tasks here if we lower this rq's prio */
33c3d6c61   Yong Zhang   sched: Cleanup pr...
1432
  	if (rq->rt.highest_prio.curr > prev->prio)
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1433
1434
  		pull_rt_task(rq);
  }
9a897c5a6   Steven Rostedt   sched: RT-balance...
1435
  static void post_schedule_rt(struct rq *rq)
e8fa13626   Steven Rostedt   sched: add RT tas...
1436
  {
967fc0467   Gregory Haskins   sched: add sched_...
1437
  	push_rt_tasks(rq);
e8fa13626   Steven Rostedt   sched: add RT tas...
1438
  }
8ae121ac8   Gregory Haskins   sched: fix RT tas...
1439
1440
1441
1442
  /*
   * If we are not running and we are not going to reschedule soon, we should
   * try to push tasks away now
   */
efbbd05a5   Peter Zijlstra   sched: Add pre an...
1443
  static void task_woken_rt(struct rq *rq, struct task_struct *p)
4642dafdf   Steven Rostedt   sched: push RT ta...
1444
  {
9a897c5a6   Steven Rostedt   sched: RT-balance...
1445
  	if (!task_running(rq, p) &&
8ae121ac8   Gregory Haskins   sched: fix RT tas...
1446
  	    !test_tsk_need_resched(rq->curr) &&
917b627d4   Gregory Haskins   sched: create "pu...
1447
  	    has_pushable_tasks(rq) &&
b3bc211cf   Steven Rostedt   sched: Give CPU b...
1448
  	    p->rt.nr_cpus_allowed > 1 &&
43fa5460f   Steven Rostedt   sched: Try not to...
1449
  	    rt_task(rq->curr) &&
b3bc211cf   Steven Rostedt   sched: Give CPU b...
1450
  	    (rq->curr->rt.nr_cpus_allowed < 2 ||
3be209a8e   Shawn Bohrer   sched/rt: Migrate...
1451
  	     rq->curr->prio <= p->prio))
4642dafdf   Steven Rostedt   sched: push RT ta...
1452
1453
  		push_rt_tasks(rq);
  }
cd8ba7cd9   Mike Travis   sched: add new se...
1454
  static void set_cpus_allowed_rt(struct task_struct *p,
96f874e26   Rusty Russell   sched: convert re...
1455
  				const struct cpumask *new_mask)
73fe6aae8   Gregory Haskins   sched: add RT-bal...
1456
  {
96f874e26   Rusty Russell   sched: convert re...
1457
  	int weight = cpumask_weight(new_mask);
73fe6aae8   Gregory Haskins   sched: add RT-bal...
1458
1459
1460
1461
1462
1463
1464
  
  	BUG_ON(!rt_task(p));
  
  	/*
  	 * Update the migration status of the RQ if we have an RT task
  	 * which is running AND changing its weight value.
  	 */
fd2f4419b   Peter Zijlstra   sched: Provide p-...
1465
  	if (p->on_rq && (weight != p->rt.nr_cpus_allowed)) {
73fe6aae8   Gregory Haskins   sched: add RT-bal...
1466
  		struct rq *rq = task_rq(p);
917b627d4   Gregory Haskins   sched: create "pu...
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
  		if (!task_current(rq, p)) {
  			/*
  			 * Make sure we dequeue this task from the pushable list
  			 * before going further.  It will either remain off of
  			 * the list because we are no longer pushable, or it
  			 * will be requeued.
  			 */
  			if (p->rt.nr_cpus_allowed > 1)
  				dequeue_pushable_task(rq, p);
  
  			/*
  			 * Requeue if our weight is changing and still > 1
  			 */
  			if (weight > 1)
  				enqueue_pushable_task(rq, p);
  
  		}
6f505b164   Peter Zijlstra   sched: rt group s...
1484
  		if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
73fe6aae8   Gregory Haskins   sched: add RT-bal...
1485
  			rq->rt.rt_nr_migratory++;
6f505b164   Peter Zijlstra   sched: rt group s...
1486
  		} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
73fe6aae8   Gregory Haskins   sched: add RT-bal...
1487
1488
1489
  			BUG_ON(!rq->rt.rt_nr_migratory);
  			rq->rt.rt_nr_migratory--;
  		}
398a153b1   Gregory Haskins   sched: fix build ...
1490
  		update_rt_migration(&rq->rt);
73fe6aae8   Gregory Haskins   sched: add RT-bal...
1491
  	}
73fe6aae8   Gregory Haskins   sched: add RT-bal...
1492
  }
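  /*
   * (Editor's sketch, not part of sched_rt.c: the hook above runs when a
   * task's affinity mask changes.  From user space that change is driven
   * by sched_setaffinity(2); the minimal program below pins the calling
   * task to CPUs 0-1, which for an RT task is what updates
   * nr_cpus_allowed and the rt_nr_migratory accounting seen above.
   * Assumes a glibc system; _GNU_SOURCE is needed for cpu_set_t.)
   */
  #define _GNU_SOURCE
  #include <sched.h>
  #include <stdio.h>
  
  int main(void)
  {
  	cpu_set_t set;
  
  	CPU_ZERO(&set);
  	CPU_SET(0, &set);
  	CPU_SET(1, &set);		/* weight == 2: still pushable */
  
  	if (sched_setaffinity(0, sizeof(set), &set) != 0) {
  		perror("sched_setaffinity");
  		return 1;
  	}
  	printf("now restricted to CPUs 0-1\n");
  	return 0;
  }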
deeeccd41   Ingo Molnar   sched: clean up o...
1493

bdd7c81b4   Ingo Molnar   sched: fix sched_...
1494
  /* Assumes rq->lock is held */
1f11eb6a8   Gregory Haskins   sched: fix cpupri...
1495
  static void rq_online_rt(struct rq *rq)
bdd7c81b4   Ingo Molnar   sched: fix sched_...
1496
1497
1498
  {
  	if (rq->rt.overloaded)
  		rt_set_overload(rq);
6e0534f27   Gregory Haskins   sched: use a 2-d ...
1499

7def2be1d   Peter Zijlstra   sched: fix hotplu...
1500
  	__enable_runtime(rq);
e864c499d   Gregory Haskins   sched: track the ...
1501
  	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
bdd7c81b4   Ingo Molnar   sched: fix sched_...
1502
1503
1504
  }
  
  /* Assumes rq->lock is held */
1f11eb6a8   Gregory Haskins   sched: fix cpupri...
1505
  static void rq_offline_rt(struct rq *rq)
bdd7c81b4   Ingo Molnar   sched: fix sched_...
1506
1507
1508
  {
  	if (rq->rt.overloaded)
  		rt_clear_overload(rq);
6e0534f27   Gregory Haskins   sched: use a 2-d ...
1509

7def2be1d   Peter Zijlstra   sched: fix hotplu...
1510
  	__disable_runtime(rq);
6e0534f27   Gregory Haskins   sched: use a 2-d ...
1511
  	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
bdd7c81b4   Ingo Molnar   sched: fix sched_...
1512
  }
cb4698450   Steven Rostedt   sched: RT-balance...
1513
1514
1515
1516
1517
  
  /*
   * When switching from the rt queue, we bring ourselves to a position
   * where we may want to pull RT tasks from other runqueues.
   */
da7a735e5   Peter Zijlstra   sched: Fix switch...
1518
  static void switched_from_rt(struct rq *rq, struct task_struct *p)
cb4698450   Steven Rostedt   sched: RT-balance...
1519
1520
1521
1522
1523
1524
1525
1526
  {
  	/*
  	 * If there are other RT tasks then we will reschedule
  	 * and the scheduling of the other RT tasks will handle
  	 * the balancing. But if we are the last RT task
  	 * we may need to handle the pulling of RT tasks
  	 * now.
  	 */
fd2f4419b   Peter Zijlstra   sched: Provide p-...
1527
  	if (p->on_rq && !rq->rt.rt_nr_running)
cb4698450   Steven Rostedt   sched: RT-balance...
1528
1529
  		pull_rt_task(rq);
  }
3d8cbdf86   Rusty Russell   sched: convert lo...
1530
1531
1532
1533
1534
1535
  
  static inline void init_sched_rt_class(void)
  {
  	unsigned int i;
  
  	for_each_possible_cpu(i)
eaa958402   Yinghai Lu   cpumask: alloc ze...
1536
  		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
6ca09dfc9   Mike Travis   sched: put back s...
1537
  					GFP_KERNEL, cpu_to_node(i));
3d8cbdf86   Rusty Russell   sched: convert lo...
1538
  }
cb4698450   Steven Rostedt   sched: RT-balance...
1539
1540
1541
1542
1543
1544
1545
  #endif /* CONFIG_SMP */
  
  /*
   * When switching a task to RT, we may overload the runqueue
   * with RT tasks. In this case we try to push them off to
   * other runqueues.
   */
da7a735e5   Peter Zijlstra   sched: Fix switch...
1546
  static void switched_to_rt(struct rq *rq, struct task_struct *p)
cb4698450   Steven Rostedt   sched: RT-balance...
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
  {
  	int check_resched = 1;
  
  	/*
  	 * If we are already running, then there's nothing
  	 * that needs to be done. But if we are not running,
  	 * we may need to preempt the currently running task.
  	 * If that currently running task is also an RT task,
  	 * then see if we can move to another run queue.
  	 */
fd2f4419b   Peter Zijlstra   sched: Provide p-...
1557
  	if (p->on_rq && rq->curr != p) {
cb4698450   Steven Rostedt   sched: RT-balance...
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
  #ifdef CONFIG_SMP
  		if (rq->rt.overloaded && push_rt_task(rq) &&
  		    /* Don't resched if we changed runqueues */
  		    rq != task_rq(p))
  			check_resched = 0;
  #endif /* CONFIG_SMP */
  		if (check_resched && p->prio < rq->curr->prio)
  			resched_task(rq->curr);
  	}
  }
  
  /*
   * Priority of the task has changed. This may cause
   * us to initiate a push or pull.
   */
da7a735e5   Peter Zijlstra   sched: Fix switch...
1573
1574
  static void
  prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
cb4698450   Steven Rostedt   sched: RT-balance...
1575
  {
fd2f4419b   Peter Zijlstra   sched: Provide p-...
1576
  	if (!p->on_rq)
da7a735e5   Peter Zijlstra   sched: Fix switch...
1577
1578
1579
  		return;
  
  	if (rq->curr == p) {
cb4698450   Steven Rostedt   sched: RT-balance...
1580
1581
1582
1583
1584
1585
1586
1587
1588
  #ifdef CONFIG_SMP
  		/*
  		 * If our priority decreases while running, we
  		 * may need to pull tasks to this runqueue.
  		 */
  		if (oldprio < p->prio)
  			pull_rt_task(rq);
  		/*
  		 * If there's a higher priority task waiting to run
6fa46fa52   Steven Rostedt   sched: balance RT...
1589
1590
1591
  		 * then reschedule. Note, the above pull_rt_task
  		 * can release the rq lock and p could migrate.
  		 * Only reschedule if p is still on the same runqueue.
cb4698450   Steven Rostedt   sched: RT-balance...
1592
  		 */
e864c499d   Gregory Haskins   sched: track the ...
1593
  		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
cb4698450   Steven Rostedt   sched: RT-balance...
1594
1595
1596
1597
1598
  			resched_task(p);
  #else
  		/* For UP simply resched on drop of prio */
  		if (oldprio < p->prio)
  			resched_task(p);
e8fa13626   Steven Rostedt   sched: add RT tas...
1599
  #endif /* CONFIG_SMP */
cb4698450   Steven Rostedt   sched: RT-balance...
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
  	} else {
  		/*
  		 * This task is not running, but if its priority
  		 * is higher than the current running task's
  		 * then reschedule.
  		 */
  		if (p->prio < rq->curr->prio)
  			resched_task(rq->curr);
  	}
  }
78f2c7db6   Peter Zijlstra   sched: SCHED_FIFO...
1610
1611
1612
  static void watchdog(struct rq *rq, struct task_struct *p)
  {
  	unsigned long soft, hard;
78d7d407b   Jiri Slaby   kernel core: use ...
1613
1614
1615
  	/* max may change after cur was read, this will be fixed next tick */
  	soft = task_rlimit(p, RLIMIT_RTTIME);
  	hard = task_rlimit_max(p, RLIMIT_RTTIME);
78f2c7db6   Peter Zijlstra   sched: SCHED_FIFO...
1616
1617
1618
1619
1620
1621
  
  	if (soft != RLIM_INFINITY) {
  		unsigned long next;
  
  		p->rt.timeout++;
  		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
5a52dd500   Peter Zijlstra   sched: rt-watchdo...
1622
  		if (p->rt.timeout > next)
f06febc96   Frank Mayhar   timers: fix itime...
1623
  			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
78f2c7db6   Peter Zijlstra   sched: SCHED_FIFO...
1624
1625
  	}
  }
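  /*
   * (Editor's sketch, not part of sched_rt.c: the watchdog above enforces
   * RLIMIT_RTTIME, the amount of CPU time an RT task may consume without
   * blocking, in microseconds.  Below is a minimal user-space program that
   * arms the limit for itself; it assumes a Linux libc exposing the
   * Linux-specific RLIMIT_RTTIME in <sys/resource.h>.)
   */
  #include <stdio.h>
  #include <sys/resource.h>
  
  int main(void)
  {
  	struct rlimit rl = {
  		.rlim_cur = 500000,	/* soft limit: 0.5 s of RT CPU time */
  		.rlim_max = 1000000,	/* hard limit: 1 s */
  	};
  
  	if (setrlimit(RLIMIT_RTTIME, &rl) != 0) {
  		perror("setrlimit(RLIMIT_RTTIME)");
  		return 1;
  	}
  	printf("RLIMIT_RTTIME armed\n");
  	return 0;
  }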
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1626

8f4d37ec0   Peter Zijlstra   sched: high-res p...
1627
  static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1628
  {
67e2be023   Peter Zijlstra   sched: rt: accoun...
1629
  	update_curr_rt(rq);
78f2c7db6   Peter Zijlstra   sched: SCHED_FIFO...
1630
  	watchdog(rq, p);
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1631
1632
1633
1634
1635
1636
  	/*
  	 * RR tasks need a special form of timeslice management.
  	 * FIFO tasks have no timeslices.
  	 */
  	if (p->policy != SCHED_RR)
  		return;
fa717060f   Peter Zijlstra   sched: sched_rt_e...
1637
  	if (--p->rt.time_slice)
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1638
  		return;
fa717060f   Peter Zijlstra   sched: sched_rt_e...
1639
  	p->rt.time_slice = DEF_TIMESLICE;
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1640

98fbc7985   Dmitry Adamushko   sched: optimize t...
1641
1642
1643
1644
  	/*
  	 * Requeue to the end of the queue if we are not the only element
  	 * on the queue:
  	 */
fa717060f   Peter Zijlstra   sched: sched_rt_e...
1645
  	if (p->rt.run_list.prev != p->rt.run_list.next) {
7ebefa8ce   Dmitry Adamushko   sched: rework of ...
1646
  		requeue_task_rt(rq, p, 0);
98fbc7985   Dmitry Adamushko   sched: optimize t...
1647
1648
  		set_tsk_need_resched(p);
  	}
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1649
  }
83b699ed2   Srivatsa Vaddagiri   sched: revert rec...
1650
1651
1652
  static void set_curr_task_rt(struct rq *rq)
  {
  	struct task_struct *p = rq->curr;
305e6835e   Venkatesh Pallipadi   sched: Do not acc...
1653
  	p->se.exec_start = rq->clock_task;
917b627d4   Gregory Haskins   sched: create "pu...
1654
1655
1656
  
  	/* The running task is never eligible for pushing */
  	dequeue_pushable_task(rq, p);
83b699ed2   Srivatsa Vaddagiri   sched: revert rec...
1657
  }
6d686f456   H Hartley Sweeten   sched: Don't expo...
1658
  static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
0d721cead   Peter Williams   sched: Simplify s...
1659
1660
1661
1662
1663
1664
1665
1666
1667
  {
  	/*
  	 * Time slice is 0 for SCHED_FIFO tasks
  	 */
  	if (task->policy == SCHED_RR)
  		return DEF_TIMESLICE;
  	else
  		return 0;
  }
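  /*
   * (Editor's sketch, not part of sched_rt.c: get_rr_interval_rt() is what
   * sched_rr_get_interval(2) reports - a non-zero round-robin slice for
   * SCHED_RR, zero for SCHED_FIFO.  The stand-alone program below switches
   * itself to SCHED_RR and reads the slice back; running it needs
   * CAP_SYS_NICE or root.)
   */
  #include <sched.h>
  #include <stdio.h>
  #include <time.h>
  
  int main(void)
  {
  	struct sched_param sp = { .sched_priority = 10 };
  	struct timespec ts;
  
  	if (sched_setscheduler(0, SCHED_RR, &sp) != 0) {
  		perror("sched_setscheduler(SCHED_RR)");
  		return 1;
  	}
  	if (sched_rr_get_interval(0, &ts) != 0) {
  		perror("sched_rr_get_interval");
  		return 1;
  	}
  	printf("RR timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
  	return 0;
  }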
2abdad0a4   Harvey Harrison   sched: make rt_sc...
1668
  static const struct sched_class rt_sched_class = {
5522d5d5f   Ingo Molnar   sched: mark sched...
1669
  	.next			= &fair_sched_class,
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1670
1671
1672
1673
1674
1675
1676
1677
  	.enqueue_task		= enqueue_task_rt,
  	.dequeue_task		= dequeue_task_rt,
  	.yield_task		= yield_task_rt,
  
  	.check_preempt_curr	= check_preempt_curr_rt,
  
  	.pick_next_task		= pick_next_task_rt,
  	.put_prev_task		= put_prev_task_rt,
681f3e685   Peter Williams   sched: isolate SM...
1678
  #ifdef CONFIG_SMP
4ce72a2c0   Li Zefan   sched: add CONFIG...
1679
  	.select_task_rq		= select_task_rq_rt,
73fe6aae8   Gregory Haskins   sched: add RT-bal...
1680
  	.set_cpus_allowed       = set_cpus_allowed_rt,
1f11eb6a8   Gregory Haskins   sched: fix cpupri...
1681
1682
  	.rq_online              = rq_online_rt,
  	.rq_offline             = rq_offline_rt,
9a897c5a6   Steven Rostedt   sched: RT-balance...
1683
1684
  	.pre_schedule		= pre_schedule_rt,
  	.post_schedule		= post_schedule_rt,
efbbd05a5   Peter Zijlstra   sched: Add pre an...
1685
  	.task_woken		= task_woken_rt,
cb4698450   Steven Rostedt   sched: RT-balance...
1686
  	.switched_from		= switched_from_rt,
681f3e685   Peter Williams   sched: isolate SM...
1687
  #endif
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1688

83b699ed2   Srivatsa Vaddagiri   sched: revert rec...
1689
  	.set_curr_task          = set_curr_task_rt,
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1690
  	.task_tick		= task_tick_rt,
cb4698450   Steven Rostedt   sched: RT-balance...
1691

0d721cead   Peter Williams   sched: Simplify s...
1692
  	.get_rr_interval	= get_rr_interval_rt,
cb4698450   Steven Rostedt   sched: RT-balance...
1693
1694
  	.prio_changed		= prio_changed_rt,
  	.switched_to		= switched_to_rt,
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1695
  };
ada18de2e   Peter Zijlstra   sched: debug: add...
1696
1697
1698
1699
1700
1701
  
  #ifdef CONFIG_SCHED_DEBUG
  extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
  
  static void print_rt_stats(struct seq_file *m, int cpu)
  {
ec514c487   Cheng Xu   sched: Fix rt_rq ...
1702
  	rt_rq_iter_t iter;
ada18de2e   Peter Zijlstra   sched: debug: add...
1703
1704
1705
  	struct rt_rq *rt_rq;
  
  	rcu_read_lock();
ec514c487   Cheng Xu   sched: Fix rt_rq ...
1706
  	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
ada18de2e   Peter Zijlstra   sched: debug: add...
1707
1708
1709
  		print_rt_rq(m, cpu, rt_rq);
  	rcu_read_unlock();
  }
55e12e5e7   Dhaval Giani   sched: make sched...
1710
  #endif /* CONFIG_SCHED_DEBUG */