kernel/sched_rt.c

/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_RT_GROUP_SCHED

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);
	struct rq *rq = task_rq(p);

	return &rq->rt;
}

#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP
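/*
 * Summary of the overload tracking below: a runqueue counts as "RT
 * overloaded" once it has more than one runnable RT task of which at
 * least one can migrate (see update_rt_migration()).  Overloaded CPUs
 * are published in the root domain's rto_mask, and rd->rto_count caches
 * how many bits are set so that rt_overloaded() is a single atomic read.
 */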
static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (!rt_entity_is_task(rt_se))
		return;

	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (rt_se->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (!rt_entity_is_task(rt_se))
		return;

	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (rt_se->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}
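/*
 * rq->rt.pushable_tasks is a priority-ordered plist of queued RT tasks
 * that are not currently running and may run on more than one CPU; see
 * enqueue_task_rt() and put_prev_task_rt(), which add such tasks, and
 * pick_next_task_rt(), which removes the one it is about to run and uses
 * has_pushable_tasks() to decide whether a post-schedule push is needed.
 */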
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
}

static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

#endif /* CONFIG_SMP */

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED
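/*
 * With RT group scheduling, each task_group carries an rt_bandwidth pair:
 * rt_period is the accounting window and rt_runtime the CPU time the
 * group's RT entities may consume per window on each CPU (RUNTIME_INF
 * means unlimited).  As a worked example with the usual defaults of a 1s
 * period and 0.95s runtime, a group is throttled once it has run for
 * 950ms within the current second.  The helpers below fetch these values
 * for a given rt_rq.
 */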
static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
	for (iter = list_entry_rcu(task_groups.next, typeof(*iter), list); \
	     (&iter->list != &task_groups) && \
	     (rt_rq = iter->rt_rq[cpu_of(rq)]); \
	     iter = list_entry_rcu(iter->list.next, typeof(*iter), list))

static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
{
	list_add_rcu(&rt_rq->leaf_rt_rq_list,
			&rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
}

static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
{
	list_del_rcu(&rt_rq->leaf_rt_rq_list);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_rq->rt_nr_running) {
		if (rt_se && !on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, false);
		if (rt_rq->highest_prio.curr < curr->prio)
			resched_task(curr);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_se && on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}
#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_rq(smp_processor_id())->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
{
}

static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
{
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_running)
		resched_task(rq_of_rt_rq(rt_rq)->curr);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_SMP
/*
 * We ran out of runtime; see if we can borrow some from our neighbours.
 */
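/*
 * Worked example for the loop below (figures purely illustrative): with
 * four CPUs in rd->span, weight is 4, so a neighbour with 40ms of unused
 * runtime (rt_runtime - rt_time) donates at most 10ms per pass, and the
 * loop stops early once this rt_rq has accumulated a full rt_period.
 */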
static int do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
	int i, weight, more = 0;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			more = 1;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return more;
}
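/*
 * __disable_runtime() and __enable_runtime() below are the bookkeeping
 * pair for taking an rq out of, and back into, this borrowing scheme:
 * disable reclaims whatever this rq lent out and then parks rt_runtime at
 * RUNTIME_INF so nobody can steal from it, enable restores the configured
 * bandwidth and clears accumulated rt_time and throttling.
 */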
/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have, that's the amount of runtime
		 * we lent and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim: take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		BUG_ON(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void disable_runtime(struct rq *rq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	__disable_runtime(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

static void __enable_runtime(struct rq *rq)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void enable_runtime(struct rq *rq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	__enable_runtime(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}
static int balance_runtime(struct rt_rq *rt_rq)
{
	int more = 0;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		more = do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
	}

	return more;
}
#else /* !CONFIG_SMP */
static inline int balance_runtime(struct rt_rq *rt_rq)
{
	return 0;
}
#endif /* CONFIG_SMP */
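/*
 * Periodic replenishment for one rt_bandwidth: for every rt_rq in the
 * period mask, rt_time is decayed by up to overrun*runtime and a group
 * whose rt_time dropped back below runtime is unthrottled and re-enqueued.
 * The return value reports whether everything was idle, i.e. whether the
 * caller may let the period timer stop.
 */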
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1;
	const struct cpumask *span;

	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return 1;

	span = sched_rt_period_mask();
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);

		raw_spin_lock(&rq->lock);
		if (rt_rq->rt_time) {
			u64 runtime;

			raw_spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;

				/*
				 * Force a clock update if the CPU was idle,
				 * lest wakeup -> unthrottle time accumulate.
				 */
				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
					rq->skip_clock_update = -1;
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running) {
			idle = 0;
			if (!rt_rq_throttled(rt_rq))
				enqueue = 1;
		}

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		raw_spin_unlock(&rq->lock);
	}

	return idle;
}
static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}
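/*
 * Throttling check, called from update_curr_rt() with rt_runtime_lock
 * held: once rt_time exceeds the (possibly just re-balanced) runtime the
 * rt_rq is marked throttled and dequeued, unless boosted tasks keep
 * rt_rq_throttled() false.  A non-zero return makes the caller resched.
 */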
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		rt_rq->rt_throttled = 1;
		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	u64 delta_exec;

	if (curr->sched_class != &rt_sched_class)
		return;

	delta_exec = rq->clock_task - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq->clock_task;
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))
				resched_task(curr);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		}
	}
}
#if defined CONFIG_SMP

static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
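/*
 * rt_rq->highest_prio.curr caches the best priority currently queued and
 * .next the second best, so priority changes can be propagated to cpupri
 * without rescanning the whole bitmap; the helpers below keep both values
 * and the cpupri copy in sync as entities are added and removed.
 */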
  
static inline int next_prio(struct rq *rq)
{
	struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);

	if (next && rt_prio(next->prio))
		return next->prio;
	else
		return MAX_RT_PRIO;
}

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (prio < prev_prio) {

		/*
		 * If the new task is higher in priority than anything on the
		 * run-queue, we know that the previous high becomes our
		 * next-highest.
		 */
		rt_rq->highest_prio.next = prev_prio;

		if (rq->online)
			cpupri_set(&rq->rd->cpupri, rq->cpu, prio);

	} else if (prio == rt_rq->highest_prio.curr)
		/*
		 * If the next task is equal in priority to the highest on
		 * the run-queue, then we implicitly know that the next highest
		 * task cannot be any lower than current
		 */
		rt_rq->highest_prio.next = prio;
	else if (prio < rt_rq->highest_prio.next)
		/*
		 * Otherwise, we need to recompute next-highest
		 */
		rt_rq->highest_prio.next = next_prio(rq);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
		rt_rq->highest_prio.next = next_prio(rq);

	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED

static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (prio < prev_prio)
		rt_rq->highest_prio.curr = prio;

	inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (rt_rq->rt_nr_running) {

		WARN_ON(prio < prev_prio);

		/*
		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do
		 */
		if (prio == prev_prio) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);
		}
	} else
		rt_rq->highest_prio.curr = MAX_RT_PRIO;

	dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
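/*
 * rt_nr_boosted counts entities in this group that are RT only because of
 * priority inheritance (prio != normal_prio).  rt_rq_throttled() above
 * does not report a group as throttled while it still holds boosted
 * entities, so bandwidth throttling never blocks a PI-boosted task.
 */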
#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	int prio = rt_se_prio(rt_se);

	WARN_ON(!rt_prio(prio));
	rt_rq->rt_nr_running++;

	inc_rt_prio(rt_rq, prio);
	inc_rt_migration(rt_se, rt_rq);
	inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running--;

	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
	dec_rt_migration(rt_se, rt_rq);
	dec_rt_group(rt_se, rt_rq);
}
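/*
 * The helpers below manipulate one level of the sched_rt_entity hierarchy
 * (__enqueue_rt_entity/__dequeue_rt_entity), while the non-underscored
 * variants walk it: dequeue_rt_stack() removes every queued level from the
 * top down first, and entities are then re-added from the leaf upwards so
 * the per-level priority accounting stays consistent.
 */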
static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
		return;

	if (!rt_rq->rt_nr_running)
		list_add_leaf_rt_rq(rt_rq);

	if (head)
		list_add(&rt_se->run_list, queue);
	else
		list_add_tail(&rt_se->run_list, queue);
	__set_bit(rt_se_prio(rt_se), array->bitmap);

	inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	dec_rt_tasks(rt_se, rt_rq);
	if (!rt_rq->rt_nr_running)
		list_del_leaf_rt_rq(rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
	struct sched_rt_entity *back = NULL;

	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			__dequeue_rt_entity(rt_se);
	}
}
static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	dequeue_rt_stack(rt_se);
	for_each_sched_rt_entity(rt_se)
		__enqueue_rt_entity(rt_se, head);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	dequeue_rt_stack(rt_se);

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = group_rt_rq(rt_se);

		if (rt_rq && rt_rq->rt_nr_running)
			__enqueue_rt_entity(rt_se, false);
	}
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	if (flags & ENQUEUE_WAKEUP)
		rt_se->timeout = 0;

	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);

	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	dequeue_rt_entity(rt_se);

	dequeue_pushable_task(rq, p);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
	if (on_rt_rq(rt_se)) {
		struct rt_prio_array *array = &rt_rq->active;
		struct list_head *queue = array->queue + rt_se_prio(rt_se);

		if (head)
			list_move(&rt_se->run_list, queue);
		else
			list_move_tail(&rt_se->run_list, queue);
	}
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se, head);
	}
}

static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr, 0);
}
#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int
select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
{
	struct task_struct *curr;
	struct rq *rq;
	int cpu;

	if (sd_flag != SD_BALANCE_WAKE)
		return smp_processor_id();

	cpu = task_cpu(p);
	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = ACCESS_ONCE(rq->curr); /* unlocked access */

	/*
	 * If the current task on @p's runqueue is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues. If the woken
	 * task is a higher priority, then it will stay on this CPU
	 * and the lower prio task should be moved to another CPU.
	 * Even though this will probably make the lower prio task
	 * lose its cache, we do not want to bounce a higher task
	 * around just because it gave up its CPU, perhaps for a
	 * lock?
	 *
	 * For equal prio tasks, we just let the scheduler sort it out.
	 *
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away.
	 *
	 * This test is optimistic; if we get it wrong the load-balancer
	 * will have to sort it out.
	 */
	if (curr && unlikely(rt_task(curr)) &&
	    (curr->rt.nr_cpus_allowed < 2 ||
	     curr->prio < p->prio) &&
	    (p->rt.nr_cpus_allowed > 1)) {
		int target = find_lowest_rq(p);

		if (target != -1)
			cpu = target;
	}
	rcu_read_unlock();

	return cpu;
}
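/*
 * Equal-priority wakeups do not preempt, so check_preempt_equal_prio()
 * handles the case where the woken task has nowhere else to go but the
 * currently running task does: requeue the wakee at the head of its list
 * and reschedule, letting the push logic move current to another CPU.
 */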
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
	if (rq->curr->rt.nr_cpus_allowed == 1)
		return;

	if (p->rt.nr_cpus_allowed != 1
	    && cpupri_find(&rq->rd->cpupri, p, NULL))
		return;

	if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
		return;

	/*
	 * There appear to be other CPUs that can accept
	 * current and none to run 'p', so let's reschedule
	 * to try and push current away:
	 */
	requeue_task_rt(rq, p, 1);
	resched_task(rq->curr);
}

#endif /* CONFIG_SMP */
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
	if (p->prio < rq->curr->prio) {
		resched_task(rq->curr);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * If:
	 *
	 * - the newly woken task is of equal priority to the current task
	 * - the newly woken task is non-migratable while current is migratable
	 * - current will be preempted on the next reschedule
	 *
	 * we should check to see if current can readily move to a different
	 * cpu.  If so, we will reschedule to allow the push logic to try
	 * to move current somewhere else, making room for our non-migratable
	 * task.
	 */
	if (p->prio == rq->curr->prio && !need_resched())
		check_preempt_equal_prio(rq, p);
#endif
}
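/*
 * Picking the next RT task is O(1): sched_find_first_bit() finds the
 * highest-priority non-empty queue in array->bitmap and the task at the
 * head of that list runs.  With group scheduling the scan repeats one
 * hierarchy level down until it reaches a real task (see
 * _pick_next_task_rt() below).
 */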
static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->queue + idx;
	next = list_entry(queue->next, struct sched_rt_entity, run_list);

	return next;
}

static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct task_struct *p;
	struct rt_rq *rt_rq;

	rt_rq = &rq->rt;

	if (unlikely(!rt_rq->rt_nr_running))
		return NULL;

	if (rt_rq_throttled(rt_rq))
		return NULL;

	do {
		rt_se = pick_next_rt_entity(rq, rt_rq);
		BUG_ON(!rt_se);
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	p = rt_task_of(rt_se);
	p->se.exec_start = rq->clock_task;

	return p;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct task_struct *p = _pick_next_task_rt(rq);

	/* The running task is never eligible for pushing */
	if (p)
		dequeue_pushable_task(rq, p);

#ifdef CONFIG_SMP
	/*
	 * We detect this state here so that we can avoid taking the RQ
	 * lock again later if there is no need to push
	 */
	rq->post_schedule = has_pushable_tasks(rq);
#endif

	return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;

	/*
	 * The previous task needs to be made eligible for pushing
	 * if it is still active
	 */
	if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}
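
/*
 * A task goes back on the pushable list only if it is still queued on
 * an rt_rq and may run on more than one CPU; a pinned task
 * (nr_cpus_allowed == 1) can never be pushed elsewhere.
 */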

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
	    (p->rt.nr_cpus_allowed > 1))
		return 1;
	return 0;
}
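
/*
 * pick_rt_task() is the eligibility test used when scanning a remote
 * runqueue: the candidate must not be running there right now, must be
 * allowed on the requested cpu (or cpu < 0 for "any"), and must not be
 * pinned to a single CPU.
 */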

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
	struct task_struct *next = NULL;
	struct sched_rt_entity *rt_se;
	struct rt_prio_array *array;
	struct rt_rq *rt_rq;
	int idx;

	for_each_leaf_rt_rq(rt_rq, rq) {
		array = &rt_rq->active;
		idx = sched_find_first_bit(array->bitmap);
next_idx:
		if (idx >= MAX_RT_PRIO)
			continue;
		if (next && next->prio < idx)
			continue;
		list_for_each_entry(rt_se, array->queue + idx, run_list) {
			struct task_struct *p;

			if (!rt_entity_is_task(rt_se))
				continue;

			p = rt_task_of(rt_se);
			if (pick_rt_task(rq, p, cpu)) {
				next = p;
				break;
			}
		}
		if (!next) {
			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
			goto next_idx;
		}
	}

	return next;
}
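
/*
 * "Second highest" because the highest-priority task on @rq is the one
 * already running there and is filtered out by pick_rt_task();
 * pull_rt_task() uses this helper to find the best task it is actually
 * allowed to steal for @cpu.
 */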

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu      = task_cpu(task);

	if (task->rt.nr_cpus_allowed == 1)
		return -1; /* No other targets possible */

	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
		return -1; /* No targets found */

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system.  Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpumask_test_cpu(cpu, lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
	if (!cpumask_test_cpu(this_cpu, lowest_mask))
		this_cpu = -1; /* Skip this_cpu opt if not among lowest */

	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			int best_cpu;

			/*
			 * "this_cpu" is cheaper to preempt than a
			 * remote processor.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd)))
				return this_cpu;

			best_cpu = cpumask_first_and(lowest_mask,
						     sched_domain_span(sd));
			if (best_cpu < nr_cpu_ids)
				return best_cpu;
		}
	}

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(lowest_mask);
	if (cpu < nr_cpu_ids)
		return cpu;
	return -1;
}
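
/*
 * Target selection is therefore a staged filter: cpupri_find() builds
 * the mask of CPUs whose current priority is low enough to accept
 * @task, the task's last CPU is preferred for cache warmth, the
 * sched_domain spans are then walked to stay topologically close to
 * that CPU, and only as a last resort is any CPU from the mask used.
 */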
  
/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In the
			 * meantime, the task could have migrated already
			 * or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(lowest_rq->cpu,
						       &task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task->on_rq)) {

				raw_spin_unlock(&lowest_rq->lock);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio.curr > task->prio)
			break;

		/* try again */
		double_unlock_balance(rq, lowest_rq);
		lowest_rq = NULL;
	}

	return lowest_rq;
}
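
/*
 * double_lock_balance() may have to drop rq->lock in order to take both
 * locks in the correct order, which opens a window for the task to move
 * or change state; that is why every fact about @task is re-verified
 * once both locks are held, and why the whole search is retried up to
 * RT_MAX_TRIES times.
 */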

static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_tasks(rq))
		return NULL;

	p = plist_first_entry(&rq->rt.pushable_tasks,
			      struct task_struct, pushable_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->rt.nr_cpus_allowed <= 1);

	BUG_ON(!p->on_rq);
	BUG_ON(!rt_task(p));

	return p;
}
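
/*
 * rq->rt.pushable_tasks is a plist kept sorted by priority, so
 * plist_first_entry() hands back the highest-priority candidate.  The
 * BUG_ON()s document the invariants the enqueue/dequeue paths maintain:
 * a pushable task is queued on this runqueue, not currently running,
 * not pinned to one CPU, and is an RT task.
 */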

/*
 * If the current CPU has more than one RT task, see if the non-running
 * task can migrate over to a CPU that is running a task of lesser
 * priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_pushable_task(rq);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that the next_task slipped in with a higher
	 * priority than current. If that's the case just reschedule
	 * current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has migrated.
		 *
		 * We need to make sure that the task is still on the same
		 * run-queue and is also still the next task eligible for
		 * pushing.
		 */
		task = pick_next_pushable_task(rq);
		if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * If we get here, the task hasn't moved at all, but
			 * it has failed to push.  We will not try again,
			 * since the other cpus will pull from us when they
			 * are ready.
			 */
			dequeue_pushable_task(rq, next_task);
			goto out;
		}

		if (!task)
			/* No more tasks, just exit */
			goto out;

		/*
		 * Something has shifted, try again.
		 */
		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);

	resched_task(lowest_rq->curr);

	double_unlock_balance(rq, lowest_rq);

out:
	put_task_struct(next_task);

	return 1;
}
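
/*
 * Push sequence: take the highest-priority pushable task, find and lock
 * the runqueue whose current priority is lowest, deactivate the task
 * here, set_task_cpu() it to the target, activate it there and poke the
 * target's current task to reschedule.  The get_task_struct()/
 * put_task_struct() pair keeps next_task alive across the window where
 * rq->lock may be dropped.
 */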

static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT */
	while (push_rt_task(rq))
		;
}

static int pull_rt_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p;
	struct rq *src_rq;

	if (likely(!rt_overloaded(this_rq)))
		return 0;

	for_each_cpu(cpu, this_rq->rd->rto_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * Don't bother taking the src_rq->lock if the next highest
		 * task is known to be lower-priority than our current task.
		 * This may look racy, but if this value is about to go
		 * logically higher, the src_rq will push this task away.
		 * And if it's going logically lower, we do not care.
		 */
		if (src_rq->rt.highest_prio.next >=
		    this_rq->rt.highest_prio.curr)
			continue;

		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * alter this_rq
		 */
		double_lock_balance(this_rq, src_rq);

		/*
		 * Are there still pullable RT tasks?
		 */
		if (src_rq->rt.rt_nr_running <= 1)
			goto skip;

		p = pick_next_highest_task_rt(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->on_rq);

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue
			 */
			if (p->prio < src_rq->curr->prio)
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	return ret;
}
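
/*
 * Pull walks rd->rto_mask, the set of runqueues currently flagged as
 * RT-overloaded, and only takes a remote lock when the cached
 * highest_prio.next value suggests that runqueue has something better
 * than what this CPU is about to run.  Note the priority convention
 * used throughout: a numerically lower prio value means a higher
 * priority.
 */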

static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
		pull_rt_task(rq);
}

static void post_schedule_rt(struct rq *rq)
{
	push_rt_tasks(rq);
}

/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now
 */
static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    has_pushable_tasks(rq) &&
	    p->rt.nr_cpus_allowed > 1 &&
	    rt_task(rq->curr) &&
	    (rq->curr->rt.nr_cpus_allowed < 2 ||
	     rq->curr->prio < p->prio))
		push_rt_tasks(rq);
}
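
/*
 * The wakeup-time push fires only when the woken task will not run here
 * right away (no reschedule of the current task is pending), it is free
 * to move, and the currently running RT task is either pinned or of
 * higher priority than p, i.e. keeping p on this runqueue would just
 * leave it waiting.
 */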

static void set_cpus_allowed_rt(struct task_struct *p,
				const struct cpumask *new_mask)
{
	int weight = cpumask_weight(new_mask);

	BUG_ON(!rt_task(p));

	/*
	 * Update the migration status of the RQ if we have an RT task
	 * which is running AND changing its weight value.
	 */
	if (p->on_rq && (weight != p->rt.nr_cpus_allowed)) {
		struct rq *rq = task_rq(p);

		if (!task_current(rq, p)) {
			/*
			 * Make sure we dequeue this task from the pushable list
			 * before going further.  It will either remain off of
			 * the list because we are no longer pushable, or it
			 * will be requeued.
			 */
			if (p->rt.nr_cpus_allowed > 1)
				dequeue_pushable_task(rq, p);

			/*
			 * Requeue if our weight is changing and still > 1
			 */
			if (weight > 1)
				enqueue_pushable_task(rq, p);
		}

		if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
			rq->rt.rt_nr_migratory++;
		} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
			BUG_ON(!rq->rt.rt_nr_migratory);
			rq->rt.rt_nr_migratory--;
		}

		update_rt_migration(&rq->rt);
	}

	cpumask_copy(&p->cpus_allowed, new_mask);
	p->rt.nr_cpus_allowed = weight;
}
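
/*
 * rt_nr_migratory counts tasks on this runqueue that may run elsewhere;
 * crossing the one-CPU boundary in either direction adjusts it, and
 * update_rt_migration() then re-evaluates the runqueue's "overloaded"
 * state.  For example, widening a pinned task's mask from 1 CPU to 4
 * bumps the count, while pinning a previously movable task drops it.
 */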

/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_set_overload(rq);

	__enable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
}

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_clear_overload(rq);

	__disable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}

/*
 * When we switch away from the RT queue, we bring ourselves to a
 * position where we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p)
{
	/*
	 * If there are other RT tasks then we will reschedule
	 * and the scheduling of the other RT tasks will handle
	 * the balancing. But if we are the last RT task
	 * we may need to handle the pulling of RT tasks
	 * now.
	 */
	if (p->on_rq && !rq->rt.rt_nr_running)
		pull_rt_task(rq);
}

static inline void init_sched_rt_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
					GFP_KERNEL, cpu_to_node(i));
}

#endif /* CONFIG_SMP */

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p)
{
	int check_resched = 1;

	/*
	 * If we are already running, then there's nothing
	 * that needs to be done. But if we are not running
	 * we may need to preempt the current running task.
	 * If that current running task is also an RT task
	 * then see if we can move to another run queue.
	 */
	if (p->on_rq && rq->curr != p) {
#ifdef CONFIG_SMP
		if (rq->rt.overloaded && push_rt_task(rq) &&
		    /* Don't resched if we changed runqueues */
		    rq != task_rq(p))
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched && p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}
  
/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void
prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
{
	if (!p->on_rq)
		return;

	if (rq->curr == p) {
#ifdef CONFIG_SMP
		/*
		 * If our priority decreases while running, we
		 * may need to pull tasks to this runqueue.
		 */
		if (oldprio < p->prio)
			pull_rt_task(rq);
		/*
		 * If there's a higher priority task waiting to run
		 * then reschedule. Note, the above pull_rt_task
		 * can release the rq lock and p could migrate.
		 * Only reschedule if p is still on the same runqueue.
		 */
		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
			resched_task(p);
#else
		/* For UP simply resched on drop of prio */
		if (oldprio < p->prio)
			resched_task(p);
#endif /* CONFIG_SMP */
	} else {
		/*
		 * This task is not running, but if its priority is
		 * higher than that of the current task then
		 * reschedule.
		 */
		if (p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

static void watchdog(struct rq *rq, struct task_struct *p)
{
	unsigned long soft, hard;

	/* max may change after cur was read, this will be fixed next tick */
	soft = task_rlimit(p, RLIMIT_RTTIME);
	hard = task_rlimit_max(p, RLIMIT_RTTIME);

	if (soft != RLIM_INFINITY) {
		unsigned long next;

		p->rt.timeout++;
		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (p->rt.timeout > next)
			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
	}
}
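
/*
 * The watchdog converts the RLIMIT_RTTIME budget (given in
 * microseconds) into a tick count and, once p->rt.timeout exceeds it,
 * arms the posix cputime expiry so the usual RLIMIT_RTTIME signalling
 * kicks in.  A task would typically opt in from userspace with
 * something like the illustrative snippet below (values are made up):
 *
 *	struct rlimit rl = { .rlim_cur = 500000, .rlim_max = 1000000 };
 *	setrlimit(RLIMIT_RTTIME, &rl);	(limits are in microseconds)
 */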

static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_rt(rq);

	watchdog(rq, p);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
	if (p->rt.run_list.prev != p->rt.run_list.next) {
		requeue_task_rt(rq, p, 0);
		set_tsk_need_resched(p);
	}
}
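
/*
 * SCHED_RR round-robin happens entirely here: each tick decrements the
 * task's slice, and when it reaches zero the slice is refilled with
 * DEF_TIMESLICE and the task is moved to the tail of its priority list,
 * but only if another task is queued at that level (run_list.prev !=
 * run_list.next is a cheap "not the only element" test).  SCHED_FIFO
 * tasks return early and are never rotated.
 */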

static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock_task;

	/* The running task is never eligible for pushing */
	dequeue_pushable_task(rq, p);
}

static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
	/*
	 * Time slice is 0 for SCHED_FIFO tasks
	 */
	if (task->policy == SCHED_RR)
		return DEF_TIMESLICE;
	else
		return 0;
}

static const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_rt,

	.set_cpus_allowed       = set_cpus_allowed_rt,
	.rq_online              = rq_online_rt,
	.rq_offline             = rq_offline_rt,
	.pre_schedule		= pre_schedule_rt,
	.post_schedule		= post_schedule_rt,
	.task_woken		= task_woken_rt,
	.switched_from		= switched_from_rt,
#endif

	.set_curr_task          = set_curr_task_rt,
	.task_tick		= task_tick_rt,

	.get_rr_interval	= get_rr_interval_rt,

	.prio_changed		= prio_changed_rt,
	.switched_to		= switched_to_rt,
};
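
/*
 * rt_sched_class is what plugs this file into the core scheduler: the
 * core never calls these functions directly but walks the class list
 * (RT ahead of fair, via the .next pointer) and dispatches through
 * these hooks, roughly along the lines of the sketch below.  This loop
 * is illustrative of the core's behaviour, not code from this file:
 *
 *	for (class = sched_class_highest; class; class = class->next) {
 *		p = class->pick_next_task(rq);
 *		if (p)
 *			return p;
 *	}
 */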

#ifdef CONFIG_SCHED_DEBUG
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);

static void print_rt_stats(struct seq_file *m, int cpu)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	rcu_read_lock();
	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
		print_rt_rq(m, cpu, rt_rq);
	rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */