kernel/sched/rt.c
  /*
   * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
   * policies)
   */
  #include "sched.h"
  
  #include <linux/slab.h>
  #include <linux/irq_work.h>

  int sched_rr_timeslice = RR_TIMESLICE;
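  /*
   * sched_rr_timeslice is the SCHED_RR quantum, kept in ticks.  RR_TIMESLICE
   * defaults to roughly 100 msecs worth of ticks; the value can be changed
   * at runtime through the sched_rr_timeslice_ms sysctl.
   */
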
  static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
  
  struct rt_bandwidth def_rt_bandwidth;
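
  /*
   * def_rt_bandwidth is the global (non-group) runtime/period pair; it is
   * initialized elsewhere, normally from the sched_rt_runtime_us and
   * sched_rt_period_us sysctls.  The hrtimer callback below forwards the
   * period timer, hands each overrun to do_sched_rt_period_timer() so that
   * throttled runqueues get fresh runtime, and only stops re-arming itself
   * once the bandwidth handling reports idle.
   */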
  
  static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
  {
  	struct rt_bandwidth *rt_b =
  		container_of(timer, struct rt_bandwidth, rt_period_timer);
  	int idle = 0;
  	int overrun;

  	raw_spin_lock(&rt_b->rt_runtime_lock);
  	for (;;) {
  		overrun = hrtimer_forward_now(timer, rt_b->rt_period);
  		if (!overrun)
  			break;
  		raw_spin_unlock(&rt_b->rt_runtime_lock);
  		idle = do_sched_rt_period_timer(rt_b, overrun);
  		raw_spin_lock(&rt_b->rt_runtime_lock);
  	}
  	if (idle)
  		rt_b->rt_period_active = 0;
  	raw_spin_unlock(&rt_b->rt_runtime_lock);
  
  	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
  }
  
  void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
  {
  	rt_b->rt_period = ns_to_ktime(period);
  	rt_b->rt_runtime = runtime;
  
  	raw_spin_lock_init(&rt_b->rt_runtime_lock);
  
  	hrtimer_init(&rt_b->rt_period_timer,
  			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  	rt_b->rt_period_timer.function = sched_rt_period_timer;
  }
  
  static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
  {
  	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
  		return;
  	raw_spin_lock(&rt_b->rt_runtime_lock);
  	if (!rt_b->rt_period_active) {
  		rt_b->rt_period_active = 1;
  		/*
  		 * SCHED_DEADLINE updates the bandwidth, as a runaway
  		 * RT task with a DL task could hog a CPU. But DL does
  		 * not reset the period. If a deadline task was running
  		 * without an RT task running, it can cause RT tasks to
  		 * throttle when they start up. Kick the timer right away
  		 * to update the period.
  		 */
  		hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
  		hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
  	}
  	raw_spin_unlock(&rt_b->rt_runtime_lock);
  }
  #if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
  static void push_irq_work_func(struct irq_work *work);
  #endif
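
  /*
   * init_rt_rq() puts an rt_rq into its empty state: clear the priority
   * array and its bitmap, set the MAX_RT_PRIO delimiter bit used by the
   * bitmap search, and reset the SMP push/overload and runtime accounting
   * fields.
   */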
  void init_rt_rq(struct rt_rq *rt_rq)
  {
  	struct rt_prio_array *array;
  	int i;
  
  	array = &rt_rq->active;
  	for (i = 0; i < MAX_RT_PRIO; i++) {
  		INIT_LIST_HEAD(array->queue + i);
  		__clear_bit(i, array->bitmap);
  	}
  	/* delimiter for bitsearch: */
  	__set_bit(MAX_RT_PRIO, array->bitmap);
  
  #if defined CONFIG_SMP
  	rt_rq->highest_prio.curr = MAX_RT_PRIO;
  	rt_rq->highest_prio.next = MAX_RT_PRIO;
  	rt_rq->rt_nr_migratory = 0;
  	rt_rq->overloaded = 0;
  	plist_head_init(&rt_rq->pushable_tasks);
  
  #ifdef HAVE_RT_PUSH_IPI
  	rt_rq->push_flags = 0;
  	rt_rq->push_cpu = nr_cpu_ids;
  	raw_spin_lock_init(&rt_rq->push_lock);
  	init_irq_work(&rt_rq->push_work, push_irq_work_func);
  #endif
  #endif /* CONFIG_SMP */
  	/* We start in dequeued state, because no RT tasks are queued */
  	rt_rq->rt_queued = 0;
  
  	rt_rq->rt_time = 0;
  	rt_rq->rt_throttled = 0;
  	rt_rq->rt_runtime = 0;
  	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
  }
  #ifdef CONFIG_RT_GROUP_SCHED
  static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
  {
  	hrtimer_cancel(&rt_b->rt_period_timer);
  }
  
  #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
  static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
  {
  #ifdef CONFIG_SCHED_DEBUG
  	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
  #endif
  	return container_of(rt_se, struct task_struct, rt);
  }
  static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
  {
  	return rt_rq->rq;
  }
  
  static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
  {
  	return rt_se->rt_rq;
  }
  static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
  {
  	struct rt_rq *rt_rq = rt_se->rt_rq;
  
  	return rt_rq->rq;
  }
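
  /*
   * RT group-scheduling setup: alloc_rt_sched_group() allocates one rt_rq
   * and one sched_rt_entity per possible CPU for a new task group and wires
   * them into the parent hierarchy through init_tg_rt_entry();
   * free_rt_sched_group() releases it all again.
   */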
  void free_rt_sched_group(struct task_group *tg)
  {
  	int i;
  
  	if (tg->rt_se)
  		destroy_rt_bandwidth(&tg->rt_bandwidth);
  
  	for_each_possible_cpu(i) {
  		if (tg->rt_rq)
  			kfree(tg->rt_rq[i]);
  		if (tg->rt_se)
  			kfree(tg->rt_se[i]);
  	}
  
  	kfree(tg->rt_rq);
  	kfree(tg->rt_se);
  }
  
  void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
  		struct sched_rt_entity *rt_se, int cpu,
  		struct sched_rt_entity *parent)
  {
  	struct rq *rq = cpu_rq(cpu);
  
  	rt_rq->highest_prio.curr = MAX_RT_PRIO;
  	rt_rq->rt_nr_boosted = 0;
  	rt_rq->rq = rq;
  	rt_rq->tg = tg;
  
  	tg->rt_rq[cpu] = rt_rq;
  	tg->rt_se[cpu] = rt_se;
  
  	if (!rt_se)
  		return;
  
  	if (!parent)
  		rt_se->rt_rq = &rq->rt;
  	else
  		rt_se->rt_rq = parent->my_q;
  
  	rt_se->my_q = rt_rq;
  	rt_se->parent = parent;
  	INIT_LIST_HEAD(&rt_se->run_list);
  }
  
  int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
  {
  	struct rt_rq *rt_rq;
  	struct sched_rt_entity *rt_se;
  	int i;
  
  	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
  	if (!tg->rt_rq)
  		goto err;
  	tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
  	if (!tg->rt_se)
  		goto err;
  
  	init_rt_bandwidth(&tg->rt_bandwidth,
  			ktime_to_ns(def_rt_bandwidth.rt_period), 0);
  
  	for_each_possible_cpu(i) {
  		rt_rq = kzalloc_node(sizeof(struct rt_rq),
  				     GFP_KERNEL, cpu_to_node(i));
  		if (!rt_rq)
  			goto err;
  
  		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
  				     GFP_KERNEL, cpu_to_node(i));
  		if (!rt_se)
  			goto err_free_rq;
  		init_rt_rq(rt_rq);
  		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
  		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
  	}
  
  	return 1;
  
  err_free_rq:
  	kfree(rt_rq);
  err:
  	return 0;
  }
  #else /* CONFIG_RT_GROUP_SCHED */
  #define rt_entity_is_task(rt_se) (1)
  static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
  {
  	return container_of(rt_se, struct task_struct, rt);
  }
  static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
  {
  	return container_of(rt_rq, struct rq, rt);
  }
  static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
  {
  	struct task_struct *p = rt_task_of(rt_se);
  
  	return task_rq(p);
  }
  
  static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
  {
  	struct rq *rq = rq_of_rt_se(rt_se);
  
  	return &rq->rt;
  }
  void free_rt_sched_group(struct task_group *tg) { }
  
  int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
  {
  	return 1;
  }
  #endif /* CONFIG_RT_GROUP_SCHED */
  #ifdef CONFIG_SMP

  static void pull_rt_task(struct rq *this_rq);

  static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
  {
  	/* Try to pull RT tasks here if we lower this rq's prio */
  	return rq->rt.highest_prio.curr > prev->prio;
  }
  static inline int rt_overloaded(struct rq *rq)
  {
  	return atomic_read(&rq->rd->rto_count);
  }
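
  /*
   * Overload tracking: rt_set_overload()/rt_clear_overload() keep the root
   * domain's rto_mask and rto_count consistent so that rt_overloaded() is a
   * cheap check for "some rq has pushable RT work".  update_rt_migration()
   * flips the overloaded state whenever this rt_rq has more than one task
   * and at least one of them may migrate.
   */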

  static inline void rt_set_overload(struct rq *rq)
  {
  	if (!rq->online)
  		return;
  	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
  	/*
  	 * Make sure the mask is visible before we set
  	 * the overload count. That is checked to determine
  	 * if we should look at the mask. It would be a shame
  	 * if we looked at the mask, but the mask was not
  	 * updated yet.
  	 *
  	 * Matched by the barrier in pull_rt_task().
  	 */
  	smp_wmb();
  	atomic_inc(&rq->rd->rto_count);
  }

  static inline void rt_clear_overload(struct rq *rq)
  {
  	if (!rq->online)
  		return;
  	/* the order here really doesn't matter */
  	atomic_dec(&rq->rd->rto_count);
  	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
  }

  static void update_rt_migration(struct rt_rq *rt_rq)
  {
  	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
  		if (!rt_rq->overloaded) {
  			rt_set_overload(rq_of_rt_rq(rt_rq));
  			rt_rq->overloaded = 1;
  		}
  	} else if (rt_rq->overloaded) {
  		rt_clear_overload(rq_of_rt_rq(rt_rq));
  		rt_rq->overloaded = 0;
  	}
  }

  static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	struct task_struct *p;
  	if (!rt_entity_is_task(rt_se))
  		return;
  	p = rt_task_of(rt_se);
  	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
  
  	rt_rq->rt_nr_total++;
  	if (tsk_nr_cpus_allowed(p) > 1)
  		rt_rq->rt_nr_migratory++;
  
  	update_rt_migration(rt_rq);
  }
  
  static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	struct task_struct *p;
  	if (!rt_entity_is_task(rt_se))
  		return;
  	p = rt_task_of(rt_se);
  	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
  
  	rt_rq->rt_nr_total--;
  	if (tsk_nr_cpus_allowed(p) > 1)
  		rt_rq->rt_nr_migratory--;
  
  	update_rt_migration(rt_rq);
  }
  static inline int has_pushable_tasks(struct rq *rq)
  {
  	return !plist_head_empty(&rq->rt.pushable_tasks);
  }
  static DEFINE_PER_CPU(struct callback_head, rt_push_head);
  static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
  
  static void push_rt_tasks(struct rq *);
  static void pull_rt_task(struct rq *);
  
  static inline void queue_push_tasks(struct rq *rq)
  {
  	if (!has_pushable_tasks(rq))
  		return;
  	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
  }
  
  static inline void queue_pull_task(struct rq *rq)
  {
  	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
  }
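
  /*
   * The pushable_tasks plist keeps, ordered by priority, the queued RT
   * tasks that are allowed on more than one CPU and are therefore
   * candidates for being pushed elsewhere; the helpers below also maintain
   * highest_prio.next, the priority of the best such candidate.
   */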
  static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
  {
  	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
  	plist_node_init(&p->pushable_tasks, p->prio);
  	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
  
  	/* Update the highest prio pushable task */
  	if (p->prio < rq->rt.highest_prio.next)
  		rq->rt.highest_prio.next = p->prio;
  }
  
  static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
  {
  	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

  	/* Update the new highest prio pushable task */
  	if (has_pushable_tasks(rq)) {
  		p = plist_first_entry(&rq->rt.pushable_tasks,
  				      struct task_struct, pushable_tasks);
  		rq->rt.highest_prio.next = p->prio;
  	} else
  		rq->rt.highest_prio.next = MAX_RT_PRIO;
  }
  #else
  static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
  {
  }
  static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
  {
  }
  static inline
  void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  }
  static inline
  void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  }

  static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
  {
  	return false;
  }
  static inline void pull_rt_task(struct rq *this_rq)
  {
  }
  static inline void queue_push_tasks(struct rq *rq)
  {
  }
  #endif /* CONFIG_SMP */
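
  /*
   * enqueue_top_rt_rq()/dequeue_top_rt_rq(), defined further down, add or
   * remove the whole rt_rq's task count from the rq's nr_running; they are
   * used when the top-level rt_rq becomes runnable again or is throttled.
   */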
  static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
  static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
  static inline int on_rt_rq(struct sched_rt_entity *rt_se)
  {
  	return rt_se->on_rq;
  }
  #ifdef CONFIG_RT_GROUP_SCHED

  static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
  {
  	if (!rt_rq->tg)
  		return RUNTIME_INF;

  	return rt_rq->rt_runtime;
  }
  
  static inline u64 sched_rt_period(struct rt_rq *rt_rq)
  {
  	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
  }
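
  /*
   * With RT_GROUP_SCHED the rt_rqs of a CPU are reached by walking the
   * global task_groups list: next_task_group() skips autogroups, and
   * for_each_rt_rq() visits each remaining group's rt_rq for the given rq.
   */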
  typedef struct task_group *rt_rq_iter_t;
  static inline struct task_group *next_task_group(struct task_group *tg)
  {
  	do {
  		tg = list_entry_rcu(tg->list.next,
  			typeof(struct task_group), list);
  	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));
  
  	if (&tg->list == &task_groups)
  		tg = NULL;
  
  	return tg;
  }
  
  #define for_each_rt_rq(rt_rq, iter, rq)					\
  	for (iter = container_of(&task_groups, typeof(*iter), list);	\
  		(iter = next_task_group(iter)) &&			\
  		(rt_rq = iter->rt_rq[cpu_of(rq)]);)

  #define for_each_sched_rt_entity(rt_se) \
  	for (; rt_se; rt_se = rt_se->parent)
  
  static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
  {
  	return rt_se->my_q;
  }
  static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
  static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);

  static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
  {
  	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
  	struct rq *rq = rq_of_rt_rq(rt_rq);
  	struct sched_rt_entity *rt_se;
  	int cpu = cpu_of(rq);
  
  	rt_se = rt_rq->tg->rt_se[cpu];

  	if (rt_rq->rt_nr_running) {
  		if (!rt_se)
  			enqueue_top_rt_rq(rt_rq);
  		else if (!on_rt_rq(rt_se))
  			enqueue_rt_entity(rt_se, 0);

  		if (rt_rq->highest_prio.curr < curr->prio)
  			resched_curr(rq);
  	}
  }
  static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
  {
  	struct sched_rt_entity *rt_se;
  	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

  	rt_se = rt_rq->tg->rt_se[cpu];

  	if (!rt_se)
  		dequeue_top_rt_rq(rt_rq);
  	else if (on_rt_rq(rt_se))
  		dequeue_rt_entity(rt_se, 0);
  }
  static inline int rt_rq_throttled(struct rt_rq *rt_rq)
  {
  	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
  }
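
  /*
   * rt_se_boosted(): true when the entity currently runs at RT priority
   * only because of priority inheritance (p->prio != p->normal_prio).
   * Boosted entities are counted in rt_nr_boosted, and rt_rq_throttled()
   * above keeps a throttled group runnable as long as it still holds
   * boosted tasks.
   */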
  static int rt_se_boosted(struct sched_rt_entity *rt_se)
  {
  	struct rt_rq *rt_rq = group_rt_rq(rt_se);
  	struct task_struct *p;
  
  	if (rt_rq)
  		return !!rt_rq->rt_nr_boosted;
  
  	p = rt_task_of(rt_se);
  	return p->prio != p->normal_prio;
  }
  #ifdef CONFIG_SMP
  static inline const struct cpumask *sched_rt_period_mask(void)
  {
  	return this_rq()->rd->span;
  }
  #else
  static inline const struct cpumask *sched_rt_period_mask(void)
  {
  	return cpu_online_mask;
  }
  #endif

  static inline
  struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
  {
  	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
  }

  static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
  {
  	return &rt_rq->tg->rt_bandwidth;
  }
  #else /* !CONFIG_RT_GROUP_SCHED */
  
  static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
  {
  	return rt_rq->rt_runtime;
  }
  
  static inline u64 sched_rt_period(struct rt_rq *rt_rq)
  {
  	return ktime_to_ns(def_rt_bandwidth.rt_period);
  }
  typedef struct rt_rq *rt_rq_iter_t;
  
  #define for_each_rt_rq(rt_rq, iter, rq) \
  	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
  #define for_each_sched_rt_entity(rt_se) \
  	for (; rt_se; rt_se = NULL)
  
  static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
  {
  	return NULL;
  }
  static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
  {
  	struct rq *rq = rq_of_rt_rq(rt_rq);
  
  	if (!rt_rq->rt_nr_running)
  		return;
  
  	enqueue_top_rt_rq(rt_rq);
  	resched_curr(rq);
  }
  static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
  {
  	dequeue_top_rt_rq(rt_rq);
  }
  static inline int rt_rq_throttled(struct rt_rq *rt_rq)
  {
  	return rt_rq->rt_throttled;
  }
  static inline const struct cpumask *sched_rt_period_mask(void)
  {
  	return cpu_online_mask;
  }
  
  static inline
  struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
  {
  	return &cpu_rq(cpu)->rt;
  }
  static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
  {
  	return &def_rt_bandwidth;
  }
  #endif /* CONFIG_RT_GROUP_SCHED */

  bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
  {
  	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
  
  	return (hrtimer_active(&rt_b->rt_period_timer) ||
  		rt_rq->rt_time < rt_b->rt_runtime);
  }
  #ifdef CONFIG_SMP
  /*
   * We ran out of runtime, see if we can borrow some from our neighbours.
   */
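  /*
   * For example: on a 4-CPU root domain, a neighbour with 40ms of unused
   * runtime in this period contributes at most 40ms / 4 = 10ms here, and
   * rt_runtime is never grown beyond rt_period.
   */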
  static void do_balance_runtime(struct rt_rq *rt_rq)
  {
  	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
  	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
  	int i, weight;
  	u64 rt_period;
  	weight = cpumask_weight(rd->span);

  	raw_spin_lock(&rt_b->rt_runtime_lock);
  	rt_period = ktime_to_ns(rt_b->rt_period);
  	for_each_cpu(i, rd->span) {
  		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
  		s64 diff;
  
  		if (iter == rt_rq)
  			continue;
  		raw_spin_lock(&iter->rt_runtime_lock);
  		/*
  		 * Either all rqs have inf runtime and there's nothing to steal
  		 * or __disable_runtime() below sets a specific rq to inf to
  		 * indicate it's been disabled and disallow stealing.
  		 */
  		if (iter->rt_runtime == RUNTIME_INF)
  			goto next;
  		/*
  		 * From runqueues with spare time, take 1/n part of their
  		 * spare time, but no more than our period.
  		 */
  		diff = iter->rt_runtime - iter->rt_time;
  		if (diff > 0) {
  			diff = div_u64((u64)diff, weight);
  			if (rt_rq->rt_runtime + diff > rt_period)
  				diff = rt_period - rt_rq->rt_runtime;
  			iter->rt_runtime -= diff;
  			rt_rq->rt_runtime += diff;
  			if (rt_rq->rt_runtime == rt_period) {
  				raw_spin_unlock(&iter->rt_runtime_lock);
  				break;
  			}
  		}
  next:
  		raw_spin_unlock(&iter->rt_runtime_lock);
  	}
  	raw_spin_unlock(&rt_b->rt_runtime_lock);
  }

  /*
   * Ensure this RQ takes back all the runtime it lent to its neighbours.
   */
  static void __disable_runtime(struct rq *rq)
  {
  	struct root_domain *rd = rq->rd;
  	rt_rq_iter_t iter;
  	struct rt_rq *rt_rq;
  
  	if (unlikely(!scheduler_running))
  		return;
  	for_each_rt_rq(rt_rq, iter, rq) {
  		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
  		s64 want;
  		int i;
  		raw_spin_lock(&rt_b->rt_runtime_lock);
  		raw_spin_lock(&rt_rq->rt_runtime_lock);
  		/*
  		 * Either we're all inf and nobody needs to borrow, or we're
  		 * already disabled and thus have nothing to do, or we have
  		 * exactly the right amount of runtime to take out.
  		 */
  		if (rt_rq->rt_runtime == RUNTIME_INF ||
  				rt_rq->rt_runtime == rt_b->rt_runtime)
  			goto balanced;
  		raw_spin_unlock(&rt_rq->rt_runtime_lock);

  		/*
  		 * Calculate the difference between what we started out with
  		 * and what we currently have; that's the amount of runtime
  		 * we lent and now have to reclaim.
  		 */
  		want = rt_b->rt_runtime - rt_rq->rt_runtime;
  		/*
  		 * Greedy reclaim, take back as much as we can.
  		 */
  		for_each_cpu(i, rd->span) {
  			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
  			s64 diff;
  			/*
  			 * Can't reclaim from ourselves or disabled runqueues.
  			 */
  			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
  				continue;
  			raw_spin_lock(&iter->rt_runtime_lock);
  			if (want > 0) {
  				diff = min_t(s64, iter->rt_runtime, want);
  				iter->rt_runtime -= diff;
  				want -= diff;
  			} else {
  				iter->rt_runtime -= want;
  				want -= want;
  			}
  			raw_spin_unlock(&iter->rt_runtime_lock);
  
  			if (!want)
  				break;
  		}
  		raw_spin_lock(&rt_rq->rt_runtime_lock);
  		/*
  		 * We cannot be left wanting - that would mean some runtime
  		 * leaked out of the system.
  		 */
  		BUG_ON(want);
  balanced:
  		/*
  		 * Disable all the borrow logic by pretending we have inf
  		 * runtime - in which case borrowing doesn't make sense.
  		 */
  		rt_rq->rt_runtime = RUNTIME_INF;
  		rt_rq->rt_throttled = 0;
  		raw_spin_unlock(&rt_rq->rt_runtime_lock);
  		raw_spin_unlock(&rt_b->rt_runtime_lock);
  
  		/* Make rt_rq available for pick_next_task() */
  		sched_rt_rq_enqueue(rt_rq);
  	}
  }
  static void __enable_runtime(struct rq *rq)
  {
  	rt_rq_iter_t iter;
  	struct rt_rq *rt_rq;
  
  	if (unlikely(!scheduler_running))
  		return;
  	/*
  	 * Reset each runqueue's bandwidth settings
  	 */
  	for_each_rt_rq(rt_rq, iter, rq) {
  		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
  		raw_spin_lock(&rt_b->rt_runtime_lock);
  		raw_spin_lock(&rt_rq->rt_runtime_lock);
  		rt_rq->rt_runtime = rt_b->rt_runtime;
  		rt_rq->rt_time = 0;
  		rt_rq->rt_throttled = 0;
  		raw_spin_unlock(&rt_rq->rt_runtime_lock);
  		raw_spin_unlock(&rt_b->rt_runtime_lock);
  	}
  }
  static void balance_runtime(struct rt_rq *rt_rq)
  {
  	if (!sched_feat(RT_RUNTIME_SHARE))
  		return;

  	if (rt_rq->rt_time > rt_rq->rt_runtime) {
  		raw_spin_unlock(&rt_rq->rt_runtime_lock);
  		do_balance_runtime(rt_rq);
  		raw_spin_lock(&rt_rq->rt_runtime_lock);
  	}
  }
  #else /* !CONFIG_SMP */
  static inline void balance_runtime(struct rt_rq *rt_rq) {}
  #endif /* CONFIG_SMP */
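
  /*
   * do_sched_rt_period_timer() runs once per rt_period from the bandwidth
   * timer.  For every CPU in the period span it retires consumed rt_time
   * (borrowing runtime first if the rq is throttled), unthrottles and
   * re-enqueues runqueues that have budget again, and returns whether the
   * timer may go idle.
   */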

  static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
  {
  	int i, idle = 1, throttled = 0;
  	const struct cpumask *span;

  	span = sched_rt_period_mask();
  #ifdef CONFIG_RT_GROUP_SCHED
  	/*
  	 * FIXME: isolated CPUs should really leave the root task group,
  	 * whether they are isolcpus or were isolated via cpusets, lest
  	 * the timer run on a CPU which does not service all runqueues,
  	 * potentially leaving other CPUs indefinitely throttled.  If
  	 * isolation is really required, the user will turn the throttle
  	 * off to kill the perturbations it causes anyway.  Meanwhile,
  	 * this maintains functionality for boot and/or troubleshooting.
  	 */
  	if (rt_b == &root_task_group.rt_bandwidth)
  		span = cpu_online_mask;
  #endif
  	for_each_cpu(i, span) {
  		int enqueue = 0;
  		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
  		struct rq *rq = rq_of_rt_rq(rt_rq);
  		raw_spin_lock(&rq->lock);
  		if (rt_rq->rt_time) {
  			u64 runtime;
  			raw_spin_lock(&rt_rq->rt_runtime_lock);
  			if (rt_rq->rt_throttled)
  				balance_runtime(rt_rq);
  			runtime = rt_rq->rt_runtime;
  			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
  			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
  				rt_rq->rt_throttled = 0;
  				enqueue = 1;
  
  				/*
  				 * When we're idle and a woken (rt) task is
  				 * throttled, check_preempt_curr() will set
  				 * skip_update and the time between the wakeup
  				 * and this unthrottle will get accounted as
  				 * 'runtime'.
  				 */
  				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
  					rq_clock_skip_update(rq, false);
  			}
  			if (rt_rq->rt_time || rt_rq->rt_nr_running)
  				idle = 0;
  			raw_spin_unlock(&rt_rq->rt_runtime_lock);
  		} else if (rt_rq->rt_nr_running) {
  			idle = 0;
  			if (!rt_rq_throttled(rt_rq))
  				enqueue = 1;
  		}
  		if (rt_rq->rt_throttled)
  			throttled = 1;
  
  		if (enqueue)
  			sched_rt_rq_enqueue(rt_rq);
  		raw_spin_unlock(&rq->lock);
  	}
  	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
  		return 1;
  	return idle;
  }

  static inline int rt_se_prio(struct sched_rt_entity *rt_se)
  {
  #ifdef CONFIG_RT_GROUP_SCHED
  	struct rt_rq *rt_rq = group_rt_rq(rt_se);
  
  	if (rt_rq)
  		return rt_rq->highest_prio.curr;
  #endif
  
  	return rt_task_of(rt_se)->prio;
  }
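
  /*
   * sched_rt_runtime_exceeded() is called from update_curr_rt() with the
   * rt_runtime_lock held.  It first tries to borrow runtime from other
   * CPUs and only throttles (and dequeues) the rt_rq once rt_time has run
   * past whatever budget could be assembled.
   */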
  static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
  {
  	u64 runtime = sched_rt_runtime(rt_rq);

  	if (rt_rq->rt_throttled)
  		return rt_rq_throttled(rt_rq);

  	if (runtime >= sched_rt_period(rt_rq))
  		return 0;
  	balance_runtime(rt_rq);
  	runtime = sched_rt_runtime(rt_rq);
  	if (runtime == RUNTIME_INF)
  		return 0;

  	if (rt_rq->rt_time > runtime) {
  		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
  
  		/*
  		 * Don't actually throttle groups that have no runtime assigned
  		 * but accrue some time due to boosting.
  		 */
  		if (likely(rt_b->rt_runtime)) {
  			rt_rq->rt_throttled = 1;
  			printk_deferred_once("sched: RT throttling activated\n");
  		} else {
  			/*
  			 * In case we did anyway, make it go away,
  			 * replenishment is a joke, since it will replenish us
  			 * with exactly 0 ns.
  			 */
  			rt_rq->rt_time = 0;
  		}
  		if (rt_rq_throttled(rt_rq)) {
  			sched_rt_rq_dequeue(rt_rq);
  			return 1;
  		}
  	}
  
  	return 0;
  }
  /*
   * Update the current task's runtime statistics. Skip current tasks that
   * are not in our scheduling class.
   */
  static void update_curr_rt(struct rq *rq)
  {
  	struct task_struct *curr = rq->curr;
  	struct sched_rt_entity *rt_se = &curr->rt;
  	u64 delta_exec;
  	if (curr->sched_class != &rt_sched_class)
  		return;
  	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
  	if (unlikely((s64)delta_exec <= 0))
  		return;

  	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
  	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);

  	schedstat_set(curr->se.statistics.exec_max,
  		      max(curr->se.statistics.exec_max, delta_exec));
  
  	curr->se.sum_exec_runtime += delta_exec;
  	account_group_exec_runtime(curr, delta_exec);
  	curr->se.exec_start = rq_clock_task(rq);
  	cpuacct_charge(curr, delta_exec);

  	sched_rt_avg_update(rq, delta_exec);
  	if (!rt_bandwidth_enabled())
  		return;
  	for_each_sched_rt_entity(rt_se) {
  		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);

  		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
  			raw_spin_lock(&rt_rq->rt_runtime_lock);
  			rt_rq->rt_time += delta_exec;
  			if (sched_rt_runtime_exceeded(rt_rq))
  				resched_curr(rq);
  			raw_spin_unlock(&rt_rq->rt_runtime_lock);
  		}
  	}
  }
  static void
  dequeue_top_rt_rq(struct rt_rq *rt_rq)
  {
  	struct rq *rq = rq_of_rt_rq(rt_rq);
  
  	BUG_ON(&rq->rt != rt_rq);
  
  	if (!rt_rq->rt_queued)
  		return;
  
  	BUG_ON(!rq->nr_running);
  	sub_nr_running(rq, rt_rq->rt_nr_running);
  	rt_rq->rt_queued = 0;
  }
  
  static void
  enqueue_top_rt_rq(struct rt_rq *rt_rq)
  {
  	struct rq *rq = rq_of_rt_rq(rt_rq);
  
  	BUG_ON(&rq->rt != rt_rq);
  
  	if (rt_rq->rt_queued)
  		return;
  	if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
  		return;
  	add_nr_running(rq, rt_rq->rt_nr_running);
  	rt_rq->rt_queued = 1;
  }
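
  /*
   * inc_rt_prio_smp()/dec_rt_prio_smp() propagate changes of this rq's
   * highest RT priority into the root domain's cpupri structure, which the
   * push/pull logic uses to find a suitable lower-priority CPU quickly.
   */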
  #if defined CONFIG_SMP

  static void
  inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
  {
  	struct rq *rq = rq_of_rt_rq(rt_rq);

  #ifdef CONFIG_RT_GROUP_SCHED
  	/*
  	 * Change rq's cpupri only if rt_rq is the top queue.
  	 */
  	if (&rq->rt != rt_rq)
  		return;
  #endif
  	if (rq->online && prio < prev_prio)
  		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
  }

  static void
  dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
  {
  	struct rq *rq = rq_of_rt_rq(rt_rq);

  #ifdef CONFIG_RT_GROUP_SCHED
  	/*
  	 * Change rq's cpupri only if rt_rq is the top queue.
  	 */
  	if (&rq->rt != rt_rq)
  		return;
  #endif
  	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
  		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
  }
  #else /* CONFIG_SMP */
  static inline
  void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
  static inline
  void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
  
  #endif /* CONFIG_SMP */

  #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
  static void
  inc_rt_prio(struct rt_rq *rt_rq, int prio)
  {
  	int prev_prio = rt_rq->highest_prio.curr;
  
  	if (prio < prev_prio)
  		rt_rq->highest_prio.curr = prio;
  
  	inc_rt_prio_smp(rt_rq, prio, prev_prio);
  }
  
  static void
  dec_rt_prio(struct rt_rq *rt_rq, int prio)
  {
  	int prev_prio = rt_rq->highest_prio.curr;
  	if (rt_rq->rt_nr_running) {

  		WARN_ON(prio < prev_prio);

  		/*
  		 * This may have been our highest task, and therefore
  		 * we may have some recomputation to do
  		 */
  		if (prio == prev_prio) {
  			struct rt_prio_array *array = &rt_rq->active;
  
  			rt_rq->highest_prio.curr =
  				sched_find_first_bit(array->bitmap);
  		}
  	} else
  		rt_rq->highest_prio.curr = MAX_RT_PRIO;

  	dec_rt_prio_smp(rt_rq, prio, prev_prio);
  }

  #else
  
  static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
  static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
  
  #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

  #ifdef CONFIG_RT_GROUP_SCHED
  
  static void
  inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	if (rt_se_boosted(rt_se))
  		rt_rq->rt_nr_boosted++;
  
  	if (rt_rq->tg)
  		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
  }
  
  static void
  dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	if (rt_se_boosted(rt_se))
  		rt_rq->rt_nr_boosted--;
  
  	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
  }
  
  #else /* CONFIG_RT_GROUP_SCHED */
  
  static void
  inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	start_rt_bandwidth(&def_rt_bandwidth);
  }
  
  static inline
  void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
  
  #endif /* CONFIG_RT_GROUP_SCHED */
  
  static inline
  unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
  {
  	struct rt_rq *group_rq = group_rt_rq(rt_se);
  
  	if (group_rq)
  		return group_rq->rt_nr_running;
  	else
  		return 1;
  }
  
  static inline
  unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
  {
  	struct rt_rq *group_rq = group_rt_rq(rt_se);
  	struct task_struct *tsk;
  
  	if (group_rq)
  		return group_rq->rr_nr_running;
  
  	tsk = rt_task_of(rt_se);
  
  	return (tsk->policy == SCHED_RR) ? 1 : 0;
  }
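
  /*
   * inc_rt_tasks()/dec_rt_tasks() do the accounting when an entity is
   * (de)queued on an rt_rq: rt_nr_running and rr_nr_running (which count
   * through group entities), the highest-priority tracking, the
   * migration/overload state and the group bandwidth bookkeeping.
   */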
  
  static inline
  void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	int prio = rt_se_prio(rt_se);
  
  	WARN_ON(!rt_prio(prio));
  	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
  	rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
  
  	inc_rt_prio(rt_rq, prio);
  	inc_rt_migration(rt_se, rt_rq);
  	inc_rt_group(rt_se, rt_rq);
  }
  
  static inline
  void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  {
  	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
  	WARN_ON(!rt_rq->rt_nr_running);
  	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
  	rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
  
  	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
  	dec_rt_migration(rt_se, rt_rq);
  	dec_rt_group(rt_se, rt_rq);
  }
  /*
   * Change rt_se->run_list location unless SAVE && !MOVE
   *
   * assumes ENQUEUE/DEQUEUE flags match
   */
  static inline bool move_entity(unsigned int flags)
  {
  	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
  		return false;
  
  	return true;
  }
  
  static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
  {
  	list_del_init(&rt_se->run_list);
  
  	if (list_empty(array->queue + rt_se_prio(rt_se)))
  		__clear_bit(rt_se_prio(rt_se), array->bitmap);
  
  	rt_se->on_list = 0;
  }
  
  static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1128
  {
6f505b164   Peter Zijlstra   sched: rt group s...
1129
1130
1131
  	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
  	struct rt_prio_array *array = &rt_rq->active;
  	struct rt_rq *group_rq = group_rt_rq(rt_se);
20b6331bf   Dmitry Adamushko   sched: rework of ...
1132
  	struct list_head *queue = array->queue + rt_se_prio(rt_se);
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1133

ad2a3f13b   Peter Zijlstra   sched: rt-group: ...
1134
1135
1136
1137
1138
1139
  	/*
  	 * Don't enqueue the group if it's throttled, or when it is empty.
  	 * The latter is a consequence of the former when a child group
  	 * gets throttled and the current group doesn't have any other
  	 * active members.
  	 */
ff77e4685   Peter Zijlstra   sched/rt: Fix PI ...
1140
1141
1142
  	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
  		if (rt_se->on_list)
  			__delist_rt_entity(rt_se, array);
6f505b164   Peter Zijlstra   sched: rt group s...
1143
  		return;
ff77e4685   Peter Zijlstra   sched/rt: Fix PI ...
1144
  	}
63489e45e   Steven Rostedt   sched: count # of...
1145

ff77e4685   Peter Zijlstra   sched/rt: Fix PI ...
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
  	if (move_entity(flags)) {
  		WARN_ON_ONCE(rt_se->on_list);
  		if (flags & ENQUEUE_HEAD)
  			list_add(&rt_se->run_list, queue);
  		else
  			list_add_tail(&rt_se->run_list, queue);
  
  		__set_bit(rt_se_prio(rt_se), array->bitmap);
  		rt_se->on_list = 1;
  	}
  	rt_se->on_rq = 1;
78f2c7db6   Peter Zijlstra   sched: SCHED_FIFO...
1157

6f505b164   Peter Zijlstra   sched: rt group s...
1158
1159
  	inc_rt_tasks(rt_se, rt_rq);
  }
ff77e4685   Peter Zijlstra   sched/rt: Fix PI ...
1160
  static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
6f505b164   Peter Zijlstra   sched: rt group s...
1161
1162
1163
  {
  	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
  	struct rt_prio_array *array = &rt_rq->active;
ff77e4685   Peter Zijlstra   sched/rt: Fix PI ...
1164
1165
1166
1167
1168
  	if (move_entity(flags)) {
  		WARN_ON_ONCE(!rt_se->on_list);
  		__delist_rt_entity(rt_se, array);
  	}
  	rt_se->on_rq = 0;
6f505b164   Peter Zijlstra   sched: rt group s...
1169
1170
1171
1172
1173
1174
1175
  
  	dec_rt_tasks(rt_se, rt_rq);
  }
  
  /*
   * Because the prio of an upper entry depends on the lower
   * entries, we must remove entries top-down.
6f505b164   Peter Zijlstra   sched: rt group s...
1176
   */
ff77e4685   Peter Zijlstra   sched/rt: Fix PI ...
1177
  static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
6f505b164   Peter Zijlstra   sched: rt group s...
1178
  {
ad2a3f13b   Peter Zijlstra   sched: rt-group: ...
1179
  	struct sched_rt_entity *back = NULL;
6f505b164   Peter Zijlstra   sched: rt group s...
1180

58d6c2d72   Peter Zijlstra   sched: rt-group: ...
1181
1182
1183
1184
  	for_each_sched_rt_entity(rt_se) {
  		rt_se->back = back;
  		back = rt_se;
  	}
f4ebcbc0d   Kirill Tkhai   sched/rt: Substra...
1185
  	dequeue_top_rt_rq(rt_rq_of_se(back));
58d6c2d72   Peter Zijlstra   sched: rt-group: ...
1186
1187
  	for (rt_se = back; rt_se; rt_se = rt_se->back) {
  		if (on_rt_rq(rt_se))
ff77e4685   Peter Zijlstra   sched/rt: Fix PI ...
1188
  			__dequeue_rt_entity(rt_se, flags);
ad2a3f13b   Peter Zijlstra   sched: rt-group: ...
1189
1190
  	}
  }
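  /*
   * Editor's sketch (illustrative only, not scheduler code): the ->back
   * chain built by dequeue_rt_stack() above is a generic trick for visiting
   * a parent-linked hierarchy root-first.  Assuming a hypothetical struct
   * with a ->parent pointer:
   *
   *    #include <stdio.h>
   *
   *    struct ent {
   *            struct ent *parent;     // leaf-to-root linkage
   *            struct ent *back;       // filled in by the walk below
   *            int id;
   *    };
   *
   *    static void visit_top_down(struct ent *leaf)
   *    {
   *            struct ent *e, *back = NULL;
   *
   *            for (e = leaf; e; e = e->parent) {      // walk leaf -> root
   *                    e->back = back;
   *                    back = e;
   *            }
   *            for (e = back; e; e = e->back)          // walk root -> leaf
   *                    printf("dequeue %d\n", e->id);
   *    }
   */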
ff77e4685   Peter Zijlstra   sched/rt: Fix PI ...
1191
  static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
ad2a3f13b   Peter Zijlstra   sched: rt-group: ...
1192
  {
f4ebcbc0d   Kirill Tkhai   sched/rt: Substra...
1193
  	struct rq *rq = rq_of_rt_se(rt_se);
ff77e4685   Peter Zijlstra   sched/rt: Fix PI ...
1194
  	dequeue_rt_stack(rt_se, flags);
ad2a3f13b   Peter Zijlstra   sched: rt-group: ...
1195
  	for_each_sched_rt_entity(rt_se)
ff77e4685   Peter Zijlstra   sched/rt: Fix PI ...
1196
  		__enqueue_rt_entity(rt_se, flags);
f4ebcbc0d   Kirill Tkhai   sched/rt: Substra...
1197
  	enqueue_top_rt_rq(&rq->rt);
ad2a3f13b   Peter Zijlstra   sched: rt-group: ...
1198
  }
ff77e4685   Peter Zijlstra   sched/rt: Fix PI ...
1199
  static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
ad2a3f13b   Peter Zijlstra   sched: rt-group: ...
1200
  {
f4ebcbc0d   Kirill Tkhai   sched/rt: Substra...
1201
  	struct rq *rq = rq_of_rt_se(rt_se);
ff77e4685   Peter Zijlstra   sched/rt: Fix PI ...
1202
  	dequeue_rt_stack(rt_se, flags);
ad2a3f13b   Peter Zijlstra   sched: rt-group: ...
1203
1204
1205
1206
1207
  
  	for_each_sched_rt_entity(rt_se) {
  		struct rt_rq *rt_rq = group_rt_rq(rt_se);
  
  		if (rt_rq && rt_rq->rt_nr_running)
ff77e4685   Peter Zijlstra   sched/rt: Fix PI ...
1208
  			__enqueue_rt_entity(rt_se, flags);
58d6c2d72   Peter Zijlstra   sched: rt-group: ...
1209
  	}
f4ebcbc0d   Kirill Tkhai   sched/rt: Substra...
1210
  	enqueue_top_rt_rq(&rq->rt);
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1211
1212
1213
1214
1215
  }
  
  /*
   * Adding/removing a task to/from a priority array:
   */
ea87bb785   Thomas Gleixner   sched: Extend enq...
1216
  static void
371fd7e7a   Peter Zijlstra   sched: Add enqueu...
1217
  enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
6f505b164   Peter Zijlstra   sched: rt group s...
1218
1219
  {
  	struct sched_rt_entity *rt_se = &p->rt;
371fd7e7a   Peter Zijlstra   sched: Add enqueu...
1220
  	if (flags & ENQUEUE_WAKEUP)
6f505b164   Peter Zijlstra   sched: rt group s...
1221
  		rt_se->timeout = 0;
ff77e4685   Peter Zijlstra   sched/rt: Fix PI ...
1222
  	enqueue_rt_entity(rt_se, flags);
c09595f63   Peter Zijlstra   sched: revert rev...
1223

50605ffbd   Thomas Gleixner   sched/core: Provi...
1224
  	if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
917b627d4   Gregory Haskins   sched: create "pu...
1225
  		enqueue_pushable_task(rq, p);
6f505b164   Peter Zijlstra   sched: rt group s...
1226
  }
371fd7e7a   Peter Zijlstra   sched: Add enqueu...
1227
  static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1228
  {
6f505b164   Peter Zijlstra   sched: rt group s...
1229
  	struct sched_rt_entity *rt_se = &p->rt;
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1230

f1e14ef64   Ingo Molnar   sched: remove the...
1231
  	update_curr_rt(rq);
ff77e4685   Peter Zijlstra   sched/rt: Fix PI ...
1232
  	dequeue_rt_entity(rt_se, flags);
c09595f63   Peter Zijlstra   sched: revert rev...
1233

917b627d4   Gregory Haskins   sched: create "pu...
1234
  	dequeue_pushable_task(rq, p);
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1235
1236
1237
  }
  
  /*
60686317d   Richard Weinberger   sched: Fix commen...
1238
1239
   * Put the task at the head or the end of the run list without the overhead of
   * dequeue followed by enqueue.
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1240
   */
7ebefa8ce   Dmitry Adamushko   sched: rework of ...
1241
1242
  static void
  requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
6f505b164   Peter Zijlstra   sched: rt group s...
1243
  {
1cdad7153   Ingo Molnar   Merge branch 'sch...
1244
  	if (on_rt_rq(rt_se)) {
7ebefa8ce   Dmitry Adamushko   sched: rework of ...
1245
1246
1247
1248
1249
1250
1251
  		struct rt_prio_array *array = &rt_rq->active;
  		struct list_head *queue = array->queue + rt_se_prio(rt_se);
  
  		if (head)
  			list_move(&rt_se->run_list, queue);
  		else
  			list_move_tail(&rt_se->run_list, queue);
1cdad7153   Ingo Molnar   Merge branch 'sch...
1252
  	}
6f505b164   Peter Zijlstra   sched: rt group s...
1253
  }
7ebefa8ce   Dmitry Adamushko   sched: rework of ...
1254
  static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1255
  {
6f505b164   Peter Zijlstra   sched: rt group s...
1256
1257
  	struct sched_rt_entity *rt_se = &p->rt;
  	struct rt_rq *rt_rq;
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1258

6f505b164   Peter Zijlstra   sched: rt group s...
1259
1260
  	for_each_sched_rt_entity(rt_se) {
  		rt_rq = rt_rq_of_se(rt_se);
7ebefa8ce   Dmitry Adamushko   sched: rework of ...
1261
  		requeue_rt_entity(rt_rq, rt_se, head);
6f505b164   Peter Zijlstra   sched: rt group s...
1262
  	}
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1263
  }
6f505b164   Peter Zijlstra   sched: rt group s...
1264
  static void yield_task_rt(struct rq *rq)
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1265
  {
7ebefa8ce   Dmitry Adamushko   sched: rework of ...
1266
  	requeue_task_rt(rq, rq->curr, 0);
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1267
  }
e7693a362   Gregory Haskins   sched: de-SCHED_O...
1268
  #ifdef CONFIG_SMP
318e0893c   Gregory Haskins   sched: pre-route ...
1269
  static int find_lowest_rq(struct task_struct *task);
0017d7350   Peter Zijlstra   sched: Fix TASK_W...
1270
  static int
ac66f5477   Peter Zijlstra   sched/numa: Intro...
1271
  select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
e7693a362   Gregory Haskins   sched: de-SCHED_O...
1272
  {
7608dec2c   Peter Zijlstra   sched: Drop the r...
1273
1274
  	struct task_struct *curr;
  	struct rq *rq;
c37495fd0   Steven Rostedt   sched: Balance RT...
1275
1276
1277
1278
  
  	/* For anything but wake ups, just return the task_cpu */
  	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
  		goto out;
7608dec2c   Peter Zijlstra   sched: Drop the r...
1279
1280
1281
  	rq = cpu_rq(cpu);
  
  	rcu_read_lock();
316c1608d   Jason Low   sched, timer: Con...
1282
  	curr = READ_ONCE(rq->curr); /* unlocked access */
7608dec2c   Peter Zijlstra   sched: Drop the r...
1283

318e0893c   Gregory Haskins   sched: pre-route ...
1284
  	/*
7608dec2c   Peter Zijlstra   sched: Drop the r...
1285
  	 * If the current task on @p's runqueue is an RT task, then
e1f47d891   Steven Rostedt   sched: RT-balance...
1286
1287
1288
1289
  	 * try to see if we can wake this RT task up on another
  	 * runqueue. Otherwise simply start this RT task
  	 * on its current runqueue.
  	 *
43fa5460f   Steven Rostedt   sched: Try not to...
1290
1291
1292
1293
1294
1295
1296
1297
1298
  	 * We want to avoid overloading runqueues. If the woken
  	 * task is of higher priority, then it will stay on this CPU
  	 * and the lower-prio task should be moved to another CPU.
  	 * Even though this will probably make the lower-prio task
  	 * lose its cache, we do not want to bounce a higher-priority task
  	 * around just because it gave up its CPU, perhaps for a
  	 * lock?
  	 *
  	 * For equal prio tasks, we just let the scheduler sort it out.
7608dec2c   Peter Zijlstra   sched: Drop the r...
1299
1300
1301
1302
1303
1304
  	 *
  	 * Otherwise, just let it ride on the affined RQ and the
  	 * post-schedule router will push the preempted task away
  	 *
  	 * This test is optimistic; if we get it wrong, the load-balancer
  	 * will have to sort it out.
318e0893c   Gregory Haskins   sched: pre-route ...
1305
  	 */
7608dec2c   Peter Zijlstra   sched: Drop the r...
1306
  	if (curr && unlikely(rt_task(curr)) &&
50605ffbd   Thomas Gleixner   sched/core: Provi...
1307
  	    (tsk_nr_cpus_allowed(curr) < 2 ||
6bfa687c1   Shawn Bohrer   sched/rt: Remove ...
1308
  	     curr->prio <= p->prio)) {
7608dec2c   Peter Zijlstra   sched: Drop the r...
1309
  		int target = find_lowest_rq(p);
318e0893c   Gregory Haskins   sched: pre-route ...
1310

80e3d87b2   Tim Chen   sched/rt: Reduce ...
1311
1312
1313
1314
1315
1316
  		/*
  		 * Don't bother moving it if the destination CPU is
  		 * not running a lower priority task.
  		 */
  		if (target != -1 &&
  		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
7608dec2c   Peter Zijlstra   sched: Drop the r...
1317
  			cpu = target;
318e0893c   Gregory Haskins   sched: pre-route ...
1318
  	}
7608dec2c   Peter Zijlstra   sched: Drop the r...
1319
  	rcu_read_unlock();
318e0893c   Gregory Haskins   sched: pre-route ...
1320

c37495fd0   Steven Rostedt   sched: Balance RT...
1321
  out:
7608dec2c   Peter Zijlstra   sched: Drop the r...
1322
  	return cpu;
e7693a362   Gregory Haskins   sched: de-SCHED_O...
1323
  }
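  /*
   * Editor's sketch: the placement policy above reduces to a small decision
   * function.  This is a simplified model with hypothetical names, not the
   * kernel implementation; lower ->prio values mean higher priority, as in
   * the scheduler:
   *
   *    // prev_cpu: @p's previous CPU; lowest_cpu: result of find_lowest_rq()
   *    // (-1 if none); lowest_cpu_curr_prio: highest prio currently on it.
   *    static int rt_wakeup_cpu(int prev_cpu, int curr_is_rt,
   *                             int curr_nr_cpus_allowed, int curr_prio,
   *                             int p_prio, int lowest_cpu,
   *                             int lowest_cpu_curr_prio)
   *    {
   *            if (!curr_is_rt)
   *                    return prev_cpu;
   *            // curr can move and would lose to p anyway: keep p local.
   *            if (curr_nr_cpus_allowed >= 2 && curr_prio > p_prio)
   *                    return prev_cpu;
   *            // Migrate p only if the target runs something lower prio.
   *            if (lowest_cpu != -1 && p_prio < lowest_cpu_curr_prio)
   *                    return lowest_cpu;
   *            return prev_cpu;
   *    }
   */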
7ebefa8ce   Dmitry Adamushko   sched: rework of ...
1324
1325
1326
  
  static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
  {
308a623a4   Wanpeng Li   sched/rt: Clean u...
1327
1328
1329
1330
  	/*
  	 * Current can't be migrated, so it's useless to reschedule;
  	 * let's hope p can move out.
  	 */
50605ffbd   Thomas Gleixner   sched/core: Provi...
1331
  	if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
308a623a4   Wanpeng Li   sched/rt: Clean u...
1332
  	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
7ebefa8ce   Dmitry Adamushko   sched: rework of ...
1333
  		return;
308a623a4   Wanpeng Li   sched/rt: Clean u...
1334
1335
1336
1337
  	/*
  	 * p is migratable, so let's not schedule it and
  	 * see if it is pushed or pulled somewhere else.
  	 */
50605ffbd   Thomas Gleixner   sched/core: Provi...
1338
  	if (tsk_nr_cpus_allowed(p) != 1
13b8bd0a5   Rusty Russell   sched_rt: don't a...
1339
1340
  	    && cpupri_find(&rq->rd->cpupri, p, NULL))
  		return;
24600ce89   Rusty Russell   sched: convert ch...
1341

7ebefa8ce   Dmitry Adamushko   sched: rework of ...
1342
1343
1344
1345
1346
1347
  	/*
  	 * There appear to be other CPUs that can accept
  	 * current and none to run 'p', so let's reschedule
  	 * to try to push current away:
  	 */
  	requeue_task_rt(rq, p, 1);
8875125ef   Kirill Tkhai   sched: Transform ...
1348
  	resched_curr(rq);
7ebefa8ce   Dmitry Adamushko   sched: rework of ...
1349
  }
e7693a362   Gregory Haskins   sched: de-SCHED_O...
1350
  #endif /* CONFIG_SMP */
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1351
1352
1353
  /*
   * Preempt the current task with a newly woken task if needed:
   */
7d4787214   Peter Zijlstra   sched: Rename syn...
1354
  static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1355
  {
45c01e824   Gregory Haskins   sched: prioritize...
1356
  	if (p->prio < rq->curr->prio) {
8875125ef   Kirill Tkhai   sched: Transform ...
1357
  		resched_curr(rq);
45c01e824   Gregory Haskins   sched: prioritize...
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
  		return;
  	}
  
  #ifdef CONFIG_SMP
  	/*
  	 * If:
  	 *
  	 * - the newly woken task is of equal priority to the current task
  	 * - the newly woken task is non-migratable while current is migratable
  	 * - current will be preempted on the next reschedule
  	 *
  	 * we should check to see if current can readily move to a different
  	 * cpu.  If so, we will reschedule to allow the push logic to try
  	 * to move current somewhere else, making room for our non-migratable
  	 * task.
  	 */
8dd0de8be   Hillf Danton   sched: Fix need_r...
1374
  	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
7ebefa8ce   Dmitry Adamushko   sched: rework of ...
1375
  		check_preempt_equal_prio(rq, p);
45c01e824   Gregory Haskins   sched: prioritize...
1376
  #endif
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1377
  }
6f505b164   Peter Zijlstra   sched: rt group s...
1378
1379
  static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
  						   struct rt_rq *rt_rq)
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1380
  {
6f505b164   Peter Zijlstra   sched: rt group s...
1381
1382
  	struct rt_prio_array *array = &rt_rq->active;
  	struct sched_rt_entity *next = NULL;
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1383
1384
1385
1386
  	struct list_head *queue;
  	int idx;
  
  	idx = sched_find_first_bit(array->bitmap);
6f505b164   Peter Zijlstra   sched: rt group s...
1387
  	BUG_ON(idx >= MAX_RT_PRIO);
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1388
1389
  
  	queue = array->queue + idx;
6f505b164   Peter Zijlstra   sched: rt group s...
1390
  	next = list_entry(queue->next, struct sched_rt_entity, run_list);
326587b84   Dmitry Adamushko   sched: fix goto r...
1391

6f505b164   Peter Zijlstra   sched: rt group s...
1392
1393
  	return next;
  }
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1394

917b627d4   Gregory Haskins   sched: create "pu...
1395
  static struct task_struct *_pick_next_task_rt(struct rq *rq)
6f505b164   Peter Zijlstra   sched: rt group s...
1396
1397
1398
  {
  	struct sched_rt_entity *rt_se;
  	struct task_struct *p;
606dba2e2   Peter Zijlstra   sched: Push put_p...
1399
  	struct rt_rq *rt_rq  = &rq->rt;
6f505b164   Peter Zijlstra   sched: rt group s...
1400
1401
1402
  
  	do {
  		rt_se = pick_next_rt_entity(rq, rt_rq);
326587b84   Dmitry Adamushko   sched: fix goto r...
1403
  		BUG_ON(!rt_se);
6f505b164   Peter Zijlstra   sched: rt group s...
1404
1405
1406
1407
  		rt_rq = group_rt_rq(rt_se);
  	} while (rt_rq);
  
  	p = rt_task_of(rt_se);
78becc270   Frederic Weisbecker   sched: Use an acc...
1408
  	p->se.exec_start = rq_clock_task(rq);
917b627d4   Gregory Haskins   sched: create "pu...
1409
1410
1411
  
  	return p;
  }
606dba2e2   Peter Zijlstra   sched: Push put_p...
1412
  static struct task_struct *
e7904a28f   Peter Zijlstra   locking/lockdep, ...
1413
  pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
917b627d4   Gregory Haskins   sched: create "pu...
1414
  {
606dba2e2   Peter Zijlstra   sched: Push put_p...
1415
1416
  	struct task_struct *p;
  	struct rt_rq *rt_rq = &rq->rt;
37e117c07   Peter Zijlstra   sched: Guarantee ...
1417
  	if (need_pull_rt_task(rq, prev)) {
cbce1a686   Peter Zijlstra   sched,lockdep: Em...
1418
1419
1420
1421
1422
1423
  		/*
  		 * This is OK, because current is on_cpu, which avoids it being
  		 * picked for load-balance; preemption/IRQs are still
  		 * disabled, avoiding further scheduler activity on it, and we're
  		 * being very careful to re-start the picking loop.
  		 */
e7904a28f   Peter Zijlstra   locking/lockdep, ...
1424
  		lockdep_unpin_lock(&rq->lock, cookie);
38033c37f   Peter Zijlstra   sched: Push down ...
1425
  		pull_rt_task(rq);
e7904a28f   Peter Zijlstra   locking/lockdep, ...
1426
  		lockdep_repin_lock(&rq->lock, cookie);
37e117c07   Peter Zijlstra   sched: Guarantee ...
1427
1428
  		/*
  		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
a1d9a3231   Kirill Tkhai   sched: Check for ...
1429
1430
  		 * means a dl or stop task can slip in, in which case we need
  		 * to re-start task selection.
37e117c07   Peter Zijlstra   sched: Guarantee ...
1431
  		 */
da0c1e65b   Kirill Tkhai   sched: Add wrappe...
1432
  		if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
a1d9a3231   Kirill Tkhai   sched: Check for ...
1433
  			     rq->dl.dl_nr_running))
37e117c07   Peter Zijlstra   sched: Guarantee ...
1434
1435
  			return RETRY_TASK;
  	}
38033c37f   Peter Zijlstra   sched: Push down ...
1436

734ff2a71   Kirill Tkhai   sched/rt: Fix pic...
1437
1438
1439
1440
1441
1442
  	/*
  	 * We may dequeue prev's rt_rq in put_prev_task().
  	 * So, we update the time before the rt_nr_running check.
  	 */
  	if (prev->sched_class == &rt_sched_class)
  		update_curr_rt(rq);
f4ebcbc0d   Kirill Tkhai   sched/rt: Substra...
1443
  	if (!rt_rq->rt_queued)
606dba2e2   Peter Zijlstra   sched: Push put_p...
1444
  		return NULL;
3f1d2a318   Peter Zijlstra   sched: Fix hotplu...
1445
  	put_prev_task(rq, prev);
606dba2e2   Peter Zijlstra   sched: Push put_p...
1446
1447
  
  	p = _pick_next_task_rt(rq);
917b627d4   Gregory Haskins   sched: create "pu...
1448
1449
  
  	/* The running task is never eligible for pushing */
f3f1768f8   Kirill Tkhai   sched/rt: Remove ...
1450
  	dequeue_pushable_task(rq, p);
917b627d4   Gregory Haskins   sched: create "pu...
1451

e3fca9e7c   Peter Zijlstra   sched: Replace po...
1452
  	queue_push_tasks(rq);
3f029d3c6   Gregory Haskins   sched: Enhance th...
1453

6f505b164   Peter Zijlstra   sched: rt group s...
1454
  	return p;
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1455
  }
31ee529cc   Ingo Molnar   sched: remove the...
1456
  static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1457
  {
f1e14ef64   Ingo Molnar   sched: remove the...
1458
  	update_curr_rt(rq);
917b627d4   Gregory Haskins   sched: create "pu...
1459
1460
1461
1462
1463
  
  	/*
  	 * The previous task needs to be made eligible for pushing
  	 * if it is still active
  	 */
50605ffbd   Thomas Gleixner   sched/core: Provi...
1464
  	if (on_rt_rq(&p->rt) && tsk_nr_cpus_allowed(p) > 1)
917b627d4   Gregory Haskins   sched: create "pu...
1465
  		enqueue_pushable_task(rq, p);
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
1466
  }
681f3e685   Peter Williams   sched: isolate SM...
1467
  #ifdef CONFIG_SMP
6f505b164   Peter Zijlstra   sched: rt group s...
1468

e8fa13626   Steven Rostedt   sched: add RT tas...
1469
1470
  /* Only try algorithms three times */
  #define RT_MAX_TRIES 3
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1471
1472
1473
  static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
  {
  	if (!task_running(rq, p) &&
60334caf3   Kirill Tkhai   sched/rt: Further...
1474
  	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1475
1476
1477
  		return 1;
  	return 0;
  }
e23ee7477   Kirill Tkhai   sched/rt: Simplif...
1478
1479
1480
1481
1482
  /*
   * Return the highest pushable task on the rq that is suitable to be executed
   * on the given cpu, or NULL otherwise.
   */
  static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
e8fa13626   Steven Rostedt   sched: add RT tas...
1483
  {
e23ee7477   Kirill Tkhai   sched/rt: Simplif...
1484
1485
  	struct plist_head *head = &rq->rt.pushable_tasks;
  	struct task_struct *p;
3d07467b7   Peter Zijlstra   sched: Fix pick_n...
1486

e23ee7477   Kirill Tkhai   sched/rt: Simplif...
1487
1488
  	if (!has_pushable_tasks(rq))
  		return NULL;
3d07467b7   Peter Zijlstra   sched: Fix pick_n...
1489

e23ee7477   Kirill Tkhai   sched/rt: Simplif...
1490
1491
1492
  	plist_for_each_entry(p, head, pushable_tasks) {
  		if (pick_rt_task(rq, p, cpu))
  			return p;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1493
  	}
e23ee7477   Kirill Tkhai   sched/rt: Simplif...
1494
  	return NULL;
e8fa13626   Steven Rostedt   sched: add RT tas...
1495
  }
0e3900e6d   Rusty Russell   sched: convert lo...
1496
  static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
e8fa13626   Steven Rostedt   sched: add RT tas...
1497

6e1254d2c   Gregory Haskins   sched: optimize R...
1498
1499
1500
  static int find_lowest_rq(struct task_struct *task)
  {
  	struct sched_domain *sd;
4ba296842   Christoph Lameter   percpu: Resolve a...
1501
  	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
6e1254d2c   Gregory Haskins   sched: optimize R...
1502
1503
  	int this_cpu = smp_processor_id();
  	int cpu      = task_cpu(task);
06f90dbd7   Gregory Haskins   sched: RT-balance...
1504

0da938c44   Steven Rostedt   sched: Check if l...
1505
1506
1507
  	/* Make sure the mask is initialized first */
  	if (unlikely(!lowest_mask))
  		return -1;
50605ffbd   Thomas Gleixner   sched/core: Provi...
1508
  	if (tsk_nr_cpus_allowed(task) == 1)
6e0534f27   Gregory Haskins   sched: use a 2-d ...
1509
  		return -1; /* No other targets possible */
6e1254d2c   Gregory Haskins   sched: optimize R...
1510

6e0534f27   Gregory Haskins   sched: use a 2-d ...
1511
1512
  	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
  		return -1; /* No targets found */
6e1254d2c   Gregory Haskins   sched: optimize R...
1513
1514
1515
1516
1517
1518
1519
1520
1521
  
  	/*
  	 * At this point we have built a mask of cpus representing the
  	 * lowest priority tasks in the system.  Now we want to elect
  	 * the best one based on our affinity and topology.
  	 *
  	 * We prioritize the last cpu that the task executed on since
  	 * it is most likely cache-hot in that location.
  	 */
96f874e26   Rusty Russell   sched: convert re...
1522
  	if (cpumask_test_cpu(cpu, lowest_mask))
6e1254d2c   Gregory Haskins   sched: optimize R...
1523
1524
1525
1526
1527
1528
  		return cpu;
  
  	/*
  	 * Otherwise, we consult the sched_domains span maps to figure
  	 * out which cpu is logically closest to our hot cache data.
  	 */
e2c880630   Rusty Russell   cpumask: Simplify...
1529
1530
  	if (!cpumask_test_cpu(this_cpu, lowest_mask))
  		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
6e1254d2c   Gregory Haskins   sched: optimize R...
1531

cd4ae6adf   Xiaotian Feng   sched: More sched...
1532
  	rcu_read_lock();
e2c880630   Rusty Russell   cpumask: Simplify...
1533
1534
1535
  	for_each_domain(cpu, sd) {
  		if (sd->flags & SD_WAKE_AFFINE) {
  			int best_cpu;
6e1254d2c   Gregory Haskins   sched: optimize R...
1536

e2c880630   Rusty Russell   cpumask: Simplify...
1537
1538
1539
1540
1541
  			/*
  			 * "this_cpu" is cheaper to preempt than a
  			 * remote processor.
  			 */
  			if (this_cpu != -1 &&
cd4ae6adf   Xiaotian Feng   sched: More sched...
1542
1543
  			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
  				rcu_read_unlock();
e2c880630   Rusty Russell   cpumask: Simplify...
1544
  				return this_cpu;
cd4ae6adf   Xiaotian Feng   sched: More sched...
1545
  			}
e2c880630   Rusty Russell   cpumask: Simplify...
1546
1547
1548
  
  			best_cpu = cpumask_first_and(lowest_mask,
  						     sched_domain_span(sd));
cd4ae6adf   Xiaotian Feng   sched: More sched...
1549
1550
  			if (best_cpu < nr_cpu_ids) {
  				rcu_read_unlock();
e2c880630   Rusty Russell   cpumask: Simplify...
1551
  				return best_cpu;
cd4ae6adf   Xiaotian Feng   sched: More sched...
1552
  			}
6e1254d2c   Gregory Haskins   sched: optimize R...
1553
1554
  		}
  	}
cd4ae6adf   Xiaotian Feng   sched: More sched...
1555
  	rcu_read_unlock();
6e1254d2c   Gregory Haskins   sched: optimize R...
1556
1557
1558
1559
1560
1561
  
  	/*
  	 * And finally, if there were no matches within the domains
  	 * just give the caller *something* to work with from the compatible
  	 * locations.
  	 */
e2c880630   Rusty Russell   cpumask: Simplify...
1562
1563
1564
1565
1566
1567
1568
  	if (this_cpu != -1)
  		return this_cpu;
  
  	cpu = cpumask_any(lowest_mask);
  	if (cpu < nr_cpu_ids)
  		return cpu;
  	return -1;
07b4032c9   Gregory Haskins   sched: break out ...
1569
1570
1571
  }
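  /*
   * Editor's sketch (simplified model with hypothetical names): leaving the
   * sched_domain walk aside, the fallback order above is "task's last CPU if
   * eligible, then this CPU, then anything in the mask".  With the lowest-
   * priority CPUs represented as bits of one word (so up to one word's worth
   * of CPUs):
   *
   *    static int pick_lowest(unsigned long lowest_mask, int task_cpu,
   *                           int this_cpu, int nr_cpus)
   *    {
   *            int cpu;
   *
   *            if (!lowest_mask)
   *                    return -1;                      // no targets found
   *            if (lowest_mask & (1UL << task_cpu))
   *                    return task_cpu;                // likely cache-hot
   *            if (this_cpu != -1 && (lowest_mask & (1UL << this_cpu)))
   *                    return this_cpu;                // cheap to preempt
   *            for (cpu = 0; cpu < nr_cpus; cpu++)     // give back *something*
   *                    if (lowest_mask & (1UL << cpu))
   *                            return cpu;
   *            return -1;
   *    }
   */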
  
  /* Will lock the rq it finds */
4df64c0bf   Ingo Molnar   sched: clean up f...
1572
  static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
07b4032c9   Gregory Haskins   sched: break out ...
1573
1574
  {
  	struct rq *lowest_rq = NULL;
07b4032c9   Gregory Haskins   sched: break out ...
1575
  	int tries;
4df64c0bf   Ingo Molnar   sched: clean up f...
1576
  	int cpu;
e8fa13626   Steven Rostedt   sched: add RT tas...
1577

07b4032c9   Gregory Haskins   sched: break out ...
1578
1579
  	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
  		cpu = find_lowest_rq(task);
2de0b4639   Gregory Haskins   sched: RT balanci...
1580
  		if ((cpu == -1) || (cpu == rq->cpu))
e8fa13626   Steven Rostedt   sched: add RT tas...
1581
  			break;
07b4032c9   Gregory Haskins   sched: break out ...
1582
  		lowest_rq = cpu_rq(cpu);
80e3d87b2   Tim Chen   sched/rt: Reduce ...
1583
1584
1585
1586
1587
1588
1589
1590
1591
  		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
  			/*
  			 * Target rq has tasks of equal or higher priority,
  			 * retrying does not release any lock and is unlikely
  			 * to yield a different result.
  			 */
  			lowest_rq = NULL;
  			break;
  		}
e8fa13626   Steven Rostedt   sched: add RT tas...
1592
  		/* if the prio of this runqueue changed, try again */
07b4032c9   Gregory Haskins   sched: break out ...
1593
  		if (double_lock_balance(rq, lowest_rq)) {
e8fa13626   Steven Rostedt   sched: add RT tas...
1594
1595
1596
1597
1598
1599
  			/*
  			 * We had to unlock the run queue. In
  			 * the meantime, the task could have
  			 * migrated already or had its affinity changed.
  			 * Also make sure that it wasn't scheduled on its rq.
  			 */
07b4032c9   Gregory Haskins   sched: break out ...
1600
  			if (unlikely(task_rq(task) != rq ||
96f874e26   Rusty Russell   sched: convert re...
1601
  				     !cpumask_test_cpu(lowest_rq->cpu,
fa17b507f   Peter Zijlstra   sched: Wrap sched...
1602
  						       tsk_cpus_allowed(task)) ||
07b4032c9   Gregory Haskins   sched: break out ...
1603
  				     task_running(rq, task) ||
13b5ab02a   Xunlei Pang   sched/rt, sched/d...
1604
  				     !rt_task(task) ||
da0c1e65b   Kirill Tkhai   sched: Add wrappe...
1605
  				     !task_on_rq_queued(task))) {
4df64c0bf   Ingo Molnar   sched: clean up f...
1606

7f1b43936   Peter Zijlstra   sched/rt: Fix loc...
1607
  				double_unlock_balance(rq, lowest_rq);
e8fa13626   Steven Rostedt   sched: add RT tas...
1608
1609
1610
1611
1612
1613
  				lowest_rq = NULL;
  				break;
  			}
  		}
  
  		/* If this rq is still suitable use it. */
e864c499d   Gregory Haskins   sched: track the ...
1614
  		if (lowest_rq->rt.highest_prio.curr > task->prio)
e8fa13626   Steven Rostedt   sched: add RT tas...
1615
1616
1617
  			break;
  
  		/* try again */
1b12bbc74   Peter Zijlstra   lockdep: re-annot...
1618
  		double_unlock_balance(rq, lowest_rq);
e8fa13626   Steven Rostedt   sched: add RT tas...
1619
1620
1621
1622
1623
  		lowest_rq = NULL;
  	}
  
  	return lowest_rq;
  }
917b627d4   Gregory Haskins   sched: create "pu...
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
  static struct task_struct *pick_next_pushable_task(struct rq *rq)
  {
  	struct task_struct *p;
  
  	if (!has_pushable_tasks(rq))
  		return NULL;
  
  	p = plist_first_entry(&rq->rt.pushable_tasks,
  			      struct task_struct, pushable_tasks);
  
  	BUG_ON(rq->cpu != task_cpu(p));
  	BUG_ON(task_current(rq, p));
50605ffbd   Thomas Gleixner   sched/core: Provi...
1636
  	BUG_ON(tsk_nr_cpus_allowed(p) <= 1);
917b627d4   Gregory Haskins   sched: create "pu...
1637

da0c1e65b   Kirill Tkhai   sched: Add wrappe...
1638
  	BUG_ON(!task_on_rq_queued(p));
917b627d4   Gregory Haskins   sched: create "pu...
1639
1640
1641
1642
  	BUG_ON(!rt_task(p));
  
  	return p;
  }
e8fa13626   Steven Rostedt   sched: add RT tas...
1643
1644
1645
1646
1647
  /*
   * If the current CPU has more than one RT task, see if the non-running
   * task can migrate over to a CPU that is running a task
   * of lesser priority.
   */
697f0a487   Gregory Haskins   sched: clean up t...
1648
  static int push_rt_task(struct rq *rq)
e8fa13626   Steven Rostedt   sched: add RT tas...
1649
1650
1651
  {
  	struct task_struct *next_task;
  	struct rq *lowest_rq;
311e800e1   Hillf Danton   sched, rt: Fix rq...
1652
  	int ret = 0;
e8fa13626   Steven Rostedt   sched: add RT tas...
1653

a22d7fc18   Gregory Haskins   sched: wake-balan...
1654
1655
  	if (!rq->rt.overloaded)
  		return 0;
917b627d4   Gregory Haskins   sched: create "pu...
1656
  	next_task = pick_next_pushable_task(rq);
e8fa13626   Steven Rostedt   sched: add RT tas...
1657
1658
  	if (!next_task)
  		return 0;
492462742   Peter Zijlstra   sched: Unindent l...
1659
  retry:
697f0a487   Gregory Haskins   sched: clean up t...
1660
  	if (unlikely(next_task == rq->curr)) {
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1661
  		WARN_ON(1);
e8fa13626   Steven Rostedt   sched: add RT tas...
1662
  		return 0;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1663
  	}
e8fa13626   Steven Rostedt   sched: add RT tas...
1664
1665
1666
1667
1668
1669
  
  	/*
  	 * It's possible that the next_task slipped in with a
  	 * higher priority than current. If that's the case,
  	 * just reschedule current.
  	 */
697f0a487   Gregory Haskins   sched: clean up t...
1670
  	if (unlikely(next_task->prio < rq->curr->prio)) {
8875125ef   Kirill Tkhai   sched: Transform ...
1671
  		resched_curr(rq);
e8fa13626   Steven Rostedt   sched: add RT tas...
1672
1673
  		return 0;
  	}
697f0a487   Gregory Haskins   sched: clean up t...
1674
  	/* We might release rq lock */
e8fa13626   Steven Rostedt   sched: add RT tas...
1675
1676
1677
  	get_task_struct(next_task);
  
  	/* find_lock_lowest_rq locks the rq if found */
697f0a487   Gregory Haskins   sched: clean up t...
1678
  	lowest_rq = find_lock_lowest_rq(next_task, rq);
e8fa13626   Steven Rostedt   sched: add RT tas...
1679
1680
1681
  	if (!lowest_rq) {
  		struct task_struct *task;
  		/*
311e800e1   Hillf Danton   sched, rt: Fix rq...
1682
  		 * find_lock_lowest_rq releases rq->lock
1563513d3   Gregory Haskins   RT: fix push_rt_t...
1683
1684
1685
1686
1687
  		 * so it is possible that next_task has migrated.
  		 *
  		 * We need to make sure that the task is still on the same
  		 * run-queue and is also still the next task eligible for
  		 * pushing.
e8fa13626   Steven Rostedt   sched: add RT tas...
1688
  		 */
917b627d4   Gregory Haskins   sched: create "pu...
1689
  		task = pick_next_pushable_task(rq);
1563513d3   Gregory Haskins   RT: fix push_rt_t...
1690
1691
  		if (task_cpu(next_task) == rq->cpu && task == next_task) {
  			/*
311e800e1   Hillf Danton   sched, rt: Fix rq...
1692
1693
1694
1695
  			 * The task hasn't migrated, and is still the next
  			 * eligible task, but we failed to find a run-queue
  			 * to push it to.  Do not retry in this case, since
  			 * other cpus will pull from us when ready.
1563513d3   Gregory Haskins   RT: fix push_rt_t...
1696
  			 */
1563513d3   Gregory Haskins   RT: fix push_rt_t...
1697
  			goto out;
e8fa13626   Steven Rostedt   sched: add RT tas...
1698
  		}
917b627d4   Gregory Haskins   sched: create "pu...
1699

1563513d3   Gregory Haskins   RT: fix push_rt_t...
1700
1701
1702
  		if (!task)
  			/* No more tasks, just exit */
  			goto out;
917b627d4   Gregory Haskins   sched: create "pu...
1703
  		/*
1563513d3   Gregory Haskins   RT: fix push_rt_t...
1704
  		 * Something has shifted, try again.
917b627d4   Gregory Haskins   sched: create "pu...
1705
  		 */
1563513d3   Gregory Haskins   RT: fix push_rt_t...
1706
1707
1708
  		put_task_struct(next_task);
  		next_task = task;
  		goto retry;
e8fa13626   Steven Rostedt   sched: add RT tas...
1709
  	}
697f0a487   Gregory Haskins   sched: clean up t...
1710
  	deactivate_task(rq, next_task, 0);
e8fa13626   Steven Rostedt   sched: add RT tas...
1711
1712
  	set_task_cpu(next_task, lowest_rq->cpu);
  	activate_task(lowest_rq, next_task, 0);
311e800e1   Hillf Danton   sched, rt: Fix rq...
1713
  	ret = 1;
e8fa13626   Steven Rostedt   sched: add RT tas...
1714

8875125ef   Kirill Tkhai   sched: Transform ...
1715
  	resched_curr(lowest_rq);
e8fa13626   Steven Rostedt   sched: add RT tas...
1716

1b12bbc74   Peter Zijlstra   lockdep: re-annot...
1717
  	double_unlock_balance(rq, lowest_rq);
e8fa13626   Steven Rostedt   sched: add RT tas...
1718

e8fa13626   Steven Rostedt   sched: add RT tas...
1719
1720
  out:
  	put_task_struct(next_task);
311e800e1   Hillf Danton   sched, rt: Fix rq...
1721
  	return ret;
e8fa13626   Steven Rostedt   sched: add RT tas...
1722
  }
e8fa13626   Steven Rostedt   sched: add RT tas...
1723
1724
1725
1726
1727
1728
  static void push_rt_tasks(struct rq *rq)
  {
  	/* push_rt_task will return true if it moved an RT */
  	while (push_rt_task(rq))
  		;
  }
b6366f048   Steven Rostedt   sched/rt: Use IPI...
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
  #ifdef HAVE_RT_PUSH_IPI
  /*
   * The search for the next cpu always starts at rq->cpu and ends
   * when we reach rq->cpu again. It will never return rq->cpu.
   * This returns the next cpu to check, or nr_cpu_ids if the loop
   * is complete.
   *
   * rq->rt.push_cpu holds the last cpu returned by this function,
   * or if this is the first instance, it must hold rq->cpu.
   */
  static int rto_next_cpu(struct rq *rq)
  {
  	int prev_cpu = rq->rt.push_cpu;
  	int cpu;
  
  	cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);
  
  	/*
  	 * If the previous cpu is less than the rq's CPU, then it already
  	 * passed the end of the mask, and has started from the beginning.
  	 * We end if the next CPU is greater than or equal to rq's CPU.
  	 */
  	if (prev_cpu < rq->cpu) {
  		if (cpu >= rq->cpu)
  			return nr_cpu_ids;
  
  	} else if (cpu >= nr_cpu_ids) {
  		/*
  		 * We passed the end of the mask, start at the beginning.
  		 * If the result is greater than or equal to the rq's CPU, then
  		 * the loop is finished.
  		 */
  		cpu = cpumask_first(rq->rd->rto_mask);
  		if (cpu >= rq->cpu)
  			return nr_cpu_ids;
  	}
  	rq->rt.push_cpu = cpu;
  
  	/* Return cpu to let the caller know if the loop is finished or not */
  	return cpu;
  }
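  /*
   * Editor's sketch (illustrative only): the circular scan above can be
   * modelled with a plain bitmask for up to one word's worth of CPUs.
   * Starting just after @prev, wrap around once, and report nr_cpus when
   * the walk would pass @start (the originating CPU) again:
   *
   *    static int next_rto_cpu(unsigned long rto_mask, int start, int prev,
   *                            int nr_cpus)
   *    {
   *            int cpu;
   *
   *            for (cpu = prev + 1; cpu < nr_cpus; cpu++) {
   *                    if (prev < start && cpu >= start)
   *                            return nr_cpus;         // wrapped back to start
   *                    if (rto_mask & (1UL << cpu))
   *                            return cpu;
   *            }
   *            for (cpu = 0; cpu < start; cpu++)       // wrap to the beginning
   *                    if (rto_mask & (1UL << cpu))
   *                            return cpu;
   *            return nr_cpus;                         // loop complete
   *    }
   */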
  
  static int find_next_push_cpu(struct rq *rq)
  {
  	struct rq *next_rq;
  	int cpu;
  
  	while (1) {
  		cpu = rto_next_cpu(rq);
  		if (cpu >= nr_cpu_ids)
  			break;
  		next_rq = cpu_rq(cpu);
  
  		/* Make sure the next rq can push to this rq */
  		if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
  			break;
  	}
  
  	return cpu;
  }
  
  #define RT_PUSH_IPI_EXECUTING		1
  #define RT_PUSH_IPI_RESTART		2
  
  static void tell_cpu_to_push(struct rq *rq)
  {
  	int cpu;
  
  	if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
  		raw_spin_lock(&rq->rt.push_lock);
  		/* Make sure it's still executing */
  		if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
  			/*
  			 * Tell the IPI to restart the loop as things have
  			 * changed since it started.
  			 */
  			rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
  			raw_spin_unlock(&rq->rt.push_lock);
  			return;
  		}
  		raw_spin_unlock(&rq->rt.push_lock);
  	}
  
  	/* When here, there's no IPI going around */
  
  	rq->rt.push_cpu = rq->cpu;
  	cpu = find_next_push_cpu(rq);
  	if (cpu >= nr_cpu_ids)
  		return;
  
  	rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;
  
  	irq_work_queue_on(&rq->rt.push_work, cpu);
  }
  
  /* Called from hardirq context */
  static void try_to_push_tasks(void *arg)
  {
  	struct rt_rq *rt_rq = arg;
  	struct rq *rq, *src_rq;
  	int this_cpu;
  	int cpu;
  
  	this_cpu = rt_rq->push_cpu;
  
  	/* Paranoid check */
  	BUG_ON(this_cpu != smp_processor_id());
  
  	rq = cpu_rq(this_cpu);
  	src_rq = rq_of_rt_rq(rt_rq);
  
  again:
  	if (has_pushable_tasks(rq)) {
  		raw_spin_lock(&rq->lock);
  		push_rt_task(rq);
  		raw_spin_unlock(&rq->lock);
  	}
  
  	/* Pass the IPI to the next rt overloaded queue */
  	raw_spin_lock(&rt_rq->push_lock);
  	/*
  	 * If the source queue changed since the IPI went out,
  	 * we need to restart the search from that CPU again.
  	 */
  	if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
  		rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
  		rt_rq->push_cpu = src_rq->cpu;
  	}
  
  	cpu = find_next_push_cpu(src_rq);
  
  	if (cpu >= nr_cpu_ids)
  		rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
  	raw_spin_unlock(&rt_rq->push_lock);
  
  	if (cpu >= nr_cpu_ids)
  		return;
  
  	/*
  	 * It is possible that a restart caused this CPU to be
  	 * chosen again. Don't bother with an IPI, just see if we
  	 * have more to push.
  	 */
  	if (unlikely(cpu == rq->cpu))
  		goto again;
  
  	/* Try the next RT overloaded CPU */
  	irq_work_queue_on(&rt_rq->push_work, cpu);
  }
  
  static void push_irq_work_func(struct irq_work *work)
  {
  	struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);
  
  	try_to_push_tasks(rt_rq);
  }
  #endif /* HAVE_RT_PUSH_IPI */
8046d6806   Peter Zijlstra   sched,rt: Remove ...
1886
  static void pull_rt_task(struct rq *this_rq)
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1887
  {
8046d6806   Peter Zijlstra   sched,rt: Remove ...
1888
1889
  	int this_cpu = this_rq->cpu, cpu;
  	bool resched = false;
a8728944e   Gregory Haskins   sched: use highes...
1890
  	struct task_struct *p;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1891
  	struct rq *src_rq;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1892

637f50851   Gregory Haskins   sched: only balan...
1893
  	if (likely(!rt_overloaded(this_rq)))
8046d6806   Peter Zijlstra   sched,rt: Remove ...
1894
  		return;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1895

7c3f2ab7b   Peter Zijlstra   sched/rt: Add mis...
1896
1897
1898
1899
1900
  	/*
  	 * Match the barrier from rt_set_overload(); this guarantees that if we
  	 * see overloaded we must also see the rto_mask bit.
  	 */
  	smp_rmb();
b6366f048   Steven Rostedt   sched/rt: Use IPI...
1901
1902
1903
  #ifdef HAVE_RT_PUSH_IPI
  	if (sched_feat(RT_PUSH_IPI)) {
  		tell_cpu_to_push(this_rq);
8046d6806   Peter Zijlstra   sched,rt: Remove ...
1904
  		return;
b6366f048   Steven Rostedt   sched/rt: Use IPI...
1905
1906
  	}
  #endif
c6c4927b2   Rusty Russell   sched: convert st...
1907
  	for_each_cpu(cpu, this_rq->rd->rto_mask) {
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1908
1909
1910
1911
  		if (this_cpu == cpu)
  			continue;
  
  		src_rq = cpu_rq(cpu);
74ab8e4f6   Gregory Haskins   sched: use highes...
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
  
  		/*
  		 * Don't bother taking the src_rq->lock if the next highest
  		 * task is known to be lower-priority than our current task.
  		 * This may look racy, but if this value is about to go
  		 * logically higher, the src_rq will push this task away.
  		 * And if it's going logically lower, we do not care.
  		 */
  		if (src_rq->rt.highest_prio.next >=
  		    this_rq->rt.highest_prio.curr)
  			continue;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1923
1924
1925
  		/*
  		 * We can potentially drop this_rq's lock in
  		 * double_lock_balance, and another CPU could
a8728944e   Gregory Haskins   sched: use highes...
1926
  		 * alter this_rq
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1927
  		 */
a8728944e   Gregory Haskins   sched: use highes...
1928
  		double_lock_balance(this_rq, src_rq);
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1929
1930
  
  		/*
e23ee7477   Kirill Tkhai   sched/rt: Simplif...
1931
1932
  		 * We can only pull a task that is pushable
  		 * on its rq, and no others.
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1933
  		 */
e23ee7477   Kirill Tkhai   sched/rt: Simplif...
1934
  		p = pick_highest_pushable_task(src_rq, this_cpu);
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1935
1936
1937
1938
1939
  
  		/*
  		 * Do we have an RT task that preempts
  		 * the to-be-scheduled task?
  		 */
a8728944e   Gregory Haskins   sched: use highes...
1940
  		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1941
  			WARN_ON(p == src_rq->curr);
da0c1e65b   Kirill Tkhai   sched: Add wrappe...
1942
  			WARN_ON(!task_on_rq_queued(p));
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1943
1944
1945
1946
1947
1948
1949
  
  			/*
  			 * There's a chance that p is higher in priority
  			 * than what's currently running on its cpu.
  			 * This is just that p is waking up and hasn't
  			 * had a chance to schedule. We only pull
  			 * p if it is lower in priority than the
a8728944e   Gregory Haskins   sched: use highes...
1950
  			 * current task on the run queue.
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1951
  			 */
a8728944e   Gregory Haskins   sched: use highes...
1952
  			if (p->prio < src_rq->curr->prio)
614ee1f61   Mike Galbraith   sched: pull_rt_ta...
1953
  				goto skip;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1954

8046d6806   Peter Zijlstra   sched,rt: Remove ...
1955
  			resched = true;
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1956
1957
1958
1959
1960
1961
1962
  
  			deactivate_task(src_rq, p, 0);
  			set_task_cpu(p, this_cpu);
  			activate_task(this_rq, p, 0);
  			/*
  			 * We continue with the search, just in
  			 * case there's an even higher prio task
25985edce   Lucas De Marchi   Fix common misspe...
1963
  			 * in another runqueue. (low likelihood
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1964
  			 * but possible)
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1965
  			 */
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1966
  		}
492462742   Peter Zijlstra   sched: Unindent l...
1967
  skip:
1b12bbc74   Peter Zijlstra   lockdep: re-annot...
1968
  		double_unlock_balance(this_rq, src_rq);
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1969
  	}
8046d6806   Peter Zijlstra   sched,rt: Remove ...
1970
1971
  	if (resched)
  		resched_curr(this_rq);
f65eda4f7   Steven Rostedt   sched: pull RT ta...
1972
  }
8ae121ac8   Gregory Haskins   sched: fix RT tas...
1973
1974
1975
1976
  /*
   * If we are not running and we are not going to reschedule soon, we should
   * try to push tasks away now
   */
efbbd05a5   Peter Zijlstra   sched: Add pre an...
1977
  static void task_woken_rt(struct rq *rq, struct task_struct *p)
4642dafdf   Steven Rostedt   sched: push RT ta...
1978
  {
9a897c5a6   Steven Rostedt   sched: RT-balance...
1979
  	if (!task_running(rq, p) &&
8ae121ac8   Gregory Haskins   sched: fix RT tas...
1980
  	    !test_tsk_need_resched(rq->curr) &&
50605ffbd   Thomas Gleixner   sched/core: Provi...
1981
  	    tsk_nr_cpus_allowed(p) > 1 &&
1baca4ce1   Juri Lelli   sched/deadline: A...
1982
  	    (dl_task(rq->curr) || rt_task(rq->curr)) &&
50605ffbd   Thomas Gleixner   sched/core: Provi...
1983
  	    (tsk_nr_cpus_allowed(rq->curr) < 2 ||
3be209a8e   Shawn Bohrer   sched/rt: Migrate...
1984
  	     rq->curr->prio <= p->prio))
4642dafdf   Steven Rostedt   sched: push RT ta...
1985
1986
  		push_rt_tasks(rq);
  }
bdd7c81b4   Ingo Molnar   sched: fix sched_...
1987
  /* Assumes rq->lock is held */
1f11eb6a8   Gregory Haskins   sched: fix cpupri...
1988
  static void rq_online_rt(struct rq *rq)
bdd7c81b4   Ingo Molnar   sched: fix sched_...
1989
1990
1991
  {
  	if (rq->rt.overloaded)
  		rt_set_overload(rq);
6e0534f27   Gregory Haskins   sched: use a 2-d ...
1992

7def2be1d   Peter Zijlstra   sched: fix hotplu...
1993
  	__enable_runtime(rq);
e864c499d   Gregory Haskins   sched: track the ...
1994
  	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
bdd7c81b4   Ingo Molnar   sched: fix sched_...
1995
1996
1997
  }
  
  /* Assumes rq->lock is held */
1f11eb6a8   Gregory Haskins   sched: fix cpupri...
1998
  static void rq_offline_rt(struct rq *rq)
bdd7c81b4   Ingo Molnar   sched: fix sched_...
1999
2000
2001
  {
  	if (rq->rt.overloaded)
  		rt_clear_overload(rq);
6e0534f27   Gregory Haskins   sched: use a 2-d ...
2002

7def2be1d   Peter Zijlstra   sched: fix hotplu...
2003
  	__disable_runtime(rq);
6e0534f27   Gregory Haskins   sched: use a 2-d ...
2004
  	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
bdd7c81b4   Ingo Molnar   sched: fix sched_...
2005
  }
cb4698450   Steven Rostedt   sched: RT-balance...
2006
2007
2008
2009
2010
  
  /*
   * When switching from the rt queue, we bring ourselves to a position
   * where we might want to pull RT tasks from other runqueues.
   */
da7a735e5   Peter Zijlstra   sched: Fix switch...
2011
  static void switched_from_rt(struct rq *rq, struct task_struct *p)
cb4698450   Steven Rostedt   sched: RT-balance...
2012
2013
2014
2015
2016
2017
2018
2019
  {
  	/*
  	 * If there are other RT tasks then we will reschedule
  	 * and the scheduling of the other RT tasks will handle
  	 * the balancing. But if we are the last RT task
  	 * we may need to handle the pulling of RT tasks
  	 * now.
  	 */
da0c1e65b   Kirill Tkhai   sched: Add wrappe...
2020
  	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
1158ddb55   Kirill Tkhai   sched/rt: Add res...
2021
  		return;
fd7a4bed1   Peter Zijlstra   sched, rt: Conver...
2022
  	queue_pull_task(rq);
cb4698450   Steven Rostedt   sched: RT-balance...
2023
  }
3d8cbdf86   Rusty Russell   sched: convert lo...
2024

11c785b79   Li Zefan   sched/rt: Make in...
2025
  void __init init_sched_rt_class(void)
3d8cbdf86   Rusty Russell   sched: convert lo...
2026
2027
  {
  	unsigned int i;
029632fbb   Peter Zijlstra   sched: Make separ...
2028
  	for_each_possible_cpu(i) {
eaa958402   Yinghai Lu   cpumask: alloc ze...
2029
  		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
6ca09dfc9   Mike Travis   sched: put back s...
2030
  					GFP_KERNEL, cpu_to_node(i));
029632fbb   Peter Zijlstra   sched: Make separ...
2031
  	}
3d8cbdf86   Rusty Russell   sched: convert lo...
2032
  }
cb4698450   Steven Rostedt   sched: RT-balance...
2033
2034
2035
2036
2037
2038
2039
  #endif /* CONFIG_SMP */
  
  /*
   * When switching a task to RT, we may overload the runqueue
   * with RT tasks. In this case we try to push them off to
   * other runqueues.
   */
da7a735e5   Peter Zijlstra   sched: Fix switch...
2040
  static void switched_to_rt(struct rq *rq, struct task_struct *p)
cb4698450   Steven Rostedt   sched: RT-balance...
2041
  {
cb4698450   Steven Rostedt   sched: RT-balance...
2042
2043
2044
2045
2046
2047
2048
  	/*
  	 * If we are already running, then there's nothing
  	 * that needs to be done. But if we are not running,
  	 * we may need to preempt the currently running task.
  	 * If that currently running task is also an RT task,
  	 * then see if we can move to another run queue.
  	 */
da0c1e65b   Kirill Tkhai   sched: Add wrappe...
2049
  	if (task_on_rq_queued(p) && rq->curr != p) {
cb4698450   Steven Rostedt   sched: RT-balance...
2050
  #ifdef CONFIG_SMP
50605ffbd   Thomas Gleixner   sched/core: Provi...
2051
  		if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded)
fd7a4bed1   Peter Zijlstra   sched, rt: Conver...
2052
2053
2054
  			queue_push_tasks(rq);
  #else
  		if (p->prio < rq->curr->prio)
8875125ef   Kirill Tkhai   sched: Transform ...
2055
  			resched_curr(rq);
fd7a4bed1   Peter Zijlstra   sched, rt: Conver...
2056
  #endif /* CONFIG_SMP */
cb4698450   Steven Rostedt   sched: RT-balance...
2057
2058
2059
2060
2061
2062
2063
  	}
  }
  
  /*
   * Priority of the task has changed. This may cause
   * us to initiate a push or pull.
   */
da7a735e5   Peter Zijlstra   sched: Fix switch...
2064
2065
  static void
  prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
cb4698450   Steven Rostedt   sched: RT-balance...
2066
  {
da0c1e65b   Kirill Tkhai   sched: Add wrappe...
2067
  	if (!task_on_rq_queued(p))
da7a735e5   Peter Zijlstra   sched: Fix switch...
2068
2069
2070
  		return;
  
  	if (rq->curr == p) {
cb4698450   Steven Rostedt   sched: RT-balance...
2071
2072
2073
2074
2075
2076
  #ifdef CONFIG_SMP
  		/*
  		 * If our priority decreases while running, we
  		 * may need to pull tasks to this runqueue.
  		 */
  		if (oldprio < p->prio)
fd7a4bed1   Peter Zijlstra   sched, rt: Conver...
2077
  			queue_pull_task(rq);
cb4698450   Steven Rostedt   sched: RT-balance...
2078
2079
  		/*
  		 * If there's a higher priority task waiting to run
fd7a4bed1   Peter Zijlstra   sched, rt: Conver...
2080
  		 * then reschedule.
cb4698450   Steven Rostedt   sched: RT-balance...
2081
  		 */
fd7a4bed1   Peter Zijlstra   sched, rt: Conver...
2082
  		if (p->prio > rq->rt.highest_prio.curr)
8875125ef   Kirill Tkhai   sched: Transform ...
2083
  			resched_curr(rq);
cb4698450   Steven Rostedt   sched: RT-balance...
2084
2085
2086
  #else
  		/* For UP simply resched on drop of prio */
  		if (oldprio < p->prio)
8875125ef   Kirill Tkhai   sched: Transform ...
2087
  			resched_curr(rq);
e8fa13626   Steven Rostedt   sched: add RT tas...
2088
  #endif /* CONFIG_SMP */
cb4698450   Steven Rostedt   sched: RT-balance...
2089
2090
2091
2092
2093
2094
2095
  	} else {
  		/*
  		 * This task is not running, but if it has a
  		 * higher priority than the currently running task,
  		 * then reschedule.
  		 */
  		if (p->prio < rq->curr->prio)
8875125ef   Kirill Tkhai   sched: Transform ...
2096
  			resched_curr(rq);
cb4698450   Steven Rostedt   sched: RT-balance...
2097
2098
  	}
  }
78f2c7db6   Peter Zijlstra   sched: SCHED_FIFO...
2099
2100
2101
  static void watchdog(struct rq *rq, struct task_struct *p)
  {
  	unsigned long soft, hard;
78d7d407b   Jiri Slaby   kernel core: use ...
2102
2103
2104
  	/* max may change after cur was read; this will be fixed on the next tick */
  	soft = task_rlimit(p, RLIMIT_RTTIME);
  	hard = task_rlimit_max(p, RLIMIT_RTTIME);
78f2c7db6   Peter Zijlstra   sched: SCHED_FIFO...
2105
2106
2107
  
  	if (soft != RLIM_INFINITY) {
  		unsigned long next;
57d2aa00d   Ying Xue   sched/rt: Avoid u...
2108
2109
2110
2111
  		if (p->rt.watchdog_stamp != jiffies) {
  			p->rt.timeout++;
  			p->rt.watchdog_stamp = jiffies;
  		}
78f2c7db6   Peter Zijlstra   sched: SCHED_FIFO...
2112
  		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
5a52dd500   Peter Zijlstra   sched: rt-watchdo...
2113
  		if (p->rt.timeout > next)
f06febc96   Frank Mayhar   timers: fix itime...
2114
  			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
78f2c7db6   Peter Zijlstra   sched: SCHED_FIFO...
2115
2116
  	}
  }
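  /*
   * Editor's note (illustrative sketch): the watchdog above enforces
   * RLIMIT_RTTIME, a budget in microseconds of CPU time consumed by an
   * RT-scheduled task without sleeping.  Userspace opts in with setrlimit();
   * past the soft limit the task gets SIGXCPU, past the hard limit SIGKILL:
   *
   *    #include <sys/resource.h>
   *
   *    static int limit_rt_cpu_time(void)
   *    {
   *            struct rlimit rl = {
   *                    .rlim_cur = 500000,     // SIGXCPU after 500ms
   *                    .rlim_max = 1000000,    // SIGKILL after 1s
   *            };
   *
   *            return setrlimit(RLIMIT_RTTIME, &rl);
   *    }
   */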
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
2117

8f4d37ec0   Peter Zijlstra   sched: high-res p...
2118
  static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
2119
  {
454c79999   Colin Cross   sched/rt: Fix SCH...
2120
  	struct sched_rt_entity *rt_se = &p->rt;
67e2be023   Peter Zijlstra   sched: rt: accoun...
2121
  	update_curr_rt(rq);
78f2c7db6   Peter Zijlstra   sched: SCHED_FIFO...
2122
  	watchdog(rq, p);
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
2123
2124
2125
2126
2127
2128
  	/*
  	 * RR tasks need a special form of timeslice management.
  	 * FIFO tasks have no timeslices.
  	 */
  	if (p->policy != SCHED_RR)
  		return;
fa717060f   Peter Zijlstra   sched: sched_rt_e...
2129
  	if (--p->rt.time_slice)
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
2130
  		return;
ce0dbbbb3   Clark Williams   sched/rt: Add a t...
2131
  	p->rt.time_slice = sched_rr_timeslice;
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
2132

98fbc7985   Dmitry Adamushko   sched: optimize t...
2133
  	/*
e9aa39bb7   Li Bin   sched/rt: Fix tas...
2134
2135
  	 * Requeue to the end of the queue if we (and all of our ancestors) are
  	 * not the only element on the queue.
98fbc7985   Dmitry Adamushko   sched: optimize t...
2136
  	 */
454c79999   Colin Cross   sched/rt: Fix SCH...
2137
2138
2139
  	for_each_sched_rt_entity(rt_se) {
  		if (rt_se->run_list.prev != rt_se->run_list.next) {
  			requeue_task_rt(rq, p, 0);
8aa6f0ebf   Kirill Tkhai   sched/rt: Use res...
2140
  			resched_curr(rq);
454c79999   Colin Cross   sched/rt: Fix SCH...
2141
2142
  			return;
  		}
98fbc7985   Dmitry Adamushko   sched: optimize t...
2143
  	}
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
2144
  }
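  /*
   * Editor's note (illustrative sketch): the requeue-on-expiry path above
   * only applies to SCHED_RR; SCHED_FIFO tasks keep the CPU until they
   * block, yield or are preempted.  A task switches itself to round-robin
   * from userspace with, e.g.:
   *
   *    #include <sched.h>
   *
   *    static int make_me_round_robin(int prio)
   *    {
   *            struct sched_param sp = { .sched_priority = prio };
   *
   *            return sched_setscheduler(0, SCHED_RR, &sp);    // 0 == self
   *    }
   */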
83b699ed2   Srivatsa Vaddagiri   sched: revert rec...
2145
2146
2147
  static void set_curr_task_rt(struct rq *rq)
  {
  	struct task_struct *p = rq->curr;
78becc270   Frederic Weisbecker   sched: Use an acc...
2148
  	p->se.exec_start = rq_clock_task(rq);
917b627d4   Gregory Haskins   sched: create "pu...
2149
2150
2151
  
  	/* The running task is never eligible for pushing */
  	dequeue_pushable_task(rq, p);
83b699ed2   Srivatsa Vaddagiri   sched: revert rec...
2152
  }
6d686f456   H Hartley Sweeten   sched: Don't expo...
2153
  static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
0d721cead   Peter Williams   sched: Simplify s...
2154
2155
2156
2157
2158
  {
  	/*
  	 * Time slice is 0 for SCHED_FIFO tasks
  	 */
  	if (task->policy == SCHED_RR)
ce0dbbbb3   Clark Williams   sched/rt: Add a t...
2159
  		return sched_rr_timeslice;
0d721cead   Peter Williams   sched: Simplify s...
2160
2161
2162
  	else
  		return 0;
  }
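  /*
   * Editor's note (illustrative sketch): this hook is what the
   * sched_rr_get_interval(2) syscall reports, so the effective RR timeslice
   * can be read back from userspace:
   *
   *    #include <sched.h>
   *    #include <stdio.h>
   *    #include <time.h>
   *
   *    static void print_rr_slice(void)
   *    {
   *            struct timespec ts;
   *
   *            if (sched_rr_get_interval(0, &ts) == 0)         // 0 == self
   *                    printf("RR timeslice: %ld.%09ld s\n",
   *                           (long)ts.tv_sec, ts.tv_nsec);
   *    }
   */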
029632fbb   Peter Zijlstra   sched: Make separ...
2163
  const struct sched_class rt_sched_class = {
5522d5d5f   Ingo Molnar   sched: mark sched...
2164
  	.next			= &fair_sched_class,
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
2165
2166
2167
2168
2169
2170
2171
2172
  	.enqueue_task		= enqueue_task_rt,
  	.dequeue_task		= dequeue_task_rt,
  	.yield_task		= yield_task_rt,
  
  	.check_preempt_curr	= check_preempt_curr_rt,
  
  	.pick_next_task		= pick_next_task_rt,
  	.put_prev_task		= put_prev_task_rt,
681f3e685   Peter Williams   sched: isolate SM...
2173
  #ifdef CONFIG_SMP
4ce72a2c0   Li Zefan   sched: add CONFIG...
2174
  	.select_task_rq		= select_task_rq_rt,
6c37067e2   Peter Zijlstra   sched: Change the...
2175
  	.set_cpus_allowed       = set_cpus_allowed_common,
1f11eb6a8   Gregory Haskins   sched: fix cpupri...
2176
2177
  	.rq_online              = rq_online_rt,
  	.rq_offline             = rq_offline_rt,
efbbd05a5   Peter Zijlstra   sched: Add pre an...
2178
  	.task_woken		= task_woken_rt,
cb4698450   Steven Rostedt   sched: RT-balance...
2179
  	.switched_from		= switched_from_rt,
681f3e685   Peter Williams   sched: isolate SM...
2180
  #endif
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
2181

83b699ed2   Srivatsa Vaddagiri   sched: revert rec...
2182
  	.set_curr_task          = set_curr_task_rt,
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
2183
  	.task_tick		= task_tick_rt,
cb4698450   Steven Rostedt   sched: RT-balance...
2184

0d721cead   Peter Williams   sched: Simplify s...
2185
  	.get_rr_interval	= get_rr_interval_rt,
cb4698450   Steven Rostedt   sched: RT-balance...
2186
2187
  	.prio_changed		= prio_changed_rt,
  	.switched_to		= switched_to_rt,
6e998916d   Stanislaw Gruszka   sched/cputime: Fi...
2188
2189
  
  	.update_curr		= update_curr_rt,
bb44e5d1c   Ingo Molnar   sched: cfs core, ...
2190
  };
ada18de2e   Peter Zijlstra   sched: debug: add...
2191
2192
2193
  
  #ifdef CONFIG_SCHED_DEBUG
  extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
029632fbb   Peter Zijlstra   sched: Make separ...
2194
  void print_rt_stats(struct seq_file *m, int cpu)
ada18de2e   Peter Zijlstra   sched: debug: add...
2195
  {
ec514c487   Cheng Xu   sched: Fix rt_rq ...
2196
  	rt_rq_iter_t iter;
ada18de2e   Peter Zijlstra   sched: debug: add...
2197
2198
2199
  	struct rt_rq *rt_rq;
  
  	rcu_read_lock();
ec514c487   Cheng Xu   sched: Fix rt_rq ...
2200
  	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
ada18de2e   Peter Zijlstra   sched: debug: add...
2201
2202
2203
  		print_rt_rq(m, cpu, rt_rq);
  	rcu_read_unlock();
  }
55e12e5e7   Dhaval Giani   sched: make sched...
2204
  #endif /* CONFIG_SCHED_DEBUG */