kernel/sched_idletask.c

  /*
   * idle-task scheduling class.
   *
   * (NOTE: these are not related to SCHED_IDLE tasks which are
   *  handled in sched_fair.c)
   */
  #ifdef CONFIG_SMP
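/*
 * There is one idle task per CPU and it never migrates, so wakeup
 * placement is trivial: keep the task on the CPU it is already on.
 */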
  static int
  select_task_rq_idle(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
  {
	return task_cpu(p); /* IDLE tasks are never migrated */
  }
  #endif /* CONFIG_SMP */
  /*
   * Idle tasks are unconditionally rescheduled:
   */
  static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
  {
  	resched_task(rq->idle);
  }
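/*
 * A runqueue's idle task is always runnable, so this cannot come up
 * empty: account the switch to idle and hand the idle task back.
 */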
  static struct task_struct *pick_next_task_idle(struct rq *rq)
  {
  	schedstat_inc(rq, sched_goidle);
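	/* update the load-average accounting for this CPU going idle: */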
  	calc_load_account_idle(rq);
  	return rq->idle;
  }
  
  /*
   * It is not legal to sleep in the idle task - print a warning
   * message if some code attempts to do it:
   */
  static void
  dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
  {
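	/*
	 * Drop rq->lock across the printk()/dump_stack(): the console
	 * path may itself need to wake a task (e.g. klogd), which would
	 * try to retake this lock.
	 */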
  	raw_spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
  	dump_stack();
  	raw_spin_lock_irq(&rq->lock);
  }
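/*
 * The idle task keeps no scheduling state that must be saved when it
 * is switched out, so put_prev is a no-op:
 */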
  static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
  {
  }
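/* No timeslice or fairness bookkeeping to maintain for the idle task: */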
  static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
  {
  }
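/* Nothing to set up when the idle task becomes current: */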
  static void set_curr_task_idle(struct rq *rq)
  {
  }
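/*
 * A task was switched into the idle class; ordinarily only the
 * per-CPU idle task ever lives here, hence the surprise below:
 */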
  static void switched_to_idle(struct rq *rq, struct task_struct *p,
  			     int running)
  {
  	/* Can this actually happen?? */
  	if (running)
  		resched_task(rq->curr);
  	else
  		check_preempt_curr(rq, p, 0);
  }
  
  static void prio_changed_idle(struct rq *rq, struct task_struct *p,
  			      int oldprio, int running)
  {
	/* This can happen for hot-plug CPUs */
  
  	/*
  	 * Reschedule if we are currently running on this runqueue and
  	 * our priority decreased, or if we are not currently running on
  	 * this runqueue and our priority is higher than the current's
  	 */
  	if (running) {
  		if (p->prio > oldprio)
  			resched_task(rq->curr);
  	} else
  		check_preempt_curr(rq, p, 0);
  }
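/* The idle class has no round-robin interval to report: */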
  static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
  {
  	return 0;
  }
  /*
   * Simple, special scheduling class for the per-CPU idle tasks:
   */
  static const struct sched_class idle_sched_class = {
  	/* .next is NULL */
  	/* no enqueue/yield_task for idle tasks */
  
  	/* dequeue is not valid, we print a debug message there: */
  	.dequeue_task		= dequeue_task_idle,
  
  	.check_preempt_curr	= check_preempt_curr_idle,
  
  	.pick_next_task		= pick_next_task_idle,
  	.put_prev_task		= put_prev_task_idle,
  #ifdef CONFIG_SMP
  	.select_task_rq		= select_task_rq_idle,
  #endif

	.set_curr_task		= set_curr_task_idle,
  	.task_tick		= task_tick_idle,

  	.get_rr_interval	= get_rr_interval_idle,
  	.prio_changed		= prio_changed_idle,
  	.switched_to		= switched_to_idle,
  	/* no .task_new for idle tasks */
  };
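
/*
 * The core scheduler walks the sched_class chain from highest priority
 * down; idle_sched_class is the tail of that chain (its .next is NULL),
 * so pick_next_task_idle() runs only when no other class has a runnable
 * task.
 */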