kernel/sched_idletask.c

  /*
   * idle-task scheduling class.
   *
   * (NOTE: these are not related to SCHED_IDLE tasks which are
   *  handled in sched_fair.c)
   */
  #ifdef CONFIG_SMP
  static int select_task_rq_idle(struct task_struct *p, int sync)
  {
	return task_cpu(p); /* IDLE tasks are never migrated */
  }
  #endif /* CONFIG_SMP */
  /*
   * Idle tasks are unconditionally rescheduled:
   */
  static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int sync)
  {
  	resched_task(rq->idle);
  }
  static struct task_struct *pick_next_task_idle(struct rq *rq)
  {
  	schedstat_inc(rq, sched_goidle);
  	/* adjust the active tasks as we might go into a long sleep */
  	calc_load_account_active(rq);
  	return rq->idle;
  }
  
  /*
   * It is not legal to sleep in the idle task - print a warning
   * message if some code attempts to do it:
   */
  static void
  dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
  {
  	spin_unlock_irq(&rq->lock);
  	printk(KERN_ERR "bad: scheduling from the idle thread!
  ");
  	dump_stack();
  	spin_lock_irq(&rq->lock);
  }
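
/*
 * The idle task keeps no per-class run state, so there is nothing to
 * update when it is switched out:
 */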
  static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
  {
  }
  #ifdef CONFIG_SMP
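/*
 * The idle task never sits on a class run-list, so there is no load
 * for the balancer to move; both hooks report that nothing was moved:
 */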
  static unsigned long
  load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
  		  unsigned long max_load_move,
  		  struct sched_domain *sd, enum cpu_idle_type idle,
  		  int *all_pinned, int *this_best_prio)
  {
  	return 0;
  }
  
  static int
  move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
  		   struct sched_domain *sd, enum cpu_idle_type idle)
  {
  	return 0;
  }
  #endif
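
/*
 * The periodic scheduler tick has no timeslice or accounting to
 * maintain for the idle task, hence the empty hook:
 */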
  static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
  {
  }
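
/*
 * Likewise, nothing needs to be set up when the idle task becomes
 * the current task on a runqueue:
 */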
  static void set_curr_task_idle(struct rq *rq)
  {
  }
  static void switched_to_idle(struct rq *rq, struct task_struct *p,
  			     int running)
  {
  	/* Can this actually happen?? */
  	if (running)
  		resched_task(rq->curr);
  	else
  		check_preempt_curr(rq, p, 0);
  }
  
  static void prio_changed_idle(struct rq *rq, struct task_struct *p,
  			      int oldprio, int running)
  {
	/* This can happen for hot-plugged CPUs */
  
  	/*
  	 * Reschedule if we are currently running on this runqueue and
  	 * our priority decreased, or if we are not currently running on
  	 * this runqueue and our priority is higher than the current's
  	 */
  	if (running) {
  		if (p->prio > oldprio)
  			resched_task(rq->curr);
  	} else
  		check_preempt_curr(rq, p, 0);
  }
  /*
   * Simple, special scheduling class for the per-CPU idle tasks:
   */
  static const struct sched_class idle_sched_class = {
  	/* .next is NULL */
  	/* no enqueue/yield_task for idle tasks */
  
  	/* dequeue is not valid, we print a debug message there: */
  	.dequeue_task		= dequeue_task_idle,
  
  	.check_preempt_curr	= check_preempt_curr_idle,
  
  	.pick_next_task		= pick_next_task_idle,
  	.put_prev_task		= put_prev_task_idle,
  #ifdef CONFIG_SMP
  	.select_task_rq		= select_task_rq_idle,
  	.load_balance		= load_balance_idle,
  	.move_one_task		= move_one_task_idle,
  #endif

  	.set_curr_task          = set_curr_task_idle,
  	.task_tick		= task_tick_idle,
  
  	.prio_changed		= prio_changed_idle,
  	.switched_to		= switched_to_idle,
  	/* no .task_new for idle tasks */
  };
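
For context, the core scheduler consumes this class through its pick loop: kernel/sched.c walks the sched_class list from highest priority downwards until a class returns a task, and because pick_next_task_idle() always returns rq->idle, the walk always terminates. A minimal sketch of that loop, assuming the sched_class_highest helper of this kernel era (illustrative, not a verbatim copy of kernel/sched.c):

static inline struct task_struct *pick_next_task(struct rq *rq)
{
	const struct sched_class *class = sched_class_highest;
	struct task_struct *p;

	for ( ; ; ) {
		/* ask each class in turn, highest priority first */
		p = class->pick_next_task(rq);
		if (p)
			return p;
		/*
		 * Safe to follow .next without a NULL check: the idle
		 * class is last in the list and always yields rq->idle.
		 */
		class = class->next;
	}
}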