kernel/sched/stats.h
  
  #ifdef CONFIG_SCHEDSTATS

  /*
   * Expects runqueue lock to be held for atomicity of update
   */
  static inline void
  rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
  {
  	if (rq) {
  		rq->rq_sched_info.run_delay += delta;
  		rq->rq_sched_info.pcount++;
  	}
  }
  
  /*
   * Expects runqueue lock to be held for atomicity of update
   */
  static inline void
  rq_sched_info_depart(struct rq *rq, unsigned long long delta)
  {
  	if (rq)
  		rq->rq_cpu_time += delta;
  }
  
  static inline void
  rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
  {
  	if (rq)
  		rq->rq_sched_info.run_delay += delta;
  }
  # define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
  # define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
  # define schedstat_set(var, val)	do { var = (val); } while (0)
  #else /* !CONFIG_SCHEDSTATS */
  static inline void
  rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
  {}
  static inline void
  rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
  {}
  static inline void
  rq_sched_info_depart(struct rq *rq, unsigned long long delta)
  {}
  # define schedstat_inc(rq, field)	do { } while (0)
  # define schedstat_add(rq, field, amt)	do { } while (0)
  # define schedstat_set(var, val)	do { } while (0)
  #endif
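
  /*
   * A minimal sketch of how these wrappers are typically used by the
   * scheduler core; the yld_count field is assumed to exist on struct rq
   * when schedstats are enabled, as in sys_sched_yield() of this era.
   * With CONFIG_SCHEDSTATS disabled, all three macros compile away.
   */
  static inline void schedstat_usage_sketch(struct rq *rq)
  {
  	/* count a sched_yield() on this runqueue; no-op without schedstats */
  	schedstat_inc(rq, yld_count);
  }
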
  #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
  static inline void sched_info_reset_dequeued(struct task_struct *t)
  {
  	t->sched_info.last_queued = 0;
  }
  /*
   * We are interested in knowing how long it was from the *first* time a
   * task was queued to the time that it finally hit a cpu; we call this routine
   * from dequeue_task() to account for possible rq->clock skew across cpus. The
   * delta taken on each cpu would annul the skew.
   */
  static inline void sched_info_dequeued(struct task_struct *t)
  {
  	unsigned long long now = task_rq(t)->clock, delta = 0;
  
  	if (unlikely(sched_info_on()))
  		if (t->sched_info.last_queued)
  			delta = now - t->sched_info.last_queued;
  	sched_info_reset_dequeued(t);
  	t->sched_info.run_delay += delta;
  
  	rq_sched_info_dequeued(task_rq(t), delta);
  }
  
  /*
   * Called when a task finally hits the cpu.  We can now calculate how
   * long it was waiting to run.  We also note when it began so that we
   * can keep stats on how long its timeslice is.
   */
  static void sched_info_arrive(struct task_struct *t)
  {
  	unsigned long long now = task_rq(t)->clock, delta = 0;
  
  	if (t->sched_info.last_queued)
  		delta = now - t->sched_info.last_queued;
  	sched_info_reset_dequeued(t);
  	t->sched_info.run_delay += delta;
  	t->sched_info.last_arrival = now;
  	t->sched_info.pcount++;
  
  	rq_sched_info_arrive(task_rq(t), delta);
  }
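
  /*
   * A minimal sketch of the bookkeeping above, using hypothetical clock
   * values in nanoseconds; "si" stands in for t->sched_info.  The task is
   * stamped once when first queued, and the accumulated wait is charged
   * when it finally gets the cpu.
   */
  static inline void sched_info_delay_sketch(void)
  {
  	struct { unsigned long long last_queued, last_arrival, run_delay; } si = { 0, 0, 0 };

  	if (!si.last_queued)			/* enqueue_task() at clock == 100 */
  		si.last_queued = 100;

  	si.run_delay += 400 - si.last_queued;	/* cpu granted at clock == 400: +300 */
  	si.last_queued = 0;			/* cleared, as sched_info_reset_dequeued() does */
  	si.last_arrival = 400;
  }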
  
  /*
   * This function is only called from enqueue_task(), and it only updates
   * the timestamp if it is not already set.  It's assumed that
   * sched_info_dequeued() will clear that stamp when appropriate.
   */
  static inline void sched_info_queued(struct task_struct *t)
  {
  	if (unlikely(sched_info_on()))
  		if (!t->sched_info.last_queued)
  			t->sched_info.last_queued = task_rq(t)->clock;
  }
  
  /*
   * Called when a process ceases being the active-running process, either
   * voluntarily or involuntarily.  Now we can calculate how long we ran.
   * Also, if the process is still in the TASK_RUNNING state, call
   * sched_info_queued() to mark that it has now again started waiting on
   * the runqueue.
   */
  static inline void sched_info_depart(struct task_struct *t)
  {
  	unsigned long long delta = task_rq(t)->clock -
  					t->sched_info.last_arrival;

  	rq_sched_info_depart(task_rq(t), delta);
  
  	if (t->state == TASK_RUNNING)
  		sched_info_queued(t);
  }
  
  /*
   * Called when tasks are switched involuntarily due, typically, to expiring
   * their time slice.  (This may also be called when switching to or from
   * the idle task.)  We are only called when prev != next.
   */
  static inline void
  __sched_info_switch(struct task_struct *prev, struct task_struct *next)
  {
  	struct rq *rq = task_rq(prev);
  
  	/*
  	 * prev now departs the cpu.  It's not interesting to record
  	 * stats about how efficient we were at scheduling the idle
  	 * process, however.
  	 */
  	if (prev != rq->idle)
  		sched_info_depart(prev);
  
  	if (next != rq->idle)
  		sched_info_arrive(next);
  }
  static inline void
  sched_info_switch(struct task_struct *prev, struct task_struct *next)
  {
  	if (unlikely(sched_info_on()))
  		__sched_info_switch(prev, next);
  }
  #else
  #define sched_info_queued(t)			do { } while (0)
  #define sched_info_reset_dequeued(t)	do { } while (0)
  #define sched_info_dequeued(t)			do { } while (0)
  #define sched_info_switch(t, next)		do { } while (0)
  #endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
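
  /*
   * A minimal sketch, assuming the call sites of this era, of where the
   * hooks above fire: enqueue_task() stamps the task, prepare_task_switch()
   * records departures and arrivals, and dequeue_task() clears the stamp
   * while accounting any accumulated wait.
   */
  static inline void sched_info_callsite_sketch(struct task_struct *prev,
  					      struct task_struct *next)
  {
  	sched_info_queued(next);		/* from enqueue_task() */
  	sched_info_switch(prev, next);		/* from prepare_task_switch() */
  	sched_info_dequeued(prev);		/* from dequeue_task() */
  }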

  /*
   * The following are functions that support scheduler-internal time accounting.
   * These functions are generally called at the timer tick.  None of this depends
   * on CONFIG_SCHEDSTATS.
   */
  /**
   * account_group_user_time - Maintain utime for a thread group.
   *
   * @tsk:	Pointer to task structure.
   * @cputime:	Time value by which to increment the utime field of the
   *		thread_group_cputime structure.
   *
   * If thread group time is being maintained, get the structure for the
   * running CPU and update the utime field there.
   */
  static inline void account_group_user_time(struct task_struct *tsk,
  					   cputime_t cputime)
  {
  	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

  	if (!cputimer->running)
  		return;
  	raw_spin_lock(&cputimer->lock);
  	cputimer->cputime.utime += cputime;
  	raw_spin_unlock(&cputimer->lock);
  }
  
  /**
   * account_group_system_time - Maintain stime for a thread group.
   *
   * @tsk:	Pointer to task structure.
   * @cputime:	Time value by which to increment the stime field of the
   *		thread_group_cputime structure.
   *
   * If thread group time is being maintained, get the structure for the
   * running CPU and update the stime field there.
   */
  static inline void account_group_system_time(struct task_struct *tsk,
  					     cputime_t cputime)
  {
  	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
  
  	if (!cputimer->running)
  		return;

  	raw_spin_lock(&cputimer->lock);
  	cputimer->cputime.stime += cputime;
  	raw_spin_unlock(&cputimer->lock);
  }
  
  /**
   * account_group_exec_runtime - Maintain exec runtime for a thread group.
   *
   * @tsk:	Pointer to task structure.
   * @ns:		Time value by which to increment the sum_exec_runtime field
   *		of the thread_group_cputime structure.
   *
   * If thread group time is being maintained, get the structure for the
   * running CPU and update the sum_exec_runtime field there.
   */
  static inline void account_group_exec_runtime(struct task_struct *tsk,
  					      unsigned long long ns)
  {
  	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
  
  	if (!cputimer->running)
  		return;

  	raw_spin_lock(&cputimer->lock);
  	cputimer->cputime.sum_exec_runtime += ns;
  	raw_spin_unlock(&cputimer->lock);
  }
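
  /*
   * A minimal sketch of the typical callers, assuming the accounting paths
   * of this era: the timer tick feeds utime and stime in through
   * account_user_time() and account_system_time(), while update_curr()
   * feeds sum_exec_runtime.
   */
  static inline void account_group_callsite_sketch(struct task_struct *tsk,
  						 cputime_t cputime,
  						 unsigned long long delta_exec)
  {
  	account_group_user_time(tsk, cputime);		/* from account_user_time() */
  	account_group_system_time(tsk, cputime);	/* from account_system_time() */
  	account_group_exec_runtime(tsk, delta_exec);	/* from update_curr() */
  }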