kernel/sched/stats.h

#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
#define schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
#define schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define schedstat_add(var, amt)		do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define schedstat_set(var, val)		do { if (schedstat_enabled()) { var = (val); } } while (0)
#define schedstat_val(var)		(var)
#define schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)

#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
#define schedstat_enabled()		0
#define schedstat_inc(var)		do { } while (0)
#define schedstat_add(var, amt)		do { } while (0)
#define schedstat_set(var, val)		do { } while (0)
#define schedstat_val(var)		0
#define schedstat_val_or_zero(var)	0
#endif /* CONFIG_SCHEDSTATS */
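
/*
 * Usage sketch (illustrative, not part of the original header): scheduler
 * code wraps each statistics update in these macros so the whole path
 * falls away when schedstats are unavailable, e.g.
 *
 *	schedstat_inc(rq->yld_count);
 *	schedstat_add(cfs_rq->exec_clock, delta_exec);
 *
 * With CONFIG_SCHEDSTATS=y the update is still gated at run time by the
 * sched_schedstats static branch; without it, each macro compiles to an
 * empty statement.  The fields above are representative schedstats fields,
 * not a requirement of the macros.
 */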

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}
/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu. We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across cpus; the delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	rq_sched_info_dequeued(rq, delta);
}
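
/*
 * Worked example (illustrative numbers): a task queued on CPU0 at
 * last_queued = 1000 (ns, CPU0's rq clock) and dequeued there at 1400 adds
 * delta = 400 to run_delay here and clears last_queued.  If it is later
 * queued on CPU1 at 2500 and first runs there at 2700 (both per CPU1's rq
 * clock), sched_info_arrive() adds another 200.  Every delta is taken
 * against a single cpu's clock, so skew between the two clocks never
 * enters the sum.
 */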
  
/*
 * Called when a task finally hits the cpu.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), but it only updates
 * the timestamp if it is not already set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
}
  
/*
 * Called when a process ceases being the active-running process involuntarily
 * due, typically, to expiring its time slice (this may also be called when
 * switching to the idle task).  Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) -
					t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice.  (This may also be called when switching to or from
 * the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq,
		    struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the cpu.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}
static inline void
sched_info_switch(struct rq *rq,
		  struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(rq, prev, next);
}
#else
#define sched_info_queued(rq, t)		do { } while (0)
#define sched_info_reset_dequeued(t)	do { } while (0)
#define sched_info_dequeued(rq, t)		do { } while (0)
#define sched_info_depart(rq, t)		do { } while (0)
#define sched_info_arrive(rq, next)		do { } while (0)
#define sched_info_switch(rq, t, next)		do { } while (0)
#endif /* CONFIG_SCHED_INFO */
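
/*
 * Wiring sketch (the call sites live in kernel/sched/core.c and may differ
 * slightly between kernel versions):
 *
 *	enqueue_task()         -> sched_info_queued(rq, p)
 *	dequeue_task()         -> sched_info_dequeued(rq, p)
 *	prepare_task_switch()  -> sched_info_switch(rq, prev, next)
 *
 * last_queued is stamped when a task becomes runnable, the accumulated wait
 * is accounted either when the task is dequeued or when it first gets the
 * cpu, and a context switch departs prev and arrives next in one step.
 */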

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick.  None of this depends
 * on CONFIG_SCHEDSTATS.
 */
/**
 * cputimer_running - return true if cputimer is running
 *
 * @tsk:	Pointer to target task.
 */
static inline bool cputimer_running(struct task_struct *tsk)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(cputimer->running))
		return false;

	/*
	 * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
	 * in __exit_signal(), we won't account to the signal struct further
	 * cputime consumed by that task, even though the task can still be
	 * ticking after __exit_signal().
	 *
	 * In order to keep a consistent behaviour between thread group cputime
	 * and thread group cputimer accounting, let's also ignore the cputime
	 * elapsing after __exit_signal() in any thread group timer running.
	 *
	 * This makes sure that POSIX CPU clocks and timers are synchronized, so
	 * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
	 * clock delta is behind the expiring timer value.
	 */
	if (unlikely(!tsk->sighand))
		return false;

	return true;
}
  
/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;
	atomic64_add(cputime, &cputimer->cputime_atomic.utime);
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.stime);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
}
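
/*
 * Typical callers (sketch, not part of this header): tick-time accounting in
 * kernel/sched/cputime.c feeds account_group_user_time() and
 * account_group_system_time() from account_user_time()/account_system_time(),
 * and update_curr() charges execution time with
 *
 *	account_group_exec_runtime(curtask, delta_exec);
 *
 * so the per-thread-group totals consumed by POSIX CPU timers stay in step
 * with per-task accounting.
 */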