/* kernel/sched/cputime.c */
  #include <linux/export.h>
  #include <linux/sched.h>
  #include <linux/tsacct_kern.h>
  #include <linux/kernel_stat.h>
  #include <linux/static_key.h>
  #include <linux/context_tracking.h>
  #include <linux/sched/cputime.h>
  #include "sched.h"
  
  #ifdef CONFIG_IRQ_TIME_ACCOUNTING
  
  /*
   * There are no locks covering percpu hardirq/softirq time.
   * They are only modified in vtime_account, on the corresponding CPU
   * with interrupts disabled. So, writes are safe.
   * They are read and saved off onto struct rq in update_rq_clock().
   * This may result in another CPU reading this CPU's irq time and can
   * race with irq/vtime_account on this CPU. We would either get old
   * or new value with a side effect of accounting a slice of irq time to the
   * wrong task when an irq is in progress while we read rq->clock. That is a
   * worthy compromise in place of having locks on each irq in account_system_time.
   */
  DEFINE_PER_CPU(struct irqtime, cpu_irqtime);

  static int sched_clock_irqtime;
  
  void enable_sched_clock_irqtime(void)
  {
  	sched_clock_irqtime = 1;
  }
  
  void disable_sched_clock_irqtime(void)
  {
  	sched_clock_irqtime = 0;
  }
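
  /*
   * Note (illustrative): sched_clock_irqtime is driven by the
   * architecture, which enables it when its sched_clock() is
   * fine-grained enough to time irq sections (e.g. x86 once the TSC is
   * deemed stable) and disables it again if that clock turns out to be
   * unstable.
   */
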
  static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
  				  enum cpu_usage_stat idx)
  {
  	u64 *cpustat = kcpustat_this_cpu->cpustat;
  
  	u64_stats_update_begin(&irqtime->sync);
  	cpustat[idx] += delta;
  	irqtime->total += delta;
  	irqtime->tick_delta += delta;
  	u64_stats_update_end(&irqtime->sync);
  }
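
  /*
   * Readers pair with the u64_stats_update_begin()/end() above through
   * the usual u64_stats retry loop. A minimal sketch of the reader side
   * (the real reader is irq_time_read() in "sched.h"):
   *
   *	do {
   *		seq = __u64_stats_fetch_begin(&irqtime->sync);
   *		total = irqtime->total;
   *	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
   */
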
  /*
   * Called before incrementing preempt_count on {soft,}irq_enter
   * and before decrementing preempt_count on {soft,}irq_exit.
   */
  void irqtime_account_irq(struct task_struct *curr)
  {
  	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
  	s64 delta;
  	int cpu;
  
  	if (!sched_clock_irqtime)
  		return;
  	cpu = smp_processor_id();
  	delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
  	irqtime->irq_start_time += delta;

  	/*
  	 * We do not account for softirq time from ksoftirqd here.
  	 * We want to continue accounting softirq time to ksoftirqd thread
  	 * in that case, so as not to confuse the scheduler with a special task
  	 * that does not consume any time, but still wants to run.
  	 */
  	if (hardirq_count())
  		irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
  	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
  		irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
  }
  EXPORT_SYMBOL_GPL(irqtime_account_irq);
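
  /*
   * irqtime_account_irq() is called from the irq entry/exit paths (the
   * account_irq_enter_time()/account_irq_exit_time() helpers in
   * <linux/vtime.h>): each call charges the time elapsed since the
   * previous call to the context that was running in between, per the
   * checks above.
   */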

  static u64 irqtime_tick_accounted(u64 maxtime)
  {
  	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
  	u64 delta;

  	delta = min(irqtime->tick_delta, maxtime);
  	irqtime->tick_delta -= delta;

  	return delta;
  }
  
  #else /* CONFIG_IRQ_TIME_ACCOUNTING */
  
  #define sched_clock_irqtime	(0)
  static u64 irqtime_tick_accounted(u64 dummy)
  {
  	return 0;
  }
  #endif /* !CONFIG_IRQ_TIME_ACCOUNTING */
  
  static inline void task_group_account_field(struct task_struct *p, int index,
  					    u64 tmp)
  {
  	/*
  	 * Since all updates are sure to touch the root cgroup, we
  	 * get ourselves ahead and touch it first. If the root cgroup
  	 * is the only cgroup, then nothing else should be necessary.
  	 *
  	 */
  	__this_cpu_add(kernel_cpustat.cpustat[index], tmp);

  	cpuacct_account_field(p, index, tmp);
  }
  
  /*
   * Account user cpu time to a process.
   * @p: the process that the cpu time gets accounted to
   * @cputime: the cpu time spent in user space since the last update
   */
  void account_user_time(struct task_struct *p, u64 cputime)
  {
  	int index;
  
  	/* Add user time to process. */
  	p->utime += cputime;
  	account_group_user_time(p, cputime);

  	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
  
  	/* Add user time to cpustat. */
  	task_group_account_field(p, index, cputime);
  
  	/* Account for user time used */
  	acct_account_cputime(p);
  }
  
  /*
   * Account guest cpu time to a process.
   * @p: the process that the cpu time gets accounted to
   * @cputime: the cpu time spent in virtual machine since the last update
   */
  void account_guest_time(struct task_struct *p, u64 cputime)
  {
  	u64 *cpustat = kcpustat_this_cpu->cpustat;
  
  	/* Add guest time to process. */
  	p->utime += cputime;
  	account_group_user_time(p, cputime);
  	p->gtime += cputime;
  
  	/* Add guest time to cpustat. */
  	if (task_nice(p) > 0) {
  		cpustat[CPUTIME_NICE] += cputime;
  		cpustat[CPUTIME_GUEST_NICE] += cputime;
  	} else {
  		cpustat[CPUTIME_USER] += cputime;
  		cpustat[CPUTIME_GUEST] += cputime;
  	}
  }
  
  /*
   * Account system cpu time to a process and desired cpustat field
   * @p: the process that the cpu time gets accounted to
   * @cputime: the cpu time spent in kernel space since the last update
   * @index: pointer to cpustat field that has to be updated
   */
  void account_system_index_time(struct task_struct *p,
  			       u64 cputime, enum cpu_usage_stat index)
  {
  	/* Add system time to process. */
  	p->stime += cputime;
  	account_group_system_time(p, cputime);
  
  	/* Add system time to cpustat. */
  	task_group_account_field(p, index, cputime);
  
  	/* Account for system time used */
  	acct_account_cputime(p);
  }
  
  /*
   * Account system cpu time to a process.
   * @p: the process that the cpu time gets accounted to
   * @hardirq_offset: the offset to subtract from hardirq_count()
   * @cputime: the cpu time spent in kernel space since the last update
   */
  void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime)
  {
  	int index;
  
  	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
  		account_guest_time(p, cputime);
  		return;
  	}
  
  	if (hardirq_count() - hardirq_offset)
  		index = CPUTIME_IRQ;
  	else if (in_serving_softirq())
  		index = CPUTIME_SOFTIRQ;
  	else
  		index = CPUTIME_SYSTEM;
  	account_system_index_time(p, cputime, index);
  }
  
  /*
   * Account for involuntary wait time.
   * @cputime: the cpu time spent in involuntary wait
   */
  void account_steal_time(u64 cputime)
  {
  	u64 *cpustat = kcpustat_this_cpu->cpustat;
  	cpustat[CPUTIME_STEAL] += cputime;
  }
  
  /*
   * Account for idle time.
   * @cputime: the cpu time spent in idle wait
   */
  void account_idle_time(u64 cputime)
  {
  	u64 *cpustat = kcpustat_this_cpu->cpustat;
  	struct rq *rq = this_rq();
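  	/* Idle time spent while tasks are blocked on I/O counts as iowait. */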
  
  	if (atomic_read(&rq->nr_iowait) > 0)
  		cpustat[CPUTIME_IOWAIT] += cputime;
  	else
  		cpustat[CPUTIME_IDLE] += cputime;
  }
  /*
   * When a guest is interrupted for a longer amount of time, missed clock
   * ticks are not redelivered later. Due to that, this function may on
   * occasion account more time than the calling functions think elapsed.
   */
  static __always_inline u64 steal_account_process_time(u64 maxtime)
  {
  #ifdef CONFIG_PARAVIRT
  	if (static_key_false(&paravirt_steal_enabled)) {
  		u64 steal;
  
  		steal = paravirt_steal_clock(smp_processor_id());
  		steal -= this_rq()->prev_steal_time;
  		steal = min(steal, maxtime);
  		account_steal_time(steal);
  		this_rq()->prev_steal_time += steal;

  		return steal;
  	}
  #endif
  	return 0;
  }
  /*
   * Account how much elapsed time was spent in steal, irq, or softirq time.
   */
  static inline u64 account_other_time(u64 max)
  {
  	u64 accounted;

  	/* Shall be converted to a lockdep-enabled lightweight check */
  	WARN_ON_ONCE(!irqs_disabled());
  	accounted = steal_account_process_time(max);
  
  	if (accounted < max)
  		accounted += irqtime_tick_accounted(max - accounted);
  
  	return accounted;
  }
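
  /*
   * t->se.sum_exec_runtime is a u64: 64-bit architectures can read it
   * atomically, while on 32-bit a plain read could tear against a
   * concurrent update, hence the rq-locked variant below.
   */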
  #ifdef CONFIG_64BIT
  static inline u64 read_sum_exec_runtime(struct task_struct *t)
  {
  	return t->se.sum_exec_runtime;
  }
  #else
  static u64 read_sum_exec_runtime(struct task_struct *t)
  {
  	u64 ns;
  	struct rq_flags rf;
  	struct rq *rq;
  
  	rq = task_rq_lock(t, &rf);
  	ns = t->se.sum_exec_runtime;
  	task_rq_unlock(rq, t, &rf);
  
  	return ns;
  }
  #endif
  /*
   * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
   * tasks (sum on group iteration) belonging to @tsk's group.
   */
  void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
  {
  	struct signal_struct *sig = tsk->signal;
  	u64 utime, stime;
  	struct task_struct *t;
  	unsigned int seq, nextseq;
  	unsigned long flags;

  	/*
  	 * Update current task runtime to account pending time since last
  	 * scheduler action or thread_group_cputime() call. This thread group
  	 * might have other running tasks on different CPUs, but updating
  	 * their runtime can affect syscall performance, so we skip accounting
  	 * those pending times and rely only on values updated on tick or
  	 * other scheduler action.
  	 */
  	if (same_thread_group(current, tsk))
  		(void) task_sched_runtime(current);
  	rcu_read_lock();
  	/* Attempt a lockless read on the first round. */
  	nextseq = 0;
  	do {
  		seq = nextseq;
  		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
  		times->utime = sig->utime;
  		times->stime = sig->stime;
  		times->sum_exec_runtime = sig->sum_sched_runtime;
  
  		for_each_thread(tsk, t) {
  			task_cputime(t, &utime, &stime);
  			times->utime += utime;
  			times->stime += stime;
  			times->sum_exec_runtime += read_sum_exec_runtime(t);
  		}
  		/* If lockless access failed, take the lock. */
  		nextseq = 1;
  	} while (need_seqretry(&sig->stats_lock, seq));
  	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
  	rcu_read_unlock();
  }
  #ifdef CONFIG_IRQ_TIME_ACCOUNTING
  /*
   * Account a tick to a process and cpustat
   * @p: the process that the cpu time gets accounted to
   * @user_tick: whether the tick is from userspace
   * @rq: the pointer to rq
   *
   * Tick demultiplexing follows the order
   * - pending hardirq update
   * - pending softirq update
   * - user_time
   * - idle_time
   * - system time
   *   - check for guest_time
   *   - else account as system_time
   *
   * The check for hardirq is done for both system and user time, as there is
   * no timer going off while we are on hardirq and hence we may never get an
   * opportunity to update it solely in system time.
   * p->stime and friends are only updated on system time and not on irq or
   * softirq time, as those no longer count in task exec_runtime.
   */
  static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
  					 struct rq *rq, int ticks)
  {
  	u64 other, cputime = TICK_NSEC * ticks;

  	/*
  	 * When returning from idle, many ticks can get accounted at
  	 * once, including some ticks of steal, irq, and softirq time.
  	 * Subtract those ticks from the amount of time accounted to
  	 * idle, or potentially user or system time. Due to rounding,
  	 * other time can exceed ticks occasionally.
  	 */
  	other = account_other_time(ULONG_MAX);
  	if (other >= cputime)
  		return;

  	cputime -= other;

  	if (this_cpu_ksoftirqd() == p) {
  		/*
  	 * ksoftirqd time does not get accounted in cpu_softirq_time.
  		 * So, we have to handle it separately here.
  		 * Also, p->stime needs to be updated for ksoftirqd.
  		 */
  		account_system_index_time(p, cputime, CPUTIME_SOFTIRQ);
  	} else if (user_tick) {
  		account_user_time(p, cputime);
  	} else if (p == rq->idle) {
  		account_idle_time(cputime);
  	} else if (p->flags & PF_VCPU) { /* System time or guest time */
  		account_guest_time(p, cputime);
  	} else {
  		account_system_index_time(p, cputime, CPUTIME_SYSTEM);
  	}
  }
  
  static void irqtime_account_idle_ticks(int ticks)
  {
  	struct rq *rq = this_rq();
  	irqtime_account_process_tick(current, 0, rq, ticks);
  }
  #else /* CONFIG_IRQ_TIME_ACCOUNTING */
  static inline void irqtime_account_idle_ticks(int ticks) {}
  static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
  						struct rq *rq, int nr_ticks) {}
  #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
  /*
   * Use precise platform statistics if available:
   */
  #ifdef CONFIG_VIRT_CPU_ACCOUNTING

  #ifndef __ARCH_HAS_VTIME_TASK_SWITCH
  void vtime_common_task_switch(struct task_struct *prev)
  {
  	if (is_idle_task(prev))
  		vtime_account_idle(prev);
  	else
  		vtime_account_system(prev);
  	vtime_flush(prev);
  	arch_vtime_task_switch(prev);
  }
  #endif

  #endif /* CONFIG_VIRT_CPU_ACCOUNTING */
  
  
  #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
  /*
   * Archs that account the whole time spent in the idle task
   * (outside irq) as idle time can rely on this and just implement
   * vtime_account_system() and vtime_account_idle(). Archs that
   * have a different meaning of idle time (s390 only includes the
   * time spent by the CPU when it's in low power mode) must override
   * vtime_account().
   */
  #ifndef __ARCH_HAS_VTIME_ACCOUNT
  void vtime_account_irq_enter(struct task_struct *tsk)
  {
  	if (!in_interrupt() && is_idle_task(tsk))
  		vtime_account_idle(tsk);
  	else
  		vtime_account_system(tsk);
  }
  EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
  #endif /* __ARCH_HAS_VTIME_ACCOUNT */

  void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
  {
  	*ut = p->utime;
  	*st = p->stime;
  }
  EXPORT_SYMBOL_GPL(task_cputime_adjusted);

  void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
  {
  	struct task_cputime cputime;

  	thread_group_cputime(p, &cputime);
  
  	*ut = cputime.utime;
  	*st = cputime.stime;
  }
  #else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
  /*
   * Account a single tick of cpu time.
   * @p: the process that the cpu time gets accounted to
   * @user_tick: indicates if the tick is a user or a system tick
   */
  void account_process_tick(struct task_struct *p, int user_tick)
  {
  	u64 cputime, steal;
  	struct rq *rq = this_rq();

  	if (vtime_accounting_cpu_enabled())
  		return;
  
  	if (sched_clock_irqtime) {
  		irqtime_account_process_tick(p, user_tick, rq, 1);
  		return;
  	}
  	cputime = TICK_NSEC;
  	steal = steal_account_process_time(ULONG_MAX);
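  	/*
  	 * Illustrative: with HZ=1000, cputime starts as one million ns; if
  	 * the host stole at least a full tick, that tick has already been
  	 * accounted as steal time and nothing is charged to p below.
  	 */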

  	if (steal >= cputime)
  		return;

  	cputime -= steal;

  	if (user_tick)
  		account_user_time(p, cputime);
  	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
  		account_system_time(p, HARDIRQ_OFFSET, cputime);
  	else
  		account_idle_time(cputime);
  }

  /*
   * Account multiple ticks of idle time.
   * @ticks: number of stolen ticks
   */
  void account_idle_ticks(unsigned long ticks)
  {
  	u64 cputime, steal;

  	if (sched_clock_irqtime) {
  		irqtime_account_idle_ticks(ticks);
  		return;
  	}
  	cputime = ticks * TICK_NSEC;
  	steal = steal_account_process_time(ULONG_MAX);
  
  	if (steal >= cputime)
  		return;
  
  	cputime -= steal;
  	account_idle_time(cputime);
  }

  /*
   * Perform (stime * rtime) / total, but avoid multiplication overflow by
   * losing precision when the numbers are big.
   */
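  /*
   * Worked example (illustrative): stime = 3e9, rtime = 5e9, total = 6e9.
   * total exceeds 32 bits, so one drop_precision pass halves rtime and
   * total to 2.5e9 and 3e9; after the swap that keeps rtime the larger
   * value, everything fits in 32 bits and the 32x32->64 multiply plus
   * 64/32 divide yield 2.5e9 * 3e9 / 3e9 = 2.5e9, matching the exact
   * 3e9 * 5e9 / 6e9.
   */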
  static u64 scale_stime(u64 stime, u64 rtime, u64 total)
  {
  	u64 scaled;

  	for (;;) {
  		/* Make sure "rtime" is the bigger of stime/rtime */
  		if (stime > rtime)
  			swap(rtime, stime);
  
  		/* Make sure 'total' fits in 32 bits */
  		if (total >> 32)
  			goto drop_precision;
  
  		/* Does rtime (and thus stime) fit in 32 bits? */
  		if (!(rtime >> 32))
  			break;
  
  		/* Can we just balance rtime/stime rather than dropping bits? */
  		if (stime >> 31)
  			goto drop_precision;
  
  		/* We can grow stime and shrink rtime and try to make them both fit */
  		stime <<= 1;
  		rtime >>= 1;
  		continue;
  
  drop_precision:
  		/* We drop from rtime, it has more bits than stime */
  		rtime >>= 1;
  		total >>= 1;
  	}

  	/*
  	 * Make sure gcc understands that this is a 32x32->64 multiply,
  	 * followed by a 64/32->64 divide.
  	 */
  	scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
  	return scaled;
  }
  /*
   * Adjust tick based cputime random precision against scheduler runtime
   * accounting.
   *
   * Tick based cputime accounting depends on the random scheduling timeslices of a
   * task to be interrupted or not by the timer.  Depending on these
   * circumstances, the number of these interrupts may be over or
   * under-optimistic, matching the real user and system cputime with a variable
   * precision.
   *
   * Fix this by scaling these tick based values against the total runtime
   * accounted by the CFS scheduler.
   *
   * This code provides the following guarantees:
   *
   *   stime + utime == rtime
   *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
   *
   * Assuming that rtime_i+1 >= rtime_i.
   */
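  /*
   * Illustrative run: rtime = 10, tick samples stime = 4 and utime = 4,
   * prev = {.stime = 2, .utime = 3}. scale_stime(4, 10, 8) = 5, so stime
   * becomes 5 and utime = rtime - stime = 5; both monotonicity clamps
   * pass and the reported (utime, stime) = (5, 5) sums to rtime.
   */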
  static void cputime_adjust(struct task_cputime *curr,
  			   struct prev_cputime *prev,
  			   u64 *ut, u64 *st)
  {
  	u64 rtime, stime, utime;
  	unsigned long flags;

  	/* Serialize concurrent callers such that we can honour our guarantees */
  	raw_spin_lock_irqsave(&prev->lock, flags);
  	rtime = curr->sum_exec_runtime;

  	/*
  	 * This is possible under two circumstances:
  	 *  - rtime isn't monotonic after all (a bug);
  	 *  - we got reordered by the lock.
  	 *
  	 * In both cases this acts as a filter such that the rest of the code
  	 * can assume it is monotonic regardless of anything else.
  	 */
  	if (prev->stime + prev->utime >= rtime)
  		goto out;
  	stime = curr->stime;
  	utime = curr->utime;
  	/*
  	 * If either stime or utime is 0, assume all runtime is userspace.
  	 * Once a task gets some ticks, the monotonicity code at 'update:'
  	 * will ensure things converge to the observed ratio.
  	 */
  	if (stime == 0) {
  		utime = rtime;
  		goto update;
  	}

  	if (utime == 0) {
  		stime = rtime;
  		goto update;
  	}
  
  	stime = scale_stime(stime, rtime, stime + utime);
  
  update:
  	/*
  	 * Make sure stime doesn't go backwards; this preserves monotonicity
  	 * for utime because rtime is monotonic.
  	 *
  	 *  utime_i+1 = rtime_i+1 - stime_i
  	 *            = rtime_i+1 - (rtime_i - utime_i)
  	 *            = (rtime_i+1 - rtime_i) + utime_i
  	 *            >= utime_i
  	 */
  	if (stime < prev->stime)
  		stime = prev->stime;
  	utime = rtime - stime;
  
  	/*
  	 * Make sure utime doesn't go backwards; this still preserves
  	 * monotonicity for stime, analogous argument to above.
  	 */
  	if (utime < prev->utime) {
  		utime = prev->utime;
  		stime = rtime - utime;
  	}

  	prev->stime = stime;
  	prev->utime = utime;
  out:
  	*ut = prev->utime;
  	*st = prev->stime;
  	raw_spin_unlock_irqrestore(&prev->lock, flags);
  }

  void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
  {
  	struct task_cputime cputime = {
  		.sum_exec_runtime = p->se.sum_exec_runtime,
  	};
  	task_cputime(p, &cputime.utime, &cputime.stime);
  	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
  }
  EXPORT_SYMBOL_GPL(task_cputime_adjusted);

  void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
  {
  	struct task_cputime cputime;
  
  	thread_group_cputime(p, &cputime);
  	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
  }
  #endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
  
  #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
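
  /*
   * Nohz-full vtime bookkeeping: writers (the context switch and
   * user/guest/kernel transition hooks below) serialize updates to
   * struct vtime with vtime->seqcount, while lockless readers such as
   * task_gtime() and task_cputime() retry on a concurrent write.
   */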
  static u64 vtime_delta(struct vtime *vtime)
  {
  	unsigned long long clock;

  	clock = sched_clock();
  	if (clock < vtime->starttime)
  		return 0;

  	return clock - vtime->starttime;
  }
  static u64 get_vtime_delta(struct vtime *vtime)
  {
  	u64 delta = vtime_delta(vtime);
  	u64 other;

  	/*
  	 * Unlike tick based timing, vtime based timing never has lost
  	 * ticks, and has no need for steal time accounting to make up for
  	 * lost ticks. Vtime accounts a rounded version of actual
  	 * elapsed time. Limit account_other_time to prevent rounding
  	 * errors from causing elapsed vtime to go negative.
  	 */
  	other = account_other_time(delta);
  	WARN_ON_ONCE(vtime->state == VTIME_INACTIVE);
  	vtime->starttime += delta;

  	return delta - other;
  }
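
  /*
   * Deltas are accumulated in vtime->utime/stime/gtime and only flushed
   * to the real accounting once a full TICK_NSEC has built up, bounding
   * the flush rate on nohz_full CPUs.
   */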
  static void __vtime_account_system(struct task_struct *tsk,
  				   struct vtime *vtime)
  {
  	vtime->stime += get_vtime_delta(vtime);
  	if (vtime->stime >= TICK_NSEC) {
  		account_system_time(tsk, irq_count(), vtime->stime);
  		vtime->stime = 0;
  	}
  }
  
  static void vtime_account_guest(struct task_struct *tsk,
  				struct vtime *vtime)
  {
  	vtime->gtime += get_vtime_delta(vtime);
  	if (vtime->gtime >= TICK_NSEC) {
  		account_guest_time(tsk, vtime->gtime);
  		vtime->gtime = 0;
  	}
  }
  void vtime_account_system(struct task_struct *tsk)
  {
  	struct vtime *vtime = &tsk->vtime;
  
  	if (!vtime_delta(vtime))
  		return;
  	write_seqcount_begin(&vtime->seqcount);
  	/* We might have scheduled out from guest path */
  	if (current->flags & PF_VCPU)
  		vtime_account_guest(tsk, vtime);
  	else
  		__vtime_account_system(tsk, vtime);
  	write_seqcount_end(&vtime->seqcount);
  }

  void vtime_user_enter(struct task_struct *tsk)
  {
  	struct vtime *vtime = &tsk->vtime;
  
  	write_seqcount_begin(&vtime->seqcount);
  	__vtime_account_system(tsk, vtime);
  	vtime->state = VTIME_USER;
  	write_seqcount_end(&vtime->seqcount);
  }
  void vtime_user_exit(struct task_struct *tsk)
  {
  	struct vtime *vtime = &tsk->vtime;
  
  	write_seqcount_begin(&vtime->seqcount);
  	vtime->utime += get_vtime_delta(vtime);
  	if (vtime->utime >= TICK_NSEC) {
  		account_user_time(tsk, vtime->utime);
  		vtime->utime = 0;
  	}
  	vtime->state = VTIME_SYS;
  	write_seqcount_end(&vtime->seqcount);
  }
  
  void vtime_guest_enter(struct task_struct *tsk)
  {
  	struct vtime *vtime = &tsk->vtime;
  	/*
  	 * The flags must be updated under the lock with
  	 * the vtime_starttime flush and update.
  	 * That enforces the right ordering and update sequence
  	 * synchronization against the reader (task_gtime())
  	 * that can thus safely catch up with a tickless delta.
  	 */
  	write_seqcount_begin(&vtime->seqcount);
  	__vtime_account_system(tsk, vtime);
  	current->flags |= PF_VCPU;
  	write_seqcount_end(&vtime->seqcount);
  }
  EXPORT_SYMBOL_GPL(vtime_guest_enter);
  
  void vtime_guest_exit(struct task_struct *tsk)
  {
  	struct vtime *vtime = &tsk->vtime;
  
  	write_seqcount_begin(&vtime->seqcount);
  	vtime_account_guest(tsk, vtime);
  	current->flags &= ~PF_VCPU;
  	write_seqcount_end(&vtime->seqcount);
  }
  EXPORT_SYMBOL_GPL(vtime_guest_exit);
  
  void vtime_account_idle(struct task_struct *tsk)
  {
  	account_idle_time(get_vtime_delta(&tsk->vtime));
  }

  void arch_vtime_task_switch(struct task_struct *prev)
  {
  	struct vtime *vtime = &prev->vtime;

  	write_seqcount_begin(&vtime->seqcount);
  	vtime->state = VTIME_INACTIVE;
  	write_seqcount_end(&vtime->seqcount);
  
  	vtime = &current->vtime;
  
  	write_seqcount_begin(&vtime->seqcount);
  	vtime->state = VTIME_SYS;
  	vtime->starttime = sched_clock();
  	write_seqcount_end(&vtime->seqcount);
  }
  void vtime_init_idle(struct task_struct *t, int cpu)
  {
  	struct vtime *vtime = &t->vtime;
  	unsigned long flags;
  	local_irq_save(flags);
  	write_seqcount_begin(&vtime->seqcount);
  	vtime->state = VTIME_SYS;
  	vtime->starttime = sched_clock();
  	write_seqcount_end(&vtime->seqcount);
  	local_irq_restore(flags);
  }
  u64 task_gtime(struct task_struct *t)
  {
  	struct vtime *vtime = &t->vtime;
  	unsigned int seq;
  	u64 gtime;

  	if (!vtime_accounting_enabled())
  		return t->gtime;
  	do {
  		seq = read_seqcount_begin(&vtime->seqcount);
  
  		gtime = t->gtime;
  		if (vtime->state == VTIME_SYS && t->flags & PF_VCPU)
  			gtime += vtime->gtime + vtime_delta(vtime);

  	} while (read_seqcount_retry(&vtime->seqcount, seq));
  
  	return gtime;
  }
  
  /*
   * Fetch cputime raw values from fields of task_struct and
   * add up the pending nohz execution time since the last
   * cputime snapshot.
   */
  void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
  {
  	struct vtime *vtime = &t->vtime;
  	unsigned int seq;
  	u64 delta;

  	if (!vtime_accounting_enabled()) {
  		*utime = t->utime;
  		*stime = t->stime;
  		return;
  	}

  	do {
  		seq = read_seqcount_begin(&vtime->seqcount);

  		*utime = t->utime;
  		*stime = t->stime;
  
  		/* Task is sleeping, nothing to add */
  		if (vtime->state == VTIME_INACTIVE || is_idle_task(t))
  			continue;
  		delta = vtime_delta(vtime);
  
  		/*
  		 * Task runs either in user or kernel space, add pending nohz time to
  		 * the right place.
  		 */
  		if (vtime->state == VTIME_USER || t->flags & PF_VCPU)
  			*utime += vtime->utime + delta;
  		else if (vtime->state == VTIME_SYS)
  			*stime += vtime->stime + delta;
  	} while (read_seqcount_retry(&vtime->seqcount, seq));
  }
  #endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */