kernel/sched/cputime.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple CPU accounting cgroup controller
 */
#include <linux/cpufreq_times.h>
#include "sched.h"
#include <trace/hooks/sched.h>

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on the corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and can
 * race with irq/vtime_account on this CPU. We would either get the old
 * or the new value, with a side effect of accounting a slice of irq time
 * to the wrong task when an irq is in progress while we read rq->clock.
 * That is a worthy compromise in place of having locks on each irq in
 * account_system_time.
 */
DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
EXPORT_PER_CPU_SYMBOL_GPL(cpu_irqtime);

static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 0;
}

static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
				  enum cpu_usage_stat idx)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	u64_stats_update_begin(&irqtime->sync);
	cpustat[idx] += delta;
	irqtime->total += delta;
	irqtime->tick_delta += delta;
	u64_stats_update_end(&irqtime->sync);
}

/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
	irqtime->irq_start_time += delta;

	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd thread
	 * in that case, so as not to confuse the scheduler with a special task
	 * that does not consume any time, but still wants to run.
	 */
	if (hardirq_count())
		irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);

	trace_android_rvh_account_irq(curr, cpu, delta);
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);
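
/*
 * Example: the delta bookkeeping above follows a common pattern: read a
 * monotonic clock, charge the elapsed slice to a bucket, then advance the
 * start-of-slice marker. A minimal userspace sketch of that pattern,
 * assuming CLOCK_MONOTONIC as a stand-in for sched_clock_cpu()
 * (illustrative only, not part of this file):
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

struct irqtime_sketch {
	uint64_t irq_start_time;	/* start of the current slice */
	uint64_t total;			/* time charged so far */
};

/* Charge the time elapsed since the last call to the running total. */
static void account_slice(struct irqtime_sketch *it)
{
	uint64_t delta = now_ns() - it->irq_start_time;

	it->irq_start_time += delta;	/* advance the slice marker */
	it->total += delta;
}

int main(void)
{
	struct irqtime_sketch it = { .irq_start_time = now_ns() };

	account_slice(&it);		/* one "irq entry/exit" slice */
	printf("charged %llu ns\n", (unsigned long long)it.total);
	return 0;
}
#endif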

static u64 irqtime_tick_accounted(u64 maxtime)
{
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
	u64 delta;

	delta = min(irqtime->tick_delta, maxtime);
	irqtime->tick_delta -= delta;

	return delta;
}

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime	(0)

static u64 irqtime_tick_accounted(u64 dummy)
{
	return 0;
}

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */

static inline void task_group_account_field(struct task_struct *p, int index,
					    u64 tmp)
{
	/*
	 * Since all updates are sure to touch the root cgroup, we
	 * get ourselves ahead and touch it first. If the root cgroup
	 * is the only cgroup, then nothing else should be necessary.
	 */
	__this_cpu_add(kernel_cpustat.cpustat[index], tmp);

	cgroup_account_cputime_field(p, index, tmp);
}

/*
 * Account user CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in user space since the last update
 */
void account_user_time(struct task_struct *p, u64 cputime)
{
	int index;

	/* Add user time to process. */
	p->utime += cputime;
	account_group_user_time(p, cputime);

	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

	/* Add user time to cpustat. */
	task_group_account_field(p, index, cputime);

	/* Account for user time used */
	acct_account_cputime(p);

	/* Account power usage for user time */
	cpufreq_acct_update_power(p, cputime);
}

/*
 * Account guest CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in virtual machine since the last update
 */
void account_guest_time(struct task_struct *p, u64 cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	/* Add guest time to process. */
	p->utime += cputime;
	account_group_user_time(p, cputime);
	p->gtime += cputime;

	/* Add guest time to cpustat. */
	if (task_nice(p) > 0) {
		cpustat[CPUTIME_NICE] += cputime;
		cpustat[CPUTIME_GUEST_NICE] += cputime;
	} else {
		cpustat[CPUTIME_USER] += cputime;
		cpustat[CPUTIME_GUEST] += cputime;
	}
}
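
/*
 * Example: note that account_guest_time() adds guest time to both the
 * USER/NICE and GUEST/GUEST_NICE buckets, so the "user" column of
 * /proc/stat already contains "guest". A consumer that wants user time
 * excluding guest must subtract. A minimal userspace sketch
 * (illustrative only, not part of this file):
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long user, nice, sys, idle, iowait, irq, softirq;
	unsigned long long steal, guest, guest_nice;
	FILE *f = fopen("/proc/stat", "r");

	if (!f)
		return 1;
	/* First line: aggregate "cpu" counters, in USER_HZ units. */
	if (fscanf(f, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
		   &user, &nice, &sys, &idle, &iowait, &irq, &softirq,
		   &steal, &guest, &guest_nice) == 10) {
		/* user/nice already include guest/guest_nice, see above. */
		printf("user w/o guest: %llu, nice w/o guest_nice: %llu\n",
		       user - guest, nice - guest_nice);
	}
	fclose(f);
	return 0;
}
#endif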

/*
 * Account system CPU time to a process and desired cpustat field
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in kernel space since the last update
 * @index: index of the cpustat field that has to be updated
 */
void account_system_index_time(struct task_struct *p,
			       u64 cputime, enum cpu_usage_stat index)
{
	/* Add system time to process. */
	p->stime += cputime;
	account_group_system_time(p, cputime);

	/* Add system time to cpustat. */
	task_group_account_field(p, index, cputime);

	/* Account for system time used */
	acct_account_cputime(p);

	/* Account power usage for system time */
	cpufreq_acct_update_power(p, cputime);
}

/*
 * Account system CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the CPU time spent in kernel space since the last update
 */
void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime)
{
	int index;

	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
		account_guest_time(p, cputime);
		return;
	}

	if (hardirq_count() - hardirq_offset)
		index = CPUTIME_IRQ;
	else if (in_serving_softirq())
		index = CPUTIME_SOFTIRQ;
	else
		index = CPUTIME_SYSTEM;

	account_system_index_time(p, cputime, index);
}

/*
 * Account for involuntary wait time.
 * @cputime: the CPU time spent in involuntary wait
 */
void account_steal_time(u64 cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	cpustat[CPUTIME_STEAL] += cputime;
}

/*
 * Account for idle time.
 * @cputime: the CPU time spent in idle wait
 */
void account_idle_time(u64 cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	struct rq *rq = this_rq();

	if (atomic_read(&rq->nr_iowait) > 0)
		cpustat[CPUTIME_IOWAIT] += cputime;
	else
		cpustat[CPUTIME_IDLE] += cputime;
}

/*
 * When a guest is interrupted for a longer amount of time, missed clock
 * ticks are not redelivered later. Due to that, this function may on
 * occasion account more time than the calling functions think elapsed.
 */
static __always_inline u64 steal_account_process_time(u64 maxtime)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		u64 steal;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;
		steal = min(steal, maxtime);
		account_steal_time(steal);
		this_rq()->prev_steal_time += steal;

		return steal;
	}
#endif
	return 0;
}
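
/*
 * Example: the clamp-and-advance arithmetic above keeps a monotonic
 * "steal" counter and only ever charges the portion that fits in
 * @maxtime; the remainder stays pending because prev_steal_time only
 * advances by what was actually charged. A minimal userspace sketch of
 * the same arithmetic (illustrative only, not part of this file):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint64_t prev_steal;

/* Charge at most max_ns of the steal accumulated since the last call. */
static uint64_t charge_steal(uint64_t steal_clock_ns, uint64_t max_ns)
{
	uint64_t steal = steal_clock_ns - prev_steal;

	if (steal > max_ns)
		steal = max_ns;
	prev_steal += steal;	/* leftover remains pending */
	return steal;
}

int main(void)
{
	/* 5ms of steal arrived, but the caller only has 3ms to account. */
	printf("charged %llu\n", (unsigned long long)charge_steal(5000000, 3000000));
	/* The remaining 2ms is charged on the next call. */
	printf("charged %llu\n", (unsigned long long)charge_steal(5000000, 4000000));
	return 0;
}
#endif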

/*
 * Account how much elapsed time was spent in steal, irq, or softirq time.
 */
static inline u64 account_other_time(u64 max)
{
	u64 accounted;

	lockdep_assert_irqs_disabled();

	accounted = steal_account_process_time(max);

	if (accounted < max)
		accounted += irqtime_tick_accounted(max - accounted);

	return accounted;
}
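
/*
 * Example: account_other_time() spends a fixed budget `max` across two
 * sources in priority order: steal first, then any irq/softirq backlog
 * with whatever budget remains. A minimal sketch of that budgeted
 * composition, with toy charge functions standing in for
 * steal_account_process_time() and irqtime_tick_accounted()
 * (illustrative only, not part of this file):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint64_t charge_steal(uint64_t max)	/* pretend 3 units pending */
{
	return max < 3 ? max : 3;
}

static uint64_t charge_irq(uint64_t max)	/* pretend 5 units pending */
{
	return max < 5 ? max : 5;
}

/* Spend at most `max` units: steal first, then irq with the remainder. */
static uint64_t account_other(uint64_t max)
{
	uint64_t acc = charge_steal(max);

	if (acc < max)
		acc += charge_irq(max - acc);
	return acc;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)account_other(6));	/* 3 + 3 = 6 */
	printf("%llu\n", (unsigned long long)account_other(10));	/* 3 + 5 = 8 */
	return 0;
}
#endif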

#ifdef CONFIG_64BIT
static inline u64 read_sum_exec_runtime(struct task_struct *t)
{
	return t->se.sum_exec_runtime;
}
#else
static u64 read_sum_exec_runtime(struct task_struct *t)
{
	u64 ns;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(t, &rf);
	ns = t->se.sum_exec_runtime;
	task_rq_unlock(rq, t, &rf);

	return ns;
}
#endif

/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct signal_struct *sig = tsk->signal;
	u64 utime, stime;
	struct task_struct *t;
	unsigned int seq, nextseq;
	unsigned long flags;

	/*
	 * Update current task runtime to account pending time since last
	 * scheduler action or thread_group_cputime() call. This thread group
	 * might have other running tasks on different CPUs, but updating
	 * their runtime can affect syscall performance, so we skip accounting
	 * those pending times and rely only on values updated on tick or
	 * other scheduler action.
	 */
	if (same_thread_group(current, tsk))
		(void) task_sched_runtime(current);

	rcu_read_lock();
	/* Attempt a lockless read on the first round. */
	nextseq = 0;
	do {
		seq = nextseq;
		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
		times->utime = sig->utime;
		times->stime = sig->stime;
		times->sum_exec_runtime = sig->sum_sched_runtime;

		for_each_thread(tsk, t) {
			task_cputime(t, &utime, &stime);
			times->utime += utime;
			times->stime += stime;
			times->sum_exec_runtime += read_sum_exec_runtime(t);
		}
		/* If lockless access failed, take the lock. */
		nextseq = 1;
	} while (need_seqretry(&sig->stats_lock, seq));
	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
	rcu_read_unlock();
}
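
/*
 * Example: the loop above first attempts a lockless seqcount read
 * (nextseq == 0) and only takes stats_lock on a retry (nextseq == 1), so
 * writers are not blocked in the common case. A minimal userspace sketch
 * of the underlying sequence-counter idea, with deliberately simplified
 * memory ordering and without the fall-back-to-lock step
 * (illustrative only, not part of this file):
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic unsigned int seqcount;	/* even: stable, odd: write in flight */
static uint64_t utime, stime;		/* the protected data */

static void writer_update(uint64_t u, uint64_t s)
{
	atomic_fetch_add_explicit(&seqcount, 1, memory_order_release);
	utime = u;
	stime = s;
	atomic_fetch_add_explicit(&seqcount, 1, memory_order_release);
}

static void reader_snapshot(uint64_t *u, uint64_t *s)
{
	unsigned int seq;

	do {
		/* Wait for an even (stable) sequence, then read. */
		do {
			seq = atomic_load_explicit(&seqcount, memory_order_acquire);
		} while (seq & 1);
		*u = utime;
		*s = stime;
		/* Retry if a writer slipped in while we were reading. */
	} while (atomic_load_explicit(&seqcount, memory_order_acquire) != seq);
}

int main(void)
{
	uint64_t u, s;

	writer_update(100, 200);
	reader_snapshot(&u, &s);
	printf("utime=%llu stime=%llu\n",
	       (unsigned long long)u, (unsigned long long)s);
	return 0;
}
#endif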

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the CPU time gets accounted to
 * @user_tick: whether the tick is from userspace
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * The check for hardirq is done both for system and user time as there is
 * no timer going off while we are on hardirq and hence we may never get an
 * opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time and not on irq
 * softirq as those do not count in task exec_runtime any more.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
					 int ticks)
{
	u64 other, cputime = TICK_NSEC * ticks;

	/*
	 * When returning from idle, many ticks can get accounted at
	 * once, including some ticks of steal, irq, and softirq time.
	 * Subtract those ticks from the amount of time accounted to
	 * idle, or potentially user or system time. Due to rounding,
	 * other time can exceed ticks occasionally.
	 */
	other = account_other_time(ULONG_MAX);
	if (other >= cputime)
		return;

	cputime -= other;

	if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time does not get accounted in cpu_softirq_time.
		 * So, we have to handle it separately here.
		 * Also, p->stime needs to be updated for ksoftirqd.
		 */
		account_system_index_time(p, cputime, CPUTIME_SOFTIRQ);
	} else if (user_tick) {
		account_user_time(p, cputime);
	} else if (p == this_rq()->idle) {
		account_idle_time(cputime);
	} else if (p->flags & PF_VCPU) { /* System time or guest time */
		account_guest_time(p, cputime);
	} else {
		account_system_index_time(p, cputime, CPUTIME_SYSTEM);
	}
}

static void irqtime_account_idle_ticks(int ticks)
{
	irqtime_account_process_tick(current, 0, ticks);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) { }
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
						int nr_ticks) { }
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
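
/*
 * Example: the demultiplexing order above can be read as a pure decision
 * function: steal/irq/softirq backlog first, then ksoftirqd, user, idle,
 * guest, and finally plain system time. A minimal sketch of that priority
 * chain (illustrative only, not part of this file; the enum names loosely
 * mirror the cpu_usage_stat buckets):
 */
#if 0
#include <stdio.h>

enum bucket { SOFTIRQ, USER, IDLE, GUEST, SYSTEM };

struct tick_ctx {
	int is_ksoftirqd;	/* tick interrupted ksoftirqd */
	int user_tick;		/* tick arrived in userspace */
	int is_idle;		/* tick interrupted the idle task */
	int is_vcpu;		/* task is running a guest (PF_VCPU) */
};

/* Mirror of the if/else chain in irqtime_account_process_tick(). */
static enum bucket classify_tick(const struct tick_ctx *c)
{
	if (c->is_ksoftirqd)
		return SOFTIRQ;	/* charged via account_system_index_time() */
	if (c->user_tick)
		return USER;
	if (c->is_idle)
		return IDLE;
	if (c->is_vcpu)
		return GUEST;
	return SYSTEM;
}

int main(void)
{
	struct tick_ctx c = { .user_tick = 1 };

	printf("bucket=%d\n", classify_tick(&c));	/* USER */
	return 0;
}
#endif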

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE

# ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_task_switch(struct task_struct *prev)
{
	if (is_idle_task(prev))
		vtime_account_idle(prev);
	else
		vtime_account_kernel(prev);

	vtime_flush(prev);
	arch_vtime_task_switch(prev);
}
# endif

/*
 * Archs that account the whole time spent in the idle task
 * (outside irq) as idle time can rely on this and just implement
 * vtime_account_kernel() and vtime_account_idle(). Archs that
 * have a different meaning of idle time (s390 only includes the
 * time spent by the CPU when it's in low power mode) must override
 * vtime_account().
 */
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_account_irq_enter(struct task_struct *tsk)
{
	if (!in_interrupt() && is_idle_task(tsk))
		vtime_account_idle(tsk);
	else
		vtime_account_kernel(tsk);
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */

void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
		    u64 *ut, u64 *st)
{
	*ut = curr->utime;
	*st = curr->stime;
}

void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	*ut = p->utime;
	*st = p->stime;
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);
	*ut = cputime.utime;
	*st = cputime.stime;
}
EXPORT_SYMBOL_GPL(thread_group_cputime_adjusted);

#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE: */

/*
 * Account a single tick of CPU time.
 * @p: the process that the CPU time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	u64 cputime, steal;

	if (vtime_accounting_enabled_this_cpu())
		return;

	if (sched_clock_irqtime) {
		irqtime_account_process_tick(p, user_tick, 1);
		return;
	}

	cputime = TICK_NSEC;
	steal = steal_account_process_time(ULONG_MAX);

	if (steal >= cputime)
		return;

	cputime -= steal;

	if (user_tick)
		account_user_time(p, cputime);
	else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET))
		account_system_time(p, HARDIRQ_OFFSET, cputime);
	else
		account_idle_time(cputime);
}

/*
 * Account multiple ticks of idle time.
 * @ticks: number of stolen ticks
 */
void account_idle_ticks(unsigned long ticks)
{
	u64 cputime, steal;

	if (sched_clock_irqtime) {
		irqtime_account_idle_ticks(ticks);
		return;
	}

	cputime = ticks * TICK_NSEC;
	steal = steal_account_process_time(ULONG_MAX);

	if (steal >= cputime)
		return;

	cputime -= steal;
	account_idle_time(cputime);
}

/*
 * Adjust tick based cputime random precision against scheduler runtime
 * accounting.
 *
 * Tick based cputime accounting depends on random scheduling timeslices of a
 * task to be interrupted or not by the timer. Depending on these
 * circumstances, the number of these interrupts may be over- or
 * under-estimated, matching the real user and system cputime with a variable
 * precision.
 *
 * Fix this by scaling these tick based values against the total runtime
 * accounted by the CFS scheduler.
 *
 * This code provides the following guarantees:
 *
 *   stime + utime == rtime
 *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
 *
 * Assuming that rtime_i+1 >= rtime_i.
 */
void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
		    u64 *ut, u64 *st)
{
	u64 rtime, stime, utime;
	unsigned long flags;

	/* Serialize concurrent callers such that we can honour our guarantees */
	raw_spin_lock_irqsave(&prev->lock, flags);
	rtime = curr->sum_exec_runtime;

	/*
	 * This is possible under two circumstances:
	 *  - rtime isn't monotonic after all (a bug);
	 *  - we got reordered by the lock.
	 *
	 * In both cases this acts as a filter such that the rest of the code
	 * can assume it is monotonic regardless of anything else.
	 */
	if (prev->stime + prev->utime >= rtime)
		goto out;

	stime = curr->stime;
	utime = curr->utime;

	/*
	 * If either stime or utime is 0, assume all runtime is userspace.
	 * Once a task gets some ticks, the monotonicity code at 'update:'
	 * will ensure things converge to the observed ratio.
	 */
	if (stime == 0) {
		utime = rtime;
		goto update;
	}

	if (utime == 0) {
		stime = rtime;
		goto update;
	}

	stime = mul_u64_u64_div_u64(stime, rtime, stime + utime);

update:
	/*
	 * Make sure stime doesn't go backwards; this preserves monotonicity
	 * for utime because rtime is monotonic.
	 *
	 *  utime_i+1 = rtime_i+1 - stime_i
	 *            = rtime_i+1 - (rtime_i - utime_i)
	 *            = (rtime_i+1 - rtime_i) + utime_i
	 *            >= utime_i
	 */
	if (stime < prev->stime)
		stime = prev->stime;
	utime = rtime - stime;

	/*
	 * Make sure utime doesn't go backwards; this still preserves
	 * monotonicity for stime, analogous argument to above.
	 */
	if (utime < prev->utime) {
		utime = prev->utime;
		stime = rtime - utime;
	}

	prev->stime = stime;
	prev->utime = utime;
out:
	*ut = prev->utime;
	*st = prev->stime;
	raw_spin_unlock_irqrestore(&prev->lock, flags);
}
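
/*
 * Example: the scaling step above distributes the precise runtime rtime
 * in the tick-observed stime:utime ratio, then clamps against the
 * previous snapshot so neither value moves backwards. A worked userspace
 * sketch with small numbers (illustrative only, not part of this file):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

/* Overflow-naive stand-in for mul_u64_u64_div_u64(), fine for small values. */
static uint64_t scale(uint64_t stime, uint64_t rtime, uint64_t total)
{
	return stime * rtime / total;
}

int main(void)
{
	/* Ticks saw 3 system + 1 user tick; precise runtime is 1000 ns. */
	uint64_t stime = 3, utime = 1, rtime = 1000;
	uint64_t prev_stime = 700, prev_utime = 100;

	stime = scale(stime, rtime, stime + utime);	/* 750 */
	if (stime < prev_stime)				/* keep stime monotonic */
		stime = prev_stime;
	utime = rtime - stime;				/* 250, so sum == rtime */
	if (utime < prev_utime) {			/* keep utime monotonic */
		utime = prev_utime;
		stime = rtime - utime;
	}
	printf("stime=%llu utime=%llu\n",
	       (unsigned long long)stime, (unsigned long long)utime);
	return 0;
}
#endif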

void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	struct task_cputime cputime = {
		.sum_exec_runtime = p->se.sum_exec_runtime,
	};

	task_cputime(p, &cputime.utime, &cputime.stime);
	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);
	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
EXPORT_SYMBOL_GPL(thread_group_cputime_adjusted);
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static u64 vtime_delta(struct vtime *vtime)
{
	unsigned long long clock;

	clock = sched_clock();
	if (clock < vtime->starttime)
		return 0;

	return clock - vtime->starttime;
}

static u64 get_vtime_delta(struct vtime *vtime)
{
	u64 delta = vtime_delta(vtime);
	u64 other;

	/*
	 * Unlike tick based timing, vtime based timing never has lost
	 * ticks, and no need for steal time accounting to make up for
	 * lost ticks. Vtime accounts a rounded version of actual
	 * elapsed time. Limit account_other_time to prevent rounding
	 * errors from causing elapsed vtime to go negative.
	 */
	other = account_other_time(delta);
	WARN_ON_ONCE(vtime->state == VTIME_INACTIVE);
	vtime->starttime += delta;

	return delta - other;
}

static void vtime_account_system(struct task_struct *tsk,
				 struct vtime *vtime)
{
	vtime->stime += get_vtime_delta(vtime);
	if (vtime->stime >= TICK_NSEC) {
		account_system_time(tsk, irq_count(), vtime->stime);
		vtime->stime = 0;
	}
}

static void vtime_account_guest(struct task_struct *tsk,
				struct vtime *vtime)
{
	vtime->gtime += get_vtime_delta(vtime);
	if (vtime->gtime >= TICK_NSEC) {
		account_guest_time(tsk, vtime->gtime);
		vtime->gtime = 0;
	}
}

static void __vtime_account_kernel(struct task_struct *tsk,
				   struct vtime *vtime)
{
	/* We might have scheduled out from guest path */
	if (vtime->state == VTIME_GUEST)
		vtime_account_guest(tsk, vtime);
	else
		vtime_account_system(tsk, vtime);
}

void vtime_account_kernel(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	if (!vtime_delta(vtime))
		return;

	write_seqcount_begin(&vtime->seqcount);
	__vtime_account_kernel(tsk, vtime);
	write_seqcount_end(&vtime->seqcount);
}
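
/*
 * Example: vtime_account_system()/vtime_account_guest() batch nanosecond
 * deltas in a local field and flush to the heavier accounting path only
 * once at least a full tick (TICK_NSEC) has accumulated. A minimal sketch
 * of that accumulate-and-flush pattern (illustrative only, not part of
 * this file; flush() is a hypothetical stand-in for account_system_time()):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define TICK_NS 4000000ull	/* 4ms tick, e.g. HZ=250 */

static uint64_t pending;	/* nanoseconds not yet flushed */

static void flush(uint64_t ns)
{
	printf("flushed %llu ns\n", (unsigned long long)ns);
}

/* Accumulate a delta; flush only when a full tick has built up. */
static void account_delta(uint64_t delta_ns)
{
	pending += delta_ns;
	if (pending >= TICK_NS) {
		flush(pending);
		pending = 0;
	}
}

int main(void)
{
	account_delta(1500000);	/* batched, no flush */
	account_delta(3000000);	/* crosses TICK_NS: flushes 4.5ms */
	return 0;
}
#endif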

void vtime_user_enter(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	write_seqcount_begin(&vtime->seqcount);
	vtime_account_system(tsk, vtime);
	vtime->state = VTIME_USER;
	write_seqcount_end(&vtime->seqcount);
}

void vtime_user_exit(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	write_seqcount_begin(&vtime->seqcount);
	vtime->utime += get_vtime_delta(vtime);
	if (vtime->utime >= TICK_NSEC) {
		account_user_time(tsk, vtime->utime);
		vtime->utime = 0;
	}
	vtime->state = VTIME_SYS;
	write_seqcount_end(&vtime->seqcount);
}

void vtime_guest_enter(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	/*
	 * The flags must be updated under the lock with
	 * the vtime_starttime flush and update.
	 * That enforces the right ordering and update sequence
	 * synchronization against the reader (task_gtime())
	 * that can thus safely catch up with a tickless delta.
	 */
	write_seqcount_begin(&vtime->seqcount);
	vtime_account_system(tsk, vtime);
	tsk->flags |= PF_VCPU;
	vtime->state = VTIME_GUEST;
	write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);

void vtime_guest_exit(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	write_seqcount_begin(&vtime->seqcount);
	vtime_account_guest(tsk, vtime);
	tsk->flags &= ~PF_VCPU;
	vtime->state = VTIME_SYS;
	write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);

void vtime_account_idle(struct task_struct *tsk)
{
	account_idle_time(get_vtime_delta(&tsk->vtime));
}

void vtime_task_switch_generic(struct task_struct *prev)
{
	struct vtime *vtime = &prev->vtime;

	write_seqcount_begin(&vtime->seqcount);
	if (vtime->state == VTIME_IDLE)
		vtime_account_idle(prev);
	else
		__vtime_account_kernel(prev, vtime);
	vtime->state = VTIME_INACTIVE;
	vtime->cpu = -1;
	write_seqcount_end(&vtime->seqcount);

	vtime = &current->vtime;

	write_seqcount_begin(&vtime->seqcount);
	if (is_idle_task(current))
		vtime->state = VTIME_IDLE;
	else if (current->flags & PF_VCPU)
		vtime->state = VTIME_GUEST;
	else
		vtime->state = VTIME_SYS;
	vtime->starttime = sched_clock();
	vtime->cpu = smp_processor_id();
	write_seqcount_end(&vtime->seqcount);
}

void vtime_init_idle(struct task_struct *t, int cpu)
{
	struct vtime *vtime = &t->vtime;
	unsigned long flags;

	local_irq_save(flags);
	write_seqcount_begin(&vtime->seqcount);
	vtime->state = VTIME_IDLE;
	vtime->starttime = sched_clock();
	vtime->cpu = cpu;
	write_seqcount_end(&vtime->seqcount);
	local_irq_restore(flags);
}

u64 task_gtime(struct task_struct *t)
{
	struct vtime *vtime = &t->vtime;
	unsigned int seq;
	u64 gtime;

	if (!vtime_accounting_enabled())
		return t->gtime;

	do {
		seq = read_seqcount_begin(&vtime->seqcount);

		gtime = t->gtime;
		if (vtime->state == VTIME_GUEST)
			gtime += vtime->gtime + vtime_delta(vtime);

	} while (read_seqcount_retry(&vtime->seqcount, seq));

	return gtime;
}

/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * cputime snapshot.
 */
void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
{
	struct vtime *vtime = &t->vtime;
	unsigned int seq;
	u64 delta;

	if (!vtime_accounting_enabled()) {
		*utime = t->utime;
		*stime = t->stime;
		return;
	}

	do {
		seq = read_seqcount_begin(&vtime->seqcount);

		*utime = t->utime;
		*stime = t->stime;

		/* Task is sleeping or idle, nothing to add */
		if (vtime->state < VTIME_SYS)
			continue;

		delta = vtime_delta(vtime);

		/*
		 * Task runs either in user (including guest) or kernel space,
		 * add pending nohz time to the right place.
		 */
		if (vtime->state == VTIME_SYS)
			*stime += vtime->stime + delta;
		else
			*utime += vtime->utime + delta;
	} while (read_seqcount_retry(&vtime->seqcount, seq));
}
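
/*
 * Example: note the `continue` above: in a do/while loop it jumps to the
 * loop *condition*, i.e. straight to read_seqcount_retry(), so a sleeping
 * task simply returns the raw utime/stime snapshot once the seqcount is
 * stable. A tiny demonstration of that control flow (illustrative only,
 * not part of this file):
 */
#if 0
#include <stdio.h>

int main(void)
{
	int tries = 0, stable = 0;

	do {
		tries++;
		if (tries < 3)
			continue;	/* jumps to the while() test below */
		stable = 1;		/* only reached on the third pass */
	} while (!stable);

	printf("tries=%d\n", tries);	/* prints 3 */
	return 0;
}
#endif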

static int vtime_state_fetch(struct vtime *vtime, int cpu)
{
	int state = READ_ONCE(vtime->state);

	/*
	 * We raced against a context switch, fetch the
	 * kcpustat task again.
	 */
	if (vtime->cpu != cpu && vtime->cpu != -1)
		return -EAGAIN;

	/*
	 * Two possible things here:
	 * 1) We are seeing the scheduling out task (prev) or any past one.
	 * 2) We are seeing the scheduling in task (next) but it hasn't
	 *    passed through vtime_task_switch() yet so the pending
	 *    cputime of the prev task may not be flushed yet.
	 *
	 * Case 1) is ok but 2) is not. So wait for a safe VTIME state.
	 */
	if (state == VTIME_INACTIVE)
		return -EAGAIN;

	return state;
}

static u64 kcpustat_user_vtime(struct vtime *vtime)
{
	if (vtime->state == VTIME_USER)
		return vtime->utime + vtime_delta(vtime);
	else if (vtime->state == VTIME_GUEST)
		return vtime->gtime + vtime_delta(vtime);
	return 0;
}

static int kcpustat_field_vtime(u64 *cpustat,
				struct task_struct *tsk,
				enum cpu_usage_stat usage,
				int cpu, u64 *val)
{
	struct vtime *vtime = &tsk->vtime;
	unsigned int seq;

	do {
		int state;

		seq = read_seqcount_begin(&vtime->seqcount);

		state = vtime_state_fetch(vtime, cpu);
		if (state < 0)
			return state;

		*val = cpustat[usage];

		/*
		 * Nice VS unnice cputime accounting may be inaccurate if
		 * the nice value has changed since the last vtime update.
		 * But a proper fix would involve interrupting the target on
		 * nice updates, which is a no go on nohz_full (although the
		 * scheduler may still interrupt the target if rescheduling
		 * is needed...)
		 */
		switch (usage) {
		case CPUTIME_SYSTEM:
			if (state == VTIME_SYS)
				*val += vtime->stime + vtime_delta(vtime);
			break;
		case CPUTIME_USER:
			if (task_nice(tsk) <= 0)
				*val += kcpustat_user_vtime(vtime);
			break;
		case CPUTIME_NICE:
			if (task_nice(tsk) > 0)
				*val += kcpustat_user_vtime(vtime);
			break;
		case CPUTIME_GUEST:
			if (state == VTIME_GUEST && task_nice(tsk) <= 0)
				*val += vtime->gtime + vtime_delta(vtime);
			break;
		case CPUTIME_GUEST_NICE:
			if (state == VTIME_GUEST && task_nice(tsk) > 0)
				*val += vtime->gtime + vtime_delta(vtime);
			break;
		default:
			break;
		}
	} while (read_seqcount_retry(&vtime->seqcount, seq));

	return 0;
}

u64 kcpustat_field(struct kernel_cpustat *kcpustat,
		   enum cpu_usage_stat usage, int cpu)
{
	u64 *cpustat = kcpustat->cpustat;
	u64 val = cpustat[usage];
	struct rq *rq;
	int err;

	if (!vtime_accounting_enabled_cpu(cpu))
		return val;

	rq = cpu_rq(cpu);

	for (;;) {
		struct task_struct *curr;

		rcu_read_lock();
		curr = rcu_dereference(rq->curr);
		if (WARN_ON_ONCE(!curr)) {
			rcu_read_unlock();
			return cpustat[usage];
		}

		err = kcpustat_field_vtime(cpustat, curr, usage, cpu, &val);
		rcu_read_unlock();

		if (!err)
			return val;

		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(kcpustat_field);
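
/*
 * Example: kcpustat_field() wraps the seqcount read in an outer retry
 * loop: the helper returns -EAGAIN when it raced with a context switch,
 * and the caller simply relaxes and tries again. A minimal sketch of that
 * error-driven retry pattern (illustrative only, not part of this file;
 * try_fetch() is a hypothetical stand-in for kcpustat_field_vtime()):
 */
#if 0
#include <errno.h>
#include <stdio.h>

/* Fails twice with -EAGAIN, then succeeds: simulates losing a race. */
static int try_fetch(int *val)
{
	static int attempts;

	if (++attempts < 3)
		return -EAGAIN;
	*val = 42;
	return 0;
}

int main(void)
{
	int val, err;

	for (;;) {
		err = try_fetch(&val);
		if (!err)
			break;
		/* the kernel calls cpu_relax() here before retrying */
	}
	printf("val=%d\n", val);
	return 0;
}
#endif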

static int kcpustat_cpu_fetch_vtime(struct kernel_cpustat *dst,
				    const struct kernel_cpustat *src,
				    struct task_struct *tsk, int cpu)
{
	struct vtime *vtime = &tsk->vtime;
	unsigned int seq;

	do {
		u64 *cpustat;
		u64 delta;
		int state;

		seq = read_seqcount_begin(&vtime->seqcount);

		state = vtime_state_fetch(vtime, cpu);
		if (state < 0)
			return state;

		*dst = *src;
		cpustat = dst->cpustat;

		/* Task is sleeping, dead or idle, nothing to add */
		if (state < VTIME_SYS)
			continue;

		delta = vtime_delta(vtime);

		/*
		 * Task runs either in user (including guest) or kernel space,
		 * add pending nohz time to the right place.
		 */
		if (state == VTIME_SYS) {
			cpustat[CPUTIME_SYSTEM] += vtime->stime + delta;
		} else if (state == VTIME_USER) {
			if (task_nice(tsk) > 0)
				cpustat[CPUTIME_NICE] += vtime->utime + delta;
			else
				cpustat[CPUTIME_USER] += vtime->utime + delta;
		} else {
			WARN_ON_ONCE(state != VTIME_GUEST);
			if (task_nice(tsk) > 0) {
				cpustat[CPUTIME_GUEST_NICE] += vtime->gtime + delta;
				cpustat[CPUTIME_NICE] += vtime->gtime + delta;
			} else {
				cpustat[CPUTIME_GUEST] += vtime->gtime + delta;
				cpustat[CPUTIME_USER] += vtime->gtime + delta;
			}
		}
	} while (read_seqcount_retry(&vtime->seqcount, seq));

	return 0;
}

void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
{
	const struct kernel_cpustat *src = &kcpustat_cpu(cpu);
	struct rq *rq;
	int err;

	if (!vtime_accounting_enabled_cpu(cpu)) {
		*dst = *src;
		return;
	}

	rq = cpu_rq(cpu);

	for (;;) {
		struct task_struct *curr;

		rcu_read_lock();
		curr = rcu_dereference(rq->curr);
		if (WARN_ON_ONCE(!curr)) {
			rcu_read_unlock();
			*dst = *src;
			return;
		}

		err = kcpustat_cpu_fetch_vtime(dst, src, curr, cpu);
		rcu_read_unlock();

		if (!err)
			return;

		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(kcpustat_cpu_fetch);
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */