kernel/sched/cputime.c

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/tsacct_kern.h>
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
#include <linux/context_tracking.h> |
#include "sched.h"


#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in other CPU reading this CPU's irq time and can
 * race with irq/vtime_account on this CPU. We would either get old
 * or new value with a side effect of accounting a slice of irq time to wrong
 * task when irq is in progress while we read rq->clock. That is a worthy
 * compromise in place of having locks on each irq in account_system_time.
 */
DEFINE_PER_CPU(u64, cpu_hardirq_time);
DEFINE_PER_CPU(u64, cpu_softirq_time);

static DEFINE_PER_CPU(u64, irq_start_time);
static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 0;
}

#ifndef CONFIG_64BIT
DEFINE_PER_CPU(seqcount_t, irq_time_seq);
#endif /* CONFIG_64BIT */

/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr) |
{
	unsigned long flags;
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
	__this_cpu_add(irq_start_time, delta);

	irq_time_write_begin();
	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd thread
	 * in that case, so as not to confuse the scheduler with a special task
	 * that does not consume any time, but still wants to run.
	 */
	if (hardirq_count())
		__this_cpu_add(cpu_hardirq_time, delta);
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		__this_cpu_add(cpu_softirq_time, delta);

	irq_time_write_end();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(irqtime_account_irq); |

static int irqtime_account_hi_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_hardirq_time);
	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
		ret = 1;
	local_irq_restore(flags);
	return ret;
}

static int irqtime_account_si_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_softirq_time);
	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
		ret = 1;
	local_irq_restore(flags);
	return ret;
}

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime	(0)

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */

static inline void task_group_account_field(struct task_struct *p, int index,
					    u64 tmp)
{
	/*
	 * Since all updates are sure to touch the root cgroup, we
	 * get ourselves ahead and touch it first. If the root cgroup
	 * is the only cgroup, then nothing else should be necessary.
	 */
	__get_cpu_var(kernel_cpustat).cpustat[index] += tmp;
	cpuacct_account_field(p, index, tmp);
}

/*
 * Account user cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_user_time(struct task_struct *p, cputime_t cputime,
		       cputime_t cputime_scaled)
{
	int index;

	/* Add user time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);

	index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

	/* Add user time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for user time used */
	acct_account_cputime(p);
}

/*
 * Account guest cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in virtual machine since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
static void account_guest_time(struct task_struct *p, cputime_t cputime,
			       cputime_t cputime_scaled)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	/* Add guest time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);
	p->gtime += cputime;

	/* Add guest time to cpustat. */
	if (TASK_NICE(p) > 0) {
		cpustat[CPUTIME_NICE] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
	} else {
		cpustat[CPUTIME_USER] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST] += (__force u64) cputime;
	}
}

/*
 * Account system cpu time to a process and desired cpustat field
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 * @index: index of the cpustat field that has to be updated
 */
static inline
void __account_system_time(struct task_struct *p, cputime_t cputime,
			   cputime_t cputime_scaled, int index)
{
	/* Add system time to process. */
	p->stime += cputime;
	p->stimescaled += cputime_scaled;
	account_group_system_time(p, cputime);

	/* Add system time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for system time used */
	acct_account_cputime(p);
}

/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_system_time(struct task_struct *p, int hardirq_offset,
			 cputime_t cputime, cputime_t cputime_scaled)
{
	int index;

	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
		account_guest_time(p, cputime, cputime_scaled);
		return;
	}

	if (hardirq_count() - hardirq_offset)
		index = CPUTIME_IRQ;
	else if (in_serving_softirq())
		index = CPUTIME_SOFTIRQ;
	else
		index = CPUTIME_SYSTEM;

	__account_system_time(p, cputime, cputime_scaled, index);
}

/*
 * Account for involuntary wait time.
 * @cputime: the cpu time spent in involuntary wait
 */
void account_steal_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	cpustat[CPUTIME_STEAL] += (__force u64) cputime;
}

/*
 * Account for idle time.
 * @cputime: the cpu time spent in idle wait
 */
void account_idle_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	struct rq *rq = this_rq();

	if (atomic_read(&rq->nr_iowait) > 0)
		cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
	else
		cpustat[CPUTIME_IDLE] += (__force u64) cputime;
}

static __always_inline bool steal_account_process_tick(void)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		u64 steal, st = 0;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;

		st = steal_ticks(steal);
		this_rq()->prev_steal_time += st * TICK_NSEC;

		account_steal_time(st);
		return st;
	}
#endif
	return false;
}
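/*
 * Worked example (illustrative; assumes HZ=1000 so TICK_NSEC is 1,000,000 ns,
 * and that steal_ticks() rounds the nanosecond delta down to whole ticks):
 * a paravirt steal delta of 2,500,000 ns would be accounted as 2 ticks of
 * steal time, and prev_steal_time only advances by 2 * TICK_NSEC, so the
 * remaining 500,000 ns are carried over to a later tick.
 */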

/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct signal_struct *sig = tsk->signal;
	cputime_t utime, stime;
	struct task_struct *t;

	times->utime = sig->utime;
	times->stime = sig->stime;
	times->sum_exec_runtime = sig->sum_sched_runtime;

	rcu_read_lock();
	/* make sure we can trust tsk->thread_group list */
	if (!likely(pid_alive(tsk)))
		goto out;

	t = tsk;
	do {
		task_cputime(t, &utime, &stime);
		times->utime += utime;
		times->stime += stime;
		times->sum_exec_runtime += task_sched_runtime(t);
	} while_each_thread(tsk, t);
out:
	rcu_read_unlock();
}

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the cpu time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * The check for hardirq is done for both system and user time, as there is
 * no timer going off while we are in a hardirq and hence we may never get an
 * opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time and not on irq or
 * softirq, as those do not count in task exec_runtime any more.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
					 struct rq *rq)
{
	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	if (steal_account_process_tick())
		return;

	if (irqtime_account_hi_update()) {
		cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
	} else if (irqtime_account_si_update()) {
		cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
	} else if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time does not get accounted in cpu_softirq_time.
		 * So, we have to handle it separately here.
		 * Also, p->stime needs to be updated for ksoftirqd.
		 */
		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
					CPUTIME_SOFTIRQ);
	} else if (user_tick) {
		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
	} else if (p == rq->idle) {
		account_idle_time(cputime_one_jiffy);
	} else if (p->flags & PF_VCPU) {
		/* System time or guest time */
		account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
	} else {
		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
					CPUTIME_SYSTEM);
	}
}

static void irqtime_account_idle_ticks(int ticks)
{
	int i;
	struct rq *rq = this_rq();

	for (i = 0; i < ticks; i++)
		irqtime_account_process_tick(current, 0, rq);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) {}
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
						struct rq *rq) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_task_switch(struct task_struct *prev)
{
	if (!vtime_accounting_enabled())
		return;

	if (is_idle_task(prev))
		vtime_account_idle(prev);
	else
		vtime_account_system(prev);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	vtime_account_user(prev);
#endif
	arch_vtime_task_switch(prev);
}
#endif

/*
 * Archs that account the whole time spent in the idle task
 * (outside irq) as idle time can rely on this and just implement
 * vtime_account_system() and vtime_account_idle(). Archs that
 * have other meaning of the idle time (s390 only includes the
 * time spent by the CPU when it's in low power mode) must override
 * vtime_account().
 */
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_account_irq_enter(struct task_struct *tsk)
{
	if (!vtime_accounting_enabled())
		return;

	if (!in_interrupt()) {
		/*
		 * If we interrupted user, context_tracking_in_user()
		 * is 1 because context tracking doesn't hook
		 * on irq entry/exit. This way we know if
		 * we need to flush user time on kernel entry.
		 */
		if (context_tracking_in_user()) {
			vtime_account_user(tsk);
			return;
		}

		if (is_idle_task(tsk)) {
			vtime_account_idle(tsk);
			return;
		}
	}
	vtime_account_system(tsk);
} |
EXPORT_SYMBOL_GPL(vtime_account_irq_enter); |
#endif /* __ARCH_HAS_VTIME_ACCOUNT */ |
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */


#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	*ut = p->utime;
	*st = p->stime;
}

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);

	*ut = cputime.utime;
	*st = cputime.stime;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
/*
 * Account a single tick of cpu time.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
	struct rq *rq = this_rq();

	if (vtime_accounting_enabled())
		return;

	if (sched_clock_irqtime) {
		irqtime_account_process_tick(p, user_tick, rq);
		return;
	}

	if (steal_account_process_tick())
		return;

	if (user_tick)
		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
		account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
				    one_jiffy_scaled);
	else
		account_idle_time(cputime_one_jiffy);
}

/*
 * Account multiple ticks of steal time.
 * @p: the process from which the cpu time has been stolen
 * @ticks: number of stolen ticks
 */
void account_steal_ticks(unsigned long ticks)
{
	account_steal_time(jiffies_to_cputime(ticks));
}

/*
 * Account multiple ticks of idle time.
 * @ticks: number of idle ticks
 */
void account_idle_ticks(unsigned long ticks)
{
	if (sched_clock_irqtime) {
		irqtime_account_idle_ticks(ticks);
		return;
	}

	account_idle_time(jiffies_to_cputime(ticks));
}
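/*
 * Usage sketch (assuming the usual NO_HZ idle caller): a CPU whose tick was
 * stopped for 100 jiffies of idle time can catch up in one call,
 * account_idle_ticks(100), instead of accounting one jiffy per missed tick.
 */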

/*
 * Perform (stime * rtime) / total, but avoid multiplication overflow by
 * losing precision when the numbers are big.
 */
static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
{
	u64 scaled;

	for (;;) {
		/* Make sure "rtime" is the bigger of stime/rtime */
		if (stime > rtime) {
			u64 tmp = rtime; rtime = stime; stime = tmp;
		}

		/* Make sure 'total' fits in 32 bits */
		if (total >> 32)
			goto drop_precision;

		/* Does rtime (and thus stime) fit in 32 bits? */
		if (!(rtime >> 32))
			break;

		/* Can we just balance rtime/stime rather than dropping bits? */
		if (stime >> 31)
			goto drop_precision;

		/* We can grow stime and shrink rtime and try to make them both fit */
		stime <<= 1;
		rtime >>= 1;
		continue;

drop_precision:
		/* We drop from rtime, it has more bits than stime */
		rtime >>= 1;
		total >>= 1;
	}

	/*
	 * Make sure gcc understands that this is a 32x32->64 multiply,
	 * followed by a 64/32->64 divide.
	 */
	scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
	return (__force cputime_t) scaled;
}
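/*
 * Worked example for scale_stime() (illustrative numbers): with stime = 100,
 * rtime = 2^33 and total = 300, rtime does not fit in 32 bits, so the loop
 * doubles stime while halving rtime twice (stime = 400, rtime = 2^31).
 * 400 * 2^31 / 300 equals the exact 100 * 2^33 / 300, so no precision is
 * lost; bits are only dropped (rtime and total shifted right) when total
 * exceeds 32 bits or stime can no longer be doubled safely.
 */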

/*
 * Adjust tick based cputime random precision against scheduler
 * runtime accounting.
 */
static void cputime_adjust(struct task_cputime *curr,
			   struct cputime *prev,
			   cputime_t *ut, cputime_t *st)
{
	cputime_t rtime, stime, utime;

	if (vtime_accounting_enabled()) {
		*ut = curr->utime;
		*st = curr->stime;
		return;
	}

	/*
	 * Tick based cputime accounting depends on whether the random
	 * scheduling timeslices of a task are interrupted or not by the
	 * timer.  Depending on these circumstances, the number of these
	 * interrupts may be over- or under-estimated, matching the real
	 * user and system cputime with a variable precision.
	 *
	 * Fix this by scaling these tick based values against the total
	 * runtime accounted by the CFS scheduler.
	 */
	rtime = nsecs_to_cputime(curr->sum_exec_runtime);

	/*
	 * Update userspace visible utime/stime values only if actual execution
	 * time is bigger than already exported. Note that this can happen
	 * if we provided bigger values earlier due to scaling inaccuracy
	 * on big numbers.
	 */
	if (prev->stime + prev->utime >= rtime)
		goto out;

	stime = curr->stime;
	utime = curr->utime;

	if (utime == 0) {
		stime = rtime;
	} else if (stime == 0) {
		utime = rtime;
	} else {
		cputime_t total = stime + utime;

		stime = scale_stime((__force u64)stime,
				    (__force u64)rtime, (__force u64)total);
		utime = rtime - stime;
	}

	/*
	 * If the tick based count grows faster than the scheduler one,
	 * the result of the scaling may go backward.
	 * Let's enforce monotonicity.
	 */
	prev->stime = max(prev->stime, stime);
	prev->utime = max(prev->utime, utime);

out:
	*ut = prev->utime;
	*st = prev->stime;
}
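/*
 * Worked example for cputime_adjust() (illustrative numbers): if the tick
 * counters report utime = 6 and stime = 2 (total = 8) but the scheduler has
 * only accounted rtime = 4, the adjusted split becomes
 * stime = scale_stime(2, 4, 8) = 1 and utime = 4 - 1 = 3, preserving the
 * observed 1:3 system/user ratio.  The max() against prev->stime/prev->utime
 * then keeps both values from moving backwards between two reads.
 */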

void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime = {
		.sum_exec_runtime = p->se.sum_exec_runtime,
	};

	task_cputime(p, &cputime.utime, &cputime.stime);
	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}

/*
 * Must be called with siglock held.
 */
void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);
	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static unsigned long long vtime_delta(struct task_struct *tsk)
{
	unsigned long long clock;

	clock = local_clock();
	if (clock < tsk->vtime_snap)
		return 0;

	return clock - tsk->vtime_snap;
}

static cputime_t get_vtime_delta(struct task_struct *tsk)
{
	unsigned long long delta = vtime_delta(tsk);

	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING);
	tsk->vtime_snap += delta;

	/* CHECKME: always safe to convert nsecs to cputime? */
	return nsecs_to_cputime(delta);
}
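/*
 * Note on the snapshot scheme above: vtime_delta() measures only the time
 * elapsed since tsk->vtime_snap, and get_vtime_delta() advances the snapshot
 * by the same delta it converts and returns, so each period of CPU time is
 * handed to the accounting helpers below at most once.
 */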

static void __vtime_account_system(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
}

void vtime_account_system(struct task_struct *tsk)
{
	if (!vtime_accounting_enabled())
		return;

	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_account_irq_exit(struct task_struct *tsk)
{
	if (!vtime_accounting_enabled())
		return;

	write_seqlock(&tsk->vtime_seqlock);
	if (context_tracking_in_user())
		tsk->vtime_snap_whence = VTIME_USER;
	__vtime_account_system(tsk);
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_account_user(struct task_struct *tsk)
{
	cputime_t delta_cpu;

	if (!vtime_accounting_enabled())
		return;

	delta_cpu = get_vtime_delta(tsk);

	write_seqlock(&tsk->vtime_seqlock);
	tsk->vtime_snap_whence = VTIME_SYS;
	account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_user_enter(struct task_struct *tsk)
{
	if (!vtime_accounting_enabled())
		return;

	write_seqlock(&tsk->vtime_seqlock);
	tsk->vtime_snap_whence = VTIME_USER;
	__vtime_account_system(tsk);
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_guest_enter(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	current->flags |= PF_VCPU;
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_guest_exit(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	current->flags &= ~PF_VCPU;
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_account_idle(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_idle_time(delta_cpu);
}

bool vtime_accounting_enabled(void)
{
	return context_tracking_active();
}

void arch_vtime_task_switch(struct task_struct *prev)
{
	write_seqlock(&prev->vtime_seqlock);
	prev->vtime_snap_whence = VTIME_SLEEPING;
	write_sequnlock(&prev->vtime_seqlock);

	write_seqlock(&current->vtime_seqlock);
	current->vtime_snap_whence = VTIME_SYS;
	current->vtime_snap = sched_clock_cpu(smp_processor_id());
	write_sequnlock(&current->vtime_seqlock);
}

void vtime_init_idle(struct task_struct *t, int cpu)
{
	unsigned long flags;

	write_seqlock_irqsave(&t->vtime_seqlock, flags);
	t->vtime_snap_whence = VTIME_SYS;
	t->vtime_snap = sched_clock_cpu(cpu);
	write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
}

cputime_t task_gtime(struct task_struct *t)
{
	unsigned int seq;
	cputime_t gtime;

	do {
		seq = read_seqbegin(&t->vtime_seqlock);

		gtime = t->gtime;
		if (t->flags & PF_VCPU)
			gtime += vtime_delta(t);

	} while (read_seqretry(&t->vtime_seqlock, seq));

	return gtime;
}

/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * cputime snapshot.
 */
static void
fetch_task_cputime(struct task_struct *t,
		   cputime_t *u_dst, cputime_t *s_dst,
		   cputime_t *u_src, cputime_t *s_src,
		   cputime_t *udelta, cputime_t *sdelta)
{
	unsigned int seq;
	unsigned long long delta;

	do {
		*udelta = 0;
		*sdelta = 0;

		seq = read_seqbegin(&t->vtime_seqlock);

		if (u_dst)
			*u_dst = *u_src;
		if (s_dst)
			*s_dst = *s_src;

		/* Task is sleeping, nothing to add */
		if (t->vtime_snap_whence == VTIME_SLEEPING ||
		    is_idle_task(t))
			continue;

		delta = vtime_delta(t);

		/*
		 * Task runs either in user or kernel space, add pending nohz time to
		 * the right place.
		 */
		if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
			*udelta = delta;
		} else {
			if (t->vtime_snap_whence == VTIME_SYS)
				*sdelta = delta;
		}
	} while (read_seqretry(&t->vtime_seqlock, seq));
}


void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
{
	cputime_t udelta, sdelta;

	fetch_task_cputime(t, utime, stime,
			   &t->utime, &t->stime, &udelta, &sdelta);
	if (utime)
		*utime += udelta;
	if (stime)
		*stime += sdelta;
}

void task_cputime_scaled(struct task_struct *t,
			 cputime_t *utimescaled, cputime_t *stimescaled)
{
	cputime_t udelta, sdelta;

	fetch_task_cputime(t, utimescaled, stimescaled,
			   &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
	if (utimescaled)
		*utimescaled += cputime_to_scaled(udelta);
	if (stimescaled)
		*stimescaled += cputime_to_scaled(sdelta);
}
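/*
 * Usage sketch (mirroring thread_group_cputime() earlier in this file):
 *
 *	cputime_t utime, stime;
 *
 *	task_cputime(t, &utime, &stime);
 *
 * Either output pointer may be NULL when only one value is needed, since
 * task_cputime() and fetch_task_cputime() check the destination pointers
 * before writing through them.
 */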
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */ |