Commit d37f761dbd276790f70dcf73a287fde2c3464482

Authored by Frederic Weisbecker
1 parent e80d0a1ae8

cputime: Consolidate cputime adjustment code

task_cputime_adjusted() and thread_group_cputime_adjusted()
essentially share the same code. They just don't use the same
source:

* The first function uses the cputime in the task struct and the
previous adjusted snapshot that ensures monotonicity.

* The second adds the cputime of all tasks in the group and the
previous adjusted snapshot of the whole group from the signal
structure.

Just consolidate the common code that does the adjustment. These
functions just need to fetch the values from the appropriate
source.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>

Showing 3 changed files with 43 additions and 28 deletions Side-by-side Diff

include/linux/sched.h
... ... @@ -434,13 +434,28 @@
434 434 };
435 435  
436 436 /**
  437 + * struct cputime - snapshot of system and user cputime
  438 + * @utime: time spent in user mode
  439 + * @stime: time spent in system mode
  440 + *
  441 + * Gathers a generic snapshot of user and system time.
  442 + */
  443 +struct cputime {
  444 + cputime_t utime;
  445 + cputime_t stime;
  446 +};
  447 +
  448 +/**
437 449 * struct task_cputime - collected CPU time counts
438 450 * @utime: time spent in user mode, in &cputime_t units
439 451 * @stime: time spent in kernel mode, in &cputime_t units
440 452 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
441 453 *
442   - * This structure groups together three kinds of CPU time that are
443   - * tracked for threads and thread groups. Most things considering
  454 + * This is an extension of struct cputime that includes the total runtime
  455 + * spent by the task from the scheduler point of view.
  456 + *
  457 + * As a result, this structure groups together three kinds of CPU time
  458 + * that are tracked for threads and thread groups. Most things considering
444 459 * CPU time want to group these counts together and treat all three
445 460 * of them in parallel.
446 461 */
... ... @@ -581,7 +596,7 @@
581 596 cputime_t gtime;
582 597 cputime_t cgtime;
583 598 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
584   - cputime_t prev_utime, prev_stime;
  599 + struct cputime prev_cputime;
585 600 #endif
586 601 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
587 602 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
... ... @@ -1340,7 +1355,7 @@
1340 1355 cputime_t utime, stime, utimescaled, stimescaled;
1341 1356 cputime_t gtime;
1342 1357 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
1343   - cputime_t prev_utime, prev_stime;
  1358 + struct cputime prev_cputime;
1344 1359 #endif
1345 1360 unsigned long nvcsw, nivcsw; /* context switch counts */
1346 1361 struct timespec start_time; /* monotonic time */
... ... @@ -1222,7 +1222,7 @@
1222 1222 p->utime = p->stime = p->gtime = 0;
1223 1223 p->utimescaled = p->stimescaled = 0;
1224 1224 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
1225   - p->prev_utime = p->prev_stime = 0;
  1225 + p->prev_cputime.utime = p->prev_cputime.stime = 0;
1226 1226 #endif
1227 1227 #if defined(SPLIT_RSS_COUNTING)
1228 1228 memset(&p->rss_stat, 0, sizeof(p->rss_stat));
kernel/sched/cputime.c
... ... @@ -516,14 +516,18 @@
516 516 return (__force cputime_t) temp;
517 517 }
518 518  
519   -void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
  519 +static void cputime_adjust(struct task_cputime *curr,
  520 + struct cputime *prev,
  521 + cputime_t *ut, cputime_t *st)
520 522 {
521   - cputime_t rtime, utime = p->utime, total = utime + p->stime;
  523 + cputime_t rtime, utime, total;
522 524  
  525 + utime = curr->utime;
  526 + total = utime + curr->stime;
523 527 /*
524 528 * Use CFS's precise accounting:
525 529 */
526   - rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
  530 + rtime = nsecs_to_cputime(curr->sum_exec_runtime);
527 531  
528 532 if (total)
529 533 utime = scale_utime(utime, rtime, total);
530 534  
531 535  
532 536  
533 537  
534 538  
... ... @@ -533,37 +537,33 @@
533 537 /*
534 538 * Compare with previous values, to keep monotonicity:
535 539 */
536   - p->prev_utime = max(p->prev_utime, utime);
537   - p->prev_stime = max(p->prev_stime, rtime - p->prev_utime);
  540 + prev->utime = max(prev->utime, utime);
  541 + prev->stime = max(prev->stime, rtime - prev->utime);
538 542  
539   - *ut = p->prev_utime;
540   - *st = p->prev_stime;
  543 + *ut = prev->utime;
  544 + *st = prev->stime;
541 545 }
542 546  
  547 +void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
  548 +{
  549 + struct task_cputime cputime = {
  550 + .utime = p->utime,
  551 + .stime = p->stime,
  552 + .sum_exec_runtime = p->se.sum_exec_runtime,
  553 + };
  554 +
  555 + cputime_adjust(&cputime, &p->prev_cputime, ut, st);
  556 +}
  557 +
543 558 /*
544 559 * Must be called with siglock held.
545 560 */
546 561 void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
547 562 {
548   - struct signal_struct *sig = p->signal;
549 563 struct task_cputime cputime;
550   - cputime_t rtime, utime, total;
551 564  
552 565 thread_group_cputime(p, &cputime);
553   -
554   - total = cputime.utime + cputime.stime;
555   - rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
556   -
557   - if (total)
558   - utime = scale_utime(cputime.utime, rtime, total);
559   - else
560   - utime = rtime;
561   -
562   - sig->prev_utime = max(sig->prev_utime, utime);
563   - sig->prev_stime = max(sig->prev_stime, rtime - sig->prev_utime);
564   -
565   - *ut = sig->prev_utime;
566   - *st = sig->prev_stime;
  566 + cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
567 567 }
568 568 #endif