Commit d281918d7c135c555d9cebcf73d4320efa8177dc
1 parent
eb59449400
sched: remove 'now' use from assignments
Change all 'now' timestamp uses in assignments to rq->clock. (This is an identity transformation that causes no functionality change: every such new rq->clock use is necessarily preceded by an update_rq_clock() call.) Signed-off-by: Ingo Molnar <mingo@elte.hu>
Showing 3 changed files with 17 additions and 16 deletions (side-by-side diff)
kernel/sched.c
... | ... | @@ -788,8 +788,8 @@ |
788 | 788 | u64 start; |
789 | 789 | |
790 | 790 | start = ls->load_update_start; |
791 | - ls->load_update_start = now; | |
792 | - ls->delta_stat += now - start; | |
791 | + ls->load_update_start = rq->clock; | |
792 | + ls->delta_stat += rq->clock - start; | |
793 | 793 | /* |
794 | 794 | * Stagger updates to ls->delta_fair. Very frequent updates |
795 | 795 | * can be expensive. |
... | ... | @@ -1979,8 +1979,8 @@ |
1979 | 1979 | exec_delta64 = ls->delta_exec + 1; |
1980 | 1980 | ls->delta_exec = 0; |
1981 | 1981 | |
1982 | - sample_interval64 = now - ls->load_update_last; | |
1983 | - ls->load_update_last = now; | |
1982 | + sample_interval64 = this_rq->clock - ls->load_update_last; | |
1983 | + ls->load_update_last = this_rq->clock; | |
1984 | 1984 | |
1985 | 1985 | if ((s64)sample_interval64 < (s64)TICK_NSEC) |
1986 | 1986 | sample_interval64 = TICK_NSEC; |
kernel/sched_fair.c
... | ... | @@ -333,7 +333,7 @@ |
333 | 333 | * since the last time we changed load (this cannot |
334 | 334 | * overflow on 32 bits): |
335 | 335 | */ |
336 | - delta_exec = (unsigned long)(now - curr->exec_start); | |
336 | + delta_exec = (unsigned long)(rq_of(cfs_rq)->clock - curr->exec_start); | |
337 | 337 | |
338 | 338 | curr->delta_exec += delta_exec; |
339 | 339 | |
340 | 340 | |
... | ... | @@ -341,14 +341,14 @@ |
341 | 341 | __update_curr(cfs_rq, curr, now); |
342 | 342 | curr->delta_exec = 0; |
343 | 343 | } |
344 | - curr->exec_start = now; | |
344 | + curr->exec_start = rq_of(cfs_rq)->clock; | |
345 | 345 | } |
346 | 346 | |
347 | 347 | static inline void |
348 | 348 | update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now) |
349 | 349 | { |
350 | 350 | se->wait_start_fair = cfs_rq->fair_clock; |
351 | - schedstat_set(se->wait_start, now); | |
351 | + schedstat_set(se->wait_start, rq_of(cfs_rq)->clock); | |
352 | 352 | } |
353 | 353 | |
354 | 354 | /* |
... | ... | @@ -421,7 +421,8 @@ |
421 | 421 | { |
422 | 422 | unsigned long delta_fair = se->delta_fair_run; |
423 | 423 | |
424 | - schedstat_set(se->wait_max, max(se->wait_max, now - se->wait_start)); | |
424 | + schedstat_set(se->wait_max, max(se->wait_max, | |
425 | + rq_of(cfs_rq)->clock - se->wait_start)); | |
425 | 426 | |
426 | 427 | if (unlikely(se->load.weight != NICE_0_LOAD)) |
427 | 428 | delta_fair = calc_weighted(delta_fair, se->load.weight, |
... | ... | @@ -470,7 +471,7 @@ |
470 | 471 | /* |
471 | 472 | * We are starting a new run period: |
472 | 473 | */ |
473 | - se->exec_start = now; | |
474 | + se->exec_start = rq_of(cfs_rq)->clock; | |
474 | 475 | } |
475 | 476 | |
476 | 477 | /* |
... | ... | @@ -545,7 +546,7 @@ |
545 | 546 | |
546 | 547 | #ifdef CONFIG_SCHEDSTATS |
547 | 548 | if (se->sleep_start) { |
548 | - u64 delta = now - se->sleep_start; | |
549 | + u64 delta = rq_of(cfs_rq)->clock - se->sleep_start; | |
549 | 550 | |
550 | 551 | if ((s64)delta < 0) |
551 | 552 | delta = 0; |
... | ... | @@ -557,7 +558,7 @@ |
557 | 558 | se->sum_sleep_runtime += delta; |
558 | 559 | } |
559 | 560 | if (se->block_start) { |
560 | - u64 delta = now - se->block_start; | |
561 | + u64 delta = rq_of(cfs_rq)->clock - se->block_start; | |
561 | 562 | |
562 | 563 | if ((s64)delta < 0) |
563 | 564 | delta = 0; |
564 | 565 | |
... | ... | @@ -599,9 +600,9 @@ |
599 | 600 | struct task_struct *tsk = task_of(se); |
600 | 601 | |
601 | 602 | if (tsk->state & TASK_INTERRUPTIBLE) |
602 | - se->sleep_start = now; | |
603 | + se->sleep_start = rq_of(cfs_rq)->clock; | |
603 | 604 | if (tsk->state & TASK_UNINTERRUPTIBLE) |
604 | - se->block_start = now; | |
605 | + se->block_start = rq_of(cfs_rq)->clock; | |
605 | 606 | } |
606 | 607 | cfs_rq->wait_runtime -= se->wait_runtime; |
607 | 608 | #endif |
kernel/sched_rt.c
... | ... | @@ -15,14 +15,14 @@ |
15 | 15 | if (!task_has_rt_policy(curr)) |
16 | 16 | return; |
17 | 17 | |
18 | - delta_exec = now - curr->se.exec_start; | |
18 | + delta_exec = rq->clock - curr->se.exec_start; | |
19 | 19 | if (unlikely((s64)delta_exec < 0)) |
20 | 20 | delta_exec = 0; |
21 | 21 | |
22 | 22 | schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec)); |
23 | 23 | |
24 | 24 | curr->se.sum_exec_runtime += delta_exec; |
25 | - curr->se.exec_start = now; | |
25 | + curr->se.exec_start = rq->clock; | |
26 | 26 | } |
27 | 27 | |
28 | 28 | static void |
... | ... | @@ -89,7 +89,7 @@ |
89 | 89 | queue = array->queue + idx; |
90 | 90 | next = list_entry(queue->next, struct task_struct, run_list); |
91 | 91 | |
92 | - next->se.exec_start = now; | |
92 | + next->se.exec_start = rq->clock; | |
93 | 93 | |
94 | 94 | return next; |
95 | 95 | } |