Commit e6e6685accfa81f509fadfc9624bc7c3862d75c4
Committed by: Avi Kivity
Parent: 747f292583
KVM guest: Steal time accounting
This patch accounts steal time in account_process_tick. If one or more ticks are considered stolen in the current accounting cycle, user/system accounting is skipped. Idle is fine, since the hypervisor does not report steal time if the guest is halted. Accounting steal time from the core scheduler gives us the advantage of direct access to the runqueue data. At a later opportunity, it can be used to tweak cpu power and make the scheduler aware of the time it lost.

[avi: <asm/paravirt.h> doesn't exist on many archs]

Signed-off-by: Glauber Costa <glommer@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Tested-by: Eric B Munson <emunson@mgebm.net>
CC: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
CC: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
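For orientation, here is a minimal user-space sketch of the mechanism described above: the hypervisor exports a cumulative per-cpu steal clock in nanoseconds; each tick the guest samples it, converts the delta into whole ticks, and a nonzero result means the tick is charged as steal rather than user/system time. In this sketch read_steal_clock() and its sample values are invented stand-ins for paravirt_steal_clock(), and HZ=1000 is assumed:

/* Sketch only: read_steal_clock() and the sample values are invented
 * stand-ins for paravirt_steal_clock(). Assumes HZ=1000. */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ           1000ULL
#define TICK_NSEC    (NSEC_PER_SEC / HZ)

static uint64_t samples[] = { 800000, 2400000, 2600000 };
static int sample_idx;

static uint64_t read_steal_clock(void)  /* hypothetical stand-in */
{
        return samples[sample_idx++];
}

int main(void)
{
        uint64_t prev_steal_time = 0;

        for (int tick = 1; tick <= 3; tick++) {
                uint64_t steal = read_steal_clock() - prev_steal_time;
                uint64_t st = steal / TICK_NSEC;    /* whole stolen ticks */

                prev_steal_time += st * TICK_NSEC;  /* keep the remainder */
                printf("tick %d: %llu stolen tick(s)%s\n", tick,
                       (unsigned long long)st,
                       st ? " -> skip user/system accounting" : "");
        }
        return 0;
}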
Showing 1 changed file with 43 additions and 0 deletions
kernel/sched.c
@@ -75,6 +75,9 @@
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
 #include <asm/mutex.h>
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#endif
 
 #include "sched_cpupri.h"
 #include "workqueue_sched.h"
@@ -528,6 +531,9 @@
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
         u64 prev_irq_time;
 #endif
+#ifdef CONFIG_PARAVIRT
+        u64 prev_steal_time;
+#endif
 
         /* calc_load related fields */
         unsigned long calc_load_update;
@@ -1953,6 +1959,18 @@
 }
 EXPORT_SYMBOL_GPL(account_system_vtime);
 
+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+
+#ifdef CONFIG_PARAVIRT
+static inline u64 steal_ticks(u64 steal)
+{
+        if (unlikely(steal > NSEC_PER_SEC))
+                return div_u64(steal, TICK_NSEC);
+
+        return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
+}
+#endif
+
 static void update_rq_clock_task(struct rq *rq, s64 delta)
 {
         s64 irq_delta;
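steal_ticks() picks between two division strategies. __iter_div_u64_rem() (from include/linux/math64.h) divides by repeated subtraction, which avoids a full 64-bit division (expensive on 32-bit architectures) and is cheap as long as the quotient is small; the steal <= NSEC_PER_SEC check bounds the quotient by HZ. Only the unlikely larger delta falls back to div_u64(). A user-space re-creation of the iterative helper, to show the idea (not the kernel's exact code):

/* User-space re-creation of the repeated-subtraction division used on
 * the common path; the kernel's version lives in include/linux/math64.h. */
#include <stdio.h>
#include <stdint.h>

static uint32_t iter_div_u64_rem(uint64_t dividend, uint32_t divisor,
                                 uint64_t *remainder)
{
        uint32_t ret = 0;

        /* Cheap when the quotient is small: at most HZ rounds here,
         * since steal_ticks() only takes this path for
         * steal <= NSEC_PER_SEC. */
        while (dividend >= divisor) {
                dividend -= divisor;
                ret++;
        }
        *remainder = dividend;
        return ret;
}

int main(void)
{
        uint64_t rem;
        /* 3.5 ticks' worth of steal at HZ=1000 (TICK_NSEC=1000000) */
        uint32_t ticks = iter_div_u64_rem(3500000ULL, 1000000, &rem);

        printf("ticks=%u rem=%lluns\n", ticks, (unsigned long long)rem);
        return 0;
}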
@@ -3845,6 +3863,25 @@
         cpustat->idle = cputime64_add(cpustat->idle, cputime64);
 }
 
+static __always_inline bool steal_account_process_tick(void)
+{
+#ifdef CONFIG_PARAVIRT
+        if (static_branch(&paravirt_steal_enabled)) {
+                u64 steal, st = 0;
+
+                steal = paravirt_steal_clock(smp_processor_id());
+                steal -= this_rq()->prev_steal_time;
+
+                st = steal_ticks(steal);
+                this_rq()->prev_steal_time += st * TICK_NSEC;
+
+                account_steal_time(st);
+                return st;
+        }
+#endif
+        return false;
+}
+
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
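Note the bookkeeping: prev_steal_time advances only by whole ticks (st * TICK_NSEC), so sub-tick steal is never lost to rounding; it stays in the delta and accumulates across cycles. The return value relies on C's integer-to-bool conversion: nonzero st means at least one whole tick was stolen. A small simulation of the remainder carry, using an invented steal rate of 0.4 tick per cycle and assuming HZ=1000:

/* Demonstrates that sub-tick steal accumulates: prev_steal_time only
 * advances by whole ticks. The 400us-per-tick steal rate is invented
 * for illustration; assumes TICK_NSEC = 1000000 (HZ=1000). */
#include <stdio.h>
#include <stdint.h>

#define TICK_NSEC 1000000ULL

int main(void)
{
        uint64_t steal_clock = 0, prev_steal_time = 0;

        for (int tick = 1; tick <= 5; tick++) {
                steal_clock += 400000;              /* 0.4 tick stolen per cycle */
                uint64_t steal = steal_clock - prev_steal_time;
                uint64_t st = steal / TICK_NSEC;

                prev_steal_time += st * TICK_NSEC;  /* whole ticks only */
                printf("tick %d: accounted %llu, pending %llu ns\n", tick,
                       (unsigned long long)st,
                       (unsigned long long)(steal_clock - prev_steal_time));
        }
        return 0;
}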
@@ -3876,6 +3913,9 @@
         cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy);
         struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
 
+        if (steal_account_process_tick())
+                return;
+
         if (irqtime_account_hi_update()) {
                 cpustat->irq = cputime64_add(cpustat->irq, tmp);
         } else if (irqtime_account_si_update()) {
@@ -3928,6 +3968,9 @@
                 irqtime_account_process_tick(p, user_tick, rq);
                 return;
         }
+
+        if (steal_account_process_tick())
+                return;
 
         if (user_tick)
                 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
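Both tick paths under !CONFIG_VIRT_CPU_ACCOUNTING (irqtime_account_process_tick() and plain account_process_tick()) check for stolen ticks first and return early, so a tick fully consumed by the hypervisor is never also charged as user or system time. Idle accounting needs no such guard: as the commit message notes, a halted guest accrues no steal time.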