Commit aa483808516ca5cacfa0e5849691f64fec25828e
Committed by: Ingo Molnar
Parent: 305e6835e0
sched: Remove irq time from available CPU power
The idea was suggested by Peter Zijlstra here:
http://marc.info/?l=linux-kernel&m=127476934517534&w=2

irq time is technically not available to the tasks running on the CPU. This patch removes irq time from CPU power, piggybacking on sched_rt_avg_update().

Tested by keeping CPU X busy with a network-intensive task whose irq processing (hard+soft) takes about 75% of a single CPU, on a 4-way system, and then starting seven cycle soakers. Without this change there are two tasks on each CPU. With this change there is a single task on the irq-busy CPU X, and the remaining seven tasks are spread among the other 3 CPUs.

Signed-off-by: Venkatesh Pallipadi <venki@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1286237003-12406-8-git-send-email-venki@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
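For context, here is a rough, self-contained userspace sketch of what the patch wires together: the per-rq irq time delta is fed into the same decaying average as RT time, and scale_rt_power() then subtracts that average from the time budget available to fair tasks. The function names mirror the kernel's, but the bodies, the 500ms averaging period, and the main() scenario are simplified assumptions for illustration only; the sketch also omits the sched_clock_irqtime and sched_feat(NONIRQ_POWER) gates that the patch checks.

#include <stdio.h>
#include <stdint.h>

#define SCHED_LOAD_SHIFT 10
#define SCHED_LOAD_SCALE (1UL << SCHED_LOAD_SHIFT)

/* Stripped-down stand-in for struct rq: only the fields this sketch uses. */
struct rq {
	uint64_t clock;         /* ns */
	uint64_t age_stamp;     /* start of the current averaging window */
	uint64_t rt_avg;        /* decaying sum of RT + irq time */
	uint64_t prev_irq_time; /* last cumulative irq time accounted */
};

/* Averaging period: assumed 500ms, roughly the kernel default. */
static uint64_t sched_avg_period(void)
{
	return 500ULL * 1000 * 1000;
}

/* Halve rt_avg once per elapsed period (simplified sched_avg_update()). */
static void sched_avg_update(struct rq *rq)
{
	uint64_t period = sched_avg_period();

	while (rq->clock - rq->age_stamp > period) {
		rq->age_stamp += period;
		rq->rt_avg /= 2;
	}
}

static void sched_rt_avg_update(struct rq *rq, uint64_t delta)
{
	rq->rt_avg += delta;
	sched_avg_update(rq);
}

/* The hook this patch adds: account irq time exactly like RT time. */
static void sched_irq_time_avg_update(struct rq *rq, uint64_t curr_irq_time)
{
	uint64_t delta_irq = curr_irq_time - rq->prev_irq_time;

	rq->prev_irq_time = curr_irq_time;
	sched_rt_avg_update(rq, delta_irq);
}

/* Fraction of power left for fair tasks (simplified scale_rt_power()). */
static unsigned long scale_rt_power(struct rq *rq)
{
	uint64_t total = sched_avg_period() + (rq->clock - rq->age_stamp);
	uint64_t available = total < rq->rt_avg ? 0 : total - rq->rt_avg;

	if (total < SCHED_LOAD_SCALE)
		total = SCHED_LOAD_SCALE;
	total >>= SCHED_LOAD_SHIFT;

	return (unsigned long)(available / total);
}

int main(void)
{
	struct rq rq = { 0 };
	uint64_t tick = 10ULL * 1000 * 1000;	/* 10ms tick */
	uint64_t irq_total = 0;

	/* Simulate 3s during which 75% of every tick is spent in irq context. */
	for (int i = 0; i < 300; i++) {
		rq.clock += tick;
		irq_total += (tick * 3) / 4;
		sched_irq_time_avg_update(&rq, irq_total);
	}

	/* Prints roughly a quarter of SCHED_LOAD_SCALE (~270/1024 here). */
	printf("power left for tasks: %lu / %lu\n",
	       scale_rt_power(&rq), SCHED_LOAD_SCALE);
	return 0;
}

With a steady 75% irq load, rt_avg settles around three quarters of the averaging window, so the scaled power converges toward roughly a quarter of SCHED_LOAD_SCALE, which is what the load balancer then sees for the irq-loaded CPU.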
Showing 3 changed files with 30 additions and 1 deletion
kernel/sched.c
... | ... | @@ -519,6 +519,10 @@ |
519 | 519 | u64 avg_idle; |
520 | 520 | #endif |
521 | 521 | |
522 | +#ifdef CONFIG_IRQ_TIME_ACCOUNTING | |
523 | + u64 prev_irq_time; | |
524 | +#endif | |
525 | + | |
522 | 526 | /* calc_load related fields */ |
523 | 527 | unsigned long calc_load_update; |
524 | 528 | long calc_load_active; |
... | ... | @@ -643,6 +647,7 @@ |
643 | 647 | #endif /* CONFIG_CGROUP_SCHED */ |
644 | 648 | |
645 | 649 | static u64 irq_time_cpu(int cpu); |
650 | +static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); | |
646 | 651 | |
647 | 652 | inline void update_rq_clock(struct rq *rq) |
648 | 653 | { |
... | ... | @@ -654,6 +659,8 @@ |
654 | 659 | irq_time = irq_time_cpu(cpu); |
655 | 660 | if (rq->clock - irq_time > rq->clock_task) |
656 | 661 | rq->clock_task = rq->clock - irq_time; |
662 | + | |
663 | + sched_irq_time_avg_update(rq, irq_time); | |
657 | 664 | } |
658 | 665 | } |
659 | 666 | |
660 | 667 | |
... | ... | @@ -1985,12 +1992,23 @@ |
1985 | 1992 | local_irq_restore(flags); |
1986 | 1993 | } |
1987 | 1994 | |
1995 | +static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) | |
1996 | +{ | |
1997 | + if (sched_clock_irqtime && sched_feat(NONIRQ_POWER)) { | |
1998 | + u64 delta_irq = curr_irq_time - rq->prev_irq_time; | |
1999 | + rq->prev_irq_time = curr_irq_time; | |
2000 | + sched_rt_avg_update(rq, delta_irq); | |
2001 | + } | |
2002 | +} | |
2003 | + | |
1988 | 2004 | #else |
1989 | 2005 | |
1990 | 2006 | static u64 irq_time_cpu(int cpu) |
1991 | 2007 | { |
1992 | 2008 | return 0; |
1993 | 2009 | } |
2010 | + | |
2011 | +static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) { } | |
1994 | 2012 | |
1995 | 2013 | #endif |
1996 | 2014 |
kernel/sched_fair.c
... | ... | @@ -2275,7 +2275,13 @@ |
2275 | 2275 | u64 total, available; |
2276 | 2276 | |
2277 | 2277 | total = sched_avg_period() + (rq->clock - rq->age_stamp); |
2278 | - available = total - rq->rt_avg; | |
2278 | + | |
2279 | + if (unlikely(total < rq->rt_avg)) { | |
2280 | + /* Ensures that power won't end up being negative */ | |
2281 | + available = 0; | |
2282 | + } else { | |
2283 | + available = total - rq->rt_avg; | |
2284 | + } | |
2279 | 2285 | |
2280 | 2286 | if (unlikely((s64)total < SCHED_LOAD_SCALE)) |
2281 | 2287 | total = SCHED_LOAD_SCALE; |
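Tying the change above back to the test in the commit message, a back-of-envelope reading of the numbers (assuming SCHED_LOAD_SCALE = 1024 and ignoring the frequency and SMT scaling factors that update_cpu_power() also applies, so the figures are only illustrative):

    irq time on CPU X   ~ 0.75 * total    ->  rt_avg     ~ 0.75 * total
    available           = total - rt_avg  ->  available  ~ 0.25 * total
    cpu_power(CPU X)    ~ 0.25 * 1024     ~   256
    cpu_power(others)   ~ 1024

With CPU X advertising roughly a quarter of the power of the other CPUs, one cycle soaker stays there and the remaining seven spread across the other three CPUs, which is the behavior described in the commit message. The new total < rt_avg guard keeps available from wrapping around to a huge unsigned value (the "negative power" the comment refers to) when accumulated irq time momentarily exceeds the averaging window.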