Commit b42e0c41a422a212ddea0666d5a3a0e3c35206db
Committed by Ingo Molnar
1 parent 39c0cbe215
Exists in master and in 7 other branches
sched: Remove avg_wakeup
Testing the load which led to this heuristic (nfs4 kbuild) shows that it has
outlived its usefulness. With intervening load-balancing changes, I cannot see
any difference with/without it, so recover those fastpath cycles.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1268301062.6785.29.camel@marge.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Showing 5 changed files with 4 additions and 63 deletions
include/linux/sched.h
kernel/sched.c
@@ -1880,9 +1880,6 @@
 static void
 enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
-        if (wakeup)
-                p->se.start_runtime = p->se.sum_exec_runtime;
-
         sched_info_queued(p);
         p->sched_class->enqueue_task(rq, p, wakeup, head);
         p->se.on_rq = 1;
@@ -1890,17 +1887,11 @@

 static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-        if (sleep) {
-                if (p->se.last_wakeup) {
-                        update_avg(&p->se.avg_overlap,
-                                p->se.sum_exec_runtime - p->se.last_wakeup);
-                        p->se.last_wakeup = 0;
-                } else {
-                        update_avg(&p->se.avg_wakeup,
-                                sysctl_sched_wakeup_granularity);
-                }
+        if (sleep && p->se.last_wakeup) {
+                update_avg(&p->se.avg_overlap,
+                        p->se.sum_exec_runtime - p->se.last_wakeup);
+                p->se.last_wakeup = 0;
         }
-
         sched_info_dequeued(p);
         p->sched_class->dequeue_task(rq, p, sleep);
         p->se.on_rq = 0;
@@ -2466,14 +2457,7 @@
          */
         if (!in_interrupt()) {
                 struct sched_entity *se = &current->se;
-                u64 sample = se->sum_exec_runtime;

-                if (se->last_wakeup)
-                        sample -= se->last_wakeup;
-                else
-                        sample -= se->start_runtime;
-                update_avg(&se->avg_wakeup, sample);
-
                 se->last_wakeup = se->sum_exec_runtime;
         }

@@ -2540,8 +2524,6 @@
         p->se.nr_migrations = 0;
         p->se.last_wakeup = 0;
         p->se.avg_overlap = 0;
-        p->se.start_runtime = 0;
-        p->se.avg_wakeup = sysctl_sched_wakeup_granularity;

 #ifdef CONFIG_SCHEDSTATS
         memset(&p->se.statistics, 0, sizeof(p->se.statistics));
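Both sample sites above feed the scheduler's update_avg() helper, which this
commit leaves in place because avg_overlap still uses it. For readers without
the tree at hand, a minimal sketch of that helper as it looked in this era's
kernel/sched.c (reconstructed from memory; treat the exact 1/8 weighting as an
assumption rather than a quote):

/* Sketch of kernel/sched.c's update_avg() around this commit
 * (reconstructed; the 1/8 weight is believed accurate but is an
 * assumption, not copied from this tree). It folds a new sample
 * into a running exponential average: avg += (sample - avg) / 8. */
static void update_avg(u64 *avg, u64 sample)
{
        s64 diff = sample - *avg;
        *avg += diff >> 3;
}

The helper itself is cheap; what the commit recovers is the extra per-wakeup
bookkeeping around it (start_runtime and the avg_wakeup samples).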
kernel/sched_debug.c
kernel/sched_fair.c
@@ -1592,41 +1592,10 @@
 }
 #endif /* CONFIG_SMP */

-/*
- * Adaptive granularity
- *
- * se->avg_wakeup gives the average time a task runs until it does a wakeup,
- * with the limit of wakeup_gran -- when it never does a wakeup.
- *
- * So the smaller avg_wakeup is the faster we want this task to preempt,
- * but we don't want to treat the preemptee unfairly and therefore allow it
- * to run for at least the amount of time we'd like to run.
- *
- * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
- *
- * NOTE: we use *nr_running to scale with load, this nicely matches the
- * degrading latency on load.
- */
 static unsigned long
-adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
-{
-        u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-        u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
-        u64 gran = 0;
-
-        if (this_run < expected_wakeup)
-                gran = expected_wakeup - this_run;
-
-        return min_t(s64, gran, sysctl_sched_wakeup_granularity);
-}
-
-static unsigned long
 wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
 {
         unsigned long gran = sysctl_sched_wakeup_granularity;
-
-        if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
-                gran = adaptive_gran(curr, se);

         /*
          * Since its curr running now, convert the gran from real-time
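To make the deleted heuristic concrete, here is a freestanding rendition of
the adaptive_gran() arithmetic removed above. Only the clamp logic is taken
from the removed code; the kernel types, cfs_rq_of() and min_t() are swapped
for plain C, the inputs become parameters, and the values in main() are
hypothetical nanosecond figures chosen for illustration, not measurements.

#include <stdio.h>
#include <stdint.h>

/* Freestanding copy of the deleted adaptive_gran() math. In the kernel,
 * this_run came from curr (the running task) and avg_wakeup from se (the
 * freshly woken task): compare curr's run length against
 * 2 * avg_wakeup * nr_running and clip the shortfall to the cap. */
static uint64_t adaptive_gran(uint64_t this_run, uint64_t avg_wakeup,
                              unsigned int nr_running, uint64_t wakeup_gran)
{
        uint64_t expected_wakeup = 2 * avg_wakeup * nr_running;
        uint64_t gran = 0;

        if (this_run < expected_wakeup)
                gran = expected_wakeup - this_run;

        return gran < wakeup_gran ? gran : wakeup_gran;
}

int main(void)
{
        /* A wakee that historically ran only ~100us before waking someone
         * else: curr has already run past the expected window, so the
         * granularity collapses to 0 and the wakee may preempt at once. */
        printf("%llu\n", (unsigned long long)
               adaptive_gran(500000, 100000, 2, 1000000));

        /* A wakee that never woke anyone (avg_wakeup pinned at the cap)
         * against a freshly started curr: clipped to the full cap. */
        printf("%llu\n", (unsigned long long)
               adaptive_gran(0, 1000000, 4, 1000000));
        return 0;
}

Compiled with a plain cc this prints 0 and 1000000, which is exactly the
"clipped to [0, sched_wakeup_gran_ns]" behaviour described in the
sched_features.h comment removed below.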
kernel/sched_features.h
@@ -31,12 +31,6 @@
 SCHED_FEAT(WAKEUP_PREEMPT, 1)

 /*
- * Compute wakeup_gran based on task behaviour, clipped to
- * [0, sched_wakeup_gran_ns]
- */
-SCHED_FEAT(ADAPTIVE_GRAN, 1)
-
-/*
  * When converting the wakeup granularity to virtual time, do it such
  * that heavier tasks preempting a lighter task have an edge.
  */