Commit 90d6e24a3686325edea7748b966e138c9923017d

Authored by Arjan van de Ven
1 parent 6976675d94

hrtimer: make select() and poll() use the hrtimer range feature

This patch makes the select() and poll() hrtimers use the new range
feature and settings from the task struct.

In addition, this includes the estimate_accuracy() function that Linus
posted to lkml, changed entirely based on other people's lkml feedback.

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
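
The "settings from the task struct" are current->timer_slack_ns, which the
estimate_accuracy() helper below uses as a floor on the computed slack. As a
hedged illustration (not part of this patch), here is a minimal userspace
sketch of exercising that setting, assuming the PR_SET_TIMERSLACK prctl from
the same timer-slack series is available; the 5 ms value and the fallback
constant are purely for demonstration.

/*
 * Hypothetical userspace sketch: raise this task's timer slack, then
 * sleep in select().  Assumes PR_SET_TIMERSLACK (from the same patch
 * series) exists; older headers may not define the constant.
 */
#include <stdio.h>
#include <sys/prctl.h>
#include <sys/select.h>

#ifndef PR_SET_TIMERSLACK
#define PR_SET_TIMERSLACK 29	/* value used by the timer-slack patches */
#endif

int main(void)
{
	struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };

	/* Ask for up to 5 ms of timer slack (argument is in nanoseconds). */
	if (prctl(PR_SET_TIMERSLACK, 5000000UL, 0, 0, 0))
		perror("prctl");

	/* The 1 s select() timeout below may now fire up to ~5 ms late. */
	select(0, NULL, NULL, NULL, &tv);
	return 0;
}

With a 1 second timeout the 0.1% rule alone would give only 1 ms of slack, so
the 5 ms per-task value becomes the effective slack for this select() call.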

Showing 1 changed file (fs/select.c) with 62 additions and 2 deletions

@@ -28,6 +28,58 @@
 
 #include <asm/uaccess.h>
 
+
+/*
+ * Estimate expected accuracy in ns from a timespec.
+ *
+ * After quite a bit of churning around, we've settled on
+ * a simple thing of taking 0.1% of the timeout as the
+ * slack, with a cap of 100 msec.
+ * "nice" tasks get a 0.5% slack instead.
+ *
+ * Consider this comment an open invitation to come up with even
+ * better solutions..
+ */
+
+static unsigned long __estimate_accuracy(struct timespec *tv)
+{
+	unsigned long slack;
+	int divfactor = 1000;
+
+	if (task_nice(current))
+		divfactor = divfactor / 5;
+
+	slack = tv->tv_nsec / divfactor;
+	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);
+
+	if (slack > 100 * NSEC_PER_MSEC)
+		slack = 100 * NSEC_PER_MSEC;
+	return slack;
+}
+
+static unsigned long estimate_accuracy(struct timespec *tv)
+{
+	unsigned long ret;
+	struct timespec now;
+
+	/*
+	 * Realtime tasks get a slack of 0 for obvious reasons.
+	 */
+
+	if (current->policy == SCHED_FIFO ||
+	    current->policy == SCHED_RR)
+		return 0;
+
+	ktime_get_ts(&now);
+	now = timespec_sub(*tv, now);
+	ret = __estimate_accuracy(&now);
+	if (ret < current->timer_slack_ns)
+		return current->timer_slack_ns;
+	return ret;
+}
+
+
+
 struct poll_table_page {
 	struct poll_table_page * next;
 	struct poll_table_entry * entry;
@@ -262,6 +314,7 @@
 	struct poll_wqueues table;
 	poll_table *wait;
 	int retval, i, timed_out = 0;
+	unsigned long slack = 0;
 
 	rcu_read_lock();
 	retval = max_select_fd(n, fds);
@@ -278,6 +331,9 @@
 		timed_out = 1;
 	}
 
+	if (end_time)
+		slack = estimate_accuracy(end_time);
+
 	retval = 0;
 	for (;;) {
 		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
@@ -353,7 +409,7 @@
 			to = &expire;
 		}
 
-		if (!schedule_hrtimeout(to, HRTIMER_MODE_ABS))
+		if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
 			timed_out = 1;
 	}
 	__set_current_state(TASK_RUNNING);
@@ -593,6 +649,7 @@
 	poll_table* pt = &wait->pt;
 	ktime_t expire, *to = NULL;
 	int timed_out = 0, count = 0;
+	unsigned long slack = 0;
 
 	/* Optimise the no-wait case */
 	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
@@ -600,6 +657,9 @@
 		timed_out = 1;
 	}
 
+	if (end_time)
+		slack = estimate_accuracy(end_time);
+
 	for (;;) {
 		struct poll_list *walk;
 
@@ -646,7 +706,7 @@
 			to = &expire;
 		}
 
-		if (!schedule_hrtimeout(to, HRTIMER_MODE_ABS))
+		if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
 			timed_out = 1;
 	}
 	__set_current_state(TASK_RUNNING);
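
For illustration, a small userspace sketch (not kernel code) that mirrors the
__estimate_accuracy() heuristic added above: 0.1% of the remaining timeout as
slack, 0.5% for "nice" tasks, capped at 100 msec. The helper name and the
sample values are made up for the example.

/*
 * Illustrative userspace sketch mirroring the __estimate_accuracy()
 * math above; purely for working through the numbers.
 */
#include <stdio.h>

#define NSEC_PER_SEC	1000000000UL
#define NSEC_PER_MSEC	1000000UL

static unsigned long slack_for(unsigned long sec, unsigned long nsec, int niced)
{
	int divfactor = niced ? 200 : 1000;	/* 0.5% vs 0.1% */
	unsigned long slack = nsec / divfactor + sec * (NSEC_PER_SEC / divfactor);

	if (slack > 100 * NSEC_PER_MSEC)	/* cap at 100 msec */
		slack = 100 * NSEC_PER_MSEC;
	return slack;
}

int main(void)
{
	/* 2 s timeout: 0.1% -> 2 ms of slack */
	printf("%lu ns\n", slack_for(2, 0, 0));		/* 2000000 */
	/* 2 s timeout, niced task: 0.5% -> 10 ms of slack */
	printf("%lu ns\n", slack_for(2, 0, 1));		/* 10000000 */
	/* 500 s timeout: 0.1% would be 500 ms, capped to 100 ms */
	printf("%lu ns\n", slack_for(500, 0, 0));	/* 100000000 */
	return 0;
}

In the kernel version, realtime (SCHED_FIFO/SCHED_RR) tasks bypass this
entirely and get a slack of 0, and whatever the heuristic produces is never
allowed to drop below the task's timer_slack_ns.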