Commit f1241c87a16c4fe9f4f51d6ed3589f031c505e8d

Authored by Matthew Wilcox
Committed by Matthew Wilcox
1 parent f06d968658

Add down_timeout and change ACPI to use it

ACPI currently emulates a timeout for semaphores with calls to
down_trylock and sleep.  This produces horrible behaviour in terms of
fairness and excessive wakeups.  Now that we have a unified semaphore
implementation, adding a real down_timeout is almost trivial.

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>

Showing 3 changed files with 62 additions and 75 deletions Side-by-side Diff

... ... @@ -4,6 +4,8 @@
4 4 * Copyright (C) 2000 Andrew Henroid
5 5 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6 6 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  7 + * Copyright (c) 2008 Intel Corporation
  8 + * Author: Matthew Wilcox <willy@linux.intel.com>
7 9 *
8 10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 11 *
10 12  
11 13  
... ... @@ -37,16 +39,19 @@
37 39 #include <linux/workqueue.h>
38 40 #include <linux/nmi.h>
39 41 #include <linux/acpi.h>
40   -#include <acpi/acpi.h>
41   -#include <asm/io.h>
42   -#include <acpi/acpi_bus.h>
43   -#include <acpi/processor.h>
44   -#include <asm/uaccess.h>
45   -
46 42 #include <linux/efi.h>
47 43 #include <linux/ioport.h>
48 44 #include <linux/list.h>
  45 +#include <linux/jiffies.h>
  46 +#include <linux/semaphore.h>
49 47  
  48 +#include <asm/io.h>
  49 +#include <asm/uaccess.h>
  50 +
  51 +#include <acpi/acpi.h>
  52 +#include <acpi/acpi_bus.h>
  53 +#include <acpi/processor.h>
  54 +
50 55 #define _COMPONENT ACPI_OS_SERVICES
51 56 ACPI_MODULE_NAME("osl");
52 57 #define PREFIX "ACPI: "
... ... @@ -764,7 +769,6 @@
764 769 {
765 770 struct semaphore *sem = NULL;
766 771  
767   -
768 772 sem = acpi_os_allocate(sizeof(struct semaphore));
769 773 if (!sem)
770 774 return AE_NO_MEMORY;
771 775  
... ... @@ -791,12 +795,12 @@
791 795 {
792 796 struct semaphore *sem = (struct semaphore *)handle;
793 797  
794   -
795 798 if (!sem)
796 799 return AE_BAD_PARAMETER;
797 800  
798 801 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
799 802  
  803 + BUG_ON(!list_empty(&sem->wait_list));
800 804 kfree(sem);
801 805 sem = NULL;
802 806  
803 807  
804 808  
... ... @@ -804,21 +808,15 @@
804 808 }
805 809  
806 810 /*
807   - * TODO: The kernel doesn't have a 'down_timeout' function -- had to
808   - * improvise. The process is to sleep for one scheduler quantum
809   - * until the semaphore becomes available. Downside is that this
810   - * may result in starvation for timeout-based waits when there's
811   - * lots of semaphore activity.
812   - *
813 811 * TODO: Support for units > 1?
814 812 */
815 813 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
816 814 {
817 815 acpi_status status = AE_OK;
818 816 struct semaphore *sem = (struct semaphore *)handle;
  817 + long jiffies;
819 818 int ret = 0;
820 819  
821   -
822 820 if (!sem || (units < 1))
823 821 return AE_BAD_PARAMETER;
824 822  
825 823  
... ... @@ -828,59 +826,15 @@
828 826 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
829 827 handle, units, timeout));
830 828  
831   - /*
832   - * This can be called during resume with interrupts off.
833   - * Like boot-time, we should be single threaded and will
834   - * always get the lock if we try -- timeout or not.
835   - * If this doesn't succeed, then we will oops courtesy of
836   - * might_sleep() in down().
837   - */
838   - if (!down_trylock(sem))
839   - return AE_OK;
  829 + if (timeout == ACPI_WAIT_FOREVER)
  830 + jiffies = MAX_SCHEDULE_TIMEOUT;
  831 + else
  832 + jiffies = msecs_to_jiffies(timeout);
  833 +
  834 + ret = down_timeout(sem, jiffies);
  835 + if (ret)
  836 + status = AE_TIME;
840 837  
841   - switch (timeout) {
842   - /*
843   - * No Wait:
844   - * --------
845   - * A zero timeout value indicates that we shouldn't wait - just
846   - * acquire the semaphore if available otherwise return AE_TIME
847   - * (a.k.a. 'would block').
848   - */
849   - case 0:
850   - if (down_trylock(sem))
851   - status = AE_TIME;
852   - break;
853   -
854   - /*
855   - * Wait Indefinitely:
856   - * ------------------
857   - */
858   - case ACPI_WAIT_FOREVER:
859   - down(sem);
860   - break;
861   -
862   - /*
863   - * Wait w/ Timeout:
864   - * ----------------
865   - */
866   - default:
867   - // TODO: A better timeout algorithm?
868   - {
869   - int i = 0;
870   - static const int quantum_ms = 1000 / HZ;
871   -
872   - ret = down_trylock(sem);
873   - for (i = timeout; (i > 0 && ret != 0); i -= quantum_ms) {
874   - schedule_timeout_interruptible(1);
875   - ret = down_trylock(sem);
876   - }
877   -
878   - if (ret != 0)
879   - status = AE_TIME;
880   - }
881   - break;
882   - }
883   -
884 838 if (ACPI_FAILURE(status)) {
885 839 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
886 840 "Failed to acquire semaphore[%p|%d|%d], %s",
... ... @@ -901,7 +855,6 @@
901 855 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
902 856 {
903 857 struct semaphore *sem = (struct semaphore *)handle;
904   -
905 858  
906 859 if (!sem || (units < 1))
907 860 return AE_BAD_PARAMETER;
include/linux/semaphore.h
... ... @@ -75,6 +75,12 @@
75 75 extern int __must_check down_trylock(struct semaphore *sem);
76 76  
77 77 /*
  78 + * As down(), except this function will return -ETIME if it fails to
  79 + * acquire the semaphore within the specified number of jiffies.
  80 + */
  81 +extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
  82 +
  83 +/*
78 84 * Release the semaphore. Unlike mutexes, up() may be called from any
79 85 * context and even by tasks which have never called down().
80 86 */
... ... @@ -35,6 +35,7 @@
35 35 static noinline void __down(struct semaphore *sem);
36 36 static noinline int __down_interruptible(struct semaphore *sem);
37 37 static noinline int __down_killable(struct semaphore *sem);
  38 +static noinline int __down_timeout(struct semaphore *sem, long jiffies);
38 39 static noinline void __up(struct semaphore *sem);
39 40  
40 41 void down(struct semaphore *sem)
... ... @@ -104,6 +105,20 @@
104 105 }
105 106 EXPORT_SYMBOL(down_trylock);
106 107  
  108 +int down_timeout(struct semaphore *sem, long jiffies)
  109 +{
  110 + unsigned long flags;
  111 + int result = 0;
  112 +
  113 + spin_lock_irqsave(&sem->lock, flags);
  114 + if (unlikely(sem->count-- <= 0))
  115 + result = __down_timeout(sem, jiffies);
  116 + spin_unlock_irqrestore(&sem->lock, flags);
  117 +
  118 + return result;
  119 +}
  120 +EXPORT_SYMBOL(down_timeout);
  121 +
107 122 void up(struct semaphore *sem)
108 123 {
109 124 unsigned long flags;
110 125  
... ... @@ -142,10 +157,12 @@
142 157 }
143 158  
144 159 /*
145   - * Because this function is inlined, the 'state' parameter will be constant,
146   - * and thus optimised away by the compiler.
  160 + * Because this function is inlined, the 'state' parameter will be
  161 + * constant, and thus optimised away by the compiler. Likewise the
  162 + * 'timeout' parameter for the cases without timeouts.
147 163 */
148   -static inline int __sched __down_common(struct semaphore *sem, long state)
  164 +static inline int __sched __down_common(struct semaphore *sem, long state,
  165 + long timeout)
149 166 {
150 167 int result = 0;
151 168 struct task_struct *task = current;
152 169  
153 170  
... ... @@ -160,14 +177,20 @@
160 177 goto interrupted;
161 178 if (state == TASK_KILLABLE && fatal_signal_pending(task))
162 179 goto interrupted;
  180 + if (timeout <= 0)
  181 + goto timed_out;
163 182 __set_task_state(task, state);
164 183 spin_unlock_irq(&sem->lock);
165   - schedule();
  184 + timeout = schedule_timeout(timeout);
166 185 spin_lock_irq(&sem->lock);
167 186 if (waiter.up)
168 187 goto woken;
169 188 }
170 189  
  190 + timed_out:
  191 + list_del(&waiter.list);
  192 + result = -ETIME;
  193 + goto woken;
171 194 interrupted:
172 195 list_del(&waiter.list);
173 196 result = -EINTR;
174 197  
175 198  
... ... @@ -187,17 +210,22 @@
187 210  
188 211 static noinline void __sched __down(struct semaphore *sem)
189 212 {
190   - __down_common(sem, TASK_UNINTERRUPTIBLE);
  213 + __down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
191 214 }
192 215  
193 216 static noinline int __sched __down_interruptible(struct semaphore *sem)
194 217 {
195   - return __down_common(sem, TASK_INTERRUPTIBLE);
  218 + return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
196 219 }
197 220  
198 221 static noinline int __sched __down_killable(struct semaphore *sem)
199 222 {
200   - return __down_common(sem, TASK_KILLABLE);
  223 + return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
  224 +}
  225 +
  226 +static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
  227 +{
  228 + return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies);
201 229 }
202 230  
203 231 static noinline void __sched __up(struct semaphore *sem)