/*
 * Copyright (c) 2008 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * Distributed under the terms of the GNU GPL, version 2
 *
 * This file implements counting semaphores.
 * A counting semaphore may be acquired 'n' times before sleeping.
 * See mutex.c for single-acquisition sleeping locks which enforce
 * rules which allow code to be debugged more easily.
 */

/*
 * Some notes on the implementation:
 *
 * The spinlock controls access to the other members of the semaphore.
 * down_trylock() and up() can be called from interrupt context, so we
 * have to disable interrupts when taking the lock.  It turns out various
 * parts of the kernel expect to be able to use down() on a semaphore in
 * interrupt context when they know it will succeed, so we have to use
 * irqsave variants for down(), down_interruptible() and down_killable()
 * too.
 *
 * The ->count variable represents how many more tasks can acquire this
 * semaphore.  If it's zero, there may be tasks waiting on the wait_list.
 */
  
  #include <linux/compiler.h>
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/sched.h>
  #include <linux/semaphore.h>
  #include <linux/spinlock.h>
74f4e369f   Ingo Molnar   ftrace: stacktrac...
34
  #include <linux/ftrace.h>
64ac24e73   Matthew Wilcox   Generic semaphore...
35

64ac24e73   Matthew Wilcox   Generic semaphore...
36
37
  static noinline void __down(struct semaphore *sem);
  static noinline int __down_interruptible(struct semaphore *sem);
f06d96865   Matthew Wilcox   Introduce down_ki...
38
  static noinline int __down_killable(struct semaphore *sem);
f1241c87a   Matthew Wilcox   Add down_timeout ...
39
  static noinline int __down_timeout(struct semaphore *sem, long jiffies);
64ac24e73   Matthew Wilcox   Generic semaphore...
40
  static noinline void __up(struct semaphore *sem);
714493cd5   Matthew Wilcox   Improve semaphore...
41
42
43
44
45
46
47
48
49
50
51
  /**
   * down - acquire the semaphore
   * @sem: the semaphore to be acquired
   *
   * Acquires the semaphore.  If no more tasks are allowed to acquire the
   * semaphore, calling this function will put the task to sleep until the
   * semaphore is released.
   *
   * Use of this function is deprecated, please use down_interruptible() or
   * down_killable() instead.
   */
64ac24e73   Matthew Wilcox   Generic semaphore...
52
53
54
55
56
  void down(struct semaphore *sem)
  {
  	unsigned long flags;
  
  	spin_lock_irqsave(&sem->lock, flags);
00b41ec26   Linus Torvalds   Revert "semaphore...
57
58
59
  	if (likely(sem->count > 0))
  		sem->count--;
  	else
64ac24e73   Matthew Wilcox   Generic semaphore...
60
61
62
63
  		__down(sem);
  	spin_unlock_irqrestore(&sem->lock, flags);
  }
  EXPORT_SYMBOL(down);
714493cd5   Matthew Wilcox   Improve semaphore...
64
65
66
67
68
69
70
71
72
  /**
   * down_interruptible - acquire the semaphore unless interrupted
   * @sem: the semaphore to be acquired
   *
   * Attempts to acquire the semaphore.  If no more tasks are allowed to
   * acquire the semaphore, calling this function will put the task to sleep.
   * If the sleep is interrupted by a signal, this function will return -EINTR.
   * If the semaphore is successfully acquired, this function returns 0.
   */
64ac24e73   Matthew Wilcox   Generic semaphore...
73
74
75
76
77
78
  int down_interruptible(struct semaphore *sem)
  {
  	unsigned long flags;
  	int result = 0;
  
  	spin_lock_irqsave(&sem->lock, flags);
00b41ec26   Linus Torvalds   Revert "semaphore...
79
  	if (likely(sem->count > 0))
bf726eab3   Ingo Molnar   semaphore: fix
80
  		sem->count--;
00b41ec26   Linus Torvalds   Revert "semaphore...
81
82
  	else
  		result = __down_interruptible(sem);
64ac24e73   Matthew Wilcox   Generic semaphore...
83
84
85
86
87
  	spin_unlock_irqrestore(&sem->lock, flags);
  
  	return result;
  }
  EXPORT_SYMBOL(down_interruptible);
714493cd5   Matthew Wilcox   Improve semaphore...
88
89
90
91
92
93
94
95
96
97
  /**
   * down_killable - acquire the semaphore unless killed
   * @sem: the semaphore to be acquired
   *
   * Attempts to acquire the semaphore.  If no more tasks are allowed to
   * acquire the semaphore, calling this function will put the task to sleep.
   * If the sleep is interrupted by a fatal signal, this function will return
   * -EINTR.  If the semaphore is successfully acquired, this function returns
   * 0.
   */
f06d96865   Matthew Wilcox   Introduce down_ki...
98
99
100
101
102
103
  int down_killable(struct semaphore *sem)
  {
  	unsigned long flags;
  	int result = 0;
  
  	spin_lock_irqsave(&sem->lock, flags);
00b41ec26   Linus Torvalds   Revert "semaphore...
104
  	if (likely(sem->count > 0))
bf726eab3   Ingo Molnar   semaphore: fix
105
  		sem->count--;
00b41ec26   Linus Torvalds   Revert "semaphore...
106
107
  	else
  		result = __down_killable(sem);
f06d96865   Matthew Wilcox   Introduce down_ki...
108
109
110
111
112
  	spin_unlock_irqrestore(&sem->lock, flags);
  
  	return result;
  }
  EXPORT_SYMBOL(down_killable);
64ac24e73   Matthew Wilcox   Generic semaphore...
113
114
115
116
117
  /**
   * down_trylock - try to acquire the semaphore, without waiting
   * @sem: the semaphore to be acquired
   *
   * Try to acquire the semaphore atomically.  Returns 0 if the mutex has
714493cd5   Matthew Wilcox   Improve semaphore...
118
   * been acquired successfully or 1 if it it cannot be acquired.
64ac24e73   Matthew Wilcox   Generic semaphore...
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
   *
   * NOTE: This return value is inverted from both spin_trylock and
   * mutex_trylock!  Be careful about this when converting code.
   *
   * Unlike mutex_trylock, this function can be used from interrupt context,
   * and the semaphore can be released by any task or interrupt.
   */
  int down_trylock(struct semaphore *sem)
  {
  	unsigned long flags;
  	int count;
  
  	spin_lock_irqsave(&sem->lock, flags);
  	count = sem->count - 1;
  	if (likely(count >= 0))
  		sem->count = count;
  	spin_unlock_irqrestore(&sem->lock, flags);
  
  	return (count < 0);
  }
  EXPORT_SYMBOL(down_trylock);
714493cd5   Matthew Wilcox   Improve semaphore...
140
141
142
143
144
145
146
147
148
149
  /**
   * down_timeout - acquire the semaphore within a specified time
   * @sem: the semaphore to be acquired
   * @jiffies: how long to wait before failing
   *
   * Attempts to acquire the semaphore.  If no more tasks are allowed to
   * acquire the semaphore, calling this function will put the task to sleep.
   * If the semaphore is not released within the specified number of jiffies,
   * this function returns -ETIME.  It returns 0 if the semaphore was acquired.
   */
f1241c87a   Matthew Wilcox   Add down_timeout ...
150
151
152
153
154
155
  int down_timeout(struct semaphore *sem, long jiffies)
  {
  	unsigned long flags;
  	int result = 0;
  
  	spin_lock_irqsave(&sem->lock, flags);
00b41ec26   Linus Torvalds   Revert "semaphore...
156
  	if (likely(sem->count > 0))
bf726eab3   Ingo Molnar   semaphore: fix
157
  		sem->count--;
00b41ec26   Linus Torvalds   Revert "semaphore...
158
159
  	else
  		result = __down_timeout(sem, jiffies);
f1241c87a   Matthew Wilcox   Add down_timeout ...
160
161
162
163
164
  	spin_unlock_irqrestore(&sem->lock, flags);
  
  	return result;
  }
  EXPORT_SYMBOL(down_timeout);
714493cd5   Matthew Wilcox   Improve semaphore...
165
166
167
168
169
170
171
  /**
   * up - release the semaphore
   * @sem: the semaphore to release
   *
   * Release the semaphore.  Unlike mutexes, up() may be called from any
   * context and even by tasks which have never called down().
   */
64ac24e73   Matthew Wilcox   Generic semaphore...
172
173
174
175
176
  void up(struct semaphore *sem)
  {
  	unsigned long flags;
  
  	spin_lock_irqsave(&sem->lock, flags);
00b41ec26   Linus Torvalds   Revert "semaphore...
177
178
179
  	if (likely(list_empty(&sem->wait_list)))
  		sem->count++;
  	else
64ac24e73   Matthew Wilcox   Generic semaphore...
180
181
182
183
184
185
186
187
188
189
  		__up(sem);
  	spin_unlock_irqrestore(&sem->lock, flags);
  }
  EXPORT_SYMBOL(up);
  
  /* Functions for the contended case */
  
  struct semaphore_waiter {
  	struct list_head list;
  	struct task_struct *task;
00b41ec26   Linus Torvalds   Revert "semaphore...
190
  	int up;
64ac24e73   Matthew Wilcox   Generic semaphore...
191
192
193
  };
  
  /*
f1241c87a   Matthew Wilcox   Add down_timeout ...
194
195
196
   * Because this function is inlined, the 'state' parameter will be
   * constant, and thus optimised away by the compiler.  Likewise the
   * 'timeout' parameter for the cases without timeouts.
64ac24e73   Matthew Wilcox   Generic semaphore...
197
   */
f1241c87a   Matthew Wilcox   Add down_timeout ...
198
199
  static inline int __sched __down_common(struct semaphore *sem, long state,
  								long timeout)
64ac24e73   Matthew Wilcox   Generic semaphore...
200
  {
64ac24e73   Matthew Wilcox   Generic semaphore...
201
202
  	struct task_struct *task = current;
  	struct semaphore_waiter waiter;
bf726eab3   Ingo Molnar   semaphore: fix
203
  	list_add_tail(&waiter.list, &sem->wait_list);
00b41ec26   Linus Torvalds   Revert "semaphore...
204
205
  	waiter.task = task;
  	waiter.up = 0;
64ac24e73   Matthew Wilcox   Generic semaphore...
206
207
  
  	for (;;) {
5b2becc8c   Oleg Nesterov   semaphore: __down...
208
  		if (signal_pending_state(state, task))
00b41ec26   Linus Torvalds   Revert "semaphore...
209
210
211
  			goto interrupted;
  		if (timeout <= 0)
  			goto timed_out;
64ac24e73   Matthew Wilcox   Generic semaphore...
212
213
  		__set_task_state(task, state);
  		spin_unlock_irq(&sem->lock);
f1241c87a   Matthew Wilcox   Add down_timeout ...
214
  		timeout = schedule_timeout(timeout);
64ac24e73   Matthew Wilcox   Generic semaphore...
215
  		spin_lock_irq(&sem->lock);
00b41ec26   Linus Torvalds   Revert "semaphore...
216
217
  		if (waiter.up)
  			return 0;
64ac24e73   Matthew Wilcox   Generic semaphore...
218
  	}
00b41ec26   Linus Torvalds   Revert "semaphore...
219
220
221
222
223
   timed_out:
  	list_del(&waiter.list);
  	return -ETIME;
  
   interrupted:
64ac24e73   Matthew Wilcox   Generic semaphore...
224
  	list_del(&waiter.list);
00b41ec26   Linus Torvalds   Revert "semaphore...
225
  	return -EINTR;
64ac24e73   Matthew Wilcox   Generic semaphore...
226
227
228
229
  }
  
  static noinline void __sched __down(struct semaphore *sem)
  {
f1241c87a   Matthew Wilcox   Add down_timeout ...
230
  	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
64ac24e73   Matthew Wilcox   Generic semaphore...
231
232
233
234
  }
  
  static noinline int __sched __down_interruptible(struct semaphore *sem)
  {
f1241c87a   Matthew Wilcox   Add down_timeout ...
235
  	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
64ac24e73   Matthew Wilcox   Generic semaphore...
236
  }
f06d96865   Matthew Wilcox   Introduce down_ki...
237
238
  static noinline int __sched __down_killable(struct semaphore *sem)
  {
f1241c87a   Matthew Wilcox   Add down_timeout ...
239
240
241
242
243
244
  	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
  }
  
  static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
  {
  	return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies);
f06d96865   Matthew Wilcox   Introduce down_ki...
245
  }
64ac24e73   Matthew Wilcox   Generic semaphore...
246
247
  static noinline void __sched __up(struct semaphore *sem)
  {
b17170b2f   Matthew Wilcox   Simplify semaphor...
248
249
  	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
  						struct semaphore_waiter, list);
00b41ec26   Linus Torvalds   Revert "semaphore...
250
251
  	list_del(&waiter->list);
  	waiter->up = 1;
b17170b2f   Matthew Wilcox   Simplify semaphor...
252
  	wake_up_process(waiter->task);
64ac24e73   Matthew Wilcox   Generic semaphore...
253
  }