  /*
   * kernel/mutex.c
   *
   * Mutexes: blocking mutual exclusion locks
   *
   * Started by Ingo Molnar:
   *
   *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
   *
   * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
   * David Howells for suggestions and improvements.
   *
   * Also see Documentation/mutex-design.txt.
   */
  #include <linux/mutex.h>
  #include <linux/sched.h>
  #include <linux/module.h>
  #include <linux/spinlock.h>
  #include <linux/interrupt.h>
  #include <linux/debug_locks.h>
  
  /*
   * In the DEBUG case we are using the "NULL fastpath" for mutexes,
   * which forces all calls into the slowpath:
   */
  #ifdef CONFIG_DEBUG_MUTEXES
  # include "mutex-debug.h"
  # include <asm-generic/mutex-null.h>
  #else
  # include "mutex.h"
  # include <asm/mutex.h>
  #endif
  
  /***
   * mutex_init - initialize the mutex
   * @lock: the mutex to be initialized
   *
   * Initialize the mutex to unlocked state.
   *
   * It is not allowed to initialize an already locked mutex.
   */
  void
  __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
  {
  	atomic_set(&lock->count, 1);
  	spin_lock_init(&lock->wait_lock);
  	INIT_LIST_HEAD(&lock->wait_list);
  	debug_mutex_init(lock, name, key);
  }
  
  EXPORT_SYMBOL(__mutex_init);
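
  /*
   * A minimal usage sketch (illustrative only; "foo_dev" is a hypothetical
   * caller-side structure, not part of this file). A mutex is either
   * defined statically with DEFINE_MUTEX() or embedded in an object and
   * initialized at runtime with mutex_init(), which expands to
   * __mutex_init() with a per-site lock class key for lockdep:
   *
   *	static DEFINE_MUTEX(foo_global_lock);
   *
   *	struct foo_dev {
   *		struct mutex	lock;
   *	};
   *
   *	static void foo_dev_setup(struct foo_dev *dev)
   *	{
   *		mutex_init(&dev->lock);
   *	}
   */
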
  #ifndef CONFIG_DEBUG_LOCK_ALLOC
  /*
   * We split the mutex lock/unlock logic into separate fastpath and
   * slowpath functions, to reduce the register pressure on the fastpath.
   * We also put the fastpath first in the kernel image, to make sure the
   * branch is predicted by the CPU as default-untaken.
   */
  static void noinline __sched
  __mutex_lock_slowpath(atomic_t *lock_count);
  
  /***
   * mutex_lock - acquire the mutex
   * @lock: the mutex to be acquired
   *
   * Lock the mutex exclusively for this task. If the mutex is not
   * available right now, it will sleep until it can get it.
   *
   * The mutex must later on be released by the same task that
   * acquired it. Recursive locking is not allowed. The task
   * may not exit without first unlocking the mutex. Also, kernel
   * memory where the mutex resides must not be freed with
   * the mutex still locked. The mutex must first be initialized
   * (or statically defined) before it can be locked. memset()-ing
   * the mutex to 0 is not allowed.
   *
   * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
   *   checks that will enforce the restrictions and will also do
   *   deadlock debugging. )
   *
   * This function is similar to (but not equivalent to) down().
   */
  void inline __sched mutex_lock(struct mutex *lock)
  {
  	might_sleep();
  	/*
  	 * The locking fastpath is the 1->0 transition from
  	 * 'unlocked' into 'locked' state.
  	 */
  	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
  }
  
  EXPORT_SYMBOL(mutex_lock);
  #endif

  static noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
  
  /***
   * mutex_unlock - release the mutex
   * @lock: the mutex to be released
   *
   * Unlock a mutex that has been locked by this task previously.
   *
   * This function must not be used in interrupt context. Unlocking
   * a mutex that is not locked is not allowed.
   *
   * This function is similar to (but not equivalent to) up().
   */
  void __sched mutex_unlock(struct mutex *lock)
  {
  	/*
  	 * The unlocking fastpath is the 0->1 transition from 'locked'
  	 * into 'unlocked' state:
  	 */
  	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
  }
  
  EXPORT_SYMBOL(mutex_unlock);
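
  /*
   * A minimal usage sketch of the lock/unlock pair (illustrative only;
   * "foo_dev" and foo_update() are hypothetical caller-side names). The
   * same task that acquired the mutex releases it, and the critical
   * section is allowed to sleep:
   *
   *	static void foo_update(struct foo_dev *dev, int val)
   *	{
   *		mutex_lock(&dev->lock);
   *		dev->val = val;
   *		mutex_unlock(&dev->lock);
   *	}
   */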
  
  /*
   * Lock a mutex (possibly interruptible), slowpath:
   */
  static inline int __sched
  __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
  	       	unsigned long ip)
  {
  	struct task_struct *task = current;
  	struct mutex_waiter waiter;
  	unsigned int old_val;
  	unsigned long flags;

  	spin_lock_mutex(&lock->wait_lock, flags);

  	debug_mutex_lock_common(lock, &waiter);
  	mutex_acquire(&lock->dep_map, subclass, 0, ip);
  	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
  
  	/* add waiting tasks to the end of the waitqueue (FIFO): */
  	list_add_tail(&waiter.list, &lock->wait_list);
  	waiter.task = task;
  	old_val = atomic_xchg(&lock->count, -1);
  	if (old_val == 1)
  		goto done;
  	lock_contended(&lock->dep_map, ip);

  	for (;;) {
  		/*
  		 * Let's try to take the lock again - this is needed even if
  		 * we get here for the first time (shortly after failing to
  		 * acquire the lock), to make sure that we get a wakeup once
  		 * it's unlocked. Later on, if we sleep, this is the
  		 * operation that gives us the lock. We xchg it to -1, so
  		 * that when we release the lock, we properly wake up the
  		 * other waiters:
  		 */
  		old_val = atomic_xchg(&lock->count, -1);
  		if (old_val == 1)
  			break;
  
  		/*
  		 * got a signal? (This code gets eliminated in the
  		 * TASK_UNINTERRUPTIBLE case.)
  		 */
  		if (unlikely((state == TASK_INTERRUPTIBLE &&
  					signal_pending(task)) ||
  			      (state == TASK_KILLABLE &&
  					fatal_signal_pending(task)))) {
  			mutex_remove_waiter(lock, &waiter,
  					    task_thread_info(task));
  			mutex_release(&lock->dep_map, 1, ip);
  			spin_unlock_mutex(&lock->wait_lock, flags);
  
  			debug_mutex_free_waiter(&waiter);
  			return -EINTR;
  		}
  		__set_task_state(task, state);
  
  		/* didn't get the lock, go to sleep: */
  		spin_unlock_mutex(&lock->wait_lock, flags);
  		schedule();
  		spin_lock_mutex(&lock->wait_lock, flags);
  	}
  done:
  	lock_acquired(&lock->dep_map);
  	/* got the lock - rejoice! */
  	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
  	debug_mutex_set_owner(lock, task_thread_info(task));
  
  	/* set it to 0 if there are no waiters left: */
  	if (likely(list_empty(&lock->wait_list)))
  		atomic_set(&lock->count, 0);
  	spin_unlock_mutex(&lock->wait_lock, flags);
  
  	debug_mutex_free_waiter(&waiter);
  	return 0;
  }
  #ifdef CONFIG_DEBUG_LOCK_ALLOC
  void __sched
  mutex_lock_nested(struct mutex *lock, unsigned int subclass)
  {
  	might_sleep();
  	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
  }
  
  EXPORT_SYMBOL_GPL(mutex_lock_nested);
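
  /*
   * A usage sketch for the _nested variant (illustrative only; "foo_dev"
   * and foo_reparent() are hypothetical). When two mutexes of the same
   * lock class must be held at once, the inner acquisition is annotated
   * with a distinct subclass so lockdep does not report it as a
   * self-deadlock:
   *
   *	static void foo_reparent(struct foo_dev *parent, struct foo_dev *child)
   *	{
   *		mutex_lock(&parent->lock);
   *		mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
   *
   *		child->parent = parent;
   *
   *		mutex_unlock(&child->lock);
   *		mutex_unlock(&parent->lock);
   *	}
   */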
  
  int __sched
  mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
  {
  	might_sleep();
  	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
  }
  EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
  
  int __sched
  mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
  {
  	might_sleep();
  	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
  }
  
  EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
  #endif
  /*
   * Release the lock, slowpath:
   */
  static inline void
  __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
  {
  	struct mutex *lock = container_of(lock_count, struct mutex, count);
  	unsigned long flags;

  	spin_lock_mutex(&lock->wait_lock, flags);
  	mutex_release(&lock->dep_map, nested, _RET_IP_);
  	debug_mutex_unlock(lock);
  
  	/*
  	 * Some architectures leave the lock unlocked in the fastpath failure
  	 * case, others need to leave it locked. In the latter case we have to
  	 * unlock it here.
  	 */
  	if (__mutex_slowpath_needs_to_unlock())
  		atomic_set(&lock->count, 1);
  	if (!list_empty(&lock->wait_list)) {
  		/* get the first entry from the wait-list: */
  		struct mutex_waiter *waiter =
  				list_entry(lock->wait_list.next,
  					   struct mutex_waiter, list);
  
  		debug_mutex_wake_waiter(lock, waiter);
  
  		wake_up_process(waiter->task);
  	}
  
  	debug_mutex_clear_owner(lock);
  	spin_unlock_mutex(&lock->wait_lock, flags);
  }
  
  /*
   * Release the lock, slowpath:
   */
  static noinline void
  __mutex_unlock_slowpath(atomic_t *lock_count)
  {
  	__mutex_unlock_common_slowpath(lock_count, 1);
  }
  #ifndef CONFIG_DEBUG_LOCK_ALLOC
  /*
   * Here come the less common (and hence less performance-critical) APIs:
   * mutex_lock_interruptible() and mutex_trylock().
   */
  static noinline int __sched
  __mutex_lock_killable_slowpath(atomic_t *lock_count);
  static noinline int __sched
  __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
  
  /***
   * mutex_lock_interruptible - acquire the mutex, interruptible
   * @lock: the mutex to be acquired
   *
   * Lock the mutex like mutex_lock(), and return 0 if the mutex has
   * been acquired or sleep until the mutex becomes available. If a
   * signal arrives while waiting for the lock then this function
   * returns -EINTR.
   *
   * This function is similar to (but not equivalent to) down_interruptible().
   */
  int __sched mutex_lock_interruptible(struct mutex *lock)
  {
  	might_sleep();
  	return __mutex_fastpath_lock_retval
  			(&lock->count, __mutex_lock_interruptible_slowpath);
  }
  
  EXPORT_SYMBOL(mutex_lock_interruptible);
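
  /*
   * A usage sketch (illustrative only; foo_wait_and_update() is a
   * hypothetical caller). The return value must be checked: if a signal
   * arrives while waiting, the call fails and the mutex is not held:
   *
   *	static int foo_wait_and_update(struct foo_dev *dev, int val)
   *	{
   *		int ret = mutex_lock_interruptible(&dev->lock);
   *
   *		if (ret)
   *			return ret;
   *		dev->val = val;
   *		mutex_unlock(&dev->lock);
   *		return 0;
   *	}
   */
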
  int __sched mutex_lock_killable(struct mutex *lock)
  {
  	might_sleep();
  	return __mutex_fastpath_lock_retval
  			(&lock->count, __mutex_lock_killable_slowpath);
  }
  EXPORT_SYMBOL(mutex_lock_killable);
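
  /*
   * Illustrative note: mutex_lock_killable() is used like
   * mutex_lock_interruptible(), but as the TASK_KILLABLE handling in
   * __mutex_lock_common() shows, only a fatal signal aborts the wait:
   *
   *	if (mutex_lock_killable(&dev->lock))
   *		return -EINTR;
   */
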
  static noinline void __sched
  __mutex_lock_slowpath(atomic_t *lock_count)
  {
  	struct mutex *lock = container_of(lock_count, struct mutex, count);
  
  	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
  }
  static noinline int __sched
  __mutex_lock_killable_slowpath(atomic_t *lock_count)
  {
  	struct mutex *lock = container_of(lock_count, struct mutex, count);
  
  	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
  }
  static noinline int __sched
  __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
  {
  	struct mutex *lock = container_of(lock_count, struct mutex, count);
  	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
  }
  #endif
  
  /*
   * Spinlock based trylock; we take the spinlock and check whether we
   * can get the lock:
   */
  static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
  {
  	struct mutex *lock = container_of(lock_count, struct mutex, count);
  	unsigned long flags;
  	int prev;
  	spin_lock_mutex(&lock->wait_lock, flags);
  
  	prev = atomic_xchg(&lock->count, -1);
  	if (likely(prev == 1)) {
  		debug_mutex_set_owner(lock, current_thread_info());
  		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
  	}
  	/* Set it back to 0 if there are no waiters: */
  	if (likely(list_empty(&lock->wait_list)))
  		atomic_set(&lock->count, 0);
  	spin_unlock_mutex(&lock->wait_lock, flags);
  
  	return prev == 1;
  }
  
  /***
   * mutex_trylock - try to acquire the mutex, without waiting
   * @lock: the mutex to be acquired
   *
   * Try to acquire the mutex atomically. Returns 1 if the mutex
   * has been acquired successfully, and 0 on contention.
   *
   * NOTE: this function follows the spin_trylock() convention, so
   * its return value is the inverse of down_trylock()'s! Be careful
   * about this when converting semaphore users to mutexes.
   *
   * This function must not be used in interrupt context. The
   * mutex must be released by the same task that acquired it.
   */
  int __sched mutex_trylock(struct mutex *lock)
  {
  	return __mutex_fastpath_trylock(&lock->count,
  					__mutex_trylock_slowpath);
  }
  
  EXPORT_SYMBOL(mutex_trylock);
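
  /*
   * A usage sketch (illustrative only; foo_try_update() is hypothetical).
   * Note the spin_trylock()-style convention documented above: a nonzero
   * return means the mutex is now held and must be released, zero means
   * contention:
   *
   *	static int foo_try_update(struct foo_dev *dev, int val)
   *	{
   *		if (!mutex_trylock(&dev->lock))
   *			return -EBUSY;
   *		dev->val = val;
   *		mutex_unlock(&dev->lock);
   *		return 0;
   *	}
   */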