  /*
   * kernel/mutex.c
   *
   * Mutexes: blocking mutual exclusion locks
   *
   * Started by Ingo Molnar:
   *
   *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
   *
   * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
   * David Howells for suggestions and improvements.
   *
   * Also see Documentation/mutex-design.txt.
   */
  #include <linux/mutex.h>
  #include <linux/sched.h>
  #include <linux/module.h>
  #include <linux/spinlock.h>
  #include <linux/interrupt.h>
  #include <linux/debug_locks.h>
  
  /*
   * In the DEBUG case we are using the "NULL fastpath" for mutexes,
   * which forces all calls into the slowpath:
   */
  #ifdef CONFIG_DEBUG_MUTEXES
  # include "mutex-debug.h"
  # include <asm-generic/mutex-null.h>
  #else
  # include "mutex.h"
  # include <asm/mutex.h>
  #endif
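
  /*
   * For reference, the NULL fastpath selected above amounts to macros
   * along these lines (a sketch, roughly what asm-generic/mutex-null.h
   * provides): each fastpath simply calls the slowpath function, so
   * the debug checks always run.
   *
   *	#define __mutex_fastpath_lock(count, fail_fn)	fail_fn(count)
   *	#define __mutex_fastpath_unlock(count, fail_fn)	fail_fn(count)
   *	#define __mutex_slowpath_needs_to_unlock()	1
   */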
  
  /***
   * mutex_init - initialize the mutex
   * @lock: the mutex to be initialized
   * @key: the lock_class_key for the class; used by mutex lock debugging
   *
   * Initialize the mutex to the unlocked state.
   *
   * It is not allowed to initialize an already locked mutex.
   */
  void
  __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
  {
  	atomic_set(&lock->count, 1);
  	spin_lock_init(&lock->wait_lock);
  	INIT_LIST_HEAD(&lock->wait_list);
  	debug_mutex_init(lock, name, key);
  }
  
  EXPORT_SYMBOL(__mutex_init);
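
  /*
   * Typical usage, as an illustrative sketch ("my_dev" is a made-up
   * example): callers normally use the mutex_init() wrapper, which
   * supplies the lock_class_key, or DEFINE_MUTEX() for static
   * definitions, rather than calling __mutex_init() directly.
   *
   *	static DEFINE_MUTEX(global_lock);
   *
   *	struct my_dev {
   *		struct mutex lock;
   *	};
   *
   *	void my_dev_setup(struct my_dev *dev)
   *	{
   *		mutex_init(&dev->lock);
   *	}
   */
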
  #ifndef CONFIG_DEBUG_LOCK_ALLOC
  /*
   * We split the mutex lock/unlock logic into separate fastpath and
   * slowpath functions, to reduce the register pressure on the fastpath.
   * We also put the fastpath first in the kernel image, to make sure the
   * branch is predicted by the CPU as default-untaken.
   */
  static __used noinline void __sched
  __mutex_lock_slowpath(atomic_t *lock_count);
  
  /***
   * mutex_lock - acquire the mutex
   * @lock: the mutex to be acquired
   *
   * Lock the mutex exclusively for this task. If the mutex is not
   * available right now, it will sleep until it can get it.
   *
   * The mutex must later on be released by the same task that
   * acquired it. Recursive locking is not allowed. The task
   * may not exit without first unlocking the mutex. Also, kernel
   * memory where the mutex resides must not be freed with
   * the mutex still locked. The mutex must first be initialized
   * (or statically defined) before it can be locked. memset()-ing
   * the mutex to 0 is not allowed.
   *
   * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
   *   checks that will enforce the restrictions and will also do
   *   deadlock debugging. )
   *
   * This function is similar to (but not equivalent to) down().
   */
  void inline __sched mutex_lock(struct mutex *lock)
  {
  	might_sleep();
  	/*
  	 * The locking fastpath is the 1->0 transition from
  	 * 'unlocked' into 'locked' state.
  	 */
  	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
  }
  
  EXPORT_SYMBOL(mutex_lock);
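
  /*
   * A minimal usage sketch (hypothetical names): serializing updates
   * to shared data from process context.
   *
   *	static DEFINE_MUTEX(counter_lock);
   *	static int counter;
   *
   *	void counter_inc(void)
   *	{
   *		mutex_lock(&counter_lock);
   *		counter++;
   *		mutex_unlock(&counter_lock);
   *	}
   */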
  #endif

  static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
  
  /***
   * mutex_unlock - release the mutex
   * @lock: the mutex to be released
   *
   * Unlock a mutex that has been locked by this task previously.
   *
   * This function must not be used in interrupt context. Unlocking
   * a mutex that is not locked is not allowed.
   *
   * This function is similar to (but not equivalent to) up().
   */
  void __sched mutex_unlock(struct mutex *lock)
  {
  	/*
  	 * The unlocking fastpath is the 0->1 transition from 'locked'
  	 * into 'unlocked' state:
  	 */
  	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
  }
  
  EXPORT_SYMBOL(mutex_unlock);
  
  /*
   * Lock a mutex (possibly interruptible), slowpath:
   */
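  /*
   * Reminder of the lock->count protocol relied upon below (see the
   * struct mutex definition in include/linux/mutex.h): 1 means
   * unlocked, 0 means locked with no waiters, and a negative value
   * means locked with possible waiters.
   */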
  static inline int __sched
  __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
  	       	unsigned long ip)
  {
  	struct task_struct *task = current;
  	struct mutex_waiter waiter;
  	unsigned int old_val;
  	unsigned long flags;

  	spin_lock_mutex(&lock->wait_lock, flags);

  	debug_mutex_lock_common(lock, &waiter);
  	mutex_acquire(&lock->dep_map, subclass, 0, ip);
  	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
  
  	/* add waiting tasks to the end of the waitqueue (FIFO): */
  	list_add_tail(&waiter.list, &lock->wait_list);
  	waiter.task = task;
  	old_val = atomic_xchg(&lock->count, -1);
  	if (old_val == 1)
  		goto done;
  	lock_contended(&lock->dep_map, ip);

  	for (;;) {
  		/*
  		 * Let's try to take the lock again - this is needed even if
  		 * we get here for the first time (shortly after failing to
  		 * acquire the lock), to make sure that we get a wakeup once
  		 * it's unlocked. Later on, if we sleep, this is the
  		 * operation that gives us the lock. We xchg it to -1, so
  		 * that when we release the lock, we properly wake up the
  		 * other waiters:
  		 */
  		old_val = atomic_xchg(&lock->count, -1);
  		if (old_val == 1)
  			break;
  
  		/*
  		 * got a signal? (This code gets eliminated in the
  		 * TASK_UNINTERRUPTIBLE case.)
  		 */
  		if (unlikely(signal_pending_state(state, task))) {
  			mutex_remove_waiter(lock, &waiter,
  					    task_thread_info(task));
  			mutex_release(&lock->dep_map, 1, ip);
  			spin_unlock_mutex(&lock->wait_lock, flags);
  
  			debug_mutex_free_waiter(&waiter);
  			return -EINTR;
  		}
  		__set_task_state(task, state);
  
  		/* didn't get the lock, go to sleep: */
  		spin_unlock_mutex(&lock->wait_lock, flags);
  		schedule();
  		spin_lock_mutex(&lock->wait_lock, flags);
  	}
  done:
  	lock_acquired(&lock->dep_map, ip);
  	/* got the lock - rejoice! */
  	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
  	debug_mutex_set_owner(lock, task_thread_info(task));
  
  	/* set it to 0 if there are no waiters left: */
  	if (likely(list_empty(&lock->wait_list)))
  		atomic_set(&lock->count, 0);
  	spin_unlock_mutex(&lock->wait_lock, flags);
  
  	debug_mutex_free_waiter(&waiter);
  	return 0;
  }
  #ifdef CONFIG_DEBUG_LOCK_ALLOC
  void __sched
  mutex_lock_nested(struct mutex *lock, unsigned int subclass)
  {
  	might_sleep();
  	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
  }
  
  EXPORT_SYMBOL_GPL(mutex_lock_nested);
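
  /*
   * Annotation sketch (hypothetical scenario): when two mutexes of the
   * same lock class must legitimately be held at once, giving the
   * second acquisition its own subclass keeps lockdep from reporting
   * a false recursive-locking deadlock:
   *
   *	mutex_lock(&parent->lock);
   *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
   */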
  
  int __sched
  mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
  {
  	might_sleep();
  	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
  }
  EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
  
  int __sched
  mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
  {
  	might_sleep();
  	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
  }
  
  EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
  #endif
  /*
   * Release the lock, slowpath:
   */
  static inline void
  __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
  {
  	struct mutex *lock = container_of(lock_count, struct mutex, count);
  	unsigned long flags;

  	spin_lock_mutex(&lock->wait_lock, flags);
  	mutex_release(&lock->dep_map, nested, _RET_IP_);
  	debug_mutex_unlock(lock);
  
  	/*
  	 * some architectures leave the lock unlocked in the fastpath failure
  	 * case, others need to leave it locked. In the latter case we have to
  	 * unlock it here.
  	 */
  	if (__mutex_slowpath_needs_to_unlock())
  		atomic_set(&lock->count, 1);
  	if (!list_empty(&lock->wait_list)) {
  		/* get the first entry from the wait-list: */
  		struct mutex_waiter *waiter =
  				list_entry(lock->wait_list.next,
  					   struct mutex_waiter, list);
  
  		debug_mutex_wake_waiter(lock, waiter);
  
  		wake_up_process(waiter->task);
  	}
  
  	debug_mutex_clear_owner(lock);
  	spin_unlock_mutex(&lock->wait_lock, flags);
  }
  
  /*
   * Release the lock, slowpath:
   */
  static __used noinline void
  __mutex_unlock_slowpath(atomic_t *lock_count)
  {
  	__mutex_unlock_common_slowpath(lock_count, 1);
  }
  #ifndef CONFIG_DEBUG_LOCK_ALLOC
  /*
   * Here come the less common (and hence less performance-critical) APIs:
   * mutex_lock_interruptible() and mutex_trylock().
   */
  static noinline int __sched
  __mutex_lock_killable_slowpath(atomic_t *lock_count);
  static noinline int __sched
  __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
  
  /***
   * mutex_lock_interruptible - acquire the mutex, interruptible
   * @lock: the mutex to be acquired
   *
   * Lock the mutex like mutex_lock(), and return 0 if the mutex has
   * been acquired, or sleep until the mutex becomes available. If a
   * signal arrives while waiting for the lock then this function
   * returns -EINTR.
   *
   * This function is similar to (but not equivalent to) down_interruptible().
   */
  int __sched mutex_lock_interruptible(struct mutex *lock)
  {
  	might_sleep();
  	return __mutex_fastpath_lock_retval
  			(&lock->count, __mutex_lock_interruptible_slowpath);
  }
  
  EXPORT_SYMBOL(mutex_lock_interruptible);
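
  /*
   * A usage sketch (hypothetical device code): the return value must
   * be checked, typically restarting the syscall when a signal
   * interrupted the wait.
   *
   *	if (mutex_lock_interruptible(&dev->lock))
   *		return -ERESTARTSYS;
   *	...
   *	mutex_unlock(&dev->lock);
   */
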
  int __sched mutex_lock_killable(struct mutex *lock)
  {
  	might_sleep();
  	return __mutex_fastpath_lock_retval
  			(&lock->count, __mutex_lock_killable_slowpath);
  }
  EXPORT_SYMBOL(mutex_lock_killable);
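
  /*
   * mutex_lock_killable() is like mutex_lock_interruptible(), except
   * that only a fatal (task-killing) signal interrupts the wait. A
   * sketch (hypothetical name):
   *
   *	if (mutex_lock_killable(&dev->lock))
   *		return -EINTR;
   */
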
  static __used noinline void __sched
  __mutex_lock_slowpath(atomic_t *lock_count)
  {
  	struct mutex *lock = container_of(lock_count, struct mutex, count);
  
  	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
  }
  static noinline int __sched
  __mutex_lock_killable_slowpath(atomic_t *lock_count)
  {
  	struct mutex *lock = container_of(lock_count, struct mutex, count);
  
  	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
  }
  static noinline int __sched
  __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
  {
  	struct mutex *lock = container_of(lock_count, struct mutex, count);
  	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
  }
  #endif
  
  /*
   * Spinlock based trylock: we take the spinlock and check whether we
   * can get the lock:
   */
  static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
  {
  	struct mutex *lock = container_of(lock_count, struct mutex, count);
  	unsigned long flags;
  	int prev;
  	spin_lock_mutex(&lock->wait_lock, flags);
  
  	prev = atomic_xchg(&lock->count, -1);
  	if (likely(prev == 1)) {
  		debug_mutex_set_owner(lock, current_thread_info());
  		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
  	}
  	/* Set it back to 0 if there are no waiters: */
  	if (likely(list_empty(&lock->wait_list)))
  		atomic_set(&lock->count, 0);
  	spin_unlock_mutex(&lock->wait_lock, flags);
  
  	return prev == 1;
  }
  
  /***
   * mutex_trylock - try to acquire the mutex, without waiting
   * @lock: the mutex to be acquired
   *
   * Try to acquire the mutex atomically. Returns 1 if the mutex
   * has been acquired successfully, and 0 on contention.
   *
   * NOTE: this function follows the spin_trylock() convention, so
   * its return values are the opposite of down_trylock()'s! Be
   * careful about this when converting semaphore users to mutexes.
   *
   * This function must not be used in interrupt context. The
   * mutex must be released by the same task that acquired it.
   */
  int __sched mutex_trylock(struct mutex *lock)
  {
  	return __mutex_fastpath_trylock(&lock->count,
  					__mutex_trylock_slowpath);
  }
  
  EXPORT_SYMBOL(mutex_trylock);
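
  /*
   * A usage sketch (hypothetical names), minding the spin_trylock()
   * style return convention documented above (1 on success, 0 on
   * contention):
   *
   *	if (mutex_trylock(&cache->lock)) {
   *		shrink_cache(cache);
   *		mutex_unlock(&cache->lock);
   *	}
   */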