kernel/mutex.c

  /*
   * kernel/mutex.c
   *
   * Mutexes: blocking mutual exclusion locks
   *
   * Started by Ingo Molnar:
   *
   *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
   *
   * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
   * David Howells for suggestions and improvements.
   *
   *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
   *    from the -rt tree, where it was originally implemented for rtmutexes
   *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
   *    and Sven Dietrich.)
   *
   * Also see Documentation/mutex-design.txt.
   */
  #include <linux/mutex.h>
  #include <linux/sched.h>
  #include <linux/export.h>
  #include <linux/spinlock.h>
  #include <linux/interrupt.h>
  #include <linux/debug_locks.h>
  
  /*
   * In the DEBUG case we are using the "NULL fastpath" for mutexes,
   * which forces all calls into the slowpath:
   */
  #ifdef CONFIG_DEBUG_MUTEXES
  # include "mutex-debug.h"
  # include <asm-generic/mutex-null.h>
  #else
  # include "mutex.h"
  # include <asm/mutex.h>
  #endif
  void
  __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
  {
  	atomic_set(&lock->count, 1);
  	spin_lock_init(&lock->wait_lock);
  	INIT_LIST_HEAD(&lock->wait_list);
  	mutex_clear_owner(lock);

  	debug_mutex_init(lock, name, key);
  }
  
  EXPORT_SYMBOL(__mutex_init);
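
  /*
   * Editor's note: an illustrative, hypothetical caller, not part of the
   * original file. Dynamically allocated mutexes are initialized through the
   * mutex_init() wrapper (which supplies the lockdep class key and ends up
   * in __mutex_init() above); file-scope locks can use DEFINE_MUTEX() instead.
   */
  #if 0
  struct example_dev {			/* hypothetical structure */
  	struct mutex	lock;		/* protects 'state' */
  	int		state;
  };

  static void example_dev_setup(struct example_dev *dev)
  {
  	mutex_init(&dev->lock);		/* must run before the first lock */
  	dev->state = 0;
  }

  static DEFINE_MUTEX(example_static_lock);	/* statically defined variant */
  #endif
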
  #ifndef CONFIG_DEBUG_LOCK_ALLOC
  /*
   * We split the mutex lock/unlock logic into separate fastpath and
   * slowpath functions, to reduce the register pressure on the fastpath.
   * We also put the fastpath first in the kernel image, to make sure the
   * branch is predicted by the CPU as default-untaken.
   */
  static __used noinline void __sched
  __mutex_lock_slowpath(atomic_t *lock_count);

  /**
   * mutex_lock - acquire the mutex
   * @lock: the mutex to be acquired
   *
   * Lock the mutex exclusively for this task. If the mutex is not
   * available right now, it will sleep until it can get it.
   *
   * The mutex must later on be released by the same task that
   * acquired it. Recursive locking is not allowed. The task
   * may not exit without first unlocking the mutex. Also, kernel
   * memory where the mutex resides must not be freed with
   * the mutex still locked. The mutex must first be initialized
   * (or statically defined) before it can be locked. memset()-ing
   * the mutex to 0 is not allowed.
   *
   * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
   *   checks that will enforce the restrictions and will also do
   *   deadlock debugging. )
   *
   * This function is similar to (but not equivalent to) down().
   */
  void __sched mutex_lock(struct mutex *lock)
  {
  	might_sleep();
  	/*
  	 * The locking fastpath is the 1->0 transition from
  	 * 'unlocked' into 'locked' state.
  	 */
  	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
  	mutex_set_owner(lock);
  }
  
  EXPORT_SYMBOL(mutex_lock);
  #endif

  static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

  /**
   * mutex_unlock - release the mutex
   * @lock: the mutex to be released
   *
   * Unlock a mutex that has been locked by this task previously.
   *
   * This function must not be used in interrupt context. Unlocking
   * of a not locked mutex is not allowed.
   *
   * This function is similar to (but not equivalent to) up().
   */
  void __sched mutex_unlock(struct mutex *lock)
  {
  	/*
  	 * The unlocking fastpath is the 0->1 transition from 'locked'
  	 * into 'unlocked' state:
  	 */
  #ifndef CONFIG_DEBUG_MUTEXES
  	/*
  	 * When debugging is enabled we must not clear the owner prematurely:
  	 * the slow path will always be taken, and it clears the owner field
  	 * after verifying that the owner was indeed current.
  	 */
  	mutex_clear_owner(lock);
  #endif
  	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
  }
  
  EXPORT_SYMBOL(mutex_unlock);
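
  /*
   * Editor's note: a minimal usage sketch of the lock/unlock pairing described
   * in the two comments above; the lock and counter are hypothetical, not part
   * of this file. mutex_lock() may sleep, so the pattern below is only valid
   * in process context.
   */
  #if 0
  static DEFINE_MUTEX(example_lock);
  static int example_count;

  static void example_increment(void)
  {
  	mutex_lock(&example_lock);	/* sleeps until the mutex is free */
  	example_count++;		/* exclusive access to shared state */
  	mutex_unlock(&example_lock);	/* released by the same (current) task */
  }
  #endif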
  
  /*
   * Lock a mutex (possibly interruptible), slowpath:
   */
  static inline int __sched
  __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
  		    struct lockdep_map *nest_lock, unsigned long ip)
  {
  	struct task_struct *task = current;
  	struct mutex_waiter waiter;
  	unsigned long flags;

  	preempt_disable();
  	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
  
  #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
  	/*
  	 * Optimistic spinning.
  	 *
  	 * We try to spin for acquisition when we find that there are no
  	 * pending waiters and the lock owner is currently running on a
  	 * (different) CPU.
  	 *
  	 * The rationale is that if the lock owner is running, it is likely to
  	 * release the lock soon.
  	 *
  	 * Since this needs the lock owner, and this mutex implementation
  	 * doesn't track the owner atomically in the lock field, we need to
  	 * track it non-atomically.
  	 *
  	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
  	 * to serialize everything.
  	 */
  
  	for (;;) {
  		struct task_struct *owner;
  
  		/*
  		 * If there's an owner, wait for it to either
  		 * release the lock or go to sleep.
  		 */
  		owner = ACCESS_ONCE(lock->owner);
  		if (owner && !mutex_spin_on_owner(lock, owner))
  			break;
  		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
  			lock_acquired(&lock->dep_map, ip);
  			mutex_set_owner(lock);
  			preempt_enable();
  			return 0;
  		}
  		/*
  		 * When there's no owner, we might have preempted between the
  		 * owner acquiring the lock and setting the owner field. If
  		 * we're an RT task, that will live-lock because we won't let
  		 * the owner complete.
  		 */
  		if (!owner && (need_resched() || rt_task(task)))
  			break;
  		/*
  		 * The cpu_relax() call is a compiler barrier which forces
  		 * everything in this loop to be re-loaded. We don't need
  		 * memory barriers as we'll eventually observe the right
  		 * values at the cost of a few extra spins.
  		 */
  		arch_mutex_cpu_relax();
  	}
  #endif
  	spin_lock_mutex(&lock->wait_lock, flags);

  	debug_mutex_lock_common(lock, &waiter);
  	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
  
  	/* add waiting tasks to the end of the waitqueue (FIFO): */
  	list_add_tail(&waiter.list, &lock->wait_list);
  	waiter.task = task;
  	if (atomic_xchg(&lock->count, -1) == 1)
  		goto done;
  	lock_contended(&lock->dep_map, ip);

  	for (;;) {
  		/*
  		 * Let's try to take the lock again - this is needed even if
  		 * we get here for the first time (shortly after failing to
  		 * acquire the lock), to make sure that we get a wakeup once
  		 * it's unlocked. Later on, if we sleep, this is the
  		 * operation that gives us the lock. We xchg it to -1, so
  		 * that when we release the lock, we properly wake up the
  		 * other waiters:
  		 */
  		if (atomic_xchg(&lock->count, -1) == 1)
  			break;
  
  		/*
  		 * got a signal? (This code gets eliminated in the
  		 * TASK_UNINTERRUPTIBLE case.)
  		 */
  		if (unlikely(signal_pending_state(state, task))) {
  			mutex_remove_waiter(lock, &waiter,
  					    task_thread_info(task));
  			mutex_release(&lock->dep_map, 1, ip);
  			spin_unlock_mutex(&lock->wait_lock, flags);
  
  			debug_mutex_free_waiter(&waiter);
  			preempt_enable();
  			return -EINTR;
  		}
  		__set_task_state(task, state);
  		/* didn't get the lock, go to sleep: */
  		spin_unlock_mutex(&lock->wait_lock, flags);
  		preempt_enable_no_resched();
  		schedule();
  		preempt_disable();
  		spin_lock_mutex(&lock->wait_lock, flags);
  	}
  done:
  	lock_acquired(&lock->dep_map, ip);
  	/* got the lock - rejoice! */
  	mutex_remove_waiter(lock, &waiter, current_thread_info());
  	mutex_set_owner(lock);
  
  	/* set it to 0 if there are no waiters left: */
  	if (likely(list_empty(&lock->wait_list)))
  		atomic_set(&lock->count, 0);
  	spin_unlock_mutex(&lock->wait_lock, flags);
  
  	debug_mutex_free_waiter(&waiter);
  	preempt_enable();

  	return 0;
  }
  #ifdef CONFIG_DEBUG_LOCK_ALLOC
  void __sched
  mutex_lock_nested(struct mutex *lock, unsigned int subclass)
  {
  	might_sleep();
  	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
  }
  
  EXPORT_SYMBOL_GPL(mutex_lock_nested);
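
  /*
   * Editor's note: a hypothetical sketch, not from the original file, of why
   * mutex_lock_nested() exists: when two locks of the same lock class are
   * nested on purpose, the inner acquisition is annotated with a subclass
   * (e.g. SINGLE_DEPTH_NESTING) so lockdep does not flag it as a deadlock.
   */
  #if 0
  struct example_node {			/* hypothetical object type */
  	struct mutex	lock;
  };

  static void example_lock_pair(struct example_node *a, struct example_node *b)
  {
  	/* the caller guarantees a stable ordering, e.g. by address */
  	mutex_lock(&a->lock);
  	mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
  }
  #endif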

  void __sched
  _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
  {
  	might_sleep();
  	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
  }
  
  EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
  int __sched
  mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
  {
  	might_sleep();
  	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
  }
  EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
  
  int __sched
  mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
  {
  	might_sleep();
  	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
  				   subclass, NULL, _RET_IP_);
  }
  
  EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
  #endif
  /*
   * Release the lock, slowpath:
   */
  static inline void
  __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
  {
  	struct mutex *lock = container_of(lock_count, struct mutex, count);
  	unsigned long flags;

  	spin_lock_mutex(&lock->wait_lock, flags);
  	mutex_release(&lock->dep_map, nested, _RET_IP_);
  	debug_mutex_unlock(lock);
  
  	/*
  	 * some architectures leave the lock unlocked in the fastpath failure
  	 * case, others need to leave it locked. In the latter case we have to
  	 * unlock it here.
  	 */
  	if (__mutex_slowpath_needs_to_unlock())
  		atomic_set(&lock->count, 1);
  	if (!list_empty(&lock->wait_list)) {
  		/* get the first entry from the wait-list: */
  		struct mutex_waiter *waiter =
  				list_entry(lock->wait_list.next,
  					   struct mutex_waiter, list);
  
  		debug_mutex_wake_waiter(lock, waiter);
  
  		wake_up_process(waiter->task);
  	}
  	spin_unlock_mutex(&lock->wait_lock, flags);
  }
  
  /*
   * Release the lock, slowpath:
   */
  static __used noinline void
  __mutex_unlock_slowpath(atomic_t *lock_count)
  {
  	__mutex_unlock_common_slowpath(lock_count, 1);
  }
  #ifndef CONFIG_DEBUG_LOCK_ALLOC
  /*
   * Here come the less common (and hence less performance-critical) APIs:
   * mutex_lock_interruptible() and mutex_trylock().
   */
  static noinline int __sched
  __mutex_lock_killable_slowpath(atomic_t *lock_count);
  static noinline int __sched
  __mutex_lock_interruptible_slowpath(atomic_t *lock_count);

  /**
   * mutex_lock_interruptible - acquire the mutex, interruptible
   * @lock: the mutex to be acquired
   *
   * Lock the mutex like mutex_lock(), and return 0 if the mutex has
   * been acquired or sleep until the mutex becomes available. If a
   * signal arrives while waiting for the lock then this function
   * returns -EINTR.
   *
   * This function is similar to (but not equivalent to) down_interruptible().
   */
  int __sched mutex_lock_interruptible(struct mutex *lock)
  {
  	int ret;
  	might_sleep();
  	ret = __mutex_fastpath_lock_retval
  			(&lock->count, __mutex_lock_interruptible_slowpath);
  	if (!ret)
  		mutex_set_owner(lock);
  
  	return ret;
  }
  
  EXPORT_SYMBOL(mutex_lock_interruptible);
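
  /*
   * Editor's note: an illustrative caller (hypothetical names, not part of
   * this file). The return value of mutex_lock_interruptible() must be
   * checked; on -EINTR the caller backs out without touching the protected
   * state.
   */
  #if 0
  static DEFINE_MUTEX(example_io_lock);

  static int example_do_op(void)
  {
  	int ret;

  	ret = mutex_lock_interruptible(&example_io_lock);
  	if (ret)
  		return ret;		/* a signal arrived while sleeping */

  	/* ... work on state protected by example_io_lock ... */

  	mutex_unlock(&example_io_lock);
  	return 0;
  }
  #endif
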
  int __sched mutex_lock_killable(struct mutex *lock)
  {
  	int ret;
  	might_sleep();
  	ret = __mutex_fastpath_lock_retval
  			(&lock->count, __mutex_lock_killable_slowpath);
  	if (!ret)
  		mutex_set_owner(lock);
  
  	return ret;
  }
  EXPORT_SYMBOL(mutex_lock_killable);
  static __used noinline void __sched
  __mutex_lock_slowpath(atomic_t *lock_count)
  {
  	struct mutex *lock = container_of(lock_count, struct mutex, count);
  	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
  }
  static noinline int __sched
  __mutex_lock_killable_slowpath(atomic_t *lock_count)
  {
  	struct mutex *lock = container_of(lock_count, struct mutex, count);
  	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
  }
  static noinline int __sched
  __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
  {
  	struct mutex *lock = container_of(lock_count, struct mutex, count);
  	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
  }
  #endif
  
  /*
   * Spinlock based trylock, we take the spinlock and check whether we
   * can get the lock:
   */
  static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
  {
  	struct mutex *lock = container_of(lock_count, struct mutex, count);
  	unsigned long flags;
  	int prev;
  	spin_lock_mutex(&lock->wait_lock, flags);
  
  	prev = atomic_xchg(&lock->count, -1);
  	if (likely(prev == 1)) {
  		mutex_set_owner(lock);
  		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
  	}

  	/* Set it back to 0 if there are no waiters: */
  	if (likely(list_empty(&lock->wait_list)))
  		atomic_set(&lock->count, 0);
  	spin_unlock_mutex(&lock->wait_lock, flags);
  
  	return prev == 1;
  }
  /**
   * mutex_trylock - try to acquire the mutex, without waiting
   * @lock: the mutex to be acquired
   *
   * Try to acquire the mutex atomically. Returns 1 if the mutex
   * has been acquired successfully, and 0 on contention.
   *
   * NOTE: this function follows the spin_trylock() convention, so
   * it is negated from the down_trylock() return values! Be careful
   * about this when converting semaphore users to mutexes.
   *
   * This function must not be used in interrupt context. The
   * mutex must be released by the same task that acquired it.
   */
  int __sched mutex_trylock(struct mutex *lock)
  {
  	int ret;
  
  	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
  	if (ret)
  		mutex_set_owner(lock);
  
  	return ret;
  }
  EXPORT_SYMBOL(mutex_trylock);
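
  /*
   * Editor's note: a hypothetical sketch, not from the original file, showing
   * the spin_trylock()-style convention documented above: 1 means the mutex
   * was taken and must be unlocked, 0 means contention and nothing to undo.
   */
  #if 0
  static DEFINE_MUTEX(example_cache_lock);

  static void example_try_shrink(void)
  {
  	if (!mutex_trylock(&example_cache_lock))
  		return;			/* contended: skip, retry later */

  	/* ... shrink state protected by example_cache_lock ... */

  	mutex_unlock(&example_cache_lock);
  }
  #endif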
  
  /**
   * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
   * @cnt: the atomic which we are to dec
   * @lock: the mutex to return holding if we dec to 0
   *
   * return true and hold lock if we dec to 0, return false otherwise
   */
  int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
  {
  	/* dec if we can't possibly hit 0 */
  	if (atomic_add_unless(cnt, -1, 1))
  		return 0;
  	/* we might hit 0, so take the lock */
  	mutex_lock(lock);
  	if (!atomic_dec_and_test(cnt)) {
  		/* when we actually did the dec, we didn't hit 0 */
  		mutex_unlock(lock);
  		return 0;
  	}
  	/* we hit 0, and we hold the lock */
  	return 1;
  }
  EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
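
  /*
   * Editor's note: a hypothetical teardown pattern, not part of the original
   * file, showing the intended use of atomic_dec_and_mutex_lock(): only the
   * task that drops the final reference gets the mutex (and must release it),
   * so unlinking and freeing are serialized against new lookups. Assumes
   * <linux/list.h> and <linux/slab.h> for list_del() and kfree().
   */
  #if 0
  static DEFINE_MUTEX(example_registry_lock);

  struct example_obj {			/* hypothetical refcounted object */
  	atomic_t		refcount;
  	struct list_head	node;	/* linked into a registry list */
  };

  static void example_put(struct example_obj *obj)
  {
  	/* returns 1, with the mutex held, only for the final reference */
  	if (!atomic_dec_and_mutex_lock(&obj->refcount, &example_registry_lock))
  		return;

  	list_del(&obj->node);		/* unlink under the registry lock */
  	mutex_unlock(&example_registry_lock);
  	kfree(obj);
  }
  #endif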