  /*
   * kernel/mutex.c
   *
   * Mutexes: blocking mutual exclusion locks
   *
   * Started by Ingo Molnar:
   *
   *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
   *
   * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
   * David Howells for suggestions and improvements.
   *
   *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
   *    from the -rt tree, where it was originally implemented for rtmutexes
   *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
   *    and Sven Dietrich.
   *
   * Also see Documentation/mutex-design.txt.
   */
  #include <linux/mutex.h>
  #include <linux/ww_mutex.h>
  #include <linux/sched.h>
  #include <linux/sched/rt.h>
  #include <linux/export.h>
  #include <linux/spinlock.h>
  #include <linux/interrupt.h>
  #include <linux/debug_locks.h>
  
  /*
   * In the DEBUG case we are using the "NULL fastpath" for mutexes,
   * which forces all calls into the slowpath:
   */
  #ifdef CONFIG_DEBUG_MUTEXES
  # include "mutex-debug.h"
  # include <asm-generic/mutex-null.h>
  #else
  # include "mutex.h"
  # include <asm/mutex.h>
  #endif
  /*
   * A negative mutex count indicates that waiters are sleeping waiting for the
   * mutex.
   */
  #define	MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) >= 0)
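  
  /*
   * For reference, ->count encodes the lock state as follows (this simply
   * summarizes the conventions used throughout this file):
   *
   *	 1 - unlocked
   *	 0 - locked, no waiters
   *	<0 - locked, with (possible) waiters queued on ->wait_list
   */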

  void
  __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
  {
  	atomic_set(&lock->count, 1);
  	spin_lock_init(&lock->wait_lock);
  	INIT_LIST_HEAD(&lock->wait_list);
  	mutex_clear_owner(lock);
  #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
  	lock->spin_mlock = NULL;
  #endif

  	debug_mutex_init(lock, name, key);
  }
  
  EXPORT_SYMBOL(__mutex_init);
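  
  /*
   * Illustrative usage sketch (not part of this file): a mutex must be
   * initialized before first use, either statically or at runtime via
   * mutex_init() from <linux/mutex.h>, which wraps __mutex_init(). The
   * my_device / my_device_setup names below are hypothetical.
   *
   *	static DEFINE_MUTEX(registry_lock);	// statically initialized
   *
   *	struct my_device {
   *		struct mutex	io_lock;
   *	};
   *
   *	static void my_device_setup(struct my_device *dev)
   *	{
   *		mutex_init(&dev->io_lock);	// runtime initialization
   *	}
   */
  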
  #ifndef CONFIG_DEBUG_LOCK_ALLOC
  /*
   * We split the mutex lock/unlock logic into separate fastpath and
   * slowpath functions, to reduce the register pressure on the fastpath.
   * We also put the fastpath first in the kernel image, to make sure the
   * branch is predicted by the CPU as default-untaken.
   */
  static __used noinline void __sched
  __mutex_lock_slowpath(atomic_t *lock_count);

  /**
   * mutex_lock - acquire the mutex
   * @lock: the mutex to be acquired
   *
   * Lock the mutex exclusively for this task. If the mutex is not
   * available right now, it will sleep until it can get it.
   *
   * The mutex must later on be released by the same task that
   * acquired it. Recursive locking is not allowed. The task
   * may not exit without first unlocking the mutex. Also, kernel
   * memory where the mutex resides must not be freed with
   * the mutex still locked. The mutex must first be initialized
   * (or statically defined) before it can be locked. memset()-ing
   * the mutex to 0 is not allowed.
   *
   * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
   *   checks that will enforce the restrictions and will also do
   *   deadlock debugging. )
   *
   * This function is similar to (but not equivalent to) down().
   */
  void __sched mutex_lock(struct mutex *lock)
  {
  	might_sleep();
  	/*
  	 * The locking fastpath is the 1->0 transition from
  	 * 'unlocked' into 'locked' state.
  	 */
  	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
  	mutex_set_owner(lock);
  }
  
  EXPORT_SYMBOL(mutex_lock);
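  
  /*
   * Illustrative usage sketch (not part of this file): the usual
   * lock/modify/unlock pattern around shared data. The struct and function
   * names (stats, stats_inc) are hypothetical.
   *
   *	struct stats {
   *		struct mutex	lock;
   *		unsigned long	events;
   *	};
   *
   *	static void stats_inc(struct stats *s)
   *	{
   *		mutex_lock(&s->lock);		// may sleep; process context only
   *		s->events++;
   *		mutex_unlock(&s->lock);
   *	}
   */
  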
  #endif

  #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
  /*
   * In order to avoid a stampede of mutex spinners from acquiring the mutex
   * more or less simultaneously, the spinners need to acquire a MCS lock
   * first before spinning on the owner field.
   *
   * We don't inline mspin_lock() so that perf can correctly account for the
   * time spent in this lock function.
   */
  struct mspin_node {
  	struct mspin_node *next;
  	int		  locked;	/* 1 if lock acquired */
  };
  #define	MLOCK(mutex)	((struct mspin_node **)&((mutex)->spin_mlock))
  
  static noinline
  void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
  {
  	struct mspin_node *prev;
  
  	/* Init node */
  	node->locked = 0;
  	node->next   = NULL;
  
  	prev = xchg(lock, node);
  	if (likely(prev == NULL)) {
  		/* Lock acquired */
  		node->locked = 1;
  		return;
  	}
  	ACCESS_ONCE(prev->next) = node;
  	smp_wmb();
  	/* Wait until the lock holder passes the lock down */
  	while (!ACCESS_ONCE(node->locked))
  		arch_mutex_cpu_relax();
  }
  
  static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
  {
  	struct mspin_node *next = ACCESS_ONCE(node->next);
  
  	if (likely(!next)) {
  		/*
  		 * Release the lock by setting it to NULL
  		 */
  		if (cmpxchg(lock, node, NULL) == node)
  			return;
  		/* Wait until the next pointer is set */
  		while (!(next = ACCESS_ONCE(node->next)))
  			arch_mutex_cpu_relax();
  	}
  	ACCESS_ONCE(next->locked) = 1;
  	smp_wmb();
  }
  
  /*
   * Mutex spinning code migrated from kernel/sched/core.c
   */
  
  static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
  {
  	if (lock->owner != owner)
  		return false;
  
  	/*
  	 * Ensure we emit the owner->on_cpu dereference _after_ checking that
  	 * lock->owner still matches owner. If that fails, owner might
  	 * point to free()d memory; if it still matches, the rcu_read_lock()
  	 * ensures the memory stays valid.
  	 */
  	barrier();
  
  	return owner->on_cpu;
  }
  
  /*
   * Look out! "owner" is an entirely speculative pointer
   * access and not reliable.
   */
  static noinline
  int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
  {
  	rcu_read_lock();
  	while (owner_running(lock, owner)) {
  		if (need_resched())
  			break;
  
  		arch_mutex_cpu_relax();
  	}
  	rcu_read_unlock();
  
  	/*
  	 * We break out the loop above on need_resched() and when the
  	 * owner changed, which is a sign for heavy contention. Return
  	 * success only when lock->owner is NULL.
  	 */
  	return lock->owner == NULL;
  }
  
  /*
   * Initial check for entering the mutex spinning loop
   */
  static inline int mutex_can_spin_on_owner(struct mutex *lock)
  {
  	int retval = 1;
  
  	rcu_read_lock();
  	if (lock->owner)
  		retval = lock->owner->on_cpu;
  	rcu_read_unlock();
  	/*
  	 * If lock->owner is not set, the owner may have just acquired the
  	 * mutex and not yet set the owner field, or the mutex may have been
  	 * released.
  	 */
  	return retval;
  }
  #endif
  static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

  /**
   * mutex_unlock - release the mutex
   * @lock: the mutex to be released
   *
   * Unlock a mutex that has been locked by this task previously.
   *
   * This function must not be used in interrupt context. Unlocking
   * a mutex that is not locked is not allowed.
   *
   * This function is similar to (but not equivalent to) up().
   */
  void __sched mutex_unlock(struct mutex *lock)
  {
  	/*
  	 * The unlocking fastpath is the 0->1 transition from 'locked'
  	 * into 'unlocked' state:
  	 */
  #ifndef CONFIG_DEBUG_MUTEXES
  	/*
  	 * When debugging is enabled we must not clear the owner prematurely;
  	 * the slow path will always be taken, and it clears the owner field
  	 * after verifying that it was indeed current.
  	 */
  	mutex_clear_owner(lock);
  #endif
  	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
  }
  
  EXPORT_SYMBOL(mutex_unlock);
  /**
   * ww_mutex_unlock - release the w/w mutex
   * @lock: the mutex to be released
   *
   * Unlock a mutex that has been locked by this task previously with any of the
   * ww_mutex_lock* functions (with or without an acquire context). It is
   * forbidden to release the locks after releasing the acquire context.
   *
   * This function must not be used in interrupt context. Unlocking
   * a mutex that is not locked is not allowed.
   */
  void __sched ww_mutex_unlock(struct ww_mutex *lock)
  {
  	/*
  	 * The unlocking fastpath is the 0->1 transition from 'locked'
  	 * into 'unlocked' state:
  	 */
  	if (lock->ctx) {
  #ifdef CONFIG_DEBUG_MUTEXES
  		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
  #endif
  		if (lock->ctx->acquired > 0)
  			lock->ctx->acquired--;
  		lock->ctx = NULL;
  	}
  
  #ifndef CONFIG_DEBUG_MUTEXES
  	/*
  	 * When debugging is enabled we must not clear the owner prematurely;
  	 * the slow path will always be taken, and it clears the owner field
  	 * after verifying that it was indeed current.
  	 */
  	mutex_clear_owner(&lock->base);
  #endif
  	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
  }
  EXPORT_SYMBOL(ww_mutex_unlock);
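  
  /*
   * Illustrative usage sketch (not part of this file), loosely following the
   * pattern described in Documentation/ww-mutex-design.txt: acquire two
   * wound/wait mutexes of the same class, backing off and retrying whenever
   * -EDEADLK indicates an older context has priority. The my_ww_class and
   * lock_both() names are hypothetical; return-value checks other than
   * -EDEADLK are trimmed for brevity.
   *
   *	static DEFINE_WW_CLASS(my_ww_class);
   *
   *	static void lock_both(struct ww_mutex *a, struct ww_mutex *b)
   *	{
   *		struct ww_acquire_ctx ctx;
   *
   *		ww_acquire_init(&ctx, &my_ww_class);
   *
   *		ww_mutex_lock(a, &ctx);
   *		while (ww_mutex_lock(b, &ctx) == -EDEADLK) {
   *			// we are the younger context: drop what we hold,
   *			// wait for the contended lock, then swap roles
   *			ww_mutex_unlock(a);
   *			ww_mutex_lock_slow(b, &ctx);
   *			swap(a, b);
   *		}
   *		ww_acquire_done(&ctx);
   *
   *		// ... both objects are now locked ...
   *
   *		ww_mutex_unlock(b);
   *		ww_mutex_unlock(a);
   *		ww_acquire_fini(&ctx);
   *	}
   */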
  
  static inline int __sched
  __mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
  {
  	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
  	struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
  
  	if (!hold_ctx)
  		return 0;
  
  	if (unlikely(ctx == hold_ctx))
  		return -EALREADY;
  
  	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
  	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
  #ifdef CONFIG_DEBUG_MUTEXES
  		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
  		ctx->contending_lock = ww;
  #endif
  		return -EDEADLK;
  	}
  
  	return 0;
  }
  
  static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
  						   struct ww_acquire_ctx *ww_ctx)
  {
  #ifdef CONFIG_DEBUG_MUTEXES
  	/*
  	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
  	 * but released with a normal mutex_unlock in this call.
  	 *
  	 * This should never happen, always use ww_mutex_unlock.
  	 */
  	DEBUG_LOCKS_WARN_ON(ww->ctx);
  
  	/*
  	 * Not quite done after calling ww_acquire_done() ?
  	 */
  	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
  
  	if (ww_ctx->contending_lock) {
  		/*
  		 * After -EDEADLK you tried to
  		 * acquire a different ww_mutex? Bad!
  		 */
  		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
  
  		/*
  		 * You called ww_mutex_lock after receiving -EDEADLK,
  		 * but 'forgot' to unlock everything else first?
  		 */
  		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
  		ww_ctx->contending_lock = NULL;
  	}
  
  	/*
  	 * Naughty, using a different class will lead to undefined behavior!
  	 */
  	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
  #endif
  	ww_ctx->acquired++;
  }
  
  /*
   * After acquiring the lock with the fastpath, or when we lost out in the
   * contested slowpath, set ctx and wake up any waiters so they can recheck.
   *
   * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
   * as the fastpath and opportunistic spinning are disabled in that case.
   */
  static __always_inline void
  ww_mutex_set_context_fastpath(struct ww_mutex *lock,
  			       struct ww_acquire_ctx *ctx)
  {
  	unsigned long flags;
  	struct mutex_waiter *cur;
  
  	ww_mutex_lock_acquired(lock, ctx);
  
  	lock->ctx = ctx;
  
  	/*
  	 * The lock->ctx update should be visible on all cores before
  	 * the atomic read is done, otherwise contended waiters might be
  	 * missed. The contended waiters will either see ww_ctx == NULL
  	 * and keep spinning, or they will acquire wait_lock, add themselves
  	 * to the wait list and sleep.
  	 */
  	smp_mb(); /* ^^^ */
  
  	/*
  	 * Check if lock is contended, if not there is nobody to wake up
  	 */
  	if (likely(atomic_read(&lock->base.count) == 0))
  		return;
  
  	/*
  	 * Uh oh, we raced in fastpath, wake up everyone in this case,
  	 * so they can see the new lock->ctx.
  	 */
  	spin_lock_mutex(&lock->base.wait_lock, flags);
  	list_for_each_entry(cur, &lock->base.wait_list, list) {
  		debug_mutex_wake_waiter(&lock->base, cur);
  		wake_up_process(cur->task);
  	}
  	spin_unlock_mutex(&lock->base.wait_lock, flags);
  }
  /*
   * Lock a mutex (possibly interruptible), slowpath:
   */
  static __always_inline int __sched
  __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
  		    struct lockdep_map *nest_lock, unsigned long ip,
  		    struct ww_acquire_ctx *ww_ctx)
  {
  	struct task_struct *task = current;
  	struct mutex_waiter waiter;
  	unsigned long flags;
  	int ret;

  	preempt_disable();
  	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
  
  #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
  	/*
  	 * Optimistic spinning.
  	 *
  	 * We try to spin for acquisition when we find that there are no
  	 * pending waiters and the lock owner is currently running on a
  	 * (different) CPU.
  	 *
  	 * The rationale is that if the lock owner is running, it is likely to
  	 * release the lock soon.
  	 *
  	 * Since this needs the lock owner, and this mutex implementation
  	 * doesn't track the owner atomically in the lock field, we need to
  	 * track it non-atomically.
  	 *
  	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
  	 * to serialize everything.
  	 *
  	 * The mutex spinners are queued up using MCS lock so that only one
  	 * spinner can compete for the mutex. However, if mutex spinning isn't
  	 * going to happen, there is no point in going through the lock/unlock
  	 * overhead.
  	 */
  	if (!mutex_can_spin_on_owner(lock))
  		goto slowpath;
  
  	for (;;) {
  		struct task_struct *owner;
  		struct mspin_node  node;

  		if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
  			struct ww_mutex *ww;
  
  			ww = container_of(lock, struct ww_mutex, base);
  			/*
  			 * If ww->ctx is set the contents are undefined; only
  			 * by acquiring wait_lock is there a guarantee that
  			 * they are valid when read.
  			 *
  			 * As such, when deadlock detection needs to be
  			 * performed the optimistic spinning cannot be done.
  			 */
  			if (ACCESS_ONCE(ww->ctx))
  				break;
  		}
  		/*
  		 * If there's an owner, wait for it to either
  		 * release the lock or go to sleep.
  		 */
  		mspin_lock(MLOCK(lock), &node);
  		owner = ACCESS_ONCE(lock->owner);
  		if (owner && !mutex_spin_on_owner(lock, owner)) {
  			mspin_unlock(MLOCK(lock), &node);
  			break;
  		}

  		if ((atomic_read(&lock->count) == 1) &&
  		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
  			lock_acquired(&lock->dep_map, ip);
  			if (!__builtin_constant_p(ww_ctx == NULL)) {
  				struct ww_mutex *ww;
  				ww = container_of(lock, struct ww_mutex, base);
  
  				ww_mutex_set_context_fastpath(ww, ww_ctx);
  			}
  			mutex_set_owner(lock);
  			mspin_unlock(MLOCK(lock), &node);
  			preempt_enable();
  			return 0;
  		}
  		mspin_unlock(MLOCK(lock), &node);

  		/*
  		 * When there's no owner, we might have preempted between the
  		 * owner acquiring the lock and setting the owner field. If
  		 * we're an RT task, that will live-lock because we won't let
  		 * the owner complete.
  		 */
  		if (!owner && (need_resched() || rt_task(task)))
  			break;
  		/*
  		 * The cpu_relax() call is a compiler barrier which forces
  		 * everything in this loop to be re-loaded. We don't need
  		 * memory barriers as we'll eventually observe the right
  		 * values at the cost of a few extra spins.
  		 */
  		arch_mutex_cpu_relax();
  	}
  slowpath:
  #endif
  	spin_lock_mutex(&lock->wait_lock, flags);

  	debug_mutex_lock_common(lock, &waiter);
  	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
  
  	/* add waiting tasks to the end of the waitqueue (FIFO): */
  	list_add_tail(&waiter.list, &lock->wait_list);
  	waiter.task = task;
  	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
  		goto done;
  	lock_contended(&lock->dep_map, ip);

  	for (;;) {
  		/*
  		 * Let's try to take the lock again - this is needed even if
  		 * we get here for the first time (shortly after failing to
  		 * acquire the lock), to make sure that we get a wakeup once
  		 * it's unlocked. Later on, if we sleep, this is the
  		 * operation that gives us the lock. We xchg it to -1, so
  		 * that when we release the lock, we properly wake up the
  		 * other waiters:
  		 */
  		if (MUTEX_SHOW_NO_WAITER(lock) &&
  		   (atomic_xchg(&lock->count, -1) == 1))
  			break;
  
  		/*
  		 * got a signal? (This code gets eliminated in the
  		 * TASK_UNINTERRUPTIBLE case.)
  		 */
  		if (unlikely(signal_pending_state(state, task))) {
  			ret = -EINTR;
  			goto err;
  		}

  		if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
  			ret = __mutex_lock_check_stamp(lock, ww_ctx);
  			if (ret)
  				goto err;
  		}

  		__set_task_state(task, state);
  		/* didn't get the lock, go to sleep: */
  		spin_unlock_mutex(&lock->wait_lock, flags);
  		schedule_preempt_disabled();
  		spin_lock_mutex(&lock->wait_lock, flags);
  	}
  done:
  	lock_acquired(&lock->dep_map, ip);
  	/* got the lock - rejoice! */
  	mutex_remove_waiter(lock, &waiter, current_thread_info());
  	mutex_set_owner(lock);

  	if (!__builtin_constant_p(ww_ctx == NULL)) {
  		struct ww_mutex *ww = container_of(lock,
  						      struct ww_mutex,
  						      base);
  		struct mutex_waiter *cur;
  
  		/*
  		 * This branch gets optimized out for the common case,
  		 * and is only important for ww_mutex_lock.
  		 */
  
  		ww_mutex_lock_acquired(ww, ww_ctx);
  		ww->ctx = ww_ctx;
  
  		/*
  		 * Give any possible sleeping processes the chance to wake up,
  		 * so they can recheck if they have to back off.
  		 */
  		list_for_each_entry(cur, &lock->wait_list, list) {
  			debug_mutex_wake_waiter(lock, cur);
  			wake_up_process(cur->task);
  		}
  	}
  	/* set it to 0 if there are no waiters left: */
  	if (likely(list_empty(&lock->wait_list)))
  		atomic_set(&lock->count, 0);
  	spin_unlock_mutex(&lock->wait_lock, flags);
  
  	debug_mutex_free_waiter(&waiter);
  	preempt_enable();

  	return 0;
  
  err:
  	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
  	spin_unlock_mutex(&lock->wait_lock, flags);
  	debug_mutex_free_waiter(&waiter);
  	mutex_release(&lock->dep_map, 1, ip);
  	preempt_enable();
  	return ret;
  }
  #ifdef CONFIG_DEBUG_LOCK_ALLOC
  void __sched
  mutex_lock_nested(struct mutex *lock, unsigned int subclass)
  {
  	might_sleep();
  	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
  			    subclass, NULL, _RET_IP_, NULL);
  }
  
  EXPORT_SYMBOL_GPL(mutex_lock_nested);
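  
  /*
   * Illustrative usage sketch (not part of this file): taking two mutexes
   * that belong to the same lock class, e.g. locking both objects involved
   * in a transfer. The second acquisition uses a distinct lockdep subclass
   * (SINGLE_DEPTH_NESTING, from <linux/lockdep.h>) so lockdep does not
   * report it as recursive locking; the struct/function names are
   * hypothetical.
   *
   *	static void double_lock(struct my_obj *a, struct my_obj *b)
   *	{
   *		if (a > b)			// lock in a stable address order
   *			swap(a, b);		// to avoid ABBA deadlocks
   *		mutex_lock(&a->lock);
   *		mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
   *	}
   */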

  void __sched
  _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
  {
  	might_sleep();
  	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
  			    0, nest, _RET_IP_, NULL);
  }
  
  EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
  int __sched
  mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
  {
  	might_sleep();
  	return __mutex_lock_common(lock, TASK_KILLABLE,
  				   subclass, NULL, _RET_IP_, NULL);
  }
  EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
  
  int __sched
  mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
  {
  	might_sleep();
  	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
  				   subclass, NULL, _RET_IP_, NULL);
  }
  
  EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

  static inline int
  ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
  {
  #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
  	unsigned tmp;
  
  	if (ctx->deadlock_inject_countdown-- == 0) {
  		tmp = ctx->deadlock_inject_interval;
  		if (tmp > UINT_MAX/4)
  			tmp = UINT_MAX;
  		else
  			tmp = tmp*2 + tmp + tmp/2;
  
  		ctx->deadlock_inject_interval = tmp;
  		ctx->deadlock_inject_countdown = tmp;
  		ctx->contending_lock = lock;
  
  		ww_mutex_unlock(lock);
  
  		return -EDEADLK;
  	}
  #endif
  
  	return 0;
  }
  
  int __sched
  __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
  {
  	int ret;
  	might_sleep();
  	ret =  __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
  				   0, &ctx->dep_map, _RET_IP_, ctx);
  	if (!ret && ctx->acquired > 1)
  		return ww_mutex_deadlock_injection(lock, ctx);
  
  	return ret;
  }
  EXPORT_SYMBOL_GPL(__ww_mutex_lock);
  
  int __sched
  __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
  {
  	int ret;
  	might_sleep();
  	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
  				  0, &ctx->dep_map, _RET_IP_, ctx);
  	if (!ret && ctx->acquired > 1)
  		return ww_mutex_deadlock_injection(lock, ctx);
  
  	return ret;
  }
  EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
  #endif
  /*
   * Release the lock, slowpath:
   */
  static inline void
  __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
  {
  	struct mutex *lock = container_of(lock_count, struct mutex, count);
  	unsigned long flags;

  	spin_lock_mutex(&lock->wait_lock, flags);
  	mutex_release(&lock->dep_map, nested, _RET_IP_);
  	debug_mutex_unlock(lock);
  
  	/*
  	 * some architectures leave the lock unlocked in the fastpath failure
  	 * case, others need to leave it locked. In the latter case we have to
  	 * unlock it here
  	 */
  	if (__mutex_slowpath_needs_to_unlock())
  		atomic_set(&lock->count, 1);
  	if (!list_empty(&lock->wait_list)) {
  		/* get the first entry from the wait-list: */
  		struct mutex_waiter *waiter =
  				list_entry(lock->wait_list.next,
  					   struct mutex_waiter, list);
  
  		debug_mutex_wake_waiter(lock, waiter);
  
  		wake_up_process(waiter->task);
  	}
  	spin_unlock_mutex(&lock->wait_lock, flags);
  }
  
  /*
   * Release the lock, slowpath:
   */
  static __used noinline void
  __mutex_unlock_slowpath(atomic_t *lock_count)
  {
  	__mutex_unlock_common_slowpath(lock_count, 1);
  }
  #ifndef CONFIG_DEBUG_LOCK_ALLOC
  /*
   * Here come the less common (and hence less performance-critical) APIs:
   * mutex_lock_interruptible() and mutex_trylock().
   */
  static noinline int __sched
  __mutex_lock_killable_slowpath(struct mutex *lock);

  static noinline int __sched
  __mutex_lock_interruptible_slowpath(struct mutex *lock);

  /**
   * mutex_lock_interruptible - acquire the mutex, interruptible
   * @lock: the mutex to be acquired
   *
   * Lock the mutex like mutex_lock(), and return 0 if the mutex has
   * been acquired or sleep until the mutex becomes available. If a
   * signal arrives while waiting for the lock then this function
   * returns -EINTR.
   *
   * This function is similar to (but not equivalent to) down_interruptible().
   */
  int __sched mutex_lock_interruptible(struct mutex *lock)
  {
  	int ret;
  	might_sleep();
  	ret =  __mutex_fastpath_lock_retval(&lock->count);
  	if (likely(!ret)) {
  		mutex_set_owner(lock);
  		return 0;
  	} else
  		return __mutex_lock_interruptible_slowpath(lock);
  }
  
  EXPORT_SYMBOL(mutex_lock_interruptible);
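  
  /*
   * Illustrative usage sketch (not part of this file): a typical ioctl or
   * syscall path where the wait should be interruptible by signals, and the
   * interruption is reported back to user space. Names are hypothetical.
   *
   *	static long my_dev_do_op(struct my_dev *dev)
   *	{
   *		if (mutex_lock_interruptible(&dev->lock))
   *			return -ERESTARTSYS;	// a signal interrupted the wait
   *
   *		// ... operate on dev ...
   *
   *		mutex_unlock(&dev->lock);
   *		return 0;
   *	}
   */
  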
  int __sched mutex_lock_killable(struct mutex *lock)
  {
  	int ret;
  	might_sleep();
  	ret = __mutex_fastpath_lock_retval(&lock->count);
  	if (likely(!ret)) {
  		mutex_set_owner(lock);
  		return 0;
  	} else
  		return __mutex_lock_killable_slowpath(lock);
  }
  EXPORT_SYMBOL(mutex_lock_killable);
  static __used noinline void __sched
  __mutex_lock_slowpath(atomic_t *lock_count)
  {
  	struct mutex *lock = container_of(lock_count, struct mutex, count);
  	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
  			    NULL, _RET_IP_, NULL);
  }
  static noinline int __sched
  __mutex_lock_killable_slowpath(struct mutex *lock)
  {
  	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
  				   NULL, _RET_IP_, NULL);
  }
  static noinline int __sched
  __mutex_lock_interruptible_slowpath(struct mutex *lock)
  {
  	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
  				   NULL, _RET_IP_, NULL);
  }
  
  static noinline int __sched
  __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
  {
  	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
  				   NULL, _RET_IP_, ctx);
  }
  
  static noinline int __sched
  __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
  					    struct ww_acquire_ctx *ctx)
  {
  	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
  				   NULL, _RET_IP_, ctx);
  }
  #endif
  
  /*
   * Spinlock based trylock, we take the spinlock and check whether we
   * can get the lock:
   */
  static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
  {
  	struct mutex *lock = container_of(lock_count, struct mutex, count);
  	unsigned long flags;
  	int prev;
  	spin_lock_mutex(&lock->wait_lock, flags);
  
  	prev = atomic_xchg(&lock->count, -1);
  	if (likely(prev == 1)) {
  		mutex_set_owner(lock);
  		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
  	}

  	/* Set it back to 0 if there are no waiters: */
  	if (likely(list_empty(&lock->wait_list)))
  		atomic_set(&lock->count, 0);
  	spin_unlock_mutex(&lock->wait_lock, flags);
  
  	return prev == 1;
  }
  /**
   * mutex_trylock - try to acquire the mutex, without waiting
   * @lock: the mutex to be acquired
   *
   * Try to acquire the mutex atomically. Returns 1 if the mutex
   * has been acquired successfully, and 0 on contention.
   *
   * NOTE: this function follows the spin_trylock() convention, so
   * it is negated from the down_trylock() return values! Be careful
   * about this when converting semaphore users to mutexes.
   *
   * This function must not be used in interrupt context. The
   * mutex must be released by the same task that acquired it.
   */
  int __sched mutex_trylock(struct mutex *lock)
  {
  	int ret;
  
  	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
  	if (ret)
  		mutex_set_owner(lock);
  
  	return ret;
  }
  EXPORT_SYMBOL(mutex_trylock);
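  
  /*
   * Illustrative usage sketch (not part of this file): opportunistic work
   * that is simply skipped when the lock is currently held, e.g. from a
   * periodic worker that will run again later. Names are hypothetical.
   *
   *	static void my_dev_flush(struct my_dev *dev)
   *	{
   *		if (!mutex_trylock(&dev->lock))
   *			return;			// busy; try again on the next pass
   *
   *		// ... write back cached state ...
   *
   *		mutex_unlock(&dev->lock);
   *	}
   */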

  #ifndef CONFIG_DEBUG_LOCK_ALLOC
  int __sched
  __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
  {
  	int ret;
  
  	might_sleep();
  
  	ret = __mutex_fastpath_lock_retval(&lock->base.count);
  
  	if (likely(!ret)) {
  		ww_mutex_set_context_fastpath(lock, ctx);
  		mutex_set_owner(&lock->base);
  	} else
  		ret = __ww_mutex_lock_slowpath(lock, ctx);
  	return ret;
  }
  EXPORT_SYMBOL(__ww_mutex_lock);
  
  int __sched
  __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
  {
  	int ret;
  
  	might_sleep();
  
  	ret = __mutex_fastpath_lock_retval(&lock->base.count);
  
  	if (likely(!ret)) {
  		ww_mutex_set_context_fastpath(lock, ctx);
  		mutex_set_owner(&lock->base);
  	} else
  		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
  	return ret;
  }
  EXPORT_SYMBOL(__ww_mutex_lock_interruptible);
  
  #endif
  /**
   * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
   * @cnt: the atomic which we are to dec
   * @lock: the mutex to return holding if we dec to 0
   *
   * return true and hold lock if we dec to 0, return false otherwise
   */
  int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
  {
  	/* dec if we can't possibly hit 0 */
  	if (atomic_add_unless(cnt, -1, 1))
  		return 0;
  	/* we might hit 0, so take the lock */
  	mutex_lock(lock);
  	if (!atomic_dec_and_test(cnt)) {
  		/* when we actually did the dec, we didn't hit 0 */
  		mutex_unlock(lock);
  		return 0;
  	}
  	/* we hit 0, and we hold the lock */
  	return 1;
  }
  EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
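  
  /*
   * Illustrative usage sketch (not part of this file): dropping a reference
   * where only the final put needs the mutex (e.g. to unlink the object from
   * a list) while all other puts stay lock-free. Names are hypothetical.
   *
   *	static void my_obj_put(struct my_obj *obj)
   *	{
   *		if (!atomic_dec_and_mutex_lock(&obj->refcount, &obj_list_lock))
   *			return;				// not the last reference
   *
   *		list_del(&obj->node);			// still holding obj_list_lock
   *		mutex_unlock(&obj_list_lock);
   *		kfree(obj);
   *	}
   */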