kernel/locking/mutex.c

  // SPDX-License-Identifier: GPL-2.0-only
  /*
   * kernel/locking/mutex.c
   *
   * Mutexes: blocking mutual exclusion locks
   *
   * Started by Ingo Molnar:
   *
   *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
   *
   * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
   * David Howells for suggestions and improvements.
   *
   *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
   *    from the -rt tree, where it was originally implemented for rtmutexes
   *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
   *    and Sven Dietrich.)
   *
   * Also see Documentation/locking/mutex-design.rst.
   */
  #include <linux/mutex.h>
  #include <linux/ww_mutex.h>
  #include <linux/sched/signal.h>
  #include <linux/sched/rt.h>
  #include <linux/sched/wake_q.h>
  #include <linux/sched/debug.h>
  #include <linux/export.h>
  #include <linux/spinlock.h>
  #include <linux/interrupt.h>
  #include <linux/debug_locks.h>
  #include <linux/osq_lock.h>

  #ifdef CONFIG_DEBUG_MUTEXES
  # include "mutex-debug.h"
  #else
  # include "mutex.h"
  #endif
  #include <trace/hooks/dtask.h>

  void
  __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
  {
  	atomic_long_set(&lock->owner, 0);
  	spin_lock_init(&lock->wait_lock);
  	INIT_LIST_HEAD(&lock->wait_list);
  #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
  	osq_lock_init(&lock->osq);
  #endif

  	debug_mutex_init(lock, name, key);
  }
  EXPORT_SYMBOL(__mutex_init);

  /*
   * @owner: contains: 'struct task_struct *' to the current lock owner,
   * NULL means not owned. Since task_struct pointers are aligned to
   * at least L1_CACHE_BYTES, we have low bits to store extra state.
   *
   * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
   * Bit1 indicates unlock needs to hand the lock to the top-waiter
   * Bit2 indicates handoff has been done and we're waiting for pickup.
   */
  #define MUTEX_FLAG_WAITERS	0x01
  #define MUTEX_FLAG_HANDOFF	0x02
  #define MUTEX_FLAG_PICKUP	0x04

  #define MUTEX_FLAGS		0x07
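
  /*
   * Illustrative sketch (editor's addition, not part of the kernel source):
   * the owner field packs the task_struct pointer and the flag bits into one
   * atomic_long_t, so a value could decode as follows (the pointer value is
   * made up for the example):
   *
   *	owner = 0xffff888012345600 | MUTEX_FLAG_WAITERS | MUTEX_FLAG_HANDOFF;
   *	task  = (struct task_struct *)(owner & ~MUTEX_FLAGS);	// 0xffff888012345600
   *	flags = owner & MUTEX_FLAGS;				// 0x03
   */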
  /*
   * Internal helper function; C doesn't allow us to hide it :/
   *
   * DO NOT USE (outside of mutex code).
   */
  static inline struct task_struct *__mutex_owner(struct mutex *lock)
  {
  	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
  }
  static inline struct task_struct *__owner_task(unsigned long owner)
  {
  	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
  }
  bool mutex_is_locked(struct mutex *lock)
  {
  	return __mutex_owner(lock) != NULL;
  }
  EXPORT_SYMBOL(mutex_is_locked);
  
  __must_check enum mutex_trylock_recursive_enum
  mutex_trylock_recursive(struct mutex *lock)
  {
  	if (unlikely(__mutex_owner(lock) == current))
  		return MUTEX_TRYLOCK_RECURSIVE;
  
  	return mutex_trylock(lock);
  }
  EXPORT_SYMBOL(mutex_trylock_recursive);
  static inline unsigned long __owner_flags(unsigned long owner)
  {
  	return owner & MUTEX_FLAGS;
  }
  
  /*
   * Trylock variant that returns the owning task on failure.
   */
  static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
  {
  	unsigned long owner, curr = (unsigned long)current;
  
  	owner = atomic_long_read(&lock->owner);
  	for (;;) { /* must loop, can race against a flag */
  		unsigned long old, flags = __owner_flags(owner);
  		unsigned long task = owner & ~MUTEX_FLAGS;

  		if (task) {
  			if (likely(task != curr))
  				break;
  			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
  				break;
  			flags &= ~MUTEX_FLAG_PICKUP;
  		} else {
  #ifdef CONFIG_DEBUG_MUTEXES
  			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
  #endif
  		}
  
  		/*
  		 * We set the HANDOFF bit, we must make sure it doesn't live
  		 * past the point where we acquire it. This would be possible
  		 * if we (accidentally) set the bit on an unlocked mutex.
  		 */
  		flags &= ~MUTEX_FLAG_HANDOFF;

  		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
  		if (old == owner)
  			return NULL;
  
  		owner = old;
  	}
  
  	return __owner_task(owner);
  }
  
  /*
   * Actual trylock that will work on any unlocked state.
   */
  static inline bool __mutex_trylock(struct mutex *lock)
  {
  	return !__mutex_trylock_or_owner(lock);
  }
  
  #ifndef CONFIG_DEBUG_LOCK_ALLOC
  /*
   * Lockdep annotations are contained to the slow paths for simplicity.
   * There is nothing that would stop spreading the lockdep annotations outwards
   * except more code.
   */
  
  /*
   * Optimistic trylock that only works in the uncontended case. Make sure to
   * follow with a __mutex_trylock() before failing.
   */
  static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
  {
  	unsigned long curr = (unsigned long)current;
  	unsigned long zero = 0UL;

  	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
  		return true;
  
  	return false;
  }
  
  static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
  {
  	unsigned long curr = (unsigned long)current;
  
  	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
  		return true;
  
  	return false;
  }
  #endif
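
  /*
   * Illustrative sketch (editor's addition): the fastpath helpers above only
   * cover the two fully uncontended transitions of the owner word; anything
   * else (waiters, handoff or pickup bits set) falls back to the slowpath:
   *
   *	lock:	0UL                    -> (unsigned long)current
   *	unlock:	(unsigned long)current -> 0UL
   */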
  
  static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
  {
  	atomic_long_or(flag, &lock->owner);
  }
  
  static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
  {
  	atomic_long_andnot(flag, &lock->owner);
  }
  static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
  {
  	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
  }
  
  /*
   * Add @waiter to a given location in the lock wait_list and set the
   * FLAG_WAITERS flag if it's the first waiter.
   */
  static void __sched
  __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
  		   struct list_head *list)
  {
  	debug_mutex_add_waiter(lock, waiter, current);
  
  	list_add_tail(&waiter->list, list);
  	if (__mutex_waiter_is_first(lock, waiter))
  		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
  }
  
  /*
   * Give up ownership to a specific task; when @task = NULL, this is equivalent
   * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
   * WAITERS. Provides RELEASE semantics like a regular unlock, the
   * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff.
   */
  static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
  {
  	unsigned long owner = atomic_long_read(&lock->owner);
  
  	for (;;) {
  		unsigned long old, new;
  
  #ifdef CONFIG_DEBUG_MUTEXES
  		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
  		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
  #endif
  
  		new = (owner & MUTEX_FLAG_WAITERS);
  		new |= (unsigned long)task;
  		if (task)
  			new |= MUTEX_FLAG_PICKUP;
  
  		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
  		if (old == owner)
  			break;
  
  		owner = old;
  	}
  }
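
  /*
   * Illustrative sketch (editor's addition) of the owner word across a
   * handoff, with T1 unlocking and handing off to top-waiter T2:
   *
   *	T1 | WAITERS | HANDOFF   -- top waiter requested a handoff
   *	T2 | WAITERS | PICKUP    -- __mutex_handoff(): only WAITERS preserved
   *	T2 | WAITERS             -- T2's __mutex_trylock() clears PICKUP
   */
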
  #ifndef CONFIG_DEBUG_LOCK_ALLOC
  /*
   * We split the mutex lock/unlock logic into separate fastpath and
   * slowpath functions, to reduce the register pressure on the fastpath.
   * We also put the fastpath first in the kernel image, to make sure the
   * branch is predicted by the CPU as default-untaken.
   */
  static void __sched __mutex_lock_slowpath(struct mutex *lock);

  /**
   * mutex_lock - acquire the mutex
   * @lock: the mutex to be acquired
   *
   * Lock the mutex exclusively for this task. If the mutex is not
   * available right now, it will sleep until it can get it.
   *
   * The mutex must later on be released by the same task that
   * acquired it. Recursive locking is not allowed. The task
   * may not exit without first unlocking the mutex. Also, kernel
   * memory where the mutex resides must not be freed with
   * the mutex still locked. The mutex must first be initialized
   * (or statically defined) before it can be locked. memset()-ing
   * the mutex to 0 is not allowed.
   *
   * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
   * checks that will enforce the restrictions and will also do
   * deadlock debugging)
   *
   * This function is similar to (but not equivalent to) down().
   */
  void __sched mutex_lock(struct mutex *lock)
  {
  	might_sleep();

  	if (!__mutex_trylock_fast(lock))
  		__mutex_lock_slowpath(lock);
  }
  EXPORT_SYMBOL(mutex_lock);
  #endif
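
  /*
   * Illustrative sketch (editor's addition): typical usage of the API
   * documented above, in hypothetical driver code:
   *
   *	static DEFINE_MUTEX(foo_lock);
   *
   *	void foo(void)
   *	{
   *		mutex_lock(&foo_lock);
   *		// ... critical section, may sleep ...
   *		mutex_unlock(&foo_lock);
   *	}
   */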
  /*
   * Wait-Die:
   *   The newer transactions are killed when:
   *     It (the new transaction) makes a request for a lock being held
   *     by an older transaction.
   *
   * Wound-Wait:
   *   The newer transactions are wounded when:
   *     An older transaction makes a request for a lock being held by
   *     the newer transaction.
   */
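
  /*
   * Illustrative sketch (editor's addition): from the caller's side both
   * algorithms take the same shape; a younger context that loses gets
   * -EDEADLK and must back off. Names below are hypothetical; see
   * Documentation/locking/ww-mutex-design.rst for the complete pattern:
   *
   *	static DEFINE_WW_CLASS(foo_ww_class);
   *
   *	ww_acquire_init(&ctx, &foo_ww_class);
   *	err = ww_mutex_lock(&a->lock, &ctx);
   *	if (!err)
   *		err = ww_mutex_lock(&b->lock, &ctx);
   *	if (err == -EDEADLK) {
   *		// back off: unlock everything already held, then take the
   *		// contended lock with ww_mutex_lock_slow() and retry
   *	}
   *	ww_acquire_done(&ctx);
   *	...
   *	ww_mutex_unlock(&b->lock);
   *	ww_mutex_unlock(&a->lock);
   *	ww_acquire_fini(&ctx);
   */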
  
  /*
   * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
   * it.
   */
  static __always_inline void
  ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
  {
  #ifdef CONFIG_DEBUG_MUTEXES
  	/*
  	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
  	 * but released with a normal mutex_unlock in this call.
  	 *
  	 * This should never happen, always use ww_mutex_unlock.
  	 */
  	DEBUG_LOCKS_WARN_ON(ww->ctx);
  
  	/*
  	 * Not quite done after calling ww_acquire_done() ?
  	 */
  	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
  
  	if (ww_ctx->contending_lock) {
  		/*
  		 * After -EDEADLK you tried to
  		 * acquire a different ww_mutex? Bad!
  		 */
  		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
  
  		/*
  		 * You called ww_mutex_lock after receiving -EDEADLK,
  		 * but 'forgot' to unlock everything else first?
  		 */
  		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
  		ww_ctx->contending_lock = NULL;
  	}
  
  	/*
  	 * Naughty, using a different class will lead to undefined behavior!
  	 */
  	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
  #endif
  	ww_ctx->acquired++;
  	ww->ctx = ww_ctx;
  }

  /*
   * Determine if context @a is 'after' context @b. IOW, @a is a younger
   * transaction than @b and depending on algorithm either needs to wait for
   * @b or die.
   */
  static inline bool __sched
  __ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
  {
  
  	return (signed long)(a->stamp - b->stamp) > 0;
  }
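
  /*
   * Illustrative note (editor's addition): the signed subtraction keeps the
   * comparison correct across stamp counter wraparound. For example, with
   * 8-bit arithmetic a->stamp == 2 and b->stamp == 250 gives
   * (s8)(2 - 250) == 8 > 0, so @a is still seen as the younger (later)
   * context even though its raw stamp value is numerically smaller.
   */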
  
  /*
   * Wait-Die; wake a younger waiter context (when locks held) such that it can
   * die.
   *
   * Among waiters with context, only the first one can have other locks acquired
   * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
   * __ww_mutex_check_kill() wake any but the earliest context.
   */
  static bool __sched
  __ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
  	       struct ww_acquire_ctx *ww_ctx)
  {
  	if (!ww_ctx->is_wait_die)
  		return false;
  	if (waiter->ww_ctx->acquired > 0 &&
  			__ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
  		debug_mutex_wake_waiter(lock, waiter);
  		wake_up_process(waiter->task);
  	}
  
  	return true;
  }

  /*
   * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
   *
   * Wound the lock holder if there are waiters with older transactions than
   * the lock holder. Even if multiple waiters may wound the lock holder,
   * it's sufficient that only one does.
   */
  static bool __ww_mutex_wound(struct mutex *lock,
  			     struct ww_acquire_ctx *ww_ctx,
  			     struct ww_acquire_ctx *hold_ctx)
  {
  	struct task_struct *owner = __mutex_owner(lock);
  
  	lockdep_assert_held(&lock->wait_lock);
  
  	/*
  	 * Possible through __ww_mutex_add_waiter() when we race with
  	 * ww_mutex_set_context_fastpath(). In that case we'll get here again
  	 * through __ww_mutex_check_waiters().
  	 */
  	if (!hold_ctx)
  		return false;
  
  	/*
  	 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
  	 * it cannot go away because we'll have FLAG_WAITERS set and hold
  	 * wait_lock.
  	 */
  	if (!owner)
  		return false;
  
  	if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
  		hold_ctx->wounded = 1;
  
  		/*
  		 * wake_up_process() paired with set_current_state()
  		 * inserts sufficient barriers to make sure @owner either sees
  		 * it's wounded in __ww_mutex_check_kill() or has a
  		 * wakeup pending to re-read the wounded state.
  		 */
  		if (owner != current)
  			wake_up_process(owner);
  
  		return true;
  	}
  
  	return false;
  }
  
  /*
   * We just acquired @lock under @ww_ctx; if there are later contexts waiting
   * behind us on the wait-list, check if they need to die, or wound us.
   *
   * See __ww_mutex_add_waiter() for the list-order construction; basically the
   * list is ordered by stamp, smallest (oldest) first.
   *
   * This relies on never mixing wait-die/wound-wait on the same wait-list;
   * which is currently ensured by that being a ww_class property.
   *
   * The current task must not be on the wait list.
   */
  static void __sched
  __ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
  {
  	struct mutex_waiter *cur;
  
  	lockdep_assert_held(&lock->wait_lock);
  
  	list_for_each_entry(cur, &lock->wait_list, list) {
  		if (!cur->ww_ctx)
  			continue;
  		if (__ww_mutex_die(lock, cur, ww_ctx) ||
  		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
  			break;
  	}
  }

  /*
   * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
   * and wake up any waiters so they can recheck.
   */
  static __always_inline void
  ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
  {
  	ww_mutex_lock_acquired(lock, ctx);
  	/*
  	 * The lock->ctx update should be visible on all cores before
  	 * the WAITERS check is done, otherwise contended waiters might be
  	 * missed. A contended waiter will either see ww_ctx == NULL
  	 * and keep spinning, or it will acquire wait_lock, add itself
  	 * to the waiter list and sleep.
  	 */
  	smp_mb(); /* See comments above and below. */
  
  	/*
  	 * [W] ww->ctx = ctx	    [W] MUTEX_FLAG_WAITERS
  	 *     MB		        MB
  	 * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
  	 *
  	 * The memory barrier above pairs with the memory barrier in
  	 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
  	 * and/or !empty list.
  	 */
  	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
  		return;
  
  	/*
  	 * Uh oh, we raced in fastpath, check if any of the waiters need to
  	 * die or wound us.
  	 */
  	spin_lock(&lock->base.wait_lock);
  	__ww_mutex_check_waiters(&lock->base, ctx);
  	spin_unlock(&lock->base.wait_lock);
  }

  #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
  
  static inline
  bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
  			    struct mutex_waiter *waiter)
  {
  	struct ww_mutex *ww;
  
  	ww = container_of(lock, struct ww_mutex, base);
  
  	/*
  	 * If ww->ctx is set its contents are undefined; only
  	 * by acquiring wait_lock is there a guarantee that
  	 * they are not invalid when reading.
  	 *
  	 * As such, when deadlock detection needs to be
  	 * performed the optimistic spinning cannot be done.
  	 *
  	 * Check this in every inner iteration because we may
  	 * be racing against another thread's ww_mutex_lock.
4bd19084f   Davidlohr Bueso   locking/mutex: In...
506
  	 */
  	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
  		return false;
  
  	/*
  	 * If we aren't on the wait list yet, cancel the spin
  	 * if there are waiters. We want to avoid stealing the
  	 * lock from a waiter with an earlier stamp, since the
  	 * other thread may already own a lock that we also
  	 * need.
  	 */
  	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
  		return false;
  
  	/*
  	 * Similarly, stop spinning if we are no longer the
  	 * first waiter.
  	 */
  	if (waiter && !__mutex_waiter_is_first(lock, waiter))
  		return false;
  
  	return true;
  }

  /*
   * Look out! "owner" is an entirely speculative pointer access and not
   * reliable.
   *
   * "noinline" so that this function shows up on perf profiles.
   */
  static noinline
  bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
  			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
  {
  	bool ret = true;

  	rcu_read_lock();
  	while (__mutex_owner(lock) == owner) {
  		/*
  		 * Ensure we emit the owner->on_cpu, dereference _after_
  		 * checking lock->owner still matches owner. If that fails,
  		 * owner might point to freed memory. If it still matches,
  		 * the rcu_read_lock() ensures the memory stays valid.
  		 */
  		barrier();
  		/*
  		 * Use vcpu_is_preempted() to detect lock holder preemption.
  		 */
  		if (!owner->on_cpu || need_resched() ||
  				vcpu_is_preempted(task_cpu(owner))) {
  			ret = false;
  			break;
  		}
  		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
  			ret = false;
  			break;
  		}
  		cpu_relax();
  	}
  	rcu_read_unlock();
  	return ret;
  }
  
  /*
   * Initial check for entering the mutex spinning loop
   */
  static inline int mutex_can_spin_on_owner(struct mutex *lock)
  {
  	struct task_struct *owner;
  	int retval = 1;
  	if (need_resched())
  		return 0;
  	rcu_read_lock();
  	owner = __mutex_owner(lock);
  
  	/*
  	 * To guard against lock holder preemption, we skip spinning if the
  	 * owner task is not running on a CPU or its CPU is preempted.
  	 */
  	if (owner)
  		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
  	rcu_read_unlock();

  	/*
  	 * If lock->owner is not set, the mutex has been released. Return true
  	 * such that we'll trylock in the spin path, which is a faster option
  	 * than the blocking slow path.
  	 */
  	return retval;
  }
  
  /*
   * Optimistic spinning.
   *
   * We try to spin for acquisition when we find that the lock owner
   * is currently running on a (different) CPU and while we don't
   * need to reschedule. The rationale is that if the lock owner is
   * running, it is likely to release the lock soon.
   *
   * The mutex spinners are queued up using MCS lock so that only one
   * spinner can compete for the mutex. However, if mutex spinning isn't
   * going to happen, there is no point in going through the lock/unlock
   * overhead.
   *
   * Returns true when the lock was taken, otherwise false, indicating
   * that we need to jump to the slowpath and sleep.
   *
   * The waiter flag is set to true if the spinner is a waiter in the wait
   * queue. The waiter-spinner will spin on the lock directly and concurrently
   * with the spinner at the head of the OSQ, if present, until the owner is
   * changed to itself.
   */
  static __always_inline bool
  mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
  		      const bool use_ww_ctx, struct mutex_waiter *waiter)
  {
  	if (!waiter) {
  		/*
  		 * The purpose of the mutex_can_spin_on_owner() function is
  		 * to eliminate the overhead of osq_lock() and osq_unlock()
  		 * in case spinning isn't possible. As a waiter-spinner
  		 * is not going to take OSQ lock anyway, there is no need
  		 * to call mutex_can_spin_on_owner().
  		 */
  		if (!mutex_can_spin_on_owner(lock))
  			goto fail;
  		/*
  		 * In order to avoid a stampede of mutex spinners trying to
  		 * acquire the mutex all at once, the spinners need to take a
  		 * MCS (queued) lock first before spinning on the owner field.
  		 */
  		if (!osq_lock(&lock->osq))
  			goto fail;
  	}

  	for (;;) {
  		struct task_struct *owner;
  		/* Try to acquire the mutex... */
  		owner = __mutex_trylock_or_owner(lock);
  		if (!owner)
  			break;
  
  		/*
  		 * There's an owner, wait for it to either
  		 * release the lock or go to sleep.
  		 */
  		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
  			goto fail_unlock;

  		/*
  		 * The cpu_relax() call is a compiler barrier which forces
  		 * everything in this loop to be re-loaded. We don't need
  		 * memory barriers as we'll eventually observe the right
  		 * values at the cost of a few extra spins.
  		 */
  		cpu_relax();
  	}
  	if (!waiter)
  		osq_unlock(&lock->osq);
  
  	return true;
  
  
  fail_unlock:
  	if (!waiter)
  		osq_unlock(&lock->osq);
  
  fail:
  	/*
  	 * If we fell out of the spin path because of need_resched(),
  	 * reschedule now, before we try-lock the mutex. This avoids getting
  	 * scheduled out right after we obtained the mutex.
  	 */
  	if (need_resched()) {
  		/*
  		 * We _should_ have TASK_RUNNING here, but just in case
  		 * we do not, make it so, otherwise we might get stuck.
  		 */
  		__set_current_state(TASK_RUNNING);
  		schedule_preempt_disabled();
  	}
  
  	return false;
  }
  #else
  static __always_inline bool
  mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
  		      const bool use_ww_ctx, struct mutex_waiter *waiter)
  {
  	return false;
  }
  #endif
  static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

  /**
   * mutex_unlock - release the mutex
   * @lock: the mutex to be released
   *
   * Unlock a mutex that has been locked by this task previously.
   *
   * This function must not be used in interrupt context. Unlocking
   * of a mutex that is not locked is not allowed.
   *
   * This function is similar to (but not equivalent to) up().
   */
  void __sched mutex_unlock(struct mutex *lock)
  {
  #ifndef CONFIG_DEBUG_LOCK_ALLOC
  	if (__mutex_unlock_fast(lock))
  		return;
  #endif
  	__mutex_unlock_slowpath(lock, _RET_IP_);
  }
  EXPORT_SYMBOL(mutex_unlock);

  /**
   * ww_mutex_unlock - release the w/w mutex
   * @lock: the mutex to be released
   *
   * Unlock a mutex that has been locked by this task previously with any of the
   * ww_mutex_lock* functions (with or without an acquire context). It is
   * forbidden to release the locks after releasing the acquire context.
   *
   * This function must not be used in interrupt context. Unlocking
   * of an unlocked mutex is not allowed.
   */
  void __sched ww_mutex_unlock(struct ww_mutex *lock)
  {
  	/*
  	 * The unlocking fastpath is the 0->1 transition from 'locked'
  	 * into 'unlocked' state:
  	 */
  	if (lock->ctx) {
  #ifdef CONFIG_DEBUG_MUTEXES
  		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
  #endif
  		if (lock->ctx->acquired > 0)
  			lock->ctx->acquired--;
  		lock->ctx = NULL;
  	}
  	mutex_unlock(&lock->base);
  }
  EXPORT_SYMBOL(ww_mutex_unlock);
  
  static __always_inline int __sched
  __ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
  {
  	if (ww_ctx->acquired > 0) {
  #ifdef CONFIG_DEBUG_MUTEXES
  		struct ww_mutex *ww;
  
  		ww = container_of(lock, struct ww_mutex, base);
  		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
  		ww_ctx->contending_lock = ww;
  #endif
  		return -EDEADLK;
  	}
  
  	return 0;
  }
  
  
  /*
   * Check the wound condition for the current lock acquire.
   *
   * Wound-Wait: If we're wounded, kill ourselves.
   *
   * Wait-Die: If we're trying to acquire a lock already held by an older
   *           context, kill ourselves.
   *
   * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
   * look at waiters before us in the wait-list.
   */
  static inline int __sched
  __ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
  		      struct ww_acquire_ctx *ctx)
  {
  	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
  	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
  	struct mutex_waiter *cur;

  	if (ctx->acquired == 0)
  		return 0;
  	if (!ctx->is_wait_die) {
  		if (ctx->wounded)
  			return __ww_mutex_kill(lock, ctx);
  
  		return 0;
  	}
  	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
  		return __ww_mutex_kill(lock, ctx);

  	/*
  	 * If there is a waiter in front of us that has a context, then its
  	 * stamp is earlier than ours and we must kill ourselves.
  	 */
  	cur = waiter;
  	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
  		if (!cur->ww_ctx)
  			continue;
  
  		return __ww_mutex_kill(lock, ctx);
  	}
  
  	return 0;
  }
  /*
   * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest
   * first, such that older contexts are preferred to acquire the lock over
   * younger contexts.
   *
   * Waiters without context are interspersed in FIFO order.
   *
   * Furthermore, for Wait-Die, kill ourselves immediately when possible (there are
   * older contexts already waiting) to avoid unnecessary waiting and for
   * Wound-Wait ensure we wound the owning context when it is younger.
   */
  static inline int __sched
  __ww_mutex_add_waiter(struct mutex_waiter *waiter,
  		      struct mutex *lock,
  		      struct ww_acquire_ctx *ww_ctx)
  {
  	struct mutex_waiter *cur;
  	struct list_head *pos;
  	bool is_wait_die;
  
  	if (!ww_ctx) {
  		__mutex_add_waiter(lock, waiter, &lock->wait_list);
  		return 0;
  	}

  	is_wait_die = ww_ctx->is_wait_die;
  	/*
  	 * Add the waiter before the first waiter with a higher stamp.
  	 * Waiters without a context are skipped to avoid starving
  	 * them. Wait-Die waiters may die here. Wound-Wait waiters
  	 * never die here, but they are sorted in stamp order and
  	 * may wound the lock holder.
  	 */
  	pos = &lock->wait_list;
  	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
  		if (!cur->ww_ctx)
  			continue;
  
  		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
  			/*
  			 * Wait-Die: if we find an older context waiting, there
  			 * is no point in queueing behind it, as we'd have to
  			 * die the moment it would acquire the lock.
  			 */
  			if (is_wait_die) {
  				int ret = __ww_mutex_kill(lock, ww_ctx);
  				if (ret)
  					return ret;
  			}
  
  			break;
  		}
  
  		pos = &cur->list;
  		/* Wait-Die: ensure younger waiters die. */
  		__ww_mutex_die(lock, cur, ww_ctx);
  	}

  	__mutex_add_waiter(lock, waiter, pos);
  
  	/*
  	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
  	 * wound it such that we might proceed.
  	 */
  	if (!is_wait_die) {
  		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
  
  		/*
  		 * See ww_mutex_set_context_fastpath(). Orders setting
  		 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
  		 * such that either we or the fastpath will wound @ww->ctx.
  		 */
  		smp_mb();
  		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
  	}
  	return 0;
  }
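
  /*
   * Illustrative sketch (editor's addition): with hypothetical stamps, a
   * wait-list of
   *
   *	ctx(stamp=1) -> (no ctx) -> ctx(stamp=5) -> ctx(stamp=9)
   *
   * gets a new waiter with stamp 7 inserted before the stamp-9 entry (the
   * first waiter with a higher stamp), while context-less waiters are left
   * where they are.
   */
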
  /*
   * Lock a mutex (possibly interruptible), slowpath:
   */
  static __always_inline int __sched
  __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
  		    struct lockdep_map *nest_lock, unsigned long ip,
  		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
  {
  	struct mutex_waiter waiter;
  	bool first = false;
  	struct ww_mutex *ww;
  	int ret;

  	might_sleep();

  #ifdef CONFIG_DEBUG_MUTEXES
  	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
  #endif
  	ww = container_of(lock, struct ww_mutex, base);
  	if (use_ww_ctx && ww_ctx) {
  		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
  			return -EALREADY;
  
  		/*
  		 * Reset the wounded flag after a kill. No other process can
  		 * race and wound us here since they can't have a valid owner
  		 * pointer if we don't have any locks held.
  		 */
  		if (ww_ctx->acquired == 0)
  			ww_ctx->wounded = 0;
  	}
  	preempt_disable();
  	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

  	if (__mutex_trylock(lock) ||
  	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
  		/* got the lock, yay! */
  		lock_acquired(&lock->dep_map, ip);
  		if (use_ww_ctx && ww_ctx)
  			ww_mutex_set_context_fastpath(ww, ww_ctx);
  		preempt_enable();
  		return 0;
  	}

  	spin_lock(&lock->wait_lock);
  	/*
  	 * After waiting to acquire the wait_lock, try again.
  	 */
  	if (__mutex_trylock(lock)) {
  		if (use_ww_ctx && ww_ctx)
  			__ww_mutex_check_waiters(lock, ww_ctx);

  		goto skip_wait;
  	}

  	debug_mutex_lock_common(lock, &waiter);

  	lock_contended(&lock->dep_map, ip);
  
  	if (!use_ww_ctx) {
  		/* add waiting tasks to the end of the waitqueue (FIFO): */
  		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
  
  #ifdef CONFIG_DEBUG_MUTEXES
  		waiter.ww_ctx = MUTEX_POISON_WW_CTX;
  #endif
  	} else {
  		/*
  		 * Add in stamp order, waking up waiters that must kill
  		 * themselves.
  		 */
  		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
  		if (ret)
  			goto err_early_kill;
  
  		waiter.ww_ctx = ww_ctx;
  	}
  	waiter.task = current;

  	trace_android_vh_mutex_wait_start(lock);
  	set_current_state(state);
  	for (;;) {
  		/*
  		 * Once we hold wait_lock, we're serialized against
  		 * mutex_unlock() handing the lock off to us, do a trylock
  		 * before testing the error conditions to make sure we pick up
  		 * the handoff.
  		 */
  		if (__mutex_trylock(lock))
  			goto acquired;
  
  		/*
  		 * Check for signals and kill conditions while holding
  		 * wait_lock. This ensures the lock cancellation is ordered
  		 * against mutex_unlock() and wake-ups do not go missing.
  		 */
  		if (signal_pending_state(state, current)) {
  			ret = -EINTR;
  			goto err;
  		}
  		if (use_ww_ctx && ww_ctx) {
  			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
  			if (ret)
  				goto err;
  		}

  		spin_unlock(&lock->wait_lock);
  		schedule_preempt_disabled();

  		/*
  		 * ww_mutex needs to always recheck its position since its waiter
  		 * list is not FIFO ordered.
  		 */
  		if ((use_ww_ctx && ww_ctx) || !first) {
  			first = __mutex_waiter_is_first(lock, &waiter);
  			if (first)
  				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
  		}

  		set_current_state(state);
  		/*
  		 * Here we order against unlock; we must either see it change
  		 * state back to RUNNING and fall through the next schedule(),
  		 * or we must see its unlock and acquire.
  		 */
  		if (__mutex_trylock(lock) ||
  		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
  			break;
  		spin_lock(&lock->wait_lock);
  	}
  	spin_lock(&lock->wait_lock);
  acquired:
  	__set_current_state(TASK_RUNNING);
  	trace_android_vh_mutex_wait_finish(lock);

  	if (use_ww_ctx && ww_ctx) {
  		/*
  		 * Wound-Wait; we stole the lock (!first_waiter), check the
  		 * waiters as anyone might want to wound us.
  		 */
  		if (!ww_ctx->is_wait_die &&
  		    !__mutex_waiter_is_first(lock, &waiter))
  			__ww_mutex_check_waiters(lock, ww_ctx);
  	}
  	mutex_remove_waiter(lock, &waiter, current);
  	if (likely(list_empty(&lock->wait_list)))
  		__mutex_clear_flag(lock, MUTEX_FLAGS);

  	debug_mutex_free_waiter(&waiter);

  skip_wait:
  	/* got the lock - cleanup and rejoice! */
  	lock_acquired(&lock->dep_map, ip);

  	if (use_ww_ctx && ww_ctx)
  		ww_mutex_lock_acquired(ww, ww_ctx);

  	spin_unlock(&lock->wait_lock);
  	preempt_enable();
  	return 0;
  
  err:
  	__set_current_state(TASK_RUNNING);
  	trace_android_vh_mutex_wait_finish(lock);
  	mutex_remove_waiter(lock, &waiter, current);
  err_early_kill:
  	spin_unlock(&lock->wait_lock);
  	debug_mutex_free_waiter(&waiter);
  	mutex_release(&lock->dep_map, ip);
  	preempt_enable();
  	return ret;
  }

  static int __sched
  __mutex_lock(struct mutex *lock, long state, unsigned int subclass,
  	     struct lockdep_map *nest_lock, unsigned long ip)
  {
  	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
  }
  
  static int __sched
  __ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
  		struct lockdep_map *nest_lock, unsigned long ip,
  		struct ww_acquire_ctx *ww_ctx)
  {
  	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
  }
  #ifdef CONFIG_DEBUG_LOCK_ALLOC
  void __sched
  mutex_lock_nested(struct mutex *lock, unsigned int subclass)
  {
  	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
  }
  
  EXPORT_SYMBOL_GPL(mutex_lock_nested);
  void __sched
  _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
  {
  	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
  }
  EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

  int __sched
  mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
  {
427b18207   Peter Zijlstra   locking/mutex: Im...
1097
  	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
ad776537c   Liam R. Howlett   Add mutex_lock_ki...
1098
1099
1100
1101
  }
  EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
  
  int __sched
d63a5a74d   NeilBrown   [PATCH] lockdep: ...
1102
1103
  mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
  {
427b18207   Peter Zijlstra   locking/mutex: Im...
1104
  	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
d63a5a74d   NeilBrown   [PATCH] lockdep: ...
1105
  }
d63a5a74d   NeilBrown   [PATCH] lockdep: ...
1106
  EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
040a0a371   Maarten Lankhorst   mutex: Add suppor...
1107

1460cb65a   Tejun Heo   locking/mutex, sc...
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
  void __sched
  mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
  {
  	int token;
  
  	might_sleep();
  
  	token = io_schedule_prepare();
  	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
  			    subclass, NULL, _RET_IP_, NULL, 0);
  	io_schedule_finish(token);
  }
  EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}
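
/*
 * Note on the interval above: each injected -EDEADLK multiplies the
 * interval by roughly 3.5 (tmp*2 + tmp + tmp/2 in integer arithmetic) and,
 * once the interval exceeds UINT_MAX/4, saturates it at UINT_MAX, so the
 * artificial back-offs become progressively rarer for a long-lived
 * acquire context.
 */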

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			       0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			       ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);
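
/*
 * Usage sketch (illustrative only, not part of this file): the caller-side
 * -EDEADLK back-off dance for two wound/wait mutexes of the same class.
 * The struct and function names are made up; the API used
 * (DEFINE_WW_CLASS, ww_acquire_init/done/fini, ww_mutex_lock,
 * ww_mutex_lock_slow, ww_mutex_unlock) is the regular ww_mutex interface:
 *
 *	static DEFINE_WW_CLASS(my_ww_class);
 *
 *	struct my_buf {
 *		struct ww_mutex lock;
 *	};
 *
 *	static int my_lock_both(struct my_buf *a, struct my_buf *b)
 *	{
 *		struct ww_acquire_ctx ctx;
 *		struct my_buf *first = a, *second = b;
 *		int ret;
 *
 *		ww_acquire_init(&ctx, &my_ww_class);
 *
 *		ret = ww_mutex_lock(&first->lock, &ctx);
 *		if (ret)
 *			goto out_fini;
 *
 *		while ((ret = ww_mutex_lock(&second->lock, &ctx)) == -EDEADLK) {
 *			// wounded: drop what we hold, sleep on the contended
 *			// lock, then retry with the roles swapped
 *			ww_mutex_unlock(&first->lock);
 *			ww_mutex_lock_slow(&second->lock, &ctx);
 *			swap(first, second);
 *		}
 *		if (ret) {	// only possible with the interruptible variant
 *			ww_mutex_unlock(&first->lock);
 *			goto out_fini;
 *		}
 *
 *		ww_acquire_done(&ctx);
 *		// ... operate on *a and *b ...
 *		ww_mutex_unlock(&a->lock);
 *		ww_mutex_unlock(&b->lock);
 *	out_fini:
 *		ww_acquire_fini(&ctx);
 *		return ret;
 *	}
 */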

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;

	mutex_release(&lock->dep_map, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		old = atomic_long_cmpxchg_release(&lock->owner, owner,
						  __owner_flags(owner));
		if (old == owner) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}

		owner = old;
	}

	spin_lock(&lock->wait_lock);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock().  If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
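
/*
 * Usage sketch (illustrative only, not part of this file): a typical caller
 * propagates the interruption so the syscall can be restarted after signal
 * handling.  The device structure and helper are made up:
 *
 *	static long my_dev_ioctl(struct my_dev *dev, unsigned long arg)
 *	{
 *		long ret;
 *
 *		if (mutex_lock_interruptible(&dev->lock))
 *			return -ERESTARTSYS;
 *
 *		ret = my_dev_do_ioctl(dev, arg);
 *		mutex_unlock(&dev->lock);
 *		return ret;
 *	}
 */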

/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock().  If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);
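
/*
 * Usage sketch (illustrative only, not part of this file): callers that
 * cannot reasonably restart the operation prefer the killable variant,
 * since only a fatal signal interrupts the wait.  Names are made up:
 *
 *	if (mutex_lock_killable(&dev->setup_lock))
 *		return -EINTR;
 *	...
 *	mutex_unlock(&dev->setup_lock);
 */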

/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock().  While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);
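
/*
 * Usage sketch (illustrative only, not part of this file): use this variant
 * when blocking on the mutex effectively means waiting for I/O issued by
 * the current holder, so the blocked time is accounted as iowait.  Names
 * are made up:
 *
 *	mutex_lock_io(&dev->flush_lock);	// holder is writing back to disk
 *	...
 *	mutex_unlock(&dev->flush_lock);
 */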

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
					    struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked;

#ifdef CONFIG_DEBUG_MUTEXES
	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
#endif

	locked = __mutex_trylock(lock);
	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
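
/*
 * Usage sketch (illustrative only, not part of this file): note the
 * spin_trylock()-style return value, 1 on success and 0 on contention.
 * Names are made up:
 *
 *	if (!mutex_trylock(&dev->lock))
 *		return -EBUSY;		// somebody else holds it; don't wait
 *
 *	my_dev_do_quick_work(dev);
 *	mutex_unlock(&dev->lock);
 */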

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
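
/*
 * Usage sketch (illustrative only, not part of this file): the classic use
 * is a reference drop where only the final put needs the lock, e.g. to
 * unlink the object before freeing it.  Structure, list and helper names
 * are made up:
 *
 *	static void my_obj_put(struct my_obj *obj)
 *	{
 *		if (!atomic_dec_and_mutex_lock(&obj->refcnt, &my_obj_list_lock))
 *			return;		// not the last reference
 *
 *		list_del(&obj->node);
 *		mutex_unlock(&my_obj_list_lock);
 *		kfree(obj);
 *	}
 */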