Blame view

kernel/locking/rtmutex.c 50 KB
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1
2
3
4
5
6
7
8
9
  /*
   * RT-Mutexes: simple blocking mutual exclusion locks with PI support
   *
   * started by Ingo Molnar and Thomas Gleixner.
   *
   *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
   *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
   *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
   *  Copyright (C) 2006 Esben Nielsen
d07fe82c2   Steven Rostedt   [PATCH] reference...
10
   *
214e0aed6   Davidlohr Bueso   locking/Documenta...
11
   *  See Documentation/locking/rt-mutex-design.txt for details.
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
12
13
   */
  #include <linux/spinlock.h>
9984de1a5   Paul Gortmaker   kernel: Map most ...
14
  #include <linux/export.h>
174cd4b1e   Ingo Molnar   sched/headers: Pr...
15
  #include <linux/sched/signal.h>
8bd75c77b   Clark Williams   sched/rt: Move rt...
16
  #include <linux/sched/rt.h>
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
17
  #include <linux/sched/deadline.h>
84f001e15   Ingo Molnar   sched/headers: Pr...
18
  #include <linux/sched/wake_q.h>
b17b01533   Ingo Molnar   sched/headers: Pr...
19
  #include <linux/sched/debug.h>
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
20
21
22
  #include <linux/timer.h>
  
  #include "rtmutex_common.h"
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
23
24
25
  /*
   * lock->owner state tracking:
   *
8161239a8   Lai Jiangshan   rtmutex: Simplify...
26
27
   * lock->owner holds the task_struct pointer of the owner. Bit 0
   * is used to keep track of the "lock has waiters" state.
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
28
   *
8161239a8   Lai Jiangshan   rtmutex: Simplify...
29
30
31
32
33
34
   * owner	bit0
   * NULL		0	lock is free (fast acquire possible)
   * NULL		1	lock is free and has waiters and the top waiter
   *				is going to take the lock*
   * taskpointer	0	lock is held (fast release possible)
   * taskpointer	1	lock is held and has waiters**
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
35
36
   *
   * The fast atomic compare exchange based acquire and release is only
8161239a8   Lai Jiangshan   rtmutex: Simplify...
37
38
39
40
41
42
   * possible when bit 0 of lock->owner is 0.
   *
   * (*) It also can be a transitional state when grabbing the lock
   * with ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
   * we need to set the bit0 before looking at the lock, and the owner may be
   * NULL in this small time, hence this can be a transitional state.
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
43
   *
8161239a8   Lai Jiangshan   rtmutex: Simplify...
44
45
46
47
   * (**) There is a small time when bit 0 is set but there are no
   * waiters. This can happen when grabbing the lock in the slow path.
   * To prevent a cmpxchg of the owner releasing the lock, we need to
   * set this bit before looking at the lock.
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
48
   */
bd197234b   Thomas Gleixner   Revert "futex_req...
49
  static void
8161239a8   Lai Jiangshan   rtmutex: Simplify...
50
  rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
51
  {
8161239a8   Lai Jiangshan   rtmutex: Simplify...
52
  	unsigned long val = (unsigned long)owner;
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
  
  	if (rt_mutex_has_waiters(lock))
  		val |= RT_MUTEX_HAS_WAITERS;
  
  	lock->owner = (struct task_struct *)val;
  }
  
  static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
  {
  	lock->owner = (struct task_struct *)
  			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
  }
  
/*
 * Clear the stale "has waiters" bit in lock->owner after the waiter
 * rbtree has been emptied. Uses READ_ONCE/WRITE_ONCE rather than a
 * plain RMW so the concurrent fastpath cmpxchg scenario below stays
 * well-defined; the detailed race is documented inline.
 */
static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	/* Still waiters enqueued: the bit is legitimately set, keep it. */
	if (rt_mutex_has_waiters(lock))
		return;

	/*
	 * The rbtree has no waiters enqueued, now make sure that the
	 * lock->owner still has the waiters bit set, otherwise the
	 * following can happen:
	 *
	 * CPU 0	CPU 1		CPU2
	 * l->owner=T1
	 *		rt_mutex_lock(l)
	 *		lock(l->lock)
	 *		l->owner = T1 | HAS_WAITERS;
	 *		enqueue(T2)
	 *		boost()
	 *		  unlock(l->lock)
	 *		block()
	 *
	 *				rt_mutex_lock(l)
	 *				lock(l->lock)
	 *				l->owner = T1 | HAS_WAITERS;
	 *				enqueue(T3)
	 *				boost()
	 *				  unlock(l->lock)
	 *				block()
	 *		signal(->T2)	signal(->T3)
	 *		lock(l->lock)
	 *		dequeue(T2)
	 *		deboost()
	 *		  unlock(l->lock)
	 *				lock(l->lock)
	 *				dequeue(T3)
	 *				 ==> wait list is empty
	 *				deboost()
	 *				 unlock(l->lock)
	 *		lock(l->lock)
	 *		fixup_rt_mutex_waiters()
	 *		  if (wait_list_empty(l) {
	 *		    l->owner = owner
	 *		    owner = l->owner & ~HAS_WAITERS;
	 *		      ==> l->owner = T1
	 *		  }
	 *				lock(l->lock)
	 * rt_mutex_unlock(l)		fixup_rt_mutex_waiters()
	 *				  if (wait_list_empty(l) {
	 *				    owner = l->owner & ~HAS_WAITERS;
	 * cmpxchg(l->owner, T1, NULL)
	 *  ===> Success (l->owner = NULL)
	 *
	 *				    l->owner = owner
	 *				      ==> l->owner = T1
	 *				  }
	 *
	 * With the check for the waiter bit in place T3 on CPU2 will not
	 * overwrite. All tasks fiddling with the waiters bit are
	 * serialized by l->lock, so nothing else can modify the waiters
	 * bit. If the bit is set then nothing can change l->owner either
	 * so the simple RMW is safe. The cmpxchg() will simply fail if it
	 * happens in the middle of the RMW because the waiters bit is
	 * still set.
	 */
	owner = READ_ONCE(*p);
	if (owner & RT_MUTEX_HAS_WAITERS)
		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
}
  
  /*
cede88418   Sebastian Andrzej Siewior   locking/rtmutex: ...
137
138
   * We can speed up the acquire/release, if there's no debugging state to be
   * set up.
bd197234b   Thomas Gleixner   Revert "futex_req...
139
   */
cede88418   Sebastian Andrzej Siewior   locking/rtmutex: ...
140
  #ifndef CONFIG_DEBUG_RT_MUTEXES
700318d1d   Davidlohr Bueso   locking/rtmutex: ...
141
142
143
144
145
146
147
148
149
  # define rt_mutex_cmpxchg_relaxed(l,c,n) (cmpxchg_relaxed(&l->owner, c, n) == c)
  # define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)
  # define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)
  
  /*
   * Callers must hold the ->wait_lock -- which is the whole purpose as we force
   * all future threads that attempt to [Rmw] the lock to the slowpath. As such
   * relaxed semantics suffice.
   */
bd197234b   Thomas Gleixner   Revert "futex_req...
150
151
152
153
154
155
/*
 * Atomically set RT_MUTEX_HAS_WAITERS in lock->owner, retrying until
 * the cmpxchg succeeds against concurrent owner-word updates. This
 * forces all future lock/unlock attempts into the slowpath.
 */
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg_relaxed(p, owner,
				 owner | RT_MUTEX_HAS_WAITERS) != owner);
}
27e35715d   Thomas Gleixner   rtmutex: Plug slo...
159
160
161
162
163
164
165
  
  /*
   * Safe fastpath aware unlock:
   * 1) Clear the waiters bit
   * 2) Drop lock->wait_lock
   * 3) Try to unlock the lock with cmpxchg
   */
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
166
167
/*
 * Returns true when the cmpxchg released the lock (owner -> NULL),
 * false when a new waiter slipped in and the caller must take the
 * slowpath. The ordering of the three steps is critical - see below.
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
					unsigned long flags)
	__releases(lock->wait_lock)
{
	struct task_struct *owner = rt_mutex_owner(lock);

	clear_rt_mutex_waiters(lock);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	/*
	 * If a new waiter comes in between the unlock and the cmpxchg
	 * we have two situations:
	 *
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 * cmpxchg(p, owner, 0) == owner
	 *					mark_rt_mutex_waiters(lock);
	 *					acquire(lock);
	 * or:
	 *
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 *					mark_rt_mutex_waiters(lock);
	 *
	 * cmpxchg(p, owner, 0) != owner
	 *					enqueue_waiter();
	 *					unlock(wait_lock);
	 * lock(wait_lock);
	 * wake waiter();
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 *					acquire(lock);
	 */
	return rt_mutex_cmpxchg_release(lock, owner, NULL);
}
bd197234b   Thomas Gleixner   Revert "futex_req...
200
  #else
700318d1d   Davidlohr Bueso   locking/rtmutex: ...
201
202
203
  # define rt_mutex_cmpxchg_relaxed(l,c,n)	(0)
  # define rt_mutex_cmpxchg_acquire(l,c,n)	(0)
  # define rt_mutex_cmpxchg_release(l,c,n)	(0)
bd197234b   Thomas Gleixner   Revert "futex_req...
204
205
206
207
208
  static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
  {
  	lock->owner = (struct task_struct *)
  			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
  }
27e35715d   Thomas Gleixner   rtmutex: Plug slo...
209
210
211
212
  
/*
 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
					unsigned long flags)
	__releases(lock->wait_lock)
{
	/* No fastpath cmpxchg to race with: clearing the owner is enough. */
	lock->owner = NULL;
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	/* Always succeeds - the release happened fully under wait_lock. */
	return true;
}
bd197234b   Thomas Gleixner   Revert "futex_req...
221
  #endif
19830e552   Peter Zijlstra   rtmutex: Fix more...
222
223
224
225
226
  /*
   * Only use with rt_mutex_waiter_{less,equal}()
   */
  #define task_to_waiter(p)	\
  	&(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
227
228
229
230
  static inline int
  rt_mutex_waiter_less(struct rt_mutex_waiter *left,
  		     struct rt_mutex_waiter *right)
  {
2d3d891d3   Dario Faggioli   sched/deadline: A...
231
  	if (left->prio < right->prio)
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
232
233
234
  		return 1;
  
  	/*
2d3d891d3   Dario Faggioli   sched/deadline: A...
235
236
237
238
  	 * If both waiters have dl_prio(), we check the deadlines of the
  	 * associated tasks.
  	 * If left waiter has a dl_prio(), and we didn't return 1 above,
  	 * then right waiter has a dl_prio() too.
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
239
  	 */
2d3d891d3   Dario Faggioli   sched/deadline: A...
240
  	if (dl_prio(left->prio))
e0aad5b44   Peter Zijlstra   rtmutex: Fix PI c...
241
  		return dl_time_before(left->deadline, right->deadline);
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
242
243
244
  
  	return 0;
  }
19830e552   Peter Zijlstra   rtmutex: Fix more...
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
  static inline int
  rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
  		      struct rt_mutex_waiter *right)
  {
  	if (left->prio != right->prio)
  		return 0;
  
  	/*
  	 * If both waiters have dl_prio(), we check the deadlines of the
  	 * associated tasks.
  	 * If left waiter has a dl_prio(), and we didn't return 0 above,
  	 * then right waiter has a dl_prio() too.
  	 */
  	if (dl_prio(left->prio))
  		return left->deadline == right->deadline;
  
  	return 1;
  }
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
263
264
265
  static void
  rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
  {
a23ba907d   Davidlohr Bueso   locking/rtmutex: ...
266
  	struct rb_node **link = &lock->waiters.rb_root.rb_node;
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
267
268
  	struct rb_node *parent = NULL;
  	struct rt_mutex_waiter *entry;
a23ba907d   Davidlohr Bueso   locking/rtmutex: ...
269
  	bool leftmost = true;
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
270
271
272
273
274
275
276
277
  
  	while (*link) {
  		parent = *link;
  		entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
  		if (rt_mutex_waiter_less(waiter, entry)) {
  			link = &parent->rb_left;
  		} else {
  			link = &parent->rb_right;
a23ba907d   Davidlohr Bueso   locking/rtmutex: ...
278
  			leftmost = false;
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
279
280
  		}
  	}
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
281
  	rb_link_node(&waiter->tree_entry, parent, link);
a23ba907d   Davidlohr Bueso   locking/rtmutex: ...
282
  	rb_insert_color_cached(&waiter->tree_entry, &lock->waiters, leftmost);
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
283
284
285
286
287
288
289
  }
  
  static void
  rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
  {
  	if (RB_EMPTY_NODE(&waiter->tree_entry))
  		return;
a23ba907d   Davidlohr Bueso   locking/rtmutex: ...
290
  	rb_erase_cached(&waiter->tree_entry, &lock->waiters);
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
291
292
293
294
295
296
  	RB_CLEAR_NODE(&waiter->tree_entry);
  }
  
  static void
  rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
  {
a23ba907d   Davidlohr Bueso   locking/rtmutex: ...
297
  	struct rb_node **link = &task->pi_waiters.rb_root.rb_node;
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
298
299
  	struct rb_node *parent = NULL;
  	struct rt_mutex_waiter *entry;
a23ba907d   Davidlohr Bueso   locking/rtmutex: ...
300
  	bool leftmost = true;
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
301
302
303
304
305
306
307
308
  
  	while (*link) {
  		parent = *link;
  		entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
  		if (rt_mutex_waiter_less(waiter, entry)) {
  			link = &parent->rb_left;
  		} else {
  			link = &parent->rb_right;
a23ba907d   Davidlohr Bueso   locking/rtmutex: ...
309
  			leftmost = false;
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
310
311
  		}
  	}
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
312
  	rb_link_node(&waiter->pi_tree_entry, parent, link);
a23ba907d   Davidlohr Bueso   locking/rtmutex: ...
313
  	rb_insert_color_cached(&waiter->pi_tree_entry, &task->pi_waiters, leftmost);
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
314
315
316
317
318
319
320
  }
  
  static void
  rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
  {
  	if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
  		return;
a23ba907d   Davidlohr Bueso   locking/rtmutex: ...
321
  	rb_erase_cached(&waiter->pi_tree_entry, &task->pi_waiters);
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
322
323
  	RB_CLEAR_NODE(&waiter->pi_tree_entry);
  }
acd58620e   Peter Zijlstra   sched/rtmutex: Re...
324
  static void rt_mutex_adjust_prio(struct task_struct *p)
c365c292d   Thomas Gleixner   sched: Consider p...
325
  {
acd58620e   Peter Zijlstra   sched/rtmutex: Re...
326
  	struct task_struct *pi_task = NULL;
e96a7705e   Xunlei Pang   sched/rtmutex/dea...
327

acd58620e   Peter Zijlstra   sched/rtmutex: Re...
328
  	lockdep_assert_held(&p->pi_lock);
c365c292d   Thomas Gleixner   sched: Consider p...
329

acd58620e   Peter Zijlstra   sched/rtmutex: Re...
330
331
  	if (task_has_pi_waiters(p))
  		pi_task = task_top_pi_waiter(p)->task;
c365c292d   Thomas Gleixner   sched: Consider p...
332

acd58620e   Peter Zijlstra   sched/rtmutex: Re...
333
  	rt_mutex_setprio(p, pi_task);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
334
335
336
  }
  
  /*
8930ed80f   Thomas Gleixner   rtmutex: Cleanup ...
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
   * Deadlock detection is conditional:
   *
   * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
   * if the detect argument is == RT_MUTEX_FULL_CHAINWALK.
   *
   * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
   * conducted independent of the detect argument.
   *
   * If the waiter argument is NULL this indicates the deboost path and
   * deadlock detection is disabled independent of the detect argument
   * and the config settings.
   */
/*
 * Decide whether this chain walk should perform deadlock detection -
 * see the conditional-detection rules documented above.
 */
static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
					  enum rtmutex_chainwalk chwalk)
{
	/*
	 * This is just a wrapper function for the following call,
	 * because debug_rt_mutex_detect_deadlock() smells like a magic
	 * debug feature and I wanted to keep the cond function in the
	 * main source file along with the comments instead of having
	 * two of the same in the headers.
	 */
	return debug_rt_mutex_detect_deadlock(waiter, chwalk);
}
  
  /*
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
363
364
365
   * Max number of times we'll walk the boosting chain:
   */
  int max_lock_depth = 1024;
820849843   Thomas Gleixner   rtmutex: Detect c...
366
367
368
369
  static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
  {
  	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
  }
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
370
371
372
  /*
   * Adjust the priority chain. Also used for deadlock detection.
   * Decreases task's usage by one - may thus free the task.
0c1061733   Juri Lelli   rtmutex: Document...
373
   *
820849843   Thomas Gleixner   rtmutex: Detect c...
374
375
   * @task:	the task owning the mutex (owner) for which a chain walk is
   *		probably needed
e6beaa363   Tom(JeHyeon) Yeon   locking/rtmutex: ...
376
   * @chwalk:	do we have to carry out deadlock detection?
820849843   Thomas Gleixner   rtmutex: Detect c...
377
378
379
380
381
382
   * @orig_lock:	the mutex (can be NULL if we are walking the chain to recheck
   *		things for a task that has just got its priority adjusted, and
   *		is waiting on a mutex)
   * @next_lock:	the mutex on which the owner of @orig_lock was blocked before
   *		we dropped its pi_lock. Is never dereferenced, only used for
   *		comparison to detect lock chain changes.
0c1061733   Juri Lelli   rtmutex: Document...
383
   * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
820849843   Thomas Gleixner   rtmutex: Detect c...
384
385
386
387
   *		its priority to the mutex owner (can be NULL in the case
   *		depicted above or if the top waiter is gone away and we are
   *		actually deboosting the owner)
   * @top_task:	the current top waiter
0c1061733   Juri Lelli   rtmutex: Document...
388
   *
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
389
   * Returns 0 or -EDEADLK.
3eb65aead   Thomas Gleixner   rtmutex: Document...
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
   *
   * Chain walk basics and protection scope
   *
   * [R] refcount on task
   * [P] task->pi_lock held
   * [L] rtmutex->wait_lock held
   *
   * Step	Description				Protected by
   *	function arguments:
   *	@task					[R]
   *	@orig_lock if != NULL			@top_task is blocked on it
   *	@next_lock				Unprotected. Cannot be
   *						dereferenced. Only used for
   *						comparison.
   *	@orig_waiter if != NULL			@top_task is blocked on it
   *	@top_task				current, or in case of proxy
   *						locking protected by calling
   *						code
   *	again:
   *	  loop_sanity_check();
   *	retry:
   * [1]	  lock(task->pi_lock);			[R] acquire [P]
   * [2]	  waiter = task->pi_blocked_on;		[P]
   * [3]	  check_exit_conditions_1();		[P]
   * [4]	  lock = waiter->lock;			[P]
   * [5]	  if (!try_lock(lock->wait_lock)) {	[P] try to acquire [L]
   *	    unlock(task->pi_lock);		release [P]
   *	    goto retry;
   *	  }
   * [6]	  check_exit_conditions_2();		[P] + [L]
   * [7]	  requeue_lock_waiter(lock, waiter);	[P] + [L]
   * [8]	  unlock(task->pi_lock);		release [P]
   *	  put_task_struct(task);		release [R]
   * [9]	  check_exit_conditions_3();		[L]
   * [10]	  task = owner(lock);			[L]
   *	  get_task_struct(task);		[L] acquire [R]
   *	  lock(task->pi_lock);			[L] acquire [P]
   * [11]	  requeue_pi_waiter(tsk, waiters(lock));[P] + [L]
   * [12]	  check_exit_conditions_4();		[P] + [L]
   * [13]	  unlock(task->pi_lock);		release [P]
   *	  unlock(lock->wait_lock);		release [L]
   *	  goto again;
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
432
   */
bd197234b   Thomas Gleixner   Revert "futex_req...
433
  static int rt_mutex_adjust_prio_chain(struct task_struct *task,
8930ed80f   Thomas Gleixner   rtmutex: Cleanup ...
434
  				      enum rtmutex_chainwalk chwalk,
bd197234b   Thomas Gleixner   Revert "futex_req...
435
  				      struct rt_mutex *orig_lock,
820849843   Thomas Gleixner   rtmutex: Detect c...
436
  				      struct rt_mutex *next_lock,
bd197234b   Thomas Gleixner   Revert "futex_req...
437
438
  				      struct rt_mutex_waiter *orig_waiter,
  				      struct task_struct *top_task)
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
439
  {
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
440
  	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
a57594a13   Thomas Gleixner   rtmutex: Clarify ...
441
  	struct rt_mutex_waiter *prerequeue_top_waiter;
8930ed80f   Thomas Gleixner   rtmutex: Cleanup ...
442
  	int ret = 0, depth = 0;
a57594a13   Thomas Gleixner   rtmutex: Clarify ...
443
  	struct rt_mutex *lock;
8930ed80f   Thomas Gleixner   rtmutex: Cleanup ...
444
  	bool detect_deadlock;
67792e2ca   Thomas Gleixner   rtmutex: Avoid po...
445
  	bool requeue = true;
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
446

8930ed80f   Thomas Gleixner   rtmutex: Cleanup ...
447
  	detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
448
449
450
451
452
453
454
455
  
  	/*
  	 * The (de)boosting is a step by step approach with a lot of
  	 * pitfalls. We want this to be preemptible and we want hold a
  	 * maximum of two locks per step. So we have to check
  	 * carefully whether things change under us.
  	 */
   again:
3eb65aead   Thomas Gleixner   rtmutex: Document...
456
457
458
  	/*
  	 * We limit the lock chain length for each invocation.
  	 */
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
459
460
461
462
463
464
465
466
467
468
469
470
  	if (++depth > max_lock_depth) {
  		static int prev_max;
  
  		/*
  		 * Print this only once. If the admin changes the limit,
  		 * print a new message when reaching the limit again.
  		 */
  		if (prev_max != max_lock_depth) {
  			prev_max = max_lock_depth;
  			printk(KERN_WARNING "Maximum lock depth %d reached "
  			       "task: %s (%d)
  ", max_lock_depth,
ba25f9dcc   Pavel Emelyanov   Use helpers to ob...
471
  			       top_task->comm, task_pid_nr(top_task));
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
472
473
  		}
  		put_task_struct(task);
3d5c9340d   Thomas Gleixner   rtmutex: Handle d...
474
  		return -EDEADLK;
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
475
  	}
3eb65aead   Thomas Gleixner   rtmutex: Document...
476
477
478
479
480
481
482
  
  	/*
  	 * We are fully preemptible here and only hold the refcount on
  	 * @task. So everything can have changed under us since the
  	 * caller or our own code below (goto retry/again) dropped all
  	 * locks.
  	 */
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
483
484
   retry:
  	/*
3eb65aead   Thomas Gleixner   rtmutex: Document...
485
  	 * [1] Task cannot go away as we did a get_task() before !
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
486
  	 */
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
487
  	raw_spin_lock_irq(&task->pi_lock);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
488

3eb65aead   Thomas Gleixner   rtmutex: Document...
489
490
491
  	/*
  	 * [2] Get the waiter on which @task is blocked on.
  	 */
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
492
  	waiter = task->pi_blocked_on;
3eb65aead   Thomas Gleixner   rtmutex: Document...
493
494
495
496
  
  	/*
  	 * [3] check_exit_conditions_1() protected by task->pi_lock.
  	 */
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
497
498
499
500
501
  	/*
  	 * Check whether the end of the boosting chain has been
  	 * reached or the state of the chain has changed while we
  	 * dropped the locks.
  	 */
8161239a8   Lai Jiangshan   rtmutex: Simplify...
502
  	if (!waiter)
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
503
  		goto out_unlock_pi;
1a539a872   Thomas Gleixner   rt-mutex: fix cha...
504
505
  	/*
  	 * Check the orig_waiter state. After we dropped the locks,
8161239a8   Lai Jiangshan   rtmutex: Simplify...
506
  	 * the previous owner of the lock might have released the lock.
1a539a872   Thomas Gleixner   rt-mutex: fix cha...
507
  	 */
8161239a8   Lai Jiangshan   rtmutex: Simplify...
508
  	if (orig_waiter && !rt_mutex_owner(orig_lock))
1a539a872   Thomas Gleixner   rt-mutex: fix cha...
509
510
511
  		goto out_unlock_pi;
  
  	/*
820849843   Thomas Gleixner   rtmutex: Detect c...
512
513
514
515
516
517
518
519
520
521
522
523
  	 * We dropped all locks after taking a refcount on @task, so
  	 * the task might have moved on in the lock chain or even left
  	 * the chain completely and blocks now on an unrelated lock or
  	 * on @orig_lock.
  	 *
  	 * We stored the lock on which @task was blocked in @next_lock,
  	 * so we can detect the chain change.
  	 */
  	if (next_lock != waiter->lock)
  		goto out_unlock_pi;
  
  	/*
1a539a872   Thomas Gleixner   rt-mutex: fix cha...
524
525
526
527
  	 * Drop out, when the task has no waiters. Note,
  	 * top_waiter can be NULL, when we are in the deboosting
  	 * mode!
  	 */
397335f00   Thomas Gleixner   rtmutex: Fix dead...
528
529
530
531
532
  	if (top_waiter) {
  		if (!task_has_pi_waiters(task))
  			goto out_unlock_pi;
  		/*
  		 * If deadlock detection is off, we stop here if we
67792e2ca   Thomas Gleixner   rtmutex: Avoid po...
533
534
535
  		 * are not the top pi waiter of the task. If deadlock
  		 * detection is enabled we continue, but stop the
  		 * requeueing in the chain walk.
397335f00   Thomas Gleixner   rtmutex: Fix dead...
536
  		 */
67792e2ca   Thomas Gleixner   rtmutex: Avoid po...
537
538
539
540
541
542
  		if (top_waiter != task_top_pi_waiter(task)) {
  			if (!detect_deadlock)
  				goto out_unlock_pi;
  			else
  				requeue = false;
  		}
397335f00   Thomas Gleixner   rtmutex: Fix dead...
543
  	}
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
544
545
  
  	/*
67792e2ca   Thomas Gleixner   rtmutex: Avoid po...
546
547
548
549
550
  	 * If the waiter priority is the same as the task priority
  	 * then there is no further priority adjustment necessary.  If
  	 * deadlock detection is off, we stop the chain walk. If its
  	 * enabled we continue, but stop the requeueing in the chain
  	 * walk.
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
551
  	 */
19830e552   Peter Zijlstra   rtmutex: Fix more...
552
  	if (rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
67792e2ca   Thomas Gleixner   rtmutex: Avoid po...
553
554
555
556
557
  		if (!detect_deadlock)
  			goto out_unlock_pi;
  		else
  			requeue = false;
  	}
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
558

3eb65aead   Thomas Gleixner   rtmutex: Document...
559
560
561
  	/*
  	 * [4] Get the next lock
  	 */
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
562
  	lock = waiter->lock;
3eb65aead   Thomas Gleixner   rtmutex: Document...
563
564
565
566
567
  	/*
  	 * [5] We need to trylock here as we are holding task->pi_lock,
  	 * which is the reverse lock order versus the other rtmutex
  	 * operations.
  	 */
d209d74d5   Thomas Gleixner   rtmutes: Convert ...
568
  	if (!raw_spin_trylock(&lock->wait_lock)) {
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
569
  		raw_spin_unlock_irq(&task->pi_lock);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
570
571
572
  		cpu_relax();
  		goto retry;
  	}
397335f00   Thomas Gleixner   rtmutex: Fix dead...
573
  	/*
3eb65aead   Thomas Gleixner   rtmutex: Document...
574
575
576
  	 * [6] check_exit_conditions_2() protected by task->pi_lock and
  	 * lock->wait_lock.
  	 *
397335f00   Thomas Gleixner   rtmutex: Fix dead...
577
578
579
580
581
  	 * Deadlock detection. If the lock is the same as the original
  	 * lock which caused us to walk the lock chain or if the
  	 * current lock is owned by the task which initiated the chain
  	 * walk, we detected a deadlock.
  	 */
95e02ca9b   Thomas Gleixner   [PATCH] rtmutex: ...
582
  	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
8930ed80f   Thomas Gleixner   rtmutex: Cleanup ...
583
  		debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
d209d74d5   Thomas Gleixner   rtmutes: Convert ...
584
  		raw_spin_unlock(&lock->wait_lock);
3d5c9340d   Thomas Gleixner   rtmutex: Handle d...
585
  		ret = -EDEADLK;
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
586
587
  		goto out_unlock_pi;
  	}
a57594a13   Thomas Gleixner   rtmutex: Clarify ...
588
  	/*
67792e2ca   Thomas Gleixner   rtmutex: Avoid po...
589
590
591
592
593
594
595
596
597
  	 * If we just follow the lock chain for deadlock detection, no
  	 * need to do all the requeue operations. To avoid a truckload
  	 * of conditionals around the various places below, just do the
  	 * minimum chain walk checks.
  	 */
  	if (!requeue) {
  		/*
  		 * No requeue[7] here. Just release @task [8]
  		 */
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
598
  		raw_spin_unlock(&task->pi_lock);
67792e2ca   Thomas Gleixner   rtmutex: Avoid po...
599
600
601
602
603
604
605
  		put_task_struct(task);
  
  		/*
  		 * [9] check_exit_conditions_3 protected by lock->wait_lock.
  		 * If there is no owner of the lock, end of chain.
  		 */
  		if (!rt_mutex_owner(lock)) {
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
606
  			raw_spin_unlock_irq(&lock->wait_lock);
67792e2ca   Thomas Gleixner   rtmutex: Avoid po...
607
608
609
610
611
612
  			return 0;
  		}
  
  		/* [10] Grab the next task, i.e. owner of @lock */
  		task = rt_mutex_owner(lock);
  		get_task_struct(task);
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
613
  		raw_spin_lock(&task->pi_lock);
67792e2ca   Thomas Gleixner   rtmutex: Avoid po...
614
615
616
617
618
619
620
621
622
623
624
625
626
627
  
  		/*
  		 * No requeue [11] here. We just do deadlock detection.
  		 *
  		 * [12] Store whether owner is blocked
  		 * itself. Decision is made after dropping the locks
  		 */
  		next_lock = task_blocked_on_lock(task);
  		/*
  		 * Get the top waiter for the next iteration
  		 */
  		top_waiter = rt_mutex_top_waiter(lock);
  
  		/* [13] Drop locks */
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
628
629
  		raw_spin_unlock(&task->pi_lock);
  		raw_spin_unlock_irq(&lock->wait_lock);
67792e2ca   Thomas Gleixner   rtmutex: Avoid po...
630
631
632
633
634
635
636
637
  
  		/* If owner is not blocked, end of chain. */
  		if (!next_lock)
  			goto out_put_task;
  		goto again;
  	}
  
  	/*
a57594a13   Thomas Gleixner   rtmutex: Clarify ...
638
639
640
641
642
  	 * Store the current top waiter before doing the requeue
  	 * operation on @lock. We need it for the boost/deboost
  	 * decision below.
  	 */
  	prerequeue_top_waiter = rt_mutex_top_waiter(lock);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
643

9f40a51a3   Davidlohr Bueso   locking/rtmutex: ...
644
  	/* [7] Requeue the waiter in the lock waiter tree. */
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
645
  	rt_mutex_dequeue(lock, waiter);
e0aad5b44   Peter Zijlstra   rtmutex: Fix PI c...
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
  
  	/*
  	 * Update the waiter prio fields now that we're dequeued.
  	 *
  	 * These values can have changed through either:
  	 *
  	 *   sys_sched_set_scheduler() / sys_sched_setattr()
  	 *
  	 * or
  	 *
  	 *   DL CBS enforcement advancing the effective deadline.
  	 *
  	 * Even though pi_waiters also uses these fields, and that tree is only
  	 * updated in [11], we can do this here, since we hold [L], which
  	 * serializes all pi_waiters access and rb_erase() does not care about
  	 * the values of the node being removed.
  	 */
2d3d891d3   Dario Faggioli   sched/deadline: A...
663
  	waiter->prio = task->prio;
e0aad5b44   Peter Zijlstra   rtmutex: Fix PI c...
664
  	waiter->deadline = task->dl.deadline;
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
665
  	rt_mutex_enqueue(lock, waiter);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
666

3eb65aead   Thomas Gleixner   rtmutex: Document...
667
  	/* [8] Release the task */
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
668
  	raw_spin_unlock(&task->pi_lock);
2ffa5a5cd   Thomas Gleixner   rtmutex: No need ...
669
  	put_task_struct(task);
a57594a13   Thomas Gleixner   rtmutex: Clarify ...
670
  	/*
3eb65aead   Thomas Gleixner   rtmutex: Document...
671
672
  	 * [9] check_exit_conditions_3 protected by lock->wait_lock.
  	 *
a57594a13   Thomas Gleixner   rtmutex: Clarify ...
673
674
675
676
  	 * We must abort the chain walk if there is no lock owner even
  	 * in the dead lock detection case, as we have nothing to
  	 * follow here. This is the end of the chain we are walking.
  	 */
8161239a8   Lai Jiangshan   rtmutex: Simplify...
677
678
  	if (!rt_mutex_owner(lock)) {
  		/*
3eb65aead   Thomas Gleixner   rtmutex: Document...
679
680
681
  		 * If the requeue [7] above changed the top waiter,
  		 * then we need to wake the new top waiter up to try
  		 * to get the lock.
8161239a8   Lai Jiangshan   rtmutex: Simplify...
682
  		 */
a57594a13   Thomas Gleixner   rtmutex: Clarify ...
683
  		if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
8161239a8   Lai Jiangshan   rtmutex: Simplify...
684
  			wake_up_process(rt_mutex_top_waiter(lock)->task);
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
685
  		raw_spin_unlock_irq(&lock->wait_lock);
2ffa5a5cd   Thomas Gleixner   rtmutex: No need ...
686
  		return 0;
8161239a8   Lai Jiangshan   rtmutex: Simplify...
687
  	}
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
688

3eb65aead   Thomas Gleixner   rtmutex: Document...
689
  	/* [10] Grab the next task, i.e. the owner of @lock */
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
690
  	task = rt_mutex_owner(lock);
db630637b   Steven Rostedt   [PATCH] clean up ...
691
  	get_task_struct(task);
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
692
  	raw_spin_lock(&task->pi_lock);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
693

3eb65aead   Thomas Gleixner   rtmutex: Document...
694
  	/* [11] requeue the pi waiters if necessary */
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
695
  	if (waiter == rt_mutex_top_waiter(lock)) {
a57594a13   Thomas Gleixner   rtmutex: Clarify ...
696
697
698
  		/*
  		 * The waiter became the new top (highest priority)
  		 * waiter on the lock. Replace the previous top waiter
9f40a51a3   Davidlohr Bueso   locking/rtmutex: ...
699
  		 * in the owner tasks pi waiters tree with this waiter
a57594a13   Thomas Gleixner   rtmutex: Clarify ...
700
701
702
  		 * and adjust the priority of the owner.
  		 */
  		rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
703
  		rt_mutex_enqueue_pi(task, waiter);
acd58620e   Peter Zijlstra   sched/rtmutex: Re...
704
  		rt_mutex_adjust_prio(task);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
705

a57594a13   Thomas Gleixner   rtmutex: Clarify ...
706
707
708
709
  	} else if (prerequeue_top_waiter == waiter) {
  		/*
  		 * The waiter was the top waiter on the lock, but is
  		 * no longer the top prority waiter. Replace waiter in
9f40a51a3   Davidlohr Bueso   locking/rtmutex: ...
710
  		 * the owner tasks pi waiters tree with the new top
a57594a13   Thomas Gleixner   rtmutex: Clarify ...
711
712
713
714
715
716
  		 * (highest priority) waiter and adjust the priority
  		 * of the owner.
  		 * The new top waiter is stored in @waiter so that
  		 * @waiter == @top_waiter evaluates to true below and
  		 * we continue to deboost the rest of the chain.
  		 */
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
717
  		rt_mutex_dequeue_pi(task, waiter);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
718
  		waiter = rt_mutex_top_waiter(lock);
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
719
  		rt_mutex_enqueue_pi(task, waiter);
acd58620e   Peter Zijlstra   sched/rtmutex: Re...
720
  		rt_mutex_adjust_prio(task);
a57594a13   Thomas Gleixner   rtmutex: Clarify ...
721
722
723
724
725
  	} else {
  		/*
  		 * Nothing changed. No need to do any priority
  		 * adjustment.
  		 */
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
726
  	}
820849843   Thomas Gleixner   rtmutex: Detect c...
727
  	/*
3eb65aead   Thomas Gleixner   rtmutex: Document...
728
729
730
731
  	 * [12] check_exit_conditions_4() protected by task->pi_lock
  	 * and lock->wait_lock. The actual decisions are made after we
  	 * dropped the locks.
  	 *
820849843   Thomas Gleixner   rtmutex: Detect c...
732
733
734
735
736
737
  	 * Check whether the task which owns the current lock is pi
  	 * blocked itself. If yes we store a pointer to the lock for
  	 * the lock chain change detection above. After we dropped
  	 * task->pi_lock next_lock cannot be dereferenced anymore.
  	 */
  	next_lock = task_blocked_on_lock(task);
a57594a13   Thomas Gleixner   rtmutex: Clarify ...
738
739
740
741
  	/*
  	 * Store the top waiter of @lock for the end of chain walk
  	 * decision below.
  	 */
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
742
  	top_waiter = rt_mutex_top_waiter(lock);
3eb65aead   Thomas Gleixner   rtmutex: Document...
743
744
  
  	/* [13] Drop the locks */
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
745
746
  	raw_spin_unlock(&task->pi_lock);
  	raw_spin_unlock_irq(&lock->wait_lock);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
747

820849843   Thomas Gleixner   rtmutex: Detect c...
748
  	/*
3eb65aead   Thomas Gleixner   rtmutex: Document...
749
750
751
  	 * Make the actual exit decisions [12], based on the stored
  	 * values.
  	 *
820849843   Thomas Gleixner   rtmutex: Detect c...
752
753
754
755
756
  	 * We reached the end of the lock chain. Stop right here. No
  	 * point to go back just to figure that out.
  	 */
  	if (!next_lock)
  		goto out_put_task;
a57594a13   Thomas Gleixner   rtmutex: Clarify ...
757
758
759
760
761
  	/*
  	 * If the current waiter is not the top waiter on the lock,
  	 * then we can stop the chain walk here if we are not in full
  	 * deadlock detection mode.
  	 */
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
762
763
764
765
766
767
  	if (!detect_deadlock && waiter != top_waiter)
  		goto out_put_task;
  
  	goto again;
  
   out_unlock_pi:
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
768
  	raw_spin_unlock_irq(&task->pi_lock);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
769
770
   out_put_task:
  	put_task_struct(task);
36c8b5868   Ingo Molnar   [PATCH] sched: cl...
771

23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
772
773
774
775
  	return ret;
  }
  
  /*
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
776
777
   * Try to take an rt-mutex
   *
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
778
   * Must be called with lock->wait_lock held and interrupts disabled
8161239a8   Lai Jiangshan   rtmutex: Simplify...
779
   *
358c331f3   Thomas Gleixner   rtmutex: Simplify...
780
781
   * @lock:   The lock to be acquired.
   * @task:   The task which wants to acquire the lock
9f40a51a3   Davidlohr Bueso   locking/rtmutex: ...
782
   * @waiter: The waiter that is queued to the lock's wait tree if the
358c331f3   Thomas Gleixner   rtmutex: Simplify...
783
   *	    callsite called task_blocked_on_lock(), otherwise NULL
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
784
   */
8161239a8   Lai Jiangshan   rtmutex: Simplify...
785
  static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
358c331f3   Thomas Gleixner   rtmutex: Simplify...
786
  				struct rt_mutex_waiter *waiter)
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
787
  {
e0aad5b44   Peter Zijlstra   rtmutex: Fix PI c...
788
  	lockdep_assert_held(&lock->wait_lock);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
789
  	/*
358c331f3   Thomas Gleixner   rtmutex: Simplify...
790
791
792
793
  	 * Before testing whether we can acquire @lock, we set the
  	 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
  	 * other tasks which try to modify @lock into the slow path
  	 * and they serialize on @lock->wait_lock.
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
794
  	 *
358c331f3   Thomas Gleixner   rtmutex: Simplify...
795
796
  	 * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
  	 * as explained at the top of this file if and only if:
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
797
  	 *
358c331f3   Thomas Gleixner   rtmutex: Simplify...
798
799
800
801
802
803
804
  	 * - There is a lock owner. The caller must fixup the
  	 *   transient state if it does a trylock or leaves the lock
  	 *   function due to a signal or timeout.
  	 *
  	 * - @task acquires the lock and there are no other
  	 *   waiters. This is undone in rt_mutex_set_owner(@task) at
  	 *   the end of this function.
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
805
806
  	 */
  	mark_rt_mutex_waiters(lock);
358c331f3   Thomas Gleixner   rtmutex: Simplify...
807
808
809
  	/*
  	 * If @lock has an owner, give up.
  	 */
8161239a8   Lai Jiangshan   rtmutex: Simplify...
810
  	if (rt_mutex_owner(lock))
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
811
  		return 0;
8161239a8   Lai Jiangshan   rtmutex: Simplify...
812
  	/*
358c331f3   Thomas Gleixner   rtmutex: Simplify...
813
  	 * If @waiter != NULL, @task has already enqueued the waiter
9f40a51a3   Davidlohr Bueso   locking/rtmutex: ...
814
  	 * into @lock waiter tree. If @waiter == NULL then this is a
358c331f3   Thomas Gleixner   rtmutex: Simplify...
815
  	 * trylock attempt.
8161239a8   Lai Jiangshan   rtmutex: Simplify...
816
  	 */
358c331f3   Thomas Gleixner   rtmutex: Simplify...
817
818
819
820
821
822
823
  	if (waiter) {
  		/*
  		 * If waiter is not the highest priority waiter of
  		 * @lock, give up.
  		 */
  		if (waiter != rt_mutex_top_waiter(lock))
  			return 0;
8161239a8   Lai Jiangshan   rtmutex: Simplify...
824

358c331f3   Thomas Gleixner   rtmutex: Simplify...
825
826
  		/*
  		 * We can acquire the lock. Remove the waiter from the
9f40a51a3   Davidlohr Bueso   locking/rtmutex: ...
827
  		 * lock waiters tree.
358c331f3   Thomas Gleixner   rtmutex: Simplify...
828
829
  		 */
  		rt_mutex_dequeue(lock, waiter);
8161239a8   Lai Jiangshan   rtmutex: Simplify...
830

358c331f3   Thomas Gleixner   rtmutex: Simplify...
831
  	} else {
8161239a8   Lai Jiangshan   rtmutex: Simplify...
832
  		/*
358c331f3   Thomas Gleixner   rtmutex: Simplify...
833
834
835
836
837
838
  		 * If the lock has waiters already we check whether @task is
  		 * eligible to take over the lock.
  		 *
  		 * If there are no other waiters, @task can acquire
  		 * the lock.  @task->pi_blocked_on is NULL, so it does
  		 * not need to be dequeued.
8161239a8   Lai Jiangshan   rtmutex: Simplify...
839
840
  		 */
  		if (rt_mutex_has_waiters(lock)) {
358c331f3   Thomas Gleixner   rtmutex: Simplify...
841
842
843
844
845
  			/*
  			 * If @task->prio is greater than or equal to
  			 * the top waiter priority (kernel view),
  			 * @task lost.
  			 */
19830e552   Peter Zijlstra   rtmutex: Fix more...
846
847
  			if (!rt_mutex_waiter_less(task_to_waiter(task),
  						  rt_mutex_top_waiter(lock)))
358c331f3   Thomas Gleixner   rtmutex: Simplify...
848
849
850
851
852
853
854
855
856
857
858
859
  				return 0;
  
  			/*
  			 * The current top waiter stays enqueued. We
  			 * don't have to change anything in the lock
  			 * waiters order.
  			 */
  		} else {
  			/*
  			 * No waiters. Take the lock without the
  			 * pi_lock dance.@task->pi_blocked_on is NULL
  			 * and we have no waiters to enqueue in @task
9f40a51a3   Davidlohr Bueso   locking/rtmutex: ...
860
  			 * pi waiters tree.
358c331f3   Thomas Gleixner   rtmutex: Simplify...
861
862
  			 */
  			goto takeit;
8161239a8   Lai Jiangshan   rtmutex: Simplify...
863
  		}
8161239a8   Lai Jiangshan   rtmutex: Simplify...
864
  	}
358c331f3   Thomas Gleixner   rtmutex: Simplify...
865
866
867
868
869
870
  	/*
  	 * Clear @task->pi_blocked_on. Requires protection by
  	 * @task->pi_lock. Redundant operation for the @waiter == NULL
  	 * case, but conditionals are more expensive than a redundant
  	 * store.
  	 */
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
871
  	raw_spin_lock(&task->pi_lock);
358c331f3   Thomas Gleixner   rtmutex: Simplify...
872
873
874
875
  	task->pi_blocked_on = NULL;
  	/*
  	 * Finish the lock acquisition. @task is the new owner. If
  	 * other waiters exist we have to insert the highest priority
9f40a51a3   Davidlohr Bueso   locking/rtmutex: ...
876
  	 * waiter into @task->pi_waiters tree.
358c331f3   Thomas Gleixner   rtmutex: Simplify...
877
878
879
  	 */
  	if (rt_mutex_has_waiters(lock))
  		rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
880
  	raw_spin_unlock(&task->pi_lock);
358c331f3   Thomas Gleixner   rtmutex: Simplify...
881
882
  
  takeit:
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
883
  	/* We got the lock. */
9a11b49a8   Ingo Molnar   [PATCH] lockdep: ...
884
  	debug_rt_mutex_lock(lock);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
885

358c331f3   Thomas Gleixner   rtmutex: Simplify...
886
887
888
889
  	/*
  	 * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
  	 * are still waiters or clears it.
  	 */
8161239a8   Lai Jiangshan   rtmutex: Simplify...
890
  	rt_mutex_set_owner(lock, task);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
891

23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
892
893
894
895
896
897
898
899
  	return 1;
  }
  
  /*
   * Task blocks on lock.
   *
   * Prepare waiter and propagate pi chain
   *
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
900
   * This must be called with lock->wait_lock held and interrupts disabled
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
901
902
903
   */
  static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
  				   struct rt_mutex_waiter *waiter,
8dac456a6   Darren Hart   rt_mutex: add pro...
904
  				   struct task_struct *task,
8930ed80f   Thomas Gleixner   rtmutex: Cleanup ...
905
  				   enum rtmutex_chainwalk chwalk)
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
906
  {
36c8b5868   Ingo Molnar   [PATCH] sched: cl...
907
  	struct task_struct *owner = rt_mutex_owner(lock);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
908
  	struct rt_mutex_waiter *top_waiter = waiter;
820849843   Thomas Gleixner   rtmutex: Detect c...
909
  	struct rt_mutex *next_lock;
db630637b   Steven Rostedt   [PATCH] clean up ...
910
  	int chain_walk = 0, res;
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
911

e0aad5b44   Peter Zijlstra   rtmutex: Fix PI c...
912
  	lockdep_assert_held(&lock->wait_lock);
397335f00   Thomas Gleixner   rtmutex: Fix dead...
913
914
915
916
917
918
919
920
921
  	/*
  	 * Early deadlock detection. We really don't want the task to
  	 * enqueue on itself just to untangle the mess later. It's not
  	 * only an optimization. We drop the locks, so another waiter
  	 * can come in before the chain walk detects the deadlock. So
  	 * the other will detect the deadlock and return -EDEADLOCK,
  	 * which is wrong, as the other waiter is not in a deadlock
  	 * situation.
  	 */
3d5c9340d   Thomas Gleixner   rtmutex: Handle d...
922
  	if (owner == task)
397335f00   Thomas Gleixner   rtmutex: Fix dead...
923
  		return -EDEADLK;
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
924
  	raw_spin_lock(&task->pi_lock);
8dac456a6   Darren Hart   rt_mutex: add pro...
925
  	waiter->task = task;
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
926
  	waiter->lock = lock;
2d3d891d3   Dario Faggioli   sched/deadline: A...
927
  	waiter->prio = task->prio;
e0aad5b44   Peter Zijlstra   rtmutex: Fix PI c...
928
  	waiter->deadline = task->dl.deadline;
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
929
930
931
932
  
  	/* Get the top priority waiter on the lock */
  	if (rt_mutex_has_waiters(lock))
  		top_waiter = rt_mutex_top_waiter(lock);
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
933
  	rt_mutex_enqueue(lock, waiter);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
934

8dac456a6   Darren Hart   rt_mutex: add pro...
935
  	task->pi_blocked_on = waiter;
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
936

b4abf9104   Thomas Gleixner   rtmutex: Make wai...
937
  	raw_spin_unlock(&task->pi_lock);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
938

8161239a8   Lai Jiangshan   rtmutex: Simplify...
939
940
  	if (!owner)
  		return 0;
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
941
  	raw_spin_lock(&owner->pi_lock);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
942
  	if (waiter == rt_mutex_top_waiter(lock)) {
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
943
944
  		rt_mutex_dequeue_pi(owner, top_waiter);
  		rt_mutex_enqueue_pi(owner, waiter);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
945

acd58620e   Peter Zijlstra   sched/rtmutex: Re...
946
  		rt_mutex_adjust_prio(owner);
db630637b   Steven Rostedt   [PATCH] clean up ...
947
948
  		if (owner->pi_blocked_on)
  			chain_walk = 1;
8930ed80f   Thomas Gleixner   rtmutex: Cleanup ...
949
  	} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
db630637b   Steven Rostedt   [PATCH] clean up ...
950
  		chain_walk = 1;
820849843   Thomas Gleixner   rtmutex: Detect c...
951
  	}
db630637b   Steven Rostedt   [PATCH] clean up ...
952

820849843   Thomas Gleixner   rtmutex: Detect c...
953
954
  	/* Store the lock on which owner is blocked or NULL */
  	next_lock = task_blocked_on_lock(owner);
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
955
  	raw_spin_unlock(&owner->pi_lock);
820849843   Thomas Gleixner   rtmutex: Detect c...
956
957
958
959
960
961
  	/*
  	 * Even if full deadlock detection is on, if the owner is not
  	 * blocked itself, we can avoid finding this out in the chain
  	 * walk.
  	 */
  	if (!chain_walk || !next_lock)
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
962
  		return 0;
db630637b   Steven Rostedt   [PATCH] clean up ...
963
964
965
966
967
968
  	/*
  	 * The owner can't disappear while holding a lock,
  	 * so the owner struct is protected by wait_lock.
  	 * Gets dropped in rt_mutex_adjust_prio_chain()!
  	 */
  	get_task_struct(owner);
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
969
  	raw_spin_unlock_irq(&lock->wait_lock);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
970

8930ed80f   Thomas Gleixner   rtmutex: Cleanup ...
971
  	res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
820849843   Thomas Gleixner   rtmutex: Detect c...
972
  					 next_lock, waiter, task);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
973

b4abf9104   Thomas Gleixner   rtmutex: Make wai...
974
  	raw_spin_lock_irq(&lock->wait_lock);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
975
976
977
978
979
  
  	return res;
  }
  
  /*
9f40a51a3   Davidlohr Bueso   locking/rtmutex: ...
980
   * Remove the top waiter from the current tasks pi waiter tree and
45ab4effc   Davidlohr Bueso   locking/rtmutex: ...
981
   * queue it up.
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
982
   *
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
983
   * Called with lock->wait_lock held and interrupts disabled.
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
984
   */
45ab4effc   Davidlohr Bueso   locking/rtmutex: ...
985
986
  static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
  				    struct rt_mutex *lock)
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
987
988
  {
  	struct rt_mutex_waiter *waiter;
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
989

b4abf9104   Thomas Gleixner   rtmutex: Make wai...
990
  	raw_spin_lock(&current->pi_lock);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
991
992
  
  	waiter = rt_mutex_top_waiter(lock);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
993
994
  
  	/*
acd58620e   Peter Zijlstra   sched/rtmutex: Re...
995
996
997
998
999
  	 * Remove it from current->pi_waiters and deboost.
  	 *
  	 * We must in fact deboost here in order to ensure we call
  	 * rt_mutex_setprio() to update p->pi_top_task before the
  	 * task unblocks.
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1000
  	 */
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
1001
  	rt_mutex_dequeue_pi(current, waiter);
acd58620e   Peter Zijlstra   sched/rtmutex: Re...
1002
  	rt_mutex_adjust_prio(current);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1003

27e35715d   Thomas Gleixner   rtmutex: Plug slo...
1004
1005
1006
1007
1008
1009
1010
1011
1012
  	/*
  	 * As we are waking up the top waiter, and the waiter stays
  	 * queued on the lock until it gets the lock, this lock
  	 * obviously has waiters. Just set the bit here and this has
  	 * the added benefit of forcing all new tasks into the
  	 * slow path making sure no task of lower priority than
  	 * the top waiter can steal this lock.
  	 */
  	lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1013

acd58620e   Peter Zijlstra   sched/rtmutex: Re...
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
  	/*
  	 * We deboosted before waking the top waiter task such that we don't
  	 * run two tasks with the 'same' priority (and ensure the
  	 * p->pi_top_task pointer points to a blocked task). This however can
  	 * lead to priority inversion if we would get preempted after the
  	 * deboost but before waking our donor task, hence the preempt_disable()
  	 * before unlock.
  	 *
  	 * Pairs with preempt_enable() in rt_mutex_postunlock();
  	 */
  	preempt_disable();
45ab4effc   Davidlohr Bueso   locking/rtmutex: ...
1025
  	wake_q_add(wake_q, waiter->task);
acd58620e   Peter Zijlstra   sched/rtmutex: Re...
1026
  	raw_spin_unlock(&current->pi_lock);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1027
1028
1029
  }
  
  /*
8161239a8   Lai Jiangshan   rtmutex: Simplify...
1030
   * Remove a waiter from a lock and give up
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1031
   *
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
1032
   * Must be called with lock->wait_lock held and interrupts disabled. I must
8161239a8   Lai Jiangshan   rtmutex: Simplify...
1033
   * have just failed to try_to_take_rt_mutex().
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1034
   */
bd197234b   Thomas Gleixner   Revert "futex_req...
1035
1036
  static void remove_waiter(struct rt_mutex *lock,
  			  struct rt_mutex_waiter *waiter)
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1037
  {
1ca7b8606   Thomas Gleixner   rtmutex: Simplify...
1038
  	bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
36c8b5868   Ingo Molnar   [PATCH] sched: cl...
1039
  	struct task_struct *owner = rt_mutex_owner(lock);
1ca7b8606   Thomas Gleixner   rtmutex: Simplify...
1040
  	struct rt_mutex *next_lock;
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1041

e0aad5b44   Peter Zijlstra   rtmutex: Fix PI c...
1042
  	lockdep_assert_held(&lock->wait_lock);
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
1043
  	raw_spin_lock(&current->pi_lock);
fb00aca47   Peter Zijlstra   rtmutex: Turn the...
1044
  	rt_mutex_dequeue(lock, waiter);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1045
  	current->pi_blocked_on = NULL;
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
1046
  	raw_spin_unlock(&current->pi_lock);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1047

1ca7b8606   Thomas Gleixner   rtmutex: Simplify...
1048
1049
1050
1051
1052
  	/*
  	 * Only update priority if the waiter was the highest priority
  	 * waiter of the lock and there is an owner to update.
  	 */
  	if (!owner || !is_top_waiter)
8161239a8   Lai Jiangshan   rtmutex: Simplify...
1053
  		return;
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
1054
  	raw_spin_lock(&owner->pi_lock);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1055

1ca7b8606   Thomas Gleixner   rtmutex: Simplify...
1056
  	rt_mutex_dequeue_pi(owner, waiter);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1057

1ca7b8606   Thomas Gleixner   rtmutex: Simplify...
1058
1059
  	if (rt_mutex_has_waiters(lock))
  		rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1060

acd58620e   Peter Zijlstra   sched/rtmutex: Re...
1061
  	rt_mutex_adjust_prio(owner);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1062

1ca7b8606   Thomas Gleixner   rtmutex: Simplify...
1063
1064
  	/* Store the lock on which owner is blocked or NULL */
  	next_lock = task_blocked_on_lock(owner);
db630637b   Steven Rostedt   [PATCH] clean up ...
1065

b4abf9104   Thomas Gleixner   rtmutex: Make wai...
1066
  	raw_spin_unlock(&owner->pi_lock);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1067

1ca7b8606   Thomas Gleixner   rtmutex: Simplify...
1068
1069
1070
1071
  	/*
  	 * Don't walk the chain, if the owner task is not blocked
  	 * itself.
  	 */
820849843   Thomas Gleixner   rtmutex: Detect c...
1072
  	if (!next_lock)
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1073
  		return;
db630637b   Steven Rostedt   [PATCH] clean up ...
1074
1075
  	/* gets dropped in rt_mutex_adjust_prio_chain()! */
  	get_task_struct(owner);
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
1076
  	raw_spin_unlock_irq(&lock->wait_lock);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1077

8930ed80f   Thomas Gleixner   rtmutex: Cleanup ...
1078
1079
  	rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
  				   next_lock, NULL, current);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1080

b4abf9104   Thomas Gleixner   rtmutex: Make wai...
1081
  	raw_spin_lock_irq(&lock->wait_lock);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1082
1083
1084
  }
  
  /*
95e02ca9b   Thomas Gleixner   [PATCH] rtmutex: ...
1085
1086
1087
1088
1089
1090
1091
   * Recheck the pi chain, in case we got a priority setting
   *
   * Called from sched_setscheduler
   */
  void rt_mutex_adjust_pi(struct task_struct *task)
  {
  	struct rt_mutex_waiter *waiter;
820849843   Thomas Gleixner   rtmutex: Detect c...
1092
  	struct rt_mutex *next_lock;
95e02ca9b   Thomas Gleixner   [PATCH] rtmutex: ...
1093
  	unsigned long flags;
1d6154825   Thomas Gleixner   sched: Convert pi...
1094
  	raw_spin_lock_irqsave(&task->pi_lock, flags);
95e02ca9b   Thomas Gleixner   [PATCH] rtmutex: ...
1095
1096
  
  	waiter = task->pi_blocked_on;
19830e552   Peter Zijlstra   rtmutex: Fix more...
1097
  	if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
1d6154825   Thomas Gleixner   sched: Convert pi...
1098
  		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
95e02ca9b   Thomas Gleixner   [PATCH] rtmutex: ...
1099
1100
  		return;
  	}
820849843   Thomas Gleixner   rtmutex: Detect c...
1101
  	next_lock = waiter->lock;
1d6154825   Thomas Gleixner   sched: Convert pi...
1102
  	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
95e02ca9b   Thomas Gleixner   [PATCH] rtmutex: ...
1103

db630637b   Steven Rostedt   [PATCH] clean up ...
1104
1105
  	/* gets dropped in rt_mutex_adjust_prio_chain()! */
  	get_task_struct(task);
820849843   Thomas Gleixner   rtmutex: Detect c...
1106

8930ed80f   Thomas Gleixner   rtmutex: Cleanup ...
1107
1108
  	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
  				   next_lock, NULL, task);
95e02ca9b   Thomas Gleixner   [PATCH] rtmutex: ...
1109
  }
50809358d   Peter Zijlstra   futex,rt_mutex: I...
1110
1111
1112
1113
1114
1115
1116
  void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
  {
  	debug_rt_mutex_init_waiter(waiter);
  	RB_CLEAR_NODE(&waiter->pi_tree_entry);
  	RB_CLEAR_NODE(&waiter->tree_entry);
  	waiter->task = NULL;
  }
8dac456a6   Darren Hart   rt_mutex: add pro...
1117
1118
1119
1120
  /**
   * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
   * @lock:		 the rt_mutex to take
   * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
1121
   *			 or TASK_UNINTERRUPTIBLE)
8dac456a6   Darren Hart   rt_mutex: add pro...
1122
1123
   * @timeout:		 the pre-initialized and started timer, or NULL for none
   * @waiter:		 the pre-initialized rt_mutex_waiter
8dac456a6   Darren Hart   rt_mutex: add pro...
1124
   *
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
1125
   * Must be called with lock->wait_lock held and interrupts disabled
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1126
1127
   */
  static int __sched
8dac456a6   Darren Hart   rt_mutex: add pro...
1128
1129
  __rt_mutex_slowlock(struct rt_mutex *lock, int state,
  		    struct hrtimer_sleeper *timeout,
8161239a8   Lai Jiangshan   rtmutex: Simplify...
1130
  		    struct rt_mutex_waiter *waiter)
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1131
  {
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1132
  	int ret = 0;
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1133
1134
  	for (;;) {
  		/* Try to acquire the lock: */
8161239a8   Lai Jiangshan   rtmutex: Simplify...
1135
  		if (try_to_take_rt_mutex(lock, current, waiter))
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1136
1137
1138
1139
1140
1141
  			break;
  
  		/*
  		 * TASK_INTERRUPTIBLE checks for signals and
  		 * timeout. Ignored otherwise.
  		 */
4009f4b3a   Steven Rostedt (VMware)   locking/rtmutex: ...
1142
  		if (likely(state == TASK_INTERRUPTIBLE)) {
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1143
1144
1145
1146
1147
1148
1149
1150
  			/* Signal pending? */
  			if (signal_pending(current))
  				ret = -EINTR;
  			if (timeout && !timeout->task)
  				ret = -ETIMEDOUT;
  			if (ret)
  				break;
  		}
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
1151
  		raw_spin_unlock_irq(&lock->wait_lock);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1152

8dac456a6   Darren Hart   rt_mutex: add pro...
1153
  		debug_rt_mutex_print_deadlock(waiter);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1154

1b0b7c176   Davidlohr Bueso   rtmutex: Delete s...
1155
  		schedule();
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1156

b4abf9104   Thomas Gleixner   rtmutex: Make wai...
1157
  		raw_spin_lock_irq(&lock->wait_lock);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1158
1159
  		set_current_state(state);
  	}
afffc6c18   Davidlohr Bueso   locking/rtmutex: ...
1160
  	__set_current_state(TASK_RUNNING);
8dac456a6   Darren Hart   rt_mutex: add pro...
1161
1162
  	return ret;
  }
3d5c9340d   Thomas Gleixner   rtmutex: Handle d...
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
  static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
  				     struct rt_mutex_waiter *w)
  {
  	/*
  	 * If the result is not -EDEADLOCK or the caller requested
  	 * deadlock detection, nothing to do here.
  	 */
  	if (res != -EDEADLOCK || detect_deadlock)
  		return;
  
  	/*
  	 * Yell lowdly and stop the task right here.
  	 */
  	rt_mutex_print_deadlock(w);
  	while (1) {
  		set_current_state(TASK_INTERRUPTIBLE);
  		schedule();
  	}
  }
8dac456a6   Darren Hart   rt_mutex: add pro...
1182
1183
1184
1185
1186
1187
/*
 * Slow path lock function:
 *
 * @lock:	the rt_mutex to acquire
 * @state:	task state to sleep in (TASK_INTERRUPTIBLE or
 *		TASK_UNINTERRUPTIBLE)
 * @timeout:	hrtimer sleeper or NULL for no timeout
 * @chwalk:	PI chain walk mode handed to task_blocks_on_rt_mutex()
 *
 * Returns 0 on success, -EINTR / -ETIMEDOUT propagated from
 * __rt_mutex_slowlock(), or the error from task_blocks_on_rt_mutex().
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  enum rtmutex_chainwalk chwalk)
{
	struct rt_mutex_waiter waiter;
	unsigned long flags;
	int ret = 0;

	rt_mutex_init_waiter(&waiter);

	/*
	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
	 * be called in early boot if the cmpxchg() fast path is disabled
	 * (debug, no architecture support). In this case we will acquire the
	 * rtmutex with lock->wait_lock held. But we cannot unconditionally
	 * enable interrupts in that early boot case. So we need to use the
	 * irqsave/restore variants.
	 */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock, current, NULL)) {
		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout))
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);

	/* Enqueue us as a waiter and walk the PI chain as requested. */
	ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);

	if (likely(!ret))
		/* sleep on the mutex */
		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

	if (unlikely(ret)) {
		/* Failed to acquire: undo our waiter state before bailing. */
		__set_current_state(TASK_RUNNING);
		if (rt_mutex_has_waiters(lock))
			remove_waiter(lock, &waiter);
		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
	}

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}
1352130fe   Peter Zijlstra   futex: Avoid viol...
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
  static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock)
  {
  	int ret = try_to_take_rt_mutex(lock, current, NULL);
  
  	/*
  	 * try_to_take_rt_mutex() sets the lock waiters bit
  	 * unconditionally. Clean this up.
  	 */
  	fixup_rt_mutex_waiters(lock);
  
  	return ret;
  }
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1256
1257
1258
  /*
   * Slow path try-lock function:
   */
88f2b4c15   Thomas Gleixner   rtmutex: Simplify...
1259
  static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1260
  {
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
1261
  	unsigned long flags;
88f2b4c15   Thomas Gleixner   rtmutex: Simplify...
1262
1263
1264
1265
1266
1267
1268
1269
1270
  	int ret;
  
  	/*
  	 * If the lock already has an owner we fail to get the lock.
  	 * This can be done without taking the @lock->wait_lock as
  	 * it is only being read, and this is a trylock anyway.
  	 */
  	if (rt_mutex_owner(lock))
  		return 0;
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1271

88f2b4c15   Thomas Gleixner   rtmutex: Simplify...
1272
  	/*
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
1273
1274
  	 * The mutex has currently no owner. Lock the wait lock and try to
  	 * acquire the lock. We use irqsave here to support early boot calls.
88f2b4c15   Thomas Gleixner   rtmutex: Simplify...
1275
  	 */
b4abf9104   Thomas Gleixner   rtmutex: Make wai...
1276
  	raw_spin_lock_irqsave(&lock->wait_lock, flags);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1277

1352130fe   Peter Zijlstra   futex: Avoid viol...
1278
  	ret = __rt_mutex_slowtrylock(lock);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1279

b4abf9104   Thomas Gleixner   rtmutex: Make wai...
1280
  	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1281
1282
1283
1284
1285
  
  	return ret;
  }
  
/*
 * Slow path to release a rt-mutex.
 *
 * @lock:	the rt_mutex to release
 * @wake_q:	wake queue into which the next waiter is placed
 *
 * Return whether the current task needs to call rt_mutex_postunlock().
 */
static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
					struct wake_q_head *wake_q)
{
	unsigned long flags;

	/* irqsave required to support early boot calls */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	debug_rt_mutex_unlock(lock);

	/*
	 * We must be careful here if the fast path is enabled. If we
	 * have no waiters queued we cannot set owner to NULL here
	 * because of:
	 *
	 * foo->lock->owner = NULL;
	 *			rtmutex_lock(foo->lock);   <- fast path
	 *			free = atomic_dec_and_test(foo->refcnt);
	 *			rtmutex_unlock(foo->lock); <- fast path
	 *			if (free)
	 *				kfree(foo);
	 * raw_spin_unlock(foo->lock->wait_lock);
	 *
	 * So for the fastpath enabled kernel:
	 *
	 * Nothing can set the waiters bit as long as we hold
	 * lock->wait_lock. So we do the following sequence:
	 *
	 *	owner = rt_mutex_owner(lock);
	 *	clear_rt_mutex_waiters(lock);
	 *	raw_spin_unlock(&lock->wait_lock);
	 *	if (cmpxchg(&lock->owner, owner, 0) == owner)
	 *		return;
	 *	goto retry;
	 *
	 * The fastpath disabled variant is simple as all access to
	 * lock->owner is serialized by lock->wait_lock:
	 *
	 *	lock->owner = NULL;
	 *	raw_spin_unlock(&lock->wait_lock);
	 */
	while (!rt_mutex_has_waiters(lock)) {
		/* Drops lock->wait_lock ! */
		if (unlock_rt_mutex_safe(lock, flags) == true)
			return false;
		/* Relock the rtmutex and try again */
		raw_spin_lock_irqsave(&lock->wait_lock, flags);
	}

	/*
	 * The wakeup next waiter path does not suffer from the above
	 * race. See the comments there.
	 *
	 * Queue the next waiter for wakeup once we release the wait_lock.
	 */
	mark_wakeup_next_waiter(wake_q, lock);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	return true; /* call rt_mutex_postunlock() */
}
  
  /*
   * debug aware fast / slowpath lock,trylock,unlock
   *
   * The atomic acquire/release ops are compiled away, when either the
   * architecture does not support cmpxchg or when debugging is enabled.
   */
  static inline int
  rt_mutex_fastlock(struct rt_mutex *lock, int state,
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1357
1358
  		  int (*slowfn)(struct rt_mutex *lock, int state,
  				struct hrtimer_sleeper *timeout,
8930ed80f   Thomas Gleixner   rtmutex: Cleanup ...
1359
  				enum rtmutex_chainwalk chwalk))
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1360
  {
fffa954fb   Peter Zijlstra   futex: Remove rt_...
1361
  	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1362
  		return 0;
fffa954fb   Peter Zijlstra   futex: Remove rt_...
1363
1364
  
  	return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1365
1366
1367
1368
  }
  
  static inline int
  rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
8930ed80f   Thomas Gleixner   rtmutex: Cleanup ...
1369
1370
  			struct hrtimer_sleeper *timeout,
  			enum rtmutex_chainwalk chwalk,
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1371
1372
  			int (*slowfn)(struct rt_mutex *lock, int state,
  				      struct hrtimer_sleeper *timeout,
8930ed80f   Thomas Gleixner   rtmutex: Cleanup ...
1373
  				      enum rtmutex_chainwalk chwalk))
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1374
  {
8930ed80f   Thomas Gleixner   rtmutex: Cleanup ...
1375
  	if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
fffa954fb   Peter Zijlstra   futex: Remove rt_...
1376
  	    likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1377
  		return 0;
fffa954fb   Peter Zijlstra   futex: Remove rt_...
1378
1379
  
  	return slowfn(lock, state, timeout, chwalk);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1380
1381
1382
1383
  }
  
  static inline int
  rt_mutex_fasttrylock(struct rt_mutex *lock,
9a11b49a8   Ingo Molnar   [PATCH] lockdep: ...
1384
  		     int (*slowfn)(struct rt_mutex *lock))
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1385
  {
fffa954fb   Peter Zijlstra   futex: Remove rt_...
1386
  	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1387
  		return 1;
fffa954fb   Peter Zijlstra   futex: Remove rt_...
1388

9a11b49a8   Ingo Molnar   [PATCH] lockdep: ...
1389
  	return slowfn(lock);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1390
  }
2a1c60299   Xunlei Pang   rtmutex: Deboost ...
1391
/*
 * Performs the wakeup of the top-waiter and re-enables preemption.
 */
void rt_mutex_postunlock(struct wake_q_head *wake_q)
{
	wake_up_q(wake_q);

	/* Pairs with preempt_disable() in rt_mutex_slowunlock() */
	preempt_enable();
}
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1401
1402
  static inline void
  rt_mutex_fastunlock(struct rt_mutex *lock,
802ab58da   Sebastian Andrzej Siewior   futex: Lower the ...
1403
1404
  		    bool (*slowfn)(struct rt_mutex *lock,
  				   struct wake_q_head *wqh))
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1405
  {
194a6b5b9   Waiman Long   sched/wake_q: Ren...
1406
  	DEFINE_WAKE_Q(wake_q);
802ab58da   Sebastian Andrzej Siewior   futex: Lower the ...
1407

fffa954fb   Peter Zijlstra   futex: Remove rt_...
1408
1409
  	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
  		return;
802ab58da   Sebastian Andrzej Siewior   futex: Lower the ...
1410

aa2bfe553   Peter Zijlstra   rtmutex: Clean up
1411
1412
  	if (slowfn(lock, &wake_q))
  		rt_mutex_postunlock(&wake_q);
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1413
  }
b3da5df23   Peter Rosin   locking/rtmutex: ...
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
/* Common body of rt_mutex_lock() / rt_mutex_lock_nested(): lockdep
 * annotation with @subclass, then an uninterruptible acquisition. */
static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
{
	might_sleep();

	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
}
  
  #ifdef CONFIG_DEBUG_LOCK_ALLOC
  /**
   * rt_mutex_lock_nested - lock a rt_mutex
   *
   * @lock: the rt_mutex to be locked
   * @subclass: the lockdep subclass
   */
  void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
  {
  	__rt_mutex_lock(lock, subclass);
  }
  EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
  #endif
  
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	/* Subclass 0: the plain, non-nested lockdep annotation. */
	__rt_mutex_lock(lock, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
#endif
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1448
1449
1450
1451
  
  /**
   * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
   *
c051b21f7   Thomas Gleixner   rtmutex: Confine ...
1452
   * @lock:		the rt_mutex to be locked
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1453
1454
   *
   * Returns:
c051b21f7   Thomas Gleixner   rtmutex: Confine ...
1455
1456
   *  0		on success
   * -EINTR	when interrupted by a signal
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1457
   */
c051b21f7   Thomas Gleixner   rtmutex: Confine ...
1458
  int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1459
  {
f5694788a   Peter Zijlstra   rt_mutex: Add loc...
1460
  	int ret;
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1461
  	might_sleep();
f5694788a   Peter Zijlstra   rt_mutex: Add loc...
1462
1463
1464
1465
1466
1467
  	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
  	ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
  	if (ret)
  		mutex_release(&lock->dep_map, 1, _RET_IP_);
  
  	return ret;
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1468
1469
  }
  EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
c051b21f7   Thomas Gleixner   rtmutex: Confine ...
1470
/*
 * Futex variant, must not use fastpath.
 */
int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_slowtrylock(lock);
}
1352130fe   Peter Zijlstra   futex: Avoid viol...
1477
1478
1479
1480
/* Futex trylock variant for callers already holding lock->wait_lock. */
int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
{
	return __rt_mutex_slowtrylock(lock);
}
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1481
  /**
23b94b967   Luis Henriques   locking, rtmutex....
1482
1483
1484
   * rt_mutex_timed_lock - lock a rt_mutex interruptible
   *			the timeout structure is provided
   *			by the caller
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1485
   *
c051b21f7   Thomas Gleixner   rtmutex: Confine ...
1486
   * @lock:		the rt_mutex to be locked
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1487
   * @timeout:		timeout structure or NULL (no timeout)
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1488
1489
   *
   * Returns:
c051b21f7   Thomas Gleixner   rtmutex: Confine ...
1490
1491
   *  0		on success
   * -EINTR	when interrupted by a signal
3ac49a1c9   Jean Delvare   trivial: fix ETIM...
1492
   * -ETIMEDOUT	when the timeout expired
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1493
1494
   */
  int
c051b21f7   Thomas Gleixner   rtmutex: Confine ...
1495
  rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1496
  {
f5694788a   Peter Zijlstra   rt_mutex: Add loc...
1497
  	int ret;
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1498
  	might_sleep();
f5694788a   Peter Zijlstra   rt_mutex: Add loc...
1499
1500
  	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
  	ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
8930ed80f   Thomas Gleixner   rtmutex: Cleanup ...
1501
  				       RT_MUTEX_MIN_CHAINWALK,
c051b21f7   Thomas Gleixner   rtmutex: Confine ...
1502
  				       rt_mutex_slowlock);
f5694788a   Peter Zijlstra   rt_mutex: Add loc...
1503
1504
1505
1506
  	if (ret)
  		mutex_release(&lock->dep_map, 1, _RET_IP_);
  
  	return ret;
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1507
1508
1509
1510
1511
1512
1513
1514
  }
  EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
  
  /**
   * rt_mutex_trylock - try to lock a rt_mutex
   *
   * @lock:	the rt_mutex to be locked
   *
6ce47fd96   Thomas Gleixner   rtmutex: Warn if ...
1515
1516
1517
1518
   * This function can only be called in thread context. It's safe to
   * call it from atomic regions, but not from hard interrupt or soft
   * interrupt context.
   *
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1519
1520
1521
1522
   * Returns 1 on success and 0 on contention
   */
  int __sched rt_mutex_trylock(struct rt_mutex *lock)
  {
f5694788a   Peter Zijlstra   rt_mutex: Add loc...
1523
  	int ret;
a461d5879   Sebastian Andrzej Siewior   locking/rtmutex: ...
1524
  	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
6ce47fd96   Thomas Gleixner   rtmutex: Warn if ...
1525
  		return 0;
f5694788a   Peter Zijlstra   rt_mutex: Add loc...
1526
1527
1528
1529
1530
  	ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
  	if (ret)
  		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
  
  	return ret;
23f78d4a0   Ingo Molnar   [PATCH] pi-futex:...
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
  }
  EXPORT_SYMBOL_GPL(rt_mutex_trylock);
  
/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	mutex_release(&lock->dep_map, 1, _RET_IP_);
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
23b94b967   Luis Henriques   locking, rtmutex....
1545
/**
 * Futex variant, that since futex variants do not use the fast-path, can be
 * simple and will not need to retry.
 *
 * Caller must hold lock->wait_lock. Returns true when the caller has to
 * invoke rt_mutex_postunlock() after dropping wait_lock.
 */
bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
				    struct wake_q_head *wake_q)
{
	lockdep_assert_held(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	/* No waiters: plain owner clear is race-free under wait_lock. */
	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		return false; /* done */
	}

	/*
	 * We've already deboosted, mark_wakeup_next_waiter() will
	 * retain preempt_disabled when we drop the wait_lock, to
	 * avoid inversion prior to the wakeup.  preempt_disable()
	 * therein pairs with rt_mutex_postunlock().
	 */
	mark_wakeup_next_waiter(wake_q, lock);

	return true; /* call postunlock() */
}
fffa954fb   Peter Zijlstra   futex: Remove rt_...
1570

5293c2efd   Peter Zijlstra   futex,rt_mutex: P...
1571
1572
1573
  void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
  {
  	DEFINE_WAKE_Q(wake_q);
aa2bfe553   Peter Zijlstra   rtmutex: Clean up
1574
  	bool postunlock;
5293c2efd   Peter Zijlstra   futex,rt_mutex: P...
1575
1576
  
  	raw_spin_lock_irq(&lock->wait_lock);
aa2bfe553   Peter Zijlstra   rtmutex: Clean up
1577
  	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
5293c2efd   Peter Zijlstra   futex,rt_mutex: P...
1578
  	raw_spin_unlock_irq(&lock->wait_lock);
aa2bfe553   Peter Zijlstra   rtmutex: Clean up
1579
1580
  	if (postunlock)
  		rt_mutex_postunlock(&wake_q);
802ab58da   Sebastian Andrzej Siewior   futex: Lower the ...
1581
1582
1583
  }
  
/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	/* Poison the debug magic so later use trips the debug checks. */
	lock->magic = NULL;
#endif
}
EXPORT_SYMBOL_GPL(rt_mutex_destroy);
  
/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 * @name: lock name for the debug code, may be NULL
 * @key: lockdep class key, may be NULL
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing of a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name,
		     struct lock_class_key *key)
{
	lock->owner = NULL;
	raw_spin_lock_init(&lock->wait_lock);
	lock->waiters = RB_ROOT_CACHED;

	/* Debug initialization only when both name and key are supplied. */
	if (name && key)
		debug_rt_mutex_init(lock, name, key);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
0cdbee992   Ingo Molnar   [PATCH] pi-futex:...
1620
1621
1622
1623
1624
  
/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner:the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 *
 * Special API call for PI-futex support. This initializes the rtmutex and
 * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
 * possible at this point because the pi_state which contains the rtmutex
 * is not yet visible to other tasks.
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	/* NULL name/key: skip debug_rt_mutex_init() (see __rt_mutex_init) */
	__rt_mutex_init(lock, NULL, NULL);
	debug_rt_mutex_proxy_lock(lock, proxy_owner);
	rt_mutex_set_owner(lock, proxy_owner);
}
  
/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner: the previously proxied owner (only used by debug code)
 *
 * No locking. Caller has to do serializing itself
 *
 * Special API call for PI-futex support. This merrily cleans up the rtmutex
 * (debugging) state. Concurrent operations on this rt_mutex are not
 * possible because it belongs to the pi_state which is about to be freed
 * and it is not longer visible to other tasks.
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL);
}
56222b212   Peter Zijlstra   futex: Drop hb->l...
1661
/*
 * Lock-less core of rt_mutex_start_proxy_lock(): try to acquire @lock on
 * behalf of @task, otherwise enqueue @waiter with full deadlock detection.
 *
 * NOTE(review): the only visible caller (rt_mutex_start_proxy_lock() below)
 * invokes this with lock->wait_lock held — presumably that is required here
 * too; confirm against other callers outside this file.
 *
 * Returns 1 when the lock was acquired for @task, 0 when @task is now
 * blocked on @lock, or a negative error from task_blocks_on_rt_mutex().
 */
int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *task)
{
	int ret;

	if (try_to_take_rt_mutex(lock, task, NULL))
		return 1;

	/* We enforce deadlock detection for futexes */
	ret = task_blocks_on_rt_mutex(lock, waiter, task,
				      RT_MUTEX_FULL_CHAINWALK);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain.  Let the waiter sort it out.
		 */
		ret = 0;
	}

	/* Blocking failed for real: take the waiter back off the lock. */
	if (unlikely(ret))
		remove_waiter(lock, waiter);

	debug_rt_mutex_print_deadlock(waiter);

	return ret;
}
  
/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *task)
{
	int ret;

	/* Serialize against other lock operations via wait_lock. */
	raw_spin_lock_irq(&lock->wait_lock);
	ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}
  
  /**
0cdbee992   Ingo Molnar   [PATCH] pi-futex:...
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
   * rt_mutex_next_owner - return the next owner of the lock
   *
   * @lock: the rt lock query
   *
   * Returns the next owner of the lock or NULL
   *
   * Caller has to serialize against other accessors to the lock
   * itself.
   *
   * Special API call for PI-futex support
   */
  struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
  {
  	if (!rt_mutex_has_waiters(lock))
  		return NULL;
  
  	return rt_mutex_top_waiter(lock)->task;
  }
8dac456a6   Darren Hart   rt_mutex: add pro...
1735
1736
  
/**
 * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @to:			the timeout, null if none. hrtimer should already have
 *			been started.
 * @waiter:		the pre-initialized rt_mutex_waiter
 *
 * Wait for the lock acquisition started on our behalf by
 * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
 * rt_mutex_cleanup_proxy_lock().
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT
 *
 * Special API call for PI-futex support
 */
int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
			       struct hrtimer_sleeper *to,
			       struct rt_mutex_waiter *waiter)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	/* sleep on the mutex */
	set_current_state(TASK_INTERRUPTIBLE);
	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}
38d589f2f   Peter Zijlstra   futex,rt_mutex: R...
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
  
/**
 * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @waiter:		the pre-initialized rt_mutex_waiter
 *
 * Attempt to clean up after a failed rt_mutex_wait_proxy_lock().
 *
 * Unless we acquired the lock; we're still enqueued on the wait-list and can
 * in fact still be granted ownership until we're removed. Therefore we can
 * find we are in fact the owner and must disregard the
 * rt_mutex_wait_proxy_lock() failure.
 *
 * Returns:
 *  true  - did the cleanup, we're done.
 *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
 *          caller should disregard its return value.
 *
 * Special API call for PI-futex support
 */
bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
				 struct rt_mutex_waiter *waiter)
{
	bool cleanup = false;

	raw_spin_lock_irq(&lock->wait_lock);
	/*
	 * Do an unconditional try-lock, this deals with the lock stealing
	 * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
	 * sets a NULL owner.
	 *
	 * We're not interested in the return value, because the subsequent
	 * test on rt_mutex_owner() will infer that. If the trylock succeeded,
	 * we will own the lock and it will have removed the waiter. If we
	 * failed the trylock, we're still not owner and we need to remove
	 * ourselves.
	 */
	try_to_take_rt_mutex(lock, current, waiter);
	/*
	 * Unless we're the owner; we're still enqueued on the wait_list.
	 * So check if we became owner, if not, take us off the wait_list.
	 */
	if (rt_mutex_owner(lock) != current) {
		remove_waiter(lock, waiter);
		cleanup = true;
	}
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock_irq(&lock->wait_lock);

	return cleanup;
}