/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
/*
 * Must be 0 for the debug case so we do not do the unlock outside of the
 * wait_lock region. debug_mutex_unlock() will do the actual unlock in this
 * case.
 */
# undef __mutex_slowpath_needs_to_unlock
# define  __mutex_slowpath_needs_to_unlock()	0
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
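
/*
 * Illustrative usage sketch (not part of the original file): a mutex is
 * normally defined statically with DEFINE_MUTEX() or set up at run time
 * with mutex_init(), which supplies the name and lockdep class key to
 * __mutex_init() above. The my_dev structure and my_dev_setup() helper
 * here are hypothetical:
 *
 *	static DEFINE_MUTEX(global_lock);
 *
 *	struct my_dev {
 *		struct mutex io_lock;
 *	};
 *
 *	static int my_dev_setup(struct my_dev *dev)
 *	{
 *		mutex_init(&dev->io_lock);
 *		return 0;
 *	}
 */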

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif
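
/*
 * Illustrative usage sketch (not part of the original file): the
 * canonical pattern around a critical section; the task that acquired
 * the mutex must also release it, and it may sleep while holding it:
 *
 *	mutex_lock(&dev->io_lock);
 *	...critical section touching dev state...
 *	mutex_unlock(&dev->io_lock);
 */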

static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

/*
 * After acquiring lock with fastpath or when we lost out in contested
 * slowpath, set ctx and wake up any waiters so they can recheck.
 *
 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
 * as the fastpath and opportunistic spinning are disabled in that case.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or it will acquire wait_lock, add itself
	 * to waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended, if not there is nobody to wake up
	 */
	if (likely(atomic_read(&lock->base.count) == 0))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}

/*
 * After acquiring lock in the slowpath set ctx and wake up any
 * waiters so they can recheck.
 *
 * Callers must hold the mutex wait_lock.
 */
static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);
	lock->ctx = ctx;

	/*
	 * Give any possible sleeping processes the chance to wake up,
	 * so they can recheck if they have to back off.
	 */
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	bool ret = true;

	rcu_read_lock();
	while (lock->owner == owner) {
		/*
		 * Ensure we emit the owner->on_cpu, dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		if (!owner->on_cpu || need_resched()) {
			ret = false;
			break;
		}

		cpu_relax_lowlatency();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = READ_ONCE(lock->owner);
	if (owner)
		retval = owner->on_cpu;
	rcu_read_unlock();
	/*
	 * if lock->owner is not set, the mutex owner may have just acquired
	 * it and not set the owner yet or the mutex has been released.
	 */
	return retval;
}

/*
 * Atomically try to take the lock when it is available
 */
static inline bool mutex_try_to_acquire(struct mutex *lock)
{
	return !mutex_is_locked(lock) &&
		(atomic_cmpxchg_acquire(&lock->count, 1, 0) == 1);
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * Since this needs the lock owner, and this mutex implementation
 * doesn't track the owner atomically in the lock field, we need to
 * track it non-atomically.
 *
 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
 * to serialize everything.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 */
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;

	if (!mutex_can_spin_on_owner(lock))
		goto done;

	/*
	 * In order to avoid a stampede of mutex spinners trying to
	 * acquire the mutex all at once, the spinners need to take a
	 * MCS (queued) lock first before spinning on the owner field.
	 */
	if (!osq_lock(&lock->osq))
		goto done;

	while (true) {
		struct task_struct *owner;

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set the contents are undefined, only
			 * by acquiring wait_lock there is a guarantee that
			 * they are not invalid when reading.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (READ_ONCE(ww->ctx))
				break;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = READ_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		/* Try to acquire the mutex if it is unlocked. */
		if (mutex_try_to_acquire(lock)) {
			lock_acquired(&lock->dep_map, ip);

			if (use_ww_ctx) {
				struct ww_mutex *ww;
				ww = container_of(lock, struct ww_mutex, base);

				ww_mutex_set_context_fastpath(ww, ww_ctx);
			}

			mutex_set_owner(lock);
			osq_unlock(&lock->osq);
			return true;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax_lowlatency();
	}

	osq_unlock(&lock->osq);
done:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	return false;
}
#endif

__visible __used noinline
void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif

	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(&lock->base);
#endif
	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(ww_mutex_unlock);

static inline int __sched
__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	int ret;

	if (use_ww_ctx) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
		/* got the lock, yay! */
		preempt_enable();
		return 0;
	}

	spin_lock_mutex(&lock->wait_lock, flags);

	/*
	 * Once more, try to acquire the lock. Only try-lock the mutex if
	 * it is unlocked to reduce unnecessary xchg() operations.
	 */
	if (!mutex_is_locked(lock) &&
	    (atomic_xchg_acquire(&lock->count, 0) == 1))
		goto skip_wait;

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task);

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Lets try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters. We only attempt the xchg if the count is
		 * non-negative in order to avoid unnecessary xchg operations:
		 */
		if (atomic_read(&lock->count) >= 0 &&
		    (atomic_xchg_acquire(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}
	__set_task_state(task, TASK_RUNNING);

	mutex_remove_waiter(lock, &waiter, task);
	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);
	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	mutex_set_owner(lock);

	if (use_ww_ctx) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
		ww_mutex_set_context_slowpath(ww, ww_ctx);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	mutex_remove_waiter(lock, &waiter, task);
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
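
/*
 * Illustrative sketch (not part of the original file): when two locks of
 * the same lock class must legitimately be held at once, the nesting
 * level is annotated for lockdep, for example when locking both ends of
 * a hypothetical pipe-like object in a fixed order:
 *
 *	mutex_lock(&a->lock);
 *	mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&b->lock);
 *	mutex_unlock(&a->lock);
 */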

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				   0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);

	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(struct mutex *lock, int nested)
{
	unsigned long flags;
	WAKE_Q(wake_q);

	/*
	 * As a performance measure, release the lock before doing other
	 * wakeup related duties to follow. This allows other tasks to acquire
	 * the lock sooner, while still handling cleanups in past unlock calls.
	 * This can be done as we do not enforce strict equivalence between the
	 * mutex counter and wait_list.
	 *
	 *
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here - as the lock counter is currently 0 or negative.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, waiter->task);
	}
	spin_unlock_mutex(&lock->wait_lock, flags);
	wake_up_q(&wake_q);
}

/*
 * Release the lock, slowpath:
 */
__visible void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_unlock_common_slowpath(lock, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
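
/*
 * Illustrative sketch (not part of the original file): a caller that can
 * be interrupted by a signal while waiting usually propagates the error
 * back to user space, e.g. in a hypothetical ioctl or read path:
 *
 *	if (mutex_lock_interruptible(&dev->io_lock))
 *		return -ERESTARTSYS;
 *	...
 *	mutex_unlock(&dev->io_lock);
 */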

int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

__visible void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
					    struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	/* No need to trylock if the mutex is locked. */
	if (mutex_is_locked(lock))
		return 0;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg_acquire(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
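
/*
 * Illustrative sketch (not part of the original file): opportunistic work
 * that is simply skipped when someone else already holds the lock, here on
 * a hypothetical cache object:
 *
 *	if (mutex_trylock(&cache->lock)) {
 *		...shrink the cache...
 *		mutex_unlock(&cache->lock);
 *	}
 */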

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif
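
/*
 * Illustrative sketch (not part of the original file): callers normally
 * reach these entry points through the ww_mutex_lock()/ww_mutex_lock_slow()
 * wrappers and back off on -EDEADLK. The buf_a/buf_b objects and
 * my_ww_class (a DEFINE_WW_CLASS() instance) are hypothetical, and a full
 * implementation retries until no -EDEADLK is returned:
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &my_ww_class);
 *
 *	ww_mutex_lock(&buf_a->lock, &ctx);
 *	if (ww_mutex_lock(&buf_b->lock, &ctx) == -EDEADLK) {
 *		ww_mutex_unlock(&buf_a->lock);
 *		ww_mutex_lock_slow(&buf_b->lock, &ctx);
 *		ww_mutex_lock(&buf_a->lock, &ctx);
 *	}
 *	ww_acquire_done(&ctx);
 *
 *	...use both buffers...
 *
 *	ww_mutex_unlock(&buf_a->lock);
 *	ww_mutex_unlock(&buf_b->lock);
 *	ww_acquire_fini(&ctx);
 */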

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
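
/*
 * Illustrative sketch (not part of the original file): dropping the last
 * reference to a hypothetical object that lives on a mutex-protected list;
 * the mutex is only taken when the count may actually reach zero:
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcount, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		mutex_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */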