/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/***
 * mutex_init - initialize the mutex
 * @lock: the mutex to be initialized
 * @name: the name of the mutex (used by lock debugging)
 * @key: the lock_class_key for the class; used by mutex lock debugging
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
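
/*
 * A minimal usage sketch: the two usual ways to obtain an initialized,
 * unlocked mutex. The names 'my_lock', 'my_ctx' and 'my_ctx_setup' are
 * hypothetical and used only for illustration.
 *
 *	static DEFINE_MUTEX(my_lock);
 *
 *	struct my_ctx {
 *		struct mutex lock;
 *	};
 *
 *	static void my_ctx_setup(struct my_ctx *ctx)
 *	{
 *		mutex_init(&ctx->lock);
 *	}
 *
 * DEFINE_MUTEX() defines a statically initialized mutex; mutex_init()
 * (the macro wrapping __mutex_init() above) initializes one embedded in
 * a runtime-allocated structure.
 */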

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/***
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void inline __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif |
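
/*
 * A minimal usage sketch of the lock/unlock pairing described above
 * (the same task unlocks, no recursion, never from interrupt context).
 * The names 'my_count_lock', 'my_count' and 'my_count_inc' are
 * hypothetical.
 *
 *	static DEFINE_MUTEX(my_count_lock);
 *	static int my_count;
 *
 *	static void my_count_inc(void)
 *	{
 *		mutex_lock(&my_count_lock);
 *		my_count++;
 *		mutex_unlock(&my_count_lock);
 *	}
 *
 * mutex_lock() may sleep on contention, which is why it cannot be used
 * in interrupt context.
 */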

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/***
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner prematurely:
	 * the slow path will always be taken, and it clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;

	preempt_disable();
	mutex_acquire(&lock->dep_map, subclass, 0, ip);
#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) && \
    !defined(CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES)
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 */

	for (;;) {
		struct thread_info *owner;

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
			lock_acquired(&lock->dep_map, ip);
			mutex_set_owner(lock);
			preempt_enable();
			return 0;
		}

		/*
		 * When there's no owner, we might have preempted the owner
		 * between its acquiring the lock and setting the owner field.
		 * If we're an RT task, that will live-lock because we won't
		 * let the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (atomic_xchg(&lock->count, -1) == 1)
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (atomic_xchg(&lock->count, -1) == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			preempt_enable();
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		__schedule();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

#endif |
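
/*
 * A minimal sketch of lockdep-annotated nesting: when two mutexes of the
 * same lock class must be held at once in a fixed parent -> child order,
 * the _nested() variants tell lockdep that the nesting is intentional.
 * 'struct my_node' and 'my_lock_pair' are hypothetical names.
 *
 *	struct my_node {
 *		struct mutex lock;
 *		struct my_node *child;
 *	};
 *
 *	static void my_lock_pair(struct my_node *parent)
 *	{
 *		mutex_lock(&parent->lock);
 *		mutex_lock_nested(&parent->child->lock, SINGLE_DEPTH_NESTING);
 *		...
 *		mutex_unlock(&parent->child->lock);
 *		mutex_unlock(&parent->lock);
 *	}
 *
 * Without CONFIG_DEBUG_LOCK_ALLOC the _nested() variants fall back to the
 * plain locking calls, so the annotation has no runtime cost.
 */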

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/***
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(): return 0 if the mutex has been
 * acquired, or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}

EXPORT_SYMBOL(mutex_lock_interruptible);
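
/*
 * A minimal usage sketch: the return value of mutex_lock_interruptible()
 * must be checked, since -EINTR means the mutex was never acquired. The
 * names 'my_data_lock' and 'my_op' are hypothetical.
 *
 *	static DEFINE_MUTEX(my_data_lock);
 *
 *	static int my_op(void)
 *	{
 *		int ret;
 *
 *		ret = mutex_lock_interruptible(&my_data_lock);
 *		if (ret)
 *			return ret;
 *		...
 *		mutex_unlock(&my_data_lock);
 *		return 0;
 *	}
 *
 * mutex_lock_killable() below follows the same pattern, except that only
 * a fatal signal interrupts the wait.
 */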

int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_killable_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock: we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/***
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated relative to the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
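
/*
 * A minimal usage sketch of the trylock convention noted above: the
 * return value is 1 on success and 0 on contention, the opposite of
 * down_trylock(). 'my_lock' and 'my_try_work' are hypothetical names.
 *
 *	static DEFINE_MUTEX(my_lock);
 *
 *	static int my_try_work(void)
 *	{
 *		if (!mutex_trylock(&my_lock))
 *			return 0;
 *		...
 *		mutex_unlock(&my_lock);
 *		return 1;
 *	}
 */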