/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/***
 * mutex_init - initialize the mutex
 * @lock: the mutex to be initialized
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
        atomic_set(&lock->count, 1);
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);

        debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

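/*
 * Illustrative usage (not part of the original file): the two ways to get
 * an initialized, unlocked mutex. The names below are hypothetical.
 */
static DEFINE_MUTEX(example_static_lock);       /* statically defined */

static void __maybe_unused example_dynamic_init(struct mutex *lock)
{
        /* a dynamically allocated mutex must be initialized before use: */
        mutex_init(lock);
}
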
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count);

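/*
 * For reference (not part of the original file): on architectures that use
 * the asm-generic atomic-decrement fastpath, __mutex_fastpath_lock() boils
 * down to roughly the following - one atomic op in the uncontended case,
 * with the slowpath entered only when the count goes negative. This is a
 * simplified sketch of asm-generic/mutex-dec.h, not the exact code:
 *
 *      static inline void
 *      __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 *      {
 *              if (unlikely(atomic_dec_return(count) < 0))
 *                      fail_fn(count);
 *      }
 */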
/***
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, the
 * kernel memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void inline __sched mutex_lock(struct mutex *lock)
{
        might_sleep();
        /*
         * The locking fastpath is the 1->0 transition from
         * 'unlocked' into 'locked' state.
         */
        __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}

EXPORT_SYMBOL(mutex_lock);
#endif

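/*
 * Illustrative usage (not part of the original file): a minimal sketch of
 * the usual lock/unlock pairing around a critical section. The list and
 * mutex here are hypothetical.
 */
static DEFINE_MUTEX(example_list_lock);
static LIST_HEAD(example_list);

static void __maybe_unused example_add_entry(struct list_head *entry)
{
        mutex_lock(&example_list_lock);         /* may sleep; process context only */
        list_add_tail(entry, &example_list);
        mutex_unlock(&example_list_lock);       /* same task that locked it */
}
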
static noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/***
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
        __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                unsigned long ip)
{
        struct task_struct *task = current;
        struct mutex_waiter waiter;
        unsigned int old_val;
        unsigned long flags;

        spin_lock_mutex(&lock->wait_lock, flags);

        debug_mutex_lock_common(lock, &waiter);
        mutex_acquire(&lock->dep_map, subclass, 0, ip);
        debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

        /* add waiting tasks to the end of the waitqueue (FIFO): */
        list_add_tail(&waiter.list, &lock->wait_list);
        waiter.task = task;

        old_val = atomic_xchg(&lock->count, -1);
        if (old_val == 1)
                goto done;

        lock_contended(&lock->dep_map, ip);

        for (;;) {
                /*
                 * Let's try to take the lock again - this is needed even if
                 * we get here for the first time (shortly after failing to
                 * acquire the lock), to make sure that we get a wakeup once
                 * it's unlocked. Later on, if we sleep, this is the
                 * operation that gives us the lock. We xchg it to -1, so
                 * that when we release the lock, we properly wake up the
                 * other waiters:
                 */
                old_val = atomic_xchg(&lock->count, -1);
                if (old_val == 1)
                        break;

                /*
                 * got a signal? (This code gets eliminated in the
                 * TASK_UNINTERRUPTIBLE case.)
                 */
                if (unlikely((state == TASK_INTERRUPTIBLE &&
                                        signal_pending(task)) ||
                             (state == TASK_KILLABLE &&
                                        fatal_signal_pending(task)))) {
                        mutex_remove_waiter(lock, &waiter,
                                            task_thread_info(task));
                        mutex_release(&lock->dep_map, 1, ip);
                        spin_unlock_mutex(&lock->wait_lock, flags);

                        debug_mutex_free_waiter(&waiter);
                        return -EINTR;
                }
                __set_task_state(task, state);

                /* didn't get the lock, go to sleep: */
                spin_unlock_mutex(&lock->wait_lock, flags);
                schedule();
                spin_lock_mutex(&lock->wait_lock, flags);
        }

done:
        lock_acquired(&lock->dep_map);
        /* got the lock - rejoice! */
        mutex_remove_waiter(lock, &waiter, task_thread_info(task));
        debug_mutex_set_owner(lock, task_thread_info(task));

        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        debug_mutex_free_waiter(&waiter);

        return 0;
}

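/*
 * For reference (not part of the original file): the lock->count protocol
 * used above is a three-state machine -
 *
 *       1 - unlocked
 *       0 - locked, no waiters
 *      -1 - locked, possible waiters
 *
 * Waiters always xchg() the count to -1 rather than 0, so that the unlock
 * fastpath (the 0->1 transition) fails whenever anyone might be queued,
 * forcing control into __mutex_unlock_slowpath() to do the wakeup.
 */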
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass,
                                   _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif

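/*
 * Illustrative usage (not part of the original file): taking two mutexes of
 * the same lock class in a fixed order, with a subclass annotation so that
 * lockdep does not flag the nesting as a self-deadlock. The struct is
 * hypothetical; assume both mutexes were mutex_init()ed elsewhere.
 */
struct example_node {
        struct mutex lock;
};

static void __maybe_unused
example_lock_pair(struct example_node *parent, struct example_node *child)
{
        mutex_lock(&parent->lock);
        mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
        /* ... operate on both nodes ... */
        mutex_unlock(&child->lock);
        mutex_unlock(&parent->lock);
}
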
/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;

        spin_lock_mutex(&lock->wait_lock, flags);
        mutex_release(&lock->dep_map, nested, _RET_IP_);
        debug_mutex_unlock(lock);

        /*
         * some architectures leave the lock unlocked in the fastpath failure
         * case, others need to leave it locked. In the latter case we have
         * to unlock it here:
         */
        if (__mutex_slowpath_needs_to_unlock())
                atomic_set(&lock->count, 1);

        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
                struct mutex_waiter *waiter =
                                list_entry(lock->wait_list.next,
                                           struct mutex_waiter, list);

                debug_mutex_wake_waiter(lock, waiter);

                wake_up_process(waiter->task);
        }

        debug_mutex_clear_owner(lock);

        spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
        __mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/***
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(): sleep until the mutex becomes
 * available and return 0 once it has been acquired. If a signal
 * arrives while waiting for the lock then this function returns
 * -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
        might_sleep();
        return __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_interruptible_slowpath);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
        might_sleep();
        return __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_killable_slowpath);
}
EXPORT_SYMBOL(mutex_lock_killable);

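/*
 * Illustrative usage (not part of the original file): checking the return
 * value of mutex_lock_interruptible(), which is the whole point of using
 * it over mutex_lock(). The mutex and restart semantics are hypothetical.
 */
static DEFINE_MUTEX(example_io_lock);

static int __maybe_unused example_do_io(void)
{
        if (mutex_lock_interruptible(&example_io_lock))
                return -ERESTARTSYS;    /* signal arrived; let the caller retry */
        /* ... critical section ... */
        mutex_unlock(&example_io_lock);
        return 0;
}
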
static noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;
        int prev;

        spin_lock_mutex(&lock->wait_lock, flags);

        prev = atomic_xchg(&lock->count, -1);
        if (likely(prev == 1)) {
                debug_mutex_set_owner(lock, current_thread_info());
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
        }
        /* Set it back to 0 if there are no waiters: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        return prev == 1;
}

/***
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * its return values are the inverse of down_trylock()'s! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
        return __mutex_fastpath_trylock(&lock->count,
                                        __mutex_trylock_slowpath);
}

EXPORT_SYMBOL(mutex_trylock);
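
/*
 * Illustrative usage (not part of the original file): opportunistic locking
 * with mutex_trylock(). Note the spin_trylock()-style convention - nonzero
 * means the lock was taken. The helper below is hypothetical.
 */
static DEFINE_MUTEX(example_cache_lock);

static int __maybe_unused example_flush_if_idle(void)
{
        if (!mutex_trylock(&example_cache_lock))
                return 0;               /* contended - skip this round */
        /* ... flush work done under the lock ... */
        mutex_unlock(&example_cache_lock);
        return 1;
}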