Commit ad776537cc6b4b936cfd11893e7b698dfa072666
Committed by: Matthew Wilcox
Parent: 0b94e97a25
Add mutex_lock_killable

Similar to mutex_lock_interruptible(), but the wait can be interrupted
only by a fatal signal.

Signed-off-by: Liam R. Howlett <howlett@gmail.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
2 files changed, 38 insertions(+), 3 deletions(-)
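Before the diff, a sketch of the intended usage. This example is not part of
the commit; the device structure and function names are hypothetical, and the
-EINTR return mirrors the error value __mutex_lock_common() produces.

/*
 * Hypothetical caller (illustrative only). A task blocked here can be
 * reclaimed with SIGKILL, but unlike mutex_lock_interruptible() it will
 * not wake for ordinary catchable signals such as SIGINT.
 */
static ssize_t my_dev_read(struct my_dev *dev, char __user *buf, size_t len)
{
	if (mutex_lock_killable(&dev->lock))
		return -EINTR;	/* fatal signal arrived while waiting */

	/* ... critical section ... */

	mutex_unlock(&dev->lock);
	return len;
}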
include/linux/mutex.h
@@ -125,15 +125,20 @@
 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
 extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
 					unsigned int subclass);
+extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
+					unsigned int subclass);
 
 #define mutex_lock(lock) mutex_lock_nested(lock, 0)
 #define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
+#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
 #else
 extern void fastcall mutex_lock(struct mutex *lock);
 extern int __must_check fastcall mutex_lock_interruptible(struct mutex *lock);
+extern int __must_check fastcall mutex_lock_killable(struct mutex *lock);
 
 # define mutex_lock_nested(lock, subclass) mutex_lock(lock)
 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
+# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
 #endif
 
 /*
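With CONFIG_DEBUG_LOCK_ALLOC, mutex_lock_killable() becomes a macro over the
_nested variant with subclass 0, mirroring the existing mutex_lock() and
mutex_lock_interruptible() pattern; without it, the plain fastcall function is
declared. A hedged sketch of when the explicit subclass form matters (the
parent/child structures are hypothetical; SINGLE_DEPTH_NESTING is lockdep's
stock subclass for one legitimate level of same-class nesting):

	/* Two mutexes of the same lock class taken in a fixed
	 * parent->child order; the subclass tells lockdep that the
	 * apparent recursion is intentional. */
	if (mutex_lock_killable(&parent->lock))
		return -EINTR;
	if (mutex_lock_killable_nested(&child->lock, SINGLE_DEPTH_NESTING)) {
		mutex_unlock(&parent->lock);
		return -EINTR;
	}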
kernel/mutex.c
@@ -166,9 +166,12 @@
 		 * got a signal? (This code gets eliminated in the
 		 * TASK_UNINTERRUPTIBLE case.)
 		 */
-		if (unlikely(state == TASK_INTERRUPTIBLE &&
-						signal_pending(task))) {
-			mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+		if (unlikely((state == TASK_INTERRUPTIBLE &&
+						signal_pending(task)) ||
+			     (state == TASK_KILLABLE &&
+						fatal_signal_pending(task)))) {
+			mutex_remove_waiter(lock, &waiter,
+					    task_thread_info(task));
 			mutex_release(&lock->dep_map, 1, ip);
 			spin_unlock_mutex(&lock->wait_lock, flags);
 
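The new branch depends on TASK_KILLABLE and fatal_signal_pending() from the
companion patches in this series: a waiter sleeping in TASK_KILLABLE is woken
for SIGKILL (via the TASK_WAKEKILL bit) but otherwise behaves as
uninterruptible. Approximately, as a sketch rather than a verbatim quote of
those patches:

	/* Sketch of the companion TASK_KILLABLE definitions. */
	#define TASK_WAKEKILL		128
	#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)

	static inline int __fatal_signal_pending(struct task_struct *p)
	{
		return unlikely(sigismember(&p->pending.signal, SIGKILL));
	}

	static inline int fatal_signal_pending(struct task_struct *p)
	{
		return signal_pending(p) && __fatal_signal_pending(p);
	}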
@@ -211,6 +214,14 @@
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
 
 int __sched
+mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
+{
+	might_sleep();
+	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
+
+int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
@@ -272,6 +283,9 @@
  * mutex_lock_interruptible() and mutex_trylock().
  */
 static int fastcall noinline __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count);
+
+static noinline int fastcall __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
 /***
@@ -294,6 +308,14 @@
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
+int fastcall __sched mutex_lock_killable(struct mutex *lock)
+{
+	might_sleep();
+	return __mutex_fastpath_lock_retval
+			(&lock->count, __mutex_lock_killable_slowpath);
+}
+EXPORT_SYMBOL(mutex_lock_killable);
+
 static void fastcall noinline __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
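__mutex_fastpath_lock_retval() is per-architecture; the generic fallback
(asm-generic/mutex-dec.h) behaves roughly as below. This is a sketch from
memory of the generic header of this era; real implementations may be
assembly and may differ in barrier placement.

	static inline int
	__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
	{
		/* 1 -> 0 is an uncontended acquire; a negative result
		 * means the lock is held or has waiters. */
		if (unlikely(atomic_dec_return(count) < 0))
			return fail_fn(count);	/* here: __mutex_lock_killable_slowpath */
		return 0;
	}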
@@ -303,6 +325,14 @@
 }
 
 static int fastcall noinline __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count)
+{
+	struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
+}
+
+static noinline int fastcall __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
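Both slowpaths receive only the atomic_t because the fastpath calling
convention (often implemented in assembly) passes just &lock->count;
container_of() then recovers the enclosing struct mutex. The idiom, sketched
minimally (the kernel's real macro adds a typeof() sanity check):

	#include <stddef.h>

	/* Recover a pointer to the enclosing structure from a pointer
	 * to one of its members by subtracting the member's offset. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))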