Commit 6ad36762d7a88d747f6fed95194b4f7ff5da8df4

Authored by Oleg Nesterov
Committed by Ingo Molnar
1 parent 493d35863d

__mutex_lock_common: use signal_pending_state()

Change __mutex_lock_common() to use signal_pending_state() for the sake of
code reuse.

This adds 7 bytes to kernel/mutex.o, but afaics only because gcc isn't smart
enough.

(btw, uninlining __mutex_lock_common() shrinks .text from 2722 to 1542 bytes,
 so perhaps that is worth doing.)

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
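
For context, signal_pending_state() folds the two checks the slowpath used to
open-code (TASK_INTERRUPTIBLE plus signal_pending(), and TASK_KILLABLE plus
fatal_signal_pending()) into one helper keyed on the sleep state. The sketch
below is based on the helper's usual definition in include/linux/sched.h;
treat the exact body as an approximation rather than a quote of this tree:

static inline int signal_pending_state(long state, struct task_struct *p)
{
        /* Only interruptible or killable sleeps care about signals at all. */
        if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
                return 0;
        if (!signal_pending(p))
                return 0;

        /*
         * TASK_INTERRUPTIBLE reacts to any pending signal;
         * TASK_KILLABLE (== TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) reacts
         * only to a fatal one.
         */
        return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}

With the state argument passed down from the lock APIs, the same branch now
serves the uninterruptible, interruptible and killable slowpaths, which is the
code reuse the changelog refers to; in the TASK_UNINTERRUPTIBLE case the first
test returns 0 and the whole check can still be optimized away.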

Showing 1 changed file (kernel/mutex.c) with 1 addition and 4 deletions. In the inline diff below, removed lines are prefixed with "-" and the added line with "+".

/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/***
 * mutex_init - initialize the mutex
 * @lock: the mutex to be initialized
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
        atomic_set(&lock->count, 1);
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);

        debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/***
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides mutex must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void inline __sched mutex_lock(struct mutex *lock)
{
        might_sleep();
        /*
         * The locking fastpath is the 1->0 transition from
         * 'unlocked' into 'locked' state.
         */
        __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}

EXPORT_SYMBOL(mutex_lock);
#endif

static noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/***
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
        __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    unsigned long ip)
{
        struct task_struct *task = current;
        struct mutex_waiter waiter;
        unsigned int old_val;
        unsigned long flags;

        spin_lock_mutex(&lock->wait_lock, flags);

        debug_mutex_lock_common(lock, &waiter);
        mutex_acquire(&lock->dep_map, subclass, 0, ip);
        debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

        /* add waiting tasks to the end of the waitqueue (FIFO): */
        list_add_tail(&waiter.list, &lock->wait_list);
        waiter.task = task;

        old_val = atomic_xchg(&lock->count, -1);
        if (old_val == 1)
                goto done;

        lock_contended(&lock->dep_map, ip);

        for (;;) {
                /*
                 * Lets try to take the lock again - this is needed even if
                 * we get here for the first time (shortly after failing to
                 * acquire the lock), to make sure that we get a wakeup once
                 * it's unlocked. Later on, if we sleep, this is the
                 * operation that gives us the lock. We xchg it to -1, so
                 * that when we release the lock, we properly wake up the
                 * other waiters:
                 */
                old_val = atomic_xchg(&lock->count, -1);
                if (old_val == 1)
                        break;

                /*
                 * got a signal? (This code gets eliminated in the
                 * TASK_UNINTERRUPTIBLE case.)
                 */
-               if (unlikely((state == TASK_INTERRUPTIBLE &&
-                                       signal_pending(task)) ||
-                            (state == TASK_KILLABLE &&
-                                       fatal_signal_pending(task)))) {
+               if (unlikely(signal_pending_state(state, task))) {
                        mutex_remove_waiter(lock, &waiter,
                                            task_thread_info(task));
                        mutex_release(&lock->dep_map, 1, ip);
                        spin_unlock_mutex(&lock->wait_lock, flags);

                        debug_mutex_free_waiter(&waiter);
                        return -EINTR;
                }
                __set_task_state(task, state);

                /* didnt get the lock, go to sleep: */
                spin_unlock_mutex(&lock->wait_lock, flags);
                schedule();
                spin_lock_mutex(&lock->wait_lock, flags);
        }

done:
        lock_acquired(&lock->dep_map);
        /* got the lock - rejoice! */
        mutex_remove_waiter(lock, &waiter, task_thread_info(task));
        debug_mutex_set_owner(lock, task_thread_info(task));

        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        debug_mutex_free_waiter(&waiter);

        return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;

        spin_lock_mutex(&lock->wait_lock, flags);
        mutex_release(&lock->dep_map, nested, _RET_IP_);
        debug_mutex_unlock(lock);

        /*
         * some architectures leave the lock unlocked in the fastpath failure
         * case, others need to leave it locked. In the later case we have to
         * unlock it here
         */
        if (__mutex_slowpath_needs_to_unlock())
                atomic_set(&lock->count, 1);

        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
                struct mutex_waiter *waiter =
                                list_entry(lock->wait_list.next,
                                           struct mutex_waiter, list);

                debug_mutex_wake_waiter(lock, waiter);

                wake_up_process(waiter->task);
        }

        debug_mutex_clear_owner(lock);

        spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
        __mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/***
 * mutex_lock_interruptible - acquire the mutex, interruptable
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
        might_sleep();
        return __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_interruptible_slowpath);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
        might_sleep();
        return __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_killable_slowpath);
}
EXPORT_SYMBOL(mutex_lock_killable);

static noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;
        int prev;

        spin_lock_mutex(&lock->wait_lock, flags);

        prev = atomic_xchg(&lock->count, -1);
        if (likely(prev == 1)) {
                debug_mutex_set_owner(lock, current_thread_info());
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
        }
        /* Set it back to 0 if there are no waiters: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        return prev == 1;
}

/***
 * mutex_trylock - try acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated to the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
        return __mutex_fastpath_trylock(&lock->count,
                                        __mutex_trylock_slowpath);
}

EXPORT_SYMBOL(mutex_trylock);
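
As a caller-side illustration of the -EINTR contract the slowpath implements:
with TASK_KILLABLE the waiter backs out only for a fatal signal, while
TASK_INTERRUPTIBLE reacts to any pending signal. A hypothetical user follows
(frob_device, frob_update and dev->lock are made up for this sketch and are
not part of the patch):

/* Hypothetical caller, for illustration only. */
static int frob_update(struct frob_device *dev)
{
        /* Nonzero return means a fatal signal arrived while waiting. */
        if (mutex_lock_killable(&dev->lock))
                return -EINTR;

        /* ... critical section protected by dev->lock ... */

        mutex_unlock(&dev->lock);
        return 0;
}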