Commit e4564f79d4b6923da7360df4b24a48cc2d4160de
Parent: 3aa416b07f
lockdep: fixup mutex annotations
The fancy mutex_lock fastpath has too many indirections to track the
caller, hence all contentions are perceived to come from mutex_lock().

Avoid this by explicitly not using the fastpath code (it was disabled
already anyway).

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
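In concrete terms: with CONFIG_DEBUG_LOCK_ALLOC enabled, mutex_lock() and mutex_lock_interruptible() become macros for their _nested variants, which capture the caller's return address via _RET_IP_ and hand it down to the common slowpath; the fastpath and its extra indirection are compiled out. A minimal userspace sketch of the caller-tracking idea (struct lock, lock_common() and lock_annotated() are made-up names, not kernel API):

#include <stdio.h>

struct lock { int dummy; };

/* hypothetical slowpath: takes the caller's ip as an argument
 * rather than sampling the return address itself, where it would
 * only ever see its immediate (wrapper) caller */
static void lock_common(struct lock *l, unsigned long ip)
{
	printf("contention attributed to %#lx\n", ip);
}

/* caller-facing entry point, deliberately out of line: the return
 * address captured here names the real lock user */
__attribute__((noinline))
static void lock_annotated(struct lock *l)
{
	lock_common(l, (unsigned long)__builtin_return_address(0));
}

int main(void)
{
	struct lock l = { 0 };
	lock_annotated(&l);	/* recorded ip points just past this call */
	return 0;
}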
Showing 2 changed files with 26 additions and 18 deletions
include/linux/mutex.h
@@ -120,14 +120,17 @@
  * See kernel/mutex.c for detailed documentation of these APIs.
  * Also see Documentation/mutex-design.txt.
  */
-extern void fastcall mutex_lock(struct mutex *lock);
-extern int __must_check fastcall mutex_lock_interruptible(struct mutex *lock);
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
 extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
 					unsigned int subclass);
+
+#define mutex_lock(lock) mutex_lock_nested(lock, 0)
+#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
 #else
+extern void fastcall mutex_lock(struct mutex *lock);
+extern int __must_check fastcall mutex_lock_interruptible(struct mutex *lock);
+
 # define mutex_lock_nested(lock, subclass) mutex_lock(lock)
 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
 #endif
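The net effect on lockdep-enabled builds is that every existing call site routes through the annotated entry points with subclass 0, with no caller changes required; for an arbitrary mutex (dev->lock below is purely illustrative):

/* as written at the call site: */
mutex_lock(&dev->lock);

/* what the compiler sees with CONFIG_DEBUG_LOCK_ALLOC=y: */
mutex_lock_nested(&dev->lock, 0);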
kernel/mutex.c
@@ -51,6 +51,7 @@
 
 EXPORT_SYMBOL(__mutex_init);
 
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /*
  * We split the mutex lock/unlock logic into separate fastpath and
  * slowpath functions, to reduce the register pressure on the fastpath.
@@ -92,6 +93,7 @@
 }
 
 EXPORT_SYMBOL(mutex_lock);
+#endif
 
 static void fastcall noinline __sched
 __mutex_unlock_slowpath(atomic_t *lock_count);
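For background on why the fastpath defeats caller tracking: the fastpath of this era (e.g. the asm-generic atomic-decrement variant) tries the lock inline and only calls an out-of-line slowpath on contention, so any return address sampled in shared code names the wrapper rather than the mutex user. A simplified userspace model of that shape, not the actual per-architecture implementation:

#include <stdatomic.h>

/* crude model of the atomic-decrement fastpath: count 1 = unlocked,
 * 0 = locked, negative = contended */
static inline void fastpath_lock(atomic_int *count,
				 void (*fail_fn)(atomic_int *))
{
	if (atomic_fetch_sub(count, 1) <= 0)
		/* contended: inside fail_fn, the sampled return
		 * address points here, not at the mutex user */
		fail_fn(count);
}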
@@ -122,7 +124,8 @@
  * Lock a mutex (possibly interruptible), slowpath:
  */
 static inline int __sched
-__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
+__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+		unsigned long ip)
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
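A note on the new ip argument threaded through this and the following hunks: in kernels of this vintage _RET_IP_ comes from <linux/kernel.h>:

#define _RET_IP_	(unsigned long)__builtin_return_address(0)

Evaluated inside an out-of-line, caller-facing function such as mutex_lock_nested(), it captures the mutex user's call site; passing it down as ip lets mutex_acquire(), lock_contended() and mutex_release() below report that site instead of a frame internal to kernel/mutex.c.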
@@ -132,7 +135,7 @@
 	spin_lock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_lock_common(lock, &waiter);
-	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+	mutex_acquire(&lock->dep_map, subclass, 0, ip);
 	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
 	/* add waiting tasks to the end of the waitqueue (FIFO): */
@@ -143,7 +146,7 @@
 	if (old_val == 1)
 		goto done;
 
-	lock_contended(&lock->dep_map, _RET_IP_);
+	lock_contended(&lock->dep_map, ip);
 
 	for (;;) {
 		/*
@@ -166,7 +169,7 @@
 		if (unlikely(state == TASK_INTERRUPTIBLE &&
 			     signal_pending(task))) {
 			mutex_remove_waiter(lock, &waiter, task_thread_info(task));
-			mutex_release(&lock->dep_map, 1, _RET_IP_);
+			mutex_release(&lock->dep_map, 1, ip);
 			spin_unlock_mutex(&lock->wait_lock, flags);
 
 			debug_mutex_free_waiter(&waiter);
@@ -197,20 +200,12 @@
 	return 0;
 }
 
-static void fastcall noinline __sched
-__mutex_lock_slowpath(atomic_t *lock_count)
-{
-	struct mutex *lock = container_of(lock_count, struct mutex, count);
-
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
-}
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void __sched
 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
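For context (the subclass parameter itself is unchanged by this patch): the subclass is what lets a caller legitimately nest two locks of the same lock class; an illustrative caller, where double_lock() is a made-up name:

#include <linux/mutex.h>	/* pulls in SINGLE_DEPTH_NESTING via lockdep */

/* hypothetical helper: takes two mutexes of the same class in a
 * fixed order; the subclass tells lockdep the nesting is intended */
static void double_lock(struct mutex *a, struct mutex *b)
{
	mutex_lock(a);
	mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
	/* ... both locks held ... */
	mutex_unlock(b);
	mutex_unlock(a);
}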
@@ -219,7 +214,7 @@
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
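Likewise for the interruptible variant, an illustrative caller (do_locked_work() is a made-up name); the return value matters because the lock attempt fails with -EINTR when a signal arrives during the wait:

#include <linux/mutex.h>

static int do_locked_work(struct mutex *m)
{
	int ret;

	ret = mutex_lock_interruptible(m);	/* -EINTR on signal */
	if (ret)
		return ret;
	/* ... critical section ... */
	mutex_unlock(m);
	return 0;
}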
@@ -271,6 +266,7 @@
 	__mutex_unlock_common_slowpath(lock_count, 1);
 }
 
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /*
  * Here come the less common (and hence less performance-critical) APIs:
  * mutex_lock_interruptible() and mutex_trylock().
@@ -298,13 +294,22 @@
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
+static void fastcall noinline __sched
+__mutex_lock_slowpath(atomic_t *lock_count)
+{
+	struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
+}
+
 static int fastcall noinline __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
 }
+#endif
 
 /*
  * Spinlock based trylock, we take the spinlock and check whether we