Commit 9a11b49a805665e13a56aa067afaf81d43ec1514

Authored by Ingo Molnar
Committed by Linus Torvalds
1 parent fb7e42413a

[PATCH] lockdep: better lock debugging

Generic lock debugging:

 - generalized lock debugging framework: a bug detected in one lock
   subsystem now turns off debugging in all lock subsystems, so a single
   failure does not cascade into a flood of follow-up reports.  (A minimal
   model of this switch is sketched after this list.)

 - got rid of the caller address passing (__IP__/__IP_DECL__/etc.) from
   the mutex/rtmutex debugging code: it caused way too much prototype
   hackery, and lockdep will give the same information anyway.

 - ability to do silent tests

 - check lock freeing in vfree too.

 - more fine-grained debugging options, to allow distributions to
   turn off more expensive debugging features.
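
For illustration only, here is a minimal userspace model of two of the pieces
above: the single debug_locks switch that debug_locks_off() clears on the
first reported bug (so one failure does not drown the log in follow-ups), and
the _RET_IP_/_THIS_IP_ macros that replace the old __IP__/__IP_DECL__
argument passing.  It mirrors the definitions added below in
include/linux/debug_locks.h and lib/debug_locks.c, but uses fprintf() instead
of WARN_ON() and is not the kernel code itself:

#include <stdio.h>

/* model of the global switch from lib/debug_locks.c */
static int debug_locks = 1;
static int debug_locks_silent;	/* the selftests set this for silent runs */

static int debug_locks_off(void)
{
	/* only the first bug report wins; later callers find the flag cleared */
	if (__atomic_exchange_n(&debug_locks, 0, __ATOMIC_SEQ_CST))
		if (!debug_locks_silent)
			return 1;
	return 0;
}

/* model of DEBUG_LOCKS_WARN_ON() from include/linux/debug_locks.h */
#define DEBUG_LOCKS_WARN_ON(c)						\
({									\
	int __ret = 0;							\
									\
	if (c) {							\
		if (debug_locks_off())					\
			fprintf(stderr, "lock debugging bug: %s\n", #c);\
		__ret = 1;						\
	}								\
	__ret;								\
})

/* caller-IP macros that replace the old __IP__ argument passing */
#define _RET_IP_	((unsigned long)__builtin_return_address(0))
#define _THIS_IP_	({ __label__ __here; __here: (unsigned long)&&__here; })

static void fake_lock(void)
{
	/* debug code can still report who called it, without extra arguments */
	printf("acquired at caller ip %#lx (this ip %#lx)\n", _RET_IP_, _THIS_IP_);
}

int main(void)
{
	fake_lock();
	DEBUG_LOCKS_WARN_ON(1);	/* first bug: reported, debugging turned off */
	DEBUG_LOCKS_WARN_ON(1);	/* later bugs: return 1 but print nothing */
	return 0;
}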

There's no separate 'held mutexes' list anymore - but there's a 'held locks'
stack within lockdep, which unifies deadlock detection across all lock
classes.  (this is independent of the lockdep validation stuff - lockdep first
checks whether we are holding a lock already)
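
The held-locks idea can be pictured with a toy model (illustrative only, not
lockdep's actual data structures): each acquisition is pushed on a per-task
stack, each release pops it, and acquiring a lock that is already on the
current task's stack is flagged, whatever lock class it belongs to:

#include <stdio.h>

#define MAX_HELD	48

/* toy per-task state; in the kernel this lives with the task's lock state */
static const void *held_locks[MAX_HELD];
static int held_depth;

static void toy_acquire(const void *lock, const char *name)
{
	int i;

	for (i = 0; i < held_depth; i++) {
		if (held_locks[i] == lock) {
			fprintf(stderr, "BUG: %s is already held by this task\n",
				name);
			return;
		}
	}
	if (held_depth < MAX_HELD)
		held_locks[held_depth++] = lock;
}

static void toy_release(const void *lock)
{
	/* this toy expects LIFO release order */
	if (held_depth > 0 && held_locks[held_depth - 1] == lock)
		held_depth--;
}

int main(void)
{
	int a, b;			/* stand-ins for two locks of any class */

	toy_acquire(&a, "a");
	toy_acquire(&b, "b");
	toy_acquire(&a, "a");		/* flagged: 'a' is already held */
	toy_release(&b);
	toy_release(&a);
	return 0;
}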

Here are the current debugging options:

CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_LOCK_ALLOC=y

which do:

 config DEBUG_MUTEXES
          bool "Mutex debugging, basic checks"

 config DEBUG_LOCK_ALLOC
         bool "Detect incorrect freeing of live mutexes"

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 25 changed files with 265 additions and 567 deletions

drivers/char/sysrq.c
... ... @@ -151,7 +151,7 @@
151 151 static void sysrq_handle_showlocks(int key, struct pt_regs *pt_regs,
152 152 struct tty_struct *tty)
153 153 {
154   - mutex_debug_show_all_locks();
  154 + debug_show_all_locks();
155 155 }
156 156 static struct sysrq_key_op sysrq_showlocks_op = {
157 157 .handler = sysrq_handle_showlocks,
include/asm-generic/mutex-null.h
... ... @@ -10,16 +10,11 @@
10 10 #ifndef _ASM_GENERIC_MUTEX_NULL_H
11 11 #define _ASM_GENERIC_MUTEX_NULL_H
12 12  
13   -/* extra parameter only needed for mutex debugging: */
14   -#ifndef __IP__
15   -# define __IP__
16   -#endif
17   -
18   -#define __mutex_fastpath_lock(count, fail_fn) fail_fn(count __RET_IP__)
19   -#define __mutex_fastpath_lock_retval(count, fail_fn) fail_fn(count __RET_IP__)
20   -#define __mutex_fastpath_unlock(count, fail_fn) fail_fn(count __RET_IP__)
21   -#define __mutex_fastpath_trylock(count, fail_fn) fail_fn(count)
22   -#define __mutex_slowpath_needs_to_unlock() 1
  13 +#define __mutex_fastpath_lock(count, fail_fn) fail_fn(count)
  14 +#define __mutex_fastpath_lock_retval(count, fail_fn) fail_fn(count)
  15 +#define __mutex_fastpath_unlock(count, fail_fn) fail_fn(count)
  16 +#define __mutex_fastpath_trylock(count, fail_fn) fail_fn(count)
  17 +#define __mutex_slowpath_needs_to_unlock() 1
23 18  
24 19 #endif
include/linux/debug_locks.h
  1 +#ifndef __LINUX_DEBUG_LOCKING_H
  2 +#define __LINUX_DEBUG_LOCKING_H
  3 +
  4 +extern int debug_locks;
  5 +extern int debug_locks_silent;
  6 +
  7 +/*
  8 + * Generic 'turn off all lock debugging' function:
  9 + */
  10 +extern int debug_locks_off(void);
  11 +
  12 +/*
  13 + * In the debug case we carry the caller's instruction pointer into
  14 + * other functions, but we dont want the function argument overhead
  15 + * in the nondebug case - hence these macros:
  16 + */
  17 +#define _RET_IP_ (unsigned long)__builtin_return_address(0)
  18 +#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
  19 +
  20 +#define DEBUG_LOCKS_WARN_ON(c) \
  21 +({ \
  22 + int __ret = 0; \
  23 + \
  24 + if (unlikely(c)) { \
  25 + if (debug_locks_off()) \
  26 + WARN_ON(1); \
  27 + __ret = 1; \
  28 + } \
  29 + __ret; \
  30 +})
  31 +
  32 +#ifdef CONFIG_SMP
  33 +# define SMP_DEBUG_LOCKS_WARN_ON(c) DEBUG_LOCKS_WARN_ON(c)
  34 +#else
  35 +# define SMP_DEBUG_LOCKS_WARN_ON(c) do { } while (0)
  36 +#endif
  37 +
  38 +#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
  39 + extern void locking_selftest(void);
  40 +#else
  41 +# define locking_selftest() do { } while (0)
  42 +#endif
  43 +
  44 +#ifdef CONFIG_LOCKDEP
  45 +extern void debug_show_all_locks(void);
  46 +extern void debug_show_held_locks(struct task_struct *task);
  47 +extern void debug_check_no_locks_freed(const void *from, unsigned long len);
  48 +extern void debug_check_no_locks_held(struct task_struct *task);
  49 +#else
  50 +static inline void debug_show_all_locks(void)
  51 +{
  52 +}
  53 +
  54 +static inline void debug_show_held_locks(struct task_struct *task)
  55 +{
  56 +}
  57 +
  58 +static inline void
  59 +debug_check_no_locks_freed(const void *from, unsigned long len)
  60 +{
  61 +}
  62 +
  63 +static inline void
  64 +debug_check_no_locks_held(struct task_struct *task)
  65 +{
  66 +}
  67 +#endif
  68 +
  69 +#endif
include/linux/init_task.h
... ... @@ -124,7 +124,6 @@
124 124 .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
125 125 .fs_excl = ATOMIC_INIT(0), \
126 126 .pi_lock = SPIN_LOCK_UNLOCKED, \
127   - INIT_RT_MUTEXES(tsk) \
128 127 }
129 128  
130 129  
include/linux/mm.h
... ... @@ -14,6 +14,7 @@
14 14 #include <linux/prio_tree.h>
15 15 #include <linux/fs.h>
16 16 #include <linux/mutex.h>
  17 +#include <linux/debug_locks.h>
17 18  
18 19 struct mempolicy;
19 20 struct anon_vma;
... ... @@ -1033,13 +1034,6 @@
1033 1034 {
1034 1035 }
1035 1036 #endif /* CONFIG_PROC_FS */
1036   -
1037   -static inline void
1038   -debug_check_no_locks_freed(const void *from, unsigned long len)
1039   -{
1040   - mutex_debug_check_no_locks_freed(from, len);
1041   - rt_mutex_debug_check_no_locks_freed(from, len);
1042   -}
1043 1037  
1044 1038 #ifndef CONFIG_DEBUG_PAGEALLOC
1045 1039 static inline void
include/linux/mutex-debug.h
... ... @@ -7,18 +7,12 @@
7 7 * Mutexes - debugging helpers:
8 8 */
9 9  
10   -#define __DEBUG_MUTEX_INITIALIZER(lockname) \
11   - , .held_list = LIST_HEAD_INIT(lockname.held_list), \
12   - .name = #lockname , .magic = &lockname
  10 +#define __DEBUG_MUTEX_INITIALIZER(lockname) \
  11 + , .magic = &lockname
13 12  
14   -#define mutex_init(sem) __mutex_init(sem, __FUNCTION__)
  13 +#define mutex_init(sem) __mutex_init(sem, __FILE__":"#sem)
15 14  
16 15 extern void FASTCALL(mutex_destroy(struct mutex *lock));
17   -
18   -extern void mutex_debug_show_all_locks(void);
19   -extern void mutex_debug_show_held_locks(struct task_struct *filter);
20   -extern void mutex_debug_check_no_locks_held(struct task_struct *task);
21   -extern void mutex_debug_check_no_locks_freed(const void *from, unsigned long len);
22 16  
23 17 #endif
include/linux/mutex.h
... ... @@ -50,8 +50,6 @@
50 50 struct list_head wait_list;
51 51 #ifdef CONFIG_DEBUG_MUTEXES
52 52 struct thread_info *owner;
53   - struct list_head held_list;
54   - unsigned long acquire_ip;
55 53 const char *name;
56 54 void *magic;
57 55 #endif
... ... @@ -76,10 +74,6 @@
76 74 # define __DEBUG_MUTEX_INITIALIZER(lockname)
77 75 # define mutex_init(mutex) __mutex_init(mutex, NULL)
78 76 # define mutex_destroy(mutex) do { } while (0)
79   -# define mutex_debug_show_all_locks() do { } while (0)
80   -# define mutex_debug_show_held_locks(p) do { } while (0)
81   -# define mutex_debug_check_no_locks_held(task) do { } while (0)
82   -# define mutex_debug_check_no_locks_freed(from, len) do { } while (0)
83 77 #endif
84 78  
85 79 #define __MUTEX_INITIALIZER(lockname) \
include/linux/rtmutex.h
... ... @@ -29,8 +29,6 @@
29 29 struct task_struct *owner;
30 30 #ifdef CONFIG_DEBUG_RT_MUTEXES
31 31 int save_state;
32   - struct list_head held_list_entry;
33   - unsigned long acquire_ip;
34 32 const char *name, *file;
35 33 int line;
36 34 void *magic;
... ... @@ -97,14 +95,6 @@
97 95 extern int rt_mutex_trylock(struct rt_mutex *lock);
98 96  
99 97 extern void rt_mutex_unlock(struct rt_mutex *lock);
100   -
101   -#ifdef CONFIG_DEBUG_RT_MUTEXES
102   -# define INIT_RT_MUTEX_DEBUG(tsk) \
103   - .held_list_head = LIST_HEAD_INIT(tsk.held_list_head), \
104   - .held_list_lock = SPIN_LOCK_UNLOCKED
105   -#else
106   -# define INIT_RT_MUTEX_DEBUG(tsk)
107   -#endif
108 98  
109 99 #ifdef CONFIG_RT_MUTEXES
110 100 # define INIT_RT_MUTEXES(tsk) \
include/linux/sched.h
... ... @@ -865,10 +865,6 @@
865 865 struct plist_head pi_waiters;
866 866 /* Deadlock detection and priority inheritance handling */
867 867 struct rt_mutex_waiter *pi_blocked_on;
868   -# ifdef CONFIG_DEBUG_RT_MUTEXES
869   - spinlock_t held_list_lock;
870   - struct list_head held_list_head;
871   -# endif
872 868 #endif
873 869  
874 870 #ifdef CONFIG_DEBUG_MUTEXES
init/main.c
... ... @@ -47,6 +47,7 @@
47 47 #include <linux/key.h>
48 48 #include <linux/unwind.h>
49 49 #include <linux/buffer_head.h>
  50 +#include <linux/debug_locks.h>
50 51  
51 52 #include <asm/io.h>
52 53 #include <asm/bugs.h>
... ... @@ -511,6 +512,13 @@
511 512 console_init();
512 513 if (panic_later)
513 514 panic(panic_later, panic_param);
  515 + /*
  516 + * Need to run this when irqs are enabled, because it wants
  517 + * to self-test [hard/soft]-irqs on/off lock inversion bugs
  518 + * too:
  519 + */
  520 + locking_selftest();
  521 +
514 522 #ifdef CONFIG_BLK_DEV_INITRD
515 523 if (initrd_start && !initrd_below_start_ok &&
516 524 initrd_start < min_low_pfn << PAGE_SHIFT) {
kernel/exit.c
... ... @@ -933,10 +933,9 @@
933 933 if (unlikely(current->pi_state_cache))
934 934 kfree(current->pi_state_cache);
935 935 /*
936   - * If DEBUG_MUTEXES is on, make sure we are holding no locks:
  936 + * Make sure we are holding no locks:
937 937 */
938   - mutex_debug_check_no_locks_held(tsk);
939   - rt_mutex_debug_check_no_locks_held(tsk);
  938 + debug_check_no_locks_held(tsk);
940 939  
941 940 if (tsk->io_context)
942 941 exit_io_context();
kernel/fork.c
... ... @@ -919,10 +919,6 @@
919 919 spin_lock_init(&p->pi_lock);
920 920 plist_head_init(&p->pi_waiters, &p->pi_lock);
921 921 p->pi_blocked_on = NULL;
922   -# ifdef CONFIG_DEBUG_RT_MUTEXES
923   - spin_lock_init(&p->held_list_lock);
924   - INIT_LIST_HEAD(&p->held_list_head);
925   -# endif
926 922 #endif
927 923 }
928 924  
kernel/mutex-debug.c
... ... @@ -20,52 +20,19 @@
20 20 #include <linux/spinlock.h>
21 21 #include <linux/kallsyms.h>
22 22 #include <linux/interrupt.h>
  23 +#include <linux/debug_locks.h>
23 24  
24 25 #include "mutex-debug.h"
25 26  
26 27 /*
27   - * We need a global lock when we walk through the multi-process
28   - * lock tree. Only used in the deadlock-debugging case.
29   - */
30   -DEFINE_SPINLOCK(debug_mutex_lock);
31   -
32   -/*
33   - * All locks held by all tasks, in a single global list:
34   - */
35   -LIST_HEAD(debug_mutex_held_locks);
36   -
37   -/*
38   - * In the debug case we carry the caller's instruction pointer into
39   - * other functions, but we dont want the function argument overhead
40   - * in the nondebug case - hence these macros:
41   - */
42   -#define __IP_DECL__ , unsigned long ip
43   -#define __IP__ , ip
44   -#define __RET_IP__ , (unsigned long)__builtin_return_address(0)
45   -
46   -/*
47   - * "mutex debugging enabled" flag. We turn it off when we detect
48   - * the first problem because we dont want to recurse back
49   - * into the tracing code when doing error printk or
50   - * executing a BUG():
51   - */
52   -int debug_mutex_on = 1;
53   -
54   -/*
55 28 * Must be called with lock->wait_lock held.
56 29 */
57   -void debug_mutex_set_owner(struct mutex *lock,
58   - struct thread_info *new_owner __IP_DECL__)
  30 +void debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner)
59 31 {
60 32 lock->owner = new_owner;
61   - DEBUG_LOCKS_WARN_ON(!list_empty(&lock->held_list));
62   - if (debug_mutex_on) {
63   - list_add_tail(&lock->held_list, &debug_mutex_held_locks);
64   - lock->acquire_ip = ip;
65   - }
66 33 }
67 34  
68   -void debug_mutex_init_waiter(struct mutex_waiter *waiter)
  35 +void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
69 36 {
70 37 memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
71 38 waiter->magic = waiter;
72 39  
... ... @@ -87,9 +54,10 @@
87 54 }
88 55  
89 56 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90   - struct thread_info *ti __IP_DECL__)
  57 + struct thread_info *ti)
91 58 {
92 59 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
  60 +
93 61 /* Mark the current thread as blocked on the lock: */
94 62 ti->task->blocked_on = waiter;
95 63 waiter->lock = lock;
96 64  
... ... @@ -109,13 +77,10 @@
109 77  
110 78 void debug_mutex_unlock(struct mutex *lock)
111 79 {
  80 + DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
112 81 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
113 82 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
114 83 DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
115   - if (debug_mutex_on) {
116   - DEBUG_LOCKS_WARN_ON(list_empty(&lock->held_list));
117   - list_del_init(&lock->held_list);
118   - }
119 84 }
120 85  
121 86 void debug_mutex_init(struct mutex *lock, const char *name)
122 87  
... ... @@ -123,10 +88,8 @@
123 88 /*
124 89 * Make sure we are not reinitializing a held lock:
125 90 */
126   - mutex_debug_check_no_locks_freed((void *)lock, sizeof(*lock));
  91 + debug_check_no_locks_freed((void *)lock, sizeof(*lock));
127 92 lock->owner = NULL;
128   - INIT_LIST_HEAD(&lock->held_list);
129   - lock->name = name;
130 93 lock->magic = lock;
131 94 }
132 95  
kernel/mutex-debug.h
... ... @@ -10,102 +10,45 @@
10 10 * More details are in kernel/mutex-debug.c.
11 11 */
12 12  
13   -extern spinlock_t debug_mutex_lock;
14   -extern struct list_head debug_mutex_held_locks;
15   -extern int debug_mutex_on;
16   -
17 13 /*
18   - * In the debug case we carry the caller's instruction pointer into
19   - * other functions, but we dont want the function argument overhead
20   - * in the nondebug case - hence these macros:
21   - */
22   -#define __IP_DECL__ , unsigned long ip
23   -#define __IP__ , ip
24   -#define __RET_IP__ , (unsigned long)__builtin_return_address(0)
25   -
26   -/*
27 14 * This must be called with lock->wait_lock held.
28 15 */
29   -extern void debug_mutex_set_owner(struct mutex *lock,
30   - struct thread_info *new_owner __IP_DECL__);
  16 +extern void
  17 +debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner);
31 18  
32 19 static inline void debug_mutex_clear_owner(struct mutex *lock)
33 20 {
34 21 lock->owner = NULL;
35 22 }
36 23  
37   -extern void debug_mutex_init_waiter(struct mutex_waiter *waiter);
  24 +extern void debug_mutex_lock_common(struct mutex *lock,
  25 + struct mutex_waiter *waiter);
38 26 extern void debug_mutex_wake_waiter(struct mutex *lock,
39 27 struct mutex_waiter *waiter);
40 28 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
41 29 extern void debug_mutex_add_waiter(struct mutex *lock,
42 30 struct mutex_waiter *waiter,
43   - struct thread_info *ti __IP_DECL__);
  31 + struct thread_info *ti);
44 32 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
45 33 struct thread_info *ti);
46 34 extern void debug_mutex_unlock(struct mutex *lock);
47   -extern void debug_mutex_init(struct mutex *lock, const char *name);
  35 +extern void debug_mutex_init(struct mutex *lock, const char *name,
  36 + struct lock_class_key *key);
48 37  
49   -#define debug_spin_lock_save(lock, flags) \
50   - do { \
51   - local_irq_save(flags); \
52   - if (debug_mutex_on) \
53   - spin_lock(lock); \
54   - } while (0)
55   -
56   -#define debug_spin_unlock_restore(lock, flags) \
57   - do { \
58   - if (debug_mutex_on) \
59   - spin_unlock(lock); \
60   - local_irq_restore(flags); \
61   - preempt_check_resched(); \
62   - } while (0)
63   -
64 38 #define spin_lock_mutex(lock, flags) \
65 39 do { \
66 40 struct mutex *l = container_of(lock, struct mutex, wait_lock); \
67 41 \
68 42 DEBUG_LOCKS_WARN_ON(in_interrupt()); \
69   - debug_spin_lock_save(&debug_mutex_lock, flags); \
70   - spin_lock(lock); \
  43 + local_irq_save(flags); \
  44 + __raw_spin_lock(&(lock)->raw_lock); \
71 45 DEBUG_LOCKS_WARN_ON(l->magic != l); \
72 46 } while (0)
73 47  
74 48 #define spin_unlock_mutex(lock, flags) \
75 49 do { \
76   - spin_unlock(lock); \
77   - debug_spin_unlock_restore(&debug_mutex_lock, flags); \
  50 + __raw_spin_unlock(&(lock)->raw_lock); \
  51 + local_irq_restore(flags); \
  52 + preempt_check_resched(); \
78 53 } while (0)
79   -
80   -#define DEBUG_OFF() \
81   -do { \
82   - if (debug_mutex_on) { \
83   - debug_mutex_on = 0; \
84   - console_verbose(); \
85   - if (spin_is_locked(&debug_mutex_lock)) \
86   - spin_unlock(&debug_mutex_lock); \
87   - } \
88   -} while (0)
89   -
90   -#define DEBUG_BUG() \
91   -do { \
92   - if (debug_mutex_on) { \
93   - DEBUG_OFF(); \
94   - BUG(); \
95   - } \
96   -} while (0)
97   -
98   -#define DEBUG_LOCKS_WARN_ON(c) \
99   -do { \
100   - if (unlikely(c && debug_mutex_on)) { \
101   - DEBUG_OFF(); \
102   - WARN_ON(1); \
103   - } \
104   -} while (0)
105   -
106   -#ifdef CONFIG_SMP
107   -# define SMP_DEBUG_LOCKS_WARN_ON(c) DEBUG_LOCKS_WARN_ON(c)
108   -#else
109   -# define SMP_DEBUG_LOCKS_WARN_ON(c) do { } while (0)
110   -#endif
kernel/mutex.c
... ... @@ -17,6 +17,7 @@
17 17 #include <linux/module.h>
18 18 #include <linux/spinlock.h>
19 19 #include <linux/interrupt.h>
  20 +#include <linux/debug_locks.h>
20 21  
21 22 /*
22 23 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
... ... @@ -38,7 +39,7 @@
38 39 *
39 40 * It is not allowed to initialize an already locked mutex.
40 41 */
41   -void fastcall __mutex_init(struct mutex *lock, const char *name)
  42 +__always_inline void fastcall __mutex_init(struct mutex *lock, const char *name)
42 43 {
43 44 atomic_set(&lock->count, 1);
44 45 spin_lock_init(&lock->wait_lock);
... ... @@ -56,7 +57,7 @@
56 57 * branch is predicted by the CPU as default-untaken.
57 58 */
58 59 static void fastcall noinline __sched
59   -__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__);
  60 +__mutex_lock_slowpath(atomic_t *lock_count);
60 61  
61 62 /***
62 63 * mutex_lock - acquire the mutex
... ... @@ -79,7 +80,7 @@
79 80 *
80 81 * This function is similar to (but not equivalent to) down().
81 82 */
82   -void fastcall __sched mutex_lock(struct mutex *lock)
  83 +void inline fastcall __sched mutex_lock(struct mutex *lock)
83 84 {
84 85 might_sleep();
85 86 /*
... ... @@ -92,7 +93,7 @@
92 93 EXPORT_SYMBOL(mutex_lock);
93 94  
94 95 static void fastcall noinline __sched
95   -__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__);
  96 +__mutex_unlock_slowpath(atomic_t *lock_count);
96 97  
97 98 /***
98 99 * mutex_unlock - release the mutex
99 100  
100 101  
... ... @@ -120,18 +121,17 @@
120 121 * Lock a mutex (possibly interruptible), slowpath:
121 122 */
122 123 static inline int __sched
123   -__mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
  124 +__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
124 125 {
125 126 struct task_struct *task = current;
126 127 struct mutex_waiter waiter;
127 128 unsigned int old_val;
128 129 unsigned long flags;
129 130  
130   - debug_mutex_init_waiter(&waiter);
131   -
132 131 spin_lock_mutex(&lock->wait_lock, flags);
133 132  
134   - debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip);
  133 + debug_mutex_lock_common(lock, &waiter);
  134 + debug_mutex_add_waiter(lock, &waiter, task->thread_info);
135 135  
136 136 /* add waiting tasks to the end of the waitqueue (FIFO): */
137 137 list_add_tail(&waiter.list, &lock->wait_list);
... ... @@ -173,7 +173,7 @@
173 173  
174 174 /* got the lock - rejoice! */
175 175 mutex_remove_waiter(lock, &waiter, task->thread_info);
176   - debug_mutex_set_owner(lock, task->thread_info __IP__);
  176 + debug_mutex_set_owner(lock, task->thread_info);
177 177  
178 178 /* set it to 0 if there are no waiters left: */
179 179 if (likely(list_empty(&lock->wait_list)))
180 180  
181 181  
182 182  
183 183  
184 184  
... ... @@ -183,32 +183,28 @@
183 183  
184 184 debug_mutex_free_waiter(&waiter);
185 185  
186   - DEBUG_LOCKS_WARN_ON(list_empty(&lock->held_list));
187   - DEBUG_LOCKS_WARN_ON(lock->owner != task->thread_info);
188   -
189 186 return 0;
190 187 }
191 188  
192 189 static void fastcall noinline __sched
193   -__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__)
  190 +__mutex_lock_slowpath(atomic_t *lock_count)
194 191 {
195 192 struct mutex *lock = container_of(lock_count, struct mutex, count);
196 193  
197   - __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE __IP__);
  194 + __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
198 195 }
199 196  
200 197 /*
201 198 * Release the lock, slowpath:
202 199 */
203   -static fastcall noinline void
204   -__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
  200 +static fastcall inline void
  201 +__mutex_unlock_common_slowpath(atomic_t *lock_count)
205 202 {
206 203 struct mutex *lock = container_of(lock_count, struct mutex, count);
207 204 unsigned long flags;
208 205  
209   - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
210   -
211 206 spin_lock_mutex(&lock->wait_lock, flags);
  207 + debug_mutex_unlock(lock);
212 208  
213 209 /*
214 210 * some architectures leave the lock unlocked in the fastpath failure
... ... @@ -218,8 +214,6 @@
218 214 if (__mutex_slowpath_needs_to_unlock())
219 215 atomic_set(&lock->count, 1);
220 216  
221   - debug_mutex_unlock(lock);
222   -
223 217 if (!list_empty(&lock->wait_list)) {
224 218 /* get the first entry from the wait-list: */
225 219 struct mutex_waiter *waiter =
226 220  
... ... @@ -237,11 +231,20 @@
237 231 }
238 232  
239 233 /*
  234 + * Release the lock, slowpath:
  235 + */
  236 +static fastcall noinline void
  237 +__mutex_unlock_slowpath(atomic_t *lock_count)
  238 +{
  239 + __mutex_unlock_common_slowpath(lock_count);
  240 +}
  241 +
  242 +/*
240 243 * Here come the less common (and hence less performance-critical) APIs:
241 244 * mutex_lock_interruptible() and mutex_trylock().
242 245 */
243 246 static int fastcall noinline __sched
244   -__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__);
  247 +__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
245 248  
246 249 /***
247 250 * mutex_lock_interruptible - acquire the mutex, interruptable
248 251  
... ... @@ -264,11 +267,11 @@
264 267 EXPORT_SYMBOL(mutex_lock_interruptible);
265 268  
266 269 static int fastcall noinline __sched
267   -__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__)
  270 +__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
268 271 {
269 272 struct mutex *lock = container_of(lock_count, struct mutex, count);
270 273  
271   - return __mutex_lock_common(lock, TASK_INTERRUPTIBLE __IP__);
  274 + return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
272 275 }
273 276  
274 277 /*
... ... @@ -285,7 +288,8 @@
285 288  
286 289 prev = atomic_xchg(&lock->count, -1);
287 290 if (likely(prev == 1))
288   - debug_mutex_set_owner(lock, current_thread_info() __RET_IP__);
  291 + debug_mutex_set_owner(lock, current_thread_info());
  292 +
289 293 /* Set it back to 0 if there are no waiters: */
290 294 if (likely(list_empty(&lock->wait_list)))
291 295 atomic_set(&lock->count, 0);
kernel/mutex.h
... ... @@ -16,22 +16,16 @@
16 16 #define mutex_remove_waiter(lock, waiter, ti) \
17 17 __list_del((waiter)->list.prev, (waiter)->list.next)
18 18  
19   -#define DEBUG_LOCKS_WARN_ON(c) do { } while (0)
20 19 #define debug_mutex_set_owner(lock, new_owner) do { } while (0)
21 20 #define debug_mutex_clear_owner(lock) do { } while (0)
22   -#define debug_mutex_init_waiter(waiter) do { } while (0)
23 21 #define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
24 22 #define debug_mutex_free_waiter(waiter) do { } while (0)
25   -#define debug_mutex_add_waiter(lock, waiter, ti, ip) do { } while (0)
  23 +#define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
26 24 #define debug_mutex_unlock(lock) do { } while (0)
27 25 #define debug_mutex_init(lock, name) do { } while (0)
28 26  
29   -/*
30   - * Return-address parameters/declarations. They are very useful for
31   - * debugging, but add overhead in the !DEBUG case - so we go the
32   - * trouble of using this not too elegant but zero-cost solution:
33   - */
34   -#define __IP_DECL__
35   -#define __IP__
36   -#define __RET_IP__
  27 +static inline void
  28 +debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
  29 +{
  30 +}
kernel/rtmutex-debug.c
... ... @@ -26,6 +26,7 @@
26 26 #include <linux/interrupt.h>
27 27 #include <linux/plist.h>
28 28 #include <linux/fs.h>
  29 +#include <linux/debug_locks.h>
29 30  
30 31 #include "rtmutex_common.h"
31 32  
... ... @@ -45,8 +46,6 @@
45 46 console_verbose(); \
46 47 if (spin_is_locked(&current->pi_lock)) \
47 48 spin_unlock(&current->pi_lock); \
48   - if (spin_is_locked(&current->held_list_lock)) \
49   - spin_unlock(&current->held_list_lock); \
50 49 } \
51 50 } while (0)
52 51  
... ... @@ -105,14 +104,6 @@
105 104 printk("<none>");
106 105 }
107 106  
108   -static void printk_task_short(task_t *p)
109   -{
110   - if (p)
111   - printk("%s/%d [%p, %3d]", p->comm, p->pid, p, p->prio);
112   - else
113   - printk("<none>");
114   -}
115   -
116 107 static void printk_lock(struct rt_mutex *lock, int print_owner)
117 108 {
118 109 if (lock->name)
119 110  
... ... @@ -128,224 +119,8 @@
128 119 printk_task(rt_mutex_owner(lock));
129 120 printk("\n");
130 121 }
131   - if (rt_mutex_owner(lock)) {
132   - printk("... acquired at: ");
133   - print_symbol("%s\n", lock->acquire_ip);
134   - }
135 122 }
136 123  
137   -static void printk_waiter(struct rt_mutex_waiter *w)
138   -{
139   - printk("-------------------------\n");
140   - printk("| waiter struct %p:\n", w);
141   - printk("| w->list_entry: [DP:%p/%p|SP:%p/%p|PRI:%d]\n",
142   - w->list_entry.plist.prio_list.prev, w->list_entry.plist.prio_list.next,
143   - w->list_entry.plist.node_list.prev, w->list_entry.plist.node_list.next,
144   - w->list_entry.prio);
145   - printk("| w->pi_list_entry: [DP:%p/%p|SP:%p/%p|PRI:%d]\n",
146   - w->pi_list_entry.plist.prio_list.prev, w->pi_list_entry.plist.prio_list.next,
147   - w->pi_list_entry.plist.node_list.prev, w->pi_list_entry.plist.node_list.next,
148   - w->pi_list_entry.prio);
149   - printk("\n| lock:\n");
150   - printk_lock(w->lock, 1);
151   - printk("| w->ti->task:\n");
152   - printk_task(w->task);
153   - printk("| blocked at: ");
154   - print_symbol("%s\n", w->ip);
155   - printk("-------------------------\n");
156   -}
157   -
158   -static void show_task_locks(task_t *p)
159   -{
160   - switch (p->state) {
161   - case TASK_RUNNING: printk("R"); break;
162   - case TASK_INTERRUPTIBLE: printk("S"); break;
163   - case TASK_UNINTERRUPTIBLE: printk("D"); break;
164   - case TASK_STOPPED: printk("T"); break;
165   - case EXIT_ZOMBIE: printk("Z"); break;
166   - case EXIT_DEAD: printk("X"); break;
167   - default: printk("?"); break;
168   - }
169   - printk_task(p);
170   - if (p->pi_blocked_on) {
171   - struct rt_mutex *lock = p->pi_blocked_on->lock;
172   -
173   - printk(" blocked on:");
174   - printk_lock(lock, 1);
175   - } else
176   - printk(" (not blocked)\n");
177   -}
178   -
179   -void rt_mutex_show_held_locks(task_t *task, int verbose)
180   -{
181   - struct list_head *curr, *cursor = NULL;
182   - struct rt_mutex *lock;
183   - task_t *t;
184   - unsigned long flags;
185   - int count = 0;
186   -
187   - if (!rt_trace_on)
188   - return;
189   -
190   - if (verbose) {
191   - printk("------------------------------\n");
192   - printk("| showing all locks held by: | (");
193   - printk_task_short(task);
194   - printk("):\n");
195   - printk("------------------------------\n");
196   - }
197   -
198   -next:
199   - spin_lock_irqsave(&task->held_list_lock, flags);
200   - list_for_each(curr, &task->held_list_head) {
201   - if (cursor && curr != cursor)
202   - continue;
203   - lock = list_entry(curr, struct rt_mutex, held_list_entry);
204   - t = rt_mutex_owner(lock);
205   - WARN_ON(t != task);
206   - count++;
207   - cursor = curr->next;
208   - spin_unlock_irqrestore(&task->held_list_lock, flags);
209   -
210   - printk("\n#%03d: ", count);
211   - printk_lock(lock, 0);
212   - goto next;
213   - }
214   - spin_unlock_irqrestore(&task->held_list_lock, flags);
215   -
216   - printk("\n");
217   -}
218   -
219   -void rt_mutex_show_all_locks(void)
220   -{
221   - task_t *g, *p;
222   - int count = 10;
223   - int unlock = 1;
224   -
225   - printk("\n");
226   - printk("----------------------\n");
227   - printk("| showing all tasks: |\n");
228   - printk("----------------------\n");
229   -
230   - /*
231   - * Here we try to get the tasklist_lock as hard as possible,
232   - * if not successful after 2 seconds we ignore it (but keep
233   - * trying). This is to enable a debug printout even if a
234   - * tasklist_lock-holding task deadlocks or crashes.
235   - */
236   -retry:
237   - if (!read_trylock(&tasklist_lock)) {
238   - if (count == 10)
239   - printk("hm, tasklist_lock locked, retrying... ");
240   - if (count) {
241   - count--;
242   - printk(" #%d", 10-count);
243   - mdelay(200);
244   - goto retry;
245   - }
246   - printk(" ignoring it.\n");
247   - unlock = 0;
248   - }
249   - if (count != 10)
250   - printk(" locked it.\n");
251   -
252   - do_each_thread(g, p) {
253   - show_task_locks(p);
254   - if (!unlock)
255   - if (read_trylock(&tasklist_lock))
256   - unlock = 1;
257   - } while_each_thread(g, p);
258   -
259   - printk("\n");
260   -
261   - printk("-----------------------------------------\n");
262   - printk("| showing all locks held in the system: |\n");
263   - printk("-----------------------------------------\n");
264   -
265   - do_each_thread(g, p) {
266   - rt_mutex_show_held_locks(p, 0);
267   - if (!unlock)
268   - if (read_trylock(&tasklist_lock))
269   - unlock = 1;
270   - } while_each_thread(g, p);
271   -
272   -
273   - printk("=============================================\n\n");
274   -
275   - if (unlock)
276   - read_unlock(&tasklist_lock);
277   -}
278   -
279   -void rt_mutex_debug_check_no_locks_held(task_t *task)
280   -{
281   - struct rt_mutex_waiter *w;
282   - struct list_head *curr;
283   - struct rt_mutex *lock;
284   -
285   - if (!rt_trace_on)
286   - return;
287   - if (!rt_prio(task->normal_prio) && rt_prio(task->prio)) {
288   - printk("BUG: PI priority boost leaked!\n");
289   - printk_task(task);
290   - printk("\n");
291   - }
292   - if (list_empty(&task->held_list_head))
293   - return;
294   -
295   - spin_lock(&task->pi_lock);
296   - plist_for_each_entry(w, &task->pi_waiters, pi_list_entry) {
297   - TRACE_OFF();
298   -
299   - printk("hm, PI interest held at exit time? Task:\n");
300   - printk_task(task);
301   - printk_waiter(w);
302   - return;
303   - }
304   - spin_unlock(&task->pi_lock);
305   -
306   - list_for_each(curr, &task->held_list_head) {
307   - lock = list_entry(curr, struct rt_mutex, held_list_entry);
308   -
309   - printk("BUG: %s/%d, lock held at task exit time!\n",
310   - task->comm, task->pid);
311   - printk_lock(lock, 1);
312   - if (rt_mutex_owner(lock) != task)
313   - printk("exiting task is not even the owner??\n");
314   - }
315   -}
316   -
317   -int rt_mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
318   -{
319   - const void *to = from + len;
320   - struct list_head *curr;
321   - struct rt_mutex *lock;
322   - unsigned long flags;
323   - void *lock_addr;
324   -
325   - if (!rt_trace_on)
326   - return 0;
327   -
328   - spin_lock_irqsave(&current->held_list_lock, flags);
329   - list_for_each(curr, &current->held_list_head) {
330   - lock = list_entry(curr, struct rt_mutex, held_list_entry);
331   - lock_addr = lock;
332   - if (lock_addr < from || lock_addr >= to)
333   - continue;
334   - TRACE_OFF();
335   -
336   - printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n",
337   - current->comm, current->pid, lock, from, to);
338   - dump_stack();
339   - printk_lock(lock, 1);
340   - if (rt_mutex_owner(lock) != current)
341   - printk("freeing task is not even the owner??\n");
342   - return 1;
343   - }
344   - spin_unlock_irqrestore(&current->held_list_lock, flags);
345   -
346   - return 0;
347   -}
348   -
349 124 void rt_mutex_debug_task_free(struct task_struct *task)
350 125 {
351 126 WARN_ON(!plist_head_empty(&task->pi_waiters));
352 127  
353 128  
354 129  
355 130  
356 131  
357 132  
358 133  
359 134  
... ... @@ -395,85 +170,41 @@
395 170 current->comm, current->pid);
396 171 printk_lock(waiter->lock, 1);
397 172  
398   - printk("... trying at: ");
399   - print_symbol("%s\n", waiter->ip);
400   -
401 173 printk("\n2) %s/%d is blocked on this lock:\n", task->comm, task->pid);
402 174 printk_lock(waiter->deadlock_lock, 1);
403 175  
404   - rt_mutex_show_held_locks(current, 1);
405   - rt_mutex_show_held_locks(task, 1);
  176 + debug_show_held_locks(current);
  177 + debug_show_held_locks(task);
406 178  
407 179 printk("\n%s/%d's [blocked] stackdump:\n\n", task->comm, task->pid);
408 180 show_stack(task, NULL);
409 181 printk("\n%s/%d's [current] stackdump:\n\n",
410 182 current->comm, current->pid);
411 183 dump_stack();
412   - rt_mutex_show_all_locks();
  184 + debug_show_all_locks();
  185 +
413 186 printk("[ turning off deadlock detection."
414 187 "Please report this trace. ]\n\n");
415 188 local_irq_disable();
416 189 }
417 190  
418   -void debug_rt_mutex_lock(struct rt_mutex *lock __IP_DECL__)
  191 +void debug_rt_mutex_lock(struct rt_mutex *lock)
419 192 {
420   - unsigned long flags;
421   -
422   - if (rt_trace_on) {
423   - TRACE_WARN_ON_LOCKED(!list_empty(&lock->held_list_entry));
424   -
425   - spin_lock_irqsave(&current->held_list_lock, flags);
426   - list_add_tail(&lock->held_list_entry, &current->held_list_head);
427   - spin_unlock_irqrestore(&current->held_list_lock, flags);
428   -
429   - lock->acquire_ip = ip;
430   - }
431 193 }
432 194  
433 195 void debug_rt_mutex_unlock(struct rt_mutex *lock)
434 196 {
435   - unsigned long flags;
436   -
437   - if (rt_trace_on) {
438   - TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current);
439   - TRACE_WARN_ON_LOCKED(list_empty(&lock->held_list_entry));
440   -
441   - spin_lock_irqsave(&current->held_list_lock, flags);
442   - list_del_init(&lock->held_list_entry);
443   - spin_unlock_irqrestore(&current->held_list_lock, flags);
444   - }
  197 + TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current);
445 198 }
446 199  
447   -void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
448   - struct task_struct *powner __IP_DECL__)
  200 +void
  201 +debug_rt_mutex_proxy_lock(struct rt_mutex *lock, struct task_struct *powner)
449 202 {
450   - unsigned long flags;
451   -
452   - if (rt_trace_on) {
453   - TRACE_WARN_ON_LOCKED(!list_empty(&lock->held_list_entry));
454   -
455   - spin_lock_irqsave(&powner->held_list_lock, flags);
456   - list_add_tail(&lock->held_list_entry, &powner->held_list_head);
457   - spin_unlock_irqrestore(&powner->held_list_lock, flags);
458   -
459   - lock->acquire_ip = ip;
460   - }
461 203 }
462 204  
463 205 void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
464 206 {
465   - unsigned long flags;
466   -
467   - if (rt_trace_on) {
468   - struct task_struct *owner = rt_mutex_owner(lock);
469   -
470   - TRACE_WARN_ON_LOCKED(!owner);
471   - TRACE_WARN_ON_LOCKED(list_empty(&lock->held_list_entry));
472   -
473   - spin_lock_irqsave(&owner->held_list_lock, flags);
474   - list_del_init(&lock->held_list_entry);
475   - spin_unlock_irqrestore(&owner->held_list_lock, flags);
476   - }
  207 + TRACE_WARN_ON_LOCKED(!rt_mutex_owner(lock));
477 208 }
478 209  
479 210 void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
... ... @@ -493,14 +224,11 @@
493 224  
494 225 void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
495 226 {
496   - void *addr = lock;
497   -
498   - if (rt_trace_on) {
499   - rt_mutex_debug_check_no_locks_freed(addr,
500   - sizeof(struct rt_mutex));
501   - INIT_LIST_HEAD(&lock->held_list_entry);
502   - lock->name = name;
503   - }
  227 + /*
  228 + * Make sure we are not reinitializing a held lock:
  229 + */
  230 + debug_check_no_locks_freed((void *)lock, sizeof(*lock));
  231 + lock->name = name;
504 232 }
505 233  
506 234 void rt_mutex_deadlock_account_lock(struct rt_mutex *lock, task_t *task)
kernel/rtmutex-debug.h
... ... @@ -9,20 +9,16 @@
9 9 * This file contains macros used solely by rtmutex.c. Debug version.
10 10 */
11 11  
12   -#define __IP_DECL__ , unsigned long ip
13   -#define __IP__ , ip
14   -#define __RET_IP__ , (unsigned long)__builtin_return_address(0)
15   -
16 12 extern void
17 13 rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task);
18 14 extern void rt_mutex_deadlock_account_unlock(struct task_struct *task);
19 15 extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
20 16 extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
21 17 extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
22   -extern void debug_rt_mutex_lock(struct rt_mutex *lock __IP_DECL__);
  18 +extern void debug_rt_mutex_lock(struct rt_mutex *lock);
23 19 extern void debug_rt_mutex_unlock(struct rt_mutex *lock);
24 20 extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
25   - struct task_struct *powner __IP_DECL__);
  21 + struct task_struct *powner);
26 22 extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock);
27 23 extern void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *waiter,
28 24 struct rt_mutex *lock);
kernel/rtmutex.c
... ... @@ -161,8 +161,7 @@
161 161 int deadlock_detect,
162 162 struct rt_mutex *orig_lock,
163 163 struct rt_mutex_waiter *orig_waiter,
164   - struct task_struct *top_task
165   - __IP_DECL__)
  164 + struct task_struct *top_task)
166 165 {
167 166 struct rt_mutex *lock;
168 167 struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
... ... @@ -357,7 +356,7 @@
357 356 *
358 357 * Must be called with lock->wait_lock held.
359 358 */
360   -static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__)
  359 +static int try_to_take_rt_mutex(struct rt_mutex *lock)
361 360 {
362 361 /*
363 362 * We have to be careful here if the atomic speedups are
... ... @@ -384,7 +383,7 @@
384 383 return 0;
385 384  
386 385 /* We got the lock. */
387   - debug_rt_mutex_lock(lock __IP__);
  386 + debug_rt_mutex_lock(lock);
388 387  
389 388 rt_mutex_set_owner(lock, current, 0);
390 389  
... ... @@ -402,8 +401,7 @@
402 401 */
403 402 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
404 403 struct rt_mutex_waiter *waiter,
405   - int detect_deadlock
406   - __IP_DECL__)
  404 + int detect_deadlock)
407 405 {
408 406 struct rt_mutex_waiter *top_waiter = waiter;
409 407 task_t *owner = rt_mutex_owner(lock);
... ... @@ -454,7 +452,7 @@
454 452 spin_unlock(&lock->wait_lock);
455 453  
456 454 res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
457   - current __IP__);
  455 + current);
458 456  
459 457 spin_lock(&lock->wait_lock);
460 458  
... ... @@ -526,7 +524,7 @@
526 524 * Must be called with lock->wait_lock held
527 525 */
528 526 static void remove_waiter(struct rt_mutex *lock,
529   - struct rt_mutex_waiter *waiter __IP_DECL__)
  527 + struct rt_mutex_waiter *waiter)
530 528 {
531 529 int first = (waiter == rt_mutex_top_waiter(lock));
532 530 int boost = 0;
... ... @@ -568,7 +566,7 @@
568 566  
569 567 spin_unlock(&lock->wait_lock);
570 568  
571   - rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current __IP__);
  569 + rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
572 570  
573 571 spin_lock(&lock->wait_lock);
574 572 }
... ... @@ -595,7 +593,7 @@
595 593 get_task_struct(task);
596 594 spin_unlock_irqrestore(&task->pi_lock, flags);
597 595  
598   - rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task __RET_IP__);
  596 + rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
599 597 }
600 598  
601 599 /*
... ... @@ -604,7 +602,7 @@
604 602 static int __sched
605 603 rt_mutex_slowlock(struct rt_mutex *lock, int state,
606 604 struct hrtimer_sleeper *timeout,
607   - int detect_deadlock __IP_DECL__)
  605 + int detect_deadlock)
608 606 {
609 607 struct rt_mutex_waiter waiter;
610 608 int ret = 0;
... ... @@ -615,7 +613,7 @@
615 613 spin_lock(&lock->wait_lock);
616 614  
617 615 /* Try to acquire the lock again: */
618   - if (try_to_take_rt_mutex(lock __IP__)) {
  616 + if (try_to_take_rt_mutex(lock)) {
619 617 spin_unlock(&lock->wait_lock);
620 618 return 0;
621 619 }
... ... @@ -629,7 +627,7 @@
629 627  
630 628 for (;;) {
631 629 /* Try to acquire the lock: */
632   - if (try_to_take_rt_mutex(lock __IP__))
  630 + if (try_to_take_rt_mutex(lock))
633 631 break;
634 632  
635 633 /*
... ... @@ -653,7 +651,7 @@
653 651 */
654 652 if (!waiter.task) {
655 653 ret = task_blocks_on_rt_mutex(lock, &waiter,
656   - detect_deadlock __IP__);
  654 + detect_deadlock);
657 655 /*
658 656 * If we got woken up by the owner then start loop
659 657 * all over without going into schedule to try
... ... @@ -680,7 +678,7 @@
680 678 set_current_state(TASK_RUNNING);
681 679  
682 680 if (unlikely(waiter.task))
683   - remove_waiter(lock, &waiter __IP__);
  681 + remove_waiter(lock, &waiter);
684 682  
685 683 /*
686 684 * try_to_take_rt_mutex() sets the waiter bit
... ... @@ -711,7 +709,7 @@
711 709 * Slow path try-lock function:
712 710 */
713 711 static inline int
714   -rt_mutex_slowtrylock(struct rt_mutex *lock __IP_DECL__)
  712 +rt_mutex_slowtrylock(struct rt_mutex *lock)
715 713 {
716 714 int ret = 0;
717 715  
... ... @@ -719,7 +717,7 @@
719 717  
720 718 if (likely(rt_mutex_owner(lock) != current)) {
721 719  
722   - ret = try_to_take_rt_mutex(lock __IP__);
  720 + ret = try_to_take_rt_mutex(lock);
723 721 /*
724 722 * try_to_take_rt_mutex() sets the lock waiters
725 723 * bit unconditionally. Clean this up.
726 724  
... ... @@ -769,13 +767,13 @@
769 767 int detect_deadlock,
770 768 int (*slowfn)(struct rt_mutex *lock, int state,
771 769 struct hrtimer_sleeper *timeout,
772   - int detect_deadlock __IP_DECL__))
  770 + int detect_deadlock))
773 771 {
774 772 if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
775 773 rt_mutex_deadlock_account_lock(lock, current);
776 774 return 0;
777 775 } else
778   - return slowfn(lock, state, NULL, detect_deadlock __RET_IP__);
  776 + return slowfn(lock, state, NULL, detect_deadlock);
779 777 }
780 778  
781 779 static inline int
782 780  
783 781  
784 782  
... ... @@ -783,24 +781,24 @@
783 781 struct hrtimer_sleeper *timeout, int detect_deadlock,
784 782 int (*slowfn)(struct rt_mutex *lock, int state,
785 783 struct hrtimer_sleeper *timeout,
786   - int detect_deadlock __IP_DECL__))
  784 + int detect_deadlock))
787 785 {
788 786 if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
789 787 rt_mutex_deadlock_account_lock(lock, current);
790 788 return 0;
791 789 } else
792   - return slowfn(lock, state, timeout, detect_deadlock __RET_IP__);
  790 + return slowfn(lock, state, timeout, detect_deadlock);
793 791 }
794 792  
795 793 static inline int
796 794 rt_mutex_fasttrylock(struct rt_mutex *lock,
797   - int (*slowfn)(struct rt_mutex *lock __IP_DECL__))
  795 + int (*slowfn)(struct rt_mutex *lock))
798 796 {
799 797 if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
800 798 rt_mutex_deadlock_account_lock(lock, current);
801 799 return 1;
802 800 }
803   - return slowfn(lock __RET_IP__);
  801 + return slowfn(lock);
804 802 }
805 803  
806 804 static inline void
... ... @@ -948,7 +946,7 @@
948 946 struct task_struct *proxy_owner)
949 947 {
950 948 __rt_mutex_init(lock, NULL);
951   - debug_rt_mutex_proxy_lock(lock, proxy_owner __RET_IP__);
  949 + debug_rt_mutex_proxy_lock(lock, proxy_owner);
952 950 rt_mutex_set_owner(lock, proxy_owner, 0);
953 951 rt_mutex_deadlock_account_lock(lock, proxy_owner);
954 952 }
kernel/rtmutex.h
... ... @@ -10,9 +10,6 @@
10 10 * Non-debug version.
11 11 */
12 12  
13   -#define __IP_DECL__
14   -#define __IP__
15   -#define __RET_IP__
16 13 #define rt_mutex_deadlock_check(l) (0)
17 14 #define rt_mutex_deadlock_account_lock(m, t) do { } while (0)
18 15 #define rt_mutex_deadlock_account_unlock(l) do { } while (0)
kernel/sched.c
... ... @@ -30,6 +30,7 @@
30 30 #include <linux/capability.h>
31 31 #include <linux/completion.h>
32 32 #include <linux/kernel_stat.h>
  33 +#include <linux/debug_locks.h>
33 34 #include <linux/security.h>
34 35 #include <linux/notifier.h>
35 36 #include <linux/profile.h>
36 37  
... ... @@ -3142,12 +3143,13 @@
3142 3143 /*
3143 3144 * Underflow?
3144 3145 */
3145   - BUG_ON((preempt_count() < 0));
  3146 + if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
  3147 + return;
3146 3148 preempt_count() += val;
3147 3149 /*
3148 3150 * Spinlock count overflowing soon?
3149 3151 */
3150   - BUG_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK-10);
  3152 + DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK-10);
3151 3153 }
3152 3154 EXPORT_SYMBOL(add_preempt_count);
3153 3155  
3154 3156  
... ... @@ -3156,11 +3158,15 @@
3156 3158 /*
3157 3159 * Underflow?
3158 3160 */
3159   - BUG_ON(val > preempt_count());
  3161 + if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
  3162 + return;
3160 3163 /*
3161 3164 * Is the spinlock portion underflowing?
3162 3165 */
3163   - BUG_ON((val < PREEMPT_MASK) && !(preempt_count() & PREEMPT_MASK));
  3166 + if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
  3167 + !(preempt_count() & PREEMPT_MASK)))
  3168 + return;
  3169 +
3164 3170 preempt_count() -= val;
3165 3171 }
3166 3172 EXPORT_SYMBOL(sub_preempt_count);
... ... @@ -4690,7 +4696,7 @@
4690 4696 } while_each_thread(g, p);
4691 4697  
4692 4698 read_unlock(&tasklist_lock);
4693   - mutex_debug_show_all_locks();
  4699 + debug_show_all_locks();
4694 4700 }
4695 4701  
4696 4702 /**
lib/Makefile
... ... @@ -11,7 +11,7 @@
11 11  
12 12 lib-y += kobject.o kref.o kobject_uevent.o klist.o
13 13  
14   -obj-y += sort.o parser.o halfmd4.o iomap_copy.o
  14 +obj-y += sort.o parser.o halfmd4.o iomap_copy.o debug_locks.o
15 15  
16 16 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
17 17 CFLAGS_kobject.o += -DDEBUG
lib/debug_locks.c
  1 +/*
  2 + * lib/debug_locks.c
  3 + *
  4 + * Generic place for common debugging facilities for various locks:
  5 + * spinlocks, rwlocks, mutexes and rwsems.
  6 + *
  7 + * Started by Ingo Molnar:
  8 + *
  9 + * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  10 + */
  11 +#include <linux/rwsem.h>
  12 +#include <linux/mutex.h>
  13 +#include <linux/module.h>
  14 +#include <linux/spinlock.h>
  15 +#include <linux/debug_locks.h>
  16 +
  17 +/*
  18 + * We want to turn all lock-debugging facilities on/off at once,
  19 + * via a global flag. The reason is that once a single bug has been
  20 + * detected and reported, there might be cascade of followup bugs
  21 + * that would just muddy the log. So we report the first one and
  22 + * shut up after that.
  23 + */
  24 +int debug_locks = 1;
  25 +
  26 +/*
  27 + * The locking-testsuite uses <debug_locks_silent> to get a
  28 + * 'silent failure': nothing is printed to the console when
  29 + * a locking bug is detected.
  30 + */
  31 +int debug_locks_silent;
  32 +
  33 +/*
  34 + * Generic 'turn off all lock debugging' function:
  35 + */
  36 +int debug_locks_off(void)
  37 +{
  38 + if (xchg(&debug_locks, 0)) {
  39 + if (!debug_locks_silent) {
  40 + console_verbose();
  41 + return 1;
  42 + }
  43 + }
  44 + return 0;
  45 +}
lib/spinlock_debug.c
... ... @@ -8,38 +8,35 @@
8 8  
9 9 #include <linux/spinlock.h>
10 10 #include <linux/interrupt.h>
  11 +#include <linux/debug_locks.h>
11 12 #include <linux/delay.h>
  13 +#include <linux/module.h>
12 14  
13 15 static void spin_bug(spinlock_t *lock, const char *msg)
14 16 {
15   - static long print_once = 1;
16 17 struct task_struct *owner = NULL;
17 18  
18   - if (xchg(&print_once, 0)) {
19   - if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
20   - owner = lock->owner;
21   - printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
22   - msg, raw_smp_processor_id(),
23   - current->comm, current->pid);
24   - printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
25   - ".owner_cpu: %d\n",
26   - lock, lock->magic,
27   - owner ? owner->comm : "<none>",
28   - owner ? owner->pid : -1,
29   - lock->owner_cpu);
30   - dump_stack();
31   -#ifdef CONFIG_SMP
32   - /*
33   - * We cannot continue on SMP:
34   - */
35   -// panic("bad locking");
36   -#endif
37   - }
  19 + if (!debug_locks_off())
  20 + return;
  21 +
  22 + if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
  23 + owner = lock->owner;
  24 + printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
  25 + msg, raw_smp_processor_id(),
  26 + current->comm, current->pid);
  27 + printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
  28 + ".owner_cpu: %d\n",
  29 + lock, lock->magic,
  30 + owner ? owner->comm : "<none>",
  31 + owner ? owner->pid : -1,
  32 + lock->owner_cpu);
  33 + dump_stack();
38 34 }
39 35  
40 36 #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
41 37  
42   -static inline void debug_spin_lock_before(spinlock_t *lock)
  38 +static inline void
  39 +debug_spin_lock_before(spinlock_t *lock)
43 40 {
44 41 SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
45 42 SPIN_BUG_ON(lock->owner == current, lock, "recursion");
46 43  
... ... @@ -118,20 +115,13 @@
118 115  
119 116 static void rwlock_bug(rwlock_t *lock, const char *msg)
120 117 {
121   - static long print_once = 1;
  118 + if (!debug_locks_off())
  119 + return;
122 120  
123   - if (xchg(&print_once, 0)) {
124   - printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
125   - msg, raw_smp_processor_id(), current->comm,
126   - current->pid, lock);
127   - dump_stack();
128   -#ifdef CONFIG_SMP
129   - /*
130   - * We cannot continue on SMP:
131   - */
132   - panic("bad locking");
133   -#endif
134   - }
  121 + printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
  122 + msg, raw_smp_processor_id(), current->comm,
  123 + current->pid, lock);
  124 + dump_stack();
135 125 }
136 126  
137 127 #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
mm/vmalloc.c
... ... @@ -330,6 +330,8 @@
330 330 return;
331 331 }
332 332  
  333 + debug_check_no_locks_freed(addr, area->size);
  334 +
333 335 if (deallocate_pages) {
334 336 int i;
335 337