Commit 4ea2176dfa714882e88180b474e4cbcd888b70af

Authored by Ingo Molnar
Committed by Linus Torvalds
1 parent a8f24a3978

[PATCH] lockdep: prove rwsem locking correctness

Use the lock validator framework to prove rwsem locking correctness.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 9 changed files with 184 additions and 51 deletions Side-by-side Diff

include/asm-i386/rwsem.h
... ... @@ -40,6 +40,7 @@
40 40  
41 41 #include <linux/list.h>
42 42 #include <linux/spinlock.h>
  43 +#include <linux/lockdep.h>
43 44  
44 45 struct rwsem_waiter;
45 46  
46 47  
47 48  
48 49  
49 50  
... ... @@ -61,22 +62,35 @@
61 62 #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
62 63 spinlock_t wait_lock;
63 64 struct list_head wait_list;
  65 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
  66 + struct lockdep_map dep_map;
  67 +#endif
64 68 };
65 69  
  70 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
  71 +# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
  72 +#else
  73 +# define __RWSEM_DEP_MAP_INIT(lockname)
  74 +#endif
  75 +
  76 +
66 77 #define __RWSEM_INITIALIZER(name) \
67 78 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
68   - }
  79 + __RWSEM_DEP_MAP_INIT(name) }
69 80  
70 81 #define DECLARE_RWSEM(name) \
71 82 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
72 83  
73   -static inline void init_rwsem(struct rw_semaphore *sem)
74   -{
75   - sem->count = RWSEM_UNLOCKED_VALUE;
76   - spin_lock_init(&sem->wait_lock);
77   - INIT_LIST_HEAD(&sem->wait_list);
78   -}
  84 +extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
  85 + struct lock_class_key *key);
79 86  
  87 +#define init_rwsem(sem) \
  88 +do { \
  89 + static struct lock_class_key __key; \
  90 + \
  91 + __init_rwsem((sem), #sem, &__key); \
  92 +} while (0)
  93 +
80 94 /*
81 95 * lock for reading
82 96 */
... ... @@ -128,7 +142,7 @@
128 142 /*
129 143 * lock for writing
130 144 */
131   -static inline void __down_write(struct rw_semaphore *sem)
  145 +static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
132 146 {
133 147 int tmp;
134 148  
... ... @@ -150,6 +164,11 @@
150 164 : "=m"(sem->count), "=d"(tmp)
151 165 : "a"(sem), "1"(tmp), "m"(sem->count)
152 166 : "memory", "cc");
  167 +}
  168 +
  169 +static inline void __down_write(struct rw_semaphore *sem)
  170 +{
  171 + __down_write_nested(sem, 0);
153 172 }
154 173  
155 174 /*
include/asm-s390/rwsem.h
... ... @@ -61,6 +61,9 @@
61 61 signed long count;
62 62 spinlock_t wait_lock;
63 63 struct list_head wait_list;
  64 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
  65 + struct lockdep_map dep_map;
  66 +#endif
64 67 };
65 68  
66 69 #ifndef __s390x__
67 70  
... ... @@ -80,8 +83,16 @@
80 83 /*
81 84 * initialisation
82 85 */
  86 +
  87 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
  88 +# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
  89 +#else
  90 +# define __RWSEM_DEP_MAP_INIT(lockname)
  91 +#endif
  92 +
83 93 #define __RWSEM_INITIALIZER(name) \
84   -{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) }
  94 +{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
  95 + __RWSEM_DEP_MAP_INIT(name) }
85 96  
86 97 #define DECLARE_RWSEM(name) \
87 98 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
... ... @@ -93,6 +104,17 @@
93 104 INIT_LIST_HEAD(&sem->wait_list);
94 105 }
95 106  
  107 +extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
  108 + struct lock_class_key *key);
  109 +
  110 +#define init_rwsem(sem) \
  111 +do { \
  112 + static struct lock_class_key __key; \
  113 + \
  114 + __init_rwsem((sem), #sem, &__key); \
  115 +} while (0)
  116 +
  117 +
96 118 /*
97 119 * lock for reading
98 120 */
... ... @@ -155,7 +177,7 @@
155 177 /*
156 178 * lock for writing
157 179 */
158   -static inline void __down_write(struct rw_semaphore *sem)
  180 +static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
159 181 {
160 182 signed long old, new, tmp;
161 183  
... ... @@ -179,6 +201,11 @@
179 201 : "cc", "memory" );
180 202 if (old != 0)
181 203 rwsem_down_write_failed(sem);
  204 +}
  205 +
  206 +static inline void __down_write(struct rw_semaphore *sem)
  207 +{
  208 + __down_write_nested(sem, 0);
182 209 }
183 210  
184 211 /*
include/asm-s390/semaphore.h
... ... @@ -37,7 +37,8 @@
37 37  
38 38 static inline void sema_init (struct semaphore *sem, int val)
39 39 {
40   - *sem = (struct semaphore) __SEMAPHORE_INITIALIZER((*sem),val);
  40 + atomic_set(&sem->count, val);
  41 + init_waitqueue_head(&sem->wait);
41 42 }
42 43  
43 44 static inline void init_MUTEX (struct semaphore *sem)
include/linux/rwsem-spinlock.h
... ... @@ -32,18 +32,37 @@
32 32 __s32 activity;
33 33 spinlock_t wait_lock;
34 34 struct list_head wait_list;
  35 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
  36 + struct lockdep_map dep_map;
  37 +#endif
35 38 };
36 39  
  40 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
  41 +# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
  42 +#else
  43 +# define __RWSEM_DEP_MAP_INIT(lockname)
  44 +#endif
  45 +
37 46 #define __RWSEM_INITIALIZER(name) \
38   -{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) }
  47 +{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
39 48  
40 49 #define DECLARE_RWSEM(name) \
41 50 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
42 51  
43   -extern void FASTCALL(init_rwsem(struct rw_semaphore *sem));
  52 +extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
  53 + struct lock_class_key *key);
  54 +
  55 +#define init_rwsem(sem) \
  56 +do { \
  57 + static struct lock_class_key __key; \
  58 + \
  59 + __init_rwsem((sem), #sem, &__key); \
  60 +} while (0)
  61 +
44 62 extern void FASTCALL(__down_read(struct rw_semaphore *sem));
45 63 extern int FASTCALL(__down_read_trylock(struct rw_semaphore *sem));
46 64 extern void FASTCALL(__down_write(struct rw_semaphore *sem));
  65 +extern void FASTCALL(__down_write_nested(struct rw_semaphore *sem, int subclass));
47 66 extern int FASTCALL(__down_write_trylock(struct rw_semaphore *sem));
48 67 extern void FASTCALL(__up_read(struct rw_semaphore *sem));
49 68 extern void FASTCALL(__up_write(struct rw_semaphore *sem));
include/linux/rwsem.h
... ... @@ -27,64 +27,55 @@
27 27 /*
28 28 * lock for reading
29 29 */
30   -static inline void down_read(struct rw_semaphore *sem)
31   -{
32   - might_sleep();
33   - __down_read(sem);
34   -}
  30 +extern void down_read(struct rw_semaphore *sem);
35 31  
36 32 /*
37 33 * trylock for reading -- returns 1 if successful, 0 if contention
38 34 */
39   -static inline int down_read_trylock(struct rw_semaphore *sem)
40   -{
41   - int ret;
42   - ret = __down_read_trylock(sem);
43   - return ret;
44   -}
  35 +extern int down_read_trylock(struct rw_semaphore *sem);
45 36  
46 37 /*
47 38 * lock for writing
48 39 */
49   -static inline void down_write(struct rw_semaphore *sem)
50   -{
51   - might_sleep();
52   - __down_write(sem);
53   -}
  40 +extern void down_write(struct rw_semaphore *sem);
54 41  
55 42 /*
56 43 * trylock for writing -- returns 1 if successful, 0 if contention
57 44 */
58   -static inline int down_write_trylock(struct rw_semaphore *sem)
59   -{
60   - int ret;
61   - ret = __down_write_trylock(sem);
62   - return ret;
63   -}
  45 +extern int down_write_trylock(struct rw_semaphore *sem);
64 46  
65 47 /*
66 48 * release a read lock
67 49 */
68   -static inline void up_read(struct rw_semaphore *sem)
69   -{
70   - __up_read(sem);
71   -}
  50 +extern void up_read(struct rw_semaphore *sem);
72 51  
73 52 /*
74 53 * release a write lock
75 54 */
76   -static inline void up_write(struct rw_semaphore *sem)
77   -{
78   - __up_write(sem);
79   -}
  55 +extern void up_write(struct rw_semaphore *sem);
80 56  
81 57 /*
82 58 * downgrade write lock to read lock
83 59 */
84   -static inline void downgrade_write(struct rw_semaphore *sem)
85   -{
86   - __downgrade_write(sem);
87   -}
  60 +extern void downgrade_write(struct rw_semaphore *sem);
  61 +
  62 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
  63 +/*
  64 + * nested locking:
  65 + */
  66 +extern void down_read_nested(struct rw_semaphore *sem, int subclass);
  67 +extern void down_write_nested(struct rw_semaphore *sem, int subclass);
  68 +/*
  69 + * Take/release a lock when not the owner will release it:
  70 + */
  71 +extern void down_read_non_owner(struct rw_semaphore *sem);
  72 +extern void up_read_non_owner(struct rw_semaphore *sem);
  73 +#else
  74 +# define down_read_nested(sem, subclass) down_read(sem)
  75 +# define down_write_nested(sem, subclass) down_write(sem)
  76 +# define down_read_non_owner(sem) down_read(sem)
  77 +# define up_read_non_owner(sem) up_read(sem)
  78 +#endif
88 79  
89 80 #endif /* __KERNEL__ */
90 81 #endif /* _LINUX_RWSEM_H */
kernel/Makefile
... ... @@ -8,7 +8,7 @@
8 8 signal.o sys.o kmod.o workqueue.o pid.o \
9 9 rcupdate.o extable.o params.o posix-timers.o \
10 10 kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
11   - hrtimer.o
  11 + hrtimer.o rwsem.o
12 12  
13 13 obj-$(CONFIG_STACKTRACE) += stacktrace.o
14 14 obj-y += time/
kernel/rwsem.c
... ... @@ -103,4 +103,44 @@
103 103 }
104 104  
105 105 EXPORT_SYMBOL(downgrade_write);
  106 +
  107 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
  108 +
  109 +void down_read_nested(struct rw_semaphore *sem, int subclass)
  110 +{
  111 + might_sleep();
  112 + rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
  113 +
  114 + __down_read(sem);
  115 +}
  116 +
  117 +EXPORT_SYMBOL(down_read_nested);
  118 +
  119 +void down_read_non_owner(struct rw_semaphore *sem)
  120 +{
  121 + might_sleep();
  122 +
  123 + __down_read(sem);
  124 +}
  125 +
  126 +EXPORT_SYMBOL(down_read_non_owner);
  127 +
  128 +void down_write_nested(struct rw_semaphore *sem, int subclass)
  129 +{
  130 + might_sleep();
  131 + rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
  132 +
  133 + __down_write_nested(sem, subclass);
  134 +}
  135 +
  136 +EXPORT_SYMBOL(down_write_nested);
  137 +
  138 +void up_read_non_owner(struct rw_semaphore *sem)
  139 +{
  140 + __up_read(sem);
  141 +}
  142 +
  143 +EXPORT_SYMBOL(up_read_non_owner);
  144 +
  145 +#endif
lib/rwsem-spinlock.c
... ... @@ -20,8 +20,16 @@
20 20 /*
21 21 * initialise the semaphore
22 22 */
23   -void fastcall init_rwsem(struct rw_semaphore *sem)
  23 +void __init_rwsem(struct rw_semaphore *sem, const char *name,
  24 + struct lock_class_key *key)
24 25 {
  26 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
  27 + /*
  28 + * Make sure we are not reinitializing a held semaphore:
  29 + */
  30 + debug_check_no_locks_freed((void *)sem, sizeof(*sem));
  31 + lockdep_init_map(&sem->dep_map, name, key);
  32 +#endif
25 33 sem->activity = 0;
26 34 spin_lock_init(&sem->wait_lock);
27 35 INIT_LIST_HEAD(&sem->wait_list);
... ... @@ -183,7 +191,7 @@
183 191 * get a write lock on the semaphore
184 192 * - we increment the waiting count anyway to indicate an exclusive lock
185 193 */
186   -void fastcall __sched __down_write(struct rw_semaphore *sem)
  194 +void fastcall __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
187 195 {
188 196 struct rwsem_waiter waiter;
189 197 struct task_struct *tsk;
... ... @@ -223,6 +231,11 @@
223 231 ;
224 232 }
225 233  
  234 +void fastcall __sched __down_write(struct rw_semaphore *sem)
  235 +{
  236 + __down_write_nested(sem, 0);
  237 +}
  238 +
226 239 /*
227 240 * trylock for writing -- returns 1 if successful, 0 if contention
228 241 */
229 242  
... ... @@ -292,9 +305,10 @@
292 305 spin_unlock_irqrestore(&sem->wait_lock, flags);
293 306 }
294 307  
295   -EXPORT_SYMBOL(init_rwsem);
  308 +EXPORT_SYMBOL(__init_rwsem);
296 309 EXPORT_SYMBOL(__down_read);
297 310 EXPORT_SYMBOL(__down_read_trylock);
  311 +EXPORT_SYMBOL(__down_write_nested);
298 312 EXPORT_SYMBOL(__down_write);
299 313 EXPORT_SYMBOL(__down_write_trylock);
300 314 EXPORT_SYMBOL(__up_read);
lib/rwsem.c
... ... @@ -8,6 +8,26 @@
8 8 #include <linux/init.h>
9 9 #include <linux/module.h>
10 10  
  11 +/*
  12 + * Initialize an rwsem:
  13 + */
  14 +void __init_rwsem(struct rw_semaphore *sem, const char *name,
  15 + struct lock_class_key *key)
  16 +{
  17 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
  18 + /*
  19 + * Make sure we are not reinitializing a held semaphore:
  20 + */
  21 + debug_check_no_locks_freed((void *)sem, sizeof(*sem));
  22 + lockdep_init_map(&sem->dep_map, name, key);
  23 +#endif
  24 + sem->count = RWSEM_UNLOCKED_VALUE;
  25 + spin_lock_init(&sem->wait_lock);
  26 + INIT_LIST_HEAD(&sem->wait_list);
  27 +}
  28 +
  29 +EXPORT_SYMBOL(__init_rwsem);
  30 +
11 31 struct rwsem_waiter {
12 32 struct list_head list;
13 33 struct task_struct *task;