Commit 0586bed3e8563c2eb89bc7256e30ce633ae06cfb

Authored by Linus Torvalds

Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  rtmutex: tester: Remove the remaining BKL leftovers
  lockdep/timers: Explain in detail the locking problems del_timer_sync() may cause
  rtmutex: Simplify PI algorithm and make highest prio task get lock
  rwsem: Remove redundant asmregparm annotation
  rwsem: Move duplicate function prototypes to linux/rwsem.h
  rwsem: Unify the duplicate rwsem_is_locked() inlines
  rwsem: Move duplicate init macros and functions to linux/rwsem.h
  rwsem: Move duplicate struct rwsem declaration to linux/rwsem.h
  x86: Cleanup rwsem_count_t typedef
  rwsem: Cleanup includes
  locking: Remove deprecated lock initializers
  cred: Replace deprecated spinlock initialization
  kthread: Replace deprecated spinlock initialization
  xtensa: Replace deprecated spinlock initialization
  um: Replace deprecated spinlock initialization
  sparc: Replace deprecated spinlock initialization
  mips: Replace deprecated spinlock initialization
  cris: Replace deprecated spinlock initialization
  alpha: Replace deprecated spinlock initialization
  rtmutex-tester: Remove BKL tests

Showing 40 changed files

Documentation/spinlocks.txt
... ... @@ -86,7 +86,7 @@
86 86  
87 87 The routines look the same as above:
88 88  
89   - rwlock_t xxx_lock = RW_LOCK_UNLOCKED;
  89 + rwlock_t xxx_lock = __RW_LOCK_UNLOCKED(xxx_lock);
90 90  
91 91 unsigned long flags;
92 92  
... ... @@ -196,26 +196,4 @@
196 196  
197 197 For static initialization, use DEFINE_SPINLOCK() / DEFINE_RWLOCK() or
198 198 __SPIN_LOCK_UNLOCKED() / __RW_LOCK_UNLOCKED() as appropriate.
199   -
200   -SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED are deprecated. These interfere
201   -with lockdep state tracking.
202   -
203   -Most of the time, you can simply turn:
204   - static spinlock_t xxx_lock = SPIN_LOCK_UNLOCKED;
205   -into:
206   - static DEFINE_SPINLOCK(xxx_lock);
207   -
208   -Static structure member variables go from:
209   -
210   - struct foo bar {
211   - .lock = SPIN_LOCK_UNLOCKED;
212   - };
213   -
214   -to:
215   -
216   - struct foo bar {
217   - .lock = __SPIN_LOCK_UNLOCKED(bar.lock);
218   - };
219   -
220   -Declaration of static rw_locks undergo a similar transformation.
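For reference, a minimal sketch of the initializers the updated documentation recommends (the names xxx_lock, yyy_lock and struct foo are illustrative only):

#include <linux/spinlock.h>

/* Static locks: DEFINE_SPINLOCK()/DEFINE_RWLOCK() create the lock and
 * give lockdep a distinct class keyed on the variable name. */
static DEFINE_SPINLOCK(xxx_lock);
static DEFINE_RWLOCK(yyy_lock);

/* Locks embedded in statically initialized structures use the
 * __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() initializers instead
 * of the removed SPIN_LOCK_UNLOCKED/RW_LOCK_UNLOCKED constants. */
struct foo {
	spinlock_t lock;
	int counter;
};

static struct foo bar = {
	.lock    = __SPIN_LOCK_UNLOCKED(bar.lock),
	.counter = 0,
};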
arch/alpha/include/asm/rwsem.h
... ... @@ -13,45 +13,14 @@
13 13 #ifdef __KERNEL__
14 14  
15 15 #include <linux/compiler.h>
16   -#include <linux/list.h>
17   -#include <linux/spinlock.h>
18 16  
19   -struct rwsem_waiter;
20   -
21   -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
22   -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
23   -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
24   -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
25   -
26   -/*
27   - * the semaphore definition
28   - */
29   -struct rw_semaphore {
30   - long count;
31 17 #define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
32 18 #define RWSEM_ACTIVE_BIAS 0x0000000000000001L
33 19 #define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
34 20 #define RWSEM_WAITING_BIAS (-0x0000000100000000L)
35 21 #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
36 22 #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
37   - spinlock_t wait_lock;
38   - struct list_head wait_list;
39   -};
40 23  
41   -#define __RWSEM_INITIALIZER(name) \
42   - { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
43   - LIST_HEAD_INIT((name).wait_list) }
44   -
45   -#define DECLARE_RWSEM(name) \
46   - struct rw_semaphore name = __RWSEM_INITIALIZER(name)
47   -
48   -static inline void init_rwsem(struct rw_semaphore *sem)
49   -{
50   - sem->count = RWSEM_UNLOCKED_VALUE;
51   - spin_lock_init(&sem->wait_lock);
52   - INIT_LIST_HEAD(&sem->wait_list);
53   -}
54   -
55 24 static inline void __down_read(struct rw_semaphore *sem)
56 25 {
57 26 long oldcount;
... ... @@ -248,11 +217,6 @@
248 217  
249 218 return ret;
250 219 #endif
251   -}
252   -
253   -static inline int rwsem_is_locked(struct rw_semaphore *sem)
254   -{
255   - return (sem->count != 0);
256 220 }
257 221  
258 222 #endif /* __KERNEL__ */
arch/cris/arch-v32/kernel/smp.c
... ... @@ -26,7 +26,9 @@
26 26 #define FLUSH_ALL (void*)0xffffffff
27 27  
28 28 /* Vector of locks used for various atomic operations */
29   -spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED};
  29 +spinlock_t cris_atomic_locks[] = {
  30 + [0 ... LOCK_COUNT - 1] = __SPIN_LOCK_UNLOCKED(cris_atomic_locks)
  31 +};
30 32  
31 33 /* CPU masks */
32 34 cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
arch/ia64/include/asm/rwsem.h
... ... @@ -25,20 +25,8 @@
25 25 #error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
26 26 #endif
27 27  
28   -#include <linux/list.h>
29   -#include <linux/spinlock.h>
30   -
31 28 #include <asm/intrinsics.h>
32 29  
33   -/*
34   - * the semaphore definition
35   - */
36   -struct rw_semaphore {
37   - signed long count;
38   - spinlock_t wait_lock;
39   - struct list_head wait_list;
40   -};
41   -
42 30 #define RWSEM_UNLOCKED_VALUE __IA64_UL_CONST(0x0000000000000000)
43 31 #define RWSEM_ACTIVE_BIAS (1L)
44 32 #define RWSEM_ACTIVE_MASK (0xffffffffL)
... ... @@ -46,26 +34,6 @@
46 34 #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
47 35 #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
48 36  
49   -#define __RWSEM_INITIALIZER(name) \
50   - { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
51   - LIST_HEAD_INIT((name).wait_list) }
52   -
53   -#define DECLARE_RWSEM(name) \
54   - struct rw_semaphore name = __RWSEM_INITIALIZER(name)
55   -
56   -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
57   -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
58   -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
59   -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
60   -
61   -static inline void
62   -init_rwsem (struct rw_semaphore *sem)
63   -{
64   - sem->count = RWSEM_UNLOCKED_VALUE;
65   - spin_lock_init(&sem->wait_lock);
66   - INIT_LIST_HEAD(&sem->wait_list);
67   -}
68   -
69 37 /*
70 38 * lock for reading
71 39 */
... ... @@ -173,11 +141,6 @@
173 141 */
174 142 #define rwsem_atomic_add(delta, sem) atomic64_add(delta, (atomic64_t *)(&(sem)->count))
175 143 #define rwsem_atomic_update(delta, sem) atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
176   -
177   -static inline int rwsem_is_locked(struct rw_semaphore *sem)
178   -{
179   - return (sem->count != 0);
180   -}
181 144  
182 145 #endif /* _ASM_IA64_RWSEM_H */
arch/powerpc/include/asm/rwsem.h
... ... @@ -13,11 +13,6 @@
13 13 * by Paul Mackerras <paulus@samba.org>.
14 14 */
15 15  
16   -#include <linux/list.h>
17   -#include <linux/spinlock.h>
18   -#include <asm/atomic.h>
19   -#include <asm/system.h>
20   -
21 16 /*
22 17 * the semaphore definition
23 18 */
... ... @@ -33,47 +28,6 @@
33 28 #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
34 29 #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
35 30  
36   -struct rw_semaphore {
37   - long count;
38   - spinlock_t wait_lock;
39   - struct list_head wait_list;
40   -#ifdef CONFIG_DEBUG_LOCK_ALLOC
41   - struct lockdep_map dep_map;
42   -#endif
43   -};
44   -
45   -#ifdef CONFIG_DEBUG_LOCK_ALLOC
46   -# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
47   -#else
48   -# define __RWSEM_DEP_MAP_INIT(lockname)
49   -#endif
50   -
51   -#define __RWSEM_INITIALIZER(name) \
52   -{ \
53   - RWSEM_UNLOCKED_VALUE, \
54   - __SPIN_LOCK_UNLOCKED((name).wait_lock), \
55   - LIST_HEAD_INIT((name).wait_list) \
56   - __RWSEM_DEP_MAP_INIT(name) \
57   -}
58   -
59   -#define DECLARE_RWSEM(name) \
60   - struct rw_semaphore name = __RWSEM_INITIALIZER(name)
61   -
62   -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
63   -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
64   -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
65   -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
66   -
67   -extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
68   - struct lock_class_key *key);
69   -
70   -#define init_rwsem(sem) \
71   - do { \
72   - static struct lock_class_key __key; \
73   - \
74   - __init_rwsem((sem), #sem, &__key); \
75   - } while (0)
76   -
77 31 /*
78 32 * lock for reading
79 33 */
... ... @@ -172,11 +126,6 @@
172 126 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
173 127 {
174 128 return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
175   -}
176   -
177   -static inline int rwsem_is_locked(struct rw_semaphore *sem)
178   -{
179   - return sem->count != 0;
180 129 }
181 130  
182 131 #endif /* __KERNEL__ */
arch/s390/include/asm/rwsem.h
... ... @@ -43,29 +43,6 @@
43 43  
44 44 #ifdef __KERNEL__
45 45  
46   -#include <linux/list.h>
47   -#include <linux/spinlock.h>
48   -
49   -struct rwsem_waiter;
50   -
51   -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *);
52   -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *);
53   -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
54   -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *);
55   -extern struct rw_semaphore *rwsem_downgrade_write(struct rw_semaphore *);
56   -
57   -/*
58   - * the semaphore definition
59   - */
60   -struct rw_semaphore {
61   - signed long count;
62   - spinlock_t wait_lock;
63   - struct list_head wait_list;
64   -#ifdef CONFIG_DEBUG_LOCK_ALLOC
65   - struct lockdep_map dep_map;
66   -#endif
67   -};
68   -
69 46 #ifndef __s390x__
70 47 #define RWSEM_UNLOCKED_VALUE 0x00000000
71 48 #define RWSEM_ACTIVE_BIAS 0x00000001
... ... @@ -81,41 +58,6 @@
81 58 #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
82 59  
83 60 /*
84   - * initialisation
85   - */
86   -
87   -#ifdef CONFIG_DEBUG_LOCK_ALLOC
88   -# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
89   -#else
90   -# define __RWSEM_DEP_MAP_INIT(lockname)
91   -#endif
92   -
93   -#define __RWSEM_INITIALIZER(name) \
94   - { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait.lock), \
95   - LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
96   -
97   -#define DECLARE_RWSEM(name) \
98   - struct rw_semaphore name = __RWSEM_INITIALIZER(name)
99   -
100   -static inline void init_rwsem(struct rw_semaphore *sem)
101   -{
102   - sem->count = RWSEM_UNLOCKED_VALUE;
103   - spin_lock_init(&sem->wait_lock);
104   - INIT_LIST_HEAD(&sem->wait_list);
105   -}
106   -
107   -extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
108   - struct lock_class_key *key);
109   -
110   -#define init_rwsem(sem) \
111   -do { \
112   - static struct lock_class_key __key; \
113   - \
114   - __init_rwsem((sem), #sem, &__key); \
115   -} while (0)
116   -
117   -
118   -/*
119 61 * lock for reading
120 62 */
121 63 static inline void __down_read(struct rw_semaphore *sem)
... ... @@ -375,11 +317,6 @@
375 317 : "Q" (sem->count), "d" (delta)
376 318 : "cc", "memory");
377 319 return new;
378   -}
379   -
380   -static inline int rwsem_is_locked(struct rw_semaphore *sem)
381   -{
382   - return (sem->count != 0);
383 320 }
384 321  
385 322 #endif /* __KERNEL__ */
arch/sh/include/asm/rwsem.h
... ... @@ -11,65 +11,14 @@
11 11 #endif
12 12  
13 13 #ifdef __KERNEL__
14   -#include <linux/list.h>
15   -#include <linux/spinlock.h>
16   -#include <asm/atomic.h>
17   -#include <asm/system.h>
18 14  
19   -/*
20   - * the semaphore definition
21   - */
22   -struct rw_semaphore {
23   - long count;
24 15 #define RWSEM_UNLOCKED_VALUE 0x00000000
25 16 #define RWSEM_ACTIVE_BIAS 0x00000001
26 17 #define RWSEM_ACTIVE_MASK 0x0000ffff
27 18 #define RWSEM_WAITING_BIAS (-0x00010000)
28 19 #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
29 20 #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
30   - spinlock_t wait_lock;
31   - struct list_head wait_list;
32   -#ifdef CONFIG_DEBUG_LOCK_ALLOC
33   - struct lockdep_map dep_map;
34   -#endif
35   -};
36 21  
37   -#ifdef CONFIG_DEBUG_LOCK_ALLOC
38   -# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
39   -#else
40   -# define __RWSEM_DEP_MAP_INIT(lockname)
41   -#endif
42   -
43   -#define __RWSEM_INITIALIZER(name) \
44   - { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
45   - LIST_HEAD_INIT((name).wait_list) \
46   - __RWSEM_DEP_MAP_INIT(name) }
47   -
48   -#define DECLARE_RWSEM(name) \
49   - struct rw_semaphore name = __RWSEM_INITIALIZER(name)
50   -
51   -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
52   -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
53   -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
54   -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
55   -
56   -extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
57   - struct lock_class_key *key);
58   -
59   -#define init_rwsem(sem) \
60   -do { \
61   - static struct lock_class_key __key; \
62   - \
63   - __init_rwsem((sem), #sem, &__key); \
64   -} while (0)
65   -
66   -static inline void init_rwsem(struct rw_semaphore *sem)
67   -{
68   - sem->count = RWSEM_UNLOCKED_VALUE;
69   - spin_lock_init(&sem->wait_lock);
70   - INIT_LIST_HEAD(&sem->wait_list);
71   -}
72   -
73 22 /*
74 23 * lock for reading
75 24 */
... ... @@ -177,11 +126,6 @@
177 126 {
178 127 smp_mb();
179 128 return atomic_add_return(delta, (atomic_t *)(&sem->count));
180   -}
181   -
182   -static inline int rwsem_is_locked(struct rw_semaphore *sem)
183   -{
184   - return (sem->count != 0);
185 129 }
186 130  
187 131 #endif /* __KERNEL__ */
arch/sparc/include/asm/rwsem.h
... ... @@ -13,54 +13,13 @@
13 13  
14 14 #ifdef __KERNEL__
15 15  
16   -#include <linux/list.h>
17   -#include <linux/spinlock.h>
18   -
19   -struct rwsem_waiter;
20   -
21   -struct rw_semaphore {
22   - signed long count;
23 16 #define RWSEM_UNLOCKED_VALUE 0x00000000L
24 17 #define RWSEM_ACTIVE_BIAS 0x00000001L
25 18 #define RWSEM_ACTIVE_MASK 0xffffffffL
26 19 #define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)
27 20 #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
28 21 #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
29   - spinlock_t wait_lock;
30   - struct list_head wait_list;
31   -#ifdef CONFIG_DEBUG_LOCK_ALLOC
32   - struct lockdep_map dep_map;
33   -#endif
34   -};
35 22  
36   -#ifdef CONFIG_DEBUG_LOCK_ALLOC
37   -# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
38   -#else
39   -# define __RWSEM_DEP_MAP_INIT(lockname)
40   -#endif
41   -
42   -#define __RWSEM_INITIALIZER(name) \
43   -{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
44   - LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
45   -
46   -#define DECLARE_RWSEM(name) \
47   - struct rw_semaphore name = __RWSEM_INITIALIZER(name)
48   -
49   -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
50   -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
51   -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
52   -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
53   -
54   -extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
55   - struct lock_class_key *key);
56   -
57   -#define init_rwsem(sem) \
58   -do { \
59   - static struct lock_class_key __key; \
60   - \
61   - __init_rwsem((sem), #sem, &__key); \
62   -} while (0)
63   -
64 23 /*
65 24 * lock for reading
66 25 */
... ... @@ -158,11 +117,6 @@
158 117 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
159 118 {
160 119 return atomic64_add_return(delta, (atomic64_t *)(&sem->count));
161   -}
162   -
163   -static inline int rwsem_is_locked(struct rw_semaphore *sem)
164   -{
165   - return (sem->count != 0);
166 120 }
167 121  
168 122 #endif /* __KERNEL__ */
arch/sparc/lib/atomic32.c
... ... @@ -16,7 +16,7 @@
16 16 #define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])
17 17  
18 18 spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
19   - [0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
  19 + [0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
20 20 };
21 21  
22 22 #else /* SMP */
arch/um/drivers/ubd_kern.c
... ... @@ -185,7 +185,7 @@
185 185 .no_cow = 0, \
186 186 .shared = 0, \
187 187 .cow = DEFAULT_COW, \
188   - .lock = SPIN_LOCK_UNLOCKED, \
  188 + .lock = __SPIN_LOCK_UNLOCKED(ubd_devs.lock), \
189 189 .request = NULL, \
190 190 .start_sg = 0, \
191 191 .end_sg = 0, \
arch/x86/include/asm/rwsem.h
... ... @@ -37,26 +37,9 @@
37 37 #endif
38 38  
39 39 #ifdef __KERNEL__
40   -
41   -#include <linux/list.h>
42   -#include <linux/spinlock.h>
43   -#include <linux/lockdep.h>
44 40 #include <asm/asm.h>
45 41  
46   -struct rwsem_waiter;
47   -
48   -extern asmregparm struct rw_semaphore *
49   - rwsem_down_read_failed(struct rw_semaphore *sem);
50   -extern asmregparm struct rw_semaphore *
51   - rwsem_down_write_failed(struct rw_semaphore *sem);
52   -extern asmregparm struct rw_semaphore *
53   - rwsem_wake(struct rw_semaphore *);
54   -extern asmregparm struct rw_semaphore *
55   - rwsem_downgrade_wake(struct rw_semaphore *sem);
56   -
57 42 /*
58   - * the semaphore definition
59   - *
60 43 * The bias values and the counter type limits the number of
61 44 * potential readers/writers to 32767 for 32 bits and 2147483647
62 45 * for 64 bits.
... ... @@ -74,43 +57,6 @@
74 57 #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
75 58 #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
76 59  
77   -typedef signed long rwsem_count_t;
78   -
79   -struct rw_semaphore {
80   - rwsem_count_t count;
81   - spinlock_t wait_lock;
82   - struct list_head wait_list;
83   -#ifdef CONFIG_DEBUG_LOCK_ALLOC
84   - struct lockdep_map dep_map;
85   -#endif
86   -};
87   -
88   -#ifdef CONFIG_DEBUG_LOCK_ALLOC
89   -# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
90   -#else
91   -# define __RWSEM_DEP_MAP_INIT(lockname)
92   -#endif
93   -
94   -
95   -#define __RWSEM_INITIALIZER(name) \
96   -{ \
97   - RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
98   - LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) \
99   -}
100   -
101   -#define DECLARE_RWSEM(name) \
102   - struct rw_semaphore name = __RWSEM_INITIALIZER(name)
103   -
104   -extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
105   - struct lock_class_key *key);
106   -
107   -#define init_rwsem(sem) \
108   -do { \
109   - static struct lock_class_key __key; \
110   - \
111   - __init_rwsem((sem), #sem, &__key); \
112   -} while (0)
113   -
114 60 /*
115 61 * lock for reading
116 62 */
... ... @@ -133,7 +79,7 @@
133 79 */
134 80 static inline int __down_read_trylock(struct rw_semaphore *sem)
135 81 {
136   - rwsem_count_t result, tmp;
  82 + long result, tmp;
137 83 asm volatile("# beginning __down_read_trylock\n\t"
138 84 " mov %0,%1\n\t"
139 85 "1:\n\t"
... ... @@ -155,7 +101,7 @@
155 101 */
156 102 static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
157 103 {
158   - rwsem_count_t tmp;
  104 + long tmp;
159 105 asm volatile("# beginning down_write\n\t"
160 106 LOCK_PREFIX " xadd %1,(%2)\n\t"
161 107 /* adds 0xffff0001, returns the old value */
... ... @@ -180,9 +126,8 @@
180 126 */
181 127 static inline int __down_write_trylock(struct rw_semaphore *sem)
182 128 {
183   - rwsem_count_t ret = cmpxchg(&sem->count,
184   - RWSEM_UNLOCKED_VALUE,
185   - RWSEM_ACTIVE_WRITE_BIAS);
  129 + long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
  130 + RWSEM_ACTIVE_WRITE_BIAS);
186 131 if (ret == RWSEM_UNLOCKED_VALUE)
187 132 return 1;
188 133 return 0;
... ... @@ -193,7 +138,7 @@
193 138 */
194 139 static inline void __up_read(struct rw_semaphore *sem)
195 140 {
196   - rwsem_count_t tmp;
  141 + long tmp;
197 142 asm volatile("# beginning __up_read\n\t"
198 143 LOCK_PREFIX " xadd %1,(%2)\n\t"
199 144 /* subtracts 1, returns the old value */
... ... @@ -211,7 +156,7 @@
211 156 */
212 157 static inline void __up_write(struct rw_semaphore *sem)
213 158 {
214   - rwsem_count_t tmp;
  159 + long tmp;
215 160 asm volatile("# beginning __up_write\n\t"
216 161 LOCK_PREFIX " xadd %1,(%2)\n\t"
217 162 /* subtracts 0xffff0001, returns the old value */
... ... @@ -247,8 +192,7 @@
247 192 /*
248 193 * implement atomic add functionality
249 194 */
250   -static inline void rwsem_atomic_add(rwsem_count_t delta,
251   - struct rw_semaphore *sem)
  195 +static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
252 196 {
253 197 asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
254 198 : "+m" (sem->count)
255 199  
256 200  
... ... @@ -258,21 +202,15 @@
258 202 /*
259 203 * implement exchange and add functionality
260 204 */
261   -static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
262   - struct rw_semaphore *sem)
  205 +static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
263 206 {
264   - rwsem_count_t tmp = delta;
  207 + long tmp = delta;
265 208  
266 209 asm volatile(LOCK_PREFIX "xadd %0,%1"
267 210 : "+r" (tmp), "+m" (sem->count)
268 211 : : "memory");
269 212  
270 213 return tmp + delta;
271   -}
272   -
273   -static inline int rwsem_is_locked(struct rw_semaphore *sem)
274   -{
275   - return (sem->count != 0);
276 214 }
277 215  
278 216 #endif /* __KERNEL__ */
arch/xtensa/include/asm/rwsem.h
... ... @@ -17,45 +17,13 @@
17 17 #error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
18 18 #endif
19 19  
20   -#include <linux/list.h>
21   -#include <linux/spinlock.h>
22   -#include <asm/atomic.h>
23   -#include <asm/system.h>
24   -
25   -/*
26   - * the semaphore definition
27   - */
28   -struct rw_semaphore {
29   - signed long count;
30 20 #define RWSEM_UNLOCKED_VALUE 0x00000000
31 21 #define RWSEM_ACTIVE_BIAS 0x00000001
32 22 #define RWSEM_ACTIVE_MASK 0x0000ffff
33 23 #define RWSEM_WAITING_BIAS (-0x00010000)
34 24 #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
35 25 #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
36   - spinlock_t wait_lock;
37   - struct list_head wait_list;
38   -};
39 26  
40   -#define __RWSEM_INITIALIZER(name) \
41   - { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
42   - LIST_HEAD_INIT((name).wait_list) }
43   -
44   -#define DECLARE_RWSEM(name) \
45   - struct rw_semaphore name = __RWSEM_INITIALIZER(name)
46   -
47   -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
48   -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
49   -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
50   -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
51   -
52   -static inline void init_rwsem(struct rw_semaphore *sem)
53   -{
54   - sem->count = RWSEM_UNLOCKED_VALUE;
55   - spin_lock_init(&sem->wait_lock);
56   - INIT_LIST_HEAD(&sem->wait_list);
57   -}
58   -
59 27 /*
60 28 * lock for reading
61 29 */
... ... @@ -158,11 +126,6 @@
158 126 {
159 127 smp_mb();
160 128 return atomic_add_return(delta, (atomic_t *)(&sem->count));
161   -}
162   -
163   -static inline int rwsem_is_locked(struct rw_semaphore *sem)
164   -{
165   - return (sem->count != 0);
166 129 }
167 130  
168 131 #endif /* _XTENSA_RWSEM_H */
include/linux/kthread.h
... ... @@ -64,7 +64,7 @@
64 64 };
65 65  
66 66 #define KTHREAD_WORKER_INIT(worker) { \
67   - .lock = SPIN_LOCK_UNLOCKED, \
  67 + .lock = __SPIN_LOCK_UNLOCKED((worker).lock), \
68 68 .work_list = LIST_HEAD_INIT((worker).work_list), \
69 69 }
70 70  
include/linux/rwlock_types.h
... ... @@ -43,14 +43,6 @@
43 43 RW_DEP_MAP_INIT(lockname) }
44 44 #endif
45 45  
46   -/*
47   - * RW_LOCK_UNLOCKED defeat lockdep state tracking and is hence
48   - * deprecated.
49   - *
50   - * Please use DEFINE_RWLOCK() or __RW_LOCK_UNLOCKED() as appropriate.
51   - */
52   -#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
53   -
54 46 #define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
55 47  
56 48 #endif /* __LINUX_RWLOCK_TYPES_H */
include/linux/rwsem-spinlock.h
... ... @@ -12,15 +12,7 @@
12 12 #error "please don't include linux/rwsem-spinlock.h directly, use linux/rwsem.h instead"
13 13 #endif
14 14  
15   -#include <linux/spinlock.h>
16   -#include <linux/list.h>
17   -
18 15 #ifdef __KERNEL__
19   -
20   -#include <linux/types.h>
21   -
22   -struct rwsem_waiter;
23   -
24 16 /*
25 17 * the rw-semaphore definition
26 18 * - if activity is 0 then there are no active readers or writers
... ... @@ -37,28 +29,7 @@
37 29 #endif
38 30 };
39 31  
40   -#ifdef CONFIG_DEBUG_LOCK_ALLOC
41   -# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
42   -#else
43   -# define __RWSEM_DEP_MAP_INIT(lockname)
44   -#endif
45   -
46   -#define __RWSEM_INITIALIZER(name) \
47   -{ 0, __SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \
48   - __RWSEM_DEP_MAP_INIT(name) }
49   -
50   -#define DECLARE_RWSEM(name) \
51   - struct rw_semaphore name = __RWSEM_INITIALIZER(name)
52   -
53   -extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
54   - struct lock_class_key *key);
55   -
56   -#define init_rwsem(sem) \
57   -do { \
58   - static struct lock_class_key __key; \
59   - \
60   - __init_rwsem((sem), #sem, &__key); \
61   -} while (0)
  32 +#define RWSEM_UNLOCKED_VALUE 0x00000000
62 33  
63 34 extern void __down_read(struct rw_semaphore *sem);
64 35 extern int __down_read_trylock(struct rw_semaphore *sem);
include/linux/rwsem.h
... ... @@ -11,6 +11,9 @@
11 11  
12 12 #include <linux/types.h>
13 13 #include <linux/kernel.h>
  14 +#include <linux/list.h>
  15 +#include <linux/spinlock.h>
  16 +
14 17 #include <asm/system.h>
15 18 #include <asm/atomic.h>
16 19  
17 20  
... ... @@ -19,8 +22,56 @@
19 22 #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
20 23 #include <linux/rwsem-spinlock.h> /* use a generic implementation */
21 24 #else
22   -#include <asm/rwsem.h> /* use an arch-specific implementation */
  25 +/* All arch specific implementations share the same struct */
  26 +struct rw_semaphore {
  27 + long count;
  28 + spinlock_t wait_lock;
  29 + struct list_head wait_list;
  30 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
  31 + struct lockdep_map dep_map;
23 32 #endif
  33 +};
  34 +
  35 +extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
  36 +extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
  37 +extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
  38 +extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
  39 +
  40 +/* Include the arch specific part */
  41 +#include <asm/rwsem.h>
  42 +
  43 +/* In all implementations count != 0 means locked */
  44 +static inline int rwsem_is_locked(struct rw_semaphore *sem)
  45 +{
  46 + return sem->count != 0;
  47 +}
  48 +
  49 +#endif
  50 +
  51 +/* Common initializer macros and functions */
  52 +
  53 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
  54 +# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
  55 +#else
  56 +# define __RWSEM_DEP_MAP_INIT(lockname)
  57 +#endif
  58 +
  59 +#define __RWSEM_INITIALIZER(name) \
  60 + { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED(name.wait_lock), \
  61 + LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
  62 +
  63 +#define DECLARE_RWSEM(name) \
  64 + struct rw_semaphore name = __RWSEM_INITIALIZER(name)
  65 +
  66 +extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
  67 + struct lock_class_key *key);
  68 +
  69 +#define init_rwsem(sem) \
  70 +do { \
  71 + static struct lock_class_key __key; \
  72 + \
  73 + __init_rwsem((sem), #sem, &__key); \
  74 +} while (0)
24 75  
25 76 /*
26 77 * lock for reading
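A brief usage sketch of the rwsem interface consolidated above; the names config_rwsem, config_global and config_state are hypothetical, while DECLARE_RWSEM(), init_rwsem() and down_read()/up_read()/down_write()/up_write() are the interfaces declared in linux/rwsem.h:

#include <linux/rwsem.h>

/* Static declaration: DECLARE_RWSEM() expands to __RWSEM_INITIALIZER(). */
static DECLARE_RWSEM(config_rwsem);
static int config_global;

static void config_global_set(int v)
{
	down_write(&config_rwsem);
	config_global = v;
	up_write(&config_rwsem);
}

/* Embedded in a structure: init_rwsem() gives the lock its own lockdep
 * class via the hidden static lock_class_key in the macro above. */
struct config_state {
	struct rw_semaphore sem;
	int value;
};

static void config_state_init(struct config_state *st)
{
	init_rwsem(&st->sem);
	st->value = 0;
}

static int config_state_get(struct config_state *st)
{
	int v;

	down_read(&st->sem);
	v = st->value;
	up_read(&st->sem);
	return v;
}

static void config_state_set(struct config_state *st, int v)
{
	down_write(&st->sem);
	st->value = v;
	up_write(&st->sem);
}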
include/linux/spinlock_types.h
... ... @@ -81,14 +81,6 @@
81 81 #define __SPIN_LOCK_UNLOCKED(lockname) \
82 82 (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
83 83  
84   -/*
85   - * SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence
86   - * deprecated.
87   - * Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as
88   - * appropriate.
89   - */
90   -#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init)
91   -
92 84 #define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
93 85  
94 86 #include <linux/rwlock_types.h>
kernel/cred.c
... ... @@ -35,7 +35,7 @@

35 35 static struct thread_group_cred init_tgcred = {
36 36 .usage = ATOMIC_INIT(2),
37 37 .tgid = 0,
38   - .lock = SPIN_LOCK_UNLOCKED,
  38 + .lock = __SPIN_LOCK_UNLOCKED(init_cred.tgcred.lock),
39 39 };
40 40 #endif
41 41  
kernel/futex.c
... ... @@ -1555,10 +1555,10 @@
1555 1555  
1556 1556 /*
1557 1557 * We are here either because we stole the rtmutex from the
1558   - * pending owner or we are the pending owner which failed to
1559   - * get the rtmutex. We have to replace the pending owner TID
1560   - * in the user space variable. This must be atomic as we have
1561   - * to preserve the owner died bit here.
  1558 + * previous highest priority waiter or we are the highest priority
  1559 + * waiter but failed to get the rtmutex the first time.
  1560 + * We have to replace the newowner TID in the user space variable.
  1561 + * This must be atomic as we have to preserve the owner died bit here.
1562 1562 *
1563 1563 * Note: We write the user space value _before_ changing the pi_state
1564 1564 * because we can fault here. Imagine swapped out pages or a fork
... ... @@ -1605,8 +1605,8 @@
1605 1605  
1606 1606 /*
1607 1607 * To handle the page fault we need to drop the hash bucket
1608   - * lock here. That gives the other task (either the pending
1609   - * owner itself or the task which stole the rtmutex) the
  1608 + * lock here. That gives the other task (either the highest priority
  1609 + * waiter itself or the task which stole the rtmutex) the
1610 1610 * chance to try the fixup of the pi_state. So once we are
1611 1611 * back from handling the fault we need to check the pi_state
1612 1612 * after reacquiring the hash bucket lock and before trying to
1613 1613  
1614 1614  
1615 1615  
... ... @@ -1682,18 +1682,20 @@
1682 1682 /*
1683 1683 * pi_state is incorrect, some other task did a lock steal and
1684 1684 * we returned due to timeout or signal without taking the
1685   - * rt_mutex. Too late. We can access the rt_mutex_owner without
1686   - * locking, as the other task is now blocked on the hash bucket
1687   - * lock. Fix the state up.
  1685 + * rt_mutex. Too late.
1688 1686 */
  1687 + raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
1689 1688 owner = rt_mutex_owner(&q->pi_state->pi_mutex);
  1689 + if (!owner)
  1690 + owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
  1691 + raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
1690 1692 ret = fixup_pi_state_owner(uaddr, q, owner);
1691 1693 goto out;
1692 1694 }
1693 1695  
1694 1696 /*
1695 1697 * Paranoia check. If we did not take the lock, then we should not be
1696   - * the owner, nor the pending owner, of the rt_mutex.
  1698 + * the owner of the rt_mutex.
1697 1699 */
1698 1700 if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
1699 1701 printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
kernel/rtmutex-debug.c
... ... @@ -215,7 +215,6 @@
215 215 put_pid(waiter->deadlock_task_pid);
216 216 TRACE_WARN_ON(!plist_node_empty(&waiter->list_entry));
217 217 TRACE_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
218   - TRACE_WARN_ON(waiter->task);
219 218 memset(waiter, 0x22, sizeof(*waiter));
220 219 }
221 220  
kernel/rtmutex-tester.c
... ... @@ -9,7 +9,6 @@
9 9 #include <linux/kthread.h>
10 10 #include <linux/module.h>
11 11 #include <linux/sched.h>
12   -#include <linux/smp_lock.h>
13 12 #include <linux/spinlock.h>
14 13 #include <linux/sysdev.h>
15 14 #include <linux/timer.h>
... ... @@ -27,7 +26,6 @@
27 26 int opcode;
28 27 int opdata;
29 28 int mutexes[MAX_RT_TEST_MUTEXES];
30   - int bkl;
31 29 int event;
32 30 struct sys_device sysdev;
33 31 };
... ... @@ -46,9 +44,8 @@
46 44 RTTEST_LOCKINTNOWAIT, /* 6 Lock interruptible no wait in wakeup, data = lockindex */
47 45 RTTEST_LOCKCONT, /* 7 Continue locking after the wakeup delay */
48 46 RTTEST_UNLOCK, /* 8 Unlock, data = lockindex */
49   - RTTEST_LOCKBKL, /* 9 Lock BKL */
50   - RTTEST_UNLOCKBKL, /* 10 Unlock BKL */
51   - RTTEST_SIGNAL, /* 11 Signal other test thread, data = thread id */
  47 + /* 9, 10 - reserved for BKL commemoration */
  48 + RTTEST_SIGNAL = 11, /* 11 Signal other test thread, data = thread id */
52 49 RTTEST_RESETEVENT = 98, /* 98 Reset event counter */
53 50 RTTEST_RESET = 99, /* 99 Reset all pending operations */
54 51 };
... ... @@ -74,13 +71,6 @@
74 71 td->mutexes[i] = 0;
75 72 }
76 73 }
77   -
78   - if (!lockwakeup && td->bkl == 4) {
79   -#ifdef CONFIG_LOCK_KERNEL
80   - unlock_kernel();
81   -#endif
82   - td->bkl = 0;
83   - }
84 74 return 0;
85 75  
86 76 case RTTEST_RESETEVENT:
... ... @@ -131,25 +121,6 @@
131 121 td->mutexes[id] = 0;
132 122 return 0;
133 123  
134   - case RTTEST_LOCKBKL:
135   - if (td->bkl)
136   - return 0;
137   - td->bkl = 1;
138   -#ifdef CONFIG_LOCK_KERNEL
139   - lock_kernel();
140   -#endif
141   - td->bkl = 4;
142   - return 0;
143   -
144   - case RTTEST_UNLOCKBKL:
145   - if (td->bkl != 4)
146   - break;
147   -#ifdef CONFIG_LOCK_KERNEL
148   - unlock_kernel();
149   -#endif
150   - td->bkl = 0;
151   - return 0;
152   -
153 124 default:
154 125 break;
155 126 }
... ... @@ -196,7 +167,6 @@
196 167 td->event = atomic_add_return(1, &rttest_event);
197 168 break;
198 169  
199   - case RTTEST_LOCKBKL:
200 170 default:
201 171 break;
202 172 }
... ... @@ -229,8 +199,6 @@
229 199 td->event = atomic_add_return(1, &rttest_event);
230 200 return;
231 201  
232   - case RTTEST_LOCKBKL:
233   - return;
234 202 default:
235 203 return;
236 204 }
237 205  
... ... @@ -380,11 +348,11 @@
380 348 spin_lock(&rttest_lock);
381 349  
382 350 curr += sprintf(curr,
383   - "O: %4d, E:%8d, S: 0x%08lx, P: %4d, N: %4d, B: %p, K: %d, M:",
  351 + "O: %4d, E:%8d, S: 0x%08lx, P: %4d, N: %4d, B: %p, M:",
384 352 td->opcode, td->event, tsk->state,
385 353 (MAX_RT_PRIO - 1) - tsk->prio,
386 354 (MAX_RT_PRIO - 1) - tsk->normal_prio,
387   - tsk->pi_blocked_on, td->bkl);
  355 + tsk->pi_blocked_on);
388 356  
389 357 for (i = MAX_RT_TEST_MUTEXES - 1; i >=0 ; i--)
390 358 curr += sprintf(curr, "%d", td->mutexes[i]);
kernel/rtmutex.c
... ... @@ -20,41 +20,34 @@
20 20 /*
21 21 * lock->owner state tracking:
22 22 *
23   - * lock->owner holds the task_struct pointer of the owner. Bit 0 and 1
24   - * are used to keep track of the "owner is pending" and "lock has
25   - * waiters" state.
  23 + * lock->owner holds the task_struct pointer of the owner. Bit 0
  24 + * is used to keep track of the "lock has waiters" state.
26 25 *
27   - * owner bit1 bit0
28   - * NULL 0 0 lock is free (fast acquire possible)
29   - * NULL 0 1 invalid state
30   - * NULL 1 0 Transitional State*
31   - * NULL 1 1 invalid state
32   - * taskpointer 0 0 lock is held (fast release possible)
33   - * taskpointer 0 1 task is pending owner
34   - * taskpointer 1 0 lock is held and has waiters
35   - * taskpointer 1 1 task is pending owner and lock has more waiters
  26 + * owner bit0
  27 + * NULL 0 lock is free (fast acquire possible)
  28 + * NULL 1 lock is free and has waiters and the top waiter
  29 + * is going to take the lock*
  30 + * taskpointer 0 lock is held (fast release possible)
  31 + * taskpointer 1 lock is held and has waiters**
36 32 *
37   - * Pending ownership is assigned to the top (highest priority)
38   - * waiter of the lock, when the lock is released. The thread is woken
39   - * up and can now take the lock. Until the lock is taken (bit 0
40   - * cleared) a competing higher priority thread can steal the lock
41   - * which puts the woken up thread back on the waiters list.
42   - *
43 33 * The fast atomic compare exchange based acquire and release is only
44   - * possible when bit 0 and 1 of lock->owner are 0.
  34 + * possible when bit 0 of lock->owner is 0.
45 35 *
46   - * (*) There's a small time where the owner can be NULL and the
47   - * "lock has waiters" bit is set. This can happen when grabbing the lock.
48   - * To prevent a cmpxchg of the owner releasing the lock, we need to set this
49   - * bit before looking at the lock, hence the reason this is a transitional
50   - * state.
  36 + * (*) It also can be a transitional state when grabbing the lock
  37 + * with ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
  38 + * we need to set the bit0 before looking at the lock, and the owner may be
  39 + * NULL in this small time, hence this can be a transitional state.
  40 + *
  41 + * (**) There is a small time when bit 0 is set but there are no
  42 + * waiters. This can happen when grabbing the lock in the slow path.
  43 + * To prevent a cmpxchg of the owner releasing the lock, we need to
  44 + * set this bit before looking at the lock.
51 45 */
52 46  
53 47 static void
54   -rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
55   - unsigned long mask)
  48 +rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
56 49 {
57   - unsigned long val = (unsigned long)owner | mask;
  50 + unsigned long val = (unsigned long)owner;
58 51  
59 52 if (rt_mutex_has_waiters(lock))
60 53 val |= RT_MUTEX_HAS_WAITERS;
61 54  
62 55  
... ... @@ -203,15 +196,14 @@
203 196 * reached or the state of the chain has changed while we
204 197 * dropped the locks.
205 198 */
206   - if (!waiter || !waiter->task)
  199 + if (!waiter)
207 200 goto out_unlock_pi;
208 201  
209 202 /*
210 203 * Check the orig_waiter state. After we dropped the locks,
211   - * the previous owner of the lock might have released the lock
212   - * and made us the pending owner:
  204 + * the previous owner of the lock might have released the lock.
213 205 */
214   - if (orig_waiter && !orig_waiter->task)
  206 + if (orig_waiter && !rt_mutex_owner(orig_lock))
215 207 goto out_unlock_pi;
216 208  
217 209 /*
... ... @@ -254,6 +246,17 @@
254 246  
255 247 /* Release the task */
256 248 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
  249 + if (!rt_mutex_owner(lock)) {
  250 + /*
  251 + * If the requeue above changed the top waiter, then we need
  252 + * to wake the new top waiter up to try to get the lock.
  253 + */
  254 +
  255 + if (top_waiter != rt_mutex_top_waiter(lock))
  256 + wake_up_process(rt_mutex_top_waiter(lock)->task);
  257 + raw_spin_unlock(&lock->wait_lock);
  258 + goto out_put_task;
  259 + }
257 260 put_task_struct(task);
258 261  
259 262 /* Grab the next task */
260 263  
261 264  
262 265  
... ... @@ -296,78 +299,16 @@
296 299 }
297 300  
298 301 /*
299   - * Optimization: check if we can steal the lock from the
300   - * assigned pending owner [which might not have taken the
301   - * lock yet]:
302   - */
303   -static inline int try_to_steal_lock(struct rt_mutex *lock,
304   - struct task_struct *task)
305   -{
306   - struct task_struct *pendowner = rt_mutex_owner(lock);
307   - struct rt_mutex_waiter *next;
308   - unsigned long flags;
309   -
310   - if (!rt_mutex_owner_pending(lock))
311   - return 0;
312   -
313   - if (pendowner == task)
314   - return 1;
315   -
316   - raw_spin_lock_irqsave(&pendowner->pi_lock, flags);
317   - if (task->prio >= pendowner->prio) {
318   - raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
319   - return 0;
320   - }
321   -
322   - /*
323   - * Check if a waiter is enqueued on the pending owners
324   - * pi_waiters list. Remove it and readjust pending owners
325   - * priority.
326   - */
327   - if (likely(!rt_mutex_has_waiters(lock))) {
328   - raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
329   - return 1;
330   - }
331   -
332   - /* No chain handling, pending owner is not blocked on anything: */
333   - next = rt_mutex_top_waiter(lock);
334   - plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
335   - __rt_mutex_adjust_prio(pendowner);
336   - raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
337   -
338   - /*
339   - * We are going to steal the lock and a waiter was
340   - * enqueued on the pending owners pi_waiters queue. So
341   - * we have to enqueue this waiter into
342   - * task->pi_waiters list. This covers the case,
343   - * where task is boosted because it holds another
344   - * lock and gets unboosted because the booster is
345   - * interrupted, so we would delay a waiter with higher
346   - * priority as task->normal_prio.
347   - *
348   - * Note: in the rare case of a SCHED_OTHER task changing
349   - * its priority and thus stealing the lock, next->task
350   - * might be task:
351   - */
352   - if (likely(next->task != task)) {
353   - raw_spin_lock_irqsave(&task->pi_lock, flags);
354   - plist_add(&next->pi_list_entry, &task->pi_waiters);
355   - __rt_mutex_adjust_prio(task);
356   - raw_spin_unlock_irqrestore(&task->pi_lock, flags);
357   - }
358   - return 1;
359   -}
360   -
361   -/*
362 302 * Try to take an rt-mutex
363 303 *
364   - * This fails
365   - * - when the lock has a real owner
366   - * - when a different pending owner exists and has higher priority than current
367   - *
368 304 * Must be called with lock->wait_lock held.
  305 + *
  306 + * @lock: the lock to be acquired.
  307 + * @task: the task which wants to acquire the lock
  308 + * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
369 309 */
370   -static int try_to_take_rt_mutex(struct rt_mutex *lock)
  310 +static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
  311 + struct rt_mutex_waiter *waiter)
371 312 {
372 313 /*
373 314 * We have to be careful here if the atomic speedups are
374 315  
375 316  
376 317  
... ... @@ -390,15 +331,52 @@
390 331 */
391 332 mark_rt_mutex_waiters(lock);
392 333  
393   - if (rt_mutex_owner(lock) && !try_to_steal_lock(lock, current))
  334 + if (rt_mutex_owner(lock))
394 335 return 0;
395 336  
  337 + /*
  338 + * It will get the lock because of one of these conditions:
  339 + * 1) there is no waiter
  340 + * 2) higher priority than waiters
  341 + * 3) it is top waiter
  342 + */
  343 + if (rt_mutex_has_waiters(lock)) {
  344 + if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
  345 + if (!waiter || waiter != rt_mutex_top_waiter(lock))
  346 + return 0;
  347 + }
  348 + }
  349 +
  350 + if (waiter || rt_mutex_has_waiters(lock)) {
  351 + unsigned long flags;
  352 + struct rt_mutex_waiter *top;
  353 +
  354 + raw_spin_lock_irqsave(&task->pi_lock, flags);
  355 +
  356 + /* remove the queued waiter. */
  357 + if (waiter) {
  358 + plist_del(&waiter->list_entry, &lock->wait_list);
  359 + task->pi_blocked_on = NULL;
  360 + }
  361 +
  362 + /*
  363 + * We have to enqueue the top waiter(if it exists) into
  364 + * task->pi_waiters list.
  365 + */
  366 + if (rt_mutex_has_waiters(lock)) {
  367 + top = rt_mutex_top_waiter(lock);
  368 + top->pi_list_entry.prio = top->list_entry.prio;
  369 + plist_add(&top->pi_list_entry, &task->pi_waiters);
  370 + }
  371 + raw_spin_unlock_irqrestore(&task->pi_lock, flags);
  372 + }
  373 +
396 374 /* We got the lock. */
397 375 debug_rt_mutex_lock(lock);
398 376  
399   - rt_mutex_set_owner(lock, current, 0);
  377 + rt_mutex_set_owner(lock, task);
400 378  
401   - rt_mutex_deadlock_account_lock(lock, current);
  379 + rt_mutex_deadlock_account_lock(lock, task);
402 380  
403 381 return 1;
404 382 }
... ... @@ -436,6 +414,9 @@
436 414  
437 415 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
438 416  
  417 + if (!owner)
  418 + return 0;
  419 +
439 420 if (waiter == rt_mutex_top_waiter(lock)) {
440 421 raw_spin_lock_irqsave(&owner->pi_lock, flags);
441 422 plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
442 423  
443 424  
... ... @@ -472,21 +453,18 @@
472 453 /*
473 454 * Wake up the next waiter on the lock.
474 455 *
475   - * Remove the top waiter from the current tasks waiter list and from
476   - * the lock waiter list. Set it as pending owner. Then wake it up.
  456 + * Remove the top waiter from the current tasks waiter list and wake it up.
477 457 *
478 458 * Called with lock->wait_lock held.
479 459 */
480 460 static void wakeup_next_waiter(struct rt_mutex *lock)
481 461 {
482 462 struct rt_mutex_waiter *waiter;
483   - struct task_struct *pendowner;
484 463 unsigned long flags;
485 464  
486 465 raw_spin_lock_irqsave(&current->pi_lock, flags);
487 466  
488 467 waiter = rt_mutex_top_waiter(lock);
489   - plist_del(&waiter->list_entry, &lock->wait_list);
490 468  
491 469 /*
492 470 * Remove it from current->pi_waiters. We do not adjust a
493 471  
494 472  
495 473  
496 474  
... ... @@ -495,43 +473,19 @@
495 473 * lock->wait_lock.
496 474 */
497 475 plist_del(&waiter->pi_list_entry, &current->pi_waiters);
498   - pendowner = waiter->task;
499   - waiter->task = NULL;
500 476  
501   - rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);
  477 + rt_mutex_set_owner(lock, NULL);
502 478  
503 479 raw_spin_unlock_irqrestore(&current->pi_lock, flags);
504 480  
505   - /*
506   - * Clear the pi_blocked_on variable and enqueue a possible
507   - * waiter into the pi_waiters list of the pending owner. This
508   - * prevents that in case the pending owner gets unboosted a
509   - * waiter with higher priority than pending-owner->normal_prio
510   - * is blocked on the unboosted (pending) owner.
511   - */
512   - raw_spin_lock_irqsave(&pendowner->pi_lock, flags);
513   -
514   - WARN_ON(!pendowner->pi_blocked_on);
515   - WARN_ON(pendowner->pi_blocked_on != waiter);
516   - WARN_ON(pendowner->pi_blocked_on->lock != lock);
517   -
518   - pendowner->pi_blocked_on = NULL;
519   -
520   - if (rt_mutex_has_waiters(lock)) {
521   - struct rt_mutex_waiter *next;
522   -
523   - next = rt_mutex_top_waiter(lock);
524   - plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
525   - }
526   - raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
527   -
528   - wake_up_process(pendowner);
  481 + wake_up_process(waiter->task);
529 482 }
530 483  
531 484 /*
532   - * Remove a waiter from a lock
  485 + * Remove a waiter from a lock and give up
533 486 *
534   - * Must be called with lock->wait_lock held
  487 + * Must be called with lock->wait_lock held and
  488 + * have just failed to try_to_take_rt_mutex().
535 489 */
536 490 static void remove_waiter(struct rt_mutex *lock,
537 491 struct rt_mutex_waiter *waiter)
538 492  
539 493  
... ... @@ -543,12 +497,14 @@
543 497  
544 498 raw_spin_lock_irqsave(&current->pi_lock, flags);
545 499 plist_del(&waiter->list_entry, &lock->wait_list);
546   - waiter->task = NULL;
547 500 current->pi_blocked_on = NULL;
548 501 raw_spin_unlock_irqrestore(&current->pi_lock, flags);
549 502  
550   - if (first && owner != current) {
  503 + if (!owner)
  504 + return;
551 505  
  506 + if (first) {
  507 +
552 508 raw_spin_lock_irqsave(&owner->pi_lock, flags);
553 509  
554 510 plist_del(&waiter->pi_list_entry, &owner->pi_waiters);
555 511  
556 512  
... ... @@ -614,21 +570,19 @@
614 570 * or TASK_UNINTERRUPTIBLE)
615 571 * @timeout: the pre-initialized and started timer, or NULL for none
616 572 * @waiter: the pre-initialized rt_mutex_waiter
617   - * @detect_deadlock: passed to task_blocks_on_rt_mutex
618 573 *
619 574 * lock->wait_lock must be held by the caller.
620 575 */
621 576 static int __sched
622 577 __rt_mutex_slowlock(struct rt_mutex *lock, int state,
623 578 struct hrtimer_sleeper *timeout,
624   - struct rt_mutex_waiter *waiter,
625   - int detect_deadlock)
  579 + struct rt_mutex_waiter *waiter)
626 580 {
627 581 int ret = 0;
628 582  
629 583 for (;;) {
630 584 /* Try to acquire the lock: */
631   - if (try_to_take_rt_mutex(lock))
  585 + if (try_to_take_rt_mutex(lock, current, waiter))
632 586 break;
633 587  
634 588 /*
635 589  
... ... @@ -645,39 +599,11 @@
645 599 break;
646 600 }
647 601  
648   - /*
649   - * waiter->task is NULL the first time we come here and
650   - * when we have been woken up by the previous owner
651   - * but the lock got stolen by a higher prio task.
652   - */
653   - if (!waiter->task) {
654   - ret = task_blocks_on_rt_mutex(lock, waiter, current,
655   - detect_deadlock);
656   - /*
657   - * If we got woken up by the owner then start loop
658   - * all over without going into schedule to try
659   - * to get the lock now:
660   - */
661   - if (unlikely(!waiter->task)) {
662   - /*
663   - * Reset the return value. We might
664   - * have returned with -EDEADLK and the
665   - * owner released the lock while we
666   - * were walking the pi chain.
667   - */
668   - ret = 0;
669   - continue;
670   - }
671   - if (unlikely(ret))
672   - break;
673   - }
674   -
675 602 raw_spin_unlock(&lock->wait_lock);
676 603  
677 604 debug_rt_mutex_print_deadlock(waiter);
678 605  
679   - if (waiter->task)
680   - schedule_rt_mutex(lock);
  606 + schedule_rt_mutex(lock);
681 607  
682 608 raw_spin_lock(&lock->wait_lock);
683 609 set_current_state(state);
684 610  
... ... @@ -698,12 +624,11 @@
698 624 int ret = 0;
699 625  
700 626 debug_rt_mutex_init_waiter(&waiter);
701   - waiter.task = NULL;
702 627  
703 628 raw_spin_lock(&lock->wait_lock);
704 629  
705 630 /* Try to acquire the lock again: */
706   - if (try_to_take_rt_mutex(lock)) {
  631 + if (try_to_take_rt_mutex(lock, current, NULL)) {
707 632 raw_spin_unlock(&lock->wait_lock);
708 633 return 0;
709 634 }
710 635  
711 636  
... ... @@ -717,12 +642,14 @@
717 642 timeout->task = NULL;
718 643 }
719 644  
720   - ret = __rt_mutex_slowlock(lock, state, timeout, &waiter,
721   - detect_deadlock);
  645 + ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);
722 646  
  647 + if (likely(!ret))
  648 + ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
  649 +
723 650 set_current_state(TASK_RUNNING);
724 651  
725   - if (unlikely(waiter.task))
  652 + if (unlikely(ret))
726 653 remove_waiter(lock, &waiter);
727 654  
728 655 /*
... ... @@ -737,14 +664,6 @@
737 664 if (unlikely(timeout))
738 665 hrtimer_cancel(&timeout->timer);
739 666  
740   - /*
741   - * Readjust priority, when we did not get the lock. We might
742   - * have been the pending owner and boosted. Since we did not
743   - * take the lock, the PI boost has to go.
744   - */
745   - if (unlikely(ret))
746   - rt_mutex_adjust_prio(current);
747   -
748 667 debug_rt_mutex_free_waiter(&waiter);
749 668  
750 669 return ret;
... ... @@ -762,7 +681,7 @@
762 681  
763 682 if (likely(rt_mutex_owner(lock) != current)) {
764 683  
765   - ret = try_to_take_rt_mutex(lock);
  684 + ret = try_to_take_rt_mutex(lock, current, NULL);
766 685 /*
767 686 * try_to_take_rt_mutex() sets the lock waiters
768 687 * bit unconditionally. Clean this up.
... ... @@ -992,7 +911,7 @@
992 911 {
993 912 __rt_mutex_init(lock, NULL);
994 913 debug_rt_mutex_proxy_lock(lock, proxy_owner);
995   - rt_mutex_set_owner(lock, proxy_owner, 0);
  914 + rt_mutex_set_owner(lock, proxy_owner);
996 915 rt_mutex_deadlock_account_lock(lock, proxy_owner);
997 916 }
998 917  
... ... @@ -1008,7 +927,7 @@
1008 927 struct task_struct *proxy_owner)
1009 928 {
1010 929 debug_rt_mutex_proxy_unlock(lock);
1011   - rt_mutex_set_owner(lock, NULL, 0);
  930 + rt_mutex_set_owner(lock, NULL);
1012 931 rt_mutex_deadlock_account_unlock(proxy_owner);
1013 932 }
1014 933  
1015 934  
1016 935  
... ... @@ -1034,20 +953,14 @@
1034 953  
1035 954 raw_spin_lock(&lock->wait_lock);
1036 955  
1037   - mark_rt_mutex_waiters(lock);
1038   -
1039   - if (!rt_mutex_owner(lock) || try_to_steal_lock(lock, task)) {
1040   - /* We got the lock for task. */
1041   - debug_rt_mutex_lock(lock);
1042   - rt_mutex_set_owner(lock, task, 0);
  956 + if (try_to_take_rt_mutex(lock, task, NULL)) {
1043 957 raw_spin_unlock(&lock->wait_lock);
1044   - rt_mutex_deadlock_account_lock(lock, task);
1045 958 return 1;
1046 959 }
1047 960  
1048 961 ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
1049 962  
1050   - if (ret && !waiter->task) {
  963 + if (ret && !rt_mutex_owner(lock)) {
1051 964 /*
1052 965 * Reset the return value. We might have
1053 966 * returned with -EDEADLK and the owner
... ... @@ -1056,6 +969,10 @@
1056 969 */
1057 970 ret = 0;
1058 971 }
  972 +
  973 + if (unlikely(ret))
  974 + remove_waiter(lock, waiter);
  975 +
1059 976 raw_spin_unlock(&lock->wait_lock);
1060 977  
1061 978 debug_rt_mutex_print_deadlock(waiter);
1062 979  
... ... @@ -1110,12 +1027,11 @@
1110 1027  
1111 1028 set_current_state(TASK_INTERRUPTIBLE);
1112 1029  
1113   - ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter,
1114   - detect_deadlock);
  1030 + ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
1115 1031  
1116 1032 set_current_state(TASK_RUNNING);
1117 1033  
1118   - if (unlikely(waiter->task))
  1034 + if (unlikely(ret))
1119 1035 remove_waiter(lock, waiter);
1120 1036  
1121 1037 /*
... ... @@ -1125,14 +1041,6 @@
1125 1041 fixup_rt_mutex_waiters(lock);
1126 1042  
1127 1043 raw_spin_unlock(&lock->wait_lock);
1128   -
1129   - /*
1130   - * Readjust priority, when we did not get the lock. We might have been
1131   - * the pending owner and boosted. Since we did not take the lock, the
1132   - * PI boost has to go.
1133   - */
1134   - if (unlikely(ret))
1135   - rt_mutex_adjust_prio(current);
1136 1044  
1137 1045 return ret;
1138 1046 }
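A standalone C sketch (not kernel code) of the owner-word encoding the new lock->owner comment describes: with the pending-owner bit removed, only bit 0 carries the "has waiters" flag, and the owner task pointer is recovered by masking it off. struct task and struct lock below are stand-ins for the kernel's task_struct and rt_mutex.

#include <stdint.h>

#define RT_MUTEX_HAS_WAITERS	1UL
#define RT_MUTEX_OWNER_MASKALL	1UL

struct task;				/* stand-in for struct task_struct */

struct lock {				/* stand-in for struct rt_mutex */
	uintptr_t owner;		/* task pointer | HAS_WAITERS bit */
};

/* Store the owner; keep bit 0 set while waiters are queued. */
static void lock_set_owner(struct lock *l, struct task *owner, int has_waiters)
{
	uintptr_t val = (uintptr_t)owner;

	if (has_waiters)
		val |= RT_MUTEX_HAS_WAITERS;
	l->owner = val;
}

/* Recover the plain task pointer by masking off the flag bit. */
static struct task *lock_owner(const struct lock *l)
{
	return (struct task *)(l->owner & ~RT_MUTEX_OWNER_MASKALL);
}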
kernel/rtmutex_common.h
... ... @@ -91,25 +91,13 @@
91 91 /*
92 92 * lock->owner state tracking:
93 93 */
94   -#define RT_MUTEX_OWNER_PENDING 1UL
95   -#define RT_MUTEX_HAS_WAITERS 2UL
96   -#define RT_MUTEX_OWNER_MASKALL 3UL
  94 +#define RT_MUTEX_HAS_WAITERS 1UL
  95 +#define RT_MUTEX_OWNER_MASKALL 1UL
97 96  
98 97 static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
99 98 {
100 99 return (struct task_struct *)
101 100 ((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
102   -}
103   -
104   -static inline struct task_struct *rt_mutex_real_owner(struct rt_mutex *lock)
105   -{
106   - return (struct task_struct *)
107   - ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
108   -}
109   -
110   -static inline unsigned long rt_mutex_owner_pending(struct rt_mutex *lock)
111   -{
112   - return (unsigned long)lock->owner & RT_MUTEX_OWNER_PENDING;
113 101 }
114 102  
115 103 /*
kernel/timer.c
... ... @@ -970,6 +970,25 @@
970 970 * add_timer_on(). Upon exit the timer is not queued and the handler is
971 971 * not running on any CPU.
972 972 *
  973 + * Note: You must not hold locks that are held in interrupt context
  974 + * while calling this function. Even if the lock has nothing to do
  975 + * with the timer in question. Here's why:
  976 + *
  977 + * CPU0 CPU1
  978 + * ---- ----
  979 + * <SOFTIRQ>
  980 + * call_timer_fn();
  981 + * base->running_timer = mytimer;
  982 + * spin_lock_irq(somelock);
  983 + * <IRQ>
  984 + * spin_lock(somelock);
  985 + * del_timer_sync(mytimer);
  986 + * while (base->running_timer == mytimer);
  987 + *
  988 + * Now del_timer_sync() will never return and never release somelock.
  989 + * The interrupt on the other CPU is waiting to grab somelock but
  990 + * it has interrupted the softirq that CPU0 is waiting to finish.
  991 + *
973 992 * The function returns whether it has deactivated a pending timer or not.
974 993 */
975 994 int del_timer_sync(struct timer_list *timer)
... ... @@ -977,6 +996,10 @@
977 996 #ifdef CONFIG_LOCKDEP
978 997 unsigned long flags;
979 998  
  999 + /*
  1000 + * If lockdep gives a backtrace here, please reference
  1001 + * the synchronization rules above.
  1002 + */
980 1003 local_irq_save(flags);
981 1004 lock_map_acquire(&timer->lockdep_map);
982 1005 lock_map_release(&timer->lockdep_map);
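A hypothetical driver fragment illustrating the deadlock pattern the new del_timer_sync() comment warns about; somelock, mytimer and the function names are made up, and the timer is assumed to have been set up with setup_timer() elsewhere:

#include <linux/spinlock.h>
#include <linux/timer.h>

static DEFINE_SPINLOCK(somelock);
static struct timer_list mytimer;

/* Timer callback: runs in softirq context and takes somelock. */
static void mytimer_fn(unsigned long data)
{
	spin_lock(&somelock);
	/* ... */
	spin_unlock(&somelock);
}

/* BROKEN: somelock is held (interrupts off) around del_timer_sync().
 * If mytimer_fn() is already running on another CPU and spinning on
 * somelock, del_timer_sync() waits for it forever -> deadlock. */
static void broken_teardown(void)
{
	spin_lock_irq(&somelock);
	del_timer_sync(&mytimer);
	spin_unlock_irq(&somelock);
}

/* OK: drop (or never take) locks the timer handler may need before
 * calling del_timer_sync(). */
static void safe_teardown(void)
{
	del_timer_sync(&mytimer);
	spin_lock_irq(&somelock);
	/* ... */
	spin_unlock_irq(&somelock);
}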
lib/rwsem.c
... ... @@ -222,8 +222,7 @@
222 222 /*
223 223 * wait for the read lock to be granted
224 224 */
225   -asmregparm struct rw_semaphore __sched *
226   -rwsem_down_read_failed(struct rw_semaphore *sem)
  225 +struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
227 226 {
228 227 return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_READ,
229 228 -RWSEM_ACTIVE_READ_BIAS);
... ... @@ -232,8 +231,7 @@
232 231 /*
233 232 * wait for the write lock to be granted
234 233 */
235   -asmregparm struct rw_semaphore __sched *
236   -rwsem_down_write_failed(struct rw_semaphore *sem)
  234 +struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
237 235 {
238 236 return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_WRITE,
239 237 -RWSEM_ACTIVE_WRITE_BIAS);
... ... @@ -243,7 +241,7 @@
243 241 * handle waking up a waiter on the semaphore
244 242 * - up_read/up_write has decremented the active part of count if we come here
245 243 */
246   -asmregparm struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
  244 +struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
247 245 {
248 246 unsigned long flags;
249 247  
... ... @@ -263,7 +261,7 @@
263 261 * - caller incremented waiting part of count and discovered it still negative
264 262 * - just wake up any readers at the front of the queue
265 263 */
266   -asmregparm struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
  264 +struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
267 265 {
268 266 unsigned long flags;
269 267  
scripts/checkpatch.pl
... ... @@ -2654,11 +2654,6 @@
2654 2654 WARN("Use of volatile is usually wrong: see Documentation/volatile-considered-harmful.txt\n" . $herecurr);
2655 2655 }
2656 2656  
2657   -# SPIN_LOCK_UNLOCKED & RW_LOCK_UNLOCKED are deprecated
2658   - if ($line =~ /\b(SPIN_LOCK_UNLOCKED|RW_LOCK_UNLOCKED)/) {
2659   - ERROR("Use of $1 is deprecated: see Documentation/spinlocks.txt\n" . $herecurr);
2660   - }
2661   -
2662 2657 # warn about #if 0
2663 2658 if ($line =~ /^.\s*\#\s*if\s+0\b/) {
2664 2659 CHK("if this code is redundant consider removing it\n" .
scripts/rt-tester/rt-tester.py
... ... @@ -33,8 +33,6 @@
33 33 "lockintnowait" : "6",
34 34 "lockcont" : "7",
35 35 "unlock" : "8",
36   - "lockbkl" : "9",
37   - "unlockbkl" : "10",
38 36 "signal" : "11",
39 37 "resetevent" : "98",
40 38 "reset" : "99",
scripts/rt-tester/t2-l1-2rt-sameprio.tst
... ... @@ -19,8 +19,6 @@
19 19 # lockintnowait lock nr (0-7)
20 20 # lockcont lock nr (0-7)
21 21 # unlock lock nr (0-7)
22   -# lockbkl lock nr (0-7)
23   -# unlockbkl lock nr (0-7)
24 22 # signal 0
25 23 # reset 0
26 24 # resetevent 0
... ... @@ -39,9 +37,6 @@
39 37 # blocked lock nr (0-7)
40 38 # blockedwake lock nr (0-7)
41 39 # unlocked lock nr (0-7)
42   -# lockedbkl dont care
43   -# blockedbkl dont care
44   -# unlockedbkl dont care
45 40 # opcodeeq command opcode or number
46 41 # opcodelt number
47 42 # opcodegt number
scripts/rt-tester/t2-l1-pi.tst
... ... @@ -19,8 +19,6 @@
19 19 # lockintnowait lock nr (0-7)
20 20 # lockcont lock nr (0-7)
21 21 # unlock lock nr (0-7)
22   -# lockbkl lock nr (0-7)
23   -# unlockbkl lock nr (0-7)
24 22 # signal 0
25 23 # reset 0
26 24 # resetevent 0
... ... @@ -39,9 +37,6 @@
39 37 # blocked lock nr (0-7)
40 38 # blockedwake lock nr (0-7)
41 39 # unlocked lock nr (0-7)
42   -# lockedbkl dont care
43   -# blockedbkl dont care
44   -# unlockedbkl dont care
45 40 # opcodeeq command opcode or number
46 41 # opcodelt number
47 42 # opcodegt number
scripts/rt-tester/t2-l1-signal.tst
... ... @@ -19,8 +19,6 @@
19 19 # lockintnowait lock nr (0-7)
20 20 # lockcont lock nr (0-7)
21 21 # unlock lock nr (0-7)
22   -# lockbkl lock nr (0-7)
23   -# unlockbkl lock nr (0-7)
24 22 # signal 0
25 23 # reset 0
26 24 # resetevent 0
... ... @@ -39,9 +37,6 @@
39 37 # blocked lock nr (0-7)
40 38 # blockedwake lock nr (0-7)
41 39 # unlocked lock nr (0-7)
42   -# lockedbkl dont care
43   -# blockedbkl dont care
44   -# unlockedbkl dont care
45 40 # opcodeeq command opcode or number
46 41 # opcodelt number
47 42 # opcodegt number
scripts/rt-tester/t2-l2-2rt-deadlock.tst
... ... @@ -19,8 +19,6 @@
19 19 # lockintnowait lock nr (0-7)
20 20 # lockcont lock nr (0-7)
21 21 # unlock lock nr (0-7)
22   -# lockbkl lock nr (0-7)
23   -# unlockbkl lock nr (0-7)
24 22 # signal 0
25 23 # reset 0
26 24 # resetevent 0
... ... @@ -39,9 +37,6 @@
39 37 # blocked lock nr (0-7)
40 38 # blockedwake lock nr (0-7)
41 39 # unlocked lock nr (0-7)
42   -# lockedbkl dont care
43   -# blockedbkl dont care
44   -# unlockedbkl dont care
45 40 # opcodeeq command opcode or number
46 41 # opcodelt number
47 42 # opcodegt number
scripts/rt-tester/t3-l1-pi-1rt.tst
... ... @@ -19,8 +19,6 @@
19 19 # lockintnowait lock nr (0-7)
20 20 # lockcont lock nr (0-7)
21 21 # unlock lock nr (0-7)
22   -# lockbkl lock nr (0-7)
23   -# unlockbkl lock nr (0-7)
24 22 # signal thread to signal (0-7)
25 23 # reset 0
26 24 # resetevent 0
... ... @@ -39,9 +37,6 @@
39 37 # blocked lock nr (0-7)
40 38 # blockedwake lock nr (0-7)
41 39 # unlocked lock nr (0-7)
42   -# lockedbkl dont care
43   -# blockedbkl dont care
44   -# unlockedbkl dont care
45 40 # opcodeeq command opcode or number
46 41 # opcodelt number
47 42 # opcodegt number
scripts/rt-tester/t3-l1-pi-2rt.tst
... ... @@ -19,8 +19,6 @@
19 19 # lockintnowait lock nr (0-7)
20 20 # lockcont lock nr (0-7)
21 21 # unlock lock nr (0-7)
22   -# lockbkl lock nr (0-7)
23   -# unlockbkl lock nr (0-7)
24 22 # signal thread to signal (0-7)
25 23 # reset 0
26 24 # resetevent 0
... ... @@ -39,9 +37,6 @@
39 37 # blocked lock nr (0-7)
40 38 # blockedwake lock nr (0-7)
41 39 # unlocked lock nr (0-7)
42   -# lockedbkl dont care
43   -# blockedbkl dont care
44   -# unlockedbkl dont care
45 40 # opcodeeq command opcode or number
46 41 # opcodelt number
47 42 # opcodegt number
scripts/rt-tester/t3-l1-pi-3rt.tst
... ... @@ -19,8 +19,6 @@
19 19 # lockintnowait lock nr (0-7)
20 20 # lockcont lock nr (0-7)
21 21 # unlock lock nr (0-7)
22   -# lockbkl lock nr (0-7)
23   -# unlockbkl lock nr (0-7)
24 22 # signal thread to signal (0-7)
25 23 # reset 0
26 24 # resetevent 0
... ... @@ -39,9 +37,6 @@
39 37 # blocked lock nr (0-7)
40 38 # blockedwake lock nr (0-7)
41 39 # unlocked lock nr (0-7)
42   -# lockedbkl dont care
43   -# blockedbkl dont care
44   -# unlockedbkl dont care
45 40 # opcodeeq command opcode or number
46 41 # opcodelt number
47 42 # opcodegt number
scripts/rt-tester/t3-l1-pi-signal.tst
... ... @@ -19,8 +19,6 @@
19 19 # lockintnowait lock nr (0-7)
20 20 # lockcont lock nr (0-7)
21 21 # unlock lock nr (0-7)
22   -# lockbkl lock nr (0-7)
23   -# unlockbkl lock nr (0-7)
24 22 # signal thread to signal (0-7)
25 23 # reset 0
26 24 # resetevent 0
... ... @@ -39,9 +37,6 @@
39 37 # blocked lock nr (0-7)
40 38 # blockedwake lock nr (0-7)
41 39 # unlocked lock nr (0-7)
42   -# lockedbkl dont care
43   -# blockedbkl dont care
44   -# unlockedbkl dont care
45 40 # opcodeeq command opcode or number
46 41 # opcodelt number
47 42 # opcodegt number
scripts/rt-tester/t3-l1-pi-steal.tst
... ... @@ -19,8 +19,6 @@
19 19 # lockintnowait lock nr (0-7)
20 20 # lockcont lock nr (0-7)
21 21 # unlock lock nr (0-7)
22   -# lockbkl lock nr (0-7)
23   -# unlockbkl lock nr (0-7)
24 22 # signal thread to signal (0-7)
25 23 # reset 0
26 24 # resetevent 0
... ... @@ -39,9 +37,6 @@
39 37 # blocked lock nr (0-7)
40 38 # blockedwake lock nr (0-7)
41 39 # unlocked lock nr (0-7)
42   -# lockedbkl dont care
43   -# blockedbkl dont care
44   -# unlockedbkl dont care
45 40 # opcodeeq command opcode or number
46 41 # opcodelt number
47 42 # opcodegt number
scripts/rt-tester/t3-l2-pi.tst
... ... @@ -19,8 +19,6 @@
19 19 # lockintnowait lock nr (0-7)
20 20 # lockcont lock nr (0-7)
21 21 # unlock lock nr (0-7)
22   -# lockbkl lock nr (0-7)
23   -# unlockbkl lock nr (0-7)
24 22 # signal thread to signal (0-7)
25 23 # reset 0
26 24 # resetevent 0
... ... @@ -39,9 +37,6 @@
39 37 # blocked lock nr (0-7)
40 38 # blockedwake lock nr (0-7)
41 39 # unlocked lock nr (0-7)
42   -# lockedbkl dont care
43   -# blockedbkl dont care
44   -# unlockedbkl dont care
45 40 # opcodeeq command opcode or number
46 41 # opcodelt number
47 42 # opcodegt number
scripts/rt-tester/t4-l2-pi-deboost.tst
... ... @@ -19,8 +19,6 @@
19 19 # lockintnowait lock nr (0-7)
20 20 # lockcont lock nr (0-7)
21 21 # unlock lock nr (0-7)
22   -# lockbkl lock nr (0-7)
23   -# unlockbkl lock nr (0-7)
24 22 # signal thread to signal (0-7)
25 23 # reset 0
26 24 # resetevent 0
... ... @@ -39,9 +37,6 @@
39 37 # blocked lock nr (0-7)
40 38 # blockedwake lock nr (0-7)
41 39 # unlocked lock nr (0-7)
42   -# lockedbkl dont care
43   -# blockedbkl dont care
44   -# unlockedbkl dont care
45 40 # opcodeeq command opcode or number
46 41 # opcodelt number
47 42 # opcodegt number
scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst
... ... @@ -19,8 +19,6 @@
19 19 # lockintnowait lock nr (0-7)
20 20 # lockcont lock nr (0-7)
21 21 # unlock lock nr (0-7)
22   -# lockbkl lock nr (0-7)
23   -# unlockbkl lock nr (0-7)
24 22 # signal thread to signal (0-7)
25 23 # reset 0
26 24 # resetevent 0
... ... @@ -39,9 +37,6 @@
39 37 # blocked lock nr (0-7)
40 38 # blockedwake lock nr (0-7)
41 39 # unlocked lock nr (0-7)
42   -# lockedbkl dont care
43   -# blockedbkl dont care
44   -# unlockedbkl dont care
45 40 # opcodeeq command opcode or number
46 41 # opcodelt number
47 42 # opcodegt number
scripts/rt-tester/t5-l4-pi-boost-deboost.tst
... ... @@ -19,8 +19,6 @@
19 19 # lockintnowait lock nr (0-7)
20 20 # lockcont lock nr (0-7)
21 21 # unlock lock nr (0-7)
22   -# lockbkl lock nr (0-7)
23   -# unlockbkl lock nr (0-7)
24 22 # signal thread to signal (0-7)
25 23 # reset 0
26 24 # resetevent 0
... ... @@ -39,9 +37,6 @@
39 37 # blocked lock nr (0-7)
40 38 # blockedwake lock nr (0-7)
41 39 # unlocked lock nr (0-7)
42   -# lockedbkl dont care
43   -# blockedbkl dont care
44   -# unlockedbkl dont care
45 40 # opcodeeq command opcode or number
46 41 # opcodelt number
47 42 # opcodegt number