Commit 6514f93a2ce643ef5914eae7ce49b978e1d356aa

Authored by Thomas Gleixner
Committed by Ingo Molnar
1 parent a33fff3a03

x86: use immediates instead of RW_LOCK_BIAS_STR

Use an immediate operand instead of RW_LOCK_BIAS_STR. This makes the
code more readable and gets rid of the string constant.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
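
The point of the change, for readers less familiar with GNU inline asm: the old
code had to carry a second, string-typed copy of the constant
(RW_LOCK_BIAS_STR) because the asm template is built by string pasting, while
an "i" (immediate) input constraint lets the compiler substitute the numeric
RW_LOCK_BIAS macro directly. A minimal, hypothetical userspace sketch of that
pattern (plain C, x86 target and GNU-style inline asm assumed; add_bias() is a
made-up name, this is not kernel code):

#include <stdio.h>

#define RW_LOCK_BIAS 0x01000000

static inline void add_bias(volatile int *counter)
{
	/* the compiler expands %1 to the immediate $0x01000000 */
	asm volatile("lock; addl %1, %0"
		     : "+m" (*counter)
		     : "i" (RW_LOCK_BIAS)
		     : "memory");
}

int main(void)
{
	volatile int lock = 0;

	add_bias(&lock);
	printf("lock = 0x%08x\n", (unsigned)lock);	/* prints 0x01000000 */
	return 0;
}

Seen this way, the commit is purely cosmetic: the same 0x01000000 immediate
ends up in the generated subl/addl instructions, only now it comes from the C
macro rather than from a pasted string.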

3 changed files with 7 additions and 8 deletions

include/asm-x86/rwlock.h
 #ifndef _ASM_X86_RWLOCK_H
 #define _ASM_X86_RWLOCK_H

 #define RW_LOCK_BIAS		 0x01000000
-#define RW_LOCK_BIAS_STR	"0x01000000"

 /* Actual code is in asm/spinlock.h or in arch/x86/lib/rwlock.S */

 #endif /* _ASM_X86_RWLOCK_H */
include/asm-x86/spinlock_32.h
 #ifndef __ASM_SPINLOCK_H
 #define __ASM_SPINLOCK_H

 #include <asm/atomic.h>
 #include <asm/rwlock.h>
 #include <asm/page.h>
 #include <asm/processor.h>

 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  *
  * Simple spin lock operations. There are two variants, one clears IRQ's
  * on the local processor, one does not.
  *
  * We make no fairness assumptions. They have a cost.
  *
  * (the type definitions are in asm/spinlock_types.h)
  */

 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
 	return *(volatile signed char *)(&(lock)->slock) <= 0;
 }

 static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	asm volatile(
 		"\n1:\t"
 		LOCK_PREFIX " ; decb %0\n\t"
 		"jns 3f\n"
 		"2:\t"
 		"rep;nop\n\t"
 		"cmpb $0,%0\n\t"
 		"jle 2b\n\t"
 		"jmp 1b\n"
 		"3:\n\t"
 		: "+m" (lock->slock) : : "memory");
 }

 /*
  * It is easier for the lock validator if interrupts are not re-enabled
  * in the middle of a lock-acquire. This is a performance feature anyway
  * so we turn it off:
  *
  * NOTE: there's an irqs-on section here, which normally would have to be
  * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
  */
 #ifndef CONFIG_PROVE_LOCKING
 static inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
 					 unsigned long flags)
 {
 	asm volatile(
 		"\n1:\t"
 		LOCK_PREFIX " ; decb %[slock]\n\t"
 		"jns 5f\n"
 		"2:\t"
 		"testl $0x200, %[flags]\n\t"
 		"jz 4f\n\t"
 		STI_STRING "\n"
 		"3:\t"
 		"rep;nop\n\t"
 		"cmpb $0, %[slock]\n\t"
 		"jle 3b\n\t"
 		CLI_STRING "\n\t"
 		"jmp 1b\n"
 		"4:\t"
 		"rep;nop\n\t"
 		"cmpb $0, %[slock]\n\t"
 		"jg 1b\n\t"
 		"jmp 4b\n"
 		"5:\n\t"
 		: [slock] "+m" (lock->slock)
 		: [flags] "r" (flags)
 		  CLI_STI_INPUT_ARGS
 		: "memory" CLI_STI_CLOBBERS);
 }
 #endif

 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	signed char oldval;

 	asm volatile(
 		"xchgb %b0,%1"
 		:"=q" (oldval), "+m" (lock->slock)
 		:"0" (0) : "memory");

 	return oldval > 0;
 }

 /*
  * __raw_spin_unlock based on writing $1 to the low byte.
  * This method works. Despite all the confusion.
  * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there)
  * (PPro errata 66, 92)
  */

 #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)

 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	asm volatile("movb $1,%0" : "=m" (lock->slock) :: "memory");
 }

 #else

 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	unsigned char oldval = 1;

 	asm volatile("xchgb %b0, %1"
 		     : "=q" (oldval), "+m" (lock->slock)
 		     : "0" (oldval) : "memory");
 }

 #endif

 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
 	while (__raw_spin_is_locked(lock))
 		cpu_relax();
 }

 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
  *
  * NOTE! it is quite common to have readers in interrupts
  * but no interrupt writers. For those circumstances we
  * can "mix" irq-safe locks - any writer needs to get a
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
  *
  * On x86, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.
  */

 static inline int __raw_read_can_lock(raw_rwlock_t *lock)
 {
 	return (int)(lock)->lock > 0;
 }

 static inline int __raw_write_can_lock(raw_rwlock_t *lock)
 {
 	return (lock)->lock == RW_LOCK_BIAS;
 }

 static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
 		     "jns 1f\n"
 		     "call __read_lock_failed\n\t"
 		     "1:\n"
 		     ::"a" (rw) : "memory");
 }

 static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-	asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t"
+	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
 		     "jz 1f\n"
 		     "call __write_lock_failed\n\t"
 		     "1:\n"
-		     ::"a" (rw) : "memory");
+		     ::"a" (rw), "i" (RW_LOCK_BIAS) : "memory");
 }

 static inline int __raw_read_trylock(raw_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;

 	atomic_dec(count);
 	if (atomic_read(count) >= 0)
 		return 1;
 	atomic_inc(count);
 	return 0;
 }

 static inline int __raw_write_trylock(raw_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;

 	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
 		return 1;
 	atomic_add(RW_LOCK_BIAS, count);
 	return 0;
 }

 static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
 }

 static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
-	asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
-		     : "+m" (rw->lock) : : "memory");
+	asm volatile(LOCK_PREFIX "addl %1, %0"
+		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
 }

 #define _raw_spin_relax(lock)	cpu_relax()
 #define _raw_read_relax(lock)	cpu_relax()
 #define _raw_write_relax(lock)	cpu_relax()

 #endif /* __ASM_SPINLOCK_H */
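
On the bias arithmetic used above: a count of RW_LOCK_BIAS means unlocked, each
reader subtracts 1, and a writer subtracts the whole bias, so the count drops
to exactly 0 only when a writer acquires an otherwise-free lock. A minimal
userspace sketch that mirrors __raw_read_trylock()/__raw_write_trylock() using
GCC/Clang __atomic builtins (an illustration under those assumptions, not the
kernel implementation, which stays in the asm above and in
arch/x86/lib/rwlock.S):

#include <stdio.h>

#define RW_LOCK_BIAS 0x01000000

static int lock = RW_LOCK_BIAS;	/* RW_LOCK_BIAS == unlocked */

static int model_read_trylock(void)
{
	/* a reader takes one unit; a writer's presence makes the count negative */
	if (__atomic_sub_fetch(&lock, 1, __ATOMIC_ACQUIRE) >= 0)
		return 1;
	__atomic_add_fetch(&lock, 1, __ATOMIC_RELEASE);	/* undo, locked out */
	return 0;
}

static int model_write_trylock(void)
{
	/* a writer needs the full bias: result 0 means no readers, no writer */
	if (__atomic_sub_fetch(&lock, RW_LOCK_BIAS, __ATOMIC_ACQUIRE) == 0)
		return 1;
	__atomic_add_fetch(&lock, RW_LOCK_BIAS, __ATOMIC_RELEASE);	/* undo */
	return 0;
}

int main(void)
{
	int got = model_read_trylock();
	printf("read_trylock:  %d, count now 0x%08x\n", got, (unsigned)lock);

	got = model_write_trylock();	/* fails while the reader is in */
	printf("write_trylock: %d, count now 0x%08x\n", got, (unsigned)lock);
	return 0;
}
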
include/asm-x86/spinlock_64.h
 #ifndef __ASM_SPINLOCK_H
 #define __ASM_SPINLOCK_H

 #include <asm/atomic.h>
 #include <asm/rwlock.h>
 #include <asm/page.h>
 #include <asm/processor.h>

 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  *
  * Simple spin lock operations. There are two variants, one clears IRQ's
  * on the local processor, one does not.
  *
  * We make no fairness assumptions. They have a cost.
  *
  * (the type definitions are in asm/spinlock_types.h)
  */

 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
 	return *(volatile signed int *)(&(lock)->slock) <= 0;
 }

 static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	asm volatile(
 		"\n1:\t"
 		LOCK_PREFIX " ; decl %0\n\t"
 		"jns 2f\n"
 		"3:\n"
 		"rep;nop\n\t"
 		"cmpl $0,%0\n\t"
 		"jle 3b\n\t"
 		"jmp 1b\n"
 		"2:\t"
 		: "+m" (lock->slock) : : "memory");
 }

 /*
  * It is easier for the lock validator if interrupts are not re-enabled
  * in the middle of a lock-acquire. This is a performance feature anyway
  * so we turn it off:
  *
  * NOTE: there's an irqs-on section here, which normally would have to be
  * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
  */
 #ifndef CONFIG_PROVE_LOCKING
 static inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
 					 unsigned long flags)
 {
 	asm volatile(
 		"\n1:\t"
 		LOCK_PREFIX " ; decl %0\n\t"
 		"jns 5f\n"
 		"testl $0x200, %1\n\t"	/* interrupts were disabled? */
 		"jz 4f\n\t"
 		STI_STRING "\n"
 		"3:\t"
 		"rep;nop\n\t"
 		"cmpl $0, %0\n\t"
 		"jle 3b\n\t"
 		CLI_STRING "\n\t"
 		"jmp 1b\n"
 		"4:\t"
 		"rep;nop\n\t"
 		"cmpl $0, %0\n\t"
 		"jg 1b\n\t"
 		"jmp 4b\n"
 		"5:\n\t"
 		: "+m" (lock->slock)
 		: "r" ((unsigned)flags) CLI_STI_INPUT_ARGS
 		: "memory" CLI_STI_CLOBBERS);
 }
 #endif

 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	int oldval;

 	asm volatile(
 		"xchgl %0,%1"
 		:"=q" (oldval), "+m" (lock->slock)
 		:"0" (0) : "memory");

 	return oldval > 0;
 }

 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	asm volatile("movl $1,%0" : "=m" (lock->slock) :: "memory");
 }

 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
 	while (__raw_spin_is_locked(lock))
 		cpu_relax();
 }

 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
  *
  * NOTE! it is quite common to have readers in interrupts
  * but no interrupt writers. For those circumstances we
  * can "mix" irq-safe locks - any writer needs to get a
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
  *
  * On x86, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.
  */

 static inline int __raw_read_can_lock(raw_rwlock_t *lock)
 {
 	return (int)(lock)->lock > 0;
 }

 static inline int __raw_write_can_lock(raw_rwlock_t *lock)
 {
 	return (lock)->lock == RW_LOCK_BIAS;
 }

 static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
 		     "jns 1f\n"
 		     "call __read_lock_failed\n\t"
 		     "1:\n"
-		     ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
+		     ::"D" (rw) : "memory");
 }

 static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
 		     "jz 1f\n"
 		     "call __write_lock_failed\n\t"
 		     "1:\n"
 		     ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
 }

 static inline int __raw_read_trylock(raw_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;

 	atomic_dec(count);
 	if (atomic_read(count) >= 0)
 		return 1;
 	atomic_inc(count);
 	return 0;
 }

 static inline int __raw_write_trylock(raw_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;

 	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
 		return 1;
 	atomic_add(RW_LOCK_BIAS, count);
 	return 0;
 }

 static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
 }

 static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
-	asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
-		     : "+m" (rw->lock) : : "memory");
+	asm volatile(LOCK_PREFIX "addl %1, %0"
+		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
 }

 #define _raw_spin_relax(lock)	cpu_relax()
 #define _raw_read_relax(lock)	cpu_relax()
 #define _raw_write_relax(lock)	cpu_relax()

 #endif /* __ASM_SPINLOCK_H */