Commit 9f0cbea0d8cc47801b853d3c61d0e17475b0cc89
Committed by Paul Mackerras
1 parent c6d4267ece
Exists in master and in 7 other branches
[POWERPC] Implement atomic{, 64}_{read, write}() without volatile
Instead, use asm() like all other atomic operations already do.

Also use inline functions instead of macros; this actually improves code
generation (some code becomes a little smaller, probably because of improved
alias information -- just a few hundred bytes total on a default kernel
build, nothing shocking).

Signed-off-by: Segher Boessenkool <segher@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
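A minimal sketch of a hypothetical caller (the names pending, producer() and consumer() are invented for illustration, and the includes reflect kernels of this era) shows why the asm()-based accessors behave like the old volatile macros: a __volatile__ asm cannot be deleted or hoisted out of a loop, so each atomic_read() still performs one real load and each atomic_set() one real store.

/* Hypothetical caller, for illustration only -- not part of this patch. */
#include <asm/atomic.h>
#include <asm/processor.h>	/* cpu_relax() */

static atomic_t pending = ATOMIC_INIT(0);

void producer(void)
{
	/* One stw of 1 to pending.counter. */
	atomic_set(&pending, 1);
}

void consumer(void)
{
	/*
	 * One lwz per iteration: the volatile asm in atomic_read() keeps
	 * the compiler from caching the value in a register, just as the
	 * old "volatile int counter" member used to.
	 */
	while (atomic_read(&pending) == 0)
		cpu_relax();
}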
Showing 1 changed file with 28 additions and 6 deletions
include/asm-powerpc/atomic.h
1 | #ifndef _ASM_POWERPC_ATOMIC_H_ | 1 | #ifndef _ASM_POWERPC_ATOMIC_H_ |
2 | #define _ASM_POWERPC_ATOMIC_H_ | 2 | #define _ASM_POWERPC_ATOMIC_H_ |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * PowerPC atomic operations | 5 | * PowerPC atomic operations |
6 | */ | 6 | */ |
7 | 7 | ||
8 | typedef struct { volatile int counter; } atomic_t; | 8 | typedef struct { int counter; } atomic_t; |
9 | 9 | ||
10 | #ifdef __KERNEL__ | 10 | #ifdef __KERNEL__ |
11 | #include <linux/compiler.h> | 11 | #include <linux/compiler.h> |
12 | #include <asm/synch.h> | 12 | #include <asm/synch.h> |
13 | #include <asm/asm-compat.h> | 13 | #include <asm/asm-compat.h> |
14 | #include <asm/system.h> | 14 | #include <asm/system.h> |
15 | 15 | ||
16 | #define ATOMIC_INIT(i) { (i) } | 16 | #define ATOMIC_INIT(i) { (i) } |
17 | 17 | ||
18 | #define atomic_read(v) ((v)->counter) | 18 | static __inline__ int atomic_read(const atomic_t *v) |
19 | #define atomic_set(v,i) (((v)->counter) = (i)) | 19 | { |
| | 20 | int t; |
20 | | 21 | |
| | 22 | __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter)); |
| | 23 | |
| | 24 | return t; |
| | 25 | } |
| | 26 | |
| | 27 | static __inline__ void atomic_set(atomic_t *v, int i) |
| | 28 | { |
| | 29 | __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i)); |
| | 30 | } |
| | 31 | |
21 | static __inline__ void atomic_add(int a, atomic_t *v) | 32 | static __inline__ void atomic_add(int a, atomic_t *v) |
22 | { | 33 | { |
23 | int t; | 34 | int t; |
24 | 35 | ||
25 | __asm__ __volatile__( | 36 | __asm__ __volatile__( |
26 | "1: lwarx %0,0,%3 # atomic_add\n\ | 37 | "1: lwarx %0,0,%3 # atomic_add\n\ |
27 | add %0,%2,%0\n" | 38 | add %0,%2,%0\n" |
28 | PPC405_ERR77(0,%3) | 39 | PPC405_ERR77(0,%3) |
29 | " stwcx. %0,0,%3 \n\ | 40 | " stwcx. %0,0,%3 \n\ |
30 | bne- 1b" | 41 | bne- 1b" |
31 | : "=&r" (t), "+m" (v->counter) | 42 | : "=&r" (t), "+m" (v->counter) |
32 | : "r" (a), "r" (&v->counter) | 43 | : "r" (a), "r" (&v->counter) |
33 | : "cc"); | 44 | : "cc"); |
34 | } | 45 | } |
35 | 46 | ||
36 | static __inline__ int atomic_add_return(int a, atomic_t *v) | 47 | static __inline__ int atomic_add_return(int a, atomic_t *v) |
37 | { | 48 | { |
38 | int t; | 49 | int t; |
39 | 50 | ||
40 | __asm__ __volatile__( | 51 | __asm__ __volatile__( |
41 | LWSYNC_ON_SMP | 52 | LWSYNC_ON_SMP |
42 | "1: lwarx %0,0,%2 # atomic_add_return\n\ | 53 | "1: lwarx %0,0,%2 # atomic_add_return\n\ |
43 | add %0,%1,%0\n" | 54 | add %0,%1,%0\n" |
44 | PPC405_ERR77(0,%2) | 55 | PPC405_ERR77(0,%2) |
45 | " stwcx. %0,0,%2 \n\ | 56 | " stwcx. %0,0,%2 \n\ |
46 | bne- 1b" | 57 | bne- 1b" |
47 | ISYNC_ON_SMP | 58 | ISYNC_ON_SMP |
48 | : "=&r" (t) | 59 | : "=&r" (t) |
49 | : "r" (a), "r" (&v->counter) | 60 | : "r" (a), "r" (&v->counter) |
50 | : "cc", "memory"); | 61 | : "cc", "memory"); |
51 | 62 | ||
52 | return t; | 63 | return t; |
53 | } | 64 | } |
54 | 65 | ||
55 | #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) | 66 | #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) |
56 | 67 | ||
57 | static __inline__ void atomic_sub(int a, atomic_t *v) | 68 | static __inline__ void atomic_sub(int a, atomic_t *v) |
58 | { | 69 | { |
59 | int t; | 70 | int t; |
60 | 71 | ||
61 | __asm__ __volatile__( | 72 | __asm__ __volatile__( |
62 | "1: lwarx %0,0,%3 # atomic_sub\n\ | 73 | "1: lwarx %0,0,%3 # atomic_sub\n\ |
63 | subf %0,%2,%0\n" | 74 | subf %0,%2,%0\n" |
64 | PPC405_ERR77(0,%3) | 75 | PPC405_ERR77(0,%3) |
65 | " stwcx. %0,0,%3 \n\ | 76 | " stwcx. %0,0,%3 \n\ |
66 | bne- 1b" | 77 | bne- 1b" |
67 | : "=&r" (t), "+m" (v->counter) | 78 | : "=&r" (t), "+m" (v->counter) |
68 | : "r" (a), "r" (&v->counter) | 79 | : "r" (a), "r" (&v->counter) |
69 | : "cc"); | 80 | : "cc"); |
70 | } | 81 | } |
71 | 82 | ||
72 | static __inline__ int atomic_sub_return(int a, atomic_t *v) | 83 | static __inline__ int atomic_sub_return(int a, atomic_t *v) |
73 | { | 84 | { |
74 | int t; | 85 | int t; |
75 | 86 | ||
76 | __asm__ __volatile__( | 87 | __asm__ __volatile__( |
77 | LWSYNC_ON_SMP | 88 | LWSYNC_ON_SMP |
78 | "1: lwarx %0,0,%2 # atomic_sub_return\n\ | 89 | "1: lwarx %0,0,%2 # atomic_sub_return\n\ |
79 | subf %0,%1,%0\n" | 90 | subf %0,%1,%0\n" |
80 | PPC405_ERR77(0,%2) | 91 | PPC405_ERR77(0,%2) |
81 | " stwcx. %0,0,%2 \n\ | 92 | " stwcx. %0,0,%2 \n\ |
82 | bne- 1b" | 93 | bne- 1b" |
83 | ISYNC_ON_SMP | 94 | ISYNC_ON_SMP |
84 | : "=&r" (t) | 95 | : "=&r" (t) |
85 | : "r" (a), "r" (&v->counter) | 96 | : "r" (a), "r" (&v->counter) |
86 | : "cc", "memory"); | 97 | : "cc", "memory"); |
87 | 98 | ||
88 | return t; | 99 | return t; |
89 | } | 100 | } |
90 | 101 | ||
91 | static __inline__ void atomic_inc(atomic_t *v) | 102 | static __inline__ void atomic_inc(atomic_t *v) |
92 | { | 103 | { |
93 | int t; | 104 | int t; |
94 | 105 | ||
95 | __asm__ __volatile__( | 106 | __asm__ __volatile__( |
96 | "1: lwarx %0,0,%2 # atomic_inc\n\ | 107 | "1: lwarx %0,0,%2 # atomic_inc\n\ |
97 | addic %0,%0,1\n" | 108 | addic %0,%0,1\n" |
98 | PPC405_ERR77(0,%2) | 109 | PPC405_ERR77(0,%2) |
99 | " stwcx. %0,0,%2 \n\ | 110 | " stwcx. %0,0,%2 \n\ |
100 | bne- 1b" | 111 | bne- 1b" |
101 | : "=&r" (t), "+m" (v->counter) | 112 | : "=&r" (t), "+m" (v->counter) |
102 | : "r" (&v->counter) | 113 | : "r" (&v->counter) |
103 | : "cc"); | 114 | : "cc"); |
104 | } | 115 | } |
105 | 116 | ||
106 | static __inline__ int atomic_inc_return(atomic_t *v) | 117 | static __inline__ int atomic_inc_return(atomic_t *v) |
107 | { | 118 | { |
108 | int t; | 119 | int t; |
109 | 120 | ||
110 | __asm__ __volatile__( | 121 | __asm__ __volatile__( |
111 | LWSYNC_ON_SMP | 122 | LWSYNC_ON_SMP |
112 | "1: lwarx %0,0,%1 # atomic_inc_return\n\ | 123 | "1: lwarx %0,0,%1 # atomic_inc_return\n\ |
113 | addic %0,%0,1\n" | 124 | addic %0,%0,1\n" |
114 | PPC405_ERR77(0,%1) | 125 | PPC405_ERR77(0,%1) |
115 | " stwcx. %0,0,%1 \n\ | 126 | " stwcx. %0,0,%1 \n\ |
116 | bne- 1b" | 127 | bne- 1b" |
117 | ISYNC_ON_SMP | 128 | ISYNC_ON_SMP |
118 | : "=&r" (t) | 129 | : "=&r" (t) |
119 | : "r" (&v->counter) | 130 | : "r" (&v->counter) |
120 | : "cc", "memory"); | 131 | : "cc", "memory"); |
121 | 132 | ||
122 | return t; | 133 | return t; |
123 | } | 134 | } |
124 | 135 | ||
125 | /* | 136 | /* |
126 | * atomic_inc_and_test - increment and test | 137 | * atomic_inc_and_test - increment and test |
127 | * @v: pointer of type atomic_t | 138 | * @v: pointer of type atomic_t |
128 | * | 139 | * |
129 | * Atomically increments @v by 1 | 140 | * Atomically increments @v by 1 |
130 | * and returns true if the result is zero, or false for all | 141 | * and returns true if the result is zero, or false for all |
131 | * other cases. | 142 | * other cases. |
132 | */ | 143 | */ |
133 | #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) | 144 | #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) |
134 | 145 | ||
135 | static __inline__ void atomic_dec(atomic_t *v) | 146 | static __inline__ void atomic_dec(atomic_t *v) |
136 | { | 147 | { |
137 | int t; | 148 | int t; |
138 | 149 | ||
139 | __asm__ __volatile__( | 150 | __asm__ __volatile__( |
140 | "1: lwarx %0,0,%2 # atomic_dec\n\ | 151 | "1: lwarx %0,0,%2 # atomic_dec\n\ |
141 | addic %0,%0,-1\n" | 152 | addic %0,%0,-1\n" |
142 | PPC405_ERR77(0,%2)\ | 153 | PPC405_ERR77(0,%2)\ |
143 | " stwcx. %0,0,%2\n\ | 154 | " stwcx. %0,0,%2\n\ |
144 | bne- 1b" | 155 | bne- 1b" |
145 | : "=&r" (t), "+m" (v->counter) | 156 | : "=&r" (t), "+m" (v->counter) |
146 | : "r" (&v->counter) | 157 | : "r" (&v->counter) |
147 | : "cc"); | 158 | : "cc"); |
148 | } | 159 | } |
149 | 160 | ||
150 | static __inline__ int atomic_dec_return(atomic_t *v) | 161 | static __inline__ int atomic_dec_return(atomic_t *v) |
151 | { | 162 | { |
152 | int t; | 163 | int t; |
153 | 164 | ||
154 | __asm__ __volatile__( | 165 | __asm__ __volatile__( |
155 | LWSYNC_ON_SMP | 166 | LWSYNC_ON_SMP |
156 | "1: lwarx %0,0,%1 # atomic_dec_return\n\ | 167 | "1: lwarx %0,0,%1 # atomic_dec_return\n\ |
157 | addic %0,%0,-1\n" | 168 | addic %0,%0,-1\n" |
158 | PPC405_ERR77(0,%1) | 169 | PPC405_ERR77(0,%1) |
159 | " stwcx. %0,0,%1\n\ | 170 | " stwcx. %0,0,%1\n\ |
160 | bne- 1b" | 171 | bne- 1b" |
161 | ISYNC_ON_SMP | 172 | ISYNC_ON_SMP |
162 | : "=&r" (t) | 173 | : "=&r" (t) |
163 | : "r" (&v->counter) | 174 | : "r" (&v->counter) |
164 | : "cc", "memory"); | 175 | : "cc", "memory"); |
165 | 176 | ||
166 | return t; | 177 | return t; |
167 | } | 178 | } |
168 | 179 | ||
169 | #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) | 180 | #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) |
170 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) | 181 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) |
171 | 182 | ||
172 | /** | 183 | /** |
173 | * atomic_add_unless - add unless the number is a given value | 184 | * atomic_add_unless - add unless the number is a given value |
174 | * @v: pointer of type atomic_t | 185 | * @v: pointer of type atomic_t |
175 | * @a: the amount to add to v... | 186 | * @a: the amount to add to v... |
176 | * @u: ...unless v is equal to u. | 187 | * @u: ...unless v is equal to u. |
177 | * | 188 | * |
178 | * Atomically adds @a to @v, so long as it was not @u. | 189 | * Atomically adds @a to @v, so long as it was not @u. |
179 | * Returns non-zero if @v was not @u, and zero otherwise. | 190 | * Returns non-zero if @v was not @u, and zero otherwise. |
180 | */ | 191 | */ |
181 | static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | 192 | static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) |
182 | { | 193 | { |
183 | int t; | 194 | int t; |
184 | 195 | ||
185 | __asm__ __volatile__ ( | 196 | __asm__ __volatile__ ( |
186 | LWSYNC_ON_SMP | 197 | LWSYNC_ON_SMP |
187 | "1: lwarx %0,0,%1 # atomic_add_unless\n\ | 198 | "1: lwarx %0,0,%1 # atomic_add_unless\n\ |
188 | cmpw 0,%0,%3 \n\ | 199 | cmpw 0,%0,%3 \n\ |
189 | beq- 2f \n\ | 200 | beq- 2f \n\ |
190 | add %0,%2,%0 \n" | 201 | add %0,%2,%0 \n" |
191 | PPC405_ERR77(0,%2) | 202 | PPC405_ERR77(0,%2) |
192 | " stwcx. %0,0,%1 \n\ | 203 | " stwcx. %0,0,%1 \n\ |
193 | bne- 1b \n" | 204 | bne- 1b \n" |
194 | ISYNC_ON_SMP | 205 | ISYNC_ON_SMP |
195 | " subf %0,%2,%0 \n\ | 206 | " subf %0,%2,%0 \n\ |
196 | 2:" | 207 | 2:" |
197 | : "=&r" (t) | 208 | : "=&r" (t) |
198 | : "r" (&v->counter), "r" (a), "r" (u) | 209 | : "r" (&v->counter), "r" (a), "r" (u) |
199 | : "cc", "memory"); | 210 | : "cc", "memory"); |
200 | 211 | ||
201 | return t != u; | 212 | return t != u; |
202 | } | 213 | } |
203 | 214 | ||
204 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | 215 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) |
205 | 216 | ||
206 | #define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0) | 217 | #define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0) |
207 | #define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0) | 218 | #define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0) |
208 | 219 | ||
209 | /* | 220 | /* |
210 | * Atomically test *v and decrement if it is greater than 0. | 221 | * Atomically test *v and decrement if it is greater than 0. |
211 | * The function returns the old value of *v minus 1, even if | 222 | * The function returns the old value of *v minus 1, even if |
212 | * the atomic variable, v, was not decremented. | 223 | * the atomic variable, v, was not decremented. |
213 | */ | 224 | */ |
214 | static __inline__ int atomic_dec_if_positive(atomic_t *v) | 225 | static __inline__ int atomic_dec_if_positive(atomic_t *v) |
215 | { | 226 | { |
216 | int t; | 227 | int t; |
217 | 228 | ||
218 | __asm__ __volatile__( | 229 | __asm__ __volatile__( |
219 | LWSYNC_ON_SMP | 230 | LWSYNC_ON_SMP |
220 | "1: lwarx %0,0,%1 # atomic_dec_if_positive\n\ | 231 | "1: lwarx %0,0,%1 # atomic_dec_if_positive\n\ |
221 | cmpwi %0,1\n\ | 232 | cmpwi %0,1\n\ |
222 | addi %0,%0,-1\n\ | 233 | addi %0,%0,-1\n\ |
223 | blt- 2f\n" | 234 | blt- 2f\n" |
224 | PPC405_ERR77(0,%1) | 235 | PPC405_ERR77(0,%1) |
225 | " stwcx. %0,0,%1\n\ | 236 | " stwcx. %0,0,%1\n\ |
226 | bne- 1b" | 237 | bne- 1b" |
227 | ISYNC_ON_SMP | 238 | ISYNC_ON_SMP |
228 | "\n\ | 239 | "\n\ |
229 | 2:" : "=&b" (t) | 240 | 2:" : "=&b" (t) |
230 | : "r" (&v->counter) | 241 | : "r" (&v->counter) |
231 | : "cc", "memory"); | 242 | : "cc", "memory"); |
232 | 243 | ||
233 | return t; | 244 | return t; |
234 | } | 245 | } |
235 | 246 | ||
236 | #define smp_mb__before_atomic_dec() smp_mb() | 247 | #define smp_mb__before_atomic_dec() smp_mb() |
237 | #define smp_mb__after_atomic_dec() smp_mb() | 248 | #define smp_mb__after_atomic_dec() smp_mb() |
238 | #define smp_mb__before_atomic_inc() smp_mb() | 249 | #define smp_mb__before_atomic_inc() smp_mb() |
239 | #define smp_mb__after_atomic_inc() smp_mb() | 250 | #define smp_mb__after_atomic_inc() smp_mb() |
240 | 251 | ||
241 | #ifdef __powerpc64__ | 252 | #ifdef __powerpc64__ |
242 | 253 | ||
243 | typedef struct { volatile long counter; } atomic64_t; | 254 | typedef struct { long counter; } atomic64_t; |
244 | 255 | ||
245 | #define ATOMIC64_INIT(i) { (i) } | 256 | #define ATOMIC64_INIT(i) { (i) } |
246 | 257 | ||
247 | #define atomic64_read(v) ((v)->counter) | 258 | static __inline__ long atomic64_read(const atomic64_t *v) |
248 | #define atomic64_set(v,i) (((v)->counter) = (i)) | 259 | { |
| | 260 | long t; |
| | 261 | |
| | 262 | __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter)); |
| | 263 | |
| | 264 | return t; |
| | 265 | } |
| | 266 | |
| | 267 | static __inline__ void atomic64_set(atomic64_t *v, long i) |
| | 268 | { |
| | 269 | __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i)); |
| | 270 | } |
249 | 271 | ||
250 | static __inline__ void atomic64_add(long a, atomic64_t *v) | 272 | static __inline__ void atomic64_add(long a, atomic64_t *v) |
251 | { | 273 | { |
252 | long t; | 274 | long t; |
253 | 275 | ||
254 | __asm__ __volatile__( | 276 | __asm__ __volatile__( |
255 | "1: ldarx %0,0,%3 # atomic64_add\n\ | 277 | "1: ldarx %0,0,%3 # atomic64_add\n\ |
256 | add %0,%2,%0\n\ | 278 | add %0,%2,%0\n\ |
257 | stdcx. %0,0,%3 \n\ | 279 | stdcx. %0,0,%3 \n\ |
258 | bne- 1b" | 280 | bne- 1b" |
259 | : "=&r" (t), "+m" (v->counter) | 281 | : "=&r" (t), "+m" (v->counter) |
260 | : "r" (a), "r" (&v->counter) | 282 | : "r" (a), "r" (&v->counter) |
261 | : "cc"); | 283 | : "cc"); |
262 | } | 284 | } |
263 | 285 | ||
264 | static __inline__ long atomic64_add_return(long a, atomic64_t *v) | 286 | static __inline__ long atomic64_add_return(long a, atomic64_t *v) |
265 | { | 287 | { |
266 | long t; | 288 | long t; |
267 | 289 | ||
268 | __asm__ __volatile__( | 290 | __asm__ __volatile__( |
269 | LWSYNC_ON_SMP | 291 | LWSYNC_ON_SMP |
270 | "1: ldarx %0,0,%2 # atomic64_add_return\n\ | 292 | "1: ldarx %0,0,%2 # atomic64_add_return\n\ |
271 | add %0,%1,%0\n\ | 293 | add %0,%1,%0\n\ |
272 | stdcx. %0,0,%2 \n\ | 294 | stdcx. %0,0,%2 \n\ |
273 | bne- 1b" | 295 | bne- 1b" |
274 | ISYNC_ON_SMP | 296 | ISYNC_ON_SMP |
275 | : "=&r" (t) | 297 | : "=&r" (t) |
276 | : "r" (a), "r" (&v->counter) | 298 | : "r" (a), "r" (&v->counter) |
277 | : "cc", "memory"); | 299 | : "cc", "memory"); |
278 | 300 | ||
279 | return t; | 301 | return t; |
280 | } | 302 | } |
281 | 303 | ||
282 | #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) | 304 | #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) |
283 | 305 | ||
284 | static __inline__ void atomic64_sub(long a, atomic64_t *v) | 306 | static __inline__ void atomic64_sub(long a, atomic64_t *v) |
285 | { | 307 | { |
286 | long t; | 308 | long t; |
287 | 309 | ||
288 | __asm__ __volatile__( | 310 | __asm__ __volatile__( |
289 | "1: ldarx %0,0,%3 # atomic64_sub\n\ | 311 | "1: ldarx %0,0,%3 # atomic64_sub\n\ |
290 | subf %0,%2,%0\n\ | 312 | subf %0,%2,%0\n\ |
291 | stdcx. %0,0,%3 \n\ | 313 | stdcx. %0,0,%3 \n\ |
292 | bne- 1b" | 314 | bne- 1b" |
293 | : "=&r" (t), "+m" (v->counter) | 315 | : "=&r" (t), "+m" (v->counter) |
294 | : "r" (a), "r" (&v->counter) | 316 | : "r" (a), "r" (&v->counter) |
295 | : "cc"); | 317 | : "cc"); |
296 | } | 318 | } |
297 | 319 | ||
298 | static __inline__ long atomic64_sub_return(long a, atomic64_t *v) | 320 | static __inline__ long atomic64_sub_return(long a, atomic64_t *v) |
299 | { | 321 | { |
300 | long t; | 322 | long t; |
301 | 323 | ||
302 | __asm__ __volatile__( | 324 | __asm__ __volatile__( |
303 | LWSYNC_ON_SMP | 325 | LWSYNC_ON_SMP |
304 | "1: ldarx %0,0,%2 # atomic64_sub_return\n\ | 326 | "1: ldarx %0,0,%2 # atomic64_sub_return\n\ |
305 | subf %0,%1,%0\n\ | 327 | subf %0,%1,%0\n\ |
306 | stdcx. %0,0,%2 \n\ | 328 | stdcx. %0,0,%2 \n\ |
307 | bne- 1b" | 329 | bne- 1b" |
308 | ISYNC_ON_SMP | 330 | ISYNC_ON_SMP |
309 | : "=&r" (t) | 331 | : "=&r" (t) |
310 | : "r" (a), "r" (&v->counter) | 332 | : "r" (a), "r" (&v->counter) |
311 | : "cc", "memory"); | 333 | : "cc", "memory"); |
312 | 334 | ||
313 | return t; | 335 | return t; |
314 | } | 336 | } |
315 | 337 | ||
316 | static __inline__ void atomic64_inc(atomic64_t *v) | 338 | static __inline__ void atomic64_inc(atomic64_t *v) |
317 | { | 339 | { |
318 | long t; | 340 | long t; |
319 | 341 | ||
320 | __asm__ __volatile__( | 342 | __asm__ __volatile__( |
321 | "1: ldarx %0,0,%2 # atomic64_inc\n\ | 343 | "1: ldarx %0,0,%2 # atomic64_inc\n\ |
322 | addic %0,%0,1\n\ | 344 | addic %0,%0,1\n\ |
323 | stdcx. %0,0,%2 \n\ | 345 | stdcx. %0,0,%2 \n\ |
324 | bne- 1b" | 346 | bne- 1b" |
325 | : "=&r" (t), "+m" (v->counter) | 347 | : "=&r" (t), "+m" (v->counter) |
326 | : "r" (&v->counter) | 348 | : "r" (&v->counter) |
327 | : "cc"); | 349 | : "cc"); |
328 | } | 350 | } |
329 | 351 | ||
330 | static __inline__ long atomic64_inc_return(atomic64_t *v) | 352 | static __inline__ long atomic64_inc_return(atomic64_t *v) |
331 | { | 353 | { |
332 | long t; | 354 | long t; |
333 | 355 | ||
334 | __asm__ __volatile__( | 356 | __asm__ __volatile__( |
335 | LWSYNC_ON_SMP | 357 | LWSYNC_ON_SMP |
336 | "1: ldarx %0,0,%1 # atomic64_inc_return\n\ | 358 | "1: ldarx %0,0,%1 # atomic64_inc_return\n\ |
337 | addic %0,%0,1\n\ | 359 | addic %0,%0,1\n\ |
338 | stdcx. %0,0,%1 \n\ | 360 | stdcx. %0,0,%1 \n\ |
339 | bne- 1b" | 361 | bne- 1b" |
340 | ISYNC_ON_SMP | 362 | ISYNC_ON_SMP |
341 | : "=&r" (t) | 363 | : "=&r" (t) |
342 | : "r" (&v->counter) | 364 | : "r" (&v->counter) |
343 | : "cc", "memory"); | 365 | : "cc", "memory"); |
344 | 366 | ||
345 | return t; | 367 | return t; |
346 | } | 368 | } |
347 | 369 | ||
348 | /* | 370 | /* |
349 | * atomic64_inc_and_test - increment and test | 371 | * atomic64_inc_and_test - increment and test |
350 | * @v: pointer of type atomic64_t | 372 | * @v: pointer of type atomic64_t |
351 | * | 373 | * |
352 | * Atomically increments @v by 1 | 374 | * Atomically increments @v by 1 |
353 | * and returns true if the result is zero, or false for all | 375 | * and returns true if the result is zero, or false for all |
354 | * other cases. | 376 | * other cases. |
355 | */ | 377 | */ |
356 | #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) | 378 | #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) |
357 | 379 | ||
358 | static __inline__ void atomic64_dec(atomic64_t *v) | 380 | static __inline__ void atomic64_dec(atomic64_t *v) |
359 | { | 381 | { |
360 | long t; | 382 | long t; |
361 | 383 | ||
362 | __asm__ __volatile__( | 384 | __asm__ __volatile__( |
363 | "1: ldarx %0,0,%2 # atomic64_dec\n\ | 385 | "1: ldarx %0,0,%2 # atomic64_dec\n\ |
364 | addic %0,%0,-1\n\ | 386 | addic %0,%0,-1\n\ |
365 | stdcx. %0,0,%2\n\ | 387 | stdcx. %0,0,%2\n\ |
366 | bne- 1b" | 388 | bne- 1b" |
367 | : "=&r" (t), "+m" (v->counter) | 389 | : "=&r" (t), "+m" (v->counter) |
368 | : "r" (&v->counter) | 390 | : "r" (&v->counter) |
369 | : "cc"); | 391 | : "cc"); |
370 | } | 392 | } |
371 | 393 | ||
372 | static __inline__ long atomic64_dec_return(atomic64_t *v) | 394 | static __inline__ long atomic64_dec_return(atomic64_t *v) |
373 | { | 395 | { |
374 | long t; | 396 | long t; |
375 | 397 | ||
376 | __asm__ __volatile__( | 398 | __asm__ __volatile__( |
377 | LWSYNC_ON_SMP | 399 | LWSYNC_ON_SMP |
378 | "1: ldarx %0,0,%1 # atomic64_dec_return\n\ | 400 | "1: ldarx %0,0,%1 # atomic64_dec_return\n\ |
379 | addic %0,%0,-1\n\ | 401 | addic %0,%0,-1\n\ |
380 | stdcx. %0,0,%1\n\ | 402 | stdcx. %0,0,%1\n\ |
381 | bne- 1b" | 403 | bne- 1b" |
382 | ISYNC_ON_SMP | 404 | ISYNC_ON_SMP |
383 | : "=&r" (t) | 405 | : "=&r" (t) |
384 | : "r" (&v->counter) | 406 | : "r" (&v->counter) |
385 | : "cc", "memory"); | 407 | : "cc", "memory"); |
386 | 408 | ||
387 | return t; | 409 | return t; |
388 | } | 410 | } |
389 | 411 | ||
390 | #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) | 412 | #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) |
391 | #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) | 413 | #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) |
392 | 414 | ||
393 | /* | 415 | /* |
394 | * Atomically test *v and decrement if it is greater than 0. | 416 | * Atomically test *v and decrement if it is greater than 0. |
395 | * The function returns the old value of *v minus 1. | 417 | * The function returns the old value of *v minus 1. |
396 | */ | 418 | */ |
397 | static __inline__ long atomic64_dec_if_positive(atomic64_t *v) | 419 | static __inline__ long atomic64_dec_if_positive(atomic64_t *v) |
398 | { | 420 | { |
399 | long t; | 421 | long t; |
400 | 422 | ||
401 | __asm__ __volatile__( | 423 | __asm__ __volatile__( |
402 | LWSYNC_ON_SMP | 424 | LWSYNC_ON_SMP |
403 | "1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\ | 425 | "1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\ |
404 | addic. %0,%0,-1\n\ | 426 | addic. %0,%0,-1\n\ |
405 | blt- 2f\n\ | 427 | blt- 2f\n\ |
406 | stdcx. %0,0,%1\n\ | 428 | stdcx. %0,0,%1\n\ |
407 | bne- 1b" | 429 | bne- 1b" |
408 | ISYNC_ON_SMP | 430 | ISYNC_ON_SMP |
409 | "\n\ | 431 | "\n\ |
410 | 2:" : "=&r" (t) | 432 | 2:" : "=&r" (t) |
411 | : "r" (&v->counter) | 433 | : "r" (&v->counter) |
412 | : "cc", "memory"); | 434 | : "cc", "memory"); |
413 | 435 | ||
414 | return t; | 436 | return t; |
415 | } | 437 | } |
416 | 438 | ||
417 | #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) | 439 | #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) |
418 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) | 440 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) |
419 | 441 | ||
420 | /** | 442 | /** |
421 | * atomic64_add_unless - add unless the number is a given value | 443 | * atomic64_add_unless - add unless the number is a given value |
422 | * @v: pointer of type atomic64_t | 444 | * @v: pointer of type atomic64_t |
423 | * @a: the amount to add to v... | 445 | * @a: the amount to add to v... |
424 | * @u: ...unless v is equal to u. | 446 | * @u: ...unless v is equal to u. |
425 | * | 447 | * |
426 | * Atomically adds @a to @v, so long as it was not @u. | 448 | * Atomically adds @a to @v, so long as it was not @u. |
427 | * Returns non-zero if @v was not @u, and zero otherwise. | 449 | * Returns non-zero if @v was not @u, and zero otherwise. |
428 | */ | 450 | */ |
429 | static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) | 451 | static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) |
430 | { | 452 | { |
431 | long t; | 453 | long t; |
432 | 454 | ||
433 | __asm__ __volatile__ ( | 455 | __asm__ __volatile__ ( |
434 | LWSYNC_ON_SMP | 456 | LWSYNC_ON_SMP |
435 | "1: ldarx %0,0,%1 # atomic_add_unless\n\ | 457 | "1: ldarx %0,0,%1 # atomic_add_unless\n\ |
436 | cmpd 0,%0,%3 \n\ | 458 | cmpd 0,%0,%3 \n\ |
437 | beq- 2f \n\ | 459 | beq- 2f \n\ |
438 | add %0,%2,%0 \n" | 460 | add %0,%2,%0 \n" |
439 | " stdcx. %0,0,%1 \n\ | 461 | " stdcx. %0,0,%1 \n\ |
440 | bne- 1b \n" | 462 | bne- 1b \n" |
441 | ISYNC_ON_SMP | 463 | ISYNC_ON_SMP |
442 | " subf %0,%2,%0 \n\ | 464 | " subf %0,%2,%0 \n\ |
443 | 2:" | 465 | 2:" |
444 | : "=&r" (t) | 466 | : "=&r" (t) |
445 | : "r" (&v->counter), "r" (a), "r" (u) | 467 | : "r" (&v->counter), "r" (a), "r" (u) |
446 | : "cc", "memory"); | 468 | : "cc", "memory"); |
447 | 469 | ||
448 | return t != u; | 470 | return t != u; |
449 | } | 471 | } |
450 | 472 | ||
451 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) | 473 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) |
452 | 474 | ||
453 | #endif /* __powerpc64__ */ | 475 | #endif /* __powerpc64__ */ |
454 | 476 | ||
455 | #include <asm-generic/atomic.h> | 477 | #include <asm-generic/atomic.h> |
456 | #endif /* __KERNEL__ */ | 478 | #endif /* __KERNEL__ */ |
457 | #endif /* _ASM_POWERPC_ATOMIC_H_ */ | 479 | #endif /* _ASM_POWERPC_ATOMIC_H_ */ |
458 | 480 |