Commit 0624517d809b1cf53c977335c9bda4c216cbddee
Committed by: Linus Torvalds
1 parent: 1977f03272
Exists in: master and in 7 other branches
forbid asm/bitops.h direct inclusion

Because of compile errors that may occur after bit changes if asm/bitops.h is included directly without e.g. linux/kernel.h (which includes linux/bitops.h), forbid direct inclusion of asm/bitops.h. Thanks to Adrian Bunk.

Signed-off-by: Jiri Slaby <jirislaby@gmail.com>
Cc: Adrian Bunk <bunk@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
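Each header touched by this commit gains the same four-line guard near its top, as the diffs below show. A minimal sketch of the intended usage follows; the consumer file and example_flag_test() are hypothetical and only illustrate the difference between direct and indirect inclusion.

	/* Guard added near the top of each architecture's bitops.h by this commit: */
	#ifndef _LINUX_BITOPS_H
	#error only <linux/bitops.h> can be included directly
	#endif

	/*
	 * Hypothetical consumer (illustration only, not part of the commit).
	 * <linux/bitops.h> defines _LINUX_BITOPS_H and then includes the
	 * architecture's <asm/bitops.h> itself, so the guard is satisfied.
	 */
	#include <linux/bitops.h>		/* OK: asm/bitops.h is pulled in indirectly */
	/* #include <asm/bitops.h> */		/* would now trip the #error above */

	static int example_flag_test(unsigned long *flags)
	{
		return test_and_set_bit(0, flags);	/* atomic bitop made visible via linux/bitops.h */
	}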
Showing 25 changed files with 102 additions and 0 deletions (inline diff)
- include/asm-alpha/bitops.h
- include/asm-arm/bitops.h
- include/asm-avr32/bitops.h
- include/asm-blackfin/bitops.h
- include/asm-cris/bitops.h
- include/asm-frv/bitops.h
- include/asm-generic/bitops.h
- include/asm-h8300/bitops.h
- include/asm-ia64/bitops.h
- include/asm-m32r/bitops.h
- include/asm-m68k/bitops.h
- include/asm-m68knommu/bitops.h
- include/asm-mips/bitops.h
- include/asm-parisc/bitops.h
- include/asm-powerpc/bitops.h
- include/asm-s390/bitops.h
- include/asm-sh/bitops.h
- include/asm-sh64/bitops.h
- include/asm-sparc/bitops.h
- include/asm-sparc64/bitops.h
- include/asm-um/bitops.h
- include/asm-v850/bitops.h
- include/asm-x86/bitops_32.h
- include/asm-x86/bitops_64.h
- include/asm-xtensa/bitops.h
include/asm-alpha/bitops.h
1 | #ifndef _ALPHA_BITOPS_H | 1 | #ifndef _ALPHA_BITOPS_H |
2 | #define _ALPHA_BITOPS_H | 2 | #define _ALPHA_BITOPS_H |
3 | 3 | ||
4 | #ifndef _LINUX_BITOPS_H | ||
5 | #error only <linux/bitops.h> can be included directly | ||
6 | #endif | ||
7 | |||
4 | #include <asm/compiler.h> | 8 | #include <asm/compiler.h> |
5 | #include <asm/barrier.h> | 9 | #include <asm/barrier.h> |
6 | 10 | ||
7 | /* | 11 | /* |
8 | * Copyright 1994, Linus Torvalds. | 12 | * Copyright 1994, Linus Torvalds. |
9 | */ | 13 | */ |
10 | 14 | ||
11 | /* | 15 | /* |
12 | * These have to be done with inline assembly: that way the bit-setting | 16 | * These have to be done with inline assembly: that way the bit-setting |
13 | * is guaranteed to be atomic. All bit operations return 0 if the bit | 17 | * is guaranteed to be atomic. All bit operations return 0 if the bit |
14 | * was cleared before the operation and != 0 if it was not. | 18 | * was cleared before the operation and != 0 if it was not. |
15 | * | 19 | * |
16 | * To get proper branch prediction for the main line, we must branch | 20 | * To get proper branch prediction for the main line, we must branch |
17 | * forward to code at the end of this object's .text section, then | 21 | * forward to code at the end of this object's .text section, then |
18 | * branch back to restart the operation. | 22 | * branch back to restart the operation. |
19 | * | 23 | * |
20 | * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1). | 24 | * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1). |
21 | */ | 25 | */ |
22 | 26 | ||
23 | static inline void | 27 | static inline void |
24 | set_bit(unsigned long nr, volatile void * addr) | 28 | set_bit(unsigned long nr, volatile void * addr) |
25 | { | 29 | { |
26 | unsigned long temp; | 30 | unsigned long temp; |
27 | int *m = ((int *) addr) + (nr >> 5); | 31 | int *m = ((int *) addr) + (nr >> 5); |
28 | 32 | ||
29 | __asm__ __volatile__( | 33 | __asm__ __volatile__( |
30 | "1: ldl_l %0,%3\n" | 34 | "1: ldl_l %0,%3\n" |
31 | " bis %0,%2,%0\n" | 35 | " bis %0,%2,%0\n" |
32 | " stl_c %0,%1\n" | 36 | " stl_c %0,%1\n" |
33 | " beq %0,2f\n" | 37 | " beq %0,2f\n" |
34 | ".subsection 2\n" | 38 | ".subsection 2\n" |
35 | "2: br 1b\n" | 39 | "2: br 1b\n" |
36 | ".previous" | 40 | ".previous" |
37 | :"=&r" (temp), "=m" (*m) | 41 | :"=&r" (temp), "=m" (*m) |
38 | :"Ir" (1UL << (nr & 31)), "m" (*m)); | 42 | :"Ir" (1UL << (nr & 31)), "m" (*m)); |
39 | } | 43 | } |
40 | 44 | ||
41 | /* | 45 | /* |
42 | * WARNING: non atomic version. | 46 | * WARNING: non atomic version. |
43 | */ | 47 | */ |
44 | static inline void | 48 | static inline void |
45 | __set_bit(unsigned long nr, volatile void * addr) | 49 | __set_bit(unsigned long nr, volatile void * addr) |
46 | { | 50 | { |
47 | int *m = ((int *) addr) + (nr >> 5); | 51 | int *m = ((int *) addr) + (nr >> 5); |
48 | 52 | ||
49 | *m |= 1 << (nr & 31); | 53 | *m |= 1 << (nr & 31); |
50 | } | 54 | } |
51 | 55 | ||
52 | #define smp_mb__before_clear_bit() smp_mb() | 56 | #define smp_mb__before_clear_bit() smp_mb() |
53 | #define smp_mb__after_clear_bit() smp_mb() | 57 | #define smp_mb__after_clear_bit() smp_mb() |
54 | 58 | ||
55 | static inline void | 59 | static inline void |
56 | clear_bit(unsigned long nr, volatile void * addr) | 60 | clear_bit(unsigned long nr, volatile void * addr) |
57 | { | 61 | { |
58 | unsigned long temp; | 62 | unsigned long temp; |
59 | int *m = ((int *) addr) + (nr >> 5); | 63 | int *m = ((int *) addr) + (nr >> 5); |
60 | 64 | ||
61 | __asm__ __volatile__( | 65 | __asm__ __volatile__( |
62 | "1: ldl_l %0,%3\n" | 66 | "1: ldl_l %0,%3\n" |
63 | " bic %0,%2,%0\n" | 67 | " bic %0,%2,%0\n" |
64 | " stl_c %0,%1\n" | 68 | " stl_c %0,%1\n" |
65 | " beq %0,2f\n" | 69 | " beq %0,2f\n" |
66 | ".subsection 2\n" | 70 | ".subsection 2\n" |
67 | "2: br 1b\n" | 71 | "2: br 1b\n" |
68 | ".previous" | 72 | ".previous" |
69 | :"=&r" (temp), "=m" (*m) | 73 | :"=&r" (temp), "=m" (*m) |
70 | :"Ir" (1UL << (nr & 31)), "m" (*m)); | 74 | :"Ir" (1UL << (nr & 31)), "m" (*m)); |
71 | } | 75 | } |
72 | 76 | ||
73 | static inline void | 77 | static inline void |
74 | clear_bit_unlock(unsigned long nr, volatile void * addr) | 78 | clear_bit_unlock(unsigned long nr, volatile void * addr) |
75 | { | 79 | { |
76 | smp_mb(); | 80 | smp_mb(); |
77 | clear_bit(nr, addr); | 81 | clear_bit(nr, addr); |
78 | } | 82 | } |
79 | 83 | ||
80 | /* | 84 | /* |
81 | * WARNING: non atomic version. | 85 | * WARNING: non atomic version. |
82 | */ | 86 | */ |
83 | static __inline__ void | 87 | static __inline__ void |
84 | __clear_bit(unsigned long nr, volatile void * addr) | 88 | __clear_bit(unsigned long nr, volatile void * addr) |
85 | { | 89 | { |
86 | int *m = ((int *) addr) + (nr >> 5); | 90 | int *m = ((int *) addr) + (nr >> 5); |
87 | 91 | ||
88 | *m &= ~(1 << (nr & 31)); | 92 | *m &= ~(1 << (nr & 31)); |
89 | } | 93 | } |
90 | 94 | ||
91 | static inline void | 95 | static inline void |
92 | __clear_bit_unlock(unsigned long nr, volatile void * addr) | 96 | __clear_bit_unlock(unsigned long nr, volatile void * addr) |
93 | { | 97 | { |
94 | smp_mb(); | 98 | smp_mb(); |
95 | __clear_bit(nr, addr); | 99 | __clear_bit(nr, addr); |
96 | } | 100 | } |
97 | 101 | ||
98 | static inline void | 102 | static inline void |
99 | change_bit(unsigned long nr, volatile void * addr) | 103 | change_bit(unsigned long nr, volatile void * addr) |
100 | { | 104 | { |
101 | unsigned long temp; | 105 | unsigned long temp; |
102 | int *m = ((int *) addr) + (nr >> 5); | 106 | int *m = ((int *) addr) + (nr >> 5); |
103 | 107 | ||
104 | __asm__ __volatile__( | 108 | __asm__ __volatile__( |
105 | "1: ldl_l %0,%3\n" | 109 | "1: ldl_l %0,%3\n" |
106 | " xor %0,%2,%0\n" | 110 | " xor %0,%2,%0\n" |
107 | " stl_c %0,%1\n" | 111 | " stl_c %0,%1\n" |
108 | " beq %0,2f\n" | 112 | " beq %0,2f\n" |
109 | ".subsection 2\n" | 113 | ".subsection 2\n" |
110 | "2: br 1b\n" | 114 | "2: br 1b\n" |
111 | ".previous" | 115 | ".previous" |
112 | :"=&r" (temp), "=m" (*m) | 116 | :"=&r" (temp), "=m" (*m) |
113 | :"Ir" (1UL << (nr & 31)), "m" (*m)); | 117 | :"Ir" (1UL << (nr & 31)), "m" (*m)); |
114 | } | 118 | } |
115 | 119 | ||
116 | /* | 120 | /* |
117 | * WARNING: non atomic version. | 121 | * WARNING: non atomic version. |
118 | */ | 122 | */ |
119 | static __inline__ void | 123 | static __inline__ void |
120 | __change_bit(unsigned long nr, volatile void * addr) | 124 | __change_bit(unsigned long nr, volatile void * addr) |
121 | { | 125 | { |
122 | int *m = ((int *) addr) + (nr >> 5); | 126 | int *m = ((int *) addr) + (nr >> 5); |
123 | 127 | ||
124 | *m ^= 1 << (nr & 31); | 128 | *m ^= 1 << (nr & 31); |
125 | } | 129 | } |
126 | 130 | ||
127 | static inline int | 131 | static inline int |
128 | test_and_set_bit(unsigned long nr, volatile void *addr) | 132 | test_and_set_bit(unsigned long nr, volatile void *addr) |
129 | { | 133 | { |
130 | unsigned long oldbit; | 134 | unsigned long oldbit; |
131 | unsigned long temp; | 135 | unsigned long temp; |
132 | int *m = ((int *) addr) + (nr >> 5); | 136 | int *m = ((int *) addr) + (nr >> 5); |
133 | 137 | ||
134 | __asm__ __volatile__( | 138 | __asm__ __volatile__( |
135 | #ifdef CONFIG_SMP | 139 | #ifdef CONFIG_SMP |
136 | " mb\n" | 140 | " mb\n" |
137 | #endif | 141 | #endif |
138 | "1: ldl_l %0,%4\n" | 142 | "1: ldl_l %0,%4\n" |
139 | " and %0,%3,%2\n" | 143 | " and %0,%3,%2\n" |
140 | " bne %2,2f\n" | 144 | " bne %2,2f\n" |
141 | " xor %0,%3,%0\n" | 145 | " xor %0,%3,%0\n" |
142 | " stl_c %0,%1\n" | 146 | " stl_c %0,%1\n" |
143 | " beq %0,3f\n" | 147 | " beq %0,3f\n" |
144 | "2:\n" | 148 | "2:\n" |
145 | #ifdef CONFIG_SMP | 149 | #ifdef CONFIG_SMP |
146 | " mb\n" | 150 | " mb\n" |
147 | #endif | 151 | #endif |
148 | ".subsection 2\n" | 152 | ".subsection 2\n" |
149 | "3: br 1b\n" | 153 | "3: br 1b\n" |
150 | ".previous" | 154 | ".previous" |
151 | :"=&r" (temp), "=m" (*m), "=&r" (oldbit) | 155 | :"=&r" (temp), "=m" (*m), "=&r" (oldbit) |
152 | :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); | 156 | :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); |
153 | 157 | ||
154 | return oldbit != 0; | 158 | return oldbit != 0; |
155 | } | 159 | } |
156 | 160 | ||
157 | static inline int | 161 | static inline int |
158 | test_and_set_bit_lock(unsigned long nr, volatile void *addr) | 162 | test_and_set_bit_lock(unsigned long nr, volatile void *addr) |
159 | { | 163 | { |
160 | unsigned long oldbit; | 164 | unsigned long oldbit; |
161 | unsigned long temp; | 165 | unsigned long temp; |
162 | int *m = ((int *) addr) + (nr >> 5); | 166 | int *m = ((int *) addr) + (nr >> 5); |
163 | 167 | ||
164 | __asm__ __volatile__( | 168 | __asm__ __volatile__( |
165 | "1: ldl_l %0,%4\n" | 169 | "1: ldl_l %0,%4\n" |
166 | " and %0,%3,%2\n" | 170 | " and %0,%3,%2\n" |
167 | " bne %2,2f\n" | 171 | " bne %2,2f\n" |
168 | " xor %0,%3,%0\n" | 172 | " xor %0,%3,%0\n" |
169 | " stl_c %0,%1\n" | 173 | " stl_c %0,%1\n" |
170 | " beq %0,3f\n" | 174 | " beq %0,3f\n" |
171 | "2:\n" | 175 | "2:\n" |
172 | #ifdef CONFIG_SMP | 176 | #ifdef CONFIG_SMP |
173 | " mb\n" | 177 | " mb\n" |
174 | #endif | 178 | #endif |
175 | ".subsection 2\n" | 179 | ".subsection 2\n" |
176 | "3: br 1b\n" | 180 | "3: br 1b\n" |
177 | ".previous" | 181 | ".previous" |
178 | :"=&r" (temp), "=m" (*m), "=&r" (oldbit) | 182 | :"=&r" (temp), "=m" (*m), "=&r" (oldbit) |
179 | :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); | 183 | :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); |
180 | 184 | ||
181 | return oldbit != 0; | 185 | return oldbit != 0; |
182 | } | 186 | } |
183 | 187 | ||
184 | /* | 188 | /* |
185 | * WARNING: non atomic version. | 189 | * WARNING: non atomic version. |
186 | */ | 190 | */ |
187 | static inline int | 191 | static inline int |
188 | __test_and_set_bit(unsigned long nr, volatile void * addr) | 192 | __test_and_set_bit(unsigned long nr, volatile void * addr) |
189 | { | 193 | { |
190 | unsigned long mask = 1 << (nr & 0x1f); | 194 | unsigned long mask = 1 << (nr & 0x1f); |
191 | int *m = ((int *) addr) + (nr >> 5); | 195 | int *m = ((int *) addr) + (nr >> 5); |
192 | int old = *m; | 196 | int old = *m; |
193 | 197 | ||
194 | *m = old | mask; | 198 | *m = old | mask; |
195 | return (old & mask) != 0; | 199 | return (old & mask) != 0; |
196 | } | 200 | } |
197 | 201 | ||
198 | static inline int | 202 | static inline int |
199 | test_and_clear_bit(unsigned long nr, volatile void * addr) | 203 | test_and_clear_bit(unsigned long nr, volatile void * addr) |
200 | { | 204 | { |
201 | unsigned long oldbit; | 205 | unsigned long oldbit; |
202 | unsigned long temp; | 206 | unsigned long temp; |
203 | int *m = ((int *) addr) + (nr >> 5); | 207 | int *m = ((int *) addr) + (nr >> 5); |
204 | 208 | ||
205 | __asm__ __volatile__( | 209 | __asm__ __volatile__( |
206 | #ifdef CONFIG_SMP | 210 | #ifdef CONFIG_SMP |
207 | " mb\n" | 211 | " mb\n" |
208 | #endif | 212 | #endif |
209 | "1: ldl_l %0,%4\n" | 213 | "1: ldl_l %0,%4\n" |
210 | " and %0,%3,%2\n" | 214 | " and %0,%3,%2\n" |
211 | " beq %2,2f\n" | 215 | " beq %2,2f\n" |
212 | " xor %0,%3,%0\n" | 216 | " xor %0,%3,%0\n" |
213 | " stl_c %0,%1\n" | 217 | " stl_c %0,%1\n" |
214 | " beq %0,3f\n" | 218 | " beq %0,3f\n" |
215 | "2:\n" | 219 | "2:\n" |
216 | #ifdef CONFIG_SMP | 220 | #ifdef CONFIG_SMP |
217 | " mb\n" | 221 | " mb\n" |
218 | #endif | 222 | #endif |
219 | ".subsection 2\n" | 223 | ".subsection 2\n" |
220 | "3: br 1b\n" | 224 | "3: br 1b\n" |
221 | ".previous" | 225 | ".previous" |
222 | :"=&r" (temp), "=m" (*m), "=&r" (oldbit) | 226 | :"=&r" (temp), "=m" (*m), "=&r" (oldbit) |
223 | :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); | 227 | :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); |
224 | 228 | ||
225 | return oldbit != 0; | 229 | return oldbit != 0; |
226 | } | 230 | } |
227 | 231 | ||
228 | /* | 232 | /* |
229 | * WARNING: non atomic version. | 233 | * WARNING: non atomic version. |
230 | */ | 234 | */ |
231 | static inline int | 235 | static inline int |
232 | __test_and_clear_bit(unsigned long nr, volatile void * addr) | 236 | __test_and_clear_bit(unsigned long nr, volatile void * addr) |
233 | { | 237 | { |
234 | unsigned long mask = 1 << (nr & 0x1f); | 238 | unsigned long mask = 1 << (nr & 0x1f); |
235 | int *m = ((int *) addr) + (nr >> 5); | 239 | int *m = ((int *) addr) + (nr >> 5); |
236 | int old = *m; | 240 | int old = *m; |
237 | 241 | ||
238 | *m = old & ~mask; | 242 | *m = old & ~mask; |
239 | return (old & mask) != 0; | 243 | return (old & mask) != 0; |
240 | } | 244 | } |
241 | 245 | ||
242 | static inline int | 246 | static inline int |
243 | test_and_change_bit(unsigned long nr, volatile void * addr) | 247 | test_and_change_bit(unsigned long nr, volatile void * addr) |
244 | { | 248 | { |
245 | unsigned long oldbit; | 249 | unsigned long oldbit; |
246 | unsigned long temp; | 250 | unsigned long temp; |
247 | int *m = ((int *) addr) + (nr >> 5); | 251 | int *m = ((int *) addr) + (nr >> 5); |
248 | 252 | ||
249 | __asm__ __volatile__( | 253 | __asm__ __volatile__( |
250 | #ifdef CONFIG_SMP | 254 | #ifdef CONFIG_SMP |
251 | " mb\n" | 255 | " mb\n" |
252 | #endif | 256 | #endif |
253 | "1: ldl_l %0,%4\n" | 257 | "1: ldl_l %0,%4\n" |
254 | " and %0,%3,%2\n" | 258 | " and %0,%3,%2\n" |
255 | " xor %0,%3,%0\n" | 259 | " xor %0,%3,%0\n" |
256 | " stl_c %0,%1\n" | 260 | " stl_c %0,%1\n" |
257 | " beq %0,3f\n" | 261 | " beq %0,3f\n" |
258 | #ifdef CONFIG_SMP | 262 | #ifdef CONFIG_SMP |
259 | " mb\n" | 263 | " mb\n" |
260 | #endif | 264 | #endif |
261 | ".subsection 2\n" | 265 | ".subsection 2\n" |
262 | "3: br 1b\n" | 266 | "3: br 1b\n" |
263 | ".previous" | 267 | ".previous" |
264 | :"=&r" (temp), "=m" (*m), "=&r" (oldbit) | 268 | :"=&r" (temp), "=m" (*m), "=&r" (oldbit) |
265 | :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); | 269 | :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); |
266 | 270 | ||
267 | return oldbit != 0; | 271 | return oldbit != 0; |
268 | } | 272 | } |
269 | 273 | ||
270 | /* | 274 | /* |
271 | * WARNING: non atomic version. | 275 | * WARNING: non atomic version. |
272 | */ | 276 | */ |
273 | static __inline__ int | 277 | static __inline__ int |
274 | __test_and_change_bit(unsigned long nr, volatile void * addr) | 278 | __test_and_change_bit(unsigned long nr, volatile void * addr) |
275 | { | 279 | { |
276 | unsigned long mask = 1 << (nr & 0x1f); | 280 | unsigned long mask = 1 << (nr & 0x1f); |
277 | int *m = ((int *) addr) + (nr >> 5); | 281 | int *m = ((int *) addr) + (nr >> 5); |
278 | int old = *m; | 282 | int old = *m; |
279 | 283 | ||
280 | *m = old ^ mask; | 284 | *m = old ^ mask; |
281 | return (old & mask) != 0; | 285 | return (old & mask) != 0; |
282 | } | 286 | } |
283 | 287 | ||
284 | static inline int | 288 | static inline int |
285 | test_bit(int nr, const volatile void * addr) | 289 | test_bit(int nr, const volatile void * addr) |
286 | { | 290 | { |
287 | return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL; | 291 | return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL; |
288 | } | 292 | } |
289 | 293 | ||
290 | /* | 294 | /* |
291 | * ffz = Find First Zero in word. Undefined if no zero exists, | 295 | * ffz = Find First Zero in word. Undefined if no zero exists, |
292 | * so code should check against ~0UL first.. | 296 | * so code should check against ~0UL first.. |
293 | * | 297 | * |
294 | * Do a binary search on the bits. Due to the nature of large | 298 | * Do a binary search on the bits. Due to the nature of large |
295 | * constants on the alpha, it is worthwhile to split the search. | 299 | * constants on the alpha, it is worthwhile to split the search. |
296 | */ | 300 | */ |
297 | static inline unsigned long ffz_b(unsigned long x) | 301 | static inline unsigned long ffz_b(unsigned long x) |
298 | { | 302 | { |
299 | unsigned long sum, x1, x2, x4; | 303 | unsigned long sum, x1, x2, x4; |
300 | 304 | ||
301 | x = ~x & -~x; /* set first 0 bit, clear others */ | 305 | x = ~x & -~x; /* set first 0 bit, clear others */ |
302 | x1 = x & 0xAA; | 306 | x1 = x & 0xAA; |
303 | x2 = x & 0xCC; | 307 | x2 = x & 0xCC; |
304 | x4 = x & 0xF0; | 308 | x4 = x & 0xF0; |
305 | sum = x2 ? 2 : 0; | 309 | sum = x2 ? 2 : 0; |
306 | sum += (x4 != 0) * 4; | 310 | sum += (x4 != 0) * 4; |
307 | sum += (x1 != 0); | 311 | sum += (x1 != 0); |
308 | 312 | ||
309 | return sum; | 313 | return sum; |
310 | } | 314 | } |
311 | 315 | ||
312 | static inline unsigned long ffz(unsigned long word) | 316 | static inline unsigned long ffz(unsigned long word) |
313 | { | 317 | { |
314 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) | 318 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) |
315 | /* Whee. EV67 can calculate it directly. */ | 319 | /* Whee. EV67 can calculate it directly. */ |
316 | return __kernel_cttz(~word); | 320 | return __kernel_cttz(~word); |
317 | #else | 321 | #else |
318 | unsigned long bits, qofs, bofs; | 322 | unsigned long bits, qofs, bofs; |
319 | 323 | ||
320 | bits = __kernel_cmpbge(word, ~0UL); | 324 | bits = __kernel_cmpbge(word, ~0UL); |
321 | qofs = ffz_b(bits); | 325 | qofs = ffz_b(bits); |
322 | bits = __kernel_extbl(word, qofs); | 326 | bits = __kernel_extbl(word, qofs); |
323 | bofs = ffz_b(bits); | 327 | bofs = ffz_b(bits); |
324 | 328 | ||
325 | return qofs*8 + bofs; | 329 | return qofs*8 + bofs; |
326 | #endif | 330 | #endif |
327 | } | 331 | } |
328 | 332 | ||
329 | /* | 333 | /* |
330 | * __ffs = Find First set bit in word. Undefined if no set bit exists. | 334 | * __ffs = Find First set bit in word. Undefined if no set bit exists. |
331 | */ | 335 | */ |
332 | static inline unsigned long __ffs(unsigned long word) | 336 | static inline unsigned long __ffs(unsigned long word) |
333 | { | 337 | { |
334 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) | 338 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) |
335 | /* Whee. EV67 can calculate it directly. */ | 339 | /* Whee. EV67 can calculate it directly. */ |
336 | return __kernel_cttz(word); | 340 | return __kernel_cttz(word); |
337 | #else | 341 | #else |
338 | unsigned long bits, qofs, bofs; | 342 | unsigned long bits, qofs, bofs; |
339 | 343 | ||
340 | bits = __kernel_cmpbge(0, word); | 344 | bits = __kernel_cmpbge(0, word); |
341 | qofs = ffz_b(bits); | 345 | qofs = ffz_b(bits); |
342 | bits = __kernel_extbl(word, qofs); | 346 | bits = __kernel_extbl(word, qofs); |
343 | bofs = ffz_b(~bits); | 347 | bofs = ffz_b(~bits); |
344 | 348 | ||
345 | return qofs*8 + bofs; | 349 | return qofs*8 + bofs; |
346 | #endif | 350 | #endif |
347 | } | 351 | } |
348 | 352 | ||
349 | #ifdef __KERNEL__ | 353 | #ifdef __KERNEL__ |
350 | 354 | ||
351 | /* | 355 | /* |
352 | * ffs: find first bit set. This is defined the same way as | 356 | * ffs: find first bit set. This is defined the same way as |
353 | * the libc and compiler builtin ffs routines, therefore | 357 | * the libc and compiler builtin ffs routines, therefore |
354 | * differs in spirit from the above __ffs. | 358 | * differs in spirit from the above __ffs. |
355 | */ | 359 | */ |
356 | 360 | ||
357 | static inline int ffs(int word) | 361 | static inline int ffs(int word) |
358 | { | 362 | { |
359 | int result = __ffs(word) + 1; | 363 | int result = __ffs(word) + 1; |
360 | return word ? result : 0; | 364 | return word ? result : 0; |
361 | } | 365 | } |
362 | 366 | ||
363 | /* | 367 | /* |
364 | * fls: find last bit set. | 368 | * fls: find last bit set. |
365 | */ | 369 | */ |
366 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) | 370 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) |
367 | static inline int fls64(unsigned long word) | 371 | static inline int fls64(unsigned long word) |
368 | { | 372 | { |
369 | return 64 - __kernel_ctlz(word); | 373 | return 64 - __kernel_ctlz(word); |
370 | } | 374 | } |
371 | #else | 375 | #else |
372 | extern const unsigned char __flsm1_tab[256]; | 376 | extern const unsigned char __flsm1_tab[256]; |
373 | 377 | ||
374 | static inline int fls64(unsigned long x) | 378 | static inline int fls64(unsigned long x) |
375 | { | 379 | { |
376 | unsigned long t, a, r; | 380 | unsigned long t, a, r; |
377 | 381 | ||
378 | t = __kernel_cmpbge (x, 0x0101010101010101UL); | 382 | t = __kernel_cmpbge (x, 0x0101010101010101UL); |
379 | a = __flsm1_tab[t]; | 383 | a = __flsm1_tab[t]; |
380 | t = __kernel_extbl (x, a); | 384 | t = __kernel_extbl (x, a); |
381 | r = a*8 + __flsm1_tab[t] + (x != 0); | 385 | r = a*8 + __flsm1_tab[t] + (x != 0); |
382 | 386 | ||
383 | return r; | 387 | return r; |
384 | } | 388 | } |
385 | #endif | 389 | #endif |
386 | 390 | ||
387 | static inline int fls(int x) | 391 | static inline int fls(int x) |
388 | { | 392 | { |
389 | return fls64((unsigned int) x); | 393 | return fls64((unsigned int) x); |
390 | } | 394 | } |
391 | 395 | ||
392 | /* | 396 | /* |
393 | * hweightN: returns the hamming weight (i.e. the number | 397 | * hweightN: returns the hamming weight (i.e. the number |
394 | * of bits set) of a N-bit word | 398 | * of bits set) of a N-bit word |
395 | */ | 399 | */ |
396 | 400 | ||
397 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) | 401 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) |
398 | /* Whee. EV67 can calculate it directly. */ | 402 | /* Whee. EV67 can calculate it directly. */ |
399 | static inline unsigned long hweight64(unsigned long w) | 403 | static inline unsigned long hweight64(unsigned long w) |
400 | { | 404 | { |
401 | return __kernel_ctpop(w); | 405 | return __kernel_ctpop(w); |
402 | } | 406 | } |
403 | 407 | ||
404 | static inline unsigned int hweight32(unsigned int w) | 408 | static inline unsigned int hweight32(unsigned int w) |
405 | { | 409 | { |
406 | return hweight64(w); | 410 | return hweight64(w); |
407 | } | 411 | } |
408 | 412 | ||
409 | static inline unsigned int hweight16(unsigned int w) | 413 | static inline unsigned int hweight16(unsigned int w) |
410 | { | 414 | { |
411 | return hweight64(w & 0xffff); | 415 | return hweight64(w & 0xffff); |
412 | } | 416 | } |
413 | 417 | ||
414 | static inline unsigned int hweight8(unsigned int w) | 418 | static inline unsigned int hweight8(unsigned int w) |
415 | { | 419 | { |
416 | return hweight64(w & 0xff); | 420 | return hweight64(w & 0xff); |
417 | } | 421 | } |
418 | #else | 422 | #else |
419 | #include <asm-generic/bitops/hweight.h> | 423 | #include <asm-generic/bitops/hweight.h> |
420 | #endif | 424 | #endif |
421 | 425 | ||
422 | #endif /* __KERNEL__ */ | 426 | #endif /* __KERNEL__ */ |
423 | 427 | ||
424 | #include <asm-generic/bitops/find.h> | 428 | #include <asm-generic/bitops/find.h> |
425 | 429 | ||
426 | #ifdef __KERNEL__ | 430 | #ifdef __KERNEL__ |
427 | 431 | ||
428 | /* | 432 | /* |
429 | * Every architecture must define this function. It's the fastest | 433 | * Every architecture must define this function. It's the fastest |
430 | * way of searching a 140-bit bitmap where the first 100 bits are | 434 | * way of searching a 140-bit bitmap where the first 100 bits are |
431 | * unlikely to be set. It's guaranteed that at least one of the 140 | 435 | * unlikely to be set. It's guaranteed that at least one of the 140 |
432 | * bits is set. | 436 | * bits is set. |
433 | */ | 437 | */ |
434 | static inline unsigned long | 438 | static inline unsigned long |
435 | sched_find_first_bit(unsigned long b[3]) | 439 | sched_find_first_bit(unsigned long b[3]) |
436 | { | 440 | { |
437 | unsigned long b0 = b[0], b1 = b[1], b2 = b[2]; | 441 | unsigned long b0 = b[0], b1 = b[1], b2 = b[2]; |
438 | unsigned long ofs; | 442 | unsigned long ofs; |
439 | 443 | ||
440 | ofs = (b1 ? 64 : 128); | 444 | ofs = (b1 ? 64 : 128); |
441 | b1 = (b1 ? b1 : b2); | 445 | b1 = (b1 ? b1 : b2); |
442 | ofs = (b0 ? 0 : ofs); | 446 | ofs = (b0 ? 0 : ofs); |
443 | b0 = (b0 ? b0 : b1); | 447 | b0 = (b0 ? b0 : b1); |
444 | 448 | ||
445 | return __ffs(b0) + ofs; | 449 | return __ffs(b0) + ofs; |
446 | } | 450 | } |
447 | 451 | ||
448 | #include <asm-generic/bitops/ext2-non-atomic.h> | 452 | #include <asm-generic/bitops/ext2-non-atomic.h> |
449 | 453 | ||
450 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) | 454 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) |
451 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) | 455 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) |
452 | 456 | ||
453 | #include <asm-generic/bitops/minix.h> | 457 | #include <asm-generic/bitops/minix.h> |
454 | 458 | ||
455 | #endif /* __KERNEL__ */ | 459 | #endif /* __KERNEL__ */ |
456 | 460 | ||
457 | #endif /* _ALPHA_BITOPS_H */ | 461 | #endif /* _ALPHA_BITOPS_H */ |
458 | 462 |
include/asm-arm/bitops.h
1 | /* | 1 | /* |
2 | * Copyright 1995, Russell King. | 2 | * Copyright 1995, Russell King. |
3 | * Various bits and pieces copyrights include: | 3 | * Various bits and pieces copyrights include: |
4 | * Linus Torvalds (test_bit). | 4 | * Linus Torvalds (test_bit). |
5 | * Big endian support: Copyright 2001, Nicolas Pitre | 5 | * Big endian support: Copyright 2001, Nicolas Pitre |
6 | * reworked by rmk. | 6 | * reworked by rmk. |
7 | * | 7 | * |
8 | * bit 0 is the LSB of an "unsigned long" quantity. | 8 | * bit 0 is the LSB of an "unsigned long" quantity. |
9 | * | 9 | * |
10 | * Please note that the code in this file should never be included | 10 | * Please note that the code in this file should never be included |
11 | * from user space. Many of these are not implemented in assembler | 11 | * from user space. Many of these are not implemented in assembler |
12 | * since they would be too costly. Also, they require privileged | 12 | * since they would be too costly. Also, they require privileged |
13 | * instructions (which are not available from user mode) to ensure | 13 | * instructions (which are not available from user mode) to ensure |
14 | * that they are atomic. | 14 | * that they are atomic. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #ifndef __ASM_ARM_BITOPS_H | 17 | #ifndef __ASM_ARM_BITOPS_H |
18 | #define __ASM_ARM_BITOPS_H | 18 | #define __ASM_ARM_BITOPS_H |
19 | 19 | ||
20 | #ifdef __KERNEL__ | 20 | #ifdef __KERNEL__ |
21 | 21 | ||
22 | #ifndef _LINUX_BITOPS_H | ||
23 | #error only <linux/bitops.h> can be included directly | ||
24 | #endif | ||
25 | |||
22 | #include <linux/compiler.h> | 26 | #include <linux/compiler.h> |
23 | #include <asm/system.h> | 27 | #include <asm/system.h> |
24 | 28 | ||
25 | #define smp_mb__before_clear_bit() mb() | 29 | #define smp_mb__before_clear_bit() mb() |
26 | #define smp_mb__after_clear_bit() mb() | 30 | #define smp_mb__after_clear_bit() mb() |
27 | 31 | ||
28 | /* | 32 | /* |
29 | * These functions are the basis of our bit ops. | 33 | * These functions are the basis of our bit ops. |
30 | * | 34 | * |
31 | * First, the atomic bitops. These use native endian. | 35 | * First, the atomic bitops. These use native endian. |
32 | */ | 36 | */ |
33 | static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p) | 37 | static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p) |
34 | { | 38 | { |
35 | unsigned long flags; | 39 | unsigned long flags; |
36 | unsigned long mask = 1UL << (bit & 31); | 40 | unsigned long mask = 1UL << (bit & 31); |
37 | 41 | ||
38 | p += bit >> 5; | 42 | p += bit >> 5; |
39 | 43 | ||
40 | raw_local_irq_save(flags); | 44 | raw_local_irq_save(flags); |
41 | *p |= mask; | 45 | *p |= mask; |
42 | raw_local_irq_restore(flags); | 46 | raw_local_irq_restore(flags); |
43 | } | 47 | } |
44 | 48 | ||
45 | static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p) | 49 | static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p) |
46 | { | 50 | { |
47 | unsigned long flags; | 51 | unsigned long flags; |
48 | unsigned long mask = 1UL << (bit & 31); | 52 | unsigned long mask = 1UL << (bit & 31); |
49 | 53 | ||
50 | p += bit >> 5; | 54 | p += bit >> 5; |
51 | 55 | ||
52 | raw_local_irq_save(flags); | 56 | raw_local_irq_save(flags); |
53 | *p &= ~mask; | 57 | *p &= ~mask; |
54 | raw_local_irq_restore(flags); | 58 | raw_local_irq_restore(flags); |
55 | } | 59 | } |
56 | 60 | ||
57 | static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p) | 61 | static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p) |
58 | { | 62 | { |
59 | unsigned long flags; | 63 | unsigned long flags; |
60 | unsigned long mask = 1UL << (bit & 31); | 64 | unsigned long mask = 1UL << (bit & 31); |
61 | 65 | ||
62 | p += bit >> 5; | 66 | p += bit >> 5; |
63 | 67 | ||
64 | raw_local_irq_save(flags); | 68 | raw_local_irq_save(flags); |
65 | *p ^= mask; | 69 | *p ^= mask; |
66 | raw_local_irq_restore(flags); | 70 | raw_local_irq_restore(flags); |
67 | } | 71 | } |
68 | 72 | ||
69 | static inline int | 73 | static inline int |
70 | ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p) | 74 | ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p) |
71 | { | 75 | { |
72 | unsigned long flags; | 76 | unsigned long flags; |
73 | unsigned int res; | 77 | unsigned int res; |
74 | unsigned long mask = 1UL << (bit & 31); | 78 | unsigned long mask = 1UL << (bit & 31); |
75 | 79 | ||
76 | p += bit >> 5; | 80 | p += bit >> 5; |
77 | 81 | ||
78 | raw_local_irq_save(flags); | 82 | raw_local_irq_save(flags); |
79 | res = *p; | 83 | res = *p; |
80 | *p = res | mask; | 84 | *p = res | mask; |
81 | raw_local_irq_restore(flags); | 85 | raw_local_irq_restore(flags); |
82 | 86 | ||
83 | return res & mask; | 87 | return res & mask; |
84 | } | 88 | } |
85 | 89 | ||
86 | static inline int | 90 | static inline int |
87 | ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p) | 91 | ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p) |
88 | { | 92 | { |
89 | unsigned long flags; | 93 | unsigned long flags; |
90 | unsigned int res; | 94 | unsigned int res; |
91 | unsigned long mask = 1UL << (bit & 31); | 95 | unsigned long mask = 1UL << (bit & 31); |
92 | 96 | ||
93 | p += bit >> 5; | 97 | p += bit >> 5; |
94 | 98 | ||
95 | raw_local_irq_save(flags); | 99 | raw_local_irq_save(flags); |
96 | res = *p; | 100 | res = *p; |
97 | *p = res & ~mask; | 101 | *p = res & ~mask; |
98 | raw_local_irq_restore(flags); | 102 | raw_local_irq_restore(flags); |
99 | 103 | ||
100 | return res & mask; | 104 | return res & mask; |
101 | } | 105 | } |
102 | 106 | ||
103 | static inline int | 107 | static inline int |
104 | ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p) | 108 | ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p) |
105 | { | 109 | { |
106 | unsigned long flags; | 110 | unsigned long flags; |
107 | unsigned int res; | 111 | unsigned int res; |
108 | unsigned long mask = 1UL << (bit & 31); | 112 | unsigned long mask = 1UL << (bit & 31); |
109 | 113 | ||
110 | p += bit >> 5; | 114 | p += bit >> 5; |
111 | 115 | ||
112 | raw_local_irq_save(flags); | 116 | raw_local_irq_save(flags); |
113 | res = *p; | 117 | res = *p; |
114 | *p = res ^ mask; | 118 | *p = res ^ mask; |
115 | raw_local_irq_restore(flags); | 119 | raw_local_irq_restore(flags); |
116 | 120 | ||
117 | return res & mask; | 121 | return res & mask; |
118 | } | 122 | } |
119 | 123 | ||
120 | #include <asm-generic/bitops/non-atomic.h> | 124 | #include <asm-generic/bitops/non-atomic.h> |
121 | 125 | ||
122 | /* | 126 | /* |
123 | * A note about Endian-ness. | 127 | * A note about Endian-ness. |
124 | * ------------------------- | 128 | * ------------------------- |
125 | * | 129 | * |
126 | * When the ARM is put into big endian mode via CR15, the processor | 130 | * When the ARM is put into big endian mode via CR15, the processor |
127 | * merely swaps the order of bytes within words, thus: | 131 | * merely swaps the order of bytes within words, thus: |
128 | * | 132 | * |
129 | * ------------ physical data bus bits ----------- | 133 | * ------------ physical data bus bits ----------- |
130 | * D31 ... D24 D23 ... D16 D15 ... D8 D7 ... D0 | 134 | * D31 ... D24 D23 ... D16 D15 ... D8 D7 ... D0 |
131 | * little byte 3 byte 2 byte 1 byte 0 | 135 | * little byte 3 byte 2 byte 1 byte 0 |
132 | * big byte 0 byte 1 byte 2 byte 3 | 136 | * big byte 0 byte 1 byte 2 byte 3 |
133 | * | 137 | * |
134 | * This means that reading a 32-bit word at address 0 returns the same | 138 | * This means that reading a 32-bit word at address 0 returns the same |
135 | * value irrespective of the endian mode bit. | 139 | * value irrespective of the endian mode bit. |
136 | * | 140 | * |
137 | * Peripheral devices should be connected with the data bus reversed in | 141 | * Peripheral devices should be connected with the data bus reversed in |
138 | * "Big Endian" mode. ARM Application Note 61 is applicable, and is | 142 | * "Big Endian" mode. ARM Application Note 61 is applicable, and is |
139 | * available from http://www.arm.com/. | 143 | * available from http://www.arm.com/. |
140 | * | 144 | * |
141 | * The following assumes that the data bus connectivity for big endian | 145 | * The following assumes that the data bus connectivity for big endian |
142 | * mode has been followed. | 146 | * mode has been followed. |
143 | * | 147 | * |
144 | * Note that bit 0 is defined to be 32-bit word bit 0, not byte 0 bit 0. | 148 | * Note that bit 0 is defined to be 32-bit word bit 0, not byte 0 bit 0. |
145 | */ | 149 | */ |
146 | 150 | ||
147 | /* | 151 | /* |
148 | * Little endian assembly bitops. nr = 0 -> byte 0 bit 0. | 152 | * Little endian assembly bitops. nr = 0 -> byte 0 bit 0. |
149 | */ | 153 | */ |
150 | extern void _set_bit_le(int nr, volatile unsigned long * p); | 154 | extern void _set_bit_le(int nr, volatile unsigned long * p); |
151 | extern void _clear_bit_le(int nr, volatile unsigned long * p); | 155 | extern void _clear_bit_le(int nr, volatile unsigned long * p); |
152 | extern void _change_bit_le(int nr, volatile unsigned long * p); | 156 | extern void _change_bit_le(int nr, volatile unsigned long * p); |
153 | extern int _test_and_set_bit_le(int nr, volatile unsigned long * p); | 157 | extern int _test_and_set_bit_le(int nr, volatile unsigned long * p); |
154 | extern int _test_and_clear_bit_le(int nr, volatile unsigned long * p); | 158 | extern int _test_and_clear_bit_le(int nr, volatile unsigned long * p); |
155 | extern int _test_and_change_bit_le(int nr, volatile unsigned long * p); | 159 | extern int _test_and_change_bit_le(int nr, volatile unsigned long * p); |
156 | extern int _find_first_zero_bit_le(const void * p, unsigned size); | 160 | extern int _find_first_zero_bit_le(const void * p, unsigned size); |
157 | extern int _find_next_zero_bit_le(const void * p, int size, int offset); | 161 | extern int _find_next_zero_bit_le(const void * p, int size, int offset); |
158 | extern int _find_first_bit_le(const unsigned long *p, unsigned size); | 162 | extern int _find_first_bit_le(const unsigned long *p, unsigned size); |
159 | extern int _find_next_bit_le(const unsigned long *p, int size, int offset); | 163 | extern int _find_next_bit_le(const unsigned long *p, int size, int offset); |
160 | 164 | ||
161 | /* | 165 | /* |
162 | * Big endian assembly bitops. nr = 0 -> byte 3 bit 0. | 166 | * Big endian assembly bitops. nr = 0 -> byte 3 bit 0. |
163 | */ | 167 | */ |
164 | extern void _set_bit_be(int nr, volatile unsigned long * p); | 168 | extern void _set_bit_be(int nr, volatile unsigned long * p); |
165 | extern void _clear_bit_be(int nr, volatile unsigned long * p); | 169 | extern void _clear_bit_be(int nr, volatile unsigned long * p); |
166 | extern void _change_bit_be(int nr, volatile unsigned long * p); | 170 | extern void _change_bit_be(int nr, volatile unsigned long * p); |
167 | extern int _test_and_set_bit_be(int nr, volatile unsigned long * p); | 171 | extern int _test_and_set_bit_be(int nr, volatile unsigned long * p); |
168 | extern int _test_and_clear_bit_be(int nr, volatile unsigned long * p); | 172 | extern int _test_and_clear_bit_be(int nr, volatile unsigned long * p); |
169 | extern int _test_and_change_bit_be(int nr, volatile unsigned long * p); | 173 | extern int _test_and_change_bit_be(int nr, volatile unsigned long * p); |
170 | extern int _find_first_zero_bit_be(const void * p, unsigned size); | 174 | extern int _find_first_zero_bit_be(const void * p, unsigned size); |
171 | extern int _find_next_zero_bit_be(const void * p, int size, int offset); | 175 | extern int _find_next_zero_bit_be(const void * p, int size, int offset); |
172 | extern int _find_first_bit_be(const unsigned long *p, unsigned size); | 176 | extern int _find_first_bit_be(const unsigned long *p, unsigned size); |
173 | extern int _find_next_bit_be(const unsigned long *p, int size, int offset); | 177 | extern int _find_next_bit_be(const unsigned long *p, int size, int offset); |
174 | 178 | ||
175 | #ifndef CONFIG_SMP | 179 | #ifndef CONFIG_SMP |
176 | /* | 180 | /* |
177 | * The __* form of bitops are non-atomic and may be reordered. | 181 | * The __* form of bitops are non-atomic and may be reordered. |
178 | */ | 182 | */ |
179 | #define ATOMIC_BITOP_LE(name,nr,p) \ | 183 | #define ATOMIC_BITOP_LE(name,nr,p) \ |
180 | (__builtin_constant_p(nr) ? \ | 184 | (__builtin_constant_p(nr) ? \ |
181 | ____atomic_##name(nr, p) : \ | 185 | ____atomic_##name(nr, p) : \ |
182 | _##name##_le(nr,p)) | 186 | _##name##_le(nr,p)) |
183 | 187 | ||
184 | #define ATOMIC_BITOP_BE(name,nr,p) \ | 188 | #define ATOMIC_BITOP_BE(name,nr,p) \ |
185 | (__builtin_constant_p(nr) ? \ | 189 | (__builtin_constant_p(nr) ? \ |
186 | ____atomic_##name(nr, p) : \ | 190 | ____atomic_##name(nr, p) : \ |
187 | _##name##_be(nr,p)) | 191 | _##name##_be(nr,p)) |
188 | #else | 192 | #else |
189 | #define ATOMIC_BITOP_LE(name,nr,p) _##name##_le(nr,p) | 193 | #define ATOMIC_BITOP_LE(name,nr,p) _##name##_le(nr,p) |
190 | #define ATOMIC_BITOP_BE(name,nr,p) _##name##_be(nr,p) | 194 | #define ATOMIC_BITOP_BE(name,nr,p) _##name##_be(nr,p) |
191 | #endif | 195 | #endif |
192 | 196 | ||
193 | #define NONATOMIC_BITOP(name,nr,p) \ | 197 | #define NONATOMIC_BITOP(name,nr,p) \ |
194 | (____nonatomic_##name(nr, p)) | 198 | (____nonatomic_##name(nr, p)) |
195 | 199 | ||
196 | #ifndef __ARMEB__ | 200 | #ifndef __ARMEB__ |
197 | /* | 201 | /* |
198 | * These are the little endian, atomic definitions. | 202 | * These are the little endian, atomic definitions. |
199 | */ | 203 | */ |
200 | #define set_bit(nr,p) ATOMIC_BITOP_LE(set_bit,nr,p) | 204 | #define set_bit(nr,p) ATOMIC_BITOP_LE(set_bit,nr,p) |
201 | #define clear_bit(nr,p) ATOMIC_BITOP_LE(clear_bit,nr,p) | 205 | #define clear_bit(nr,p) ATOMIC_BITOP_LE(clear_bit,nr,p) |
202 | #define change_bit(nr,p) ATOMIC_BITOP_LE(change_bit,nr,p) | 206 | #define change_bit(nr,p) ATOMIC_BITOP_LE(change_bit,nr,p) |
203 | #define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p) | 207 | #define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p) |
204 | #define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p) | 208 | #define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p) |
205 | #define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p) | 209 | #define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p) |
206 | #define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) | 210 | #define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) |
207 | #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) | 211 | #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) |
208 | #define find_first_bit(p,sz) _find_first_bit_le(p,sz) | 212 | #define find_first_bit(p,sz) _find_first_bit_le(p,sz) |
209 | #define find_next_bit(p,sz,off) _find_next_bit_le(p,sz,off) | 213 | #define find_next_bit(p,sz,off) _find_next_bit_le(p,sz,off) |
210 | 214 | ||
211 | #define WORD_BITOFF_TO_LE(x) ((x)) | 215 | #define WORD_BITOFF_TO_LE(x) ((x)) |
212 | 216 | ||
213 | #else | 217 | #else |
214 | 218 | ||
215 | /* | 219 | /* |
216 | * These are the big endian, atomic definitions. | 220 | * These are the big endian, atomic definitions. |
217 | */ | 221 | */ |
218 | #define set_bit(nr,p) ATOMIC_BITOP_BE(set_bit,nr,p) | 222 | #define set_bit(nr,p) ATOMIC_BITOP_BE(set_bit,nr,p) |
219 | #define clear_bit(nr,p) ATOMIC_BITOP_BE(clear_bit,nr,p) | 223 | #define clear_bit(nr,p) ATOMIC_BITOP_BE(clear_bit,nr,p) |
220 | #define change_bit(nr,p) ATOMIC_BITOP_BE(change_bit,nr,p) | 224 | #define change_bit(nr,p) ATOMIC_BITOP_BE(change_bit,nr,p) |
221 | #define test_and_set_bit(nr,p) ATOMIC_BITOP_BE(test_and_set_bit,nr,p) | 225 | #define test_and_set_bit(nr,p) ATOMIC_BITOP_BE(test_and_set_bit,nr,p) |
222 | #define test_and_clear_bit(nr,p) ATOMIC_BITOP_BE(test_and_clear_bit,nr,p) | 226 | #define test_and_clear_bit(nr,p) ATOMIC_BITOP_BE(test_and_clear_bit,nr,p) |
223 | #define test_and_change_bit(nr,p) ATOMIC_BITOP_BE(test_and_change_bit,nr,p) | 227 | #define test_and_change_bit(nr,p) ATOMIC_BITOP_BE(test_and_change_bit,nr,p) |
224 | #define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz) | 228 | #define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz) |
225 | #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off) | 229 | #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off) |
226 | #define find_first_bit(p,sz) _find_first_bit_be(p,sz) | 230 | #define find_first_bit(p,sz) _find_first_bit_be(p,sz) |
227 | #define find_next_bit(p,sz,off) _find_next_bit_be(p,sz,off) | 231 | #define find_next_bit(p,sz,off) _find_next_bit_be(p,sz,off) |
228 | 232 | ||
229 | #define WORD_BITOFF_TO_LE(x) ((x) ^ 0x18) | 233 | #define WORD_BITOFF_TO_LE(x) ((x) ^ 0x18) |
230 | 234 | ||
231 | #endif | 235 | #endif |
232 | 236 | ||
233 | #if __LINUX_ARM_ARCH__ < 5 | 237 | #if __LINUX_ARM_ARCH__ < 5 |
234 | 238 | ||
235 | #include <asm-generic/bitops/ffz.h> | 239 | #include <asm-generic/bitops/ffz.h> |
236 | #include <asm-generic/bitops/__ffs.h> | 240 | #include <asm-generic/bitops/__ffs.h> |
237 | #include <asm-generic/bitops/fls.h> | 241 | #include <asm-generic/bitops/fls.h> |
238 | #include <asm-generic/bitops/ffs.h> | 242 | #include <asm-generic/bitops/ffs.h> |
239 | 243 | ||
240 | #else | 244 | #else |
241 | 245 | ||
242 | static inline int constant_fls(int x) | 246 | static inline int constant_fls(int x) |
243 | { | 247 | { |
244 | int r = 32; | 248 | int r = 32; |
245 | 249 | ||
246 | if (!x) | 250 | if (!x) |
247 | return 0; | 251 | return 0; |
248 | if (!(x & 0xffff0000u)) { | 252 | if (!(x & 0xffff0000u)) { |
249 | x <<= 16; | 253 | x <<= 16; |
250 | r -= 16; | 254 | r -= 16; |
251 | } | 255 | } |
252 | if (!(x & 0xff000000u)) { | 256 | if (!(x & 0xff000000u)) { |
253 | x <<= 8; | 257 | x <<= 8; |
254 | r -= 8; | 258 | r -= 8; |
255 | } | 259 | } |
256 | if (!(x & 0xf0000000u)) { | 260 | if (!(x & 0xf0000000u)) { |
257 | x <<= 4; | 261 | x <<= 4; |
258 | r -= 4; | 262 | r -= 4; |
259 | } | 263 | } |
260 | if (!(x & 0xc0000000u)) { | 264 | if (!(x & 0xc0000000u)) { |
261 | x <<= 2; | 265 | x <<= 2; |
262 | r -= 2; | 266 | r -= 2; |
263 | } | 267 | } |
264 | if (!(x & 0x80000000u)) { | 268 | if (!(x & 0x80000000u)) { |
265 | x <<= 1; | 269 | x <<= 1; |
266 | r -= 1; | 270 | r -= 1; |
267 | } | 271 | } |
268 | return r; | 272 | return r; |
269 | } | 273 | } |
270 | 274 | ||
271 | /* | 275 | /* |
272 | * On ARMv5 and above those functions can be implemented around | 276 | * On ARMv5 and above those functions can be implemented around |
273 | * the clz instruction for much better code efficiency. | 277 | * the clz instruction for much better code efficiency. |
274 | */ | 278 | */ |
275 | 279 | ||
276 | #define fls(x) \ | 280 | #define fls(x) \ |
277 | ( __builtin_constant_p(x) ? constant_fls(x) : \ | 281 | ( __builtin_constant_p(x) ? constant_fls(x) : \ |
278 | ({ int __r; asm("clz\t%0, %1" : "=r"(__r) : "r"(x) : "cc"); 32-__r; }) ) | 282 | ({ int __r; asm("clz\t%0, %1" : "=r"(__r) : "r"(x) : "cc"); 32-__r; }) ) |
279 | #define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); }) | 283 | #define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); }) |
280 | #define __ffs(x) (ffs(x) - 1) | 284 | #define __ffs(x) (ffs(x) - 1) |
281 | #define ffz(x) __ffs( ~(x) ) | 285 | #define ffz(x) __ffs( ~(x) ) |
282 | 286 | ||
283 | #endif | 287 | #endif |
284 | 288 | ||
285 | #include <asm-generic/bitops/fls64.h> | 289 | #include <asm-generic/bitops/fls64.h> |
286 | 290 | ||
287 | #include <asm-generic/bitops/sched.h> | 291 | #include <asm-generic/bitops/sched.h> |
288 | #include <asm-generic/bitops/hweight.h> | 292 | #include <asm-generic/bitops/hweight.h> |
289 | #include <asm-generic/bitops/lock.h> | 293 | #include <asm-generic/bitops/lock.h> |
290 | 294 | ||
291 | /* | 295 | /* |
292 | * Ext2 is defined to use little-endian byte ordering. | 296 | * Ext2 is defined to use little-endian byte ordering. |
293 | * These do not need to be atomic. | 297 | * These do not need to be atomic. |
294 | */ | 298 | */ |
295 | #define ext2_set_bit(nr,p) \ | 299 | #define ext2_set_bit(nr,p) \ |
296 | __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 300 | __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
297 | #define ext2_set_bit_atomic(lock,nr,p) \ | 301 | #define ext2_set_bit_atomic(lock,nr,p) \ |
298 | test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 302 | test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
299 | #define ext2_clear_bit(nr,p) \ | 303 | #define ext2_clear_bit(nr,p) \ |
300 | __test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 304 | __test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
301 | #define ext2_clear_bit_atomic(lock,nr,p) \ | 305 | #define ext2_clear_bit_atomic(lock,nr,p) \ |
302 | test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 306 | test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
303 | #define ext2_test_bit(nr,p) \ | 307 | #define ext2_test_bit(nr,p) \ |
304 | test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 308 | test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
305 | #define ext2_find_first_zero_bit(p,sz) \ | 309 | #define ext2_find_first_zero_bit(p,sz) \ |
306 | _find_first_zero_bit_le(p,sz) | 310 | _find_first_zero_bit_le(p,sz) |
307 | #define ext2_find_next_zero_bit(p,sz,off) \ | 311 | #define ext2_find_next_zero_bit(p,sz,off) \ |
308 | _find_next_zero_bit_le(p,sz,off) | 312 | _find_next_zero_bit_le(p,sz,off) |
309 | 313 | ||
310 | /* | 314 | /* |
311 | * Minix is defined to use little-endian byte ordering. | 315 | * Minix is defined to use little-endian byte ordering. |
312 | * These do not need to be atomic. | 316 | * These do not need to be atomic. |
313 | */ | 317 | */ |
314 | #define minix_set_bit(nr,p) \ | 318 | #define minix_set_bit(nr,p) \ |
315 | __set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 319 | __set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
316 | #define minix_test_bit(nr,p) \ | 320 | #define minix_test_bit(nr,p) \ |
317 | test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 321 | test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
318 | #define minix_test_and_set_bit(nr,p) \ | 322 | #define minix_test_and_set_bit(nr,p) \ |
319 | __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 323 | __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
320 | #define minix_test_and_clear_bit(nr,p) \ | 324 | #define minix_test_and_clear_bit(nr,p) \ |
321 | __test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 325 | __test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
322 | #define minix_find_first_zero_bit(p,sz) \ | 326 | #define minix_find_first_zero_bit(p,sz) \ |
323 | _find_first_zero_bit_le(p,sz) | 327 | _find_first_zero_bit_le(p,sz) |
324 | 328 | ||
325 | #endif /* __KERNEL__ */ | 329 | #endif /* __KERNEL__ */ |
326 | 330 | ||
327 | #endif /* _ARM_BITOPS_H */ | 331 | #endif /* _ARM_BITOPS_H */ |
328 | 332 |
include/asm-avr32/bitops.h
1 | /* | 1 | /* |
2 | * Copyright (C) 2004-2006 Atmel Corporation | 2 | * Copyright (C) 2004-2006 Atmel Corporation |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License version 2 as | 5 | * it under the terms of the GNU General Public License version 2 as |
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | */ | 7 | */ |
8 | #ifndef __ASM_AVR32_BITOPS_H | 8 | #ifndef __ASM_AVR32_BITOPS_H |
9 | #define __ASM_AVR32_BITOPS_H | 9 | #define __ASM_AVR32_BITOPS_H |
10 | 10 | ||
11 | #ifndef _LINUX_BITOPS_H | ||
12 | #error only <linux/bitops.h> can be included directly | ||
13 | #endif | ||
14 | |||
11 | #include <asm/byteorder.h> | 15 | #include <asm/byteorder.h> |
12 | #include <asm/system.h> | 16 | #include <asm/system.h> |
13 | 17 | ||
14 | /* | 18 | /* |
15 | * clear_bit() doesn't provide any barrier for the compiler | 19 | * clear_bit() doesn't provide any barrier for the compiler |
16 | */ | 20 | */ |
17 | #define smp_mb__before_clear_bit() barrier() | 21 | #define smp_mb__before_clear_bit() barrier() |
18 | #define smp_mb__after_clear_bit() barrier() | 22 | #define smp_mb__after_clear_bit() barrier() |
19 | 23 | ||
20 | /* | 24 | /* |
21 | * set_bit - Atomically set a bit in memory | 25 | * set_bit - Atomically set a bit in memory |
22 | * @nr: the bit to set | 26 | * @nr: the bit to set |
23 | * @addr: the address to start counting from | 27 | * @addr: the address to start counting from |
24 | * | 28 | * |
25 | * This function is atomic and may not be reordered. See __set_bit() | 29 | * This function is atomic and may not be reordered. See __set_bit() |
26 | * if you do not require the atomic guarantees. | 30 | * if you do not require the atomic guarantees. |
27 | * | 31 | * |
28 | * Note that @nr may be almost arbitrarily large; this function is not | 32 | * Note that @nr may be almost arbitrarily large; this function is not |
29 | * restricted to acting on a single-word quantity. | 33 | * restricted to acting on a single-word quantity. |
30 | */ | 34 | */ |
31 | static inline void set_bit(int nr, volatile void * addr) | 35 | static inline void set_bit(int nr, volatile void * addr) |
32 | { | 36 | { |
33 | unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG; | 37 | unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG; |
34 | unsigned long tmp; | 38 | unsigned long tmp; |
35 | 39 | ||
36 | if (__builtin_constant_p(nr)) { | 40 | if (__builtin_constant_p(nr)) { |
37 | asm volatile( | 41 | asm volatile( |
38 | "1: ssrf 5\n" | 42 | "1: ssrf 5\n" |
39 | " ld.w %0, %2\n" | 43 | " ld.w %0, %2\n" |
40 | " sbr %0, %3\n" | 44 | " sbr %0, %3\n" |
41 | " stcond %1, %0\n" | 45 | " stcond %1, %0\n" |
42 | " brne 1b" | 46 | " brne 1b" |
43 | : "=&r"(tmp), "=o"(*p) | 47 | : "=&r"(tmp), "=o"(*p) |
44 | : "m"(*p), "i"(nr) | 48 | : "m"(*p), "i"(nr) |
45 | : "cc"); | 49 | : "cc"); |
46 | } else { | 50 | } else { |
47 | unsigned long mask = 1UL << (nr % BITS_PER_LONG); | 51 | unsigned long mask = 1UL << (nr % BITS_PER_LONG); |
48 | asm volatile( | 52 | asm volatile( |
49 | "1: ssrf 5\n" | 53 | "1: ssrf 5\n" |
50 | " ld.w %0, %2\n" | 54 | " ld.w %0, %2\n" |
51 | " or %0, %3\n" | 55 | " or %0, %3\n" |
52 | " stcond %1, %0\n" | 56 | " stcond %1, %0\n" |
53 | " brne 1b" | 57 | " brne 1b" |
54 | : "=&r"(tmp), "=o"(*p) | 58 | : "=&r"(tmp), "=o"(*p) |
55 | : "m"(*p), "r"(mask) | 59 | : "m"(*p), "r"(mask) |
56 | : "cc"); | 60 | : "cc"); |
57 | } | 61 | } |
58 | } | 62 | } |
59 | 63 | ||
60 | /* | 64 | /* |
61 | * clear_bit - Clears a bit in memory | 65 | * clear_bit - Clears a bit in memory |
62 | * @nr: Bit to clear | 66 | * @nr: Bit to clear |
63 | * @addr: Address to start counting from | 67 | * @addr: Address to start counting from |
64 | * | 68 | * |
65 | * clear_bit() is atomic and may not be reordered. However, it does | 69 | * clear_bit() is atomic and may not be reordered. However, it does |
66 | * not contain a memory barrier, so if it is used for locking purposes, | 70 | * not contain a memory barrier, so if it is used for locking purposes, |
67 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | 71 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() |
68 | * in order to ensure changes are visible on other processors. | 72 | * in order to ensure changes are visible on other processors. |
69 | */ | 73 | */ |
70 | static inline void clear_bit(int nr, volatile void * addr) | 74 | static inline void clear_bit(int nr, volatile void * addr) |
71 | { | 75 | { |
72 | unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG; | 76 | unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG; |
73 | unsigned long tmp; | 77 | unsigned long tmp; |
74 | 78 | ||
75 | if (__builtin_constant_p(nr)) { | 79 | if (__builtin_constant_p(nr)) { |
76 | asm volatile( | 80 | asm volatile( |
77 | "1: ssrf 5\n" | 81 | "1: ssrf 5\n" |
78 | " ld.w %0, %2\n" | 82 | " ld.w %0, %2\n" |
79 | " cbr %0, %3\n" | 83 | " cbr %0, %3\n" |
80 | " stcond %1, %0\n" | 84 | " stcond %1, %0\n" |
81 | " brne 1b" | 85 | " brne 1b" |
82 | : "=&r"(tmp), "=o"(*p) | 86 | : "=&r"(tmp), "=o"(*p) |
83 | : "m"(*p), "i"(nr) | 87 | : "m"(*p), "i"(nr) |
84 | : "cc"); | 88 | : "cc"); |
85 | } else { | 89 | } else { |
86 | unsigned long mask = 1UL << (nr % BITS_PER_LONG); | 90 | unsigned long mask = 1UL << (nr % BITS_PER_LONG); |
87 | asm volatile( | 91 | asm volatile( |
88 | "1: ssrf 5\n" | 92 | "1: ssrf 5\n" |
89 | " ld.w %0, %2\n" | 93 | " ld.w %0, %2\n" |
90 | " andn %0, %3\n" | 94 | " andn %0, %3\n" |
91 | " stcond %1, %0\n" | 95 | " stcond %1, %0\n" |
92 | " brne 1b" | 96 | " brne 1b" |
93 | : "=&r"(tmp), "=o"(*p) | 97 | : "=&r"(tmp), "=o"(*p) |
94 | : "m"(*p), "r"(mask) | 98 | : "m"(*p), "r"(mask) |
95 | : "cc"); | 99 | : "cc"); |
96 | } | 100 | } |
97 | } | 101 | } |
98 | 102 | ||
99 | /* | 103 | /* |
100 | * change_bit - Toggle a bit in memory | 104 | * change_bit - Toggle a bit in memory |
101 | * @nr: Bit to change | 105 | * @nr: Bit to change |
102 | * @addr: Address to start counting from | 106 | * @addr: Address to start counting from |
103 | * | 107 | * |
104 | * change_bit() is atomic and may not be reordered. | 108 | * change_bit() is atomic and may not be reordered. |
105 | * Note that @nr may be almost arbitrarily large; this function is not | 109 | * Note that @nr may be almost arbitrarily large; this function is not |
106 | * restricted to acting on a single-word quantity. | 110 | * restricted to acting on a single-word quantity. |
107 | */ | 111 | */ |
108 | static inline void change_bit(int nr, volatile void * addr) | 112 | static inline void change_bit(int nr, volatile void * addr) |
109 | { | 113 | { |
110 | unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG; | 114 | unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG; |
111 | unsigned long mask = 1UL << (nr % BITS_PER_LONG); | 115 | unsigned long mask = 1UL << (nr % BITS_PER_LONG); |
112 | unsigned long tmp; | 116 | unsigned long tmp; |
113 | 117 | ||
114 | asm volatile( | 118 | asm volatile( |
115 | "1: ssrf 5\n" | 119 | "1: ssrf 5\n" |
116 | " ld.w %0, %2\n" | 120 | " ld.w %0, %2\n" |
117 | " eor %0, %3\n" | 121 | " eor %0, %3\n" |
118 | " stcond %1, %0\n" | 122 | " stcond %1, %0\n" |
119 | " brne 1b" | 123 | " brne 1b" |
120 | : "=&r"(tmp), "=o"(*p) | 124 | : "=&r"(tmp), "=o"(*p) |
121 | : "m"(*p), "r"(mask) | 125 | : "m"(*p), "r"(mask) |
122 | : "cc"); | 126 | : "cc"); |
123 | } | 127 | } |
124 | 128 | ||
125 | /* | 129 | /* |
126 | * test_and_set_bit - Set a bit and return its old value | 130 | * test_and_set_bit - Set a bit and return its old value |
127 | * @nr: Bit to set | 131 | * @nr: Bit to set |
128 | * @addr: Address to count from | 132 | * @addr: Address to count from |
129 | * | 133 | * |
130 | * This operation is atomic and cannot be reordered. | 134 | * This operation is atomic and cannot be reordered. |
131 | * It also implies a memory barrier. | 135 | * It also implies a memory barrier. |
132 | */ | 136 | */ |
133 | static inline int test_and_set_bit(int nr, volatile void * addr) | 137 | static inline int test_and_set_bit(int nr, volatile void * addr) |
134 | { | 138 | { |
135 | unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG; | 139 | unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG; |
136 | unsigned long mask = 1UL << (nr % BITS_PER_LONG); | 140 | unsigned long mask = 1UL << (nr % BITS_PER_LONG); |
137 | unsigned long tmp, old; | 141 | unsigned long tmp, old; |
138 | 142 | ||
139 | if (__builtin_constant_p(nr)) { | 143 | if (__builtin_constant_p(nr)) { |
140 | asm volatile( | 144 | asm volatile( |
141 | "1: ssrf 5\n" | 145 | "1: ssrf 5\n" |
142 | " ld.w %0, %3\n" | 146 | " ld.w %0, %3\n" |
143 | " mov %2, %0\n" | 147 | " mov %2, %0\n" |
144 | " sbr %0, %4\n" | 148 | " sbr %0, %4\n" |
145 | " stcond %1, %0\n" | 149 | " stcond %1, %0\n" |
146 | " brne 1b" | 150 | " brne 1b" |
147 | : "=&r"(tmp), "=o"(*p), "=&r"(old) | 151 | : "=&r"(tmp), "=o"(*p), "=&r"(old) |
148 | : "m"(*p), "i"(nr) | 152 | : "m"(*p), "i"(nr) |
149 | : "memory", "cc"); | 153 | : "memory", "cc"); |
150 | } else { | 154 | } else { |
151 | asm volatile( | 155 | asm volatile( |
152 | "1: ssrf 5\n" | 156 | "1: ssrf 5\n" |
153 | " ld.w %2, %3\n" | 157 | " ld.w %2, %3\n" |
154 | " or %0, %2, %4\n" | 158 | " or %0, %2, %4\n" |
155 | " stcond %1, %0\n" | 159 | " stcond %1, %0\n" |
156 | " brne 1b" | 160 | " brne 1b" |
157 | : "=&r"(tmp), "=o"(*p), "=&r"(old) | 161 | : "=&r"(tmp), "=o"(*p), "=&r"(old) |
158 | : "m"(*p), "r"(mask) | 162 | : "m"(*p), "r"(mask) |
159 | : "memory", "cc"); | 163 | : "memory", "cc"); |
160 | } | 164 | } |
161 | 165 | ||
162 | return (old & mask) != 0; | 166 | return (old & mask) != 0; |
163 | } | 167 | } |
164 | 168 | ||
165 | /* | 169 | /* |
166 | * test_and_clear_bit - Clear a bit and return its old value | 170 | * test_and_clear_bit - Clear a bit and return its old value |
167 | * @nr: Bit to clear | 171 | * @nr: Bit to clear |
168 | * @addr: Address to count from | 172 | * @addr: Address to count from |
169 | * | 173 | * |
170 | * This operation is atomic and cannot be reordered. | 174 | * This operation is atomic and cannot be reordered. |
171 | * It also implies a memory barrier. | 175 | * It also implies a memory barrier. |
172 | */ | 176 | */ |
173 | static inline int test_and_clear_bit(int nr, volatile void * addr) | 177 | static inline int test_and_clear_bit(int nr, volatile void * addr) |
174 | { | 178 | { |
175 | unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG; | 179 | unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG; |
176 | unsigned long mask = 1UL << (nr % BITS_PER_LONG); | 180 | unsigned long mask = 1UL << (nr % BITS_PER_LONG); |
177 | unsigned long tmp, old; | 181 | unsigned long tmp, old; |
178 | 182 | ||
179 | if (__builtin_constant_p(nr)) { | 183 | if (__builtin_constant_p(nr)) { |
180 | asm volatile( | 184 | asm volatile( |
181 | "1: ssrf 5\n" | 185 | "1: ssrf 5\n" |
182 | " ld.w %0, %3\n" | 186 | " ld.w %0, %3\n" |
183 | " mov %2, %0\n" | 187 | " mov %2, %0\n" |
184 | " cbr %0, %4\n" | 188 | " cbr %0, %4\n" |
185 | " stcond %1, %0\n" | 189 | " stcond %1, %0\n" |
186 | " brne 1b" | 190 | " brne 1b" |
187 | : "=&r"(tmp), "=o"(*p), "=&r"(old) | 191 | : "=&r"(tmp), "=o"(*p), "=&r"(old) |
188 | : "m"(*p), "i"(nr) | 192 | : "m"(*p), "i"(nr) |
189 | : "memory", "cc"); | 193 | : "memory", "cc"); |
190 | } else { | 194 | } else { |
191 | asm volatile( | 195 | asm volatile( |
192 | "1: ssrf 5\n" | 196 | "1: ssrf 5\n" |
193 | " ld.w %0, %3\n" | 197 | " ld.w %0, %3\n" |
194 | " mov %2, %0\n" | 198 | " mov %2, %0\n" |
195 | " andn %0, %4\n" | 199 | " andn %0, %4\n" |
196 | " stcond %1, %0\n" | 200 | " stcond %1, %0\n" |
197 | " brne 1b" | 201 | " brne 1b" |
198 | : "=&r"(tmp), "=o"(*p), "=&r"(old) | 202 | : "=&r"(tmp), "=o"(*p), "=&r"(old) |
199 | : "m"(*p), "r"(mask) | 203 | : "m"(*p), "r"(mask) |
200 | : "memory", "cc"); | 204 | : "memory", "cc"); |
201 | } | 205 | } |
202 | 206 | ||
203 | return (old & mask) != 0; | 207 | return (old & mask) != 0; |
204 | } | 208 | } |
205 | 209 | ||
206 | /* | 210 | /* |
207 | * test_and_change_bit - Change a bit and return its old value | 211 | * test_and_change_bit - Change a bit and return its old value |
208 | * @nr: Bit to change | 212 | * @nr: Bit to change |
209 | * @addr: Address to count from | 213 | * @addr: Address to count from |
210 | * | 214 | * |
211 | * This operation is atomic and cannot be reordered. | 215 | * This operation is atomic and cannot be reordered. |
212 | * It also implies a memory barrier. | 216 | * It also implies a memory barrier. |
213 | */ | 217 | */ |
214 | static inline int test_and_change_bit(int nr, volatile void * addr) | 218 | static inline int test_and_change_bit(int nr, volatile void * addr) |
215 | { | 219 | { |
216 | unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG; | 220 | unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG; |
217 | unsigned long mask = 1UL << (nr % BITS_PER_LONG); | 221 | unsigned long mask = 1UL << (nr % BITS_PER_LONG); |
218 | unsigned long tmp, old; | 222 | unsigned long tmp, old; |
219 | 223 | ||
220 | asm volatile( | 224 | asm volatile( |
221 | "1: ssrf 5\n" | 225 | "1: ssrf 5\n" |
222 | " ld.w %2, %3\n" | 226 | " ld.w %2, %3\n" |
223 | " eor %0, %2, %4\n" | 227 | " eor %0, %2, %4\n" |
224 | " stcond %1, %0\n" | 228 | " stcond %1, %0\n" |
225 | " brne 1b" | 229 | " brne 1b" |
226 | : "=&r"(tmp), "=o"(*p), "=&r"(old) | 230 | : "=&r"(tmp), "=o"(*p), "=&r"(old) |
227 | : "m"(*p), "r"(mask) | 231 | : "m"(*p), "r"(mask) |
228 | : "memory", "cc"); | 232 | : "memory", "cc"); |
229 | 233 | ||
230 | return (old & mask) != 0; | 234 | return (old & mask) != 0; |
231 | } | 235 | } |
232 | 236 | ||
233 | #include <asm-generic/bitops/non-atomic.h> | 237 | #include <asm-generic/bitops/non-atomic.h> |
234 | 238 | ||
235 | /* Find First bit Set */ | 239 | /* Find First bit Set */ |
236 | static inline unsigned long __ffs(unsigned long word) | 240 | static inline unsigned long __ffs(unsigned long word) |
237 | { | 241 | { |
238 | unsigned long result; | 242 | unsigned long result; |
239 | 243 | ||
240 | asm("brev %1\n\t" | 244 | asm("brev %1\n\t" |
241 | "clz %0,%1" | 245 | "clz %0,%1" |
242 | : "=r"(result), "=&r"(word) | 246 | : "=r"(result), "=&r"(word) |
243 | : "1"(word)); | 247 | : "1"(word)); |
244 | return result; | 248 | return result; |
245 | } | 249 | } |
246 | 250 | ||
247 | /* Find First Zero */ | 251 | /* Find First Zero */ |
248 | static inline unsigned long ffz(unsigned long word) | 252 | static inline unsigned long ffz(unsigned long word) |
249 | { | 253 | { |
250 | return __ffs(~word); | 254 | return __ffs(~word); |
251 | } | 255 | } |
252 | 256 | ||
253 | /* Find Last bit Set */ | 257 | /* Find Last bit Set */ |
254 | static inline int fls(unsigned long word) | 258 | static inline int fls(unsigned long word) |
255 | { | 259 | { |
256 | unsigned long result; | 260 | unsigned long result; |
257 | 261 | ||
258 | asm("clz %0,%1" : "=r"(result) : "r"(word)); | 262 | asm("clz %0,%1" : "=r"(result) : "r"(word)); |
259 | return 32 - result; | 263 | return 32 - result; |
260 | } | 264 | } |
261 | 265 | ||
262 | unsigned long find_first_zero_bit(const unsigned long *addr, | 266 | unsigned long find_first_zero_bit(const unsigned long *addr, |
263 | unsigned long size); | 267 | unsigned long size); |
264 | unsigned long find_next_zero_bit(const unsigned long *addr, | 268 | unsigned long find_next_zero_bit(const unsigned long *addr, |
265 | unsigned long size, | 269 | unsigned long size, |
266 | unsigned long offset); | 270 | unsigned long offset); |
267 | unsigned long find_first_bit(const unsigned long *addr, | 271 | unsigned long find_first_bit(const unsigned long *addr, |
268 | unsigned long size); | 272 | unsigned long size); |
269 | unsigned long find_next_bit(const unsigned long *addr, | 273 | unsigned long find_next_bit(const unsigned long *addr, |
270 | unsigned long size, | 274 | unsigned long size, |
271 | unsigned long offset); | 275 | unsigned long offset); |
272 | 276 | ||
273 | /* | 277 | /* |
274 | * ffs: find first bit set. This is defined the same way as | 278 | * ffs: find first bit set. This is defined the same way as |
275 | * the libc and compiler builtin ffs routines, therefore | 279 | * the libc and compiler builtin ffs routines, therefore |
276 | * differs in spirit from the above ffz (man ffs). | 280 | * differs in spirit from the above ffz (man ffs). |
277 | * | 281 | * |
278 | * The difference is that bit numbering starts at 1, and if no bit is set, | 282 | * The difference is that bit numbering starts at 1, and if no bit is set, |
279 | * the function returns 0. | 283 | * the function returns 0. |
280 | */ | 284 | */ |
281 | static inline int ffs(unsigned long word) | 285 | static inline int ffs(unsigned long word) |
282 | { | 286 | { |
283 | if(word == 0) | 287 | if(word == 0) |
284 | return 0; | 288 | return 0; |
285 | return __ffs(word) + 1; | 289 | return __ffs(word) + 1; |
286 | } | 290 | } |
287 | 291 | ||
288 | #include <asm-generic/bitops/fls64.h> | 292 | #include <asm-generic/bitops/fls64.h> |
289 | #include <asm-generic/bitops/sched.h> | 293 | #include <asm-generic/bitops/sched.h> |
290 | #include <asm-generic/bitops/hweight.h> | 294 | #include <asm-generic/bitops/hweight.h> |
291 | #include <asm-generic/bitops/lock.h> | 295 | #include <asm-generic/bitops/lock.h> |
292 | 296 | ||
293 | #include <asm-generic/bitops/ext2-non-atomic.h> | 297 | #include <asm-generic/bitops/ext2-non-atomic.h> |
294 | #include <asm-generic/bitops/ext2-atomic.h> | 298 | #include <asm-generic/bitops/ext2-atomic.h> |
295 | #include <asm-generic/bitops/minix-le.h> | 299 | #include <asm-generic/bitops/minix-le.h> |
296 | 300 | ||
297 | #endif /* __ASM_AVR32_BITOPS_H */ | 301 | #endif /* __ASM_AVR32_BITOPS_H */ |
298 | 302 |
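The AVR32 routines above all share one shape: ssrf 5 opens the locked sequence, ld.w fetches the word, the bit is modified with cbr/sbr/andn/or/eor, and stcond either commits the result or fails so that brne 1b retries. As a rough sketch of those semantics only — a hypothetical helper, with a GCC __atomic builtin standing in for the ld.w/stcond pair, not the AVR32 implementation:

/* Sketch only: the semantics of the ssrf/ld.w/stcond retry loop above,
 * written with a GCC __atomic builtin.  Hypothetical helper, not the
 * AVR32 implementation. */
#define SKETCH_BITS_PER_LONG (8 * sizeof(unsigned long))

static inline void sketch_clear_bit(int nr, unsigned long *addr)
{
	unsigned long *p = addr + nr / SKETCH_BITS_PER_LONG;
	unsigned long mask = 1UL << (nr % SKETCH_BITS_PER_LONG);
	unsigned long old = *p;

	/* keep retrying until the word is stored back without interference,
	 * mirroring "stcond %1, %0; brne 1b" */
	while (!__atomic_compare_exchange_n(p, &old, old & ~mask, 0,
					    __ATOMIC_RELAXED, __ATOMIC_RELAXED))
		;	/* the builtin refreshes 'old' on failure */
}

The __builtin_constant_p(nr) branches in the real functions exist only so that cbr/sbr can encode the bit number as an immediate; the retry structure is the same on both paths.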
include/asm-blackfin/bitops.h
1 | #ifndef _BLACKFIN_BITOPS_H | 1 | #ifndef _BLACKFIN_BITOPS_H |
2 | #define _BLACKFIN_BITOPS_H | 2 | #define _BLACKFIN_BITOPS_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Copyright 1992, Linus Torvalds. | 5 | * Copyright 1992, Linus Torvalds. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/compiler.h> | 8 | #include <linux/compiler.h> |
9 | #include <asm/byteorder.h> /* swab32 */ | 9 | #include <asm/byteorder.h> /* swab32 */ |
10 | #include <asm/system.h> /* save_flags */ | 10 | #include <asm/system.h> /* save_flags */ |
11 | 11 | ||
12 | #ifdef __KERNEL__ | 12 | #ifdef __KERNEL__ |
13 | 13 | ||
14 | #ifndef _LINUX_BITOPS_H | ||
15 | #error only <linux/bitops.h> can be included directly | ||
16 | #endif | ||
17 | |||
14 | #include <asm-generic/bitops/ffs.h> | 18 | #include <asm-generic/bitops/ffs.h> |
15 | #include <asm-generic/bitops/__ffs.h> | 19 | #include <asm-generic/bitops/__ffs.h> |
16 | #include <asm-generic/bitops/sched.h> | 20 | #include <asm-generic/bitops/sched.h> |
17 | #include <asm-generic/bitops/ffz.h> | 21 | #include <asm-generic/bitops/ffz.h> |
18 | 22 | ||
19 | static __inline__ void set_bit(int nr, volatile unsigned long *addr) | 23 | static __inline__ void set_bit(int nr, volatile unsigned long *addr) |
20 | { | 24 | { |
21 | int *a = (int *)addr; | 25 | int *a = (int *)addr; |
22 | int mask; | 26 | int mask; |
23 | unsigned long flags; | 27 | unsigned long flags; |
24 | 28 | ||
25 | a += nr >> 5; | 29 | a += nr >> 5; |
26 | mask = 1 << (nr & 0x1f); | 30 | mask = 1 << (nr & 0x1f); |
27 | local_irq_save(flags); | 31 | local_irq_save(flags); |
28 | *a |= mask; | 32 | *a |= mask; |
29 | local_irq_restore(flags); | 33 | local_irq_restore(flags); |
30 | } | 34 | } |
31 | 35 | ||
32 | static __inline__ void __set_bit(int nr, volatile unsigned long *addr) | 36 | static __inline__ void __set_bit(int nr, volatile unsigned long *addr) |
33 | { | 37 | { |
34 | int *a = (int *)addr; | 38 | int *a = (int *)addr; |
35 | int mask; | 39 | int mask; |
36 | 40 | ||
37 | a += nr >> 5; | 41 | a += nr >> 5; |
38 | mask = 1 << (nr & 0x1f); | 42 | mask = 1 << (nr & 0x1f); |
39 | *a |= mask; | 43 | *a |= mask; |
40 | } | 44 | } |
41 | 45 | ||
42 | /* | 46 | /* |
43 | * clear_bit() doesn't provide any barrier for the compiler. | 47 | * clear_bit() doesn't provide any barrier for the compiler. |
44 | */ | 48 | */ |
45 | #define smp_mb__before_clear_bit() barrier() | 49 | #define smp_mb__before_clear_bit() barrier() |
46 | #define smp_mb__after_clear_bit() barrier() | 50 | #define smp_mb__after_clear_bit() barrier() |
47 | 51 | ||
48 | static __inline__ void clear_bit(int nr, volatile unsigned long *addr) | 52 | static __inline__ void clear_bit(int nr, volatile unsigned long *addr) |
49 | { | 53 | { |
50 | int *a = (int *)addr; | 54 | int *a = (int *)addr; |
51 | int mask; | 55 | int mask; |
52 | unsigned long flags; | 56 | unsigned long flags; |
53 | a += nr >> 5; | 57 | a += nr >> 5; |
54 | mask = 1 << (nr & 0x1f); | 58 | mask = 1 << (nr & 0x1f); |
55 | local_irq_save(flags); | 59 | local_irq_save(flags); |
56 | *a &= ~mask; | 60 | *a &= ~mask; |
57 | local_irq_restore(flags); | 61 | local_irq_restore(flags); |
58 | } | 62 | } |
59 | 63 | ||
60 | static __inline__ void __clear_bit(int nr, volatile unsigned long *addr) | 64 | static __inline__ void __clear_bit(int nr, volatile unsigned long *addr) |
61 | { | 65 | { |
62 | int *a = (int *)addr; | 66 | int *a = (int *)addr; |
63 | int mask; | 67 | int mask; |
64 | 68 | ||
65 | a += nr >> 5; | 69 | a += nr >> 5; |
66 | mask = 1 << (nr & 0x1f); | 70 | mask = 1 << (nr & 0x1f); |
67 | *a &= ~mask; | 71 | *a &= ~mask; |
68 | } | 72 | } |
69 | 73 | ||
70 | static __inline__ void change_bit(int nr, volatile unsigned long *addr) | 74 | static __inline__ void change_bit(int nr, volatile unsigned long *addr) |
71 | { | 75 | { |
72 | int mask, flags; | 76 | int mask, flags; |
73 | unsigned long *ADDR = (unsigned long *)addr; | 77 | unsigned long *ADDR = (unsigned long *)addr; |
74 | 78 | ||
75 | ADDR += nr >> 5; | 79 | ADDR += nr >> 5; |
76 | mask = 1 << (nr & 31); | 80 | mask = 1 << (nr & 31); |
77 | local_irq_save(flags); | 81 | local_irq_save(flags); |
78 | *ADDR ^= mask; | 82 | *ADDR ^= mask; |
79 | local_irq_restore(flags); | 83 | local_irq_restore(flags); |
80 | } | 84 | } |
81 | 85 | ||
82 | static __inline__ void __change_bit(int nr, volatile unsigned long *addr) | 86 | static __inline__ void __change_bit(int nr, volatile unsigned long *addr) |
83 | { | 87 | { |
84 | int mask; | 88 | int mask; |
85 | unsigned long *ADDR = (unsigned long *)addr; | 89 | unsigned long *ADDR = (unsigned long *)addr; |
86 | 90 | ||
87 | ADDR += nr >> 5; | 91 | ADDR += nr >> 5; |
88 | mask = 1 << (nr & 31); | 92 | mask = 1 << (nr & 31); |
89 | *ADDR ^= mask; | 93 | *ADDR ^= mask; |
90 | } | 94 | } |
91 | 95 | ||
92 | static __inline__ int test_and_set_bit(int nr, void *addr) | 96 | static __inline__ int test_and_set_bit(int nr, void *addr) |
93 | { | 97 | { |
94 | int mask, retval; | 98 | int mask, retval; |
95 | volatile unsigned int *a = (volatile unsigned int *)addr; | 99 | volatile unsigned int *a = (volatile unsigned int *)addr; |
96 | unsigned long flags; | 100 | unsigned long flags; |
97 | 101 | ||
98 | a += nr >> 5; | 102 | a += nr >> 5; |
99 | mask = 1 << (nr & 0x1f); | 103 | mask = 1 << (nr & 0x1f); |
100 | local_irq_save(flags); | 104 | local_irq_save(flags); |
101 | retval = (mask & *a) != 0; | 105 | retval = (mask & *a) != 0; |
102 | *a |= mask; | 106 | *a |= mask; |
103 | local_irq_restore(flags); | 107 | local_irq_restore(flags); |
104 | 108 | ||
105 | return retval; | 109 | return retval; |
106 | } | 110 | } |
107 | 111 | ||
108 | static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr) | 112 | static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr) |
109 | { | 113 | { |
110 | int mask, retval; | 114 | int mask, retval; |
111 | volatile unsigned int *a = (volatile unsigned int *)addr; | 115 | volatile unsigned int *a = (volatile unsigned int *)addr; |
112 | 116 | ||
113 | a += nr >> 5; | 117 | a += nr >> 5; |
114 | mask = 1 << (nr & 0x1f); | 118 | mask = 1 << (nr & 0x1f); |
115 | retval = (mask & *a) != 0; | 119 | retval = (mask & *a) != 0; |
116 | *a |= mask; | 120 | *a |= mask; |
117 | return retval; | 121 | return retval; |
118 | } | 122 | } |
119 | 123 | ||
120 | static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr) | 124 | static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr) |
121 | { | 125 | { |
122 | int mask, retval; | 126 | int mask, retval; |
123 | volatile unsigned int *a = (volatile unsigned int *)addr; | 127 | volatile unsigned int *a = (volatile unsigned int *)addr; |
124 | unsigned long flags; | 128 | unsigned long flags; |
125 | 129 | ||
126 | a += nr >> 5; | 130 | a += nr >> 5; |
127 | mask = 1 << (nr & 0x1f); | 131 | mask = 1 << (nr & 0x1f); |
128 | local_irq_save(flags); | 132 | local_irq_save(flags); |
129 | retval = (mask & *a) != 0; | 133 | retval = (mask & *a) != 0; |
130 | *a &= ~mask; | 134 | *a &= ~mask; |
131 | local_irq_restore(flags); | 135 | local_irq_restore(flags); |
132 | 136 | ||
133 | return retval; | 137 | return retval; |
134 | } | 138 | } |
135 | 139 | ||
136 | static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr) | 140 | static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr) |
137 | { | 141 | { |
138 | int mask, retval; | 142 | int mask, retval; |
139 | volatile unsigned int *a = (volatile unsigned int *)addr; | 143 | volatile unsigned int *a = (volatile unsigned int *)addr; |
140 | 144 | ||
141 | a += nr >> 5; | 145 | a += nr >> 5; |
142 | mask = 1 << (nr & 0x1f); | 146 | mask = 1 << (nr & 0x1f); |
143 | retval = (mask & *a) != 0; | 147 | retval = (mask & *a) != 0; |
144 | *a &= ~mask; | 148 | *a &= ~mask; |
145 | return retval; | 149 | return retval; |
146 | } | 150 | } |
147 | 151 | ||
148 | static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr) | 152 | static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr) |
149 | { | 153 | { |
150 | int mask, retval; | 154 | int mask, retval; |
151 | volatile unsigned int *a = (volatile unsigned int *)addr; | 155 | volatile unsigned int *a = (volatile unsigned int *)addr; |
152 | unsigned long flags; | 156 | unsigned long flags; |
153 | 157 | ||
154 | a += nr >> 5; | 158 | a += nr >> 5; |
155 | mask = 1 << (nr & 0x1f); | 159 | mask = 1 << (nr & 0x1f); |
156 | local_irq_save(flags); | 160 | local_irq_save(flags); |
157 | retval = (mask & *a) != 0; | 161 | retval = (mask & *a) != 0; |
158 | *a ^= mask; | 162 | *a ^= mask; |
159 | local_irq_restore(flags); | 163 | local_irq_restore(flags); |
160 | return retval; | 164 | return retval; |
161 | } | 165 | } |
162 | 166 | ||
163 | static __inline__ int __test_and_change_bit(int nr, | 167 | static __inline__ int __test_and_change_bit(int nr, |
164 | volatile unsigned long *addr) | 168 | volatile unsigned long *addr) |
165 | { | 169 | { |
166 | int mask, retval; | 170 | int mask, retval; |
167 | volatile unsigned int *a = (volatile unsigned int *)addr; | 171 | volatile unsigned int *a = (volatile unsigned int *)addr; |
168 | 172 | ||
169 | a += nr >> 5; | 173 | a += nr >> 5; |
170 | mask = 1 << (nr & 0x1f); | 174 | mask = 1 << (nr & 0x1f); |
171 | retval = (mask & *a) != 0; | 175 | retval = (mask & *a) != 0; |
172 | *a ^= mask; | 176 | *a ^= mask; |
173 | return retval; | 177 | return retval; |
174 | } | 178 | } |
175 | 179 | ||
176 | /* | 180 | /* |
177 | * This routine doesn't need to be atomic. | 181 | * This routine doesn't need to be atomic. |
178 | */ | 182 | */ |
179 | static __inline__ int __constant_test_bit(int nr, const void *addr) | 183 | static __inline__ int __constant_test_bit(int nr, const void *addr) |
180 | { | 184 | { |
181 | return ((1UL << (nr & 31)) & | 185 | return ((1UL << (nr & 31)) & |
182 | (((const volatile unsigned int *)addr)[nr >> 5])) != 0; | 186 | (((const volatile unsigned int *)addr)[nr >> 5])) != 0; |
183 | } | 187 | } |
184 | 188 | ||
185 | static __inline__ int __test_bit(int nr, const void *addr) | 189 | static __inline__ int __test_bit(int nr, const void *addr) |
186 | { | 190 | { |
187 | int *a = (int *)addr; | 191 | int *a = (int *)addr; |
188 | int mask; | 192 | int mask; |
189 | 193 | ||
190 | a += nr >> 5; | 194 | a += nr >> 5; |
191 | mask = 1 << (nr & 0x1f); | 195 | mask = 1 << (nr & 0x1f); |
192 | return ((mask & *a) != 0); | 196 | return ((mask & *a) != 0); |
193 | } | 197 | } |
194 | 198 | ||
195 | #define test_bit(nr,addr) \ | 199 | #define test_bit(nr,addr) \ |
196 | (__builtin_constant_p(nr) ? \ | 200 | (__builtin_constant_p(nr) ? \ |
197 | __constant_test_bit((nr),(addr)) : \ | 201 | __constant_test_bit((nr),(addr)) : \ |
198 | __test_bit((nr),(addr))) | 202 | __test_bit((nr),(addr))) |
199 | 203 | ||
200 | #include <asm-generic/bitops/find.h> | 204 | #include <asm-generic/bitops/find.h> |
201 | #include <asm-generic/bitops/hweight.h> | 205 | #include <asm-generic/bitops/hweight.h> |
202 | #include <asm-generic/bitops/lock.h> | 206 | #include <asm-generic/bitops/lock.h> |
203 | 207 | ||
204 | #include <asm-generic/bitops/ext2-atomic.h> | 208 | #include <asm-generic/bitops/ext2-atomic.h> |
205 | #include <asm-generic/bitops/ext2-non-atomic.h> | 209 | #include <asm-generic/bitops/ext2-non-atomic.h> |
206 | 210 | ||
207 | #include <asm-generic/bitops/minix.h> | 211 | #include <asm-generic/bitops/minix.h> |
208 | 212 | ||
209 | #endif /* __KERNEL__ */ | 213 | #endif /* __KERNEL__ */ |
210 | 214 | ||
211 | #include <asm-generic/bitops/fls.h> | 215 | #include <asm-generic/bitops/fls.h> |
212 | #include <asm-generic/bitops/fls64.h> | 216 | #include <asm-generic/bitops/fls64.h> |
213 | 217 | ||
214 | #endif /* _BLACKFIN_BITOPS_H */ | 218 | #endif /* _BLACKFIN_BITOPS_H */ |
215 | 219 |
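The Blackfin versions above get atomicity by disabling interrupts around a plain read-modify-write, which is enough when interrupts are the only source of concurrency, and test_bit() switches between __constant_test_bit() and __test_bit() with __builtin_constant_p(). A minimal sketch of that dispatch idiom, under hypothetical names:

/* Sketch only, hypothetical names: the __builtin_constant_p() dispatch
 * that test_bit() above uses to pick a compile-time-foldable arm when
 * the bit number is a literal. */
static inline int sketch_test_bit_runtime(int nr, const volatile void *addr)
{
	const volatile unsigned int *a = (const volatile unsigned int *)addr;

	return (a[nr >> 5] & (1U << (nr & 0x1f))) != 0;
}

#define sketch_test_bit(nr, addr)					\
	(__builtin_constant_p(nr)					\
	 ? ((((const volatile unsigned int *)(addr))[(nr) >> 5]	\
	     & (1U << ((nr) & 0x1f))) != 0)				\
	 : sketch_test_bit_runtime((nr), (addr)))

When nr is a literal, the first arm is itself a constant-index, constant-mask expression the compiler can reduce to a single masked load; otherwise the inline function handles the general case.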
include/asm-cris/bitops.h
1 | /* asm/bitops.h for Linux/CRIS | 1 | /* asm/bitops.h for Linux/CRIS |
2 | * | 2 | * |
3 | * TODO: asm versions if speed is needed | 3 | * TODO: asm versions if speed is needed |
4 | * | 4 | * |
5 | * All bit operations return 0 if the bit was cleared before the | 5 | * All bit operations return 0 if the bit was cleared before the |
6 | * operation and != 0 if it was not. | 6 | * operation and != 0 if it was not. |
7 | * | 7 | * |
8 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). | 8 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef _CRIS_BITOPS_H | 11 | #ifndef _CRIS_BITOPS_H |
12 | #define _CRIS_BITOPS_H | 12 | #define _CRIS_BITOPS_H |
13 | 13 | ||
14 | /* Currently this is unsuitable for consumption outside the kernel. */ | 14 | /* Currently this is unsuitable for consumption outside the kernel. */ |
15 | #ifdef __KERNEL__ | 15 | #ifdef __KERNEL__ |
16 | 16 | ||
17 | #ifndef _LINUX_BITOPS_H | ||
18 | #error only <linux/bitops.h> can be included directly | ||
19 | #endif | ||
20 | |||
17 | #include <asm/arch/bitops.h> | 21 | #include <asm/arch/bitops.h> |
18 | #include <asm/system.h> | 22 | #include <asm/system.h> |
19 | #include <asm/atomic.h> | 23 | #include <asm/atomic.h> |
20 | #include <linux/compiler.h> | 24 | #include <linux/compiler.h> |
21 | 25 | ||
22 | /* | 26 | /* |
23 | * Some hacks to defeat gcc over-optimizations.. | 27 | * Some hacks to defeat gcc over-optimizations.. |
24 | */ | 28 | */ |
25 | struct __dummy { unsigned long a[100]; }; | 29 | struct __dummy { unsigned long a[100]; }; |
26 | #define ADDR (*(struct __dummy *) addr) | 30 | #define ADDR (*(struct __dummy *) addr) |
27 | #define CONST_ADDR (*(const struct __dummy *) addr) | 31 | #define CONST_ADDR (*(const struct __dummy *) addr) |
28 | 32 | ||
29 | /* | 33 | /* |
30 | * set_bit - Atomically set a bit in memory | 34 | * set_bit - Atomically set a bit in memory |
31 | * @nr: the bit to set | 35 | * @nr: the bit to set |
32 | * @addr: the address to start counting from | 36 | * @addr: the address to start counting from |
33 | * | 37 | * |
34 | * This function is atomic and may not be reordered. See __set_bit() | 38 | * This function is atomic and may not be reordered. See __set_bit() |
35 | * if you do not require the atomic guarantees. | 39 | * if you do not require the atomic guarantees. |
36 | * Note that @nr may be almost arbitrarily large; this function is not | 40 | * Note that @nr may be almost arbitrarily large; this function is not |
37 | * restricted to acting on a single-word quantity. | 41 | * restricted to acting on a single-word quantity. |
38 | */ | 42 | */ |
39 | 43 | ||
40 | #define set_bit(nr, addr) (void)test_and_set_bit(nr, addr) | 44 | #define set_bit(nr, addr) (void)test_and_set_bit(nr, addr) |
41 | 45 | ||
42 | /* | 46 | /* |
43 | * clear_bit - Clears a bit in memory | 47 | * clear_bit - Clears a bit in memory |
44 | * @nr: Bit to clear | 48 | * @nr: Bit to clear |
45 | * @addr: Address to start counting from | 49 | * @addr: Address to start counting from |
46 | * | 50 | * |
47 | * clear_bit() is atomic and may not be reordered. However, it does | 51 | * clear_bit() is atomic and may not be reordered. However, it does |
48 | * not contain a memory barrier, so if it is used for locking purposes, | 52 | * not contain a memory barrier, so if it is used for locking purposes, |
49 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | 53 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() |
50 | * in order to ensure changes are visible on other processors. | 54 | * in order to ensure changes are visible on other processors. |
51 | */ | 55 | */ |
52 | 56 | ||
53 | #define clear_bit(nr, addr) (void)test_and_clear_bit(nr, addr) | 57 | #define clear_bit(nr, addr) (void)test_and_clear_bit(nr, addr) |
54 | 58 | ||
55 | /* | 59 | /* |
56 | * change_bit - Toggle a bit in memory | 60 | * change_bit - Toggle a bit in memory |
57 | * @nr: Bit to change | 61 | * @nr: Bit to change |
58 | * @addr: Address to start counting from | 62 | * @addr: Address to start counting from |
59 | * | 63 | * |
60 | * change_bit() is atomic and may not be reordered. | 64 | * change_bit() is atomic and may not be reordered. |
61 | * Note that @nr may be almost arbitrarily large; this function is not | 65 | * Note that @nr may be almost arbitrarily large; this function is not |
62 | * restricted to acting on a single-word quantity. | 66 | * restricted to acting on a single-word quantity. |
63 | */ | 67 | */ |
64 | 68 | ||
65 | #define change_bit(nr, addr) (void)test_and_change_bit(nr, addr) | 69 | #define change_bit(nr, addr) (void)test_and_change_bit(nr, addr) |
66 | 70 | ||
67 | /** | 71 | /** |
68 | * test_and_set_bit - Set a bit and return its old value | 72 | * test_and_set_bit - Set a bit and return its old value |
69 | * @nr: Bit to set | 73 | * @nr: Bit to set |
70 | * @addr: Address to count from | 74 | * @addr: Address to count from |
71 | * | 75 | * |
72 | * This operation is atomic and cannot be reordered. | 76 | * This operation is atomic and cannot be reordered. |
73 | * It also implies a memory barrier. | 77 | * It also implies a memory barrier. |
74 | */ | 78 | */ |
75 | 79 | ||
76 | static inline int test_and_set_bit(int nr, volatile unsigned long *addr) | 80 | static inline int test_and_set_bit(int nr, volatile unsigned long *addr) |
77 | { | 81 | { |
78 | unsigned int mask, retval; | 82 | unsigned int mask, retval; |
79 | unsigned long flags; | 83 | unsigned long flags; |
80 | unsigned int *adr = (unsigned int *)addr; | 84 | unsigned int *adr = (unsigned int *)addr; |
81 | 85 | ||
82 | adr += nr >> 5; | 86 | adr += nr >> 5; |
83 | mask = 1 << (nr & 0x1f); | 87 | mask = 1 << (nr & 0x1f); |
84 | cris_atomic_save(addr, flags); | 88 | cris_atomic_save(addr, flags); |
85 | retval = (mask & *adr) != 0; | 89 | retval = (mask & *adr) != 0; |
86 | *adr |= mask; | 90 | *adr |= mask; |
87 | cris_atomic_restore(addr, flags); | 91 | cris_atomic_restore(addr, flags); |
88 | return retval; | 92 | return retval; |
89 | } | 93 | } |
90 | 94 | ||
91 | /* | 95 | /* |
92 | * clear_bit() doesn't provide any barrier for the compiler. | 96 | * clear_bit() doesn't provide any barrier for the compiler. |
93 | */ | 97 | */ |
94 | #define smp_mb__before_clear_bit() barrier() | 98 | #define smp_mb__before_clear_bit() barrier() |
95 | #define smp_mb__after_clear_bit() barrier() | 99 | #define smp_mb__after_clear_bit() barrier() |
96 | 100 | ||
97 | /** | 101 | /** |
98 | * test_and_clear_bit - Clear a bit and return its old value | 102 | * test_and_clear_bit - Clear a bit and return its old value |
99 | * @nr: Bit to clear | 103 | * @nr: Bit to clear |
100 | * @addr: Address to count from | 104 | * @addr: Address to count from |
101 | * | 105 | * |
102 | * This operation is atomic and cannot be reordered. | 106 | * This operation is atomic and cannot be reordered. |
103 | * It also implies a memory barrier. | 107 | * It also implies a memory barrier. |
104 | */ | 108 | */ |
105 | 109 | ||
106 | static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) | 110 | static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) |
107 | { | 111 | { |
108 | unsigned int mask, retval; | 112 | unsigned int mask, retval; |
109 | unsigned long flags; | 113 | unsigned long flags; |
110 | unsigned int *adr = (unsigned int *)addr; | 114 | unsigned int *adr = (unsigned int *)addr; |
111 | 115 | ||
112 | adr += nr >> 5; | 116 | adr += nr >> 5; |
113 | mask = 1 << (nr & 0x1f); | 117 | mask = 1 << (nr & 0x1f); |
114 | cris_atomic_save(addr, flags); | 118 | cris_atomic_save(addr, flags); |
115 | retval = (mask & *adr) != 0; | 119 | retval = (mask & *adr) != 0; |
116 | *adr &= ~mask; | 120 | *adr &= ~mask; |
117 | cris_atomic_restore(addr, flags); | 121 | cris_atomic_restore(addr, flags); |
118 | return retval; | 122 | return retval; |
119 | } | 123 | } |
120 | 124 | ||
121 | /** | 125 | /** |
122 | * test_and_change_bit - Change a bit and return its old value | 126 | * test_and_change_bit - Change a bit and return its old value |
123 | * @nr: Bit to change | 127 | * @nr: Bit to change |
124 | * @addr: Address to count from | 128 | * @addr: Address to count from |
125 | * | 129 | * |
126 | * This operation is atomic and cannot be reordered. | 130 | * This operation is atomic and cannot be reordered. |
127 | * It also implies a memory barrier. | 131 | * It also implies a memory barrier. |
128 | */ | 132 | */ |
129 | 133 | ||
130 | static inline int test_and_change_bit(int nr, volatile unsigned long *addr) | 134 | static inline int test_and_change_bit(int nr, volatile unsigned long *addr) |
131 | { | 135 | { |
132 | unsigned int mask, retval; | 136 | unsigned int mask, retval; |
133 | unsigned long flags; | 137 | unsigned long flags; |
134 | unsigned int *adr = (unsigned int *)addr; | 138 | unsigned int *adr = (unsigned int *)addr; |
135 | adr += nr >> 5; | 139 | adr += nr >> 5; |
136 | mask = 1 << (nr & 0x1f); | 140 | mask = 1 << (nr & 0x1f); |
137 | cris_atomic_save(addr, flags); | 141 | cris_atomic_save(addr, flags); |
138 | retval = (mask & *adr) != 0; | 142 | retval = (mask & *adr) != 0; |
139 | *adr ^= mask; | 143 | *adr ^= mask; |
140 | cris_atomic_restore(addr, flags); | 144 | cris_atomic_restore(addr, flags); |
141 | return retval; | 145 | return retval; |
142 | } | 146 | } |
143 | 147 | ||
144 | #include <asm-generic/bitops/non-atomic.h> | 148 | #include <asm-generic/bitops/non-atomic.h> |
145 | 149 | ||
146 | /* | 150 | /* |
147 | * Since we define it "external", it collides with the built-in | 151 | * Since we define it "external", it collides with the built-in |
148 | * definition, which doesn't have the same semantics. We don't want to | 152 | * definition, which doesn't have the same semantics. We don't want to |
149 | * use -fno-builtin, so just hide the name ffs. | 153 | * use -fno-builtin, so just hide the name ffs. |
150 | */ | 154 | */ |
151 | #define ffs kernel_ffs | 155 | #define ffs kernel_ffs |
152 | 156 | ||
153 | #include <asm-generic/bitops/fls.h> | 157 | #include <asm-generic/bitops/fls.h> |
154 | #include <asm-generic/bitops/fls64.h> | 158 | #include <asm-generic/bitops/fls64.h> |
155 | #include <asm-generic/bitops/hweight.h> | 159 | #include <asm-generic/bitops/hweight.h> |
156 | #include <asm-generic/bitops/find.h> | 160 | #include <asm-generic/bitops/find.h> |
157 | #include <asm-generic/bitops/lock.h> | 161 | #include <asm-generic/bitops/lock.h> |
158 | 162 | ||
159 | #include <asm-generic/bitops/ext2-non-atomic.h> | 163 | #include <asm-generic/bitops/ext2-non-atomic.h> |
160 | 164 | ||
161 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) | 165 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) |
162 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) | 166 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) |
163 | 167 | ||
164 | #include <asm-generic/bitops/minix.h> | 168 | #include <asm-generic/bitops/minix.h> |
165 | #include <asm-generic/bitops/sched.h> | 169 | #include <asm-generic/bitops/sched.h> |
166 | 170 | ||
167 | #endif /* __KERNEL__ */ | 171 | #endif /* __KERNEL__ */ |
168 | 172 | ||
169 | #endif /* _CRIS_BITOPS_H */ | 173 | #endif /* _CRIS_BITOPS_H */ |
170 | 174 |
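CRIS keeps only the three test_and_*() primitives (guarded by cris_atomic_save()/cris_atomic_restore()) and derives set_bit(), clear_bit() and change_bit() by discarding the return value. A sketch of the same layering, with hypothetical names and a GCC __atomic builtin modelling the primitive instead of the CRIS save/restore pair:

/* Sketch only, hypothetical names: deriving the void operations from a
 * value-returning primitive, as the CRIS header does with
 * "(void)test_and_set_bit(nr, addr)".  The primitive itself is modelled
 * with a GCC __atomic builtin instead of cris_atomic_save()/restore(). */
static inline int sketch_test_and_set_bit(int nr, volatile unsigned int *addr)
{
	volatile unsigned int *adr = addr + (nr >> 5);
	unsigned int mask = 1U << (nr & 0x1f);

	/* atomically OR the mask in and report whether it was already set */
	return (__atomic_fetch_or(adr, mask, __ATOMIC_SEQ_CST) & mask) != 0;
}

/* the non-returning form just throws the old value away */
#define sketch_set_bit(nr, addr) ((void)sketch_test_and_set_bit((nr), (addr)))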
include/asm-frv/bitops.h
1 | /* bitops.h: bit operations for the Fujitsu FR-V CPUs | 1 | /* bitops.h: bit operations for the Fujitsu FR-V CPUs |
2 | * | 2 | * |
3 | * For an explanation of how atomic ops work in this arch, see: | 3 | * For an explanation of how atomic ops work in this arch, see: |
4 | * Documentation/fujitsu/frv/atomic-ops.txt | 4 | * Documentation/fujitsu/frv/atomic-ops.txt |
5 | * | 5 | * |
6 | * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. | 6 | * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. |
7 | * Written by David Howells (dhowells@redhat.com) | 7 | * Written by David Howells (dhowells@redhat.com) |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
11 | * as published by the Free Software Foundation; either version | 11 | * as published by the Free Software Foundation; either version |
12 | * 2 of the License, or (at your option) any later version. | 12 | * 2 of the License, or (at your option) any later version. |
13 | */ | 13 | */ |
14 | #ifndef _ASM_BITOPS_H | 14 | #ifndef _ASM_BITOPS_H |
15 | #define _ASM_BITOPS_H | 15 | #define _ASM_BITOPS_H |
16 | 16 | ||
17 | #include <linux/compiler.h> | 17 | #include <linux/compiler.h> |
18 | #include <asm/byteorder.h> | 18 | #include <asm/byteorder.h> |
19 | #include <asm/system.h> | 19 | #include <asm/system.h> |
20 | #include <asm/atomic.h> | 20 | #include <asm/atomic.h> |
21 | 21 | ||
22 | #ifdef __KERNEL__ | 22 | #ifdef __KERNEL__ |
23 | 23 | ||
24 | #ifndef _LINUX_BITOPS_H | ||
25 | #error only <linux/bitops.h> can be included directly | ||
26 | #endif | ||
27 | |||
24 | #include <asm-generic/bitops/ffz.h> | 28 | #include <asm-generic/bitops/ffz.h> |
25 | 29 | ||
26 | /* | 30 | /* |
27 | * clear_bit() doesn't provide any barrier for the compiler. | 31 | * clear_bit() doesn't provide any barrier for the compiler. |
28 | */ | 32 | */ |
29 | #define smp_mb__before_clear_bit() barrier() | 33 | #define smp_mb__before_clear_bit() barrier() |
30 | #define smp_mb__after_clear_bit() barrier() | 34 | #define smp_mb__after_clear_bit() barrier() |
31 | 35 | ||
32 | static inline int test_and_clear_bit(int nr, volatile void *addr) | 36 | static inline int test_and_clear_bit(int nr, volatile void *addr) |
33 | { | 37 | { |
34 | volatile unsigned long *ptr = addr; | 38 | volatile unsigned long *ptr = addr; |
35 | unsigned long mask = 1UL << (nr & 31); | 39 | unsigned long mask = 1UL << (nr & 31); |
36 | ptr += nr >> 5; | 40 | ptr += nr >> 5; |
37 | return (atomic_test_and_ANDNOT_mask(mask, ptr) & mask) != 0; | 41 | return (atomic_test_and_ANDNOT_mask(mask, ptr) & mask) != 0; |
38 | } | 42 | } |
39 | 43 | ||
40 | static inline int test_and_set_bit(int nr, volatile void *addr) | 44 | static inline int test_and_set_bit(int nr, volatile void *addr) |
41 | { | 45 | { |
42 | volatile unsigned long *ptr = addr; | 46 | volatile unsigned long *ptr = addr; |
43 | unsigned long mask = 1UL << (nr & 31); | 47 | unsigned long mask = 1UL << (nr & 31); |
44 | ptr += nr >> 5; | 48 | ptr += nr >> 5; |
45 | return (atomic_test_and_OR_mask(mask, ptr) & mask) != 0; | 49 | return (atomic_test_and_OR_mask(mask, ptr) & mask) != 0; |
46 | } | 50 | } |
47 | 51 | ||
48 | static inline int test_and_change_bit(int nr, volatile void *addr) | 52 | static inline int test_and_change_bit(int nr, volatile void *addr) |
49 | { | 53 | { |
50 | volatile unsigned long *ptr = addr; | 54 | volatile unsigned long *ptr = addr; |
51 | unsigned long mask = 1UL << (nr & 31); | 55 | unsigned long mask = 1UL << (nr & 31); |
52 | ptr += nr >> 5; | 56 | ptr += nr >> 5; |
53 | return (atomic_test_and_XOR_mask(mask, ptr) & mask) != 0; | 57 | return (atomic_test_and_XOR_mask(mask, ptr) & mask) != 0; |
54 | } | 58 | } |
55 | 59 | ||
56 | static inline void clear_bit(int nr, volatile void *addr) | 60 | static inline void clear_bit(int nr, volatile void *addr) |
57 | { | 61 | { |
58 | test_and_clear_bit(nr, addr); | 62 | test_and_clear_bit(nr, addr); |
59 | } | 63 | } |
60 | 64 | ||
61 | static inline void set_bit(int nr, volatile void *addr) | 65 | static inline void set_bit(int nr, volatile void *addr) |
62 | { | 66 | { |
63 | test_and_set_bit(nr, addr); | 67 | test_and_set_bit(nr, addr); |
64 | } | 68 | } |
65 | 69 | ||
66 | static inline void change_bit(int nr, volatile void * addr) | 70 | static inline void change_bit(int nr, volatile void * addr) |
67 | { | 71 | { |
68 | test_and_change_bit(nr, addr); | 72 | test_and_change_bit(nr, addr); |
69 | } | 73 | } |
70 | 74 | ||
71 | static inline void __clear_bit(int nr, volatile void * addr) | 75 | static inline void __clear_bit(int nr, volatile void * addr) |
72 | { | 76 | { |
73 | volatile unsigned long *a = addr; | 77 | volatile unsigned long *a = addr; |
74 | int mask; | 78 | int mask; |
75 | 79 | ||
76 | a += nr >> 5; | 80 | a += nr >> 5; |
77 | mask = 1 << (nr & 31); | 81 | mask = 1 << (nr & 31); |
78 | *a &= ~mask; | 82 | *a &= ~mask; |
79 | } | 83 | } |
80 | 84 | ||
81 | static inline void __set_bit(int nr, volatile void * addr) | 85 | static inline void __set_bit(int nr, volatile void * addr) |
82 | { | 86 | { |
83 | volatile unsigned long *a = addr; | 87 | volatile unsigned long *a = addr; |
84 | int mask; | 88 | int mask; |
85 | 89 | ||
86 | a += nr >> 5; | 90 | a += nr >> 5; |
87 | mask = 1 << (nr & 31); | 91 | mask = 1 << (nr & 31); |
88 | *a |= mask; | 92 | *a |= mask; |
89 | } | 93 | } |
90 | 94 | ||
91 | static inline void __change_bit(int nr, volatile void *addr) | 95 | static inline void __change_bit(int nr, volatile void *addr) |
92 | { | 96 | { |
93 | volatile unsigned long *a = addr; | 97 | volatile unsigned long *a = addr; |
94 | int mask; | 98 | int mask; |
95 | 99 | ||
96 | a += nr >> 5; | 100 | a += nr >> 5; |
97 | mask = 1 << (nr & 31); | 101 | mask = 1 << (nr & 31); |
98 | *a ^= mask; | 102 | *a ^= mask; |
99 | } | 103 | } |
100 | 104 | ||
101 | static inline int __test_and_clear_bit(int nr, volatile void * addr) | 105 | static inline int __test_and_clear_bit(int nr, volatile void * addr) |
102 | { | 106 | { |
103 | volatile unsigned long *a = addr; | 107 | volatile unsigned long *a = addr; |
104 | int mask, retval; | 108 | int mask, retval; |
105 | 109 | ||
106 | a += nr >> 5; | 110 | a += nr >> 5; |
107 | mask = 1 << (nr & 31); | 111 | mask = 1 << (nr & 31); |
108 | retval = (mask & *a) != 0; | 112 | retval = (mask & *a) != 0; |
109 | *a &= ~mask; | 113 | *a &= ~mask; |
110 | return retval; | 114 | return retval; |
111 | } | 115 | } |
112 | 116 | ||
113 | static inline int __test_and_set_bit(int nr, volatile void * addr) | 117 | static inline int __test_and_set_bit(int nr, volatile void * addr) |
114 | { | 118 | { |
115 | volatile unsigned long *a = addr; | 119 | volatile unsigned long *a = addr; |
116 | int mask, retval; | 120 | int mask, retval; |
117 | 121 | ||
118 | a += nr >> 5; | 122 | a += nr >> 5; |
119 | mask = 1 << (nr & 31); | 123 | mask = 1 << (nr & 31); |
120 | retval = (mask & *a) != 0; | 124 | retval = (mask & *a) != 0; |
121 | *a |= mask; | 125 | *a |= mask; |
122 | return retval; | 126 | return retval; |
123 | } | 127 | } |
124 | 128 | ||
125 | static inline int __test_and_change_bit(int nr, volatile void * addr) | 129 | static inline int __test_and_change_bit(int nr, volatile void * addr) |
126 | { | 130 | { |
127 | volatile unsigned long *a = addr; | 131 | volatile unsigned long *a = addr; |
128 | int mask, retval; | 132 | int mask, retval; |
129 | 133 | ||
130 | a += nr >> 5; | 134 | a += nr >> 5; |
131 | mask = 1 << (nr & 31); | 135 | mask = 1 << (nr & 31); |
132 | retval = (mask & *a) != 0; | 136 | retval = (mask & *a) != 0; |
133 | *a ^= mask; | 137 | *a ^= mask; |
134 | return retval; | 138 | return retval; |
135 | } | 139 | } |
136 | 140 | ||
137 | /* | 141 | /* |
138 | * This routine doesn't need to be atomic. | 142 | * This routine doesn't need to be atomic. |
139 | */ | 143 | */ |
140 | static inline int __constant_test_bit(int nr, const volatile void * addr) | 144 | static inline int __constant_test_bit(int nr, const volatile void * addr) |
141 | { | 145 | { |
142 | return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; | 146 | return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; |
143 | } | 147 | } |
144 | 148 | ||
145 | static inline int __test_bit(int nr, const volatile void * addr) | 149 | static inline int __test_bit(int nr, const volatile void * addr) |
146 | { | 150 | { |
147 | int * a = (int *) addr; | 151 | int * a = (int *) addr; |
148 | int mask; | 152 | int mask; |
149 | 153 | ||
150 | a += nr >> 5; | 154 | a += nr >> 5; |
151 | mask = 1 << (nr & 0x1f); | 155 | mask = 1 << (nr & 0x1f); |
152 | return ((mask & *a) != 0); | 156 | return ((mask & *a) != 0); |
153 | } | 157 | } |
154 | 158 | ||
155 | #define test_bit(nr,addr) \ | 159 | #define test_bit(nr,addr) \ |
156 | (__builtin_constant_p(nr) ? \ | 160 | (__builtin_constant_p(nr) ? \ |
157 | __constant_test_bit((nr),(addr)) : \ | 161 | __constant_test_bit((nr),(addr)) : \ |
158 | __test_bit((nr),(addr))) | 162 | __test_bit((nr),(addr))) |
159 | 163 | ||
160 | #include <asm-generic/bitops/find.h> | 164 | #include <asm-generic/bitops/find.h> |
161 | 165 | ||
162 | /** | 166 | /** |
163 | * fls - find last bit set | 167 | * fls - find last bit set |
164 | * @x: the word to search | 168 | * @x: the word to search |
165 | * | 169 | * |
166 | * This is defined the same way as ffs: | 170 | * This is defined the same way as ffs: |
167 | * - return 32..1 to indicate bit 31..0 most significant bit set | 171 | * - return 32..1 to indicate bit 31..0 most significant bit set |
168 | * - return 0 to indicate no bits set | 172 | * - return 0 to indicate no bits set |
169 | */ | 173 | */ |
170 | #define fls(x) \ | 174 | #define fls(x) \ |
171 | ({ \ | 175 | ({ \ |
172 | int bit; \ | 176 | int bit; \ |
173 | \ | 177 | \ |
174 | asm(" subcc %1,gr0,gr0,icc0 \n" \ | 178 | asm(" subcc %1,gr0,gr0,icc0 \n" \ |
175 | " ckne icc0,cc4 \n" \ | 179 | " ckne icc0,cc4 \n" \ |
176 | " cscan.p %1,gr0,%0 ,cc4,#1 \n" \ | 180 | " cscan.p %1,gr0,%0 ,cc4,#1 \n" \ |
177 | " csub %0,%0,%0 ,cc4,#0 \n" \ | 181 | " csub %0,%0,%0 ,cc4,#0 \n" \ |
178 | " csub %2,%0,%0 ,cc4,#1 \n" \ | 182 | " csub %2,%0,%0 ,cc4,#1 \n" \ |
179 | : "=&r"(bit) \ | 183 | : "=&r"(bit) \ |
180 | : "r"(x), "r"(32) \ | 184 | : "r"(x), "r"(32) \ |
181 | : "icc0", "cc4" \ | 185 | : "icc0", "cc4" \ |
182 | ); \ | 186 | ); \ |
183 | \ | 187 | \ |
184 | bit; \ | 188 | bit; \ |
185 | }) | 189 | }) |
186 | 190 | ||
187 | /** | 191 | /** |
188 | * fls64 - find last bit set in a 64-bit value | 192 | * fls64 - find last bit set in a 64-bit value |
189 | * @n: the value to search | 193 | * @n: the value to search |
190 | * | 194 | * |
191 | * This is defined the same way as ffs: | 195 | * This is defined the same way as ffs: |
192 | * - return 64..1 to indicate bit 63..0 most significant bit set | 196 | * - return 64..1 to indicate bit 63..0 most significant bit set |
193 | * - return 0 to indicate no bits set | 197 | * - return 0 to indicate no bits set |
194 | */ | 198 | */ |
195 | static inline __attribute__((const)) | 199 | static inline __attribute__((const)) |
196 | int fls64(u64 n) | 200 | int fls64(u64 n) |
197 | { | 201 | { |
198 | union { | 202 | union { |
199 | u64 ll; | 203 | u64 ll; |
200 | struct { u32 h, l; }; | 204 | struct { u32 h, l; }; |
201 | } _; | 205 | } _; |
202 | int bit, x, y; | 206 | int bit, x, y; |
203 | 207 | ||
204 | _.ll = n; | 208 | _.ll = n; |
205 | 209 | ||
206 | asm(" subcc.p %3,gr0,gr0,icc0 \n" | 210 | asm(" subcc.p %3,gr0,gr0,icc0 \n" |
207 | " subcc %4,gr0,gr0,icc1 \n" | 211 | " subcc %4,gr0,gr0,icc1 \n" |
208 | " ckne icc0,cc4 \n" | 212 | " ckne icc0,cc4 \n" |
209 | " ckne icc1,cc5 \n" | 213 | " ckne icc1,cc5 \n" |
210 | " norcr cc4,cc5,cc6 \n" | 214 | " norcr cc4,cc5,cc6 \n" |
211 | " csub.p %0,%0,%0 ,cc6,1 \n" | 215 | " csub.p %0,%0,%0 ,cc6,1 \n" |
212 | " orcr cc5,cc4,cc4 \n" | 216 | " orcr cc5,cc4,cc4 \n" |
213 | " andcr cc4,cc5,cc4 \n" | 217 | " andcr cc4,cc5,cc4 \n" |
214 | " cscan.p %3,gr0,%0 ,cc4,0 \n" | 218 | " cscan.p %3,gr0,%0 ,cc4,0 \n" |
215 | " setlos #64,%1 \n" | 219 | " setlos #64,%1 \n" |
216 | " cscan.p %4,gr0,%0 ,cc4,1 \n" | 220 | " cscan.p %4,gr0,%0 ,cc4,1 \n" |
217 | " setlos #32,%2 \n" | 221 | " setlos #32,%2 \n" |
218 | " csub.p %1,%0,%0 ,cc4,0 \n" | 222 | " csub.p %1,%0,%0 ,cc4,0 \n" |
219 | " csub %2,%0,%0 ,cc4,1 \n" | 223 | " csub %2,%0,%0 ,cc4,1 \n" |
220 | : "=&r"(bit), "=r"(x), "=r"(y) | 224 | : "=&r"(bit), "=r"(x), "=r"(y) |
221 | : "0r"(_.h), "r"(_.l) | 225 | : "0r"(_.h), "r"(_.l) |
222 | : "icc0", "icc1", "cc4", "cc5", "cc6" | 226 | : "icc0", "icc1", "cc4", "cc5", "cc6" |
223 | ); | 227 | ); |
224 | return bit; | 228 | return bit; |
225 | 229 | ||
226 | } | 230 | } |
227 | 231 | ||
228 | /** | 232 | /** |
229 | * ffs - find first bit set | 233 | * ffs - find first bit set |
230 | * @x: the word to search | 234 | * @x: the word to search |
231 | * | 235 | * |
232 | * - return 32..1 to indicate bit 31..0 least significant bit set | 236 | * - return 32..1 to indicate bit 31..0 least significant bit set |
233 | * - return 0 to indicate no bits set | 237 | * - return 0 to indicate no bits set |
234 | */ | 238 | */ |
235 | static inline __attribute__((const)) | 239 | static inline __attribute__((const)) |
236 | int ffs(int x) | 240 | int ffs(int x) |
237 | { | 241 | { |
238 | /* Note: (x & -x) gives us a mask that is the least significant | 242 | /* Note: (x & -x) gives us a mask that is the least significant |
239 | * (rightmost) 1-bit of the value in x. | 243 | * (rightmost) 1-bit of the value in x. |
240 | */ | 244 | */ |
241 | return fls(x & -x); | 245 | return fls(x & -x); |
242 | } | 246 | } |
243 | 247 | ||
244 | /** | 248 | /** |
245 | * __ffs - find first bit set | 249 | * __ffs - find first bit set |
246 | * @x: the word to search | 250 | * @x: the word to search |
247 | * | 251 | * |
248 | * - return 31..0 to indicate bit 31..0 least significant bit set | 252 | * - return 31..0 to indicate bit 31..0 least significant bit set |
249 | * - if no bits are set in x, the result is undefined | 253 | * - if no bits are set in x, the result is undefined |
250 | */ | 254 | */ |
251 | static inline __attribute__((const)) | 255 | static inline __attribute__((const)) |
252 | int __ffs(unsigned long x) | 256 | int __ffs(unsigned long x) |
253 | { | 257 | { |
254 | int bit; | 258 | int bit; |
255 | asm("scan %1,gr0,%0" : "=r"(bit) : "r"(x & -x)); | 259 | asm("scan %1,gr0,%0" : "=r"(bit) : "r"(x & -x)); |
256 | return 31 - bit; | 260 | return 31 - bit; |
257 | } | 261 | } |
258 | 262 | ||
259 | /* | 263 | /* |
260 | * special slimline version of fls() for calculating ilog2_u32() | 264 | * special slimline version of fls() for calculating ilog2_u32() |
261 | * - note: no protection against n == 0 | 265 | * - note: no protection against n == 0 |
262 | */ | 266 | */ |
263 | #define ARCH_HAS_ILOG2_U32 | 267 | #define ARCH_HAS_ILOG2_U32 |
264 | static inline __attribute__((const)) | 268 | static inline __attribute__((const)) |
265 | int __ilog2_u32(u32 n) | 269 | int __ilog2_u32(u32 n) |
266 | { | 270 | { |
267 | int bit; | 271 | int bit; |
268 | asm("scan %1,gr0,%0" : "=r"(bit) : "r"(n)); | 272 | asm("scan %1,gr0,%0" : "=r"(bit) : "r"(n)); |
269 | return 31 - bit; | 273 | return 31 - bit; |
270 | } | 274 | } |
271 | 275 | ||
272 | /* | 276 | /* |
273 | * special slimline version of fls64() for calculating ilog2_u64() | 277 | * special slimline version of fls64() for calculating ilog2_u64() |
274 | * - note: no protection against n == 0 | 278 | * - note: no protection against n == 0 |
275 | */ | 279 | */ |
276 | #define ARCH_HAS_ILOG2_U64 | 280 | #define ARCH_HAS_ILOG2_U64 |
277 | static inline __attribute__((const)) | 281 | static inline __attribute__((const)) |
278 | int __ilog2_u64(u64 n) | 282 | int __ilog2_u64(u64 n) |
279 | { | 283 | { |
280 | union { | 284 | union { |
281 | u64 ll; | 285 | u64 ll; |
282 | struct { u32 h, l; }; | 286 | struct { u32 h, l; }; |
283 | } _; | 287 | } _; |
284 | int bit, x, y; | 288 | int bit, x, y; |
285 | 289 | ||
286 | _.ll = n; | 290 | _.ll = n; |
287 | 291 | ||
288 | asm(" subcc %3,gr0,gr0,icc0 \n" | 292 | asm(" subcc %3,gr0,gr0,icc0 \n" |
289 | " ckeq icc0,cc4 \n" | 293 | " ckeq icc0,cc4 \n" |
290 | " cscan.p %3,gr0,%0 ,cc4,0 \n" | 294 | " cscan.p %3,gr0,%0 ,cc4,0 \n" |
291 | " setlos #63,%1 \n" | 295 | " setlos #63,%1 \n" |
292 | " cscan.p %4,gr0,%0 ,cc4,1 \n" | 296 | " cscan.p %4,gr0,%0 ,cc4,1 \n" |
293 | " setlos #31,%2 \n" | 297 | " setlos #31,%2 \n" |
294 | " csub.p %1,%0,%0 ,cc4,0 \n" | 298 | " csub.p %1,%0,%0 ,cc4,0 \n" |
295 | " csub %2,%0,%0 ,cc4,1 \n" | 299 | " csub %2,%0,%0 ,cc4,1 \n" |
296 | : "=&r"(bit), "=r"(x), "=r"(y) | 300 | : "=&r"(bit), "=r"(x), "=r"(y) |
297 | : "0r"(_.h), "r"(_.l) | 301 | : "0r"(_.h), "r"(_.l) |
298 | : "icc0", "cc4" | 302 | : "icc0", "cc4" |
299 | ); | 303 | ); |
300 | return bit; | 304 | return bit; |
301 | } | 305 | } |
302 | 306 | ||
303 | #include <asm-generic/bitops/sched.h> | 307 | #include <asm-generic/bitops/sched.h> |
304 | #include <asm-generic/bitops/hweight.h> | 308 | #include <asm-generic/bitops/hweight.h> |
305 | #include <asm-generic/bitops/lock.h> | 309 | #include <asm-generic/bitops/lock.h> |
306 | 310 | ||
307 | #include <asm-generic/bitops/ext2-non-atomic.h> | 311 | #include <asm-generic/bitops/ext2-non-atomic.h> |
308 | 312 | ||
309 | #define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit ((nr) ^ 0x18, (addr)) | 313 | #define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit ((nr) ^ 0x18, (addr)) |
310 | #define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr) ^ 0x18, (addr)) | 314 | #define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr) ^ 0x18, (addr)) |
311 | 315 | ||
312 | #include <asm-generic/bitops/minix-le.h> | 316 | #include <asm-generic/bitops/minix-le.h> |
313 | 317 | ||
314 | #endif /* __KERNEL__ */ | 318 | #endif /* __KERNEL__ */ |
315 | 319 | ||
316 | #endif /* _ASM_BITOPS_H */ | 320 | #endif /* _ASM_BITOPS_H */ |
317 | 321 |
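The FR-V helpers lean on one identity: x & -x isolates the lowest set bit, so ffs() can be written as fls(x & -x), and fls() itself is 32 minus a count-leading-zeros (the cscan sequence above). The same arithmetic in portable C, as a sketch with hypothetical names and __builtin_clz() in place of scan/cscan:

/* Sketch only, hypothetical names: the arithmetic behind the FR-V
 * helpers, with __builtin_clz() standing in for scan/cscan. */
static inline int sketch_fls(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;	/* 32..1, or 0 if no bit set */
}

static inline int sketch_ffs(unsigned int x)
{
	/* x & -x keeps only the lowest set bit, so the "last bit" helper
	 * also answers the "first bit" question, just as ffs() above is
	 * written as fls(x & -x). */
	return sketch_fls(x & -x);
}

The __ilog2_u32()/__ilog2_u64() variants drop the zero check entirely, which is why the comments above warn that they offer no protection against n == 0.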
include/asm-generic/bitops.h
1 | #ifndef _ASM_GENERIC_BITOPS_H_ | 1 | #ifndef _ASM_GENERIC_BITOPS_H_ |
2 | #define _ASM_GENERIC_BITOPS_H_ | 2 | #define _ASM_GENERIC_BITOPS_H_ |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * For the benefit of those who are trying to port Linux to another | 5 | * For the benefit of those who are trying to port Linux to another |
6 | * architecture, here are some C-language equivalents. You should | 6 | * architecture, here are some C-language equivalents. You should |
7 | * recode these in the native assembly language, if at all possible. | 7 | * recode these in the native assembly language, if at all possible. |
8 | * | 8 | * |
9 | * C language equivalents written by Theodore Ts'o, 9/26/92 | 9 | * C language equivalents written by Theodore Ts'o, 9/26/92 |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <asm-generic/bitops/atomic.h> | 12 | #include <asm-generic/bitops/atomic.h> |
13 | #include <asm-generic/bitops/non-atomic.h> | 13 | #include <asm-generic/bitops/non-atomic.h> |
14 | #include <asm-generic/bitops/__ffs.h> | 14 | #include <asm-generic/bitops/__ffs.h> |
15 | #include <asm-generic/bitops/ffz.h> | 15 | #include <asm-generic/bitops/ffz.h> |
16 | #include <asm-generic/bitops/fls.h> | 16 | #include <asm-generic/bitops/fls.h> |
17 | #include <asm-generic/bitops/fls64.h> | 17 | #include <asm-generic/bitops/fls64.h> |
18 | #include <asm-generic/bitops/find.h> | 18 | #include <asm-generic/bitops/find.h> |
19 | 19 | ||
20 | #ifdef __KERNEL__ | 20 | #ifdef __KERNEL__ |
21 | 21 | ||
22 | #ifndef _LINUX_BITOPS_H | ||
23 | #error only <linux/bitops.h> can be included directly | ||
24 | #endif | ||
25 | |||
22 | #include <asm-generic/bitops/sched.h> | 26 | #include <asm-generic/bitops/sched.h> |
23 | #include <asm-generic/bitops/ffs.h> | 27 | #include <asm-generic/bitops/ffs.h> |
24 | #include <asm-generic/bitops/hweight.h> | 28 | #include <asm-generic/bitops/hweight.h> |
25 | #include <asm-generic/bitops/lock.h> | 29 | #include <asm-generic/bitops/lock.h> |
26 | 30 | ||
27 | #include <asm-generic/bitops/ext2-non-atomic.h> | 31 | #include <asm-generic/bitops/ext2-non-atomic.h> |
28 | #include <asm-generic/bitops/ext2-atomic.h> | 32 | #include <asm-generic/bitops/ext2-atomic.h> |
29 | #include <asm-generic/bitops/minix.h> | 33 | #include <asm-generic/bitops/minix.h> |
30 | 34 | ||
31 | #endif /* __KERNEL__ */ | 35 | #endif /* __KERNEL__ */ |
32 | 36 | ||
33 | #endif /* _ASM_GENERIC_BITOPS_H */ | 37 | #endif /* _ASM_GENERIC_BITOPS_H */ |
34 | 38 |
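The check added to asm-generic/bitops.h (and to each architecture header in this diff) relies on the wrapper's ordinary include guard: include/linux/bitops.h defines _LINUX_BITOPS_H first and then includes <asm/bitops.h>, so the macro is only visible when the asm header is reached through the wrapper, and a direct include stops immediately at the #error line. A sketch of the pattern with made-up header names:

/* Sketch of the guard pattern, using hypothetical header names. */

/* include/linux/example.h -- the only intended entry point */
#ifndef _LINUX_EXAMPLE_H
#define _LINUX_EXAMPLE_H
/* ... generic helpers ... */
/* #include <asm/example.h>   <-- the wrapper pulls the asm header in here */
#endif

/* include/asm-foo/example.h -- refuses to be used on its own */
#ifndef _LINUX_EXAMPLE_H
#error only <linux/example.h> can be included directly
#endif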
include/asm-h8300/bitops.h
1 | #ifndef _H8300_BITOPS_H | 1 | #ifndef _H8300_BITOPS_H |
2 | #define _H8300_BITOPS_H | 2 | #define _H8300_BITOPS_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Copyright 1992, Linus Torvalds. | 5 | * Copyright 1992, Linus Torvalds. |
6 | * Copyright 2002, Yoshinori Sato | 6 | * Copyright 2002, Yoshinori Sato |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/compiler.h> | 9 | #include <linux/compiler.h> |
10 | #include <asm/system.h> | 10 | #include <asm/system.h> |
11 | 11 | ||
12 | #ifdef __KERNEL__ | 12 | #ifdef __KERNEL__ |
13 | |||
14 | #ifndef _LINUX_BITOPS_H | ||
15 | #error only <linux/bitops.h> can be included directly | ||
16 | #endif | ||
17 | |||
13 | /* | 18 | /* |
14 | * Function prototypes to keep gcc -Wall happy | 19 | * Function prototypes to keep gcc -Wall happy |
15 | */ | 20 | */ |
16 | 21 | ||
17 | /* | 22 | /* |
18 | * ffz = Find First Zero in word. Undefined if no zero exists, | 23 | * ffz = Find First Zero in word. Undefined if no zero exists, |
19 | * so code should check against ~0UL first.. | 24 | * so code should check against ~0UL first.. |
20 | */ | 25 | */ |
21 | static __inline__ unsigned long ffz(unsigned long word) | 26 | static __inline__ unsigned long ffz(unsigned long word) |
22 | { | 27 | { |
23 | unsigned long result; | 28 | unsigned long result; |
24 | 29 | ||
25 | result = -1; | 30 | result = -1; |
26 | __asm__("1:\n\t" | 31 | __asm__("1:\n\t" |
27 | "shlr.l %2\n\t" | 32 | "shlr.l %2\n\t" |
28 | "adds #1,%0\n\t" | 33 | "adds #1,%0\n\t" |
29 | "bcs 1b" | 34 | "bcs 1b" |
30 | : "=r" (result) | 35 | : "=r" (result) |
31 | : "0" (result),"r" (word)); | 36 | : "0" (result),"r" (word)); |
32 | return result; | 37 | return result; |
33 | } | 38 | } |
34 | 39 | ||
35 | #define H8300_GEN_BITOP_CONST(OP,BIT) \ | 40 | #define H8300_GEN_BITOP_CONST(OP,BIT) \ |
36 | case BIT: \ | 41 | case BIT: \ |
37 | __asm__(OP " #" #BIT ",@%0"::"r"(b_addr):"memory"); \ | 42 | __asm__(OP " #" #BIT ",@%0"::"r"(b_addr):"memory"); \ |
38 | break; | 43 | break; |
39 | 44 | ||
40 | #define H8300_GEN_BITOP(FNAME,OP) \ | 45 | #define H8300_GEN_BITOP(FNAME,OP) \ |
41 | static __inline__ void FNAME(int nr, volatile unsigned long* addr) \ | 46 | static __inline__ void FNAME(int nr, volatile unsigned long* addr) \ |
42 | { \ | 47 | { \ |
43 | volatile unsigned char *b_addr; \ | 48 | volatile unsigned char *b_addr; \ |
44 | b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \ | 49 | b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \ |
45 | if (__builtin_constant_p(nr)) { \ | 50 | if (__builtin_constant_p(nr)) { \ |
46 | switch(nr & 7) { \ | 51 | switch(nr & 7) { \ |
47 | H8300_GEN_BITOP_CONST(OP,0) \ | 52 | H8300_GEN_BITOP_CONST(OP,0) \ |
48 | H8300_GEN_BITOP_CONST(OP,1) \ | 53 | H8300_GEN_BITOP_CONST(OP,1) \ |
49 | H8300_GEN_BITOP_CONST(OP,2) \ | 54 | H8300_GEN_BITOP_CONST(OP,2) \ |
50 | H8300_GEN_BITOP_CONST(OP,3) \ | 55 | H8300_GEN_BITOP_CONST(OP,3) \ |
51 | H8300_GEN_BITOP_CONST(OP,4) \ | 56 | H8300_GEN_BITOP_CONST(OP,4) \ |
52 | H8300_GEN_BITOP_CONST(OP,5) \ | 57 | H8300_GEN_BITOP_CONST(OP,5) \ |
53 | H8300_GEN_BITOP_CONST(OP,6) \ | 58 | H8300_GEN_BITOP_CONST(OP,6) \ |
54 | H8300_GEN_BITOP_CONST(OP,7) \ | 59 | H8300_GEN_BITOP_CONST(OP,7) \ |
55 | } \ | 60 | } \ |
56 | } else { \ | 61 | } else { \ |
57 | __asm__(OP " %w0,@%1"::"r"(nr),"r"(b_addr):"memory"); \ | 62 | __asm__(OP " %w0,@%1"::"r"(nr),"r"(b_addr):"memory"); \ |
58 | } \ | 63 | } \ |
59 | } | 64 | } |
60 | 65 | ||
61 | /* | 66 | /* |
62 | * clear_bit() doesn't provide any barrier for the compiler. | 67 | * clear_bit() doesn't provide any barrier for the compiler. |
63 | */ | 68 | */ |
64 | #define smp_mb__before_clear_bit() barrier() | 69 | #define smp_mb__before_clear_bit() barrier() |
65 | #define smp_mb__after_clear_bit() barrier() | 70 | #define smp_mb__after_clear_bit() barrier() |
66 | 71 | ||
67 | H8300_GEN_BITOP(set_bit ,"bset") | 72 | H8300_GEN_BITOP(set_bit ,"bset") |
68 | H8300_GEN_BITOP(clear_bit ,"bclr") | 73 | H8300_GEN_BITOP(clear_bit ,"bclr") |
69 | H8300_GEN_BITOP(change_bit,"bnot") | 74 | H8300_GEN_BITOP(change_bit,"bnot") |
70 | #define __set_bit(nr,addr) set_bit((nr),(addr)) | 75 | #define __set_bit(nr,addr) set_bit((nr),(addr)) |
71 | #define __clear_bit(nr,addr) clear_bit((nr),(addr)) | 76 | #define __clear_bit(nr,addr) clear_bit((nr),(addr)) |
72 | #define __change_bit(nr,addr) change_bit((nr),(addr)) | 77 | #define __change_bit(nr,addr) change_bit((nr),(addr)) |
73 | 78 | ||
74 | #undef H8300_GEN_BITOP | 79 | #undef H8300_GEN_BITOP |
75 | #undef H8300_GEN_BITOP_CONST | 80 | #undef H8300_GEN_BITOP_CONST |
76 | 81 | ||
77 | static __inline__ int test_bit(int nr, const unsigned long* addr) | 82 | static __inline__ int test_bit(int nr, const unsigned long* addr) |
78 | { | 83 | { |
79 | return (*((volatile unsigned char *)addr + | 84 | return (*((volatile unsigned char *)addr + |
80 | ((nr >> 3) ^ 3)) & (1UL << (nr & 7))) != 0; | 85 | ((nr >> 3) ^ 3)) & (1UL << (nr & 7))) != 0; |
81 | } | 86 | } |
82 | 87 | ||
83 | #define __test_bit(nr, addr) test_bit(nr, addr) | 88 | #define __test_bit(nr, addr) test_bit(nr, addr) |
84 | 89 | ||
85 | #define H8300_GEN_TEST_BITOP_CONST_INT(OP,BIT) \ | 90 | #define H8300_GEN_TEST_BITOP_CONST_INT(OP,BIT) \ |
86 | case BIT: \ | 91 | case BIT: \ |
87 | __asm__("stc ccr,%w1\n\t" \ | 92 | __asm__("stc ccr,%w1\n\t" \ |
88 | "orc #0x80,ccr\n\t" \ | 93 | "orc #0x80,ccr\n\t" \ |
89 | "bld #" #BIT ",@%4\n\t" \ | 94 | "bld #" #BIT ",@%4\n\t" \ |
90 | OP " #" #BIT ",@%4\n\t" \ | 95 | OP " #" #BIT ",@%4\n\t" \ |
91 | "rotxl.l %0\n\t" \ | 96 | "rotxl.l %0\n\t" \ |
92 | "ldc %w1,ccr" \ | 97 | "ldc %w1,ccr" \ |
93 | : "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr) \ | 98 | : "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr) \ |
94 | : "0" (retval),"r" (b_addr) \ | 99 | : "0" (retval),"r" (b_addr) \ |
95 | : "memory"); \ | 100 | : "memory"); \ |
96 | break; | 101 | break; |
97 | 102 | ||
98 | #define H8300_GEN_TEST_BITOP_CONST(OP,BIT) \ | 103 | #define H8300_GEN_TEST_BITOP_CONST(OP,BIT) \ |
99 | case BIT: \ | 104 | case BIT: \ |
100 | __asm__("bld #" #BIT ",@%3\n\t" \ | 105 | __asm__("bld #" #BIT ",@%3\n\t" \ |
101 | OP " #" #BIT ",@%3\n\t" \ | 106 | OP " #" #BIT ",@%3\n\t" \ |
102 | "rotxl.l %0\n\t" \ | 107 | "rotxl.l %0\n\t" \ |
103 | : "=r"(retval),"=m"(*b_addr) \ | 108 | : "=r"(retval),"=m"(*b_addr) \ |
104 | : "0" (retval),"r" (b_addr) \ | 109 | : "0" (retval),"r" (b_addr) \ |
105 | : "memory"); \ | 110 | : "memory"); \ |
106 | break; | 111 | break; |
107 | 112 | ||
108 | #define H8300_GEN_TEST_BITOP(FNNAME,OP) \ | 113 | #define H8300_GEN_TEST_BITOP(FNNAME,OP) \ |
109 | static __inline__ int FNNAME(int nr, volatile void * addr) \ | 114 | static __inline__ int FNNAME(int nr, volatile void * addr) \ |
110 | { \ | 115 | { \ |
111 | int retval = 0; \ | 116 | int retval = 0; \ |
112 | char ccrsave; \ | 117 | char ccrsave; \ |
113 | volatile unsigned char *b_addr; \ | 118 | volatile unsigned char *b_addr; \ |
114 | b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \ | 119 | b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \ |
115 | if (__builtin_constant_p(nr)) { \ | 120 | if (__builtin_constant_p(nr)) { \ |
116 | switch(nr & 7) { \ | 121 | switch(nr & 7) { \ |
117 | H8300_GEN_TEST_BITOP_CONST_INT(OP,0) \ | 122 | H8300_GEN_TEST_BITOP_CONST_INT(OP,0) \ |
118 | H8300_GEN_TEST_BITOP_CONST_INT(OP,1) \ | 123 | H8300_GEN_TEST_BITOP_CONST_INT(OP,1) \ |
119 | H8300_GEN_TEST_BITOP_CONST_INT(OP,2) \ | 124 | H8300_GEN_TEST_BITOP_CONST_INT(OP,2) \ |
120 | H8300_GEN_TEST_BITOP_CONST_INT(OP,3) \ | 125 | H8300_GEN_TEST_BITOP_CONST_INT(OP,3) \ |
121 | H8300_GEN_TEST_BITOP_CONST_INT(OP,4) \ | 126 | H8300_GEN_TEST_BITOP_CONST_INT(OP,4) \ |
122 | H8300_GEN_TEST_BITOP_CONST_INT(OP,5) \ | 127 | H8300_GEN_TEST_BITOP_CONST_INT(OP,5) \ |
123 | H8300_GEN_TEST_BITOP_CONST_INT(OP,6) \ | 128 | H8300_GEN_TEST_BITOP_CONST_INT(OP,6) \ |
124 | H8300_GEN_TEST_BITOP_CONST_INT(OP,7) \ | 129 | H8300_GEN_TEST_BITOP_CONST_INT(OP,7) \ |
125 | } \ | 130 | } \ |
126 | } else { \ | 131 | } else { \ |
127 | __asm__("stc ccr,%w1\n\t" \ | 132 | __asm__("stc ccr,%w1\n\t" \ |
128 | "orc #0x80,ccr\n\t" \ | 133 | "orc #0x80,ccr\n\t" \ |
129 | "btst %w5,@%4\n\t" \ | 134 | "btst %w5,@%4\n\t" \ |
130 | OP " %w5,@%4\n\t" \ | 135 | OP " %w5,@%4\n\t" \ |
131 | "beq 1f\n\t" \ | 136 | "beq 1f\n\t" \ |
132 | "inc.l #1,%0\n" \ | 137 | "inc.l #1,%0\n" \ |
133 | "1:\n\t" \ | 138 | "1:\n\t" \ |
134 | "ldc %w1,ccr" \ | 139 | "ldc %w1,ccr" \ |
135 | : "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr) \ | 140 | : "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr) \ |
136 | : "0" (retval),"r" (b_addr),"r"(nr) \ | 141 | : "0" (retval),"r" (b_addr),"r"(nr) \ |
137 | : "memory"); \ | 142 | : "memory"); \ |
138 | } \ | 143 | } \ |
139 | return retval; \ | 144 | return retval; \ |
140 | } \ | 145 | } \ |
141 | \ | 146 | \ |
142 | static __inline__ int __ ## FNNAME(int nr, volatile void * addr) \ | 147 | static __inline__ int __ ## FNNAME(int nr, volatile void * addr) \ |
143 | { \ | 148 | { \ |
144 | int retval = 0; \ | 149 | int retval = 0; \ |
145 | volatile unsigned char *b_addr; \ | 150 | volatile unsigned char *b_addr; \ |
146 | b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \ | 151 | b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \ |
147 | if (__builtin_constant_p(nr)) { \ | 152 | if (__builtin_constant_p(nr)) { \ |
148 | switch(nr & 7) { \ | 153 | switch(nr & 7) { \ |
149 | H8300_GEN_TEST_BITOP_CONST(OP,0) \ | 154 | H8300_GEN_TEST_BITOP_CONST(OP,0) \ |
150 | H8300_GEN_TEST_BITOP_CONST(OP,1) \ | 155 | H8300_GEN_TEST_BITOP_CONST(OP,1) \ |
151 | H8300_GEN_TEST_BITOP_CONST(OP,2) \ | 156 | H8300_GEN_TEST_BITOP_CONST(OP,2) \ |
152 | H8300_GEN_TEST_BITOP_CONST(OP,3) \ | 157 | H8300_GEN_TEST_BITOP_CONST(OP,3) \ |
153 | H8300_GEN_TEST_BITOP_CONST(OP,4) \ | 158 | H8300_GEN_TEST_BITOP_CONST(OP,4) \ |
154 | H8300_GEN_TEST_BITOP_CONST(OP,5) \ | 159 | H8300_GEN_TEST_BITOP_CONST(OP,5) \ |
155 | H8300_GEN_TEST_BITOP_CONST(OP,6) \ | 160 | H8300_GEN_TEST_BITOP_CONST(OP,6) \ |
156 | H8300_GEN_TEST_BITOP_CONST(OP,7) \ | 161 | H8300_GEN_TEST_BITOP_CONST(OP,7) \ |
157 | } \ | 162 | } \ |
158 | } else { \ | 163 | } else { \ |
159 | __asm__("btst %w4,@%3\n\t" \ | 164 | __asm__("btst %w4,@%3\n\t" \ |
160 | OP " %w4,@%3\n\t" \ | 165 | OP " %w4,@%3\n\t" \ |
161 | "beq 1f\n\t" \ | 166 | "beq 1f\n\t" \ |
162 | "inc.l #1,%0\n" \ | 167 | "inc.l #1,%0\n" \ |
163 | "1:" \ | 168 | "1:" \ |
164 | : "=r"(retval),"=m"(*b_addr) \ | 169 | : "=r"(retval),"=m"(*b_addr) \ |
165 | : "0" (retval),"r" (b_addr),"r"(nr) \ | 170 | : "0" (retval),"r" (b_addr),"r"(nr) \ |
166 | : "memory"); \ | 171 | : "memory"); \ |
167 | } \ | 172 | } \ |
168 | return retval; \ | 173 | return retval; \ |
169 | } | 174 | } |
170 | 175 | ||
171 | H8300_GEN_TEST_BITOP(test_and_set_bit, "bset") | 176 | H8300_GEN_TEST_BITOP(test_and_set_bit, "bset") |
172 | H8300_GEN_TEST_BITOP(test_and_clear_bit, "bclr") | 177 | H8300_GEN_TEST_BITOP(test_and_clear_bit, "bclr") |
173 | H8300_GEN_TEST_BITOP(test_and_change_bit,"bnot") | 178 | H8300_GEN_TEST_BITOP(test_and_change_bit,"bnot") |
174 | #undef H8300_GEN_TEST_BITOP_CONST | 179 | #undef H8300_GEN_TEST_BITOP_CONST |
175 | #undef H8300_GEN_TEST_BITOP_CONST_INT | 180 | #undef H8300_GEN_TEST_BITOP_CONST_INT |
176 | #undef H8300_GEN_TEST_BITOP | 181 | #undef H8300_GEN_TEST_BITOP |
177 | 182 | ||
178 | #include <asm-generic/bitops/ffs.h> | 183 | #include <asm-generic/bitops/ffs.h> |
179 | 184 | ||
180 | static __inline__ unsigned long __ffs(unsigned long word) | 185 | static __inline__ unsigned long __ffs(unsigned long word) |
181 | { | 186 | { |
182 | unsigned long result; | 187 | unsigned long result; |
183 | 188 | ||
184 | result = -1; | 189 | result = -1; |
185 | __asm__("1:\n\t" | 190 | __asm__("1:\n\t" |
186 | "shlr.l %2\n\t" | 191 | "shlr.l %2\n\t" |
187 | "adds #1,%0\n\t" | 192 | "adds #1,%0\n\t" |
188 | "bcc 1b" | 193 | "bcc 1b" |
189 | : "=r" (result) | 194 | : "=r" (result) |
190 | : "0"(result),"r"(word)); | 195 | : "0"(result),"r"(word)); |
191 | return result; | 196 | return result; |
192 | } | 197 | } |
193 | 198 | ||
194 | #include <asm-generic/bitops/find.h> | 199 | #include <asm-generic/bitops/find.h> |
195 | #include <asm-generic/bitops/sched.h> | 200 | #include <asm-generic/bitops/sched.h> |
196 | #include <asm-generic/bitops/hweight.h> | 201 | #include <asm-generic/bitops/hweight.h> |
197 | #include <asm-generic/bitops/lock.h> | 202 | #include <asm-generic/bitops/lock.h> |
198 | #include <asm-generic/bitops/ext2-non-atomic.h> | 203 | #include <asm-generic/bitops/ext2-non-atomic.h> |
199 | #include <asm-generic/bitops/ext2-atomic.h> | 204 | #include <asm-generic/bitops/ext2-atomic.h> |
200 | #include <asm-generic/bitops/minix.h> | 205 | #include <asm-generic/bitops/minix.h> |
201 | 206 | ||
202 | #endif /* __KERNEL__ */ | 207 | #endif /* __KERNEL__ */ |
203 | 208 | ||
204 | #include <asm-generic/bitops/fls.h> | 209 | #include <asm-generic/bitops/fls.h> |
205 | #include <asm-generic/bitops/fls64.h> | 210 | #include <asm-generic/bitops/fls64.h> |
206 | 211 | ||
207 | #endif /* _H8300_BITOPS_H */ | 212 | #endif /* _H8300_BITOPS_H */ |
208 | 213 |
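The h8300 macros above address single bytes inside a big-endian 32-bit long: (nr >> 3) ^ 3 selects the byte that holds bit nr, and nr & 7 selects the bit within that byte. A small user-space sketch (not the kernel macro itself) that prints the mapping:

	#include <stdio.h>

	/* In a big-endian 32-bit long, byte 0 holds bits 31..24 and byte 3
	 * holds bits 7..0, hence the (nr >> 3) ^ 3 byte index.
	 */
	int main(void)
	{
		for (int nr = 0; nr < 32; nr++) {
			int byte = (nr >> 3) ^ 3;	/* byte that holds bit nr */
			int bit  = nr & 7;		/* bit position inside that byte */
			printf("bit %2d -> byte %d, bit %d\n", nr, byte, bit);
		}
		return 0;
	}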
include/asm-ia64/bitops.h
1 | #ifndef _ASM_IA64_BITOPS_H | 1 | #ifndef _ASM_IA64_BITOPS_H |
2 | #define _ASM_IA64_BITOPS_H | 2 | #define _ASM_IA64_BITOPS_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Copyright (C) 1998-2003 Hewlett-Packard Co | 5 | * Copyright (C) 1998-2003 Hewlett-Packard Co |
6 | * David Mosberger-Tang <davidm@hpl.hp.com> | 6 | * David Mosberger-Tang <davidm@hpl.hp.com> |
7 | * | 7 | * |
8 | * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64 | 8 | * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64 |
9 | * O(1) scheduler patch | 9 | * O(1) scheduler patch |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #ifndef _LINUX_BITOPS_H | ||
13 | #error only <linux/bitops.h> can be included directly | ||
14 | #endif | ||
15 | |||
12 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
13 | #include <linux/types.h> | 17 | #include <linux/types.h> |
14 | #include <asm/intrinsics.h> | 18 | #include <asm/intrinsics.h> |
15 | 19 | ||
16 | /** | 20 | /** |
17 | * set_bit - Atomically set a bit in memory | 21 | * set_bit - Atomically set a bit in memory |
18 | * @nr: the bit to set | 22 | * @nr: the bit to set |
19 | * @addr: the address to start counting from | 23 | * @addr: the address to start counting from |
20 | * | 24 | * |
21 | * This function is atomic and may not be reordered. See __set_bit() | 25 | * This function is atomic and may not be reordered. See __set_bit() |
22 | * if you do not require the atomic guarantees. | 26 | * if you do not require the atomic guarantees. |
23 | * Note that @nr may be almost arbitrarily large; this function is not | 27 | * Note that @nr may be almost arbitrarily large; this function is not |
24 | * restricted to acting on a single-word quantity. | 28 | * restricted to acting on a single-word quantity. |
25 | * | 29 | * |
26 | * The address must be (at least) "long" aligned. | 30 | * The address must be (at least) "long" aligned. |
27 | * Note that there are drivers (e.g., eepro100) which use these operations to | 31 | * Note that there are drivers (e.g., eepro100) which use these operations to |
28 | * operate on hw-defined data-structures, so we can't easily change these | 32 | * operate on hw-defined data-structures, so we can't easily change these |
29 | * operations to force a bigger alignment. | 33 | * operations to force a bigger alignment. |
30 | * | 34 | * |
31 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). | 35 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). |
32 | */ | 36 | */ |
33 | static __inline__ void | 37 | static __inline__ void |
34 | set_bit (int nr, volatile void *addr) | 38 | set_bit (int nr, volatile void *addr) |
35 | { | 39 | { |
36 | __u32 bit, old, new; | 40 | __u32 bit, old, new; |
37 | volatile __u32 *m; | 41 | volatile __u32 *m; |
38 | CMPXCHG_BUGCHECK_DECL | 42 | CMPXCHG_BUGCHECK_DECL |
39 | 43 | ||
40 | m = (volatile __u32 *) addr + (nr >> 5); | 44 | m = (volatile __u32 *) addr + (nr >> 5); |
41 | bit = 1 << (nr & 31); | 45 | bit = 1 << (nr & 31); |
42 | do { | 46 | do { |
43 | CMPXCHG_BUGCHECK(m); | 47 | CMPXCHG_BUGCHECK(m); |
44 | old = *m; | 48 | old = *m; |
45 | new = old | bit; | 49 | new = old | bit; |
46 | } while (cmpxchg_acq(m, old, new) != old); | 50 | } while (cmpxchg_acq(m, old, new) != old); |
47 | } | 51 | } |
48 | 52 | ||
49 | /** | 53 | /** |
50 | * __set_bit - Set a bit in memory | 54 | * __set_bit - Set a bit in memory |
51 | * @nr: the bit to set | 55 | * @nr: the bit to set |
52 | * @addr: the address to start counting from | 56 | * @addr: the address to start counting from |
53 | * | 57 | * |
54 | * Unlike set_bit(), this function is non-atomic and may be reordered. | 58 | * Unlike set_bit(), this function is non-atomic and may be reordered. |
55 | * If it's called on the same region of memory simultaneously, the effect | 59 | * If it's called on the same region of memory simultaneously, the effect |
56 | * may be that only one operation succeeds. | 60 | * may be that only one operation succeeds. |
57 | */ | 61 | */ |
58 | static __inline__ void | 62 | static __inline__ void |
59 | __set_bit (int nr, volatile void *addr) | 63 | __set_bit (int nr, volatile void *addr) |
60 | { | 64 | { |
61 | *((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31)); | 65 | *((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31)); |
62 | } | 66 | } |
63 | 67 | ||
64 | /* | 68 | /* |
65 | * clear_bit() has "acquire" semantics. | 69 | * clear_bit() has "acquire" semantics. |
66 | */ | 70 | */ |
67 | #define smp_mb__before_clear_bit() smp_mb() | 71 | #define smp_mb__before_clear_bit() smp_mb() |
68 | #define smp_mb__after_clear_bit() do { /* skip */; } while (0) | 72 | #define smp_mb__after_clear_bit() do { /* skip */; } while (0) |
69 | 73 | ||
70 | /** | 74 | /** |
71 | * clear_bit - Clears a bit in memory | 75 | * clear_bit - Clears a bit in memory |
72 | * @nr: Bit to clear | 76 | * @nr: Bit to clear |
73 | * @addr: Address to start counting from | 77 | * @addr: Address to start counting from |
74 | * | 78 | * |
75 | * clear_bit() is atomic and may not be reordered. However, it does | 79 | * clear_bit() is atomic and may not be reordered. However, it does |
76 | * not contain a memory barrier, so if it is used for locking purposes, | 80 | * not contain a memory barrier, so if it is used for locking purposes, |
77 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | 81 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() |
78 | * in order to ensure changes are visible on other processors. | 82 | * in order to ensure changes are visible on other processors. |
79 | */ | 83 | */ |
80 | static __inline__ void | 84 | static __inline__ void |
81 | clear_bit (int nr, volatile void *addr) | 85 | clear_bit (int nr, volatile void *addr) |
82 | { | 86 | { |
83 | __u32 mask, old, new; | 87 | __u32 mask, old, new; |
84 | volatile __u32 *m; | 88 | volatile __u32 *m; |
85 | CMPXCHG_BUGCHECK_DECL | 89 | CMPXCHG_BUGCHECK_DECL |
86 | 90 | ||
87 | m = (volatile __u32 *) addr + (nr >> 5); | 91 | m = (volatile __u32 *) addr + (nr >> 5); |
88 | mask = ~(1 << (nr & 31)); | 92 | mask = ~(1 << (nr & 31)); |
89 | do { | 93 | do { |
90 | CMPXCHG_BUGCHECK(m); | 94 | CMPXCHG_BUGCHECK(m); |
91 | old = *m; | 95 | old = *m; |
92 | new = old & mask; | 96 | new = old & mask; |
93 | } while (cmpxchg_acq(m, old, new) != old); | 97 | } while (cmpxchg_acq(m, old, new) != old); |
94 | } | 98 | } |
95 | 99 | ||
96 | /** | 100 | /** |
97 | * clear_bit_unlock - Clears a bit in memory with release | 101 | * clear_bit_unlock - Clears a bit in memory with release |
98 | * @nr: Bit to clear | 102 | * @nr: Bit to clear |
99 | * @addr: Address to start counting from | 103 | * @addr: Address to start counting from |
100 | * | 104 | * |
101 | * clear_bit_unlock() is atomic and may not be reordered. It does | 105 | * clear_bit_unlock() is atomic and may not be reordered. It does |
102 | * contain a memory barrier suitable for unlock type operations. | 106 | * contain a memory barrier suitable for unlock type operations. |
103 | */ | 107 | */ |
104 | static __inline__ void | 108 | static __inline__ void |
105 | clear_bit_unlock (int nr, volatile void *addr) | 109 | clear_bit_unlock (int nr, volatile void *addr) |
106 | { | 110 | { |
107 | __u32 mask, old, new; | 111 | __u32 mask, old, new; |
108 | volatile __u32 *m; | 112 | volatile __u32 *m; |
109 | CMPXCHG_BUGCHECK_DECL | 113 | CMPXCHG_BUGCHECK_DECL |
110 | 114 | ||
111 | m = (volatile __u32 *) addr + (nr >> 5); | 115 | m = (volatile __u32 *) addr + (nr >> 5); |
112 | mask = ~(1 << (nr & 31)); | 116 | mask = ~(1 << (nr & 31)); |
113 | do { | 117 | do { |
114 | CMPXCHG_BUGCHECK(m); | 118 | CMPXCHG_BUGCHECK(m); |
115 | old = *m; | 119 | old = *m; |
116 | new = old & mask; | 120 | new = old & mask; |
117 | } while (cmpxchg_rel(m, old, new) != old); | 121 | } while (cmpxchg_rel(m, old, new) != old); |
118 | } | 122 | } |
119 | 123 | ||
120 | /** | 124 | /** |
121 | * __clear_bit_unlock - Non-atomically clear a bit with release | 125 | * __clear_bit_unlock - Non-atomically clear a bit with release |
122 | * | 126 | * |
123 | * This is like clear_bit_unlock, but the implementation may use a non-atomic | 127 | * This is like clear_bit_unlock, but the implementation may use a non-atomic |
124 | * store (this one uses an atomic, however). | 128 | * store (this one uses an atomic, however). |
125 | */ | 129 | */ |
126 | #define __clear_bit_unlock clear_bit_unlock | 130 | #define __clear_bit_unlock clear_bit_unlock |
127 | 131 | ||
128 | /** | 132 | /** |
129 | * __clear_bit - Clears a bit in memory (non-atomic version) | 133 | * __clear_bit - Clears a bit in memory (non-atomic version) |
130 | */ | 134 | */ |
131 | static __inline__ void | 135 | static __inline__ void |
132 | __clear_bit (int nr, volatile void *addr) | 136 | __clear_bit (int nr, volatile void *addr) |
133 | { | 137 | { |
134 | volatile __u32 *p = (__u32 *) addr + (nr >> 5); | 138 | volatile __u32 *p = (__u32 *) addr + (nr >> 5); |
135 | __u32 m = 1 << (nr & 31); | 139 | __u32 m = 1 << (nr & 31); |
136 | *p &= ~m; | 140 | *p &= ~m; |
137 | } | 141 | } |
138 | 142 | ||
139 | /** | 143 | /** |
140 | * change_bit - Toggle a bit in memory | 144 | * change_bit - Toggle a bit in memory |
141 | * @nr: Bit to clear | 145 | * @nr: Bit to clear |
142 | * @addr: Address to start counting from | 146 | * @addr: Address to start counting from |
143 | * | 147 | * |
144 | * change_bit() is atomic and may not be reordered. | 148 | * change_bit() is atomic and may not be reordered. |
145 | * Note that @nr may be almost arbitrarily large; this function is not | 149 | * Note that @nr may be almost arbitrarily large; this function is not |
146 | * restricted to acting on a single-word quantity. | 150 | * restricted to acting on a single-word quantity. |
147 | */ | 151 | */ |
148 | static __inline__ void | 152 | static __inline__ void |
149 | change_bit (int nr, volatile void *addr) | 153 | change_bit (int nr, volatile void *addr) |
150 | { | 154 | { |
151 | __u32 bit, old, new; | 155 | __u32 bit, old, new; |
152 | volatile __u32 *m; | 156 | volatile __u32 *m; |
153 | CMPXCHG_BUGCHECK_DECL | 157 | CMPXCHG_BUGCHECK_DECL |
154 | 158 | ||
155 | m = (volatile __u32 *) addr + (nr >> 5); | 159 | m = (volatile __u32 *) addr + (nr >> 5); |
156 | bit = (1 << (nr & 31)); | 160 | bit = (1 << (nr & 31)); |
157 | do { | 161 | do { |
158 | CMPXCHG_BUGCHECK(m); | 162 | CMPXCHG_BUGCHECK(m); |
159 | old = *m; | 163 | old = *m; |
160 | new = old ^ bit; | 164 | new = old ^ bit; |
161 | } while (cmpxchg_acq(m, old, new) != old); | 165 | } while (cmpxchg_acq(m, old, new) != old); |
162 | } | 166 | } |
163 | 167 | ||
164 | /** | 168 | /** |
165 | * __change_bit - Toggle a bit in memory | 169 | * __change_bit - Toggle a bit in memory |
166 | * @nr: the bit to set | 170 | * @nr: the bit to set |
167 | * @addr: the address to start counting from | 171 | * @addr: the address to start counting from |
168 | * | 172 | * |
169 | * Unlike change_bit(), this function is non-atomic and may be reordered. | 173 | * Unlike change_bit(), this function is non-atomic and may be reordered. |
170 | * If it's called on the same region of memory simultaneously, the effect | 174 | * If it's called on the same region of memory simultaneously, the effect |
171 | * may be that only one operation succeeds. | 175 | * may be that only one operation succeeds. |
172 | */ | 176 | */ |
173 | static __inline__ void | 177 | static __inline__ void |
174 | __change_bit (int nr, volatile void *addr) | 178 | __change_bit (int nr, volatile void *addr) |
175 | { | 179 | { |
176 | *((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31)); | 180 | *((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31)); |
177 | } | 181 | } |
178 | 182 | ||
179 | /** | 183 | /** |
180 | * test_and_set_bit - Set a bit and return its old value | 184 | * test_and_set_bit - Set a bit and return its old value |
181 | * @nr: Bit to set | 185 | * @nr: Bit to set |
182 | * @addr: Address to count from | 186 | * @addr: Address to count from |
183 | * | 187 | * |
184 | * This operation is atomic and cannot be reordered. | 188 | * This operation is atomic and cannot be reordered. |
185 | * It also implies a memory barrier. | 189 | * It also implies a memory barrier. |
186 | */ | 190 | */ |
187 | static __inline__ int | 191 | static __inline__ int |
188 | test_and_set_bit (int nr, volatile void *addr) | 192 | test_and_set_bit (int nr, volatile void *addr) |
189 | { | 193 | { |
190 | __u32 bit, old, new; | 194 | __u32 bit, old, new; |
191 | volatile __u32 *m; | 195 | volatile __u32 *m; |
192 | CMPXCHG_BUGCHECK_DECL | 196 | CMPXCHG_BUGCHECK_DECL |
193 | 197 | ||
194 | m = (volatile __u32 *) addr + (nr >> 5); | 198 | m = (volatile __u32 *) addr + (nr >> 5); |
195 | bit = 1 << (nr & 31); | 199 | bit = 1 << (nr & 31); |
196 | do { | 200 | do { |
197 | CMPXCHG_BUGCHECK(m); | 201 | CMPXCHG_BUGCHECK(m); |
198 | old = *m; | 202 | old = *m; |
199 | new = old | bit; | 203 | new = old | bit; |
200 | } while (cmpxchg_acq(m, old, new) != old); | 204 | } while (cmpxchg_acq(m, old, new) != old); |
201 | return (old & bit) != 0; | 205 | return (old & bit) != 0; |
202 | } | 206 | } |
203 | 207 | ||
204 | /** | 208 | /** |
205 | * test_and_set_bit_lock - Set a bit and return its old value for lock | 209 | * test_and_set_bit_lock - Set a bit and return its old value for lock |
206 | * @nr: Bit to set | 210 | * @nr: Bit to set |
207 | * @addr: Address to count from | 211 | * @addr: Address to count from |
208 | * | 212 | * |
209 | * This is the same as test_and_set_bit on ia64 | 213 | * This is the same as test_and_set_bit on ia64 |
210 | */ | 214 | */ |
211 | #define test_and_set_bit_lock test_and_set_bit | 215 | #define test_and_set_bit_lock test_and_set_bit |
212 | 216 | ||
213 | /** | 217 | /** |
214 | * __test_and_set_bit - Set a bit and return its old value | 218 | * __test_and_set_bit - Set a bit and return its old value |
215 | * @nr: Bit to set | 219 | * @nr: Bit to set |
216 | * @addr: Address to count from | 220 | * @addr: Address to count from |
217 | * | 221 | * |
218 | * This operation is non-atomic and can be reordered. | 222 | * This operation is non-atomic and can be reordered. |
219 | * If two instances of this operation race, one can appear to succeed | 223 | * If two instances of this operation race, one can appear to succeed |
220 | * but actually fail. You must protect multiple accesses with a lock. | 224 | * but actually fail. You must protect multiple accesses with a lock. |
221 | */ | 225 | */ |
222 | static __inline__ int | 226 | static __inline__ int |
223 | __test_and_set_bit (int nr, volatile void *addr) | 227 | __test_and_set_bit (int nr, volatile void *addr) |
224 | { | 228 | { |
225 | __u32 *p = (__u32 *) addr + (nr >> 5); | 229 | __u32 *p = (__u32 *) addr + (nr >> 5); |
226 | __u32 m = 1 << (nr & 31); | 230 | __u32 m = 1 << (nr & 31); |
227 | int oldbitset = (*p & m) != 0; | 231 | int oldbitset = (*p & m) != 0; |
228 | 232 | ||
229 | *p |= m; | 233 | *p |= m; |
230 | return oldbitset; | 234 | return oldbitset; |
231 | } | 235 | } |
232 | 236 | ||
233 | /** | 237 | /** |
234 | * test_and_clear_bit - Clear a bit and return its old value | 238 | * test_and_clear_bit - Clear a bit and return its old value |
235 | * @nr: Bit to set | 239 | * @nr: Bit to set |
236 | * @addr: Address to count from | 240 | * @addr: Address to count from |
237 | * | 241 | * |
238 | * This operation is atomic and cannot be reordered. | 242 | * This operation is atomic and cannot be reordered. |
239 | * It also implies a memory barrier. | 243 | * It also implies a memory barrier. |
240 | */ | 244 | */ |
241 | static __inline__ int | 245 | static __inline__ int |
242 | test_and_clear_bit (int nr, volatile void *addr) | 246 | test_and_clear_bit (int nr, volatile void *addr) |
243 | { | 247 | { |
244 | __u32 mask, old, new; | 248 | __u32 mask, old, new; |
245 | volatile __u32 *m; | 249 | volatile __u32 *m; |
246 | CMPXCHG_BUGCHECK_DECL | 250 | CMPXCHG_BUGCHECK_DECL |
247 | 251 | ||
248 | m = (volatile __u32 *) addr + (nr >> 5); | 252 | m = (volatile __u32 *) addr + (nr >> 5); |
249 | mask = ~(1 << (nr & 31)); | 253 | mask = ~(1 << (nr & 31)); |
250 | do { | 254 | do { |
251 | CMPXCHG_BUGCHECK(m); | 255 | CMPXCHG_BUGCHECK(m); |
252 | old = *m; | 256 | old = *m; |
253 | new = old & mask; | 257 | new = old & mask; |
254 | } while (cmpxchg_acq(m, old, new) != old); | 258 | } while (cmpxchg_acq(m, old, new) != old); |
255 | return (old & ~mask) != 0; | 259 | return (old & ~mask) != 0; |
256 | } | 260 | } |
257 | 261 | ||
258 | /** | 262 | /** |
259 | * __test_and_clear_bit - Clear a bit and return its old value | 263 | * __test_and_clear_bit - Clear a bit and return its old value |
260 | * @nr: Bit to set | 264 | * @nr: Bit to set |
261 | * @addr: Address to count from | 265 | * @addr: Address to count from |
262 | * | 266 | * |
263 | * This operation is non-atomic and can be reordered. | 267 | * This operation is non-atomic and can be reordered. |
264 | * If two instances of this operation race, one can appear to succeed | 268 | * If two instances of this operation race, one can appear to succeed |
265 | * but actually fail. You must protect multiple accesses with a lock. | 269 | * but actually fail. You must protect multiple accesses with a lock. |
266 | */ | 270 | */ |
267 | static __inline__ int | 271 | static __inline__ int |
268 | __test_and_clear_bit(int nr, volatile void * addr) | 272 | __test_and_clear_bit(int nr, volatile void * addr) |
269 | { | 273 | { |
270 | __u32 *p = (__u32 *) addr + (nr >> 5); | 274 | __u32 *p = (__u32 *) addr + (nr >> 5); |
271 | __u32 m = 1 << (nr & 31); | 275 | __u32 m = 1 << (nr & 31); |
272 | int oldbitset = *p & m; | 276 | int oldbitset = *p & m; |
273 | 277 | ||
274 | *p &= ~m; | 278 | *p &= ~m; |
275 | return oldbitset; | 279 | return oldbitset; |
276 | } | 280 | } |
277 | 281 | ||
278 | /** | 282 | /** |
279 | * test_and_change_bit - Change a bit and return its old value | 283 | * test_and_change_bit - Change a bit and return its old value |
280 | * @nr: Bit to set | 284 | * @nr: Bit to set |
281 | * @addr: Address to count from | 285 | * @addr: Address to count from |
282 | * | 286 | * |
283 | * This operation is atomic and cannot be reordered. | 287 | * This operation is atomic and cannot be reordered. |
284 | * It also implies a memory barrier. | 288 | * It also implies a memory barrier. |
285 | */ | 289 | */ |
286 | static __inline__ int | 290 | static __inline__ int |
287 | test_and_change_bit (int nr, volatile void *addr) | 291 | test_and_change_bit (int nr, volatile void *addr) |
288 | { | 292 | { |
289 | __u32 bit, old, new; | 293 | __u32 bit, old, new; |
290 | volatile __u32 *m; | 294 | volatile __u32 *m; |
291 | CMPXCHG_BUGCHECK_DECL | 295 | CMPXCHG_BUGCHECK_DECL |
292 | 296 | ||
293 | m = (volatile __u32 *) addr + (nr >> 5); | 297 | m = (volatile __u32 *) addr + (nr >> 5); |
294 | bit = (1 << (nr & 31)); | 298 | bit = (1 << (nr & 31)); |
295 | do { | 299 | do { |
296 | CMPXCHG_BUGCHECK(m); | 300 | CMPXCHG_BUGCHECK(m); |
297 | old = *m; | 301 | old = *m; |
298 | new = old ^ bit; | 302 | new = old ^ bit; |
299 | } while (cmpxchg_acq(m, old, new) != old); | 303 | } while (cmpxchg_acq(m, old, new) != old); |
300 | return (old & bit) != 0; | 304 | return (old & bit) != 0; |
301 | } | 305 | } |
302 | 306 | ||
303 | /* | 307 | /* |
304 | * WARNING: non-atomic version. | 308 | * WARNING: non-atomic version. |
305 | */ | 309 | */ |
306 | static __inline__ int | 310 | static __inline__ int |
307 | __test_and_change_bit (int nr, void *addr) | 311 | __test_and_change_bit (int nr, void *addr) |
308 | { | 312 | { |
309 | __u32 old, bit = (1 << (nr & 31)); | 313 | __u32 old, bit = (1 << (nr & 31)); |
310 | __u32 *m = (__u32 *) addr + (nr >> 5); | 314 | __u32 *m = (__u32 *) addr + (nr >> 5); |
311 | 315 | ||
312 | old = *m; | 316 | old = *m; |
313 | *m = old ^ bit; | 317 | *m = old ^ bit; |
314 | return (old & bit) != 0; | 318 | return (old & bit) != 0; |
315 | } | 319 | } |
316 | 320 | ||
317 | static __inline__ int | 321 | static __inline__ int |
318 | test_bit (int nr, const volatile void *addr) | 322 | test_bit (int nr, const volatile void *addr) |
319 | { | 323 | { |
320 | return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31)); | 324 | return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31)); |
321 | } | 325 | } |
322 | 326 | ||
323 | /** | 327 | /** |
324 | * ffz - find the first zero bit in a long word | 328 | * ffz - find the first zero bit in a long word |
325 | * @x: The long word to find the bit in | 329 | * @x: The long word to find the bit in |
326 | * | 330 | * |
327 | * Returns the bit-number (0..63) of the first (least significant) zero bit. | 331 | * Returns the bit-number (0..63) of the first (least significant) zero bit. |
328 | * Undefined if no zero exists, so code should check against ~0UL first... | 332 | * Undefined if no zero exists, so code should check against ~0UL first... |
329 | */ | 333 | */ |
330 | static inline unsigned long | 334 | static inline unsigned long |
331 | ffz (unsigned long x) | 335 | ffz (unsigned long x) |
332 | { | 336 | { |
333 | unsigned long result; | 337 | unsigned long result; |
334 | 338 | ||
335 | result = ia64_popcnt(x & (~x - 1)); | 339 | result = ia64_popcnt(x & (~x - 1)); |
336 | return result; | 340 | return result; |
337 | } | 341 | } |
338 | 342 | ||
339 | /** | 343 | /** |
340 | * __ffs - find first bit in word. | 344 | * __ffs - find first bit in word. |
341 | * @x: The word to search | 345 | * @x: The word to search |
342 | * | 346 | * |
343 | * Undefined if no bit exists, so code should check against 0 first. | 347 | * Undefined if no bit exists, so code should check against 0 first. |
344 | */ | 348 | */ |
345 | static __inline__ unsigned long | 349 | static __inline__ unsigned long |
346 | __ffs (unsigned long x) | 350 | __ffs (unsigned long x) |
347 | { | 351 | { |
348 | unsigned long result; | 352 | unsigned long result; |
349 | 353 | ||
350 | result = ia64_popcnt((x-1) & ~x); | 354 | result = ia64_popcnt((x-1) & ~x); |
351 | return result; | 355 | return result; |
352 | } | 356 | } |
353 | 357 | ||
354 | #ifdef __KERNEL__ | 358 | #ifdef __KERNEL__ |
355 | 359 | ||
356 | /* | 360 | /* |
357 | * Return bit number of last (most-significant) bit set. Undefined | 361 | * Return bit number of last (most-significant) bit set. Undefined |
358 | * for x==0. Bits are numbered from 0..63 (e.g., ia64_fls(9) == 3). | 362 | * for x==0. Bits are numbered from 0..63 (e.g., ia64_fls(9) == 3). |
359 | */ | 363 | */ |
360 | static inline unsigned long | 364 | static inline unsigned long |
361 | ia64_fls (unsigned long x) | 365 | ia64_fls (unsigned long x) |
362 | { | 366 | { |
363 | long double d = x; | 367 | long double d = x; |
364 | long exp; | 368 | long exp; |
365 | 369 | ||
366 | exp = ia64_getf_exp(d); | 370 | exp = ia64_getf_exp(d); |
367 | return exp - 0xffff; | 371 | return exp - 0xffff; |
368 | } | 372 | } |
369 | 373 | ||
370 | /* | 374 | /* |
371 | * Find the last (most significant) bit set. Returns 0 for x==0 and | 375 | * Find the last (most significant) bit set. Returns 0 for x==0 and |
372 | * bits are numbered from 1..32 (e.g., fls(9) == 4). | 376 | * bits are numbered from 1..32 (e.g., fls(9) == 4). |
373 | */ | 377 | */ |
374 | static inline int | 378 | static inline int |
375 | fls (int t) | 379 | fls (int t) |
376 | { | 380 | { |
377 | unsigned long x = t & 0xffffffffu; | 381 | unsigned long x = t & 0xffffffffu; |
378 | 382 | ||
379 | if (!x) | 383 | if (!x) |
380 | return 0; | 384 | return 0; |
381 | x |= x >> 1; | 385 | x |= x >> 1; |
382 | x |= x >> 2; | 386 | x |= x >> 2; |
383 | x |= x >> 4; | 387 | x |= x >> 4; |
384 | x |= x >> 8; | 388 | x |= x >> 8; |
385 | x |= x >> 16; | 389 | x |= x >> 16; |
386 | return ia64_popcnt(x); | 390 | return ia64_popcnt(x); |
387 | } | 391 | } |
388 | 392 | ||
389 | #include <asm-generic/bitops/fls64.h> | 393 | #include <asm-generic/bitops/fls64.h> |
390 | 394 | ||
391 | /* | 395 | /* |
392 | * ffs: find first bit set. This is defined the same way as the libc and | 396 | * ffs: find first bit set. This is defined the same way as the libc and |
393 | * compiler builtin ffs routines, therefore differs in spirit from the above | 397 | * compiler builtin ffs routines, therefore differs in spirit from the above |
394 | * ffz (man ffs): it operates on "int" values only and the result value is the | 398 | * ffz (man ffs): it operates on "int" values only and the result value is the |
395 | * bit number + 1. ffs(0) is defined to return zero. | 399 | * bit number + 1. ffs(0) is defined to return zero. |
396 | */ | 400 | */ |
397 | #define ffs(x) __builtin_ffs(x) | 401 | #define ffs(x) __builtin_ffs(x) |
398 | 402 | ||
399 | /* | 403 | /* |
400 | * hweightN: returns the hamming weight (i.e. the number | 404 | * hweightN: returns the hamming weight (i.e. the number |
401 | * of bits set) of a N-bit word | 405 | * of bits set) of a N-bit word |
402 | */ | 406 | */ |
403 | static __inline__ unsigned long | 407 | static __inline__ unsigned long |
404 | hweight64 (unsigned long x) | 408 | hweight64 (unsigned long x) |
405 | { | 409 | { |
406 | unsigned long result; | 410 | unsigned long result; |
407 | result = ia64_popcnt(x); | 411 | result = ia64_popcnt(x); |
408 | return result; | 412 | return result; |
409 | } | 413 | } |
410 | 414 | ||
411 | #define hweight32(x) (unsigned int) hweight64((x) & 0xfffffffful) | 415 | #define hweight32(x) (unsigned int) hweight64((x) & 0xfffffffful) |
412 | #define hweight16(x) (unsigned int) hweight64((x) & 0xfffful) | 416 | #define hweight16(x) (unsigned int) hweight64((x) & 0xfffful) |
413 | #define hweight8(x) (unsigned int) hweight64((x) & 0xfful) | 417 | #define hweight8(x) (unsigned int) hweight64((x) & 0xfful) |
414 | 418 | ||
415 | #endif /* __KERNEL__ */ | 419 | #endif /* __KERNEL__ */ |
416 | 420 | ||
417 | #include <asm-generic/bitops/find.h> | 421 | #include <asm-generic/bitops/find.h> |
418 | 422 | ||
419 | #ifdef __KERNEL__ | 423 | #ifdef __KERNEL__ |
420 | 424 | ||
421 | #include <asm-generic/bitops/ext2-non-atomic.h> | 425 | #include <asm-generic/bitops/ext2-non-atomic.h> |
422 | 426 | ||
423 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) | 427 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) |
424 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) | 428 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) |
425 | 429 | ||
426 | #include <asm-generic/bitops/minix.h> | 430 | #include <asm-generic/bitops/minix.h> |
427 | #include <asm-generic/bitops/sched.h> | 431 | #include <asm-generic/bitops/sched.h> |
428 | 432 | ||
429 | #endif /* __KERNEL__ */ | 433 | #endif /* __KERNEL__ */ |
430 | 434 | ||
431 | #endif /* _ASM_IA64_BITOPS_H */ | 435 | #endif /* _ASM_IA64_BITOPS_H */ |
432 | 436 |
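The ia64 ffz() and __ffs() above lean on two popcount identities: x & (~x - 1) keeps exactly the trailing 1 bits of x, and (x - 1) & ~x keeps exactly the trailing 0 bits, so a single population count yields the bit number. A user-space check of both identities, with ia64_popcnt modelled by the GCC builtin (an assumption for illustration only):

	#include <stdio.h>

	static unsigned long ffz_popcnt(unsigned long x)
	{
		return __builtin_popcountl(x & (~x - 1));	/* count of trailing 1 bits */
	}

	static unsigned long __ffs_popcnt(unsigned long x)
	{
		return __builtin_popcountl((x - 1) & ~x);	/* count of trailing 0 bits */
	}

	int main(void)
	{
		printf("ffz(0x0f)   = %lu\n", ffz_popcnt(0x0fUL));	/* 4: bits 0..3 are set */
		printf("__ffs(0x30) = %lu\n", __ffs_popcnt(0x30UL));	/* 4: lowest set bit is bit 4 */
		return 0;
	}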
include/asm-m32r/bitops.h
1 | #ifndef _ASM_M32R_BITOPS_H | 1 | #ifndef _ASM_M32R_BITOPS_H |
2 | #define _ASM_M32R_BITOPS_H | 2 | #define _ASM_M32R_BITOPS_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * linux/include/asm-m32r/bitops.h | 5 | * linux/include/asm-m32r/bitops.h |
6 | * | 6 | * |
7 | * Copyright 1992, Linus Torvalds. | 7 | * Copyright 1992, Linus Torvalds. |
8 | * | 8 | * |
9 | * M32R version: | 9 | * M32R version: |
10 | * Copyright (C) 2001, 2002 Hitoshi Yamamoto | 10 | * Copyright (C) 2001, 2002 Hitoshi Yamamoto |
11 | * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org> | 11 | * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org> |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #ifndef _LINUX_BITOPS_H | ||
15 | #error only <linux/bitops.h> can be included directly | ||
16 | #endif | ||
17 | |||
14 | #include <linux/compiler.h> | 18 | #include <linux/compiler.h> |
15 | #include <asm/assembler.h> | 19 | #include <asm/assembler.h> |
16 | #include <asm/system.h> | 20 | #include <asm/system.h> |
17 | #include <asm/byteorder.h> | 21 | #include <asm/byteorder.h> |
18 | #include <asm/types.h> | 22 | #include <asm/types.h> |
19 | 23 | ||
20 | /* | 24 | /* |
21 | * These have to be done with inline assembly: that way the bit-setting | 25 | * These have to be done with inline assembly: that way the bit-setting |
22 | * is guaranteed to be atomic. All bit operations return 0 if the bit | 26 | * is guaranteed to be atomic. All bit operations return 0 if the bit |
23 | * was cleared before the operation and != 0 if it was not. | 27 | * was cleared before the operation and != 0 if it was not. |
24 | * | 28 | * |
25 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). | 29 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). |
26 | */ | 30 | */ |
27 | 31 | ||
28 | /** | 32 | /** |
29 | * set_bit - Atomically set a bit in memory | 33 | * set_bit - Atomically set a bit in memory |
30 | * @nr: the bit to set | 34 | * @nr: the bit to set |
31 | * @addr: the address to start counting from | 35 | * @addr: the address to start counting from |
32 | * | 36 | * |
33 | * This function is atomic and may not be reordered. See __set_bit() | 37 | * This function is atomic and may not be reordered. See __set_bit() |
34 | * if you do not require the atomic guarantees. | 38 | * if you do not require the atomic guarantees. |
35 | * Note that @nr may be almost arbitrarily large; this function is not | 39 | * Note that @nr may be almost arbitrarily large; this function is not |
36 | * restricted to acting on a single-word quantity. | 40 | * restricted to acting on a single-word quantity. |
37 | */ | 41 | */ |
38 | static __inline__ void set_bit(int nr, volatile void * addr) | 42 | static __inline__ void set_bit(int nr, volatile void * addr) |
39 | { | 43 | { |
40 | __u32 mask; | 44 | __u32 mask; |
41 | volatile __u32 *a = addr; | 45 | volatile __u32 *a = addr; |
42 | unsigned long flags; | 46 | unsigned long flags; |
43 | unsigned long tmp; | 47 | unsigned long tmp; |
44 | 48 | ||
45 | a += (nr >> 5); | 49 | a += (nr >> 5); |
46 | mask = (1 << (nr & 0x1F)); | 50 | mask = (1 << (nr & 0x1F)); |
47 | 51 | ||
48 | local_irq_save(flags); | 52 | local_irq_save(flags); |
49 | __asm__ __volatile__ ( | 53 | __asm__ __volatile__ ( |
50 | DCACHE_CLEAR("%0", "r6", "%1") | 54 | DCACHE_CLEAR("%0", "r6", "%1") |
51 | M32R_LOCK" %0, @%1; \n\t" | 55 | M32R_LOCK" %0, @%1; \n\t" |
52 | "or %0, %2; \n\t" | 56 | "or %0, %2; \n\t" |
53 | M32R_UNLOCK" %0, @%1; \n\t" | 57 | M32R_UNLOCK" %0, @%1; \n\t" |
54 | : "=&r" (tmp) | 58 | : "=&r" (tmp) |
55 | : "r" (a), "r" (mask) | 59 | : "r" (a), "r" (mask) |
56 | : "memory" | 60 | : "memory" |
57 | #ifdef CONFIG_CHIP_M32700_TS1 | 61 | #ifdef CONFIG_CHIP_M32700_TS1 |
58 | , "r6" | 62 | , "r6" |
59 | #endif /* CONFIG_CHIP_M32700_TS1 */ | 63 | #endif /* CONFIG_CHIP_M32700_TS1 */ |
60 | ); | 64 | ); |
61 | local_irq_restore(flags); | 65 | local_irq_restore(flags); |
62 | } | 66 | } |
63 | 67 | ||
64 | /** | 68 | /** |
65 | * clear_bit - Clears a bit in memory | 69 | * clear_bit - Clears a bit in memory |
66 | * @nr: Bit to clear | 70 | * @nr: Bit to clear |
67 | * @addr: Address to start counting from | 71 | * @addr: Address to start counting from |
68 | * | 72 | * |
69 | * clear_bit() is atomic and may not be reordered. However, it does | 73 | * clear_bit() is atomic and may not be reordered. However, it does |
70 | * not contain a memory barrier, so if it is used for locking purposes, | 74 | * not contain a memory barrier, so if it is used for locking purposes, |
71 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | 75 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() |
72 | * in order to ensure changes are visible on other processors. | 76 | * in order to ensure changes are visible on other processors. |
73 | */ | 77 | */ |
74 | static __inline__ void clear_bit(int nr, volatile void * addr) | 78 | static __inline__ void clear_bit(int nr, volatile void * addr) |
75 | { | 79 | { |
76 | __u32 mask; | 80 | __u32 mask; |
77 | volatile __u32 *a = addr; | 81 | volatile __u32 *a = addr; |
78 | unsigned long flags; | 82 | unsigned long flags; |
79 | unsigned long tmp; | 83 | unsigned long tmp; |
80 | 84 | ||
81 | a += (nr >> 5); | 85 | a += (nr >> 5); |
82 | mask = (1 << (nr & 0x1F)); | 86 | mask = (1 << (nr & 0x1F)); |
83 | 87 | ||
84 | local_irq_save(flags); | 88 | local_irq_save(flags); |
85 | 89 | ||
86 | __asm__ __volatile__ ( | 90 | __asm__ __volatile__ ( |
87 | DCACHE_CLEAR("%0", "r6", "%1") | 91 | DCACHE_CLEAR("%0", "r6", "%1") |
88 | M32R_LOCK" %0, @%1; \n\t" | 92 | M32R_LOCK" %0, @%1; \n\t" |
89 | "and %0, %2; \n\t" | 93 | "and %0, %2; \n\t" |
90 | M32R_UNLOCK" %0, @%1; \n\t" | 94 | M32R_UNLOCK" %0, @%1; \n\t" |
91 | : "=&r" (tmp) | 95 | : "=&r" (tmp) |
92 | : "r" (a), "r" (~mask) | 96 | : "r" (a), "r" (~mask) |
93 | : "memory" | 97 | : "memory" |
94 | #ifdef CONFIG_CHIP_M32700_TS1 | 98 | #ifdef CONFIG_CHIP_M32700_TS1 |
95 | , "r6" | 99 | , "r6" |
96 | #endif /* CONFIG_CHIP_M32700_TS1 */ | 100 | #endif /* CONFIG_CHIP_M32700_TS1 */ |
97 | ); | 101 | ); |
98 | local_irq_restore(flags); | 102 | local_irq_restore(flags); |
99 | } | 103 | } |
100 | 104 | ||
101 | #define smp_mb__before_clear_bit() barrier() | 105 | #define smp_mb__before_clear_bit() barrier() |
102 | #define smp_mb__after_clear_bit() barrier() | 106 | #define smp_mb__after_clear_bit() barrier() |
103 | 107 | ||
104 | /** | 108 | /** |
105 | * change_bit - Toggle a bit in memory | 109 | * change_bit - Toggle a bit in memory |
106 | * @nr: Bit to clear | 110 | * @nr: Bit to clear |
107 | * @addr: Address to start counting from | 111 | * @addr: Address to start counting from |
108 | * | 112 | * |
109 | * change_bit() is atomic and may not be reordered. | 113 | * change_bit() is atomic and may not be reordered. |
110 | * Note that @nr may be almost arbitrarily large; this function is not | 114 | * Note that @nr may be almost arbitrarily large; this function is not |
111 | * restricted to acting on a single-word quantity. | 115 | * restricted to acting on a single-word quantity. |
112 | */ | 116 | */ |
113 | static __inline__ void change_bit(int nr, volatile void * addr) | 117 | static __inline__ void change_bit(int nr, volatile void * addr) |
114 | { | 118 | { |
115 | __u32 mask; | 119 | __u32 mask; |
116 | volatile __u32 *a = addr; | 120 | volatile __u32 *a = addr; |
117 | unsigned long flags; | 121 | unsigned long flags; |
118 | unsigned long tmp; | 122 | unsigned long tmp; |
119 | 123 | ||
120 | a += (nr >> 5); | 124 | a += (nr >> 5); |
121 | mask = (1 << (nr & 0x1F)); | 125 | mask = (1 << (nr & 0x1F)); |
122 | 126 | ||
123 | local_irq_save(flags); | 127 | local_irq_save(flags); |
124 | __asm__ __volatile__ ( | 128 | __asm__ __volatile__ ( |
125 | DCACHE_CLEAR("%0", "r6", "%1") | 129 | DCACHE_CLEAR("%0", "r6", "%1") |
126 | M32R_LOCK" %0, @%1; \n\t" | 130 | M32R_LOCK" %0, @%1; \n\t" |
127 | "xor %0, %2; \n\t" | 131 | "xor %0, %2; \n\t" |
128 | M32R_UNLOCK" %0, @%1; \n\t" | 132 | M32R_UNLOCK" %0, @%1; \n\t" |
129 | : "=&r" (tmp) | 133 | : "=&r" (tmp) |
130 | : "r" (a), "r" (mask) | 134 | : "r" (a), "r" (mask) |
131 | : "memory" | 135 | : "memory" |
132 | #ifdef CONFIG_CHIP_M32700_TS1 | 136 | #ifdef CONFIG_CHIP_M32700_TS1 |
133 | , "r6" | 137 | , "r6" |
134 | #endif /* CONFIG_CHIP_M32700_TS1 */ | 138 | #endif /* CONFIG_CHIP_M32700_TS1 */ |
135 | ); | 139 | ); |
136 | local_irq_restore(flags); | 140 | local_irq_restore(flags); |
137 | } | 141 | } |
138 | 142 | ||
139 | /** | 143 | /** |
140 | * test_and_set_bit - Set a bit and return its old value | 144 | * test_and_set_bit - Set a bit and return its old value |
141 | * @nr: Bit to set | 145 | * @nr: Bit to set |
142 | * @addr: Address to count from | 146 | * @addr: Address to count from |
143 | * | 147 | * |
144 | * This operation is atomic and cannot be reordered. | 148 | * This operation is atomic and cannot be reordered. |
145 | * It also implies a memory barrier. | 149 | * It also implies a memory barrier. |
146 | */ | 150 | */ |
147 | static __inline__ int test_and_set_bit(int nr, volatile void * addr) | 151 | static __inline__ int test_and_set_bit(int nr, volatile void * addr) |
148 | { | 152 | { |
149 | __u32 mask, oldbit; | 153 | __u32 mask, oldbit; |
150 | volatile __u32 *a = addr; | 154 | volatile __u32 *a = addr; |
151 | unsigned long flags; | 155 | unsigned long flags; |
152 | unsigned long tmp; | 156 | unsigned long tmp; |
153 | 157 | ||
154 | a += (nr >> 5); | 158 | a += (nr >> 5); |
155 | mask = (1 << (nr & 0x1F)); | 159 | mask = (1 << (nr & 0x1F)); |
156 | 160 | ||
157 | local_irq_save(flags); | 161 | local_irq_save(flags); |
158 | __asm__ __volatile__ ( | 162 | __asm__ __volatile__ ( |
159 | DCACHE_CLEAR("%0", "%1", "%2") | 163 | DCACHE_CLEAR("%0", "%1", "%2") |
160 | M32R_LOCK" %0, @%2; \n\t" | 164 | M32R_LOCK" %0, @%2; \n\t" |
161 | "mv %1, %0; \n\t" | 165 | "mv %1, %0; \n\t" |
162 | "and %0, %3; \n\t" | 166 | "and %0, %3; \n\t" |
163 | "or %1, %3; \n\t" | 167 | "or %1, %3; \n\t" |
164 | M32R_UNLOCK" %1, @%2; \n\t" | 168 | M32R_UNLOCK" %1, @%2; \n\t" |
165 | : "=&r" (oldbit), "=&r" (tmp) | 169 | : "=&r" (oldbit), "=&r" (tmp) |
166 | : "r" (a), "r" (mask) | 170 | : "r" (a), "r" (mask) |
167 | : "memory" | 171 | : "memory" |
168 | ); | 172 | ); |
169 | local_irq_restore(flags); | 173 | local_irq_restore(flags); |
170 | 174 | ||
171 | return (oldbit != 0); | 175 | return (oldbit != 0); |
172 | } | 176 | } |
173 | 177 | ||
174 | /** | 178 | /** |
175 | * test_and_clear_bit - Clear a bit and return its old value | 179 | * test_and_clear_bit - Clear a bit and return its old value |
176 | * @nr: Bit to set | 180 | * @nr: Bit to set |
177 | * @addr: Address to count from | 181 | * @addr: Address to count from |
178 | * | 182 | * |
179 | * This operation is atomic and cannot be reordered. | 183 | * This operation is atomic and cannot be reordered. |
180 | * It also implies a memory barrier. | 184 | * It also implies a memory barrier. |
181 | */ | 185 | */ |
182 | static __inline__ int test_and_clear_bit(int nr, volatile void * addr) | 186 | static __inline__ int test_and_clear_bit(int nr, volatile void * addr) |
183 | { | 187 | { |
184 | __u32 mask, oldbit; | 188 | __u32 mask, oldbit; |
185 | volatile __u32 *a = addr; | 189 | volatile __u32 *a = addr; |
186 | unsigned long flags; | 190 | unsigned long flags; |
187 | unsigned long tmp; | 191 | unsigned long tmp; |
188 | 192 | ||
189 | a += (nr >> 5); | 193 | a += (nr >> 5); |
190 | mask = (1 << (nr & 0x1F)); | 194 | mask = (1 << (nr & 0x1F)); |
191 | 195 | ||
192 | local_irq_save(flags); | 196 | local_irq_save(flags); |
193 | 197 | ||
194 | __asm__ __volatile__ ( | 198 | __asm__ __volatile__ ( |
195 | DCACHE_CLEAR("%0", "%1", "%3") | 199 | DCACHE_CLEAR("%0", "%1", "%3") |
196 | M32R_LOCK" %0, @%3; \n\t" | 200 | M32R_LOCK" %0, @%3; \n\t" |
197 | "mv %1, %0; \n\t" | 201 | "mv %1, %0; \n\t" |
198 | "and %0, %2; \n\t" | 202 | "and %0, %2; \n\t" |
199 | "not %2, %2; \n\t" | 203 | "not %2, %2; \n\t" |
200 | "and %1, %2; \n\t" | 204 | "and %1, %2; \n\t" |
201 | M32R_UNLOCK" %1, @%3; \n\t" | 205 | M32R_UNLOCK" %1, @%3; \n\t" |
202 | : "=&r" (oldbit), "=&r" (tmp), "+r" (mask) | 206 | : "=&r" (oldbit), "=&r" (tmp), "+r" (mask) |
203 | : "r" (a) | 207 | : "r" (a) |
204 | : "memory" | 208 | : "memory" |
205 | ); | 209 | ); |
206 | local_irq_restore(flags); | 210 | local_irq_restore(flags); |
207 | 211 | ||
208 | return (oldbit != 0); | 212 | return (oldbit != 0); |
209 | } | 213 | } |
210 | 214 | ||
211 | /** | 215 | /** |
212 | * test_and_change_bit - Change a bit and return its old value | 216 | * test_and_change_bit - Change a bit and return its old value |
213 | * @nr: Bit to set | 217 | * @nr: Bit to set |
214 | * @addr: Address to count from | 218 | * @addr: Address to count from |
215 | * | 219 | * |
216 | * This operation is atomic and cannot be reordered. | 220 | * This operation is atomic and cannot be reordered. |
217 | * It also implies a memory barrier. | 221 | * It also implies a memory barrier. |
218 | */ | 222 | */ |
219 | static __inline__ int test_and_change_bit(int nr, volatile void * addr) | 223 | static __inline__ int test_and_change_bit(int nr, volatile void * addr) |
220 | { | 224 | { |
221 | __u32 mask, oldbit; | 225 | __u32 mask, oldbit; |
222 | volatile __u32 *a = addr; | 226 | volatile __u32 *a = addr; |
223 | unsigned long flags; | 227 | unsigned long flags; |
224 | unsigned long tmp; | 228 | unsigned long tmp; |
225 | 229 | ||
226 | a += (nr >> 5); | 230 | a += (nr >> 5); |
227 | mask = (1 << (nr & 0x1F)); | 231 | mask = (1 << (nr & 0x1F)); |
228 | 232 | ||
229 | local_irq_save(flags); | 233 | local_irq_save(flags); |
230 | __asm__ __volatile__ ( | 234 | __asm__ __volatile__ ( |
231 | DCACHE_CLEAR("%0", "%1", "%2") | 235 | DCACHE_CLEAR("%0", "%1", "%2") |
232 | M32R_LOCK" %0, @%2; \n\t" | 236 | M32R_LOCK" %0, @%2; \n\t" |
233 | "mv %1, %0; \n\t" | 237 | "mv %1, %0; \n\t" |
234 | "and %0, %3; \n\t" | 238 | "and %0, %3; \n\t" |
235 | "xor %1, %3; \n\t" | 239 | "xor %1, %3; \n\t" |
236 | M32R_UNLOCK" %1, @%2; \n\t" | 240 | M32R_UNLOCK" %1, @%2; \n\t" |
237 | : "=&r" (oldbit), "=&r" (tmp) | 241 | : "=&r" (oldbit), "=&r" (tmp) |
238 | : "r" (a), "r" (mask) | 242 | : "r" (a), "r" (mask) |
239 | : "memory" | 243 | : "memory" |
240 | ); | 244 | ); |
241 | local_irq_restore(flags); | 245 | local_irq_restore(flags); |
242 | 246 | ||
243 | return (oldbit != 0); | 247 | return (oldbit != 0); |
244 | } | 248 | } |
245 | 249 | ||
246 | #include <asm-generic/bitops/non-atomic.h> | 250 | #include <asm-generic/bitops/non-atomic.h> |
247 | #include <asm-generic/bitops/ffz.h> | 251 | #include <asm-generic/bitops/ffz.h> |
248 | #include <asm-generic/bitops/__ffs.h> | 252 | #include <asm-generic/bitops/__ffs.h> |
249 | #include <asm-generic/bitops/fls.h> | 253 | #include <asm-generic/bitops/fls.h> |
250 | #include <asm-generic/bitops/fls64.h> | 254 | #include <asm-generic/bitops/fls64.h> |
251 | 255 | ||
252 | #ifdef __KERNEL__ | 256 | #ifdef __KERNEL__ |
253 | 257 | ||
254 | #include <asm-generic/bitops/sched.h> | 258 | #include <asm-generic/bitops/sched.h> |
255 | #include <asm-generic/bitops/find.h> | 259 | #include <asm-generic/bitops/find.h> |
256 | #include <asm-generic/bitops/ffs.h> | 260 | #include <asm-generic/bitops/ffs.h> |
257 | #include <asm-generic/bitops/hweight.h> | 261 | #include <asm-generic/bitops/hweight.h> |
258 | #include <asm-generic/bitops/lock.h> | 262 | #include <asm-generic/bitops/lock.h> |
259 | 263 | ||
260 | #endif /* __KERNEL__ */ | 264 | #endif /* __KERNEL__ */ |
261 | 265 | ||
262 | #ifdef __KERNEL__ | 266 | #ifdef __KERNEL__ |
263 | 267 | ||
264 | #include <asm-generic/bitops/ext2-non-atomic.h> | 268 | #include <asm-generic/bitops/ext2-non-atomic.h> |
265 | #include <asm-generic/bitops/ext2-atomic.h> | 269 | #include <asm-generic/bitops/ext2-atomic.h> |
266 | #include <asm-generic/bitops/minix.h> | 270 | #include <asm-generic/bitops/minix.h> |
267 | 271 | ||
268 | #endif /* __KERNEL__ */ | 272 | #endif /* __KERNEL__ */ |
269 | 273 | ||
270 | #endif /* _ASM_M32R_BITOPS_H */ | 274 | #endif /* _ASM_M32R_BITOPS_H */ |
271 | 275 |
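The m32r test_and_set_bit() above does its read-modify-write inside a local_irq_save()/LOCK/UNLOCK window. A portable sketch of the same observable semantics using C11 atomics (an illustration of the behaviour, not how the m32r port is implemented):

	#include <stdatomic.h>
	#include <stdio.h>

	static int test_and_set_bit_cas(int nr, _Atomic unsigned long *word)
	{
		unsigned long mask = 1UL << (nr & (8 * sizeof(unsigned long) - 1));
		unsigned long old = atomic_load(word);

		/* retry until our OR lands atomically; old is refreshed on failure */
		while (!atomic_compare_exchange_weak(word, &old, old | mask))
			;
		return (old & mask) != 0;
	}

	int main(void)
	{
		_Atomic unsigned long w = 0;

		printf("%d\n", test_and_set_bit_cas(3, &w));	/* 0: bit was clear */
		printf("%d\n", test_and_set_bit_cas(3, &w));	/* 1: bit already set */
		return 0;
	}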
include/asm-m68k/bitops.h
1 | #ifndef _M68K_BITOPS_H | 1 | #ifndef _M68K_BITOPS_H |
2 | #define _M68K_BITOPS_H | 2 | #define _M68K_BITOPS_H |
3 | /* | 3 | /* |
4 | * Copyright 1992, Linus Torvalds. | 4 | * Copyright 1992, Linus Torvalds. |
5 | * | 5 | * |
6 | * This file is subject to the terms and conditions of the GNU General Public | 6 | * This file is subject to the terms and conditions of the GNU General Public |
7 | * License. See the file COPYING in the main directory of this archive | 7 | * License. See the file COPYING in the main directory of this archive |
8 | * for more details. | 8 | * for more details. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef _LINUX_BITOPS_H | ||
12 | #error only <linux/bitops.h> can be included directly | ||
13 | #endif | ||
14 | |||
11 | #include <linux/compiler.h> | 15 | #include <linux/compiler.h> |
12 | 16 | ||
13 | /* | 17 | /* |
14 | * Require 68020 or better. | 18 | * Require 68020 or better. |
15 | * | 19 | * |
16 | * They use the standard big-endian m680x0 bit ordering. | 20 | * They use the standard big-endian m680x0 bit ordering. |
17 | */ | 21 | */ |
18 | 22 | ||
19 | #define test_and_set_bit(nr,vaddr) \ | 23 | #define test_and_set_bit(nr,vaddr) \ |
20 | (__builtin_constant_p(nr) ? \ | 24 | (__builtin_constant_p(nr) ? \ |
21 | __constant_test_and_set_bit(nr, vaddr) : \ | 25 | __constant_test_and_set_bit(nr, vaddr) : \ |
22 | __generic_test_and_set_bit(nr, vaddr)) | 26 | __generic_test_and_set_bit(nr, vaddr)) |
23 | 27 | ||
24 | #define __test_and_set_bit(nr,vaddr) test_and_set_bit(nr,vaddr) | 28 | #define __test_and_set_bit(nr,vaddr) test_and_set_bit(nr,vaddr) |
25 | 29 | ||
26 | static inline int __constant_test_and_set_bit(int nr, unsigned long *vaddr) | 30 | static inline int __constant_test_and_set_bit(int nr, unsigned long *vaddr) |
27 | { | 31 | { |
28 | char *p = (char *)vaddr + (nr ^ 31) / 8; | 32 | char *p = (char *)vaddr + (nr ^ 31) / 8; |
29 | char retval; | 33 | char retval; |
30 | 34 | ||
31 | __asm__ __volatile__ ("bset %2,%1; sne %0" | 35 | __asm__ __volatile__ ("bset %2,%1; sne %0" |
32 | : "=d" (retval), "+m" (*p) | 36 | : "=d" (retval), "+m" (*p) |
33 | : "di" (nr & 7)); | 37 | : "di" (nr & 7)); |
34 | 38 | ||
35 | return retval; | 39 | return retval; |
36 | } | 40 | } |
37 | 41 | ||
38 | static inline int __generic_test_and_set_bit(int nr, unsigned long *vaddr) | 42 | static inline int __generic_test_and_set_bit(int nr, unsigned long *vaddr) |
39 | { | 43 | { |
40 | char retval; | 44 | char retval; |
41 | 45 | ||
42 | __asm__ __volatile__ ("bfset %2{%1:#1}; sne %0" | 46 | __asm__ __volatile__ ("bfset %2{%1:#1}; sne %0" |
43 | : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory"); | 47 | : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory"); |
44 | 48 | ||
45 | return retval; | 49 | return retval; |
46 | } | 50 | } |
47 | 51 | ||
48 | #define set_bit(nr,vaddr) \ | 52 | #define set_bit(nr,vaddr) \ |
49 | (__builtin_constant_p(nr) ? \ | 53 | (__builtin_constant_p(nr) ? \ |
50 | __constant_set_bit(nr, vaddr) : \ | 54 | __constant_set_bit(nr, vaddr) : \ |
51 | __generic_set_bit(nr, vaddr)) | 55 | __generic_set_bit(nr, vaddr)) |
52 | 56 | ||
53 | #define __set_bit(nr,vaddr) set_bit(nr,vaddr) | 57 | #define __set_bit(nr,vaddr) set_bit(nr,vaddr) |
54 | 58 | ||
55 | static inline void __constant_set_bit(int nr, volatile unsigned long *vaddr) | 59 | static inline void __constant_set_bit(int nr, volatile unsigned long *vaddr) |
56 | { | 60 | { |
57 | char *p = (char *)vaddr + (nr ^ 31) / 8; | 61 | char *p = (char *)vaddr + (nr ^ 31) / 8; |
58 | __asm__ __volatile__ ("bset %1,%0" | 62 | __asm__ __volatile__ ("bset %1,%0" |
59 | : "+m" (*p) : "di" (nr & 7)); | 63 | : "+m" (*p) : "di" (nr & 7)); |
60 | } | 64 | } |
61 | 65 | ||
62 | static inline void __generic_set_bit(int nr, volatile unsigned long *vaddr) | 66 | static inline void __generic_set_bit(int nr, volatile unsigned long *vaddr) |
63 | { | 67 | { |
64 | __asm__ __volatile__ ("bfset %1{%0:#1}" | 68 | __asm__ __volatile__ ("bfset %1{%0:#1}" |
65 | : : "d" (nr^31), "o" (*vaddr) : "memory"); | 69 | : : "d" (nr^31), "o" (*vaddr) : "memory"); |
66 | } | 70 | } |
67 | 71 | ||
68 | #define test_and_clear_bit(nr,vaddr) \ | 72 | #define test_and_clear_bit(nr,vaddr) \ |
69 | (__builtin_constant_p(nr) ? \ | 73 | (__builtin_constant_p(nr) ? \ |
70 | __constant_test_and_clear_bit(nr, vaddr) : \ | 74 | __constant_test_and_clear_bit(nr, vaddr) : \ |
71 | __generic_test_and_clear_bit(nr, vaddr)) | 75 | __generic_test_and_clear_bit(nr, vaddr)) |
72 | 76 | ||
73 | #define __test_and_clear_bit(nr,vaddr) test_and_clear_bit(nr,vaddr) | 77 | #define __test_and_clear_bit(nr,vaddr) test_and_clear_bit(nr,vaddr) |
74 | 78 | ||
75 | static inline int __constant_test_and_clear_bit(int nr, unsigned long *vaddr) | 79 | static inline int __constant_test_and_clear_bit(int nr, unsigned long *vaddr) |
76 | { | 80 | { |
77 | char *p = (char *)vaddr + (nr ^ 31) / 8; | 81 | char *p = (char *)vaddr + (nr ^ 31) / 8; |
78 | char retval; | 82 | char retval; |
79 | 83 | ||
80 | __asm__ __volatile__ ("bclr %2,%1; sne %0" | 84 | __asm__ __volatile__ ("bclr %2,%1; sne %0" |
81 | : "=d" (retval), "+m" (*p) | 85 | : "=d" (retval), "+m" (*p) |
82 | : "di" (nr & 7)); | 86 | : "di" (nr & 7)); |
83 | 87 | ||
84 | return retval; | 88 | return retval; |
85 | } | 89 | } |
86 | 90 | ||
87 | static inline int __generic_test_and_clear_bit(int nr, unsigned long *vaddr) | 91 | static inline int __generic_test_and_clear_bit(int nr, unsigned long *vaddr) |
88 | { | 92 | { |
89 | char retval; | 93 | char retval; |
90 | 94 | ||
91 | __asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0" | 95 | __asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0" |
92 | : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory"); | 96 | : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory"); |
93 | 97 | ||
94 | return retval; | 98 | return retval; |
95 | } | 99 | } |
96 | 100 | ||
97 | /* | 101 | /* |
98 | * clear_bit() doesn't provide any barrier for the compiler. | 102 | * clear_bit() doesn't provide any barrier for the compiler. |
99 | */ | 103 | */ |
100 | #define smp_mb__before_clear_bit() barrier() | 104 | #define smp_mb__before_clear_bit() barrier() |
101 | #define smp_mb__after_clear_bit() barrier() | 105 | #define smp_mb__after_clear_bit() barrier() |
102 | 106 | ||
103 | #define clear_bit(nr,vaddr) \ | 107 | #define clear_bit(nr,vaddr) \ |
104 | (__builtin_constant_p(nr) ? \ | 108 | (__builtin_constant_p(nr) ? \ |
105 | __constant_clear_bit(nr, vaddr) : \ | 109 | __constant_clear_bit(nr, vaddr) : \ |
106 | __generic_clear_bit(nr, vaddr)) | 110 | __generic_clear_bit(nr, vaddr)) |
107 | #define __clear_bit(nr,vaddr) clear_bit(nr,vaddr) | 111 | #define __clear_bit(nr,vaddr) clear_bit(nr,vaddr) |
108 | 112 | ||
109 | static inline void __constant_clear_bit(int nr, volatile unsigned long *vaddr) | 113 | static inline void __constant_clear_bit(int nr, volatile unsigned long *vaddr) |
110 | { | 114 | { |
111 | char *p = (char *)vaddr + (nr ^ 31) / 8; | 115 | char *p = (char *)vaddr + (nr ^ 31) / 8; |
112 | __asm__ __volatile__ ("bclr %1,%0" | 116 | __asm__ __volatile__ ("bclr %1,%0" |
113 | : "+m" (*p) : "di" (nr & 7)); | 117 | : "+m" (*p) : "di" (nr & 7)); |
114 | } | 118 | } |
115 | 119 | ||
116 | static inline void __generic_clear_bit(int nr, volatile unsigned long *vaddr) | 120 | static inline void __generic_clear_bit(int nr, volatile unsigned long *vaddr) |
117 | { | 121 | { |
118 | __asm__ __volatile__ ("bfclr %1{%0:#1}" | 122 | __asm__ __volatile__ ("bfclr %1{%0:#1}" |
119 | : : "d" (nr^31), "o" (*vaddr) : "memory"); | 123 | : : "d" (nr^31), "o" (*vaddr) : "memory"); |
120 | } | 124 | } |
121 | 125 | ||
122 | #define test_and_change_bit(nr,vaddr) \ | 126 | #define test_and_change_bit(nr,vaddr) \ |
123 | (__builtin_constant_p(nr) ? \ | 127 | (__builtin_constant_p(nr) ? \ |
124 | __constant_test_and_change_bit(nr, vaddr) : \ | 128 | __constant_test_and_change_bit(nr, vaddr) : \ |
125 | __generic_test_and_change_bit(nr, vaddr)) | 129 | __generic_test_and_change_bit(nr, vaddr)) |
126 | 130 | ||
127 | #define __test_and_change_bit(nr,vaddr) test_and_change_bit(nr,vaddr) | 131 | #define __test_and_change_bit(nr,vaddr) test_and_change_bit(nr,vaddr) |
128 | #define __change_bit(nr,vaddr) change_bit(nr,vaddr) | 132 | #define __change_bit(nr,vaddr) change_bit(nr,vaddr) |
129 | 133 | ||
130 | static inline int __constant_test_and_change_bit(int nr, unsigned long *vaddr) | 134 | static inline int __constant_test_and_change_bit(int nr, unsigned long *vaddr) |
131 | { | 135 | { |
132 | char *p = (char *)vaddr + (nr ^ 31) / 8; | 136 | char *p = (char *)vaddr + (nr ^ 31) / 8; |
133 | char retval; | 137 | char retval; |
134 | 138 | ||
135 | __asm__ __volatile__ ("bchg %2,%1; sne %0" | 139 | __asm__ __volatile__ ("bchg %2,%1; sne %0" |
136 | : "=d" (retval), "+m" (*p) | 140 | : "=d" (retval), "+m" (*p) |
137 | : "di" (nr & 7)); | 141 | : "di" (nr & 7)); |
138 | 142 | ||
139 | return retval; | 143 | return retval; |
140 | } | 144 | } |
141 | 145 | ||
142 | static inline int __generic_test_and_change_bit(int nr, unsigned long *vaddr) | 146 | static inline int __generic_test_and_change_bit(int nr, unsigned long *vaddr) |
143 | { | 147 | { |
144 | char retval; | 148 | char retval; |
145 | 149 | ||
146 | __asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0" | 150 | __asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0" |
147 | : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory"); | 151 | : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory"); |
148 | 152 | ||
149 | return retval; | 153 | return retval; |
150 | } | 154 | } |
151 | 155 | ||
152 | #define change_bit(nr,vaddr) \ | 156 | #define change_bit(nr,vaddr) \ |
153 | (__builtin_constant_p(nr) ? \ | 157 | (__builtin_constant_p(nr) ? \ |
154 | __constant_change_bit(nr, vaddr) : \ | 158 | __constant_change_bit(nr, vaddr) : \ |
155 | __generic_change_bit(nr, vaddr)) | 159 | __generic_change_bit(nr, vaddr)) |
156 | 160 | ||
157 | static inline void __constant_change_bit(int nr, unsigned long *vaddr) | 161 | static inline void __constant_change_bit(int nr, unsigned long *vaddr) |
158 | { | 162 | { |
159 | char *p = (char *)vaddr + (nr ^ 31) / 8; | 163 | char *p = (char *)vaddr + (nr ^ 31) / 8; |
160 | __asm__ __volatile__ ("bchg %1,%0" | 164 | __asm__ __volatile__ ("bchg %1,%0" |
161 | : "+m" (*p) : "di" (nr & 7)); | 165 | : "+m" (*p) : "di" (nr & 7)); |
162 | } | 166 | } |
163 | 167 | ||
164 | static inline void __generic_change_bit(int nr, unsigned long *vaddr) | 168 | static inline void __generic_change_bit(int nr, unsigned long *vaddr) |
165 | { | 169 | { |
166 | __asm__ __volatile__ ("bfchg %1{%0:#1}" | 170 | __asm__ __volatile__ ("bfchg %1{%0:#1}" |
167 | : : "d" (nr^31), "o" (*vaddr) : "memory"); | 171 | : : "d" (nr^31), "o" (*vaddr) : "memory"); |
168 | } | 172 | } |
169 | 173 | ||
170 | static inline int test_bit(int nr, const unsigned long *vaddr) | 174 | static inline int test_bit(int nr, const unsigned long *vaddr) |
171 | { | 175 | { |
172 | return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0; | 176 | return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0; |
173 | } | 177 | } |
174 | 178 | ||
175 | static inline int find_first_zero_bit(const unsigned long *vaddr, | 179 | static inline int find_first_zero_bit(const unsigned long *vaddr, |
176 | unsigned size) | 180 | unsigned size) |
177 | { | 181 | { |
178 | const unsigned long *p = vaddr; | 182 | const unsigned long *p = vaddr; |
179 | int res = 32; | 183 | int res = 32; |
180 | unsigned long num; | 184 | unsigned long num; |
181 | 185 | ||
182 | if (!size) | 186 | if (!size) |
183 | return 0; | 187 | return 0; |
184 | 188 | ||
185 | size = (size + 31) >> 5; | 189 | size = (size + 31) >> 5; |
186 | while (!(num = ~*p++)) { | 190 | while (!(num = ~*p++)) { |
187 | if (!--size) | 191 | if (!--size) |
188 | goto out; | 192 | goto out; |
189 | } | 193 | } |
190 | 194 | ||
191 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" | 195 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" |
192 | : "=d" (res) : "d" (num & -num)); | 196 | : "=d" (res) : "d" (num & -num)); |
193 | res ^= 31; | 197 | res ^= 31; |
194 | out: | 198 | out: |
195 | return ((long)p - (long)vaddr - 4) * 8 + res; | 199 | return ((long)p - (long)vaddr - 4) * 8 + res; |
196 | } | 200 | } |
197 | 201 | ||
198 | static inline int find_next_zero_bit(const unsigned long *vaddr, int size, | 202 | static inline int find_next_zero_bit(const unsigned long *vaddr, int size, |
199 | int offset) | 203 | int offset) |
200 | { | 204 | { |
201 | const unsigned long *p = vaddr + (offset >> 5); | 205 | const unsigned long *p = vaddr + (offset >> 5); |
202 | int bit = offset & 31UL, res; | 206 | int bit = offset & 31UL, res; |
203 | 207 | ||
204 | if (offset >= size) | 208 | if (offset >= size) |
205 | return size; | 209 | return size; |
206 | 210 | ||
207 | if (bit) { | 211 | if (bit) { |
208 | unsigned long num = ~*p++ & (~0UL << bit); | 212 | unsigned long num = ~*p++ & (~0UL << bit); |
209 | offset -= bit; | 213 | offset -= bit; |
210 | 214 | ||
211 | /* Look for zero in first longword */ | 215 | /* Look for zero in first longword */ |
212 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" | 216 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" |
213 | : "=d" (res) : "d" (num & -num)); | 217 | : "=d" (res) : "d" (num & -num)); |
214 | if (res < 32) | 218 | if (res < 32) |
215 | return offset + (res ^ 31); | 219 | return offset + (res ^ 31); |
216 | offset += 32; | 220 | offset += 32; |
217 | } | 221 | } |
218 | /* No zero yet, search remaining full bytes for a zero */ | 222 | /* No zero yet, search remaining full bytes for a zero */ |
219 | res = find_first_zero_bit(p, size - ((long)p - (long)vaddr) * 8); | 223 | res = find_first_zero_bit(p, size - ((long)p - (long)vaddr) * 8); |
220 | return offset + res; | 224 | return offset + res; |
221 | } | 225 | } |
222 | 226 | ||
223 | static inline int find_first_bit(const unsigned long *vaddr, unsigned size) | 227 | static inline int find_first_bit(const unsigned long *vaddr, unsigned size) |
224 | { | 228 | { |
225 | const unsigned long *p = vaddr; | 229 | const unsigned long *p = vaddr; |
226 | int res = 32; | 230 | int res = 32; |
227 | unsigned long num; | 231 | unsigned long num; |
228 | 232 | ||
229 | if (!size) | 233 | if (!size) |
230 | return 0; | 234 | return 0; |
231 | 235 | ||
232 | size = (size + 31) >> 5; | 236 | size = (size + 31) >> 5; |
233 | while (!(num = *p++)) { | 237 | while (!(num = *p++)) { |
234 | if (!--size) | 238 | if (!--size) |
235 | goto out; | 239 | goto out; |
236 | } | 240 | } |
237 | 241 | ||
238 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" | 242 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" |
239 | : "=d" (res) : "d" (num & -num)); | 243 | : "=d" (res) : "d" (num & -num)); |
240 | res ^= 31; | 244 | res ^= 31; |
241 | out: | 245 | out: |
242 | return ((long)p - (long)vaddr - 4) * 8 + res; | 246 | return ((long)p - (long)vaddr - 4) * 8 + res; |
243 | } | 247 | } |
244 | 248 | ||
245 | static inline int find_next_bit(const unsigned long *vaddr, int size, | 249 | static inline int find_next_bit(const unsigned long *vaddr, int size, |
246 | int offset) | 250 | int offset) |
247 | { | 251 | { |
248 | const unsigned long *p = vaddr + (offset >> 5); | 252 | const unsigned long *p = vaddr + (offset >> 5); |
249 | int bit = offset & 31UL, res; | 253 | int bit = offset & 31UL, res; |
250 | 254 | ||
251 | if (offset >= size) | 255 | if (offset >= size) |
252 | return size; | 256 | return size; |
253 | 257 | ||
254 | if (bit) { | 258 | if (bit) { |
255 | unsigned long num = *p++ & (~0UL << bit); | 259 | unsigned long num = *p++ & (~0UL << bit); |
256 | offset -= bit; | 260 | offset -= bit; |
257 | 261 | ||
258 | /* Look for one in first longword */ | 262 | /* Look for one in first longword */ |
259 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" | 263 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" |
260 | : "=d" (res) : "d" (num & -num)); | 264 | : "=d" (res) : "d" (num & -num)); |
261 | if (res < 32) | 265 | if (res < 32) |
262 | return offset + (res ^ 31); | 266 | return offset + (res ^ 31); |
263 | offset += 32; | 267 | offset += 32; |
264 | } | 268 | } |
265 | /* No one yet, search remaining full bytes for a one */ | 269 | /* No one yet, search remaining full bytes for a one */ |
266 | res = find_first_bit(p, size - ((long)p - (long)vaddr) * 8); | 270 | res = find_first_bit(p, size - ((long)p - (long)vaddr) * 8); |
267 | return offset + res; | 271 | return offset + res; |
268 | } | 272 | } |
269 | 273 | ||
270 | /* | 274 | /* |
271 | * ffz = Find First Zero in word. Undefined if no zero exists, | 275 | * ffz = Find First Zero in word. Undefined if no zero exists, |
272 | * so code should check against ~0UL first.. | 276 | * so code should check against ~0UL first.. |
273 | */ | 277 | */ |
274 | static inline unsigned long ffz(unsigned long word) | 278 | static inline unsigned long ffz(unsigned long word) |
275 | { | 279 | { |
276 | int res; | 280 | int res; |
277 | 281 | ||
278 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" | 282 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" |
279 | : "=d" (res) : "d" (~word & -~word)); | 283 | : "=d" (res) : "d" (~word & -~word)); |
280 | return res ^ 31; | 284 | return res ^ 31; |
281 | } | 285 | } |
282 | 286 | ||
283 | #ifdef __KERNEL__ | 287 | #ifdef __KERNEL__ |
284 | 288 | ||
285 | /* | 289 | /* |
286 | * ffs: find first bit set. This is defined the same way as | 290 | * ffs: find first bit set. This is defined the same way as |
287 | * the libc and compiler builtin ffs routines, therefore | 291 | * the libc and compiler builtin ffs routines, therefore |
288 | * differs in spirit from the above ffz (man ffs). | 292 | * differs in spirit from the above ffz (man ffs). |
289 | */ | 293 | */ |
290 | 294 | ||
291 | static inline int ffs(int x) | 295 | static inline int ffs(int x) |
292 | { | 296 | { |
293 | int cnt; | 297 | int cnt; |
294 | 298 | ||
295 | asm ("bfffo %1{#0:#0},%0" : "=d" (cnt) : "dm" (x & -x)); | 299 | asm ("bfffo %1{#0:#0},%0" : "=d" (cnt) : "dm" (x & -x)); |
296 | 300 | ||
297 | return 32 - cnt; | 301 | return 32 - cnt; |
298 | } | 302 | } |
299 | #define __ffs(x) (ffs(x) - 1) | 303 | #define __ffs(x) (ffs(x) - 1) |
300 | 304 | ||
301 | /* | 305 | /* |
302 | * fls: find last bit set. | 306 | * fls: find last bit set. |
303 | */ | 307 | */ |
304 | 308 | ||
305 | static inline int fls(int x) | 309 | static inline int fls(int x) |
306 | { | 310 | { |
307 | int cnt; | 311 | int cnt; |
308 | 312 | ||
309 | asm ("bfffo %1{#0,#0},%0" : "=d" (cnt) : "dm" (x)); | 313 | asm ("bfffo %1{#0,#0},%0" : "=d" (cnt) : "dm" (x)); |
310 | 314 | ||
311 | return 32 - cnt; | 315 | return 32 - cnt; |
312 | } | 316 | } |
313 | 317 | ||
314 | #include <asm-generic/bitops/fls64.h> | 318 | #include <asm-generic/bitops/fls64.h> |
315 | #include <asm-generic/bitops/sched.h> | 319 | #include <asm-generic/bitops/sched.h> |
316 | #include <asm-generic/bitops/hweight.h> | 320 | #include <asm-generic/bitops/hweight.h> |
317 | #include <asm-generic/bitops/lock.h> | 321 | #include <asm-generic/bitops/lock.h> |
318 | 322 | ||
319 | /* Bitmap functions for the minix filesystem */ | 323 | /* Bitmap functions for the minix filesystem */ |
320 | 324 | ||
321 | static inline int minix_find_first_zero_bit(const void *vaddr, unsigned size) | 325 | static inline int minix_find_first_zero_bit(const void *vaddr, unsigned size) |
322 | { | 326 | { |
323 | const unsigned short *p = vaddr, *addr = vaddr; | 327 | const unsigned short *p = vaddr, *addr = vaddr; |
324 | int res; | 328 | int res; |
325 | unsigned short num; | 329 | unsigned short num; |
326 | 330 | ||
327 | if (!size) | 331 | if (!size) |
328 | return 0; | 332 | return 0; |
329 | 333 | ||
330 | size = (size >> 4) + ((size & 15) > 0); | 334 | size = (size >> 4) + ((size & 15) > 0); |
331 | while (*p++ == 0xffff) | 335 | while (*p++ == 0xffff) |
332 | { | 336 | { |
333 | if (--size == 0) | 337 | if (--size == 0) |
334 | return (p - addr) << 4; | 338 | return (p - addr) << 4; |
335 | } | 339 | } |
336 | 340 | ||
337 | num = ~*--p; | 341 | num = ~*--p; |
338 | __asm__ __volatile__ ("bfffo %1{#16,#16},%0" | 342 | __asm__ __volatile__ ("bfffo %1{#16,#16},%0" |
339 | : "=d" (res) : "d" (num & -num)); | 343 | : "=d" (res) : "d" (num & -num)); |
340 | return ((p - addr) << 4) + (res ^ 31); | 344 | return ((p - addr) << 4) + (res ^ 31); |
341 | } | 345 | } |
342 | 346 | ||
343 | #define minix_test_and_set_bit(nr, addr) __test_and_set_bit((nr) ^ 16, (unsigned long *)(addr)) | 347 | #define minix_test_and_set_bit(nr, addr) __test_and_set_bit((nr) ^ 16, (unsigned long *)(addr)) |
344 | #define minix_set_bit(nr,addr) __set_bit((nr) ^ 16, (unsigned long *)(addr)) | 348 | #define minix_set_bit(nr,addr) __set_bit((nr) ^ 16, (unsigned long *)(addr)) |
345 | #define minix_test_and_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 16, (unsigned long *)(addr)) | 349 | #define minix_test_and_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 16, (unsigned long *)(addr)) |
346 | 350 | ||
347 | static inline int minix_test_bit(int nr, const void *vaddr) | 351 | static inline int minix_test_bit(int nr, const void *vaddr) |
348 | { | 352 | { |
349 | const unsigned short *p = vaddr; | 353 | const unsigned short *p = vaddr; |
350 | return (p[nr >> 4] & (1U << (nr & 15))) != 0; | 354 | return (p[nr >> 4] & (1U << (nr & 15))) != 0; |
351 | } | 355 | } |
352 | 356 | ||
353 | /* Bitmap functions for the ext2 filesystem. */ | 357 | /* Bitmap functions for the ext2 filesystem. */ |
354 | 358 | ||
355 | #define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 24, (unsigned long *)(addr)) | 359 | #define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 24, (unsigned long *)(addr)) |
356 | #define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 24, (unsigned long *)(addr)) | 360 | #define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 24, (unsigned long *)(addr)) |
357 | #define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr)) | 361 | #define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr)) |
358 | #define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr)) | 362 | #define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr)) |
359 | 363 | ||
360 | static inline int ext2_test_bit(int nr, const void *vaddr) | 364 | static inline int ext2_test_bit(int nr, const void *vaddr) |
361 | { | 365 | { |
362 | const unsigned char *p = vaddr; | 366 | const unsigned char *p = vaddr; |
363 | return (p[nr >> 3] & (1U << (nr & 7))) != 0; | 367 | return (p[nr >> 3] & (1U << (nr & 7))) != 0; |
364 | } | 368 | } |
365 | 369 | ||
366 | static inline int ext2_find_first_zero_bit(const void *vaddr, unsigned size) | 370 | static inline int ext2_find_first_zero_bit(const void *vaddr, unsigned size) |
367 | { | 371 | { |
368 | const unsigned long *p = vaddr, *addr = vaddr; | 372 | const unsigned long *p = vaddr, *addr = vaddr; |
369 | int res; | 373 | int res; |
370 | 374 | ||
371 | if (!size) | 375 | if (!size) |
372 | return 0; | 376 | return 0; |
373 | 377 | ||
374 | size = (size >> 5) + ((size & 31) > 0); | 378 | size = (size >> 5) + ((size & 31) > 0); |
375 | while (*p++ == ~0UL) | 379 | while (*p++ == ~0UL) |
376 | { | 380 | { |
377 | if (--size == 0) | 381 | if (--size == 0) |
378 | return (p - addr) << 5; | 382 | return (p - addr) << 5; |
379 | } | 383 | } |
380 | 384 | ||
381 | --p; | 385 | --p; |
382 | for (res = 0; res < 32; res++) | 386 | for (res = 0; res < 32; res++) |
383 | if (!ext2_test_bit (res, p)) | 387 | if (!ext2_test_bit (res, p)) |
384 | break; | 388 | break; |
385 | return (p - addr) * 32 + res; | 389 | return (p - addr) * 32 + res; |
386 | } | 390 | } |
387 | 391 | ||
388 | static inline int ext2_find_next_zero_bit(const void *vaddr, unsigned size, | 392 | static inline int ext2_find_next_zero_bit(const void *vaddr, unsigned size, |
389 | unsigned offset) | 393 | unsigned offset) |
390 | { | 394 | { |
391 | const unsigned long *addr = vaddr; | 395 | const unsigned long *addr = vaddr; |
392 | const unsigned long *p = addr + (offset >> 5); | 396 | const unsigned long *p = addr + (offset >> 5); |
393 | int bit = offset & 31UL, res; | 397 | int bit = offset & 31UL, res; |
394 | 398 | ||
395 | if (offset >= size) | 399 | if (offset >= size) |
396 | return size; | 400 | return size; |
397 | 401 | ||
398 | if (bit) { | 402 | if (bit) { |
399 | /* Look for zero in first longword */ | 403 | /* Look for zero in first longword */ |
400 | for (res = bit; res < 32; res++) | 404 | for (res = bit; res < 32; res++) |
401 | if (!ext2_test_bit (res, p)) | 405 | if (!ext2_test_bit (res, p)) |
402 | return (p - addr) * 32 + res; | 406 | return (p - addr) * 32 + res; |
403 | p++; | 407 | p++; |
404 | } | 408 | } |
405 | /* No zero yet, search remaining full bytes for a zero */ | 409 | /* No zero yet, search remaining full bytes for a zero */ |
406 | res = ext2_find_first_zero_bit (p, size - 32 * (p - addr)); | 410 | res = ext2_find_first_zero_bit (p, size - 32 * (p - addr)); |
407 | return (p - addr) * 32 + res; | 411 | return (p - addr) * 32 + res; |
408 | } | 412 | } |
409 | 413 | ||
410 | #endif /* __KERNEL__ */ | 414 | #endif /* __KERNEL__ */ |
411 | 415 | ||
412 | #endif /* _M68K_BITOPS_H */ | 416 | #endif /* _M68K_BITOPS_H */ |
413 | 417 |
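The m68k helpers above address bytes with (nr ^ 31) / 8 and operate on bit nr & 7 within that byte, so bit 0 of a 32-bit word lands in the LSB of its last (big-endian) byte, matching the 1UL << (nr & 31) convention used by test_bit(). A stand-alone user-space sketch of that mapping (not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		for (int nr = 0; nr < 32; nr++) {
			int byte = (nr ^ 31) / 8;	/* byte offset handed to bset/bclr/bchg */
			int bit  = nr & 7;		/* bit number within that byte */
			printf("nr=%2d -> byte %d, bit %d\n", nr, byte, bit);
		}
		return 0;
	}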
include/asm-m68knommu/bitops.h
1 | #ifndef _M68KNOMMU_BITOPS_H | 1 | #ifndef _M68KNOMMU_BITOPS_H |
2 | #define _M68KNOMMU_BITOPS_H | 2 | #define _M68KNOMMU_BITOPS_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Copyright 1992, Linus Torvalds. | 5 | * Copyright 1992, Linus Torvalds. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/compiler.h> | 8 | #include <linux/compiler.h> |
9 | #include <asm/byteorder.h> /* swab32 */ | 9 | #include <asm/byteorder.h> /* swab32 */ |
10 | 10 | ||
11 | #ifdef __KERNEL__ | 11 | #ifdef __KERNEL__ |
12 | 12 | ||
13 | #ifndef _LINUX_BITOPS_H | ||
14 | #error only <linux/bitops.h> can be included directly | ||
15 | #endif | ||
16 | |||
13 | #include <asm-generic/bitops/ffs.h> | 17 | #include <asm-generic/bitops/ffs.h> |
14 | #include <asm-generic/bitops/__ffs.h> | 18 | #include <asm-generic/bitops/__ffs.h> |
15 | #include <asm-generic/bitops/sched.h> | 19 | #include <asm-generic/bitops/sched.h> |
16 | #include <asm-generic/bitops/ffz.h> | 20 | #include <asm-generic/bitops/ffz.h> |
17 | 21 | ||
18 | static __inline__ void set_bit(int nr, volatile unsigned long * addr) | 22 | static __inline__ void set_bit(int nr, volatile unsigned long * addr) |
19 | { | 23 | { |
20 | #ifdef CONFIG_COLDFIRE | 24 | #ifdef CONFIG_COLDFIRE |
21 | __asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)" | 25 | __asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)" |
22 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 26 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
23 | : "d" (nr) | 27 | : "d" (nr) |
24 | : "%a0", "cc"); | 28 | : "%a0", "cc"); |
25 | #else | 29 | #else |
26 | __asm__ __volatile__ ("bset %1,%0" | 30 | __asm__ __volatile__ ("bset %1,%0" |
27 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 31 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
28 | : "di" (nr) | 32 | : "di" (nr) |
29 | : "cc"); | 33 | : "cc"); |
30 | #endif | 34 | #endif |
31 | } | 35 | } |
32 | 36 | ||
33 | #define __set_bit(nr, addr) set_bit(nr, addr) | 37 | #define __set_bit(nr, addr) set_bit(nr, addr) |
34 | 38 | ||
35 | /* | 39 | /* |
36 | * clear_bit() doesn't provide any barrier for the compiler. | 40 | * clear_bit() doesn't provide any barrier for the compiler. |
37 | */ | 41 | */ |
38 | #define smp_mb__before_clear_bit() barrier() | 42 | #define smp_mb__before_clear_bit() barrier() |
39 | #define smp_mb__after_clear_bit() barrier() | 43 | #define smp_mb__after_clear_bit() barrier() |
40 | 44 | ||
41 | static __inline__ void clear_bit(int nr, volatile unsigned long * addr) | 45 | static __inline__ void clear_bit(int nr, volatile unsigned long * addr) |
42 | { | 46 | { |
43 | #ifdef CONFIG_COLDFIRE | 47 | #ifdef CONFIG_COLDFIRE |
44 | __asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)" | 48 | __asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)" |
45 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 49 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
46 | : "d" (nr) | 50 | : "d" (nr) |
47 | : "%a0", "cc"); | 51 | : "%a0", "cc"); |
48 | #else | 52 | #else |
49 | __asm__ __volatile__ ("bclr %1,%0" | 53 | __asm__ __volatile__ ("bclr %1,%0" |
50 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 54 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
51 | : "di" (nr) | 55 | : "di" (nr) |
52 | : "cc"); | 56 | : "cc"); |
53 | #endif | 57 | #endif |
54 | } | 58 | } |
55 | 59 | ||
56 | #define __clear_bit(nr, addr) clear_bit(nr, addr) | 60 | #define __clear_bit(nr, addr) clear_bit(nr, addr) |
57 | 61 | ||
58 | static __inline__ void change_bit(int nr, volatile unsigned long * addr) | 62 | static __inline__ void change_bit(int nr, volatile unsigned long * addr) |
59 | { | 63 | { |
60 | #ifdef CONFIG_COLDFIRE | 64 | #ifdef CONFIG_COLDFIRE |
61 | __asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)" | 65 | __asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)" |
62 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 66 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
63 | : "d" (nr) | 67 | : "d" (nr) |
64 | : "%a0", "cc"); | 68 | : "%a0", "cc"); |
65 | #else | 69 | #else |
66 | __asm__ __volatile__ ("bchg %1,%0" | 70 | __asm__ __volatile__ ("bchg %1,%0" |
67 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 71 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
68 | : "di" (nr) | 72 | : "di" (nr) |
69 | : "cc"); | 73 | : "cc"); |
70 | #endif | 74 | #endif |
71 | } | 75 | } |
72 | 76 | ||
73 | #define __change_bit(nr, addr) change_bit(nr, addr) | 77 | #define __change_bit(nr, addr) change_bit(nr, addr) |
74 | 78 | ||
75 | static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr) | 79 | static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr) |
76 | { | 80 | { |
77 | char retval; | 81 | char retval; |
78 | 82 | ||
79 | #ifdef CONFIG_COLDFIRE | 83 | #ifdef CONFIG_COLDFIRE |
80 | __asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0" | 84 | __asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0" |
81 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 85 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
82 | : "d" (nr) | 86 | : "d" (nr) |
83 | : "%a0"); | 87 | : "%a0"); |
84 | #else | 88 | #else |
85 | __asm__ __volatile__ ("bset %2,%1; sne %0" | 89 | __asm__ __volatile__ ("bset %2,%1; sne %0" |
86 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 90 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
87 | : "di" (nr) | 91 | : "di" (nr) |
88 | /* No clobber */); | 92 | /* No clobber */); |
89 | #endif | 93 | #endif |
90 | 94 | ||
91 | return retval; | 95 | return retval; |
92 | } | 96 | } |
93 | 97 | ||
94 | #define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr) | 98 | #define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr) |
95 | 99 | ||
96 | static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr) | 100 | static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr) |
97 | { | 101 | { |
98 | char retval; | 102 | char retval; |
99 | 103 | ||
100 | #ifdef CONFIG_COLDFIRE | 104 | #ifdef CONFIG_COLDFIRE |
101 | __asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0" | 105 | __asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0" |
102 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 106 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
103 | : "d" (nr) | 107 | : "d" (nr) |
104 | : "%a0"); | 108 | : "%a0"); |
105 | #else | 109 | #else |
106 | __asm__ __volatile__ ("bclr %2,%1; sne %0" | 110 | __asm__ __volatile__ ("bclr %2,%1; sne %0" |
107 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 111 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
108 | : "di" (nr) | 112 | : "di" (nr) |
109 | /* No clobber */); | 113 | /* No clobber */); |
110 | #endif | 114 | #endif |
111 | 115 | ||
112 | return retval; | 116 | return retval; |
113 | } | 117 | } |
114 | 118 | ||
115 | #define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr) | 119 | #define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr) |
116 | 120 | ||
117 | static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr) | 121 | static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr) |
118 | { | 122 | { |
119 | char retval; | 123 | char retval; |
120 | 124 | ||
121 | #ifdef CONFIG_COLDFIRE | 125 | #ifdef CONFIG_COLDFIRE |
122 | __asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0" | 126 | __asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0" |
123 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 127 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
124 | : "d" (nr) | 128 | : "d" (nr) |
125 | : "%a0"); | 129 | : "%a0"); |
126 | #else | 130 | #else |
127 | __asm__ __volatile__ ("bchg %2,%1; sne %0" | 131 | __asm__ __volatile__ ("bchg %2,%1; sne %0" |
128 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 132 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
129 | : "di" (nr) | 133 | : "di" (nr) |
130 | /* No clobber */); | 134 | /* No clobber */); |
131 | #endif | 135 | #endif |
132 | 136 | ||
133 | return retval; | 137 | return retval; |
134 | } | 138 | } |
135 | 139 | ||
136 | #define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr) | 140 | #define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr) |
137 | 141 | ||
138 | /* | 142 | /* |
139 | * This routine doesn't need to be atomic. | 143 | * This routine doesn't need to be atomic. |
140 | */ | 144 | */ |
141 | static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr) | 145 | static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr) |
142 | { | 146 | { |
143 | return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; | 147 | return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; |
144 | } | 148 | } |
145 | 149 | ||
146 | static __inline__ int __test_bit(int nr, const volatile unsigned long * addr) | 150 | static __inline__ int __test_bit(int nr, const volatile unsigned long * addr) |
147 | { | 151 | { |
148 | int * a = (int *) addr; | 152 | int * a = (int *) addr; |
149 | int mask; | 153 | int mask; |
150 | 154 | ||
151 | a += nr >> 5; | 155 | a += nr >> 5; |
152 | mask = 1 << (nr & 0x1f); | 156 | mask = 1 << (nr & 0x1f); |
153 | return ((mask & *a) != 0); | 157 | return ((mask & *a) != 0); |
154 | } | 158 | } |
155 | 159 | ||
156 | #define test_bit(nr,addr) \ | 160 | #define test_bit(nr,addr) \ |
157 | (__builtin_constant_p(nr) ? \ | 161 | (__builtin_constant_p(nr) ? \ |
158 | __constant_test_bit((nr),(addr)) : \ | 162 | __constant_test_bit((nr),(addr)) : \ |
159 | __test_bit((nr),(addr))) | 163 | __test_bit((nr),(addr))) |
160 | 164 | ||
161 | #include <asm-generic/bitops/find.h> | 165 | #include <asm-generic/bitops/find.h> |
162 | #include <asm-generic/bitops/hweight.h> | 166 | #include <asm-generic/bitops/hweight.h> |
163 | #include <asm-generic/bitops/lock.h> | 167 | #include <asm-generic/bitops/lock.h> |
164 | 168 | ||
165 | static __inline__ int ext2_set_bit(int nr, volatile void * addr) | 169 | static __inline__ int ext2_set_bit(int nr, volatile void * addr) |
166 | { | 170 | { |
167 | char retval; | 171 | char retval; |
168 | 172 | ||
169 | #ifdef CONFIG_COLDFIRE | 173 | #ifdef CONFIG_COLDFIRE |
170 | __asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0" | 174 | __asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0" |
171 | : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3]) | 175 | : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3]) |
172 | : "d" (nr) | 176 | : "d" (nr) |
173 | : "%a0"); | 177 | : "%a0"); |
174 | #else | 178 | #else |
175 | __asm__ __volatile__ ("bset %2,%1; sne %0" | 179 | __asm__ __volatile__ ("bset %2,%1; sne %0" |
176 | : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3]) | 180 | : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3]) |
177 | : "di" (nr) | 181 | : "di" (nr) |
178 | /* No clobber */); | 182 | /* No clobber */); |
179 | #endif | 183 | #endif |
180 | 184 | ||
181 | return retval; | 185 | return retval; |
182 | } | 186 | } |
183 | 187 | ||
184 | static __inline__ int ext2_clear_bit(int nr, volatile void * addr) | 188 | static __inline__ int ext2_clear_bit(int nr, volatile void * addr) |
185 | { | 189 | { |
186 | char retval; | 190 | char retval; |
187 | 191 | ||
188 | #ifdef CONFIG_COLDFIRE | 192 | #ifdef CONFIG_COLDFIRE |
189 | __asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0" | 193 | __asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0" |
190 | : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3]) | 194 | : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3]) |
191 | : "d" (nr) | 195 | : "d" (nr) |
192 | : "%a0"); | 196 | : "%a0"); |
193 | #else | 197 | #else |
194 | __asm__ __volatile__ ("bclr %2,%1; sne %0" | 198 | __asm__ __volatile__ ("bclr %2,%1; sne %0" |
195 | : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3]) | 199 | : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3]) |
196 | : "di" (nr) | 200 | : "di" (nr) |
197 | /* No clobber */); | 201 | /* No clobber */); |
198 | #endif | 202 | #endif |
199 | 203 | ||
200 | return retval; | 204 | return retval; |
201 | } | 205 | } |
202 | 206 | ||
203 | #define ext2_set_bit_atomic(lock, nr, addr) \ | 207 | #define ext2_set_bit_atomic(lock, nr, addr) \ |
204 | ({ \ | 208 | ({ \ |
205 | int ret; \ | 209 | int ret; \ |
206 | spin_lock(lock); \ | 210 | spin_lock(lock); \ |
207 | ret = ext2_set_bit((nr), (addr)); \ | 211 | ret = ext2_set_bit((nr), (addr)); \ |
208 | spin_unlock(lock); \ | 212 | spin_unlock(lock); \ |
209 | ret; \ | 213 | ret; \ |
210 | }) | 214 | }) |
211 | 215 | ||
212 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | 216 | #define ext2_clear_bit_atomic(lock, nr, addr) \ |
213 | ({ \ | 217 | ({ \ |
214 | int ret; \ | 218 | int ret; \ |
215 | spin_lock(lock); \ | 219 | spin_lock(lock); \ |
216 | ret = ext2_clear_bit((nr), (addr)); \ | 220 | ret = ext2_clear_bit((nr), (addr)); \ |
217 | spin_unlock(lock); \ | 221 | spin_unlock(lock); \ |
218 | ret; \ | 222 | ret; \ |
219 | }) | 223 | }) |
220 | 224 | ||
221 | static __inline__ int ext2_test_bit(int nr, const volatile void * addr) | 225 | static __inline__ int ext2_test_bit(int nr, const volatile void * addr) |
222 | { | 226 | { |
223 | char retval; | 227 | char retval; |
224 | 228 | ||
225 | #ifdef CONFIG_COLDFIRE | 229 | #ifdef CONFIG_COLDFIRE |
226 | __asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0" | 230 | __asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0" |
227 | : "=d" (retval) | 231 | : "=d" (retval) |
228 | : "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr) | 232 | : "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr) |
229 | : "%a0"); | 233 | : "%a0"); |
230 | #else | 234 | #else |
231 | __asm__ __volatile__ ("btst %2,%1; sne %0" | 235 | __asm__ __volatile__ ("btst %2,%1; sne %0" |
232 | : "=d" (retval) | 236 | : "=d" (retval) |
233 | : "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr) | 237 | : "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr) |
234 | /* No clobber */); | 238 | /* No clobber */); |
235 | #endif | 239 | #endif |
236 | 240 | ||
237 | return retval; | 241 | return retval; |
238 | } | 242 | } |
239 | 243 | ||
240 | #define ext2_find_first_zero_bit(addr, size) \ | 244 | #define ext2_find_first_zero_bit(addr, size) \ |
241 | ext2_find_next_zero_bit((addr), (size), 0) | 245 | ext2_find_next_zero_bit((addr), (size), 0) |
242 | 246 | ||
243 | static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset) | 247 | static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset) |
244 | { | 248 | { |
245 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | 249 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); |
246 | unsigned long result = offset & ~31UL; | 250 | unsigned long result = offset & ~31UL; |
247 | unsigned long tmp; | 251 | unsigned long tmp; |
248 | 252 | ||
249 | if (offset >= size) | 253 | if (offset >= size) |
250 | return size; | 254 | return size; |
251 | size -= result; | 255 | size -= result; |
252 | offset &= 31UL; | 256 | offset &= 31UL; |
253 | if(offset) { | 257 | if(offset) { |
254 | /* We hold the little endian value in tmp, but then the | 258 | /* We hold the little endian value in tmp, but then the |
255 | * shift is illegal. So we could keep a big endian value | 259 | * shift is illegal. So we could keep a big endian value |
256 | * in tmp, like this: | 260 | * in tmp, like this: |
257 | * | 261 | * |
258 | * tmp = __swab32(*(p++)); | 262 | * tmp = __swab32(*(p++)); |
259 | * tmp |= ~0UL >> (32-offset); | 263 | * tmp |= ~0UL >> (32-offset); |
260 | * | 264 | * |
261 | * but this would decrease performance, so we change the | 265 | * but this would decrease performance, so we change the |
262 | * shift: | 266 | * shift: |
263 | */ | 267 | */ |
264 | tmp = *(p++); | 268 | tmp = *(p++); |
265 | tmp |= __swab32(~0UL >> (32-offset)); | 269 | tmp |= __swab32(~0UL >> (32-offset)); |
266 | if(size < 32) | 270 | if(size < 32) |
267 | goto found_first; | 271 | goto found_first; |
268 | if(~tmp) | 272 | if(~tmp) |
269 | goto found_middle; | 273 | goto found_middle; |
270 | size -= 32; | 274 | size -= 32; |
271 | result += 32; | 275 | result += 32; |
272 | } | 276 | } |
273 | while(size & ~31UL) { | 277 | while(size & ~31UL) { |
274 | if(~(tmp = *(p++))) | 278 | if(~(tmp = *(p++))) |
275 | goto found_middle; | 279 | goto found_middle; |
276 | result += 32; | 280 | result += 32; |
277 | size -= 32; | 281 | size -= 32; |
278 | } | 282 | } |
279 | if(!size) | 283 | if(!size) |
280 | return result; | 284 | return result; |
281 | tmp = *p; | 285 | tmp = *p; |
282 | 286 | ||
283 | found_first: | 287 | found_first: |
284 | /* tmp is little endian, so we would have to swab the shift, | 288 | /* tmp is little endian, so we would have to swab the shift, |
285 | * see above. But then we have to swab tmp below for ffz, so | 289 | * see above. But then we have to swab tmp below for ffz, so |
286 | * we might as well do this here. | 290 | * we might as well do this here. |
287 | */ | 291 | */ |
288 | return result + ffz(__swab32(tmp) | (~0UL << size)); | 292 | return result + ffz(__swab32(tmp) | (~0UL << size)); |
289 | found_middle: | 293 | found_middle: |
290 | return result + ffz(__swab32(tmp)); | 294 | return result + ffz(__swab32(tmp)); |
291 | } | 295 | } |
292 | 296 | ||
293 | #include <asm-generic/bitops/minix.h> | 297 | #include <asm-generic/bitops/minix.h> |
294 | 298 | ||
295 | #endif /* __KERNEL__ */ | 299 | #endif /* __KERNEL__ */ |
296 | 300 | ||
297 | #include <asm-generic/bitops/fls.h> | 301 | #include <asm-generic/bitops/fls.h> |
298 | #include <asm-generic/bitops/fls64.h> | 302 | #include <asm-generic/bitops/fls64.h> |
299 | 303 | ||
300 | #endif /* _M68KNOMMU_BITOPS_H */ | 304 | #endif /* _M68KNOMMU_BITOPS_H */ |
301 | 305 |
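On m68knommu the ext2_*_bit_atomic() macros above take a caller-supplied spinlock and return the previous bit value as a statement expression. A hypothetical usage sketch (the lock and bitmap names are made up for illustration):

	static spinlock_t bitmap_lock;		/* assumed initialized elsewhere */
	static unsigned long bitmap[4];		/* 128-bit allocation map */

	static int claim_first_free(void)
	{
		int nr = ext2_find_first_zero_bit(bitmap, 128);

		if (nr >= 128)
			return -1;			/* no free bit */
		if (ext2_set_bit_atomic(&bitmap_lock, nr, bitmap))
			return -1;			/* raced with another setter */
		return nr;
	}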
include/asm-mips/bitops.h
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (c) 1994 - 1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org) | 6 | * Copyright (c) 1994 - 1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org) |
7 | * Copyright (c) 1999, 2000 Silicon Graphics, Inc. | 7 | * Copyright (c) 1999, 2000 Silicon Graphics, Inc. |
8 | */ | 8 | */ |
9 | #ifndef _ASM_BITOPS_H | 9 | #ifndef _ASM_BITOPS_H |
10 | #define _ASM_BITOPS_H | 10 | #define _ASM_BITOPS_H |
11 | 11 | ||
12 | #ifndef _LINUX_BITOPS_H | ||
13 | #error only <linux/bitops.h> can be included directly | ||
14 | #endif | ||
15 | |||
12 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
13 | #include <linux/irqflags.h> | 17 | #include <linux/irqflags.h> |
14 | #include <linux/types.h> | 18 | #include <linux/types.h> |
15 | #include <asm/barrier.h> | 19 | #include <asm/barrier.h> |
16 | #include <asm/bug.h> | 20 | #include <asm/bug.h> |
17 | #include <asm/byteorder.h> /* sigh ... */ | 21 | #include <asm/byteorder.h> /* sigh ... */ |
18 | #include <asm/cpu-features.h> | 22 | #include <asm/cpu-features.h> |
19 | #include <asm/sgidefs.h> | 23 | #include <asm/sgidefs.h> |
20 | #include <asm/war.h> | 24 | #include <asm/war.h> |
21 | 25 | ||
22 | #if _MIPS_SZLONG == 32 | 26 | #if _MIPS_SZLONG == 32 |
23 | #define SZLONG_LOG 5 | 27 | #define SZLONG_LOG 5 |
24 | #define SZLONG_MASK 31UL | 28 | #define SZLONG_MASK 31UL |
25 | #define __LL "ll " | 29 | #define __LL "ll " |
26 | #define __SC "sc " | 30 | #define __SC "sc " |
27 | #define __INS "ins " | 31 | #define __INS "ins " |
28 | #define __EXT "ext " | 32 | #define __EXT "ext " |
29 | #elif _MIPS_SZLONG == 64 | 33 | #elif _MIPS_SZLONG == 64 |
30 | #define SZLONG_LOG 6 | 34 | #define SZLONG_LOG 6 |
31 | #define SZLONG_MASK 63UL | 35 | #define SZLONG_MASK 63UL |
32 | #define __LL "lld " | 36 | #define __LL "lld " |
33 | #define __SC "scd " | 37 | #define __SC "scd " |
34 | #define __INS "dins " | 38 | #define __INS "dins " |
35 | #define __EXT "dext " | 39 | #define __EXT "dext " |
36 | #endif | 40 | #endif |
37 | 41 | ||
38 | /* | 42 | /* |
39 | * clear_bit() doesn't provide any barrier for the compiler. | 43 | * clear_bit() doesn't provide any barrier for the compiler. |
40 | */ | 44 | */ |
41 | #define smp_mb__before_clear_bit() smp_llsc_mb() | 45 | #define smp_mb__before_clear_bit() smp_llsc_mb() |
42 | #define smp_mb__after_clear_bit() smp_llsc_mb() | 46 | #define smp_mb__after_clear_bit() smp_llsc_mb() |
43 | 47 | ||
44 | /* | 48 | /* |
45 | * set_bit - Atomically set a bit in memory | 49 | * set_bit - Atomically set a bit in memory |
46 | * @nr: the bit to set | 50 | * @nr: the bit to set |
47 | * @addr: the address to start counting from | 51 | * @addr: the address to start counting from |
48 | * | 52 | * |
49 | * This function is atomic and may not be reordered. See __set_bit() | 53 | * This function is atomic and may not be reordered. See __set_bit() |
50 | * if you do not require the atomic guarantees. | 54 | * if you do not require the atomic guarantees. |
51 | * Note that @nr may be almost arbitrarily large; this function is not | 55 | * Note that @nr may be almost arbitrarily large; this function is not |
52 | * restricted to acting on a single-word quantity. | 56 | * restricted to acting on a single-word quantity. |
53 | */ | 57 | */ |
54 | static inline void set_bit(unsigned long nr, volatile unsigned long *addr) | 58 | static inline void set_bit(unsigned long nr, volatile unsigned long *addr) |
55 | { | 59 | { |
56 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 60 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
57 | unsigned short bit = nr & SZLONG_MASK; | 61 | unsigned short bit = nr & SZLONG_MASK; |
58 | unsigned long temp; | 62 | unsigned long temp; |
59 | 63 | ||
60 | if (cpu_has_llsc && R10000_LLSC_WAR) { | 64 | if (cpu_has_llsc && R10000_LLSC_WAR) { |
61 | __asm__ __volatile__( | 65 | __asm__ __volatile__( |
62 | " .set mips3 \n" | 66 | " .set mips3 \n" |
63 | "1: " __LL "%0, %1 # set_bit \n" | 67 | "1: " __LL "%0, %1 # set_bit \n" |
64 | " or %0, %2 \n" | 68 | " or %0, %2 \n" |
65 | " " __SC "%0, %1 \n" | 69 | " " __SC "%0, %1 \n" |
66 | " beqzl %0, 1b \n" | 70 | " beqzl %0, 1b \n" |
67 | " .set mips0 \n" | 71 | " .set mips0 \n" |
68 | : "=&r" (temp), "=m" (*m) | 72 | : "=&r" (temp), "=m" (*m) |
69 | : "ir" (1UL << bit), "m" (*m)); | 73 | : "ir" (1UL << bit), "m" (*m)); |
70 | #ifdef CONFIG_CPU_MIPSR2 | 74 | #ifdef CONFIG_CPU_MIPSR2 |
71 | } else if (__builtin_constant_p(bit)) { | 75 | } else if (__builtin_constant_p(bit)) { |
72 | __asm__ __volatile__( | 76 | __asm__ __volatile__( |
73 | "1: " __LL "%0, %1 # set_bit \n" | 77 | "1: " __LL "%0, %1 # set_bit \n" |
74 | " " __INS "%0, %4, %2, 1 \n" | 78 | " " __INS "%0, %4, %2, 1 \n" |
75 | " " __SC "%0, %1 \n" | 79 | " " __SC "%0, %1 \n" |
76 | " beqz %0, 2f \n" | 80 | " beqz %0, 2f \n" |
77 | " .subsection 2 \n" | 81 | " .subsection 2 \n" |
78 | "2: b 1b \n" | 82 | "2: b 1b \n" |
79 | " .previous \n" | 83 | " .previous \n" |
80 | : "=&r" (temp), "=m" (*m) | 84 | : "=&r" (temp), "=m" (*m) |
81 | : "ir" (bit), "m" (*m), "r" (~0)); | 85 | : "ir" (bit), "m" (*m), "r" (~0)); |
82 | #endif /* CONFIG_CPU_MIPSR2 */ | 86 | #endif /* CONFIG_CPU_MIPSR2 */ |
83 | } else if (cpu_has_llsc) { | 87 | } else if (cpu_has_llsc) { |
84 | __asm__ __volatile__( | 88 | __asm__ __volatile__( |
85 | " .set mips3 \n" | 89 | " .set mips3 \n" |
86 | "1: " __LL "%0, %1 # set_bit \n" | 90 | "1: " __LL "%0, %1 # set_bit \n" |
87 | " or %0, %2 \n" | 91 | " or %0, %2 \n" |
88 | " " __SC "%0, %1 \n" | 92 | " " __SC "%0, %1 \n" |
89 | " beqz %0, 2f \n" | 93 | " beqz %0, 2f \n" |
90 | " .subsection 2 \n" | 94 | " .subsection 2 \n" |
91 | "2: b 1b \n" | 95 | "2: b 1b \n" |
92 | " .previous \n" | 96 | " .previous \n" |
93 | " .set mips0 \n" | 97 | " .set mips0 \n" |
94 | : "=&r" (temp), "=m" (*m) | 98 | : "=&r" (temp), "=m" (*m) |
95 | : "ir" (1UL << bit), "m" (*m)); | 99 | : "ir" (1UL << bit), "m" (*m)); |
96 | } else { | 100 | } else { |
97 | volatile unsigned long *a = addr; | 101 | volatile unsigned long *a = addr; |
98 | unsigned long mask; | 102 | unsigned long mask; |
99 | unsigned long flags; | 103 | unsigned long flags; |
100 | 104 | ||
101 | a += nr >> SZLONG_LOG; | 105 | a += nr >> SZLONG_LOG; |
102 | mask = 1UL << bit; | 106 | mask = 1UL << bit; |
103 | raw_local_irq_save(flags); | 107 | raw_local_irq_save(flags); |
104 | *a |= mask; | 108 | *a |= mask; |
105 | raw_local_irq_restore(flags); | 109 | raw_local_irq_restore(flags); |
106 | } | 110 | } |
107 | } | 111 | } |
108 | 112 | ||
109 | /* | 113 | /* |
110 | * clear_bit - Clears a bit in memory | 114 | * clear_bit - Clears a bit in memory |
111 | * @nr: Bit to clear | 115 | * @nr: Bit to clear |
112 | * @addr: Address to start counting from | 116 | * @addr: Address to start counting from |
113 | * | 117 | * |
114 | * clear_bit() is atomic and may not be reordered. However, it does | 118 | * clear_bit() is atomic and may not be reordered. However, it does |
115 | * not contain a memory barrier, so if it is used for locking purposes, | 119 | * not contain a memory barrier, so if it is used for locking purposes, |
116 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | 120 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() |
117 | * in order to ensure changes are visible on other processors. | 121 | * in order to ensure changes are visible on other processors. |
118 | */ | 122 | */ |
119 | static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) | 123 | static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) |
120 | { | 124 | { |
121 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 125 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
122 | unsigned short bit = nr & SZLONG_MASK; | 126 | unsigned short bit = nr & SZLONG_MASK; |
123 | unsigned long temp; | 127 | unsigned long temp; |
124 | 128 | ||
125 | if (cpu_has_llsc && R10000_LLSC_WAR) { | 129 | if (cpu_has_llsc && R10000_LLSC_WAR) { |
126 | __asm__ __volatile__( | 130 | __asm__ __volatile__( |
127 | " .set mips3 \n" | 131 | " .set mips3 \n" |
128 | "1: " __LL "%0, %1 # clear_bit \n" | 132 | "1: " __LL "%0, %1 # clear_bit \n" |
129 | " and %0, %2 \n" | 133 | " and %0, %2 \n" |
130 | " " __SC "%0, %1 \n" | 134 | " " __SC "%0, %1 \n" |
131 | " beqzl %0, 1b \n" | 135 | " beqzl %0, 1b \n" |
132 | " .set mips0 \n" | 136 | " .set mips0 \n" |
133 | : "=&r" (temp), "=m" (*m) | 137 | : "=&r" (temp), "=m" (*m) |
134 | : "ir" (~(1UL << bit)), "m" (*m)); | 138 | : "ir" (~(1UL << bit)), "m" (*m)); |
135 | #ifdef CONFIG_CPU_MIPSR2 | 139 | #ifdef CONFIG_CPU_MIPSR2 |
136 | } else if (__builtin_constant_p(bit)) { | 140 | } else if (__builtin_constant_p(bit)) { |
137 | __asm__ __volatile__( | 141 | __asm__ __volatile__( |
138 | "1: " __LL "%0, %1 # clear_bit \n" | 142 | "1: " __LL "%0, %1 # clear_bit \n" |
139 | " " __INS "%0, $0, %2, 1 \n" | 143 | " " __INS "%0, $0, %2, 1 \n" |
140 | " " __SC "%0, %1 \n" | 144 | " " __SC "%0, %1 \n" |
141 | " beqz %0, 2f \n" | 145 | " beqz %0, 2f \n" |
142 | " .subsection 2 \n" | 146 | " .subsection 2 \n" |
143 | "2: b 1b \n" | 147 | "2: b 1b \n" |
144 | " .previous \n" | 148 | " .previous \n" |
145 | : "=&r" (temp), "=m" (*m) | 149 | : "=&r" (temp), "=m" (*m) |
146 | : "ir" (bit), "m" (*m)); | 150 | : "ir" (bit), "m" (*m)); |
147 | #endif /* CONFIG_CPU_MIPSR2 */ | 151 | #endif /* CONFIG_CPU_MIPSR2 */ |
148 | } else if (cpu_has_llsc) { | 152 | } else if (cpu_has_llsc) { |
149 | __asm__ __volatile__( | 153 | __asm__ __volatile__( |
150 | " .set mips3 \n" | 154 | " .set mips3 \n" |
151 | "1: " __LL "%0, %1 # clear_bit \n" | 155 | "1: " __LL "%0, %1 # clear_bit \n" |
152 | " and %0, %2 \n" | 156 | " and %0, %2 \n" |
153 | " " __SC "%0, %1 \n" | 157 | " " __SC "%0, %1 \n" |
154 | " beqz %0, 2f \n" | 158 | " beqz %0, 2f \n" |
155 | " .subsection 2 \n" | 159 | " .subsection 2 \n" |
156 | "2: b 1b \n" | 160 | "2: b 1b \n" |
157 | " .previous \n" | 161 | " .previous \n" |
158 | " .set mips0 \n" | 162 | " .set mips0 \n" |
159 | : "=&r" (temp), "=m" (*m) | 163 | : "=&r" (temp), "=m" (*m) |
160 | : "ir" (~(1UL << bit)), "m" (*m)); | 164 | : "ir" (~(1UL << bit)), "m" (*m)); |
161 | } else { | 165 | } else { |
162 | volatile unsigned long *a = addr; | 166 | volatile unsigned long *a = addr; |
163 | unsigned long mask; | 167 | unsigned long mask; |
164 | unsigned long flags; | 168 | unsigned long flags; |
165 | 169 | ||
166 | a += nr >> SZLONG_LOG; | 170 | a += nr >> SZLONG_LOG; |
167 | mask = 1UL << bit; | 171 | mask = 1UL << bit; |
168 | raw_local_irq_save(flags); | 172 | raw_local_irq_save(flags); |
169 | *a &= ~mask; | 173 | *a &= ~mask; |
170 | raw_local_irq_restore(flags); | 174 | raw_local_irq_restore(flags); |
171 | } | 175 | } |
172 | } | 176 | } |
173 | 177 | ||
174 | /* | 178 | /* |
175 | * clear_bit_unlock - Clears a bit in memory | 179 | * clear_bit_unlock - Clears a bit in memory |
176 | * @nr: Bit to clear | 180 | * @nr: Bit to clear |
177 | * @addr: Address to start counting from | 181 | * @addr: Address to start counting from |
178 | * | 182 | * |
179 | * clear_bit() is atomic and implies release semantics before the memory | 183 | * clear_bit() is atomic and implies release semantics before the memory |
180 | * operation. It can be used for an unlock. | 184 | * operation. It can be used for an unlock. |
181 | */ | 185 | */ |
182 | static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr) | 186 | static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr) |
183 | { | 187 | { |
184 | smp_mb__before_clear_bit(); | 188 | smp_mb__before_clear_bit(); |
185 | clear_bit(nr, addr); | 189 | clear_bit(nr, addr); |
186 | } | 190 | } |
187 | 191 | ||
188 | /* | 192 | /* |
189 | * change_bit - Toggle a bit in memory | 193 | * change_bit - Toggle a bit in memory |
190 | * @nr: Bit to change | 194 | * @nr: Bit to change |
191 | * @addr: Address to start counting from | 195 | * @addr: Address to start counting from |
192 | * | 196 | * |
193 | * change_bit() is atomic and may not be reordered. | 197 | * change_bit() is atomic and may not be reordered. |
194 | * Note that @nr may be almost arbitrarily large; this function is not | 198 | * Note that @nr may be almost arbitrarily large; this function is not |
195 | * restricted to acting on a single-word quantity. | 199 | * restricted to acting on a single-word quantity. |
196 | */ | 200 | */ |
197 | static inline void change_bit(unsigned long nr, volatile unsigned long *addr) | 201 | static inline void change_bit(unsigned long nr, volatile unsigned long *addr) |
198 | { | 202 | { |
199 | unsigned short bit = nr & SZLONG_MASK; | 203 | unsigned short bit = nr & SZLONG_MASK; |
200 | 204 | ||
201 | if (cpu_has_llsc && R10000_LLSC_WAR) { | 205 | if (cpu_has_llsc && R10000_LLSC_WAR) { |
202 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 206 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
203 | unsigned long temp; | 207 | unsigned long temp; |
204 | 208 | ||
205 | __asm__ __volatile__( | 209 | __asm__ __volatile__( |
206 | " .set mips3 \n" | 210 | " .set mips3 \n" |
207 | "1: " __LL "%0, %1 # change_bit \n" | 211 | "1: " __LL "%0, %1 # change_bit \n" |
208 | " xor %0, %2 \n" | 212 | " xor %0, %2 \n" |
209 | " " __SC "%0, %1 \n" | 213 | " " __SC "%0, %1 \n" |
210 | " beqzl %0, 1b \n" | 214 | " beqzl %0, 1b \n" |
211 | " .set mips0 \n" | 215 | " .set mips0 \n" |
212 | : "=&r" (temp), "=m" (*m) | 216 | : "=&r" (temp), "=m" (*m) |
213 | : "ir" (1UL << bit), "m" (*m)); | 217 | : "ir" (1UL << bit), "m" (*m)); |
214 | } else if (cpu_has_llsc) { | 218 | } else if (cpu_has_llsc) { |
215 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 219 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
216 | unsigned long temp; | 220 | unsigned long temp; |
217 | 221 | ||
218 | __asm__ __volatile__( | 222 | __asm__ __volatile__( |
219 | " .set mips3 \n" | 223 | " .set mips3 \n" |
220 | "1: " __LL "%0, %1 # change_bit \n" | 224 | "1: " __LL "%0, %1 # change_bit \n" |
221 | " xor %0, %2 \n" | 225 | " xor %0, %2 \n" |
222 | " " __SC "%0, %1 \n" | 226 | " " __SC "%0, %1 \n" |
223 | " beqz %0, 2f \n" | 227 | " beqz %0, 2f \n" |
224 | " .subsection 2 \n" | 228 | " .subsection 2 \n" |
225 | "2: b 1b \n" | 229 | "2: b 1b \n" |
226 | " .previous \n" | 230 | " .previous \n" |
227 | " .set mips0 \n" | 231 | " .set mips0 \n" |
228 | : "=&r" (temp), "=m" (*m) | 232 | : "=&r" (temp), "=m" (*m) |
229 | : "ir" (1UL << bit), "m" (*m)); | 233 | : "ir" (1UL << bit), "m" (*m)); |
230 | } else { | 234 | } else { |
231 | volatile unsigned long *a = addr; | 235 | volatile unsigned long *a = addr; |
232 | unsigned long mask; | 236 | unsigned long mask; |
233 | unsigned long flags; | 237 | unsigned long flags; |
234 | 238 | ||
235 | a += nr >> SZLONG_LOG; | 239 | a += nr >> SZLONG_LOG; |
236 | mask = 1UL << bit; | 240 | mask = 1UL << bit; |
237 | raw_local_irq_save(flags); | 241 | raw_local_irq_save(flags); |
238 | *a ^= mask; | 242 | *a ^= mask; |
239 | raw_local_irq_restore(flags); | 243 | raw_local_irq_restore(flags); |
240 | } | 244 | } |
241 | } | 245 | } |
242 | 246 | ||
243 | /* | 247 | /* |
244 | * test_and_set_bit - Set a bit and return its old value | 248 | * test_and_set_bit - Set a bit and return its old value |
245 | * @nr: Bit to set | 249 | * @nr: Bit to set |
246 | * @addr: Address to count from | 250 | * @addr: Address to count from |
247 | * | 251 | * |
248 | * This operation is atomic and cannot be reordered. | 252 | * This operation is atomic and cannot be reordered. |
249 | * It also implies a memory barrier. | 253 | * It also implies a memory barrier. |
250 | */ | 254 | */ |
251 | static inline int test_and_set_bit(unsigned long nr, | 255 | static inline int test_and_set_bit(unsigned long nr, |
252 | volatile unsigned long *addr) | 256 | volatile unsigned long *addr) |
253 | { | 257 | { |
254 | unsigned short bit = nr & SZLONG_MASK; | 258 | unsigned short bit = nr & SZLONG_MASK; |
255 | unsigned long res; | 259 | unsigned long res; |
256 | 260 | ||
257 | smp_llsc_mb(); | 261 | smp_llsc_mb(); |
258 | 262 | ||
259 | if (cpu_has_llsc && R10000_LLSC_WAR) { | 263 | if (cpu_has_llsc && R10000_LLSC_WAR) { |
260 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 264 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
261 | unsigned long temp; | 265 | unsigned long temp; |
262 | 266 | ||
263 | __asm__ __volatile__( | 267 | __asm__ __volatile__( |
264 | " .set mips3 \n" | 268 | " .set mips3 \n" |
265 | "1: " __LL "%0, %1 # test_and_set_bit \n" | 269 | "1: " __LL "%0, %1 # test_and_set_bit \n" |
266 | " or %2, %0, %3 \n" | 270 | " or %2, %0, %3 \n" |
267 | " " __SC "%2, %1 \n" | 271 | " " __SC "%2, %1 \n" |
268 | " beqzl %2, 1b \n" | 272 | " beqzl %2, 1b \n" |
269 | " and %2, %0, %3 \n" | 273 | " and %2, %0, %3 \n" |
270 | " .set mips0 \n" | 274 | " .set mips0 \n" |
271 | : "=&r" (temp), "=m" (*m), "=&r" (res) | 275 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
272 | : "r" (1UL << bit), "m" (*m) | 276 | : "r" (1UL << bit), "m" (*m) |
273 | : "memory"); | 277 | : "memory"); |
274 | } else if (cpu_has_llsc) { | 278 | } else if (cpu_has_llsc) { |
275 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 279 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
276 | unsigned long temp; | 280 | unsigned long temp; |
277 | 281 | ||
278 | __asm__ __volatile__( | 282 | __asm__ __volatile__( |
279 | " .set push \n" | 283 | " .set push \n" |
280 | " .set noreorder \n" | 284 | " .set noreorder \n" |
281 | " .set mips3 \n" | 285 | " .set mips3 \n" |
282 | "1: " __LL "%0, %1 # test_and_set_bit \n" | 286 | "1: " __LL "%0, %1 # test_and_set_bit \n" |
283 | " or %2, %0, %3 \n" | 287 | " or %2, %0, %3 \n" |
284 | " " __SC "%2, %1 \n" | 288 | " " __SC "%2, %1 \n" |
285 | " beqz %2, 2f \n" | 289 | " beqz %2, 2f \n" |
286 | " and %2, %0, %3 \n" | 290 | " and %2, %0, %3 \n" |
287 | " .subsection 2 \n" | 291 | " .subsection 2 \n" |
288 | "2: b 1b \n" | 292 | "2: b 1b \n" |
289 | " nop \n" | 293 | " nop \n" |
290 | " .previous \n" | 294 | " .previous \n" |
291 | " .set pop \n" | 295 | " .set pop \n" |
292 | : "=&r" (temp), "=m" (*m), "=&r" (res) | 296 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
293 | : "r" (1UL << bit), "m" (*m) | 297 | : "r" (1UL << bit), "m" (*m) |
294 | : "memory"); | 298 | : "memory"); |
295 | } else { | 299 | } else { |
296 | volatile unsigned long *a = addr; | 300 | volatile unsigned long *a = addr; |
297 | unsigned long mask; | 301 | unsigned long mask; |
298 | unsigned long flags; | 302 | unsigned long flags; |
299 | 303 | ||
300 | a += nr >> SZLONG_LOG; | 304 | a += nr >> SZLONG_LOG; |
301 | mask = 1UL << bit; | 305 | mask = 1UL << bit; |
302 | raw_local_irq_save(flags); | 306 | raw_local_irq_save(flags); |
303 | res = (mask & *a); | 307 | res = (mask & *a); |
304 | *a |= mask; | 308 | *a |= mask; |
305 | raw_local_irq_restore(flags); | 309 | raw_local_irq_restore(flags); |
306 | } | 310 | } |
307 | 311 | ||
308 | smp_llsc_mb(); | 312 | smp_llsc_mb(); |
309 | 313 | ||
310 | return res != 0; | 314 | return res != 0; |
311 | } | 315 | } |
312 | 316 | ||
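For reference, a minimal usage sketch of test_and_set_bit() as an atomic "claim" on a flag word. The flag bit, the flag word and the error handling are invented for illustration; only the test_and_set_bit() signature comes from this header, and the usual kernel headers (<linux/bitops.h>, <linux/errno.h>) are assumed to be in scope.

    #define MYDRV_BUSY	0			/* hypothetical flag bit */
    static unsigned long mydrv_flags;		/* hypothetical flag word */

    static int mydrv_try_claim(void)
    {
    	/* The old bit value is returned: non-zero means another caller
    	 * already owns the device, so exactly one caller wins the race. */
    	if (test_and_set_bit(MYDRV_BUSY, &mydrv_flags))
    		return -EBUSY;
    	return 0;
    }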
313 | /* | 317 | /* |
314 | * test_and_set_bit_lock - Set a bit and return its old value | 318 | * test_and_set_bit_lock - Set a bit and return its old value |
315 | * @nr: Bit to set | 319 | * @nr: Bit to set |
316 | * @addr: Address to count from | 320 | * @addr: Address to count from |
317 | * | 321 | * |
318 | * This operation is atomic and implies acquire ordering semantics | 322 | * This operation is atomic and implies acquire ordering semantics |
319 | * after the memory operation. | 323 | * after the memory operation. |
320 | */ | 324 | */ |
321 | static inline int test_and_set_bit_lock(unsigned long nr, | 325 | static inline int test_and_set_bit_lock(unsigned long nr, |
322 | volatile unsigned long *addr) | 326 | volatile unsigned long *addr) |
323 | { | 327 | { |
324 | unsigned short bit = nr & SZLONG_MASK; | 328 | unsigned short bit = nr & SZLONG_MASK; |
325 | unsigned long res; | 329 | unsigned long res; |
326 | 330 | ||
327 | if (cpu_has_llsc && R10000_LLSC_WAR) { | 331 | if (cpu_has_llsc && R10000_LLSC_WAR) { |
328 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 332 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
329 | unsigned long temp; | 333 | unsigned long temp; |
330 | 334 | ||
331 | __asm__ __volatile__( | 335 | __asm__ __volatile__( |
332 | " .set mips3 \n" | 336 | " .set mips3 \n" |
333 | "1: " __LL "%0, %1 # test_and_set_bit \n" | 337 | "1: " __LL "%0, %1 # test_and_set_bit \n" |
334 | " or %2, %0, %3 \n" | 338 | " or %2, %0, %3 \n" |
335 | " " __SC "%2, %1 \n" | 339 | " " __SC "%2, %1 \n" |
336 | " beqzl %2, 1b \n" | 340 | " beqzl %2, 1b \n" |
337 | " and %2, %0, %3 \n" | 341 | " and %2, %0, %3 \n" |
338 | " .set mips0 \n" | 342 | " .set mips0 \n" |
339 | : "=&r" (temp), "=m" (*m), "=&r" (res) | 343 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
340 | : "r" (1UL << bit), "m" (*m) | 344 | : "r" (1UL << bit), "m" (*m) |
341 | : "memory"); | 345 | : "memory"); |
342 | } else if (cpu_has_llsc) { | 346 | } else if (cpu_has_llsc) { |
343 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 347 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
344 | unsigned long temp; | 348 | unsigned long temp; |
345 | 349 | ||
346 | __asm__ __volatile__( | 350 | __asm__ __volatile__( |
347 | " .set push \n" | 351 | " .set push \n" |
348 | " .set noreorder \n" | 352 | " .set noreorder \n" |
349 | " .set mips3 \n" | 353 | " .set mips3 \n" |
350 | "1: " __LL "%0, %1 # test_and_set_bit \n" | 354 | "1: " __LL "%0, %1 # test_and_set_bit \n" |
351 | " or %2, %0, %3 \n" | 355 | " or %2, %0, %3 \n" |
352 | " " __SC "%2, %1 \n" | 356 | " " __SC "%2, %1 \n" |
353 | " beqz %2, 2f \n" | 357 | " beqz %2, 2f \n" |
354 | " and %2, %0, %3 \n" | 358 | " and %2, %0, %3 \n" |
355 | " .subsection 2 \n" | 359 | " .subsection 2 \n" |
356 | "2: b 1b \n" | 360 | "2: b 1b \n" |
357 | " nop \n" | 361 | " nop \n" |
358 | " .previous \n" | 362 | " .previous \n" |
359 | " .set pop \n" | 363 | " .set pop \n" |
360 | : "=&r" (temp), "=m" (*m), "=&r" (res) | 364 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
361 | : "r" (1UL << bit), "m" (*m) | 365 | : "r" (1UL << bit), "m" (*m) |
362 | : "memory"); | 366 | : "memory"); |
363 | } else { | 367 | } else { |
364 | volatile unsigned long *a = addr; | 368 | volatile unsigned long *a = addr; |
365 | unsigned long mask; | 369 | unsigned long mask; |
366 | unsigned long flags; | 370 | unsigned long flags; |
367 | 371 | ||
368 | a += nr >> SZLONG_LOG; | 372 | a += nr >> SZLONG_LOG; |
369 | mask = 1UL << bit; | 373 | mask = 1UL << bit; |
370 | raw_local_irq_save(flags); | 374 | raw_local_irq_save(flags); |
371 | res = (mask & *a); | 375 | res = (mask & *a); |
372 | *a |= mask; | 376 | *a |= mask; |
373 | raw_local_irq_restore(flags); | 377 | raw_local_irq_restore(flags); |
374 | } | 378 | } |
375 | 379 | ||
376 | smp_llsc_mb(); | 380 | smp_llsc_mb(); |
377 | 381 | ||
378 | return res != 0; | 382 | return res != 0; |
379 | } | 383 | } |
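The _lock variant above drops the leading barrier and provides only acquire ordering, which is exactly what a simple bit spinlock needs. A hedged sketch of that pattern; the lock word and helper names are invented, and the release side is assumed to come from the bitops API's clear_bit_unlock().

    static unsigned long door_lock;		/* hypothetical lock word, bit 0 is the lock */

    static void door_acquire(void)
    {
    	while (test_and_set_bit_lock(0, &door_lock))
    		cpu_relax();			/* old value was 1: spin until it is free */
    }

    static void door_release(void)
    {
    	clear_bit_unlock(0, &door_lock);	/* release ordering, then clear */
    }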
380 | /* | 384 | /* |
381 | * test_and_clear_bit - Clear a bit and return its old value | 385 | * test_and_clear_bit - Clear a bit and return its old value |
382 | * @nr: Bit to clear | 386 | * @nr: Bit to clear |
383 | * @addr: Address to count from | 387 | * @addr: Address to count from |
384 | * | 388 | * |
385 | * This operation is atomic and cannot be reordered. | 389 | * This operation is atomic and cannot be reordered. |
386 | * It also implies a memory barrier. | 390 | * It also implies a memory barrier. |
387 | */ | 391 | */ |
388 | static inline int test_and_clear_bit(unsigned long nr, | 392 | static inline int test_and_clear_bit(unsigned long nr, |
389 | volatile unsigned long *addr) | 393 | volatile unsigned long *addr) |
390 | { | 394 | { |
391 | unsigned short bit = nr & SZLONG_MASK; | 395 | unsigned short bit = nr & SZLONG_MASK; |
392 | unsigned long res; | 396 | unsigned long res; |
393 | 397 | ||
394 | smp_llsc_mb(); | 398 | smp_llsc_mb(); |
395 | 399 | ||
396 | if (cpu_has_llsc && R10000_LLSC_WAR) { | 400 | if (cpu_has_llsc && R10000_LLSC_WAR) { |
397 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 401 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
398 | unsigned long temp; | 402 | unsigned long temp; |
399 | 403 | ||
400 | __asm__ __volatile__( | 404 | __asm__ __volatile__( |
401 | " .set mips3 \n" | 405 | " .set mips3 \n" |
402 | "1: " __LL "%0, %1 # test_and_clear_bit \n" | 406 | "1: " __LL "%0, %1 # test_and_clear_bit \n" |
403 | " or %2, %0, %3 \n" | 407 | " or %2, %0, %3 \n" |
404 | " xor %2, %3 \n" | 408 | " xor %2, %3 \n" |
405 | " " __SC "%2, %1 \n" | 409 | " " __SC "%2, %1 \n" |
406 | " beqzl %2, 1b \n" | 410 | " beqzl %2, 1b \n" |
407 | " and %2, %0, %3 \n" | 411 | " and %2, %0, %3 \n" |
408 | " .set mips0 \n" | 412 | " .set mips0 \n" |
409 | : "=&r" (temp), "=m" (*m), "=&r" (res) | 413 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
410 | : "r" (1UL << bit), "m" (*m) | 414 | : "r" (1UL << bit), "m" (*m) |
411 | : "memory"); | 415 | : "memory"); |
412 | #ifdef CONFIG_CPU_MIPSR2 | 416 | #ifdef CONFIG_CPU_MIPSR2 |
413 | } else if (__builtin_constant_p(nr)) { | 417 | } else if (__builtin_constant_p(nr)) { |
414 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 418 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
415 | unsigned long temp; | 419 | unsigned long temp; |
416 | 420 | ||
417 | __asm__ __volatile__( | 421 | __asm__ __volatile__( |
418 | "1: " __LL "%0, %1 # test_and_clear_bit \n" | 422 | "1: " __LL "%0, %1 # test_and_clear_bit \n" |
419 | " " __EXT "%2, %0, %3, 1 \n" | 423 | " " __EXT "%2, %0, %3, 1 \n" |
420 | " " __INS "%0, $0, %3, 1 \n" | 424 | " " __INS "%0, $0, %3, 1 \n" |
421 | " " __SC "%0, %1 \n" | 425 | " " __SC "%0, %1 \n" |
422 | " beqz %0, 2f \n" | 426 | " beqz %0, 2f \n" |
423 | " .subsection 2 \n" | 427 | " .subsection 2 \n" |
424 | "2: b 1b \n" | 428 | "2: b 1b \n" |
425 | " .previous \n" | 429 | " .previous \n" |
426 | : "=&r" (temp), "=m" (*m), "=&r" (res) | 430 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
427 | : "ri" (bit), "m" (*m) | 431 | : "ri" (bit), "m" (*m) |
428 | : "memory"); | 432 | : "memory"); |
429 | #endif | 433 | #endif |
430 | } else if (cpu_has_llsc) { | 434 | } else if (cpu_has_llsc) { |
431 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 435 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
432 | unsigned long temp; | 436 | unsigned long temp; |
433 | 437 | ||
434 | __asm__ __volatile__( | 438 | __asm__ __volatile__( |
435 | " .set push \n" | 439 | " .set push \n" |
436 | " .set noreorder \n" | 440 | " .set noreorder \n" |
437 | " .set mips3 \n" | 441 | " .set mips3 \n" |
438 | "1: " __LL "%0, %1 # test_and_clear_bit \n" | 442 | "1: " __LL "%0, %1 # test_and_clear_bit \n" |
439 | " or %2, %0, %3 \n" | 443 | " or %2, %0, %3 \n" |
440 | " xor %2, %3 \n" | 444 | " xor %2, %3 \n" |
441 | " " __SC "%2, %1 \n" | 445 | " " __SC "%2, %1 \n" |
442 | " beqz %2, 2f \n" | 446 | " beqz %2, 2f \n" |
443 | " and %2, %0, %3 \n" | 447 | " and %2, %0, %3 \n" |
444 | " .subsection 2 \n" | 448 | " .subsection 2 \n" |
445 | "2: b 1b \n" | 449 | "2: b 1b \n" |
446 | " nop \n" | 450 | " nop \n" |
447 | " .previous \n" | 451 | " .previous \n" |
448 | " .set pop \n" | 452 | " .set pop \n" |
449 | : "=&r" (temp), "=m" (*m), "=&r" (res) | 453 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
450 | : "r" (1UL << bit), "m" (*m) | 454 | : "r" (1UL << bit), "m" (*m) |
451 | : "memory"); | 455 | : "memory"); |
452 | } else { | 456 | } else { |
453 | volatile unsigned long *a = addr; | 457 | volatile unsigned long *a = addr; |
454 | unsigned long mask; | 458 | unsigned long mask; |
455 | unsigned long flags; | 459 | unsigned long flags; |
456 | 460 | ||
457 | a += nr >> SZLONG_LOG; | 461 | a += nr >> SZLONG_LOG; |
458 | mask = 1UL << bit; | 462 | mask = 1UL << bit; |
459 | raw_local_irq_save(flags); | 463 | raw_local_irq_save(flags); |
460 | res = (mask & *a); | 464 | res = (mask & *a); |
461 | *a &= ~mask; | 465 | *a &= ~mask; |
462 | raw_local_irq_restore(flags); | 466 | raw_local_irq_restore(flags); |
463 | } | 467 | } |
464 | 468 | ||
465 | smp_llsc_mb(); | 469 | smp_llsc_mb(); |
466 | 470 | ||
467 | return res != 0; | 471 | return res != 0; |
468 | } | 472 | } |
469 | 473 | ||
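A small consume-once sketch built on test_and_clear_bit(): because the old value is returned atomically, only one of several racing callers observes the bit as set. The bitmap and bit meaning are invented for illustration.

    static unsigned long pending;		/* hypothetical "event pending" bitmap */

    static void service_event(int nr)
    {
    	if (!test_and_clear_bit(nr, &pending))
    		return;				/* someone else already consumed it */
    	/* ... handle event nr exactly once ... */
    }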
470 | /* | 474 | /* |
471 | * test_and_change_bit - Change a bit and return its old value | 475 | * test_and_change_bit - Change a bit and return its old value |
472 | * @nr: Bit to change | 476 | * @nr: Bit to change |
473 | * @addr: Address to count from | 477 | * @addr: Address to count from |
474 | * | 478 | * |
475 | * This operation is atomic and cannot be reordered. | 479 | * This operation is atomic and cannot be reordered. |
476 | * It also implies a memory barrier. | 480 | * It also implies a memory barrier. |
477 | */ | 481 | */ |
478 | static inline int test_and_change_bit(unsigned long nr, | 482 | static inline int test_and_change_bit(unsigned long nr, |
479 | volatile unsigned long *addr) | 483 | volatile unsigned long *addr) |
480 | { | 484 | { |
481 | unsigned short bit = nr & SZLONG_MASK; | 485 | unsigned short bit = nr & SZLONG_MASK; |
482 | unsigned long res; | 486 | unsigned long res; |
483 | 487 | ||
484 | smp_llsc_mb(); | 488 | smp_llsc_mb(); |
485 | 489 | ||
486 | if (cpu_has_llsc && R10000_LLSC_WAR) { | 490 | if (cpu_has_llsc && R10000_LLSC_WAR) { |
487 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 491 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
488 | unsigned long temp; | 492 | unsigned long temp; |
489 | 493 | ||
490 | __asm__ __volatile__( | 494 | __asm__ __volatile__( |
491 | " .set mips3 \n" | 495 | " .set mips3 \n" |
492 | "1: " __LL "%0, %1 # test_and_change_bit \n" | 496 | "1: " __LL "%0, %1 # test_and_change_bit \n" |
493 | " xor %2, %0, %3 \n" | 497 | " xor %2, %0, %3 \n" |
494 | " " __SC "%2, %1 \n" | 498 | " " __SC "%2, %1 \n" |
495 | " beqzl %2, 1b \n" | 499 | " beqzl %2, 1b \n" |
496 | " and %2, %0, %3 \n" | 500 | " and %2, %0, %3 \n" |
497 | " .set mips0 \n" | 501 | " .set mips0 \n" |
498 | : "=&r" (temp), "=m" (*m), "=&r" (res) | 502 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
499 | : "r" (1UL << bit), "m" (*m) | 503 | : "r" (1UL << bit), "m" (*m) |
500 | : "memory"); | 504 | : "memory"); |
501 | } else if (cpu_has_llsc) { | 505 | } else if (cpu_has_llsc) { |
502 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 506 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
503 | unsigned long temp; | 507 | unsigned long temp; |
504 | 508 | ||
505 | __asm__ __volatile__( | 509 | __asm__ __volatile__( |
506 | " .set push \n" | 510 | " .set push \n" |
507 | " .set noreorder \n" | 511 | " .set noreorder \n" |
508 | " .set mips3 \n" | 512 | " .set mips3 \n" |
509 | "1: " __LL "%0, %1 # test_and_change_bit \n" | 513 | "1: " __LL "%0, %1 # test_and_change_bit \n" |
510 | " xor %2, %0, %3 \n" | 514 | " xor %2, %0, %3 \n" |
511 | " " __SC "\t%2, %1 \n" | 515 | " " __SC "\t%2, %1 \n" |
512 | " beqz %2, 2f \n" | 516 | " beqz %2, 2f \n" |
513 | " and %2, %0, %3 \n" | 517 | " and %2, %0, %3 \n" |
514 | " .subsection 2 \n" | 518 | " .subsection 2 \n" |
515 | "2: b 1b \n" | 519 | "2: b 1b \n" |
516 | " nop \n" | 520 | " nop \n" |
517 | " .previous \n" | 521 | " .previous \n" |
518 | " .set pop \n" | 522 | " .set pop \n" |
519 | : "=&r" (temp), "=m" (*m), "=&r" (res) | 523 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
520 | : "r" (1UL << bit), "m" (*m) | 524 | : "r" (1UL << bit), "m" (*m) |
521 | : "memory"); | 525 | : "memory"); |
522 | } else { | 526 | } else { |
523 | volatile unsigned long *a = addr; | 527 | volatile unsigned long *a = addr; |
524 | unsigned long mask; | 528 | unsigned long mask; |
525 | unsigned long flags; | 529 | unsigned long flags; |
526 | 530 | ||
527 | a += nr >> SZLONG_LOG; | 531 | a += nr >> SZLONG_LOG; |
528 | mask = 1UL << bit; | 532 | mask = 1UL << bit; |
529 | raw_local_irq_save(flags); | 533 | raw_local_irq_save(flags); |
530 | res = (mask & *a); | 534 | res = (mask & *a); |
531 | *a ^= mask; | 535 | *a ^= mask; |
532 | raw_local_irq_restore(flags); | 536 | raw_local_irq_restore(flags); |
533 | } | 537 | } |
534 | 538 | ||
535 | smp_llsc_mb(); | 539 | smp_llsc_mb(); |
536 | 540 | ||
537 | return res != 0; | 541 | return res != 0; |
538 | } | 542 | } |
539 | 543 | ||
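test_and_change_bit() atomically flips a bit while reporting the state it had before the flip. An illustrative sketch (the state word is invented):

    static unsigned long led_state;		/* hypothetical LED state bitmap */

    static int led_toggle(int nr)
    {
    	/* returns 1 if the LED was on before the toggle, 0 if it was off */
    	return test_and_change_bit(nr, &led_state);
    }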
540 | #include <asm-generic/bitops/non-atomic.h> | 544 | #include <asm-generic/bitops/non-atomic.h> |
541 | 545 | ||
542 | /* | 546 | /* |
543 | * __clear_bit_unlock - Clears a bit in memory | 547 | * __clear_bit_unlock - Clears a bit in memory |
544 | * @nr: Bit to clear | 548 | * @nr: Bit to clear |
545 | * @addr: Address to start counting from | 549 | * @addr: Address to start counting from |
546 | * | 550 | * |
547 | * __clear_bit() is non-atomic and implies release semantics before the memory | 551 | * __clear_bit() is non-atomic and implies release semantics before the memory |
548 | * operation. It can be used for an unlock if no other CPUs can concurrently | 552 | * operation. It can be used for an unlock if no other CPUs can concurrently |
549 | * modify other bits in the word. | 553 | * modify other bits in the word. |
550 | */ | 554 | */ |
551 | static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr) | 555 | static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr) |
552 | { | 556 | { |
553 | smp_mb(); | 557 | smp_mb(); |
554 | __clear_bit(nr, addr); | 558 | __clear_bit(nr, addr); |
555 | } | 559 | } |
556 | 560 | ||
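The non-atomic unlock above is only correct when no other CPU can concurrently modify other bits of the same word, for example when the lock bit is the only bit of that word that ever changes. A minimal sketch of that constraint (names invented):

    static unsigned long owner_word;	/* hypothetical: bit 0 is the only bit ever written */

    static void owner_release(void)
    {
    	/* Safe here: nothing else touches owner_word concurrently, so the
    	 * plain (non-ll/sc) clear after smp_mb() cannot lose an update. */
    	__clear_bit_unlock(0, &owner_word);
    }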
557 | /* | 561 | /* |
558 | * Return the bit position (0..63) of the most significant 1 bit in a word | 562 | * Return the bit position (0..63) of the most significant 1 bit in a word |
559 | * Returns -1 if no 1 bit exists | 563 | * Returns -1 if no 1 bit exists |
560 | */ | 564 | */ |
561 | static inline int __ilog2(unsigned long x) | 565 | static inline int __ilog2(unsigned long x) |
562 | { | 566 | { |
563 | int lz; | 567 | int lz; |
564 | 568 | ||
565 | if (sizeof(x) == 4) { | 569 | if (sizeof(x) == 4) { |
566 | __asm__( | 570 | __asm__( |
567 | " .set push \n" | 571 | " .set push \n" |
568 | " .set mips32 \n" | 572 | " .set mips32 \n" |
569 | " clz %0, %1 \n" | 573 | " clz %0, %1 \n" |
570 | " .set pop \n" | 574 | " .set pop \n" |
571 | : "=r" (lz) | 575 | : "=r" (lz) |
572 | : "r" (x)); | 576 | : "r" (x)); |
573 | 577 | ||
574 | return 31 - lz; | 578 | return 31 - lz; |
575 | } | 579 | } |
576 | 580 | ||
577 | BUG_ON(sizeof(x) != 8); | 581 | BUG_ON(sizeof(x) != 8); |
578 | 582 | ||
579 | __asm__( | 583 | __asm__( |
580 | " .set push \n" | 584 | " .set push \n" |
581 | " .set mips64 \n" | 585 | " .set mips64 \n" |
582 | " dclz %0, %1 \n" | 586 | " dclz %0, %1 \n" |
583 | " .set pop \n" | 587 | " .set pop \n" |
584 | : "=r" (lz) | 588 | : "=r" (lz) |
585 | : "r" (x)); | 589 | : "r" (x)); |
586 | 590 | ||
587 | return 63 - lz; | 591 | return 63 - lz; |
588 | } | 592 | } |
589 | 593 | ||
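For reference, a portable C sketch with the same contract as __ilog2() above (position of the most significant 1 bit, -1 for an input of 0). It is not the kernel implementation, just a plain-loop equivalent that is easy to check against the clz/dclz paths:

    /* Reference only, not kernel code. */
    static int ilog2_ref(unsigned long x)
    {
    	int pos = -1;

    	while (x) {
    		x >>= 1;
    		pos++;
    	}
    	return pos;	/* ilog2_ref(1) == 0, ilog2_ref(0x80) == 7, ilog2_ref(0) == -1 */
    }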
590 | #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) | 594 | #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) |
591 | 595 | ||
592 | /* | 596 | /* |
593 | * __ffs - find first bit in word. | 597 | * __ffs - find first bit in word. |
594 | * @word: The word to search | 598 | * @word: The word to search |
595 | * | 599 | * |
596 | * Returns 0..SZLONG-1 | 600 | * Returns 0..SZLONG-1 |
597 | * Undefined if no bit exists, so code should check against 0 first. | 601 | * Undefined if no bit exists, so code should check against 0 first. |
598 | */ | 602 | */ |
599 | static inline unsigned long __ffs(unsigned long word) | 603 | static inline unsigned long __ffs(unsigned long word) |
600 | { | 604 | { |
601 | return __ilog2(word & -word); | 605 | return __ilog2(word & -word); |
602 | } | 606 | } |
603 | 607 | ||
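The word & -word trick isolates the lowest set bit, so feeding it to __ilog2() yields the first-set-bit index. A small standalone check of that arithmetic (plain userspace C, not kernel code):

    #include <assert.h>

    int main(void)
    {
    	unsigned long word = 40;		/* binary 101000 */

    	assert((word & -word) == 8);		/* only the lowest set bit survives */
    	/* __ilog2(8) == 3, hence __ffs(40) == 3 */
    	return 0;
    }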
604 | /* | 608 | /* |
605 | * fls - find last bit set. | 609 | * fls - find last bit set. |
606 | * @word: The word to search | 610 | * @word: The word to search |
607 | * | 611 | * |
608 | * This is defined the same way as ffs. | 612 | * This is defined the same way as ffs. |
609 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. | 613 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. |
610 | */ | 614 | */ |
611 | static inline int fls(int word) | 615 | static inline int fls(int word) |
612 | { | 616 | { |
613 | __asm__("clz %0, %1" : "=r" (word) : "r" (word)); | 617 | __asm__("clz %0, %1" : "=r" (word) : "r" (word)); |
614 | 618 | ||
615 | return 32 - word; | 619 | return 32 - word; |
616 | } | 620 | } |
617 | 621 | ||
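The clz-based fls() above relies on the MIPS convention that clz of 0 yields 32, which is what makes fls(0) == 0 fall out of the same "32 - clz" expression. A reference-only leading-zero counter that mirrors that convention (not kernel code):

    /* Reference only: count leading zeros of a 32-bit value, clz32_ref(0) == 32. */
    static int clz32_ref(unsigned int x)
    {
    	int n = 0;

    	if (!x)
    		return 32;
    	while (!(x & 0x80000000u)) {
    		x <<= 1;
    		n++;
    	}
    	return n;	/* fls(x) == 32 - clz32_ref(x): fls(1) == 1, fls(0x80000000) == 32 */
    }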
618 | #if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPS64) | 622 | #if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPS64) |
619 | static inline int fls64(__u64 word) | 623 | static inline int fls64(__u64 word) |
620 | { | 624 | { |
621 | __asm__("dclz %0, %1" : "=r" (word) : "r" (word)); | 625 | __asm__("dclz %0, %1" : "=r" (word) : "r" (word)); |
622 | 626 | ||
623 | return 64 - word; | 627 | return 64 - word; |
624 | } | 628 | } |
625 | #else | 629 | #else |
626 | #include <asm-generic/bitops/fls64.h> | 630 | #include <asm-generic/bitops/fls64.h> |
627 | #endif | 631 | #endif |
628 | 632 | ||
629 | /* | 633 | /* |
630 | * ffs - find first bit set. | 634 | * ffs - find first bit set. |
631 | * @word: The word to search | 635 | * @word: The word to search |
632 | * | 636 | * |
633 | * This is defined the same way as | 637 | * This is defined the same way as |
634 | * the libc and compiler builtin ffs routines, therefore | 638 | * the libc and compiler builtin ffs routines, therefore |
635 | * differs in spirit from the above ffz (man ffs). | 639 | * differs in spirit from the above ffz (man ffs). |
636 | */ | 640 | */ |
637 | static inline int ffs(int word) | 641 | static inline int ffs(int word) |
638 | { | 642 | { |
639 | if (!word) | 643 | if (!word) |
640 | return 0; | 644 | return 0; |
641 | 645 | ||
642 | return fls(word & -word); | 646 | return fls(word & -word); |
643 | } | 647 | } |
644 | 648 | ||
645 | #else | 649 | #else |
646 | 650 | ||
647 | #include <asm-generic/bitops/__ffs.h> | 651 | #include <asm-generic/bitops/__ffs.h> |
648 | #include <asm-generic/bitops/ffs.h> | 652 | #include <asm-generic/bitops/ffs.h> |
649 | #include <asm-generic/bitops/fls.h> | 653 | #include <asm-generic/bitops/fls.h> |
650 | #include <asm-generic/bitops/fls64.h> | 654 | #include <asm-generic/bitops/fls64.h> |
651 | 655 | ||
652 | #endif /*defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */ | 656 | #endif /*defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */ |
653 | 657 | ||
654 | #include <asm-generic/bitops/ffz.h> | 658 | #include <asm-generic/bitops/ffz.h> |
655 | #include <asm-generic/bitops/find.h> | 659 | #include <asm-generic/bitops/find.h> |
656 | 660 | ||
657 | #ifdef __KERNEL__ | 661 | #ifdef __KERNEL__ |
658 | 662 | ||
659 | #include <asm-generic/bitops/sched.h> | 663 | #include <asm-generic/bitops/sched.h> |
660 | #include <asm-generic/bitops/hweight.h> | 664 | #include <asm-generic/bitops/hweight.h> |
661 | #include <asm-generic/bitops/ext2-non-atomic.h> | 665 | #include <asm-generic/bitops/ext2-non-atomic.h> |
662 | #include <asm-generic/bitops/ext2-atomic.h> | 666 | #include <asm-generic/bitops/ext2-atomic.h> |
663 | #include <asm-generic/bitops/minix.h> | 667 | #include <asm-generic/bitops/minix.h> |
664 | 668 | ||
665 | #endif /* __KERNEL__ */ | 669 | #endif /* __KERNEL__ */ |
666 | 670 | ||
667 | #endif /* _ASM_BITOPS_H */ | 671 | #endif /* _ASM_BITOPS_H */ |
668 | 672 |
include/asm-parisc/bitops.h
1 | #ifndef _PARISC_BITOPS_H | 1 | #ifndef _PARISC_BITOPS_H |
2 | #define _PARISC_BITOPS_H | 2 | #define _PARISC_BITOPS_H |
3 | 3 | ||
4 | #ifndef _LINUX_BITOPS_H | ||
5 | #error only <linux/bitops.h> can be included directly | ||
6 | #endif | ||
7 | |||
4 | #include <linux/compiler.h> | 8 | #include <linux/compiler.h> |
5 | #include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */ | 9 | #include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */ |
6 | #include <asm/byteorder.h> | 10 | #include <asm/byteorder.h> |
7 | #include <asm/atomic.h> | 11 | #include <asm/atomic.h> |
8 | 12 | ||
9 | /* | 13 | /* |
10 | * HP-PARISC specific bit operations | 14 | * HP-PARISC specific bit operations |
11 | * for a detailed description of the functions please refer | 15 | * for a detailed description of the functions please refer |
12 | * to include/asm-i386/bitops.h or kerneldoc | 16 | * to include/asm-i386/bitops.h or kerneldoc |
13 | */ | 17 | */ |
14 | 18 | ||
15 | #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1)) | 19 | #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1)) |
16 | 20 | ||
17 | 21 | ||
18 | #define smp_mb__before_clear_bit() smp_mb() | 22 | #define smp_mb__before_clear_bit() smp_mb() |
19 | #define smp_mb__after_clear_bit() smp_mb() | 23 | #define smp_mb__after_clear_bit() smp_mb() |
20 | 24 | ||
21 | /* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion | 25 | /* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion |
22 | * on use of volatile and __*_bit() (set/clear/change): | 26 | * on use of volatile and __*_bit() (set/clear/change): |
23 | * *_bit() want use of volatile. | 27 | * *_bit() want use of volatile. |
24 | * __*_bit() are "relaxed" and don't use spinlock or volatile. | 28 | * __*_bit() are "relaxed" and don't use spinlock or volatile. |
25 | */ | 29 | */ |
26 | 30 | ||
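On parisc the atomic *_bit() helpers below take the hashed spinlock around a plain read-modify-write, while the __*_bit() variants pulled in from asm-generic are lock-free and only safe when nothing else writes the word concurrently. A short illustrative contrast (both bitmaps are invented):

    static unsigned long shared_map;	/* hypothetical: written by several CPUs */
    static unsigned long private_map;	/* hypothetical: owned by one context */

    static void mark_both(void)
    {
    	set_bit(5, &shared_map);	/* atomic: goes through _atomic_spin_lock_irqsave() */
    	__set_bit(5, &private_map);	/* relaxed: plain |=, no lock, no volatile */
    }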
27 | static __inline__ void set_bit(int nr, volatile unsigned long * addr) | 31 | static __inline__ void set_bit(int nr, volatile unsigned long * addr) |
28 | { | 32 | { |
29 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | 33 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); |
30 | unsigned long flags; | 34 | unsigned long flags; |
31 | 35 | ||
32 | addr += (nr >> SHIFT_PER_LONG); | 36 | addr += (nr >> SHIFT_PER_LONG); |
33 | _atomic_spin_lock_irqsave(addr, flags); | 37 | _atomic_spin_lock_irqsave(addr, flags); |
34 | *addr |= mask; | 38 | *addr |= mask; |
35 | _atomic_spin_unlock_irqrestore(addr, flags); | 39 | _atomic_spin_unlock_irqrestore(addr, flags); |
36 | } | 40 | } |
37 | 41 | ||
38 | static __inline__ void clear_bit(int nr, volatile unsigned long * addr) | 42 | static __inline__ void clear_bit(int nr, volatile unsigned long * addr) |
39 | { | 43 | { |
40 | unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr)); | 44 | unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr)); |
41 | unsigned long flags; | 45 | unsigned long flags; |
42 | 46 | ||
43 | addr += (nr >> SHIFT_PER_LONG); | 47 | addr += (nr >> SHIFT_PER_LONG); |
44 | _atomic_spin_lock_irqsave(addr, flags); | 48 | _atomic_spin_lock_irqsave(addr, flags); |
45 | *addr &= mask; | 49 | *addr &= mask; |
46 | _atomic_spin_unlock_irqrestore(addr, flags); | 50 | _atomic_spin_unlock_irqrestore(addr, flags); |
47 | } | 51 | } |
48 | 52 | ||
49 | static __inline__ void change_bit(int nr, volatile unsigned long * addr) | 53 | static __inline__ void change_bit(int nr, volatile unsigned long * addr) |
50 | { | 54 | { |
51 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | 55 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); |
52 | unsigned long flags; | 56 | unsigned long flags; |
53 | 57 | ||
54 | addr += (nr >> SHIFT_PER_LONG); | 58 | addr += (nr >> SHIFT_PER_LONG); |
55 | _atomic_spin_lock_irqsave(addr, flags); | 59 | _atomic_spin_lock_irqsave(addr, flags); |
56 | *addr ^= mask; | 60 | *addr ^= mask; |
57 | _atomic_spin_unlock_irqrestore(addr, flags); | 61 | _atomic_spin_unlock_irqrestore(addr, flags); |
58 | } | 62 | } |
59 | 63 | ||
60 | static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr) | 64 | static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr) |
61 | { | 65 | { |
62 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | 66 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); |
63 | unsigned long old; | 67 | unsigned long old; |
64 | unsigned long flags; | 68 | unsigned long flags; |
65 | int set; | 69 | int set; |
66 | 70 | ||
67 | addr += (nr >> SHIFT_PER_LONG); | 71 | addr += (nr >> SHIFT_PER_LONG); |
68 | _atomic_spin_lock_irqsave(addr, flags); | 72 | _atomic_spin_lock_irqsave(addr, flags); |
69 | old = *addr; | 73 | old = *addr; |
70 | set = (old & mask) ? 1 : 0; | 74 | set = (old & mask) ? 1 : 0; |
71 | if (!set) | 75 | if (!set) |
72 | *addr = old | mask; | 76 | *addr = old | mask; |
73 | _atomic_spin_unlock_irqrestore(addr, flags); | 77 | _atomic_spin_unlock_irqrestore(addr, flags); |
74 | 78 | ||
75 | return set; | 79 | return set; |
76 | } | 80 | } |
77 | 81 | ||
78 | static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr) | 82 | static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr) |
79 | { | 83 | { |
80 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | 84 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); |
81 | unsigned long old; | 85 | unsigned long old; |
82 | unsigned long flags; | 86 | unsigned long flags; |
83 | int set; | 87 | int set; |
84 | 88 | ||
85 | addr += (nr >> SHIFT_PER_LONG); | 89 | addr += (nr >> SHIFT_PER_LONG); |
86 | _atomic_spin_lock_irqsave(addr, flags); | 90 | _atomic_spin_lock_irqsave(addr, flags); |
87 | old = *addr; | 91 | old = *addr; |
88 | set = (old & mask) ? 1 : 0; | 92 | set = (old & mask) ? 1 : 0; |
89 | if (set) | 93 | if (set) |
90 | *addr = old & ~mask; | 94 | *addr = old & ~mask; |
91 | _atomic_spin_unlock_irqrestore(addr, flags); | 95 | _atomic_spin_unlock_irqrestore(addr, flags); |
92 | 96 | ||
93 | return set; | 97 | return set; |
94 | } | 98 | } |
95 | 99 | ||
96 | static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr) | 100 | static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr) |
97 | { | 101 | { |
98 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | 102 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); |
99 | unsigned long oldbit; | 103 | unsigned long oldbit; |
100 | unsigned long flags; | 104 | unsigned long flags; |
101 | 105 | ||
102 | addr += (nr >> SHIFT_PER_LONG); | 106 | addr += (nr >> SHIFT_PER_LONG); |
103 | _atomic_spin_lock_irqsave(addr, flags); | 107 | _atomic_spin_lock_irqsave(addr, flags); |
104 | oldbit = *addr; | 108 | oldbit = *addr; |
105 | *addr = oldbit ^ mask; | 109 | *addr = oldbit ^ mask; |
106 | _atomic_spin_unlock_irqrestore(addr, flags); | 110 | _atomic_spin_unlock_irqrestore(addr, flags); |
107 | 111 | ||
108 | return (oldbit & mask) ? 1 : 0; | 112 | return (oldbit & mask) ? 1 : 0; |
109 | } | 113 | } |
110 | 114 | ||
111 | #include <asm-generic/bitops/non-atomic.h> | 115 | #include <asm-generic/bitops/non-atomic.h> |
112 | 116 | ||
113 | #ifdef __KERNEL__ | 117 | #ifdef __KERNEL__ |
114 | 118 | ||
115 | /** | 119 | /** |
116 | * __ffs - find first bit in word. returns 0 to "BITS_PER_LONG-1". | 120 | * __ffs - find first bit in word. returns 0 to "BITS_PER_LONG-1". |
117 | * @word: The word to search | 121 | * @word: The word to search |
118 | * | 122 | * |
119 | * __ffs() return is undefined if no bit is set. | 123 | * __ffs() return is undefined if no bit is set. |
120 | * | 124 | * |
121 | * 32-bit fast __ffs by LaMont Jones "lamont At hp com". | 125 | * 32-bit fast __ffs by LaMont Jones "lamont At hp com". |
122 | * 64-bit enhancement by Grant Grundler "grundler At parisc-linux org". | 126 | * 64-bit enhancement by Grant Grundler "grundler At parisc-linux org". |
123 | * (with help from willy/jejb to get the semantics right) | 127 | * (with help from willy/jejb to get the semantics right) |
124 | * | 128 | * |
125 | * This algorithm avoids branches by making use of nullification. | 129 | * This algorithm avoids branches by making use of nullification. |
126 | * One side effect of "extr" instructions is it sets PSW[N] bit. | 130 | * One side effect of "extr" instructions is it sets PSW[N] bit. |
127 | * How PSW[N] (nullify next insn) gets set is determined by the | 131 | * How PSW[N] (nullify next insn) gets set is determined by the |
128 | * "condition" field (eg "<>" or "TR" below) in the extr* insn. | 132 | * "condition" field (eg "<>" or "TR" below) in the extr* insn. |
129 | * Only the 1st and one of either the 2nd or 3rd insn will get executed. | 133 | * Only the 1st and one of either the 2nd or 3rd insn will get executed. |
130 | * Each set of 3 insn will get executed in 2 cycles on PA8x00 vs 16 or so | 134 | * Each set of 3 insn will get executed in 2 cycles on PA8x00 vs 16 or so |
131 | * cycles for each mispredicted branch. | 135 | * cycles for each mispredicted branch. |
132 | */ | 136 | */ |
133 | 137 | ||
134 | static __inline__ unsigned long __ffs(unsigned long x) | 138 | static __inline__ unsigned long __ffs(unsigned long x) |
135 | { | 139 | { |
136 | unsigned long ret; | 140 | unsigned long ret; |
137 | 141 | ||
138 | __asm__( | 142 | __asm__( |
139 | #ifdef CONFIG_64BIT | 143 | #ifdef CONFIG_64BIT |
140 | " ldi 63,%1\n" | 144 | " ldi 63,%1\n" |
141 | " extrd,u,*<> %0,63,32,%%r0\n" | 145 | " extrd,u,*<> %0,63,32,%%r0\n" |
142 | " extrd,u,*TR %0,31,32,%0\n" /* move top 32-bits down */ | 146 | " extrd,u,*TR %0,31,32,%0\n" /* move top 32-bits down */ |
143 | " addi -32,%1,%1\n" | 147 | " addi -32,%1,%1\n" |
144 | #else | 148 | #else |
145 | " ldi 31,%1\n" | 149 | " ldi 31,%1\n" |
146 | #endif | 150 | #endif |
147 | " extru,<> %0,31,16,%%r0\n" | 151 | " extru,<> %0,31,16,%%r0\n" |
148 | " extru,TR %0,15,16,%0\n" /* xxxx0000 -> 0000xxxx */ | 152 | " extru,TR %0,15,16,%0\n" /* xxxx0000 -> 0000xxxx */ |
149 | " addi -16,%1,%1\n" | 153 | " addi -16,%1,%1\n" |
150 | " extru,<> %0,31,8,%%r0\n" | 154 | " extru,<> %0,31,8,%%r0\n" |
151 | " extru,TR %0,23,8,%0\n" /* 0000xx00 -> 000000xx */ | 155 | " extru,TR %0,23,8,%0\n" /* 0000xx00 -> 000000xx */ |
152 | " addi -8,%1,%1\n" | 156 | " addi -8,%1,%1\n" |
153 | " extru,<> %0,31,4,%%r0\n" | 157 | " extru,<> %0,31,4,%%r0\n" |
154 | " extru,TR %0,27,4,%0\n" /* 000000x0 -> 0000000x */ | 158 | " extru,TR %0,27,4,%0\n" /* 000000x0 -> 0000000x */ |
155 | " addi -4,%1,%1\n" | 159 | " addi -4,%1,%1\n" |
156 | " extru,<> %0,31,2,%%r0\n" | 160 | " extru,<> %0,31,2,%%r0\n" |
157 | " extru,TR %0,29,2,%0\n" /* 0000000y, 1100b -> 0011b */ | 161 | " extru,TR %0,29,2,%0\n" /* 0000000y, 1100b -> 0011b */ |
158 | " addi -2,%1,%1\n" | 162 | " addi -2,%1,%1\n" |
159 | " extru,= %0,31,1,%%r0\n" /* check last bit */ | 163 | " extru,= %0,31,1,%%r0\n" /* check last bit */ |
160 | " addi -1,%1,%1\n" | 164 | " addi -1,%1,%1\n" |
161 | : "+r" (x), "=r" (ret) ); | 165 | : "+r" (x), "=r" (ret) ); |
162 | return ret; | 166 | return ret; |
163 | } | 167 | } |
164 | 168 | ||
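The extr/nullification sequence above is a branch-free binary search: at each step it either subtracts the current width from the running bit index (the low half is non-empty) or shifts the upper half down (the low half is empty). A portable 32-bit C sketch of that narrowing strategy, for reference only and with the same "undefined for 0" caveat as __ffs():

    /* Reference only, 32-bit, not the kernel code; 0-based like __ffs(). */
    static unsigned long __ffs_ref32(unsigned long x)
    {
    	unsigned long ret = 31;

    	if (x & 0xffffUL) ret -= 16; else x >>= 16;
    	if (x & 0x00ffUL) ret -= 8;  else x >>= 8;
    	if (x & 0x000fUL) ret -= 4;  else x >>= 4;
    	if (x & 0x0003UL) ret -= 2;  else x >>= 2;
    	if (x & 0x0001UL) ret -= 1;
    	return ret;	/* e.g. __ffs_ref32(1) == 0, __ffs_ref32(0x10000) == 16 */
    }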
165 | #include <asm-generic/bitops/ffz.h> | 169 | #include <asm-generic/bitops/ffz.h> |
166 | 170 | ||
167 | /* | 171 | /* |
168 | * ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set) | 172 | * ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set) |
169 | * This is defined the same way as the libc and compiler builtin | 173 | * This is defined the same way as the libc and compiler builtin |
170 | * ffs routines, therefore differs in spirit from the above ffz (man ffs). | 174 | * ffs routines, therefore differs in spirit from the above ffz (man ffs). |
171 | */ | 175 | */ |
172 | static __inline__ int ffs(int x) | 176 | static __inline__ int ffs(int x) |
173 | { | 177 | { |
174 | return x ? (__ffs((unsigned long)x) + 1) : 0; | 178 | return x ? (__ffs((unsigned long)x) + 1) : 0; |
175 | } | 179 | } |
176 | 180 | ||
177 | /* | 181 | /* |
178 | * fls: find last (most significant) bit set. | 182 | * fls: find last (most significant) bit set. |
179 | * fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. | 183 | * fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. |
180 | */ | 184 | */ |
181 | 185 | ||
182 | static __inline__ int fls(int x) | 186 | static __inline__ int fls(int x) |
183 | { | 187 | { |
184 | int ret; | 188 | int ret; |
185 | if (!x) | 189 | if (!x) |
186 | return 0; | 190 | return 0; |
187 | 191 | ||
188 | __asm__( | 192 | __asm__( |
189 | " ldi 1,%1\n" | 193 | " ldi 1,%1\n" |
190 | " extru,<> %0,15,16,%%r0\n" | 194 | " extru,<> %0,15,16,%%r0\n" |
191 | " zdep,TR %0,15,16,%0\n" /* xxxx0000 */ | 195 | " zdep,TR %0,15,16,%0\n" /* xxxx0000 */ |
192 | " addi 16,%1,%1\n" | 196 | " addi 16,%1,%1\n" |
193 | " extru,<> %0,7,8,%%r0\n" | 197 | " extru,<> %0,7,8,%%r0\n" |
194 | " zdep,TR %0,23,24,%0\n" /* xx000000 */ | 198 | " zdep,TR %0,23,24,%0\n" /* xx000000 */ |
195 | " addi 8,%1,%1\n" | 199 | " addi 8,%1,%1\n" |
196 | " extru,<> %0,3,4,%%r0\n" | 200 | " extru,<> %0,3,4,%%r0\n" |
197 | " zdep,TR %0,27,28,%0\n" /* x0000000 */ | 201 | " zdep,TR %0,27,28,%0\n" /* x0000000 */ |
198 | " addi 4,%1,%1\n" | 202 | " addi 4,%1,%1\n" |
199 | " extru,<> %0,1,2,%%r0\n" | 203 | " extru,<> %0,1,2,%%r0\n" |
200 | " zdep,TR %0,29,30,%0\n" /* y0000000 (y&3 = 0) */ | 204 | " zdep,TR %0,29,30,%0\n" /* y0000000 (y&3 = 0) */ |
201 | " addi 2,%1,%1\n" | 205 | " addi 2,%1,%1\n" |
202 | " extru,= %0,0,1,%%r0\n" | 206 | " extru,= %0,0,1,%%r0\n" |
203 | " addi 1,%1,%1\n" /* if y & 8, add 1 */ | 207 | " addi 1,%1,%1\n" /* if y & 8, add 1 */ |
204 | : "+r" (x), "=r" (ret) ); | 208 | : "+r" (x), "=r" (ret) ); |
205 | 209 | ||
206 | return ret; | 210 | return ret; |
207 | } | 211 | } |
208 | 212 | ||
209 | #include <asm-generic/bitops/fls64.h> | 213 | #include <asm-generic/bitops/fls64.h> |
210 | #include <asm-generic/bitops/hweight.h> | 214 | #include <asm-generic/bitops/hweight.h> |
211 | #include <asm-generic/bitops/lock.h> | 215 | #include <asm-generic/bitops/lock.h> |
212 | #include <asm-generic/bitops/sched.h> | 216 | #include <asm-generic/bitops/sched.h> |
213 | 217 | ||
214 | #endif /* __KERNEL__ */ | 218 | #endif /* __KERNEL__ */ |
215 | 219 | ||
216 | #include <asm-generic/bitops/find.h> | 220 | #include <asm-generic/bitops/find.h> |
217 | 221 | ||
218 | #ifdef __KERNEL__ | 222 | #ifdef __KERNEL__ |
219 | 223 | ||
220 | #include <asm-generic/bitops/ext2-non-atomic.h> | 224 | #include <asm-generic/bitops/ext2-non-atomic.h> |
221 | 225 | ||
222 | /* '3' is bits per byte */ | 226 | /* '3' is bits per byte */ |
223 | #define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3) | 227 | #define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3) |
224 | 228 | ||
225 | #define ext2_set_bit_atomic(l,nr,addr) \ | 229 | #define ext2_set_bit_atomic(l,nr,addr) \ |
226 | test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr) | 230 | test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr) |
227 | #define ext2_clear_bit_atomic(l,nr,addr) \ | 231 | #define ext2_clear_bit_atomic(l,nr,addr) \ |
228 | test_and_clear_bit( (nr) ^ LE_BYTE_ADDR, (unsigned long *)addr) | 232 | test_and_clear_bit( (nr) ^ LE_BYTE_ADDR, (unsigned long *)addr) |
229 | 233 | ||
230 | #endif /* __KERNEL__ */ | 234 | #endif /* __KERNEL__ */ |
231 | 235 | ||
232 | #include <asm-generic/bitops/minix-le.h> | 236 | #include <asm-generic/bitops/minix-le.h> |
233 | 237 | ||
234 | #endif /* _PARISC_BITOPS_H */ | 238 | #endif /* _PARISC_BITOPS_H */ |
235 | 239 |
include/asm-powerpc/bitops.h
1 | /* | 1 | /* |
2 | * PowerPC atomic bit operations. | 2 | * PowerPC atomic bit operations. |
3 | * | 3 | * |
4 | * Merged version by David Gibson <david@gibson.dropbear.id.au>. | 4 | * Merged version by David Gibson <david@gibson.dropbear.id.au>. |
5 | * Based on ppc64 versions by: Dave Engebretsen, Todd Inglett, Don | 5 | * Based on ppc64 versions by: Dave Engebretsen, Todd Inglett, Don |
6 | * Reed, Pat McCarthy, Peter Bergner, Anton Blanchard. They | 6 | * Reed, Pat McCarthy, Peter Bergner, Anton Blanchard. They |
7 | * originally took it from the ppc32 code. | 7 | * originally took it from the ppc32 code. |
8 | * | 8 | * |
9 | * Within a word, bits are numbered LSB first. Lots of places make | 9 | * Within a word, bits are numbered LSB first. Lots of places make |
10 | * this assumption by directly testing bits with (val & (1<<nr)). | 10 | * this assumption by directly testing bits with (val & (1<<nr)). |
11 | * This can cause confusion for large (> 1 word) bitmaps on a | 11 | * This can cause confusion for large (> 1 word) bitmaps on a |
12 | * big-endian system because, unlike little endian, the number of each | 12 | * big-endian system because, unlike little endian, the number of each |
13 | * bit depends on the word size. | 13 | * bit depends on the word size. |
14 | * | 14 | * |
15 | * The bitop functions are defined to work on unsigned longs, so for a | 15 | * The bitop functions are defined to work on unsigned longs, so for a |
16 | * ppc64 system the bits end up numbered: | 16 | * ppc64 system the bits end up numbered: |
17 | * |63..............0|127............64|191...........128|255...........192| | 17 | * |63..............0|127............64|191...........128|255...........192| |
18 | * and on ppc32: | 18 | * and on ppc32: |
19 | * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224| | 19 | * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224| |
20 | * | 20 | * |
21 | * There are a few little-endian macros used mostly for filesystem | 21 | * There are a few little-endian macros used mostly for filesystem |
22 | * bitmaps, these work on similar bit arrays layouts, but | 22 | * bitmaps, these work on similar bit arrays layouts, but |
23 | * byte-oriented: | 23 | * byte-oriented: |
24 | * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56| | 24 | * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56| |
25 | * | 25 | * |
26 | * The main difference is that bit 3-5 (64b) or 3-4 (32b) in the bit | 26 | * The main difference is that bit 3-5 (64b) or 3-4 (32b) in the bit |
27 | * number field needs to be reversed compared to the big-endian bit | 27 | * number field needs to be reversed compared to the big-endian bit |
28 | * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b). | 28 | * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b). |
29 | * | 29 | * |
30 | * This program is free software; you can redistribute it and/or | 30 | * This program is free software; you can redistribute it and/or |
31 | * modify it under the terms of the GNU General Public License | 31 | * modify it under the terms of the GNU General Public License |
32 | * as published by the Free Software Foundation; either version | 32 | * as published by the Free Software Foundation; either version |
33 | * 2 of the License, or (at your option) any later version. | 33 | * 2 of the License, or (at your option) any later version. |
34 | */ | 34 | */ |
35 | 35 | ||
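A worked instance of the XOR swizzle described in the comment: with 64-bit longs the constant works out to (BITS_PER_LONG - 1) & ~0x7 == 0x38, and a byte-oriented (little-endian) bit number n maps to native bit n ^ 0x38. This is only an illustration of the arithmetic, not a new interface:

    /* LE bit 3 = bit 3 of byte 0 of the on-disk bitmap.
     * 3 ^ 0x38 == 59, i.e. bit 3 of the most significant byte of the
     * big-endian unsigned long -- which is exactly where memory byte 0
     * sits inside that long, so the native bitop touches the right byte.
     * On 32-bit the same reasoning applies with 0x18 instead of 0x38. */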
36 | #ifndef _ASM_POWERPC_BITOPS_H | 36 | #ifndef _ASM_POWERPC_BITOPS_H |
37 | #define _ASM_POWERPC_BITOPS_H | 37 | #define _ASM_POWERPC_BITOPS_H |
38 | 38 | ||
39 | #ifdef __KERNEL__ | 39 | #ifdef __KERNEL__ |
40 | 40 | ||
41 | #ifndef _LINUX_BITOPS_H | ||
42 | #error only <linux/bitops.h> can be included directly | ||
43 | #endif | ||
44 | |||
41 | #include <linux/compiler.h> | 45 | #include <linux/compiler.h> |
42 | #include <asm/asm-compat.h> | 46 | #include <asm/asm-compat.h> |
43 | #include <asm/synch.h> | 47 | #include <asm/synch.h> |
44 | 48 | ||
45 | /* | 49 | /* |
46 | * clear_bit doesn't imply a memory barrier | 50 | * clear_bit doesn't imply a memory barrier |
47 | */ | 51 | */ |
48 | #define smp_mb__before_clear_bit() smp_mb() | 52 | #define smp_mb__before_clear_bit() smp_mb() |
49 | #define smp_mb__after_clear_bit() smp_mb() | 53 | #define smp_mb__after_clear_bit() smp_mb() |
50 | 54 | ||
51 | #define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) | 55 | #define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) |
52 | #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) | 56 | #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) |
53 | #define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) | 57 | #define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) |
54 | 58 | ||
55 | static __inline__ void set_bit(int nr, volatile unsigned long *addr) | 59 | static __inline__ void set_bit(int nr, volatile unsigned long *addr) |
56 | { | 60 | { |
57 | unsigned long old; | 61 | unsigned long old; |
58 | unsigned long mask = BITOP_MASK(nr); | 62 | unsigned long mask = BITOP_MASK(nr); |
59 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | 63 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); |
60 | 64 | ||
61 | __asm__ __volatile__( | 65 | __asm__ __volatile__( |
62 | "1:" PPC_LLARX "%0,0,%3 # set_bit\n" | 66 | "1:" PPC_LLARX "%0,0,%3 # set_bit\n" |
63 | "or %0,%0,%2\n" | 67 | "or %0,%0,%2\n" |
64 | PPC405_ERR77(0,%3) | 68 | PPC405_ERR77(0,%3) |
65 | PPC_STLCX "%0,0,%3\n" | 69 | PPC_STLCX "%0,0,%3\n" |
66 | "bne- 1b" | 70 | "bne- 1b" |
67 | : "=&r" (old), "+m" (*p) | 71 | : "=&r" (old), "+m" (*p) |
68 | : "r" (mask), "r" (p) | 72 | : "r" (mask), "r" (p) |
69 | : "cc" ); | 73 | : "cc" ); |
70 | } | 74 | } |
71 | 75 | ||
72 | static __inline__ void clear_bit(int nr, volatile unsigned long *addr) | 76 | static __inline__ void clear_bit(int nr, volatile unsigned long *addr) |
73 | { | 77 | { |
74 | unsigned long old; | 78 | unsigned long old; |
75 | unsigned long mask = BITOP_MASK(nr); | 79 | unsigned long mask = BITOP_MASK(nr); |
76 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | 80 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); |
77 | 81 | ||
78 | __asm__ __volatile__( | 82 | __asm__ __volatile__( |
79 | "1:" PPC_LLARX "%0,0,%3 # clear_bit\n" | 83 | "1:" PPC_LLARX "%0,0,%3 # clear_bit\n" |
80 | "andc %0,%0,%2\n" | 84 | "andc %0,%0,%2\n" |
81 | PPC405_ERR77(0,%3) | 85 | PPC405_ERR77(0,%3) |
82 | PPC_STLCX "%0,0,%3\n" | 86 | PPC_STLCX "%0,0,%3\n" |
83 | "bne- 1b" | 87 | "bne- 1b" |
84 | : "=&r" (old), "+m" (*p) | 88 | : "=&r" (old), "+m" (*p) |
85 | : "r" (mask), "r" (p) | 89 | : "r" (mask), "r" (p) |
86 | : "cc" ); | 90 | : "cc" ); |
87 | } | 91 | } |
88 | 92 | ||
89 | static __inline__ void clear_bit_unlock(int nr, volatile unsigned long *addr) | 93 | static __inline__ void clear_bit_unlock(int nr, volatile unsigned long *addr) |
90 | { | 94 | { |
91 | unsigned long old; | 95 | unsigned long old; |
92 | unsigned long mask = BITOP_MASK(nr); | 96 | unsigned long mask = BITOP_MASK(nr); |
93 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | 97 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); |
94 | 98 | ||
95 | __asm__ __volatile__( | 99 | __asm__ __volatile__( |
96 | LWSYNC_ON_SMP | 100 | LWSYNC_ON_SMP |
97 | "1:" PPC_LLARX "%0,0,%3 # clear_bit_unlock\n" | 101 | "1:" PPC_LLARX "%0,0,%3 # clear_bit_unlock\n" |
98 | "andc %0,%0,%2\n" | 102 | "andc %0,%0,%2\n" |
99 | PPC405_ERR77(0,%3) | 103 | PPC405_ERR77(0,%3) |
100 | PPC_STLCX "%0,0,%3\n" | 104 | PPC_STLCX "%0,0,%3\n" |
101 | "bne- 1b" | 105 | "bne- 1b" |
102 | : "=&r" (old), "+m" (*p) | 106 | : "=&r" (old), "+m" (*p) |
103 | : "r" (mask), "r" (p) | 107 | : "r" (mask), "r" (p) |
104 | : "cc", "memory"); | 108 | : "cc", "memory"); |
105 | } | 109 | } |
106 | 110 | ||
107 | static __inline__ void change_bit(int nr, volatile unsigned long *addr) | 111 | static __inline__ void change_bit(int nr, volatile unsigned long *addr) |
108 | { | 112 | { |
109 | unsigned long old; | 113 | unsigned long old; |
110 | unsigned long mask = BITOP_MASK(nr); | 114 | unsigned long mask = BITOP_MASK(nr); |
111 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | 115 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); |
112 | 116 | ||
113 | __asm__ __volatile__( | 117 | __asm__ __volatile__( |
114 | "1:" PPC_LLARX "%0,0,%3 # change_bit\n" | 118 | "1:" PPC_LLARX "%0,0,%3 # change_bit\n" |
115 | "xor %0,%0,%2\n" | 119 | "xor %0,%0,%2\n" |
116 | PPC405_ERR77(0,%3) | 120 | PPC405_ERR77(0,%3) |
117 | PPC_STLCX "%0,0,%3\n" | 121 | PPC_STLCX "%0,0,%3\n" |
118 | "bne- 1b" | 122 | "bne- 1b" |
119 | : "=&r" (old), "+m" (*p) | 123 | : "=&r" (old), "+m" (*p) |
120 | : "r" (mask), "r" (p) | 124 | : "r" (mask), "r" (p) |
121 | : "cc" ); | 125 | : "cc" ); |
122 | } | 126 | } |
123 | 127 | ||
124 | static __inline__ int test_and_set_bit(unsigned long nr, | 128 | static __inline__ int test_and_set_bit(unsigned long nr, |
125 | volatile unsigned long *addr) | 129 | volatile unsigned long *addr) |
126 | { | 130 | { |
127 | unsigned long old, t; | 131 | unsigned long old, t; |
128 | unsigned long mask = BITOP_MASK(nr); | 132 | unsigned long mask = BITOP_MASK(nr); |
129 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | 133 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); |
130 | 134 | ||
131 | __asm__ __volatile__( | 135 | __asm__ __volatile__( |
132 | LWSYNC_ON_SMP | 136 | LWSYNC_ON_SMP |
133 | "1:" PPC_LLARX "%0,0,%3 # test_and_set_bit\n" | 137 | "1:" PPC_LLARX "%0,0,%3 # test_and_set_bit\n" |
134 | "or %1,%0,%2 \n" | 138 | "or %1,%0,%2 \n" |
135 | PPC405_ERR77(0,%3) | 139 | PPC405_ERR77(0,%3) |
136 | PPC_STLCX "%1,0,%3 \n" | 140 | PPC_STLCX "%1,0,%3 \n" |
137 | "bne- 1b" | 141 | "bne- 1b" |
138 | ISYNC_ON_SMP | 142 | ISYNC_ON_SMP |
139 | : "=&r" (old), "=&r" (t) | 143 | : "=&r" (old), "=&r" (t) |
140 | : "r" (mask), "r" (p) | 144 | : "r" (mask), "r" (p) |
141 | : "cc", "memory"); | 145 | : "cc", "memory"); |
142 | 146 | ||
143 | return (old & mask) != 0; | 147 | return (old & mask) != 0; |
144 | } | 148 | } |
145 | 149 | ||
146 | static __inline__ int test_and_set_bit_lock(unsigned long nr, | 150 | static __inline__ int test_and_set_bit_lock(unsigned long nr, |
147 | volatile unsigned long *addr) | 151 | volatile unsigned long *addr) |
148 | { | 152 | { |
149 | unsigned long old, t; | 153 | unsigned long old, t; |
150 | unsigned long mask = BITOP_MASK(nr); | 154 | unsigned long mask = BITOP_MASK(nr); |
151 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | 155 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); |
152 | 156 | ||
153 | __asm__ __volatile__( | 157 | __asm__ __volatile__( |
154 | "1:" PPC_LLARX "%0,0,%3 # test_and_set_bit_lock\n" | 158 | "1:" PPC_LLARX "%0,0,%3 # test_and_set_bit_lock\n" |
155 | "or %1,%0,%2 \n" | 159 | "or %1,%0,%2 \n" |
156 | PPC405_ERR77(0,%3) | 160 | PPC405_ERR77(0,%3) |
157 | PPC_STLCX "%1,0,%3 \n" | 161 | PPC_STLCX "%1,0,%3 \n" |
158 | "bne- 1b" | 162 | "bne- 1b" |
159 | ISYNC_ON_SMP | 163 | ISYNC_ON_SMP |
160 | : "=&r" (old), "=&r" (t) | 164 | : "=&r" (old), "=&r" (t) |
161 | : "r" (mask), "r" (p) | 165 | : "r" (mask), "r" (p) |
162 | : "cc", "memory"); | 166 | : "cc", "memory"); |
163 | 167 | ||
164 | return (old & mask) != 0; | 168 | return (old & mask) != 0; |
165 | } | 169 | } |
166 | 170 | ||
167 | static __inline__ int test_and_clear_bit(unsigned long nr, | 171 | static __inline__ int test_and_clear_bit(unsigned long nr, |
168 | volatile unsigned long *addr) | 172 | volatile unsigned long *addr) |
169 | { | 173 | { |
170 | unsigned long old, t; | 174 | unsigned long old, t; |
171 | unsigned long mask = BITOP_MASK(nr); | 175 | unsigned long mask = BITOP_MASK(nr); |
172 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | 176 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); |
173 | 177 | ||
174 | __asm__ __volatile__( | 178 | __asm__ __volatile__( |
175 | LWSYNC_ON_SMP | 179 | LWSYNC_ON_SMP |
176 | "1:" PPC_LLARX "%0,0,%3 # test_and_clear_bit\n" | 180 | "1:" PPC_LLARX "%0,0,%3 # test_and_clear_bit\n" |
177 | "andc %1,%0,%2 \n" | 181 | "andc %1,%0,%2 \n" |
178 | PPC405_ERR77(0,%3) | 182 | PPC405_ERR77(0,%3) |
179 | PPC_STLCX "%1,0,%3 \n" | 183 | PPC_STLCX "%1,0,%3 \n" |
180 | "bne- 1b" | 184 | "bne- 1b" |
181 | ISYNC_ON_SMP | 185 | ISYNC_ON_SMP |
182 | : "=&r" (old), "=&r" (t) | 186 | : "=&r" (old), "=&r" (t) |
183 | : "r" (mask), "r" (p) | 187 | : "r" (mask), "r" (p) |
184 | : "cc", "memory"); | 188 | : "cc", "memory"); |
185 | 189 | ||
186 | return (old & mask) != 0; | 190 | return (old & mask) != 0; |
187 | } | 191 | } |
188 | 192 | ||
189 | static __inline__ int test_and_change_bit(unsigned long nr, | 193 | static __inline__ int test_and_change_bit(unsigned long nr, |
190 | volatile unsigned long *addr) | 194 | volatile unsigned long *addr) |
191 | { | 195 | { |
192 | unsigned long old, t; | 196 | unsigned long old, t; |
193 | unsigned long mask = BITOP_MASK(nr); | 197 | unsigned long mask = BITOP_MASK(nr); |
194 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | 198 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); |
195 | 199 | ||
196 | __asm__ __volatile__( | 200 | __asm__ __volatile__( |
197 | LWSYNC_ON_SMP | 201 | LWSYNC_ON_SMP |
198 | "1:" PPC_LLARX "%0,0,%3 # test_and_change_bit\n" | 202 | "1:" PPC_LLARX "%0,0,%3 # test_and_change_bit\n" |
199 | "xor %1,%0,%2 \n" | 203 | "xor %1,%0,%2 \n" |
200 | PPC405_ERR77(0,%3) | 204 | PPC405_ERR77(0,%3) |
201 | PPC_STLCX "%1,0,%3 \n" | 205 | PPC_STLCX "%1,0,%3 \n" |
202 | "bne- 1b" | 206 | "bne- 1b" |
203 | ISYNC_ON_SMP | 207 | ISYNC_ON_SMP |
204 | : "=&r" (old), "=&r" (t) | 208 | : "=&r" (old), "=&r" (t) |
205 | : "r" (mask), "r" (p) | 209 | : "r" (mask), "r" (p) |
206 | : "cc", "memory"); | 210 | : "cc", "memory"); |
207 | 211 | ||
208 | return (old & mask) != 0; | 212 | return (old & mask) != 0; |
209 | } | 213 | } |
210 | 214 | ||
211 | static __inline__ void set_bits(unsigned long mask, unsigned long *addr) | 215 | static __inline__ void set_bits(unsigned long mask, unsigned long *addr) |
212 | { | 216 | { |
213 | unsigned long old; | 217 | unsigned long old; |
214 | 218 | ||
215 | __asm__ __volatile__( | 219 | __asm__ __volatile__( |
216 | "1:" PPC_LLARX "%0,0,%3 # set_bits\n" | 220 | "1:" PPC_LLARX "%0,0,%3 # set_bits\n" |
217 | "or %0,%0,%2\n" | 221 | "or %0,%0,%2\n" |
218 | PPC_STLCX "%0,0,%3\n" | 222 | PPC_STLCX "%0,0,%3\n" |
219 | "bne- 1b" | 223 | "bne- 1b" |
220 | : "=&r" (old), "+m" (*addr) | 224 | : "=&r" (old), "+m" (*addr) |
221 | : "r" (mask), "r" (addr) | 225 | : "r" (mask), "r" (addr) |
222 | : "cc"); | 226 | : "cc"); |
223 | } | 227 | } |
224 | 228 | ||
225 | #include <asm-generic/bitops/non-atomic.h> | 229 | #include <asm-generic/bitops/non-atomic.h> |
226 | 230 | ||
227 | static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr) | 231 | static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr) |
228 | { | 232 | { |
229 | __asm__ __volatile__(LWSYNC_ON_SMP "" ::: "memory"); | 233 | __asm__ __volatile__(LWSYNC_ON_SMP "" ::: "memory"); |
230 | __clear_bit(nr, addr); | 234 | __clear_bit(nr, addr); |
231 | } | 235 | } |
232 | 236 | ||
233 | /* | 237 | /* |
234 | * Return the zero-based bit position (LE, not IBM bit numbering) of | 238 | * Return the zero-based bit position (LE, not IBM bit numbering) of |
235 | * the most significant 1-bit in a double word. | 239 | * the most significant 1-bit in a double word. |
236 | */ | 240 | */ |
237 | static __inline__ __attribute__((const)) | 241 | static __inline__ __attribute__((const)) |
238 | int __ilog2(unsigned long x) | 242 | int __ilog2(unsigned long x) |
239 | { | 243 | { |
240 | int lz; | 244 | int lz; |
241 | 245 | ||
242 | asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x)); | 246 | asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x)); |
243 | return BITS_PER_LONG - 1 - lz; | 247 | return BITS_PER_LONG - 1 - lz; |
244 | } | 248 | } |
245 | 249 | ||
246 | static inline __attribute__((const)) | 250 | static inline __attribute__((const)) |
247 | int __ilog2_u32(u32 n) | 251 | int __ilog2_u32(u32 n) |
248 | { | 252 | { |
249 | int bit; | 253 | int bit; |
250 | asm ("cntlzw %0,%1" : "=r" (bit) : "r" (n)); | 254 | asm ("cntlzw %0,%1" : "=r" (bit) : "r" (n)); |
251 | return 31 - bit; | 255 | return 31 - bit; |
252 | } | 256 | } |
253 | 257 | ||
254 | #ifdef __powerpc64__ | 258 | #ifdef __powerpc64__ |
255 | static inline __attribute__((const)) | 259 | static inline __attribute__((const)) |
256 | int __ilog2_u64(u64 n) | 260 | int __ilog2_u64(u64 n) |
257 | { | 261 | { |
258 | int bit; | 262 | int bit; |
259 | asm ("cntlzd %0,%1" : "=r" (bit) : "r" (n)); | 263 | asm ("cntlzd %0,%1" : "=r" (bit) : "r" (n)); |
260 | return 63 - bit; | 264 | return 63 - bit; |
261 | } | 265 | } |
262 | #endif | 266 | #endif |
263 | 267 | ||
264 | /* | 268 | /* |
265 | * Determines the bit position of the least significant 0 bit in the | 269 | * Determines the bit position of the least significant 0 bit in the |
266 | * specified double word. The returned bit position will be | 270 | * specified double word. The returned bit position will be |
267 | * zero-based, starting from the right side (63/31 - 0). | 271 | * zero-based, starting from the right side (63/31 - 0). |
268 | */ | 272 | */ |
269 | static __inline__ unsigned long ffz(unsigned long x) | 273 | static __inline__ unsigned long ffz(unsigned long x) |
270 | { | 274 | { |
271 | /* no zero exists anywhere in the 8 byte area. */ | 275 | /* no zero exists anywhere in the 8 byte area. */ |
272 | if ((x = ~x) == 0) | 276 | if ((x = ~x) == 0) |
273 | return BITS_PER_LONG; | 277 | return BITS_PER_LONG; |
274 | 278 | ||
275 | /* | 279 | /* |
276 | * Calculate the bit position of the least significant '1' bit in x | 280 | * Calculate the bit position of the least significant '1' bit in x |
277 | * (since x has been changed this will actually be the least significant | 281 | * (since x has been changed this will actually be the least significant |
278 | * '0' bit in the original x). Note: (x & -x) gives us a mask that | 282 | * '0' bit in the original x). Note: (x & -x) gives us a mask that |
279 | * is the least significant (RIGHT-most) 1-bit of the value in x. | 283 | * is the least significant (RIGHT-most) 1-bit of the value in x. |
280 | */ | 284 | */ |
281 | return __ilog2(x & -x); | 285 | return __ilog2(x & -x); |
282 | } | 286 | } |
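The (x & -x) isolation trick that the comment above relies on can be checked with a small, self-contained userspace sketch. This is illustrative only; demo_ilog2() is a portable, hypothetical stand-in for the cntlz-based __ilog2():

    #include <stdio.h>

    /* portable stand-in for the cntlz-based __ilog2(): position of the MSB */
    static int demo_ilog2(unsigned long x)
    {
            int bit = -1;

            while (x) {
                    x >>= 1;
                    bit++;
            }
            return bit;
    }

    /* mirrors the ffz() logic above: invert, then isolate the lowest 1-bit */
    static unsigned long demo_ffz(unsigned long x)
    {
            if ((x = ~x) == 0)
                    return sizeof(unsigned long) * 8;
            return demo_ilog2(x & -x);
    }

    int main(void)
    {
            printf("%lu\n", demo_ffz(0x0fUL));  /* lowest zero bit of 0x0f: prints 4 */
            return 0;
    }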
283 | 287 | ||
284 | static __inline__ int __ffs(unsigned long x) | 288 | static __inline__ int __ffs(unsigned long x) |
285 | { | 289 | { |
286 | return __ilog2(x & -x); | 290 | return __ilog2(x & -x); |
287 | } | 291 | } |
288 | 292 | ||
289 | /* | 293 | /* |
290 | * ffs: find first bit set. This is defined the same way as | 294 | * ffs: find first bit set. This is defined the same way as |
291 | * the libc and compiler builtin ffs routines, therefore | 295 | * the libc and compiler builtin ffs routines, therefore |
292 | * differs in spirit from the above ffz (man ffs). | 296 | * differs in spirit from the above ffz (man ffs). |
293 | */ | 297 | */ |
294 | static __inline__ int ffs(int x) | 298 | static __inline__ int ffs(int x) |
295 | { | 299 | { |
296 | unsigned long i = (unsigned long)x; | 300 | unsigned long i = (unsigned long)x; |
297 | return __ilog2(i & -i) + 1; | 301 | return __ilog2(i & -i) + 1; |
298 | } | 302 | } |
299 | 303 | ||
300 | /* | 304 | /* |
301 | * fls: find last (most-significant) bit set. | 305 | * fls: find last (most-significant) bit set. |
302 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. | 306 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. |
303 | */ | 307 | */ |
304 | static __inline__ int fls(unsigned int x) | 308 | static __inline__ int fls(unsigned int x) |
305 | { | 309 | { |
306 | int lz; | 310 | int lz; |
307 | 311 | ||
308 | asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x)); | 312 | asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x)); |
309 | return 32 - lz; | 313 | return 32 - lz; |
310 | } | 314 | } |
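For reference, the fls() semantics noted in the comment (fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32) can be reproduced with a portable, hypothetical stand-in for cntlzw (illustrative only, not part of the patch):

    #include <stdio.h>

    /* portable model of the cntlzw-based fls(): 32 minus the leading-zero count */
    static int demo_fls(unsigned int x)
    {
            int bits = 0;

            while (x) {
                    x >>= 1;
                    bits++;
            }
            return bits;
    }

    int main(void)
    {
            printf("%d %d %d\n", demo_fls(0), demo_fls(1), demo_fls(0x80000000u));
            /* prints: 0 1 32 */
            return 0;
    }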
311 | #include <asm-generic/bitops/fls64.h> | 315 | #include <asm-generic/bitops/fls64.h> |
312 | 316 | ||
313 | #include <asm-generic/bitops/hweight.h> | 317 | #include <asm-generic/bitops/hweight.h> |
314 | 318 | ||
315 | #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) | 319 | #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) |
316 | unsigned long find_next_zero_bit(const unsigned long *addr, | 320 | unsigned long find_next_zero_bit(const unsigned long *addr, |
317 | unsigned long size, unsigned long offset); | 321 | unsigned long size, unsigned long offset); |
318 | /** | 322 | /** |
319 | * find_first_bit - find the first set bit in a memory region | 323 | * find_first_bit - find the first set bit in a memory region |
320 | * @addr: The address to start the search at | 324 | * @addr: The address to start the search at |
321 | * @size: The maximum size to search | 325 | * @size: The maximum size to search |
322 | * | 326 | * |
323 | * Returns the bit-number of the first set bit, not the number of the byte | 327 | * Returns the bit-number of the first set bit, not the number of the byte |
324 | * containing a bit. | 328 | * containing a bit. |
325 | */ | 329 | */ |
326 | #define find_first_bit(addr, size) find_next_bit((addr), (size), 0) | 330 | #define find_first_bit(addr, size) find_next_bit((addr), (size), 0) |
327 | unsigned long find_next_bit(const unsigned long *addr, | 331 | unsigned long find_next_bit(const unsigned long *addr, |
328 | unsigned long size, unsigned long offset); | 332 | unsigned long size, unsigned long offset); |
329 | 333 | ||
330 | /* Little-endian versions */ | 334 | /* Little-endian versions */ |
331 | 335 | ||
332 | static __inline__ int test_le_bit(unsigned long nr, | 336 | static __inline__ int test_le_bit(unsigned long nr, |
333 | __const__ unsigned long *addr) | 337 | __const__ unsigned long *addr) |
334 | { | 338 | { |
335 | __const__ unsigned char *tmp = (__const__ unsigned char *) addr; | 339 | __const__ unsigned char *tmp = (__const__ unsigned char *) addr; |
336 | return (tmp[nr >> 3] >> (nr & 7)) & 1; | 340 | return (tmp[nr >> 3] >> (nr & 7)) & 1; |
337 | } | 341 | } |
338 | 342 | ||
339 | #define __set_le_bit(nr, addr) \ | 343 | #define __set_le_bit(nr, addr) \ |
340 | __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | 344 | __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
341 | #define __clear_le_bit(nr, addr) \ | 345 | #define __clear_le_bit(nr, addr) \ |
342 | __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | 346 | __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
343 | 347 | ||
344 | #define test_and_set_le_bit(nr, addr) \ | 348 | #define test_and_set_le_bit(nr, addr) \ |
345 | test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | 349 | test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
346 | #define test_and_clear_le_bit(nr, addr) \ | 350 | #define test_and_clear_le_bit(nr, addr) \ |
347 | test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | 351 | test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
348 | 352 | ||
349 | #define __test_and_set_le_bit(nr, addr) \ | 353 | #define __test_and_set_le_bit(nr, addr) \ |
350 | __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | 354 | __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
351 | #define __test_and_clear_le_bit(nr, addr) \ | 355 | #define __test_and_clear_le_bit(nr, addr) \ |
352 | __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | 356 | __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
353 | 357 | ||
354 | #define find_first_zero_le_bit(addr, size) generic_find_next_zero_le_bit((addr), (size), 0) | 358 | #define find_first_zero_le_bit(addr, size) generic_find_next_zero_le_bit((addr), (size), 0) |
355 | unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, | 359 | unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, |
356 | unsigned long size, unsigned long offset); | 360 | unsigned long size, unsigned long offset); |
357 | 361 | ||
358 | /* Bitmap functions for the ext2 filesystem */ | 362 | /* Bitmap functions for the ext2 filesystem */ |
359 | 363 | ||
360 | #define ext2_set_bit(nr,addr) \ | 364 | #define ext2_set_bit(nr,addr) \ |
361 | __test_and_set_le_bit((nr), (unsigned long*)addr) | 365 | __test_and_set_le_bit((nr), (unsigned long*)addr) |
362 | #define ext2_clear_bit(nr, addr) \ | 366 | #define ext2_clear_bit(nr, addr) \ |
363 | __test_and_clear_le_bit((nr), (unsigned long*)addr) | 367 | __test_and_clear_le_bit((nr), (unsigned long*)addr) |
364 | 368 | ||
365 | #define ext2_set_bit_atomic(lock, nr, addr) \ | 369 | #define ext2_set_bit_atomic(lock, nr, addr) \ |
366 | test_and_set_le_bit((nr), (unsigned long*)addr) | 370 | test_and_set_le_bit((nr), (unsigned long*)addr) |
367 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | 371 | #define ext2_clear_bit_atomic(lock, nr, addr) \ |
368 | test_and_clear_le_bit((nr), (unsigned long*)addr) | 372 | test_and_clear_le_bit((nr), (unsigned long*)addr) |
369 | 373 | ||
370 | #define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr) | 374 | #define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr) |
371 | 375 | ||
372 | #define ext2_find_first_zero_bit(addr, size) \ | 376 | #define ext2_find_first_zero_bit(addr, size) \ |
373 | find_first_zero_le_bit((unsigned long*)addr, size) | 377 | find_first_zero_le_bit((unsigned long*)addr, size) |
374 | #define ext2_find_next_zero_bit(addr, size, off) \ | 378 | #define ext2_find_next_zero_bit(addr, size, off) \ |
375 | generic_find_next_zero_le_bit((unsigned long*)addr, size, off) | 379 | generic_find_next_zero_le_bit((unsigned long*)addr, size, off) |
376 | 380 | ||
377 | /* Bitmap functions for the minix filesystem. */ | 381 | /* Bitmap functions for the minix filesystem. */ |
378 | 382 | ||
379 | #define minix_test_and_set_bit(nr,addr) \ | 383 | #define minix_test_and_set_bit(nr,addr) \ |
380 | __test_and_set_le_bit(nr, (unsigned long *)addr) | 384 | __test_and_set_le_bit(nr, (unsigned long *)addr) |
381 | #define minix_set_bit(nr,addr) \ | 385 | #define minix_set_bit(nr,addr) \ |
382 | __set_le_bit(nr, (unsigned long *)addr) | 386 | __set_le_bit(nr, (unsigned long *)addr) |
383 | #define minix_test_and_clear_bit(nr,addr) \ | 387 | #define minix_test_and_clear_bit(nr,addr) \ |
384 | __test_and_clear_le_bit(nr, (unsigned long *)addr) | 388 | __test_and_clear_le_bit(nr, (unsigned long *)addr) |
385 | #define minix_test_bit(nr,addr) \ | 389 | #define minix_test_bit(nr,addr) \ |
386 | test_le_bit(nr, (unsigned long *)addr) | 390 | test_le_bit(nr, (unsigned long *)addr) |
387 | 391 | ||
388 | #define minix_find_first_zero_bit(addr,size) \ | 392 | #define minix_find_first_zero_bit(addr,size) \ |
389 | find_first_zero_le_bit((unsigned long *)addr, size) | 393 | find_first_zero_le_bit((unsigned long *)addr, size) |
390 | 394 | ||
391 | #include <asm-generic/bitops/sched.h> | 395 | #include <asm-generic/bitops/sched.h> |
392 | 396 | ||
393 | #endif /* __KERNEL__ */ | 397 | #endif /* __KERNEL__ */ |
394 | 398 | ||
395 | #endif /* _ASM_POWERPC_BITOPS_H */ | 399 | #endif /* _ASM_POWERPC_BITOPS_H */ |
396 | 400 |
include/asm-s390/bitops.h
1 | #ifndef _S390_BITOPS_H | 1 | #ifndef _S390_BITOPS_H |
2 | #define _S390_BITOPS_H | 2 | #define _S390_BITOPS_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * include/asm-s390/bitops.h | 5 | * include/asm-s390/bitops.h |
6 | * | 6 | * |
7 | * S390 version | 7 | * S390 version |
8 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | 8 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation |
9 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | 9 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) |
10 | * | 10 | * |
11 | * Derived from "include/asm-i386/bitops.h" | 11 | * Derived from "include/asm-i386/bitops.h" |
12 | * Copyright (C) 1992, Linus Torvalds | 12 | * Copyright (C) 1992, Linus Torvalds |
13 | * | 13 | * |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #ifdef __KERNEL__ | 16 | #ifdef __KERNEL__ |
17 | 17 | ||
18 | #ifndef _LINUX_BITOPS_H | ||
19 | #error only <linux/bitops.h> can be included directly | ||
20 | #endif | ||
21 | |||
18 | #include <linux/compiler.h> | 22 | #include <linux/compiler.h> |
19 | 23 | ||
20 | /* | 24 | /* |
21 | * 32 bit bitops format: | 25 | * 32 bit bitops format: |
22 | * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr; | 26 | * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr; |
23 | * bit 32 is the LSB of *(addr+4). That combined with the | 27 | * bit 32 is the LSB of *(addr+4). That combined with the |
24 | * big endian byte order on S390 gives the following bit | 28 | * big endian byte order on S390 gives the following bit |
25 | * order in memory: | 29 | * order in memory: |
26 | * 1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \ | 30 | * 1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \ |
27 | * 0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00 | 31 | * 0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00 |
28 | * after that follows the next long with bit numbers | 32 | * after that follows the next long with bit numbers |
29 | * 3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30 | 33 | * 3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30 |
30 | * 2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20 | 34 | * 2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20 |
31 | * The reason for this bit ordering is the fact that | 35 | * The reason for this bit ordering is the fact that |
32 | * in the architecture independent code bit operations | 36 | * in the architecture independent code bit operations |
33 | * of the form "flags |= (1 << bitnr)" are used INTERMIXED | 37 | * of the form "flags |= (1 << bitnr)" are used INTERMIXED |
34 | * with operations of the form "set_bit(bitnr, flags)". | 38 | * with operations of the form "set_bit(bitnr, flags)". |
35 | * | 39 | * |
36 | * 64 bit bitops format: | 40 | * 64 bit bitops format: |
37 | * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr; | 41 | * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr; |
38 | * bit 64 is the LSB of *(addr+8). That combined with the | 42 | * bit 64 is the LSB of *(addr+8). That combined with the |
39 | * big endian byte order on S390 gives the following bit | 43 | * big endian byte order on S390 gives the following bit |
40 | * order in memory: | 44 | * order in memory: |
41 | * 3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30 | 45 | * 3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30 |
42 | * 2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20 | 46 | * 2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20 |
43 | * 1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 | 47 | * 1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 |
44 | * 0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00 | 48 | * 0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00 |
45 | * after that follows the next long with bit numbers | 49 | * after that follows the next long with bit numbers |
46 | * 7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70 | 50 | * 7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70 |
47 | * 6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60 | 51 | * 6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60 |
48 | * 5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50 | 52 | * 5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50 |
49 | * 4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40 | 53 | * 4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40 |
50 | * The reason for this bit ordering is the fact that | 54 | * The reason for this bit ordering is the fact that |
51 | * in the architecture independent code bit operations | 55 | * in the architecture independent code bit operations |
52 | * of the form "flags |= (1 << bitnr)" are used INTERMIXED | 56 | * of the form "flags |= (1 << bitnr)" are used INTERMIXED |
53 | * with operations of the form "set_bit(bitnr, flags)". | 57 | * with operations of the form "set_bit(bitnr, flags)". |
54 | */ | 58 | */ |
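In other words, bit nr of a word lives at byte offset ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3) from the start of the word and at position (nr & 7) within that byte, which is exactly the addressing the byte-wise helpers below compute. A small, hypothetical userspace loop reproduces the 32 bit table above (illustrative only, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned long nr;

            /* reproduce the 32 bit memory layout described in the comment */
            for (nr = 0; nr < 32; nr++)
                    printf("bit %2lu -> byte offset %lu, bit within byte %lu\n",
                           nr, (nr ^ 24) >> 3, nr & 7);
            return 0;
    }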
55 | 59 | ||
56 | /* bitmap tables from arch/S390/kernel/bitmap.S */ | 60 | /* bitmap tables from arch/S390/kernel/bitmap.S */ |
57 | extern const char _oi_bitmap[]; | 61 | extern const char _oi_bitmap[]; |
58 | extern const char _ni_bitmap[]; | 62 | extern const char _ni_bitmap[]; |
59 | extern const char _zb_findmap[]; | 63 | extern const char _zb_findmap[]; |
60 | extern const char _sb_findmap[]; | 64 | extern const char _sb_findmap[]; |
61 | 65 | ||
62 | #ifndef __s390x__ | 66 | #ifndef __s390x__ |
63 | 67 | ||
64 | #define __BITOPS_ALIGN 3 | 68 | #define __BITOPS_ALIGN 3 |
65 | #define __BITOPS_WORDSIZE 32 | 69 | #define __BITOPS_WORDSIZE 32 |
66 | #define __BITOPS_OR "or" | 70 | #define __BITOPS_OR "or" |
67 | #define __BITOPS_AND "nr" | 71 | #define __BITOPS_AND "nr" |
68 | #define __BITOPS_XOR "xr" | 72 | #define __BITOPS_XOR "xr" |
69 | 73 | ||
70 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | 74 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) |
71 | 75 | ||
72 | #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ | 76 | #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ |
73 | asm volatile( \ | 77 | asm volatile( \ |
74 | " l %0,%2\n" \ | 78 | " l %0,%2\n" \ |
75 | "0: lr %1,%0\n" \ | 79 | "0: lr %1,%0\n" \ |
76 | __op_string " %1,%3\n" \ | 80 | __op_string " %1,%3\n" \ |
77 | " cs %0,%1,%2\n" \ | 81 | " cs %0,%1,%2\n" \ |
78 | " jl 0b" \ | 82 | " jl 0b" \ |
79 | : "=&d" (__old), "=&d" (__new), \ | 83 | : "=&d" (__old), "=&d" (__new), \ |
80 | "=Q" (*(unsigned long *) __addr) \ | 84 | "=Q" (*(unsigned long *) __addr) \ |
81 | : "d" (__val), "Q" (*(unsigned long *) __addr) \ | 85 | : "d" (__val), "Q" (*(unsigned long *) __addr) \ |
82 | : "cc"); | 86 | : "cc"); |
83 | 87 | ||
84 | #else /* __GNUC__ */ | 88 | #else /* __GNUC__ */ |
85 | 89 | ||
86 | #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ | 90 | #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ |
87 | asm volatile( \ | 91 | asm volatile( \ |
88 | " l %0,0(%4)\n" \ | 92 | " l %0,0(%4)\n" \ |
89 | "0: lr %1,%0\n" \ | 93 | "0: lr %1,%0\n" \ |
90 | __op_string " %1,%3\n" \ | 94 | __op_string " %1,%3\n" \ |
91 | " cs %0,%1,0(%4)\n" \ | 95 | " cs %0,%1,0(%4)\n" \ |
92 | " jl 0b" \ | 96 | " jl 0b" \ |
93 | : "=&d" (__old), "=&d" (__new), \ | 97 | : "=&d" (__old), "=&d" (__new), \ |
94 | "=m" (*(unsigned long *) __addr) \ | 98 | "=m" (*(unsigned long *) __addr) \ |
95 | : "d" (__val), "a" (__addr), \ | 99 | : "d" (__val), "a" (__addr), \ |
96 | "m" (*(unsigned long *) __addr) : "cc"); | 100 | "m" (*(unsigned long *) __addr) : "cc"); |
97 | 101 | ||
98 | #endif /* __GNUC__ */ | 102 | #endif /* __GNUC__ */ |
99 | 103 | ||
100 | #else /* __s390x__ */ | 104 | #else /* __s390x__ */ |
101 | 105 | ||
102 | #define __BITOPS_ALIGN 7 | 106 | #define __BITOPS_ALIGN 7 |
103 | #define __BITOPS_WORDSIZE 64 | 107 | #define __BITOPS_WORDSIZE 64 |
104 | #define __BITOPS_OR "ogr" | 108 | #define __BITOPS_OR "ogr" |
105 | #define __BITOPS_AND "ngr" | 109 | #define __BITOPS_AND "ngr" |
106 | #define __BITOPS_XOR "xgr" | 110 | #define __BITOPS_XOR "xgr" |
107 | 111 | ||
108 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | 112 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) |
109 | 113 | ||
110 | #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ | 114 | #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ |
111 | asm volatile( \ | 115 | asm volatile( \ |
112 | " lg %0,%2\n" \ | 116 | " lg %0,%2\n" \ |
113 | "0: lgr %1,%0\n" \ | 117 | "0: lgr %1,%0\n" \ |
114 | __op_string " %1,%3\n" \ | 118 | __op_string " %1,%3\n" \ |
115 | " csg %0,%1,%2\n" \ | 119 | " csg %0,%1,%2\n" \ |
116 | " jl 0b" \ | 120 | " jl 0b" \ |
117 | : "=&d" (__old), "=&d" (__new), \ | 121 | : "=&d" (__old), "=&d" (__new), \ |
118 | "=Q" (*(unsigned long *) __addr) \ | 122 | "=Q" (*(unsigned long *) __addr) \ |
119 | : "d" (__val), "Q" (*(unsigned long *) __addr) \ | 123 | : "d" (__val), "Q" (*(unsigned long *) __addr) \ |
120 | : "cc"); | 124 | : "cc"); |
121 | 125 | ||
122 | #else /* __GNUC__ */ | 126 | #else /* __GNUC__ */ |
123 | 127 | ||
124 | #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ | 128 | #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ |
125 | asm volatile( \ | 129 | asm volatile( \ |
126 | " lg %0,0(%4)\n" \ | 130 | " lg %0,0(%4)\n" \ |
127 | "0: lgr %1,%0\n" \ | 131 | "0: lgr %1,%0\n" \ |
128 | __op_string " %1,%3\n" \ | 132 | __op_string " %1,%3\n" \ |
129 | " csg %0,%1,0(%4)\n" \ | 133 | " csg %0,%1,0(%4)\n" \ |
130 | " jl 0b" \ | 134 | " jl 0b" \ |
131 | : "=&d" (__old), "=&d" (__new), \ | 135 | : "=&d" (__old), "=&d" (__new), \ |
132 | "=m" (*(unsigned long *) __addr) \ | 136 | "=m" (*(unsigned long *) __addr) \ |
133 | : "d" (__val), "a" (__addr), \ | 137 | : "d" (__val), "a" (__addr), \ |
134 | "m" (*(unsigned long *) __addr) : "cc"); | 138 | "m" (*(unsigned long *) __addr) : "cc"); |
135 | 139 | ||
136 | 140 | ||
137 | #endif /* __GNUC__ */ | 141 | #endif /* __GNUC__ */ |
138 | 142 | ||
139 | #endif /* __s390x__ */ | 143 | #endif /* __s390x__ */ |
140 | 144 | ||
141 | #define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE) | 145 | #define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE) |
142 | #define __BITOPS_BARRIER() asm volatile("" : : : "memory") | 146 | #define __BITOPS_BARRIER() asm volatile("" : : : "memory") |
143 | 147 | ||
144 | #ifdef CONFIG_SMP | 148 | #ifdef CONFIG_SMP |
145 | /* | 149 | /* |
146 | * SMP safe set_bit routine based on compare and swap (CS) | 150 | * SMP safe set_bit routine based on compare and swap (CS) |
147 | */ | 151 | */ |
148 | static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr) | 152 | static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr) |
149 | { | 153 | { |
150 | unsigned long addr, old, new, mask; | 154 | unsigned long addr, old, new, mask; |
151 | 155 | ||
152 | addr = (unsigned long) ptr; | 156 | addr = (unsigned long) ptr; |
153 | /* calculate address for CS */ | 157 | /* calculate address for CS */ |
154 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; | 158 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; |
155 | /* make OR mask */ | 159 | /* make OR mask */ |
156 | mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); | 160 | mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); |
157 | /* Do the atomic update. */ | 161 | /* Do the atomic update. */ |
158 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR); | 162 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR); |
159 | } | 163 | } |
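The __BITOPS_LOOP sequence used here (load the word, apply the operation in a scratch register, compare-and-swap it back, retry if the word changed underneath) can be modelled in portable C. The sketch below is illustrative only; it uses GCC's __sync_val_compare_and_swap builtin as a stand-in for the CS/CSG instruction and plain word indexing in place of the byte-address arithmetic:

    /* illustrative model of the CS-based retry loop, assuming a GCC-style compiler */
    static inline void demo_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
    {
            unsigned long old, new, mask;

            ptr += nr / (sizeof(unsigned long) * 8);          /* word holding the bit */
            mask = 1UL << (nr % (sizeof(unsigned long) * 8)); /* the OR mask          */
            do {
                    old = *ptr;
                    new = old | mask;                         /* the __BITOPS_OR step */
            } while (__sync_val_compare_and_swap(ptr, old, new) != old);
    }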
160 | 164 | ||
161 | /* | 165 | /* |
162 | * SMP safe clear_bit routine based on compare and swap (CS) | 166 | * SMP safe clear_bit routine based on compare and swap (CS) |
163 | */ | 167 | */ |
164 | static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr) | 168 | static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr) |
165 | { | 169 | { |
166 | unsigned long addr, old, new, mask; | 170 | unsigned long addr, old, new, mask; |
167 | 171 | ||
168 | addr = (unsigned long) ptr; | 172 | addr = (unsigned long) ptr; |
169 | /* calculate address for CS */ | 173 | /* calculate address for CS */ |
170 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; | 174 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; |
171 | /* make AND mask */ | 175 | /* make AND mask */ |
172 | mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1))); | 176 | mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1))); |
173 | /* Do the atomic update. */ | 177 | /* Do the atomic update. */ |
174 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND); | 178 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND); |
175 | } | 179 | } |
176 | 180 | ||
177 | /* | 181 | /* |
178 | * SMP safe change_bit routine based on compare and swap (CS) | 182 | * SMP safe change_bit routine based on compare and swap (CS) |
179 | */ | 183 | */ |
180 | static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr) | 184 | static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr) |
181 | { | 185 | { |
182 | unsigned long addr, old, new, mask; | 186 | unsigned long addr, old, new, mask; |
183 | 187 | ||
184 | addr = (unsigned long) ptr; | 188 | addr = (unsigned long) ptr; |
185 | /* calculate address for CS */ | 189 | /* calculate address for CS */ |
186 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; | 190 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; |
187 | /* make XOR mask */ | 191 | /* make XOR mask */ |
188 | mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); | 192 | mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); |
189 | /* Do the atomic update. */ | 193 | /* Do the atomic update. */ |
190 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR); | 194 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR); |
191 | } | 195 | } |
192 | 196 | ||
193 | /* | 197 | /* |
194 | * SMP safe test_and_set_bit routine based on compare and swap (CS) | 198 | * SMP safe test_and_set_bit routine based on compare and swap (CS) |
195 | */ | 199 | */ |
196 | static inline int | 200 | static inline int |
197 | test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr) | 201 | test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr) |
198 | { | 202 | { |
199 | unsigned long addr, old, new, mask; | 203 | unsigned long addr, old, new, mask; |
200 | 204 | ||
201 | addr = (unsigned long) ptr; | 205 | addr = (unsigned long) ptr; |
202 | /* calculate address for CS */ | 206 | /* calculate address for CS */ |
203 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; | 207 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; |
204 | /* make OR/test mask */ | 208 | /* make OR/test mask */ |
205 | mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); | 209 | mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); |
206 | /* Do the atomic update. */ | 210 | /* Do the atomic update. */ |
207 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR); | 211 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR); |
208 | __BITOPS_BARRIER(); | 212 | __BITOPS_BARRIER(); |
209 | return (old & mask) != 0; | 213 | return (old & mask) != 0; |
210 | } | 214 | } |
211 | 215 | ||
212 | /* | 216 | /* |
213 | * SMP safe test_and_clear_bit routine based on compare and swap (CS) | 217 | * SMP safe test_and_clear_bit routine based on compare and swap (CS) |
214 | */ | 218 | */ |
215 | static inline int | 219 | static inline int |
216 | test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr) | 220 | test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr) |
217 | { | 221 | { |
218 | unsigned long addr, old, new, mask; | 222 | unsigned long addr, old, new, mask; |
219 | 223 | ||
220 | addr = (unsigned long) ptr; | 224 | addr = (unsigned long) ptr; |
221 | /* calculate address for CS */ | 225 | /* calculate address for CS */ |
222 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; | 226 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; |
223 | /* make AND/test mask */ | 227 | /* make AND/test mask */ |
224 | mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1))); | 228 | mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1))); |
225 | /* Do the atomic update. */ | 229 | /* Do the atomic update. */ |
226 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND); | 230 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND); |
227 | __BITOPS_BARRIER(); | 231 | __BITOPS_BARRIER(); |
228 | return (old ^ new) != 0; | 232 | return (old ^ new) != 0; |
229 | } | 233 | } |
230 | 234 | ||
231 | /* | 235 | /* |
232 | * SMP safe test_and_change_bit routine based on compare and swap (CS) | 236 | * SMP safe test_and_change_bit routine based on compare and swap (CS) |
233 | */ | 237 | */ |
234 | static inline int | 238 | static inline int |
235 | test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr) | 239 | test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr) |
236 | { | 240 | { |
237 | unsigned long addr, old, new, mask; | 241 | unsigned long addr, old, new, mask; |
238 | 242 | ||
239 | addr = (unsigned long) ptr; | 243 | addr = (unsigned long) ptr; |
240 | /* calculate address for CS */ | 244 | /* calculate address for CS */ |
241 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; | 245 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; |
242 | /* make XOR/test mask */ | 246 | /* make XOR/test mask */ |
243 | mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); | 247 | mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); |
244 | /* Do the atomic update. */ | 248 | /* Do the atomic update. */ |
245 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR); | 249 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR); |
246 | __BITOPS_BARRIER(); | 250 | __BITOPS_BARRIER(); |
247 | return (old & mask) != 0; | 251 | return (old & mask) != 0; |
248 | } | 252 | } |
249 | #endif /* CONFIG_SMP */ | 253 | #endif /* CONFIG_SMP */ |
250 | 254 | ||
251 | /* | 255 | /* |
252 | * fast, non-SMP set_bit routine | 256 | * fast, non-SMP set_bit routine |
253 | */ | 257 | */ |
254 | static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr) | 258 | static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr) |
255 | { | 259 | { |
256 | unsigned long addr; | 260 | unsigned long addr; |
257 | 261 | ||
258 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 262 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
259 | asm volatile( | 263 | asm volatile( |
260 | " oc 0(1,%1),0(%2)" | 264 | " oc 0(1,%1),0(%2)" |
261 | : "=m" (*(char *) addr) : "a" (addr), | 265 | : "=m" (*(char *) addr) : "a" (addr), |
262 | "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" ); | 266 | "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" ); |
263 | } | 267 | } |
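The OC instruction above ORs a single byte of the bitmap with a mask byte taken from _oi_bitmap[nr & 7]; assuming that table simply holds the values 1 << 0 through 1 << 7, the operation is equivalent to this plain-C model (illustrative only, not part of the patch):

    /* plain-C model of the oc-based __set_bit(); _oi_bitmap[i] is assumed to be 1 << i */
    static inline void demo_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
    {
            volatile unsigned char *byte = (volatile unsigned char *) ptr
                    + ((nr ^ (sizeof(unsigned long) * 8 - 8)) >> 3); /* big-endian byte */

            *byte |= 1u << (nr & 7);     /* the mask byte _oi_bitmap[nr & 7] supplies */
    }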
264 | 268 | ||
265 | static inline void | 269 | static inline void |
266 | __constant_set_bit(const unsigned long nr, volatile unsigned long *ptr) | 270 | __constant_set_bit(const unsigned long nr, volatile unsigned long *ptr) |
267 | { | 271 | { |
268 | unsigned long addr; | 272 | unsigned long addr; |
269 | 273 | ||
270 | addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 274 | addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
271 | *(unsigned char *) addr |= 1 << (nr & 7); | 275 | *(unsigned char *) addr |= 1 << (nr & 7); |
272 | } | 276 | } |
273 | 277 | ||
274 | #define set_bit_simple(nr,addr) \ | 278 | #define set_bit_simple(nr,addr) \ |
275 | (__builtin_constant_p((nr)) ? \ | 279 | (__builtin_constant_p((nr)) ? \ |
276 | __constant_set_bit((nr),(addr)) : \ | 280 | __constant_set_bit((nr),(addr)) : \ |
277 | __set_bit((nr),(addr)) ) | 281 | __set_bit((nr),(addr)) ) |
278 | 282 | ||
279 | /* | 283 | /* |
280 | * fast, non-SMP clear_bit routine | 284 | * fast, non-SMP clear_bit routine |
281 | */ | 285 | */ |
282 | static inline void | 286 | static inline void |
283 | __clear_bit(unsigned long nr, volatile unsigned long *ptr) | 287 | __clear_bit(unsigned long nr, volatile unsigned long *ptr) |
284 | { | 288 | { |
285 | unsigned long addr; | 289 | unsigned long addr; |
286 | 290 | ||
287 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 291 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
288 | asm volatile( | 292 | asm volatile( |
289 | " nc 0(1,%1),0(%2)" | 293 | " nc 0(1,%1),0(%2)" |
290 | : "=m" (*(char *) addr) : "a" (addr), | 294 | : "=m" (*(char *) addr) : "a" (addr), |
291 | "a" (_ni_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc"); | 295 | "a" (_ni_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc"); |
292 | } | 296 | } |
293 | 297 | ||
294 | static inline void | 298 | static inline void |
295 | __constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr) | 299 | __constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr) |
296 | { | 300 | { |
297 | unsigned long addr; | 301 | unsigned long addr; |
298 | 302 | ||
299 | addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 303 | addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
300 | *(unsigned char *) addr &= ~(1 << (nr & 7)); | 304 | *(unsigned char *) addr &= ~(1 << (nr & 7)); |
301 | } | 305 | } |
302 | 306 | ||
303 | #define clear_bit_simple(nr,addr) \ | 307 | #define clear_bit_simple(nr,addr) \ |
304 | (__builtin_constant_p((nr)) ? \ | 308 | (__builtin_constant_p((nr)) ? \ |
305 | __constant_clear_bit((nr),(addr)) : \ | 309 | __constant_clear_bit((nr),(addr)) : \ |
306 | __clear_bit((nr),(addr)) ) | 310 | __clear_bit((nr),(addr)) ) |
307 | 311 | ||
308 | /* | 312 | /* |
309 | * fast, non-SMP change_bit routine | 313 | * fast, non-SMP change_bit routine |
310 | */ | 314 | */ |
311 | static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr) | 315 | static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr) |
312 | { | 316 | { |
313 | unsigned long addr; | 317 | unsigned long addr; |
314 | 318 | ||
315 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 319 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
316 | asm volatile( | 320 | asm volatile( |
317 | " xc 0(1,%1),0(%2)" | 321 | " xc 0(1,%1),0(%2)" |
318 | : "=m" (*(char *) addr) : "a" (addr), | 322 | : "=m" (*(char *) addr) : "a" (addr), |
319 | "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" ); | 323 | "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" ); |
320 | } | 324 | } |
321 | 325 | ||
322 | static inline void | 326 | static inline void |
323 | __constant_change_bit(const unsigned long nr, volatile unsigned long *ptr) | 327 | __constant_change_bit(const unsigned long nr, volatile unsigned long *ptr) |
324 | { | 328 | { |
325 | unsigned long addr; | 329 | unsigned long addr; |
326 | 330 | ||
327 | addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 331 | addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
328 | *(unsigned char *) addr ^= 1 << (nr & 7); | 332 | *(unsigned char *) addr ^= 1 << (nr & 7); |
329 | } | 333 | } |
330 | 334 | ||
331 | #define change_bit_simple(nr,addr) \ | 335 | #define change_bit_simple(nr,addr) \ |
332 | (__builtin_constant_p((nr)) ? \ | 336 | (__builtin_constant_p((nr)) ? \ |
333 | __constant_change_bit((nr),(addr)) : \ | 337 | __constant_change_bit((nr),(addr)) : \ |
334 | __change_bit((nr),(addr)) ) | 338 | __change_bit((nr),(addr)) ) |
335 | 339 | ||
336 | /* | 340 | /* |
337 | * fast, non-SMP test_and_set_bit routine | 341 | * fast, non-SMP test_and_set_bit routine |
338 | */ | 342 | */ |
339 | static inline int | 343 | static inline int |
340 | test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr) | 344 | test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr) |
341 | { | 345 | { |
342 | unsigned long addr; | 346 | unsigned long addr; |
343 | unsigned char ch; | 347 | unsigned char ch; |
344 | 348 | ||
345 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 349 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
346 | ch = *(unsigned char *) addr; | 350 | ch = *(unsigned char *) addr; |
347 | asm volatile( | 351 | asm volatile( |
348 | " oc 0(1,%1),0(%2)" | 352 | " oc 0(1,%1),0(%2)" |
349 | : "=m" (*(char *) addr) | 353 | : "=m" (*(char *) addr) |
350 | : "a" (addr), "a" (_oi_bitmap + (nr & 7)), | 354 | : "a" (addr), "a" (_oi_bitmap + (nr & 7)), |
351 | "m" (*(char *) addr) : "cc", "memory"); | 355 | "m" (*(char *) addr) : "cc", "memory"); |
352 | return (ch >> (nr & 7)) & 1; | 356 | return (ch >> (nr & 7)) & 1; |
353 | } | 357 | } |
354 | #define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y) | 358 | #define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y) |
355 | 359 | ||
356 | /* | 360 | /* |
357 | * fast, non-SMP test_and_clear_bit routine | 361 | * fast, non-SMP test_and_clear_bit routine |
358 | */ | 362 | */ |
359 | static inline int | 363 | static inline int |
360 | test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr) | 364 | test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr) |
361 | { | 365 | { |
362 | unsigned long addr; | 366 | unsigned long addr; |
363 | unsigned char ch; | 367 | unsigned char ch; |
364 | 368 | ||
365 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 369 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
366 | ch = *(unsigned char *) addr; | 370 | ch = *(unsigned char *) addr; |
367 | asm volatile( | 371 | asm volatile( |
368 | " nc 0(1,%1),0(%2)" | 372 | " nc 0(1,%1),0(%2)" |
369 | : "=m" (*(char *) addr) | 373 | : "=m" (*(char *) addr) |
370 | : "a" (addr), "a" (_ni_bitmap + (nr & 7)), | 374 | : "a" (addr), "a" (_ni_bitmap + (nr & 7)), |
371 | "m" (*(char *) addr) : "cc", "memory"); | 375 | "m" (*(char *) addr) : "cc", "memory"); |
372 | return (ch >> (nr & 7)) & 1; | 376 | return (ch >> (nr & 7)) & 1; |
373 | } | 377 | } |
374 | #define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y) | 378 | #define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y) |
375 | 379 | ||
376 | /* | 380 | /* |
377 | * fast, non-SMP test_and_change_bit routine | 381 | * fast, non-SMP test_and_change_bit routine |
378 | */ | 382 | */ |
379 | static inline int | 383 | static inline int |
380 | test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr) | 384 | test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr) |
381 | { | 385 | { |
382 | unsigned long addr; | 386 | unsigned long addr; |
383 | unsigned char ch; | 387 | unsigned char ch; |
384 | 388 | ||
385 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 389 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
386 | ch = *(unsigned char *) addr; | 390 | ch = *(unsigned char *) addr; |
387 | asm volatile( | 391 | asm volatile( |
388 | " xc 0(1,%1),0(%2)" | 392 | " xc 0(1,%1),0(%2)" |
389 | : "=m" (*(char *) addr) | 393 | : "=m" (*(char *) addr) |
390 | : "a" (addr), "a" (_oi_bitmap + (nr & 7)), | 394 | : "a" (addr), "a" (_oi_bitmap + (nr & 7)), |
391 | "m" (*(char *) addr) : "cc", "memory"); | 395 | "m" (*(char *) addr) : "cc", "memory"); |
392 | return (ch >> (nr & 7)) & 1; | 396 | return (ch >> (nr & 7)) & 1; |
393 | } | 397 | } |
394 | #define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y) | 398 | #define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y) |
395 | 399 | ||
396 | #ifdef CONFIG_SMP | 400 | #ifdef CONFIG_SMP |
397 | #define set_bit set_bit_cs | 401 | #define set_bit set_bit_cs |
398 | #define clear_bit clear_bit_cs | 402 | #define clear_bit clear_bit_cs |
399 | #define change_bit change_bit_cs | 403 | #define change_bit change_bit_cs |
400 | #define test_and_set_bit test_and_set_bit_cs | 404 | #define test_and_set_bit test_and_set_bit_cs |
401 | #define test_and_clear_bit test_and_clear_bit_cs | 405 | #define test_and_clear_bit test_and_clear_bit_cs |
402 | #define test_and_change_bit test_and_change_bit_cs | 406 | #define test_and_change_bit test_and_change_bit_cs |
403 | #else | 407 | #else |
404 | #define set_bit set_bit_simple | 408 | #define set_bit set_bit_simple |
405 | #define clear_bit clear_bit_simple | 409 | #define clear_bit clear_bit_simple |
406 | #define change_bit change_bit_simple | 410 | #define change_bit change_bit_simple |
407 | #define test_and_set_bit test_and_set_bit_simple | 411 | #define test_and_set_bit test_and_set_bit_simple |
408 | #define test_and_clear_bit test_and_clear_bit_simple | 412 | #define test_and_clear_bit test_and_clear_bit_simple |
409 | #define test_and_change_bit test_and_change_bit_simple | 413 | #define test_and_change_bit test_and_change_bit_simple |
410 | #endif | 414 | #endif |
411 | 415 | ||
412 | 416 | ||
413 | /* | 417 | /* |
414 | * This routine doesn't need to be atomic. | 418 | * This routine doesn't need to be atomic. |
415 | */ | 419 | */ |
416 | 420 | ||
417 | static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr) | 421 | static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr) |
418 | { | 422 | { |
419 | unsigned long addr; | 423 | unsigned long addr; |
420 | unsigned char ch; | 424 | unsigned char ch; |
421 | 425 | ||
422 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 426 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
423 | ch = *(volatile unsigned char *) addr; | 427 | ch = *(volatile unsigned char *) addr; |
424 | return (ch >> (nr & 7)) & 1; | 428 | return (ch >> (nr & 7)) & 1; |
425 | } | 429 | } |
426 | 430 | ||
427 | static inline int | 431 | static inline int |
428 | __constant_test_bit(unsigned long nr, const volatile unsigned long *addr) { | 432 | __constant_test_bit(unsigned long nr, const volatile unsigned long *addr) { |
429 | return (((volatile char *) addr) | 433 | return (((volatile char *) addr) |
430 | [(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7))) != 0; | 434 | [(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7))) != 0; |
431 | } | 435 | } |
432 | 436 | ||
433 | #define test_bit(nr,addr) \ | 437 | #define test_bit(nr,addr) \ |
434 | (__builtin_constant_p((nr)) ? \ | 438 | (__builtin_constant_p((nr)) ? \ |
435 | __constant_test_bit((nr),(addr)) : \ | 439 | __constant_test_bit((nr),(addr)) : \ |
436 | __test_bit((nr),(addr)) ) | 440 | __test_bit((nr),(addr)) ) |
437 | 441 | ||
438 | /* | 442 | /* |
439 | * ffz = Find First Zero in word. Undefined if no zero exists, | 443 | * ffz = Find First Zero in word. Undefined if no zero exists, |
440 | * so code should check against ~0UL first.. | 444 | * so code should check against ~0UL first.. |
441 | */ | 445 | */ |
442 | static inline unsigned long ffz(unsigned long word) | 446 | static inline unsigned long ffz(unsigned long word) |
443 | { | 447 | { |
444 | unsigned long bit = 0; | 448 | unsigned long bit = 0; |
445 | 449 | ||
446 | #ifdef __s390x__ | 450 | #ifdef __s390x__ |
447 | if (likely((word & 0xffffffff) == 0xffffffff)) { | 451 | if (likely((word & 0xffffffff) == 0xffffffff)) { |
448 | word >>= 32; | 452 | word >>= 32; |
449 | bit += 32; | 453 | bit += 32; |
450 | } | 454 | } |
451 | #endif | 455 | #endif |
452 | if (likely((word & 0xffff) == 0xffff)) { | 456 | if (likely((word & 0xffff) == 0xffff)) { |
453 | word >>= 16; | 457 | word >>= 16; |
454 | bit += 16; | 458 | bit += 16; |
455 | } | 459 | } |
456 | if (likely((word & 0xff) == 0xff)) { | 460 | if (likely((word & 0xff) == 0xff)) { |
457 | word >>= 8; | 461 | word >>= 8; |
458 | bit += 8; | 462 | bit += 8; |
459 | } | 463 | } |
460 | return bit + _zb_findmap[word & 0xff]; | 464 | return bit + _zb_findmap[word & 0xff]; |
461 | } | 465 | } |
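The search narrows to a single byte in halving steps and then finishes with a 256-entry table lookup; _zb_findmap is assumed to map a byte value to the position of its lowest zero bit, or 8 when the byte is 0xff. An on-the-fly model of such a table entry, for illustration only:

    /* what a _zb_findmap[] entry is assumed to hold for a given byte value */
    static inline unsigned int demo_zb_findmap(unsigned char byte)
    {
            unsigned int bit;

            for (bit = 0; bit < 8; bit++)
                    if (!(byte & (1u << bit)))
                            return bit;     /* lowest zero bit within the byte */
            return 8;                       /* byte is 0xff: no zero bit here  */
    }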
462 | 466 | ||
463 | /* | 467 | /* |
464 | * __ffs = find first bit in word. Undefined if no bit exists, | 468 | * __ffs = find first bit in word. Undefined if no bit exists, |
465 | * so code should check against 0UL first.. | 469 | * so code should check against 0UL first.. |
466 | */ | 470 | */ |
467 | static inline unsigned long __ffs (unsigned long word) | 471 | static inline unsigned long __ffs (unsigned long word) |
468 | { | 472 | { |
469 | unsigned long bit = 0; | 473 | unsigned long bit = 0; |
470 | 474 | ||
471 | #ifdef __s390x__ | 475 | #ifdef __s390x__ |
472 | if (likely((word & 0xffffffff) == 0)) { | 476 | if (likely((word & 0xffffffff) == 0)) { |
473 | word >>= 32; | 477 | word >>= 32; |
474 | bit += 32; | 478 | bit += 32; |
475 | } | 479 | } |
476 | #endif | 480 | #endif |
477 | if (likely((word & 0xffff) == 0)) { | 481 | if (likely((word & 0xffff) == 0)) { |
478 | word >>= 16; | 482 | word >>= 16; |
479 | bit += 16; | 483 | bit += 16; |
480 | } | 484 | } |
481 | if (likely((word & 0xff) == 0)) { | 485 | if (likely((word & 0xff) == 0)) { |
482 | word >>= 8; | 486 | word >>= 8; |
483 | bit += 8; | 487 | bit += 8; |
484 | } | 488 | } |
485 | return bit + _sb_findmap[word & 0xff]; | 489 | return bit + _sb_findmap[word & 0xff]; |
486 | } | 490 | } |
487 | 491 | ||
488 | /* | 492 | /* |
489 | * Find-bit routines.. | 493 | * Find-bit routines.. |
490 | */ | 494 | */ |
491 | 495 | ||
492 | #ifndef __s390x__ | 496 | #ifndef __s390x__ |
493 | 497 | ||
494 | static inline int | 498 | static inline int |
495 | find_first_zero_bit(const unsigned long * addr, unsigned long size) | 499 | find_first_zero_bit(const unsigned long * addr, unsigned long size) |
496 | { | 500 | { |
497 | typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; | 501 | typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; |
498 | unsigned long cmp, count; | 502 | unsigned long cmp, count; |
499 | unsigned int res; | 503 | unsigned int res; |
500 | 504 | ||
501 | if (!size) | 505 | if (!size) |
502 | return 0; | 506 | return 0; |
503 | asm volatile( | 507 | asm volatile( |
504 | " lhi %1,-1\n" | 508 | " lhi %1,-1\n" |
505 | " lr %2,%3\n" | 509 | " lr %2,%3\n" |
506 | " slr %0,%0\n" | 510 | " slr %0,%0\n" |
507 | " ahi %2,31\n" | 511 | " ahi %2,31\n" |
508 | " srl %2,5\n" | 512 | " srl %2,5\n" |
509 | "0: c %1,0(%0,%4)\n" | 513 | "0: c %1,0(%0,%4)\n" |
510 | " jne 1f\n" | 514 | " jne 1f\n" |
511 | " la %0,4(%0)\n" | 515 | " la %0,4(%0)\n" |
512 | " brct %2,0b\n" | 516 | " brct %2,0b\n" |
513 | " lr %0,%3\n" | 517 | " lr %0,%3\n" |
514 | " j 4f\n" | 518 | " j 4f\n" |
515 | "1: l %2,0(%0,%4)\n" | 519 | "1: l %2,0(%0,%4)\n" |
516 | " sll %0,3\n" | 520 | " sll %0,3\n" |
517 | " lhi %1,0xff\n" | 521 | " lhi %1,0xff\n" |
518 | " tml %2,0xffff\n" | 522 | " tml %2,0xffff\n" |
519 | " jno 2f\n" | 523 | " jno 2f\n" |
520 | " ahi %0,16\n" | 524 | " ahi %0,16\n" |
521 | " srl %2,16\n" | 525 | " srl %2,16\n" |
522 | "2: tml %2,0x00ff\n" | 526 | "2: tml %2,0x00ff\n" |
523 | " jno 3f\n" | 527 | " jno 3f\n" |
524 | " ahi %0,8\n" | 528 | " ahi %0,8\n" |
525 | " srl %2,8\n" | 529 | " srl %2,8\n" |
526 | "3: nr %2,%1\n" | 530 | "3: nr %2,%1\n" |
527 | " ic %2,0(%2,%5)\n" | 531 | " ic %2,0(%2,%5)\n" |
528 | " alr %0,%2\n" | 532 | " alr %0,%2\n" |
529 | "4:" | 533 | "4:" |
530 | : "=&a" (res), "=&d" (cmp), "=&a" (count) | 534 | : "=&a" (res), "=&d" (cmp), "=&a" (count) |
531 | : "a" (size), "a" (addr), "a" (&_zb_findmap), | 535 | : "a" (size), "a" (addr), "a" (&_zb_findmap), |
532 | "m" (*(addrtype *) addr) : "cc"); | 536 | "m" (*(addrtype *) addr) : "cc"); |
533 | return (res < size) ? res : size; | 537 | return (res < size) ? res : size; |
534 | } | 538 | } |
535 | 539 | ||
536 | static inline int | 540 | static inline int |
537 | find_first_bit(const unsigned long * addr, unsigned long size) | 541 | find_first_bit(const unsigned long * addr, unsigned long size) |
538 | { | 542 | { |
539 | typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; | 543 | typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; |
540 | unsigned long cmp, count; | 544 | unsigned long cmp, count; |
541 | unsigned int res; | 545 | unsigned int res; |
542 | 546 | ||
543 | if (!size) | 547 | if (!size) |
544 | return 0; | 548 | return 0; |
545 | asm volatile( | 549 | asm volatile( |
546 | " slr %1,%1\n" | 550 | " slr %1,%1\n" |
547 | " lr %2,%3\n" | 551 | " lr %2,%3\n" |
548 | " slr %0,%0\n" | 552 | " slr %0,%0\n" |
549 | " ahi %2,31\n" | 553 | " ahi %2,31\n" |
550 | " srl %2,5\n" | 554 | " srl %2,5\n" |
551 | "0: c %1,0(%0,%4)\n" | 555 | "0: c %1,0(%0,%4)\n" |
552 | " jne 1f\n" | 556 | " jne 1f\n" |
553 | " la %0,4(%0)\n" | 557 | " la %0,4(%0)\n" |
554 | " brct %2,0b\n" | 558 | " brct %2,0b\n" |
555 | " lr %0,%3\n" | 559 | " lr %0,%3\n" |
556 | " j 4f\n" | 560 | " j 4f\n" |
557 | "1: l %2,0(%0,%4)\n" | 561 | "1: l %2,0(%0,%4)\n" |
558 | " sll %0,3\n" | 562 | " sll %0,3\n" |
559 | " lhi %1,0xff\n" | 563 | " lhi %1,0xff\n" |
560 | " tml %2,0xffff\n" | 564 | " tml %2,0xffff\n" |
561 | " jnz 2f\n" | 565 | " jnz 2f\n" |
562 | " ahi %0,16\n" | 566 | " ahi %0,16\n" |
563 | " srl %2,16\n" | 567 | " srl %2,16\n" |
564 | "2: tml %2,0x00ff\n" | 568 | "2: tml %2,0x00ff\n" |
565 | " jnz 3f\n" | 569 | " jnz 3f\n" |
566 | " ahi %0,8\n" | 570 | " ahi %0,8\n" |
567 | " srl %2,8\n" | 571 | " srl %2,8\n" |
568 | "3: nr %2,%1\n" | 572 | "3: nr %2,%1\n" |
569 | " ic %2,0(%2,%5)\n" | 573 | " ic %2,0(%2,%5)\n" |
570 | " alr %0,%2\n" | 574 | " alr %0,%2\n" |
571 | "4:" | 575 | "4:" |
572 | : "=&a" (res), "=&d" (cmp), "=&a" (count) | 576 | : "=&a" (res), "=&d" (cmp), "=&a" (count) |
573 | : "a" (size), "a" (addr), "a" (&_sb_findmap), | 577 | : "a" (size), "a" (addr), "a" (&_sb_findmap), |
574 | "m" (*(addrtype *) addr) : "cc"); | 578 | "m" (*(addrtype *) addr) : "cc"); |
575 | return (res < size) ? res : size; | 579 | return (res < size) ? res : size; |
576 | } | 580 | } |
577 | 581 | ||
578 | #else /* __s390x__ */ | 582 | #else /* __s390x__ */ |
579 | 583 | ||
580 | static inline unsigned long | 584 | static inline unsigned long |
581 | find_first_zero_bit(const unsigned long * addr, unsigned long size) | 585 | find_first_zero_bit(const unsigned long * addr, unsigned long size) |
582 | { | 586 | { |
583 | typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; | 587 | typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; |
584 | unsigned long res, cmp, count; | 588 | unsigned long res, cmp, count; |
585 | 589 | ||
586 | if (!size) | 590 | if (!size) |
587 | return 0; | 591 | return 0; |
588 | asm volatile( | 592 | asm volatile( |
589 | " lghi %1,-1\n" | 593 | " lghi %1,-1\n" |
590 | " lgr %2,%3\n" | 594 | " lgr %2,%3\n" |
591 | " slgr %0,%0\n" | 595 | " slgr %0,%0\n" |
592 | " aghi %2,63\n" | 596 | " aghi %2,63\n" |
593 | " srlg %2,%2,6\n" | 597 | " srlg %2,%2,6\n" |
594 | "0: cg %1,0(%0,%4)\n" | 598 | "0: cg %1,0(%0,%4)\n" |
595 | " jne 1f\n" | 599 | " jne 1f\n" |
596 | " la %0,8(%0)\n" | 600 | " la %0,8(%0)\n" |
597 | " brct %2,0b\n" | 601 | " brct %2,0b\n" |
598 | " lgr %0,%3\n" | 602 | " lgr %0,%3\n" |
599 | " j 5f\n" | 603 | " j 5f\n" |
600 | "1: lg %2,0(%0,%4)\n" | 604 | "1: lg %2,0(%0,%4)\n" |
601 | " sllg %0,%0,3\n" | 605 | " sllg %0,%0,3\n" |
602 | " clr %2,%1\n" | 606 | " clr %2,%1\n" |
603 | " jne 2f\n" | 607 | " jne 2f\n" |
604 | " aghi %0,32\n" | 608 | " aghi %0,32\n" |
605 | " srlg %2,%2,32\n" | 609 | " srlg %2,%2,32\n" |
606 | "2: lghi %1,0xff\n" | 610 | "2: lghi %1,0xff\n" |
607 | " tmll %2,0xffff\n" | 611 | " tmll %2,0xffff\n" |
608 | " jno 3f\n" | 612 | " jno 3f\n" |
609 | " aghi %0,16\n" | 613 | " aghi %0,16\n" |
610 | " srl %2,16\n" | 614 | " srl %2,16\n" |
611 | "3: tmll %2,0x00ff\n" | 615 | "3: tmll %2,0x00ff\n" |
612 | " jno 4f\n" | 616 | " jno 4f\n" |
613 | " aghi %0,8\n" | 617 | " aghi %0,8\n" |
614 | " srl %2,8\n" | 618 | " srl %2,8\n" |
615 | "4: ngr %2,%1\n" | 619 | "4: ngr %2,%1\n" |
616 | " ic %2,0(%2,%5)\n" | 620 | " ic %2,0(%2,%5)\n" |
617 | " algr %0,%2\n" | 621 | " algr %0,%2\n" |
618 | "5:" | 622 | "5:" |
619 | : "=&a" (res), "=&d" (cmp), "=&a" (count) | 623 | : "=&a" (res), "=&d" (cmp), "=&a" (count) |
620 | : "a" (size), "a" (addr), "a" (&_zb_findmap), | 624 | : "a" (size), "a" (addr), "a" (&_zb_findmap), |
621 | "m" (*(addrtype *) addr) : "cc"); | 625 | "m" (*(addrtype *) addr) : "cc"); |
622 | return (res < size) ? res : size; | 626 | return (res < size) ? res : size; |
623 | } | 627 | } |
624 | 628 | ||
625 | static inline unsigned long | 629 | static inline unsigned long |
626 | find_first_bit(const unsigned long * addr, unsigned long size) | 630 | find_first_bit(const unsigned long * addr, unsigned long size) |
627 | { | 631 | { |
628 | typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; | 632 | typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; |
629 | unsigned long res, cmp, count; | 633 | unsigned long res, cmp, count; |
630 | 634 | ||
631 | if (!size) | 635 | if (!size) |
632 | return 0; | 636 | return 0; |
633 | asm volatile( | 637 | asm volatile( |
634 | " slgr %1,%1\n" | 638 | " slgr %1,%1\n" |
635 | " lgr %2,%3\n" | 639 | " lgr %2,%3\n" |
636 | " slgr %0,%0\n" | 640 | " slgr %0,%0\n" |
637 | " aghi %2,63\n" | 641 | " aghi %2,63\n" |
638 | " srlg %2,%2,6\n" | 642 | " srlg %2,%2,6\n" |
639 | "0: cg %1,0(%0,%4)\n" | 643 | "0: cg %1,0(%0,%4)\n" |
640 | " jne 1f\n" | 644 | " jne 1f\n" |
641 | " aghi %0,8\n" | 645 | " aghi %0,8\n" |
642 | " brct %2,0b\n" | 646 | " brct %2,0b\n" |
643 | " lgr %0,%3\n" | 647 | " lgr %0,%3\n" |
644 | " j 5f\n" | 648 | " j 5f\n" |
645 | "1: lg %2,0(%0,%4)\n" | 649 | "1: lg %2,0(%0,%4)\n" |
646 | " sllg %0,%0,3\n" | 650 | " sllg %0,%0,3\n" |
647 | " clr %2,%1\n" | 651 | " clr %2,%1\n" |
648 | " jne 2f\n" | 652 | " jne 2f\n" |
649 | " aghi %0,32\n" | 653 | " aghi %0,32\n" |
650 | " srlg %2,%2,32\n" | 654 | " srlg %2,%2,32\n" |
651 | "2: lghi %1,0xff\n" | 655 | "2: lghi %1,0xff\n" |
652 | " tmll %2,0xffff\n" | 656 | " tmll %2,0xffff\n" |
653 | " jnz 3f\n" | 657 | " jnz 3f\n" |
654 | " aghi %0,16\n" | 658 | " aghi %0,16\n" |
655 | " srl %2,16\n" | 659 | " srl %2,16\n" |
656 | "3: tmll %2,0x00ff\n" | 660 | "3: tmll %2,0x00ff\n" |
657 | " jnz 4f\n" | 661 | " jnz 4f\n" |
658 | " aghi %0,8\n" | 662 | " aghi %0,8\n" |
659 | " srl %2,8\n" | 663 | " srl %2,8\n" |
660 | "4: ngr %2,%1\n" | 664 | "4: ngr %2,%1\n" |
661 | " ic %2,0(%2,%5)\n" | 665 | " ic %2,0(%2,%5)\n" |
662 | " algr %0,%2\n" | 666 | " algr %0,%2\n" |
663 | "5:" | 667 | "5:" |
664 | : "=&a" (res), "=&d" (cmp), "=&a" (count) | 668 | : "=&a" (res), "=&d" (cmp), "=&a" (count) |
665 | : "a" (size), "a" (addr), "a" (&_sb_findmap), | 669 | : "a" (size), "a" (addr), "a" (&_sb_findmap), |
666 | "m" (*(addrtype *) addr) : "cc"); | 670 | "m" (*(addrtype *) addr) : "cc"); |
667 | return (res < size) ? res : size; | 671 | return (res < size) ? res : size; |
668 | } | 672 | } |
669 | 673 | ||
670 | #endif /* __s390x__ */ | 674 | #endif /* __s390x__ */ |
671 | 675 | ||
672 | static inline int | 676 | static inline int |
673 | find_next_zero_bit (const unsigned long * addr, unsigned long size, | 677 | find_next_zero_bit (const unsigned long * addr, unsigned long size, |
674 | unsigned long offset) | 678 | unsigned long offset) |
675 | { | 679 | { |
676 | const unsigned long *p; | 680 | const unsigned long *p; |
677 | unsigned long bit, set; | 681 | unsigned long bit, set; |
678 | 682 | ||
679 | if (offset >= size) | 683 | if (offset >= size) |
680 | return size; | 684 | return size; |
681 | bit = offset & (__BITOPS_WORDSIZE - 1); | 685 | bit = offset & (__BITOPS_WORDSIZE - 1); |
682 | offset -= bit; | 686 | offset -= bit; |
683 | size -= offset; | 687 | size -= offset; |
684 | p = addr + offset / __BITOPS_WORDSIZE; | 688 | p = addr + offset / __BITOPS_WORDSIZE; |
685 | if (bit) { | 689 | if (bit) { |
686 | /* | 690 | /* |
687 | * s390 version of ffz returns __BITOPS_WORDSIZE | 691 | * s390 version of ffz returns __BITOPS_WORDSIZE |
688 | * if no zero bit is present in the word. | 692 | * if no zero bit is present in the word. |
689 | */ | 693 | */ |
690 | set = ffz(*p >> bit) + bit; | 694 | set = ffz(*p >> bit) + bit; |
691 | if (set >= size) | 695 | if (set >= size) |
692 | return size + offset; | 696 | return size + offset; |
693 | if (set < __BITOPS_WORDSIZE) | 697 | if (set < __BITOPS_WORDSIZE) |
694 | return set + offset; | 698 | return set + offset; |
695 | offset += __BITOPS_WORDSIZE; | 699 | offset += __BITOPS_WORDSIZE; |
696 | size -= __BITOPS_WORDSIZE; | 700 | size -= __BITOPS_WORDSIZE; |
697 | p++; | 701 | p++; |
698 | } | 702 | } |
699 | return offset + find_first_zero_bit(p, size); | 703 | return offset + find_first_zero_bit(p, size); |
700 | } | 704 | } |
701 | 705 | ||
702 | static inline int | 706 | static inline int |
703 | find_next_bit (const unsigned long * addr, unsigned long size, | 707 | find_next_bit (const unsigned long * addr, unsigned long size, |
704 | unsigned long offset) | 708 | unsigned long offset) |
705 | { | 709 | { |
706 | const unsigned long *p; | 710 | const unsigned long *p; |
707 | unsigned long bit, set; | 711 | unsigned long bit, set; |
708 | 712 | ||
709 | if (offset >= size) | 713 | if (offset >= size) |
710 | return size; | 714 | return size; |
711 | bit = offset & (__BITOPS_WORDSIZE - 1); | 715 | bit = offset & (__BITOPS_WORDSIZE - 1); |
712 | offset -= bit; | 716 | offset -= bit; |
713 | size -= offset; | 717 | size -= offset; |
714 | p = addr + offset / __BITOPS_WORDSIZE; | 718 | p = addr + offset / __BITOPS_WORDSIZE; |
715 | if (bit) { | 719 | if (bit) { |
716 | /* | 720 | /* |
717 | * s390 version of __ffs returns __BITOPS_WORDSIZE | 721 | * s390 version of __ffs returns __BITOPS_WORDSIZE |
718 | * if no one bit is present in the word. | 722 | * if no one bit is present in the word. |
719 | */ | 723 | */ |
720 | set = __ffs(*p & (~0UL << bit)); | 724 | set = __ffs(*p & (~0UL << bit)); |
721 | if (set >= size) | 725 | if (set >= size) |
722 | return size + offset; | 726 | return size + offset; |
723 | if (set < __BITOPS_WORDSIZE) | 727 | if (set < __BITOPS_WORDSIZE) |
724 | return set + offset; | 728 | return set + offset; |
725 | offset += __BITOPS_WORDSIZE; | 729 | offset += __BITOPS_WORDSIZE; |
726 | size -= __BITOPS_WORDSIZE; | 730 | size -= __BITOPS_WORDSIZE; |
727 | p++; | 731 | p++; |
728 | } | 732 | } |
729 | return offset + find_first_bit(p, size); | 733 | return offset + find_first_bit(p, size); |
730 | } | 734 | } |
731 | 735 | ||
732 | /* | 736 | /* |
733 | * Every architecture must define this function. It's the fastest | 737 | * Every architecture must define this function. It's the fastest |
734 | * way of searching a 140-bit bitmap where the first 100 bits are | 738 | * way of searching a 140-bit bitmap where the first 100 bits are |
735 | * unlikely to be set. It's guaranteed that at least one of the 140 | 739 | * unlikely to be set. It's guaranteed that at least one of the 140 |
736 | * bits is cleared. | 740 | * bits is cleared. |
737 | */ | 741 | */ |
738 | static inline int sched_find_first_bit(unsigned long *b) | 742 | static inline int sched_find_first_bit(unsigned long *b) |
739 | { | 743 | { |
740 | return find_first_bit(b, 140); | 744 | return find_first_bit(b, 140); |
741 | } | 745 | } |
742 | 746 | ||
743 | #include <asm-generic/bitops/ffs.h> | 747 | #include <asm-generic/bitops/ffs.h> |
744 | 748 | ||
745 | #include <asm-generic/bitops/fls.h> | 749 | #include <asm-generic/bitops/fls.h> |
746 | #include <asm-generic/bitops/fls64.h> | 750 | #include <asm-generic/bitops/fls64.h> |
747 | 751 | ||
748 | #include <asm-generic/bitops/hweight.h> | 752 | #include <asm-generic/bitops/hweight.h> |
749 | #include <asm-generic/bitops/lock.h> | 753 | #include <asm-generic/bitops/lock.h> |
750 | 754 | ||
751 | /* | 755 | /* |
752 | * ATTENTION: intel byte ordering convention for ext2 and minix !! | 756 | * ATTENTION: intel byte ordering convention for ext2 and minix !! |
753 | * bit 0 is the LSB of addr; bit 31 is the MSB of addr; | 757 | * bit 0 is the LSB of addr; bit 31 is the MSB of addr; |
754 | * bit 32 is the LSB of (addr+4). | 758 | * bit 32 is the LSB of (addr+4). |
755 | * That combined with the little endian byte order of Intel gives the | 759 | * That combined with the little endian byte order of Intel gives the |
756 | * following bit order in memory: | 760 | * following bit order in memory: |
757 | * 07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \ | 761 | * 07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \ |
758 | * 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24 | 762 | * 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24 |
759 | */ | 763 | */ |
760 | 764 | ||
761 | #define ext2_set_bit(nr, addr) \ | 765 | #define ext2_set_bit(nr, addr) \ |
762 | __test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) | 766 | __test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) |
763 | #define ext2_set_bit_atomic(lock, nr, addr) \ | 767 | #define ext2_set_bit_atomic(lock, nr, addr) \ |
764 | test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) | 768 | test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) |
765 | #define ext2_clear_bit(nr, addr) \ | 769 | #define ext2_clear_bit(nr, addr) \ |
766 | __test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) | 770 | __test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) |
767 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | 771 | #define ext2_clear_bit_atomic(lock, nr, addr) \ |
768 | test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) | 772 | test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) |
769 | #define ext2_test_bit(nr, addr) \ | 773 | #define ext2_test_bit(nr, addr) \ |
770 | test_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) | 774 | test_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) |
771 | 775 | ||
772 | #ifndef __s390x__ | 776 | #ifndef __s390x__ |
773 | 777 | ||
774 | static inline int | 778 | static inline int |
775 | ext2_find_first_zero_bit(void *vaddr, unsigned int size) | 779 | ext2_find_first_zero_bit(void *vaddr, unsigned int size) |
776 | { | 780 | { |
777 | typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; | 781 | typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; |
778 | unsigned long cmp, count; | 782 | unsigned long cmp, count; |
779 | unsigned int res; | 783 | unsigned int res; |
780 | 784 | ||
781 | if (!size) | 785 | if (!size) |
782 | return 0; | 786 | return 0; |
783 | asm volatile( | 787 | asm volatile( |
784 | " lhi %1,-1\n" | 788 | " lhi %1,-1\n" |
785 | " lr %2,%3\n" | 789 | " lr %2,%3\n" |
786 | " ahi %2,31\n" | 790 | " ahi %2,31\n" |
787 | " srl %2,5\n" | 791 | " srl %2,5\n" |
788 | " slr %0,%0\n" | 792 | " slr %0,%0\n" |
789 | "0: cl %1,0(%0,%4)\n" | 793 | "0: cl %1,0(%0,%4)\n" |
790 | " jne 1f\n" | 794 | " jne 1f\n" |
791 | " ahi %0,4\n" | 795 | " ahi %0,4\n" |
792 | " brct %2,0b\n" | 796 | " brct %2,0b\n" |
793 | " lr %0,%3\n" | 797 | " lr %0,%3\n" |
794 | " j 4f\n" | 798 | " j 4f\n" |
795 | "1: l %2,0(%0,%4)\n" | 799 | "1: l %2,0(%0,%4)\n" |
796 | " sll %0,3\n" | 800 | " sll %0,3\n" |
797 | " ahi %0,24\n" | 801 | " ahi %0,24\n" |
798 | " lhi %1,0xff\n" | 802 | " lhi %1,0xff\n" |
799 | " tmh %2,0xffff\n" | 803 | " tmh %2,0xffff\n" |
800 | " jo 2f\n" | 804 | " jo 2f\n" |
801 | " ahi %0,-16\n" | 805 | " ahi %0,-16\n" |
802 | " srl %2,16\n" | 806 | " srl %2,16\n" |
803 | "2: tml %2,0xff00\n" | 807 | "2: tml %2,0xff00\n" |
804 | " jo 3f\n" | 808 | " jo 3f\n" |
805 | " ahi %0,-8\n" | 809 | " ahi %0,-8\n" |
806 | " srl %2,8\n" | 810 | " srl %2,8\n" |
807 | "3: nr %2,%1\n" | 811 | "3: nr %2,%1\n" |
808 | " ic %2,0(%2,%5)\n" | 812 | " ic %2,0(%2,%5)\n" |
809 | " alr %0,%2\n" | 813 | " alr %0,%2\n" |
810 | "4:" | 814 | "4:" |
811 | : "=&a" (res), "=&d" (cmp), "=&a" (count) | 815 | : "=&a" (res), "=&d" (cmp), "=&a" (count) |
812 | : "a" (size), "a" (vaddr), "a" (&_zb_findmap), | 816 | : "a" (size), "a" (vaddr), "a" (&_zb_findmap), |
813 | "m" (*(addrtype *) vaddr) : "cc"); | 817 | "m" (*(addrtype *) vaddr) : "cc"); |
814 | return (res < size) ? res : size; | 818 | return (res < size) ? res : size; |
815 | } | 819 | } |
816 | 820 | ||
817 | #else /* __s390x__ */ | 821 | #else /* __s390x__ */ |
818 | 822 | ||
819 | static inline unsigned long | 823 | static inline unsigned long |
820 | ext2_find_first_zero_bit(void *vaddr, unsigned long size) | 824 | ext2_find_first_zero_bit(void *vaddr, unsigned long size) |
821 | { | 825 | { |
822 | typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; | 826 | typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; |
823 | unsigned long res, cmp, count; | 827 | unsigned long res, cmp, count; |
824 | 828 | ||
825 | if (!size) | 829 | if (!size) |
826 | return 0; | 830 | return 0; |
827 | asm volatile( | 831 | asm volatile( |
828 | " lghi %1,-1\n" | 832 | " lghi %1,-1\n" |
829 | " lgr %2,%3\n" | 833 | " lgr %2,%3\n" |
830 | " aghi %2,63\n" | 834 | " aghi %2,63\n" |
831 | " srlg %2,%2,6\n" | 835 | " srlg %2,%2,6\n" |
832 | " slgr %0,%0\n" | 836 | " slgr %0,%0\n" |
833 | "0: clg %1,0(%0,%4)\n" | 837 | "0: clg %1,0(%0,%4)\n" |
834 | " jne 1f\n" | 838 | " jne 1f\n" |
835 | " aghi %0,8\n" | 839 | " aghi %0,8\n" |
836 | " brct %2,0b\n" | 840 | " brct %2,0b\n" |
837 | " lgr %0,%3\n" | 841 | " lgr %0,%3\n" |
838 | " j 5f\n" | 842 | " j 5f\n" |
839 | "1: cl %1,0(%0,%4)\n" | 843 | "1: cl %1,0(%0,%4)\n" |
840 | " jne 2f\n" | 844 | " jne 2f\n" |
841 | " aghi %0,4\n" | 845 | " aghi %0,4\n" |
842 | "2: l %2,0(%0,%4)\n" | 846 | "2: l %2,0(%0,%4)\n" |
843 | " sllg %0,%0,3\n" | 847 | " sllg %0,%0,3\n" |
844 | " aghi %0,24\n" | 848 | " aghi %0,24\n" |
845 | " lghi %1,0xff\n" | 849 | " lghi %1,0xff\n" |
846 | " tmlh %2,0xffff\n" | 850 | " tmlh %2,0xffff\n" |
847 | " jo 3f\n" | 851 | " jo 3f\n" |
848 | " aghi %0,-16\n" | 852 | " aghi %0,-16\n" |
849 | " srl %2,16\n" | 853 | " srl %2,16\n" |
850 | "3: tmll %2,0xff00\n" | 854 | "3: tmll %2,0xff00\n" |
851 | " jo 4f\n" | 855 | " jo 4f\n" |
852 | " aghi %0,-8\n" | 856 | " aghi %0,-8\n" |
853 | " srl %2,8\n" | 857 | " srl %2,8\n" |
854 | "4: ngr %2,%1\n" | 858 | "4: ngr %2,%1\n" |
855 | " ic %2,0(%2,%5)\n" | 859 | " ic %2,0(%2,%5)\n" |
856 | " algr %0,%2\n" | 860 | " algr %0,%2\n" |
857 | "5:" | 861 | "5:" |
858 | : "=&a" (res), "=&d" (cmp), "=&a" (count) | 862 | : "=&a" (res), "=&d" (cmp), "=&a" (count) |
859 | : "a" (size), "a" (vaddr), "a" (&_zb_findmap), | 863 | : "a" (size), "a" (vaddr), "a" (&_zb_findmap), |
860 | "m" (*(addrtype *) vaddr) : "cc"); | 864 | "m" (*(addrtype *) vaddr) : "cc"); |
861 | return (res < size) ? res : size; | 865 | return (res < size) ? res : size; |
862 | } | 866 | } |
863 | 867 | ||
864 | #endif /* __s390x__ */ | 868 | #endif /* __s390x__ */ |
865 | 869 | ||
866 | static inline int | 870 | static inline int |
867 | ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset) | 871 | ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset) |
868 | { | 872 | { |
869 | unsigned long *addr = vaddr, *p; | 873 | unsigned long *addr = vaddr, *p; |
870 | unsigned long word, bit, set; | 874 | unsigned long word, bit, set; |
871 | 875 | ||
872 | if (offset >= size) | 876 | if (offset >= size) |
873 | return size; | 877 | return size; |
874 | bit = offset & (__BITOPS_WORDSIZE - 1); | 878 | bit = offset & (__BITOPS_WORDSIZE - 1); |
875 | offset -= bit; | 879 | offset -= bit; |
876 | size -= offset; | 880 | size -= offset; |
877 | p = addr + offset / __BITOPS_WORDSIZE; | 881 | p = addr + offset / __BITOPS_WORDSIZE; |
878 | if (bit) { | 882 | if (bit) { |
879 | #ifndef __s390x__ | 883 | #ifndef __s390x__ |
880 | asm volatile( | 884 | asm volatile( |
881 | " ic %0,0(%1)\n" | 885 | " ic %0,0(%1)\n" |
882 | " icm %0,2,1(%1)\n" | 886 | " icm %0,2,1(%1)\n" |
883 | " icm %0,4,2(%1)\n" | 887 | " icm %0,4,2(%1)\n" |
884 | " icm %0,8,3(%1)" | 888 | " icm %0,8,3(%1)" |
885 | : "=&a" (word) : "a" (p), "m" (*p) : "cc"); | 889 | : "=&a" (word) : "a" (p), "m" (*p) : "cc"); |
886 | #else | 890 | #else |
887 | asm volatile( | 891 | asm volatile( |
888 | " lrvg %0,%1" | 892 | " lrvg %0,%1" |
889 | : "=a" (word) : "m" (*p) ); | 893 | : "=a" (word) : "m" (*p) ); |
890 | #endif | 894 | #endif |
891 | /* | 895 | /* |
892 | * s390 version of ffz returns __BITOPS_WORDSIZE | 896 | * s390 version of ffz returns __BITOPS_WORDSIZE |
893 | * if no zero bit is present in the word. | 897 | * if no zero bit is present in the word. |
894 | */ | 898 | */ |
895 | set = ffz(word >> bit) + bit; | 899 | set = ffz(word >> bit) + bit; |
896 | if (set >= size) | 900 | if (set >= size) |
897 | return size + offset; | 901 | return size + offset; |
898 | if (set < __BITOPS_WORDSIZE) | 902 | if (set < __BITOPS_WORDSIZE) |
899 | return set + offset; | 903 | return set + offset; |
900 | offset += __BITOPS_WORDSIZE; | 904 | offset += __BITOPS_WORDSIZE; |
901 | size -= __BITOPS_WORDSIZE; | 905 | size -= __BITOPS_WORDSIZE; |
902 | p++; | 906 | p++; |
903 | } | 907 | } |
904 | return offset + ext2_find_first_zero_bit(p, size); | 908 | return offset + ext2_find_first_zero_bit(p, size); |
905 | } | 909 | } |
906 | 910 | ||
907 | #include <asm-generic/bitops/minix.h> | 911 | #include <asm-generic/bitops/minix.h> |
908 | 912 | ||
909 | #endif /* __KERNEL__ */ | 913 | #endif /* __KERNEL__ */ |
910 | 914 | ||
911 | #endif /* _S390_BITOPS_H */ | 915 | #endif /* _S390_BITOPS_H */ |
912 | 916 |
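Note on the s390 ext2 helpers above: the on-disk bitmap uses little-endian (ext2/minix) byte order, while the native s390 bit operations number bits within a big-endian word. XOR-ing the bit number with __BITOPS_WORDSIZE - 8 (24 on 31-bit, 56 on 64-bit) swaps the byte index inside the word and leaves the bit position within the byte untouched, which is the remapping the ext2_set_bit()/ext2_test_bit() macros rely on. A minimal sketch of that remapping, assuming a 32-bit word; the helper name below is illustrative only and not part of this patch:

    #include <assert.h>

    /* Hypothetical illustration: remap an ext2 (little-endian) bit number
     * to the native bit number used by test_bit() on big-endian s390 by
     * flipping the byte index within a 32-bit word. */
    static unsigned int ext2_to_native_bit(unsigned int nr)
    {
        return nr ^ (32 - 8);            /* XOR with 24: byte 0<->3, 1<->2 */
    }

    int main(void)
    {
        assert(ext2_to_native_bit(0)  == 24); /* LSB of lowest-addressed byte */
        assert(ext2_to_native_bit(7)  == 31);
        assert(ext2_to_native_bit(8)  == 16); /* second byte of the word */
        assert(ext2_to_native_bit(31) == 7);  /* last byte of the word */
        return 0;
    }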
include/asm-sh/bitops.h
1 | #ifndef __ASM_SH_BITOPS_H | 1 | #ifndef __ASM_SH_BITOPS_H |
2 | #define __ASM_SH_BITOPS_H | 2 | #define __ASM_SH_BITOPS_H |
3 | 3 | ||
4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
5 | |||
6 | #ifndef _LINUX_BITOPS_H | ||
7 | #error only <linux/bitops.h> can be included directly | ||
8 | #endif | ||
9 | |||
5 | #include <asm/system.h> | 10 | #include <asm/system.h> |
6 | /* For __swab32 */ | 11 | /* For __swab32 */ |
7 | #include <asm/byteorder.h> | 12 | #include <asm/byteorder.h> |
8 | 13 | ||
9 | static inline void set_bit(int nr, volatile void * addr) | 14 | static inline void set_bit(int nr, volatile void * addr) |
10 | { | 15 | { |
11 | int mask; | 16 | int mask; |
12 | volatile unsigned int *a = addr; | 17 | volatile unsigned int *a = addr; |
13 | unsigned long flags; | 18 | unsigned long flags; |
14 | 19 | ||
15 | a += nr >> 5; | 20 | a += nr >> 5; |
16 | mask = 1 << (nr & 0x1f); | 21 | mask = 1 << (nr & 0x1f); |
17 | local_irq_save(flags); | 22 | local_irq_save(flags); |
18 | *a |= mask; | 23 | *a |= mask; |
19 | local_irq_restore(flags); | 24 | local_irq_restore(flags); |
20 | } | 25 | } |
21 | 26 | ||
22 | /* | 27 | /* |
23 | * clear_bit() doesn't provide any barrier for the compiler. | 28 | * clear_bit() doesn't provide any barrier for the compiler. |
24 | */ | 29 | */ |
25 | #define smp_mb__before_clear_bit() barrier() | 30 | #define smp_mb__before_clear_bit() barrier() |
26 | #define smp_mb__after_clear_bit() barrier() | 31 | #define smp_mb__after_clear_bit() barrier() |
27 | static inline void clear_bit(int nr, volatile void * addr) | 32 | static inline void clear_bit(int nr, volatile void * addr) |
28 | { | 33 | { |
29 | int mask; | 34 | int mask; |
30 | volatile unsigned int *a = addr; | 35 | volatile unsigned int *a = addr; |
31 | unsigned long flags; | 36 | unsigned long flags; |
32 | 37 | ||
33 | a += nr >> 5; | 38 | a += nr >> 5; |
34 | mask = 1 << (nr & 0x1f); | 39 | mask = 1 << (nr & 0x1f); |
35 | local_irq_save(flags); | 40 | local_irq_save(flags); |
36 | *a &= ~mask; | 41 | *a &= ~mask; |
37 | local_irq_restore(flags); | 42 | local_irq_restore(flags); |
38 | } | 43 | } |
39 | 44 | ||
40 | static inline void change_bit(int nr, volatile void * addr) | 45 | static inline void change_bit(int nr, volatile void * addr) |
41 | { | 46 | { |
42 | int mask; | 47 | int mask; |
43 | volatile unsigned int *a = addr; | 48 | volatile unsigned int *a = addr; |
44 | unsigned long flags; | 49 | unsigned long flags; |
45 | 50 | ||
46 | a += nr >> 5; | 51 | a += nr >> 5; |
47 | mask = 1 << (nr & 0x1f); | 52 | mask = 1 << (nr & 0x1f); |
48 | local_irq_save(flags); | 53 | local_irq_save(flags); |
49 | *a ^= mask; | 54 | *a ^= mask; |
50 | local_irq_restore(flags); | 55 | local_irq_restore(flags); |
51 | } | 56 | } |
52 | 57 | ||
53 | static inline int test_and_set_bit(int nr, volatile void * addr) | 58 | static inline int test_and_set_bit(int nr, volatile void * addr) |
54 | { | 59 | { |
55 | int mask, retval; | 60 | int mask, retval; |
56 | volatile unsigned int *a = addr; | 61 | volatile unsigned int *a = addr; |
57 | unsigned long flags; | 62 | unsigned long flags; |
58 | 63 | ||
59 | a += nr >> 5; | 64 | a += nr >> 5; |
60 | mask = 1 << (nr & 0x1f); | 65 | mask = 1 << (nr & 0x1f); |
61 | local_irq_save(flags); | 66 | local_irq_save(flags); |
62 | retval = (mask & *a) != 0; | 67 | retval = (mask & *a) != 0; |
63 | *a |= mask; | 68 | *a |= mask; |
64 | local_irq_restore(flags); | 69 | local_irq_restore(flags); |
65 | 70 | ||
66 | return retval; | 71 | return retval; |
67 | } | 72 | } |
68 | 73 | ||
69 | static inline int test_and_clear_bit(int nr, volatile void * addr) | 74 | static inline int test_and_clear_bit(int nr, volatile void * addr) |
70 | { | 75 | { |
71 | int mask, retval; | 76 | int mask, retval; |
72 | volatile unsigned int *a = addr; | 77 | volatile unsigned int *a = addr; |
73 | unsigned long flags; | 78 | unsigned long flags; |
74 | 79 | ||
75 | a += nr >> 5; | 80 | a += nr >> 5; |
76 | mask = 1 << (nr & 0x1f); | 81 | mask = 1 << (nr & 0x1f); |
77 | local_irq_save(flags); | 82 | local_irq_save(flags); |
78 | retval = (mask & *a) != 0; | 83 | retval = (mask & *a) != 0; |
79 | *a &= ~mask; | 84 | *a &= ~mask; |
80 | local_irq_restore(flags); | 85 | local_irq_restore(flags); |
81 | 86 | ||
82 | return retval; | 87 | return retval; |
83 | } | 88 | } |
84 | 89 | ||
85 | static inline int test_and_change_bit(int nr, volatile void * addr) | 90 | static inline int test_and_change_bit(int nr, volatile void * addr) |
86 | { | 91 | { |
87 | int mask, retval; | 92 | int mask, retval; |
88 | volatile unsigned int *a = addr; | 93 | volatile unsigned int *a = addr; |
89 | unsigned long flags; | 94 | unsigned long flags; |
90 | 95 | ||
91 | a += nr >> 5; | 96 | a += nr >> 5; |
92 | mask = 1 << (nr & 0x1f); | 97 | mask = 1 << (nr & 0x1f); |
93 | local_irq_save(flags); | 98 | local_irq_save(flags); |
94 | retval = (mask & *a) != 0; | 99 | retval = (mask & *a) != 0; |
95 | *a ^= mask; | 100 | *a ^= mask; |
96 | local_irq_restore(flags); | 101 | local_irq_restore(flags); |
97 | 102 | ||
98 | return retval; | 103 | return retval; |
99 | } | 104 | } |
100 | 105 | ||
101 | #include <asm-generic/bitops/non-atomic.h> | 106 | #include <asm-generic/bitops/non-atomic.h> |
102 | 107 | ||
103 | static inline unsigned long ffz(unsigned long word) | 108 | static inline unsigned long ffz(unsigned long word) |
104 | { | 109 | { |
105 | unsigned long result; | 110 | unsigned long result; |
106 | 111 | ||
107 | __asm__("1:\n\t" | 112 | __asm__("1:\n\t" |
108 | "shlr %1\n\t" | 113 | "shlr %1\n\t" |
109 | "bt/s 1b\n\t" | 114 | "bt/s 1b\n\t" |
110 | " add #1, %0" | 115 | " add #1, %0" |
111 | : "=r" (result), "=r" (word) | 116 | : "=r" (result), "=r" (word) |
112 | : "0" (~0L), "1" (word) | 117 | : "0" (~0L), "1" (word) |
113 | : "t"); | 118 | : "t"); |
114 | return result; | 119 | return result; |
115 | } | 120 | } |
116 | 121 | ||
117 | /** | 122 | /** |
118 | * __ffs - find first bit in word. | 123 | * __ffs - find first bit in word. |
119 | * @word: The word to search | 124 | * @word: The word to search |
120 | * | 125 | * |
121 | * Undefined if no bit exists, so code should check against 0 first. | 126 | * Undefined if no bit exists, so code should check against 0 first. |
122 | */ | 127 | */ |
123 | static inline unsigned long __ffs(unsigned long word) | 128 | static inline unsigned long __ffs(unsigned long word) |
124 | { | 129 | { |
125 | unsigned long result; | 130 | unsigned long result; |
126 | 131 | ||
127 | __asm__("1:\n\t" | 132 | __asm__("1:\n\t" |
128 | "shlr %1\n\t" | 133 | "shlr %1\n\t" |
129 | "bf/s 1b\n\t" | 134 | "bf/s 1b\n\t" |
130 | " add #1, %0" | 135 | " add #1, %0" |
131 | : "=r" (result), "=r" (word) | 136 | : "=r" (result), "=r" (word) |
132 | : "0" (~0L), "1" (word) | 137 | : "0" (~0L), "1" (word) |
133 | : "t"); | 138 | : "t"); |
134 | return result; | 139 | return result; |
135 | } | 140 | } |
136 | 141 | ||
137 | #include <asm-generic/bitops/find.h> | 142 | #include <asm-generic/bitops/find.h> |
138 | #include <asm-generic/bitops/ffs.h> | 143 | #include <asm-generic/bitops/ffs.h> |
139 | #include <asm-generic/bitops/hweight.h> | 144 | #include <asm-generic/bitops/hweight.h> |
140 | #include <asm-generic/bitops/lock.h> | 145 | #include <asm-generic/bitops/lock.h> |
141 | #include <asm-generic/bitops/sched.h> | 146 | #include <asm-generic/bitops/sched.h> |
142 | #include <asm-generic/bitops/ext2-non-atomic.h> | 147 | #include <asm-generic/bitops/ext2-non-atomic.h> |
143 | #include <asm-generic/bitops/ext2-atomic.h> | 148 | #include <asm-generic/bitops/ext2-atomic.h> |
144 | #include <asm-generic/bitops/minix.h> | 149 | #include <asm-generic/bitops/minix.h> |
145 | #include <asm-generic/bitops/fls.h> | 150 | #include <asm-generic/bitops/fls.h> |
146 | #include <asm-generic/bitops/fls64.h> | 151 | #include <asm-generic/bitops/fls64.h> |
147 | 152 | ||
148 | #endif /* __KERNEL__ */ | 153 | #endif /* __KERNEL__ */ |
149 | 154 | ||
150 | #endif /* __ASM_SH_BITOPS_H */ | 155 | #endif /* __ASM_SH_BITOPS_H */ |
151 | 156 |
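The SH ffz()/__ffs() routines above implement the scan as a shift-and-count loop in inline assembly. A portable C sketch of what ffz() computes, with the same caveat as the assembly that the result is meaningless when the word has no zero bit; the function name is illustrative only:

    /* Sketch of ffz(): index of the least significant zero bit. */
    static unsigned long ffz_sketch(unsigned long word)
    {
        unsigned long result = 0;

        while (word & 1UL) {     /* low bit still set: keep scanning */
            word >>= 1;
            result++;
        }
        return result;           /* position of the first 0 bit */
    }

__ffs() is the same loop with the test inverted, counting zero bits until the first set bit is reached.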
include/asm-sh64/bitops.h
1 | #ifndef __ASM_SH64_BITOPS_H | 1 | #ifndef __ASM_SH64_BITOPS_H |
2 | #define __ASM_SH64_BITOPS_H | 2 | #define __ASM_SH64_BITOPS_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * This file is subject to the terms and conditions of the GNU General Public | 5 | * This file is subject to the terms and conditions of the GNU General Public |
6 | * License. See the file "COPYING" in the main directory of this archive | 6 | * License. See the file "COPYING" in the main directory of this archive |
7 | * for more details. | 7 | * for more details. |
8 | * | 8 | * |
9 | * include/asm-sh64/bitops.h | 9 | * include/asm-sh64/bitops.h |
10 | * | 10 | * |
11 | * Copyright (C) 2000, 2001 Paolo Alberelli | 11 | * Copyright (C) 2000, 2001 Paolo Alberelli |
12 | * Copyright (C) 2003 Paul Mundt | 12 | * Copyright (C) 2003 Paul Mundt |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #ifdef __KERNEL__ | 15 | #ifdef __KERNEL__ |
16 | |||
17 | #ifndef _LINUX_BITOPS_H | ||
18 | #error only <linux/bitops.h> can be included directly | ||
19 | #endif | ||
20 | |||
16 | #include <linux/compiler.h> | 21 | #include <linux/compiler.h> |
17 | #include <asm/system.h> | 22 | #include <asm/system.h> |
18 | /* For __swab32 */ | 23 | /* For __swab32 */ |
19 | #include <asm/byteorder.h> | 24 | #include <asm/byteorder.h> |
20 | 25 | ||
21 | static __inline__ void set_bit(int nr, volatile void * addr) | 26 | static __inline__ void set_bit(int nr, volatile void * addr) |
22 | { | 27 | { |
23 | int mask; | 28 | int mask; |
24 | volatile unsigned int *a = addr; | 29 | volatile unsigned int *a = addr; |
25 | unsigned long flags; | 30 | unsigned long flags; |
26 | 31 | ||
27 | a += nr >> 5; | 32 | a += nr >> 5; |
28 | mask = 1 << (nr & 0x1f); | 33 | mask = 1 << (nr & 0x1f); |
29 | local_irq_save(flags); | 34 | local_irq_save(flags); |
30 | *a |= mask; | 35 | *a |= mask; |
31 | local_irq_restore(flags); | 36 | local_irq_restore(flags); |
32 | } | 37 | } |
33 | 38 | ||
34 | /* | 39 | /* |
35 | * clear_bit() doesn't provide any barrier for the compiler. | 40 | * clear_bit() doesn't provide any barrier for the compiler. |
36 | */ | 41 | */ |
37 | #define smp_mb__before_clear_bit() barrier() | 42 | #define smp_mb__before_clear_bit() barrier() |
38 | #define smp_mb__after_clear_bit() barrier() | 43 | #define smp_mb__after_clear_bit() barrier() |
39 | static inline void clear_bit(int nr, volatile unsigned long *a) | 44 | static inline void clear_bit(int nr, volatile unsigned long *a) |
40 | { | 45 | { |
41 | int mask; | 46 | int mask; |
42 | unsigned long flags; | 47 | unsigned long flags; |
43 | 48 | ||
44 | a += nr >> 5; | 49 | a += nr >> 5; |
45 | mask = 1 << (nr & 0x1f); | 50 | mask = 1 << (nr & 0x1f); |
46 | local_irq_save(flags); | 51 | local_irq_save(flags); |
47 | *a &= ~mask; | 52 | *a &= ~mask; |
48 | local_irq_restore(flags); | 53 | local_irq_restore(flags); |
49 | } | 54 | } |
50 | 55 | ||
51 | static __inline__ void change_bit(int nr, volatile void * addr) | 56 | static __inline__ void change_bit(int nr, volatile void * addr) |
52 | { | 57 | { |
53 | int mask; | 58 | int mask; |
54 | volatile unsigned int *a = addr; | 59 | volatile unsigned int *a = addr; |
55 | unsigned long flags; | 60 | unsigned long flags; |
56 | 61 | ||
57 | a += nr >> 5; | 62 | a += nr >> 5; |
58 | mask = 1 << (nr & 0x1f); | 63 | mask = 1 << (nr & 0x1f); |
59 | local_irq_save(flags); | 64 | local_irq_save(flags); |
60 | *a ^= mask; | 65 | *a ^= mask; |
61 | local_irq_restore(flags); | 66 | local_irq_restore(flags); |
62 | } | 67 | } |
63 | 68 | ||
64 | static __inline__ int test_and_set_bit(int nr, volatile void * addr) | 69 | static __inline__ int test_and_set_bit(int nr, volatile void * addr) |
65 | { | 70 | { |
66 | int mask, retval; | 71 | int mask, retval; |
67 | volatile unsigned int *a = addr; | 72 | volatile unsigned int *a = addr; |
68 | unsigned long flags; | 73 | unsigned long flags; |
69 | 74 | ||
70 | a += nr >> 5; | 75 | a += nr >> 5; |
71 | mask = 1 << (nr & 0x1f); | 76 | mask = 1 << (nr & 0x1f); |
72 | local_irq_save(flags); | 77 | local_irq_save(flags); |
73 | retval = (mask & *a) != 0; | 78 | retval = (mask & *a) != 0; |
74 | *a |= mask; | 79 | *a |= mask; |
75 | local_irq_restore(flags); | 80 | local_irq_restore(flags); |
76 | 81 | ||
77 | return retval; | 82 | return retval; |
78 | } | 83 | } |
79 | 84 | ||
80 | static __inline__ int test_and_clear_bit(int nr, volatile void * addr) | 85 | static __inline__ int test_and_clear_bit(int nr, volatile void * addr) |
81 | { | 86 | { |
82 | int mask, retval; | 87 | int mask, retval; |
83 | volatile unsigned int *a = addr; | 88 | volatile unsigned int *a = addr; |
84 | unsigned long flags; | 89 | unsigned long flags; |
85 | 90 | ||
86 | a += nr >> 5; | 91 | a += nr >> 5; |
87 | mask = 1 << (nr & 0x1f); | 92 | mask = 1 << (nr & 0x1f); |
88 | local_irq_save(flags); | 93 | local_irq_save(flags); |
89 | retval = (mask & *a) != 0; | 94 | retval = (mask & *a) != 0; |
90 | *a &= ~mask; | 95 | *a &= ~mask; |
91 | local_irq_restore(flags); | 96 | local_irq_restore(flags); |
92 | 97 | ||
93 | return retval; | 98 | return retval; |
94 | } | 99 | } |
95 | 100 | ||
96 | static __inline__ int test_and_change_bit(int nr, volatile void * addr) | 101 | static __inline__ int test_and_change_bit(int nr, volatile void * addr) |
97 | { | 102 | { |
98 | int mask, retval; | 103 | int mask, retval; |
99 | volatile unsigned int *a = addr; | 104 | volatile unsigned int *a = addr; |
100 | unsigned long flags; | 105 | unsigned long flags; |
101 | 106 | ||
102 | a += nr >> 5; | 107 | a += nr >> 5; |
103 | mask = 1 << (nr & 0x1f); | 108 | mask = 1 << (nr & 0x1f); |
104 | local_irq_save(flags); | 109 | local_irq_save(flags); |
105 | retval = (mask & *a) != 0; | 110 | retval = (mask & *a) != 0; |
106 | *a ^= mask; | 111 | *a ^= mask; |
107 | local_irq_restore(flags); | 112 | local_irq_restore(flags); |
108 | 113 | ||
109 | return retval; | 114 | return retval; |
110 | } | 115 | } |
111 | 116 | ||
112 | #include <asm-generic/bitops/non-atomic.h> | 117 | #include <asm-generic/bitops/non-atomic.h> |
113 | 118 | ||
114 | static __inline__ unsigned long ffz(unsigned long word) | 119 | static __inline__ unsigned long ffz(unsigned long word) |
115 | { | 120 | { |
116 | unsigned long result, __d2, __d3; | 121 | unsigned long result, __d2, __d3; |
117 | 122 | ||
118 | __asm__("gettr tr0, %2\n\t" | 123 | __asm__("gettr tr0, %2\n\t" |
119 | "pta $+32, tr0\n\t" | 124 | "pta $+32, tr0\n\t" |
120 | "andi %1, 1, %3\n\t" | 125 | "andi %1, 1, %3\n\t" |
121 | "beq %3, r63, tr0\n\t" | 126 | "beq %3, r63, tr0\n\t" |
122 | "pta $+4, tr0\n" | 127 | "pta $+4, tr0\n" |
123 | "0:\n\t" | 128 | "0:\n\t" |
124 | "shlri.l %1, 1, %1\n\t" | 129 | "shlri.l %1, 1, %1\n\t" |
125 | "addi %0, 1, %0\n\t" | 130 | "addi %0, 1, %0\n\t" |
126 | "andi %1, 1, %3\n\t" | 131 | "andi %1, 1, %3\n\t" |
127 | "beqi %3, 1, tr0\n" | 132 | "beqi %3, 1, tr0\n" |
128 | "1:\n\t" | 133 | "1:\n\t" |
129 | "ptabs %2, tr0\n\t" | 134 | "ptabs %2, tr0\n\t" |
130 | : "=r" (result), "=r" (word), "=r" (__d2), "=r" (__d3) | 135 | : "=r" (result), "=r" (word), "=r" (__d2), "=r" (__d3) |
131 | : "0" (0L), "1" (word)); | 136 | : "0" (0L), "1" (word)); |
132 | 137 | ||
133 | return result; | 138 | return result; |
134 | } | 139 | } |
135 | 140 | ||
136 | #include <asm-generic/bitops/__ffs.h> | 141 | #include <asm-generic/bitops/__ffs.h> |
137 | #include <asm-generic/bitops/find.h> | 142 | #include <asm-generic/bitops/find.h> |
138 | #include <asm-generic/bitops/hweight.h> | 143 | #include <asm-generic/bitops/hweight.h> |
139 | #include <asm-generic/bitops/lock.h> | 144 | #include <asm-generic/bitops/lock.h> |
140 | #include <asm-generic/bitops/sched.h> | 145 | #include <asm-generic/bitops/sched.h> |
141 | #include <asm-generic/bitops/ffs.h> | 146 | #include <asm-generic/bitops/ffs.h> |
142 | #include <asm-generic/bitops/ext2-non-atomic.h> | 147 | #include <asm-generic/bitops/ext2-non-atomic.h> |
143 | #include <asm-generic/bitops/ext2-atomic.h> | 148 | #include <asm-generic/bitops/ext2-atomic.h> |
144 | #include <asm-generic/bitops/minix.h> | 149 | #include <asm-generic/bitops/minix.h> |
145 | #include <asm-generic/bitops/fls.h> | 150 | #include <asm-generic/bitops/fls.h> |
146 | #include <asm-generic/bitops/fls64.h> | 151 | #include <asm-generic/bitops/fls64.h> |
147 | 152 | ||
148 | #endif /* __KERNEL__ */ | 153 | #endif /* __KERNEL__ */ |
149 | 154 | ||
150 | #endif /* __ASM_SH64_BITOPS_H */ | 155 | #endif /* __ASM_SH64_BITOPS_H */ |
151 | 156 |
include/asm-sparc/bitops.h
1 | /* $Id: bitops.h,v 1.67 2001/11/19 18:36:34 davem Exp $ | 1 | /* $Id: bitops.h,v 1.67 2001/11/19 18:36:34 davem Exp $ |
2 | * bitops.h: Bit string operations on the Sparc. | 2 | * bitops.h: Bit string operations on the Sparc. |
3 | * | 3 | * |
4 | * Copyright 1995 David S. Miller (davem@caip.rutgers.edu) | 4 | * Copyright 1995 David S. Miller (davem@caip.rutgers.edu) |
5 | * Copyright 1996 Eddie C. Dost (ecd@skynet.be) | 5 | * Copyright 1996 Eddie C. Dost (ecd@skynet.be) |
6 | * Copyright 2001 Anton Blanchard (anton@samba.org) | 6 | * Copyright 2001 Anton Blanchard (anton@samba.org) |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #ifndef _SPARC_BITOPS_H | 9 | #ifndef _SPARC_BITOPS_H |
10 | #define _SPARC_BITOPS_H | 10 | #define _SPARC_BITOPS_H |
11 | 11 | ||
12 | #include <linux/compiler.h> | 12 | #include <linux/compiler.h> |
13 | #include <asm/byteorder.h> | 13 | #include <asm/byteorder.h> |
14 | 14 | ||
15 | #ifdef __KERNEL__ | 15 | #ifdef __KERNEL__ |
16 | 16 | ||
17 | #ifndef _LINUX_BITOPS_H | ||
18 | #error only <linux/bitops.h> can be included directly | ||
19 | #endif | ||
20 | |||
17 | extern unsigned long ___set_bit(unsigned long *addr, unsigned long mask); | 21 | extern unsigned long ___set_bit(unsigned long *addr, unsigned long mask); |
18 | extern unsigned long ___clear_bit(unsigned long *addr, unsigned long mask); | 22 | extern unsigned long ___clear_bit(unsigned long *addr, unsigned long mask); |
19 | extern unsigned long ___change_bit(unsigned long *addr, unsigned long mask); | 23 | extern unsigned long ___change_bit(unsigned long *addr, unsigned long mask); |
20 | 24 | ||
21 | /* | 25 | /* |
22 | * Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0' | 26 | * Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0' |
23 | * is in the highest of the four bytes and bit '31' is the high bit | 27 | * is in the highest of the four bytes and bit '31' is the high bit |
24 | * within the first byte. Sparc is BIG-Endian. Unless noted otherwise | 28 | * within the first byte. Sparc is BIG-Endian. Unless noted otherwise |
25 | * all bit-ops return 0 if bit was previously clear and != 0 otherwise. | 29 | * all bit-ops return 0 if bit was previously clear and != 0 otherwise. |
26 | */ | 30 | */ |
27 | static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr) | 31 | static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr) |
28 | { | 32 | { |
29 | unsigned long *ADDR, mask; | 33 | unsigned long *ADDR, mask; |
30 | 34 | ||
31 | ADDR = ((unsigned long *) addr) + (nr >> 5); | 35 | ADDR = ((unsigned long *) addr) + (nr >> 5); |
32 | mask = 1 << (nr & 31); | 36 | mask = 1 << (nr & 31); |
33 | 37 | ||
34 | return ___set_bit(ADDR, mask) != 0; | 38 | return ___set_bit(ADDR, mask) != 0; |
35 | } | 39 | } |
36 | 40 | ||
37 | static inline void set_bit(unsigned long nr, volatile unsigned long *addr) | 41 | static inline void set_bit(unsigned long nr, volatile unsigned long *addr) |
38 | { | 42 | { |
39 | unsigned long *ADDR, mask; | 43 | unsigned long *ADDR, mask; |
40 | 44 | ||
41 | ADDR = ((unsigned long *) addr) + (nr >> 5); | 45 | ADDR = ((unsigned long *) addr) + (nr >> 5); |
42 | mask = 1 << (nr & 31); | 46 | mask = 1 << (nr & 31); |
43 | 47 | ||
44 | (void) ___set_bit(ADDR, mask); | 48 | (void) ___set_bit(ADDR, mask); |
45 | } | 49 | } |
46 | 50 | ||
47 | static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) | 51 | static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) |
48 | { | 52 | { |
49 | unsigned long *ADDR, mask; | 53 | unsigned long *ADDR, mask; |
50 | 54 | ||
51 | ADDR = ((unsigned long *) addr) + (nr >> 5); | 55 | ADDR = ((unsigned long *) addr) + (nr >> 5); |
52 | mask = 1 << (nr & 31); | 56 | mask = 1 << (nr & 31); |
53 | 57 | ||
54 | return ___clear_bit(ADDR, mask) != 0; | 58 | return ___clear_bit(ADDR, mask) != 0; |
55 | } | 59 | } |
56 | 60 | ||
57 | static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) | 61 | static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) |
58 | { | 62 | { |
59 | unsigned long *ADDR, mask; | 63 | unsigned long *ADDR, mask; |
60 | 64 | ||
61 | ADDR = ((unsigned long *) addr) + (nr >> 5); | 65 | ADDR = ((unsigned long *) addr) + (nr >> 5); |
62 | mask = 1 << (nr & 31); | 66 | mask = 1 << (nr & 31); |
63 | 67 | ||
64 | (void) ___clear_bit(ADDR, mask); | 68 | (void) ___clear_bit(ADDR, mask); |
65 | } | 69 | } |
66 | 70 | ||
67 | static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr) | 71 | static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr) |
68 | { | 72 | { |
69 | unsigned long *ADDR, mask; | 73 | unsigned long *ADDR, mask; |
70 | 74 | ||
71 | ADDR = ((unsigned long *) addr) + (nr >> 5); | 75 | ADDR = ((unsigned long *) addr) + (nr >> 5); |
72 | mask = 1 << (nr & 31); | 76 | mask = 1 << (nr & 31); |
73 | 77 | ||
74 | return ___change_bit(ADDR, mask) != 0; | 78 | return ___change_bit(ADDR, mask) != 0; |
75 | } | 79 | } |
76 | 80 | ||
77 | static inline void change_bit(unsigned long nr, volatile unsigned long *addr) | 81 | static inline void change_bit(unsigned long nr, volatile unsigned long *addr) |
78 | { | 82 | { |
79 | unsigned long *ADDR, mask; | 83 | unsigned long *ADDR, mask; |
80 | 84 | ||
81 | ADDR = ((unsigned long *) addr) + (nr >> 5); | 85 | ADDR = ((unsigned long *) addr) + (nr >> 5); |
82 | mask = 1 << (nr & 31); | 86 | mask = 1 << (nr & 31); |
83 | 87 | ||
84 | (void) ___change_bit(ADDR, mask); | 88 | (void) ___change_bit(ADDR, mask); |
85 | } | 89 | } |
86 | 90 | ||
87 | #include <asm-generic/bitops/non-atomic.h> | 91 | #include <asm-generic/bitops/non-atomic.h> |
88 | 92 | ||
89 | #define smp_mb__before_clear_bit() do { } while(0) | 93 | #define smp_mb__before_clear_bit() do { } while(0) |
90 | #define smp_mb__after_clear_bit() do { } while(0) | 94 | #define smp_mb__after_clear_bit() do { } while(0) |
91 | 95 | ||
92 | #include <asm-generic/bitops/ffz.h> | 96 | #include <asm-generic/bitops/ffz.h> |
93 | #include <asm-generic/bitops/__ffs.h> | 97 | #include <asm-generic/bitops/__ffs.h> |
94 | #include <asm-generic/bitops/sched.h> | 98 | #include <asm-generic/bitops/sched.h> |
95 | #include <asm-generic/bitops/ffs.h> | 99 | #include <asm-generic/bitops/ffs.h> |
96 | #include <asm-generic/bitops/fls.h> | 100 | #include <asm-generic/bitops/fls.h> |
97 | #include <asm-generic/bitops/fls64.h> | 101 | #include <asm-generic/bitops/fls64.h> |
98 | #include <asm-generic/bitops/hweight.h> | 102 | #include <asm-generic/bitops/hweight.h> |
99 | #include <asm-generic/bitops/lock.h> | 103 | #include <asm-generic/bitops/lock.h> |
100 | #include <asm-generic/bitops/find.h> | 104 | #include <asm-generic/bitops/find.h> |
101 | #include <asm-generic/bitops/ext2-non-atomic.h> | 105 | #include <asm-generic/bitops/ext2-non-atomic.h> |
102 | #include <asm-generic/bitops/ext2-atomic.h> | 106 | #include <asm-generic/bitops/ext2-atomic.h> |
103 | #include <asm-generic/bitops/minix.h> | 107 | #include <asm-generic/bitops/minix.h> |
104 | 108 | ||
105 | #endif /* __KERNEL__ */ | 109 | #endif /* __KERNEL__ */ |
106 | 110 | ||
107 | #endif /* defined(_SPARC_BITOPS_H) */ | 111 | #endif /* defined(_SPARC_BITOPS_H) */ |
108 | 112 |
include/asm-sparc64/bitops.h
1 | /* $Id: bitops.h,v 1.39 2002/01/30 01:40:00 davem Exp $ | 1 | /* $Id: bitops.h,v 1.39 2002/01/30 01:40:00 davem Exp $ |
2 | * bitops.h: Bit string operations on the V9. | 2 | * bitops.h: Bit string operations on the V9. |
3 | * | 3 | * |
4 | * Copyright 1996, 1997 David S. Miller (davem@caip.rutgers.edu) | 4 | * Copyright 1996, 1997 David S. Miller (davem@caip.rutgers.edu) |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #ifndef _SPARC64_BITOPS_H | 7 | #ifndef _SPARC64_BITOPS_H |
8 | #define _SPARC64_BITOPS_H | 8 | #define _SPARC64_BITOPS_H |
9 | 9 | ||
10 | #ifndef _LINUX_BITOPS_H | ||
11 | #error only <linux/bitops.h> can be included directly | ||
12 | #endif | ||
13 | |||
10 | #include <linux/compiler.h> | 14 | #include <linux/compiler.h> |
11 | #include <asm/byteorder.h> | 15 | #include <asm/byteorder.h> |
12 | 16 | ||
13 | extern int test_and_set_bit(unsigned long nr, volatile unsigned long *addr); | 17 | extern int test_and_set_bit(unsigned long nr, volatile unsigned long *addr); |
14 | extern int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr); | 18 | extern int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr); |
15 | extern int test_and_change_bit(unsigned long nr, volatile unsigned long *addr); | 19 | extern int test_and_change_bit(unsigned long nr, volatile unsigned long *addr); |
16 | extern void set_bit(unsigned long nr, volatile unsigned long *addr); | 20 | extern void set_bit(unsigned long nr, volatile unsigned long *addr); |
17 | extern void clear_bit(unsigned long nr, volatile unsigned long *addr); | 21 | extern void clear_bit(unsigned long nr, volatile unsigned long *addr); |
18 | extern void change_bit(unsigned long nr, volatile unsigned long *addr); | 22 | extern void change_bit(unsigned long nr, volatile unsigned long *addr); |
19 | 23 | ||
20 | #include <asm-generic/bitops/non-atomic.h> | 24 | #include <asm-generic/bitops/non-atomic.h> |
21 | 25 | ||
22 | #ifdef CONFIG_SMP | 26 | #ifdef CONFIG_SMP |
23 | #define smp_mb__before_clear_bit() membar_storeload_loadload() | 27 | #define smp_mb__before_clear_bit() membar_storeload_loadload() |
24 | #define smp_mb__after_clear_bit() membar_storeload_storestore() | 28 | #define smp_mb__after_clear_bit() membar_storeload_storestore() |
25 | #else | 29 | #else |
26 | #define smp_mb__before_clear_bit() barrier() | 30 | #define smp_mb__before_clear_bit() barrier() |
27 | #define smp_mb__after_clear_bit() barrier() | 31 | #define smp_mb__after_clear_bit() barrier() |
28 | #endif | 32 | #endif |
29 | 33 | ||
30 | #include <asm-generic/bitops/ffz.h> | 34 | #include <asm-generic/bitops/ffz.h> |
31 | #include <asm-generic/bitops/__ffs.h> | 35 | #include <asm-generic/bitops/__ffs.h> |
32 | #include <asm-generic/bitops/fls.h> | 36 | #include <asm-generic/bitops/fls.h> |
33 | #include <asm-generic/bitops/fls64.h> | 37 | #include <asm-generic/bitops/fls64.h> |
34 | 38 | ||
35 | #ifdef __KERNEL__ | 39 | #ifdef __KERNEL__ |
36 | 40 | ||
37 | #include <asm-generic/bitops/sched.h> | 41 | #include <asm-generic/bitops/sched.h> |
38 | #include <asm-generic/bitops/ffs.h> | 42 | #include <asm-generic/bitops/ffs.h> |
39 | 43 | ||
40 | /* | 44 | /* |
41 | * hweightN: returns the hamming weight (i.e. the number | 45 | * hweightN: returns the hamming weight (i.e. the number |
42 | * of bits set) of a N-bit word | 46 | * of bits set) of a N-bit word |
43 | */ | 47 | */ |
44 | 48 | ||
45 | #ifdef ULTRA_HAS_POPULATION_COUNT | 49 | #ifdef ULTRA_HAS_POPULATION_COUNT |
46 | 50 | ||
47 | static inline unsigned int hweight64(unsigned long w) | 51 | static inline unsigned int hweight64(unsigned long w) |
48 | { | 52 | { |
49 | unsigned int res; | 53 | unsigned int res; |
50 | 54 | ||
51 | __asm__ ("popc %1,%0" : "=r" (res) : "r" (w)); | 55 | __asm__ ("popc %1,%0" : "=r" (res) : "r" (w)); |
52 | return res; | 56 | return res; |
53 | } | 57 | } |
54 | 58 | ||
55 | static inline unsigned int hweight32(unsigned int w) | 59 | static inline unsigned int hweight32(unsigned int w) |
56 | { | 60 | { |
57 | unsigned int res; | 61 | unsigned int res; |
58 | 62 | ||
59 | __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffffffff)); | 63 | __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffffffff)); |
60 | return res; | 64 | return res; |
61 | } | 65 | } |
62 | 66 | ||
63 | static inline unsigned int hweight16(unsigned int w) | 67 | static inline unsigned int hweight16(unsigned int w) |
64 | { | 68 | { |
65 | unsigned int res; | 69 | unsigned int res; |
66 | 70 | ||
67 | __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffff)); | 71 | __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffff)); |
68 | return res; | 72 | return res; |
69 | } | 73 | } |
70 | 74 | ||
71 | static inline unsigned int hweight8(unsigned int w) | 75 | static inline unsigned int hweight8(unsigned int w) |
72 | { | 76 | { |
73 | unsigned int res; | 77 | unsigned int res; |
74 | 78 | ||
75 | __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xff)); | 79 | __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xff)); |
76 | return res; | 80 | return res; |
77 | } | 81 | } |
78 | 82 | ||
79 | #else | 83 | #else |
80 | 84 | ||
81 | #include <asm-generic/bitops/hweight.h> | 85 | #include <asm-generic/bitops/hweight.h> |
82 | 86 | ||
83 | #endif | 87 | #endif |
84 | #include <asm-generic/bitops/lock.h> | 88 | #include <asm-generic/bitops/lock.h> |
85 | #endif /* __KERNEL__ */ | 89 | #endif /* __KERNEL__ */ |
86 | 90 | ||
87 | #include <asm-generic/bitops/find.h> | 91 | #include <asm-generic/bitops/find.h> |
88 | 92 | ||
89 | #ifdef __KERNEL__ | 93 | #ifdef __KERNEL__ |
90 | 94 | ||
91 | #include <asm-generic/bitops/ext2-non-atomic.h> | 95 | #include <asm-generic/bitops/ext2-non-atomic.h> |
92 | 96 | ||
93 | #define ext2_set_bit_atomic(lock,nr,addr) \ | 97 | #define ext2_set_bit_atomic(lock,nr,addr) \ |
94 | test_and_set_bit((nr) ^ 0x38,(unsigned long *)(addr)) | 98 | test_and_set_bit((nr) ^ 0x38,(unsigned long *)(addr)) |
95 | #define ext2_clear_bit_atomic(lock,nr,addr) \ | 99 | #define ext2_clear_bit_atomic(lock,nr,addr) \ |
96 | test_and_clear_bit((nr) ^ 0x38,(unsigned long *)(addr)) | 100 | test_and_clear_bit((nr) ^ 0x38,(unsigned long *)(addr)) |
97 | 101 | ||
98 | #include <asm-generic/bitops/minix.h> | 102 | #include <asm-generic/bitops/minix.h> |
99 | 103 | ||
100 | #endif /* __KERNEL__ */ | 104 | #endif /* __KERNEL__ */ |
101 | 105 | ||
102 | #endif /* defined(_SPARC64_BITOPS_H) */ | 106 | #endif /* defined(_SPARC64_BITOPS_H) */ |
103 | 107 |
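When ULTRA_HAS_POPULATION_COUNT is defined, the sparc64 hweight helpers above map directly onto the popc instruction; otherwise the asm-generic/bitops/hweight.h fallback is used. A portable sketch of the same operation (population count), with an illustrative name only:

    /* Sketch of hweight64(): number of set bits in a 64-bit word. */
    static unsigned int hweight64_sketch(unsigned long long w)
    {
        unsigned int count = 0;

        while (w) {
            w &= w - 1;          /* clear the lowest set bit */
            count++;
        }
        return count;
    }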
include/asm-um/bitops.h
1 | #ifndef __UM_BITOPS_H | 1 | #ifndef __UM_BITOPS_H |
2 | #define __UM_BITOPS_H | 2 | #define __UM_BITOPS_H |
3 | 3 | ||
4 | #ifndef _LINUX_BITOPS_H | ||
5 | #error only <linux/bitops.h> can be included directly | ||
6 | #endif | ||
7 | |||
4 | #include "asm/arch/bitops.h" | 8 | #include "asm/arch/bitops.h" |
5 | 9 | ||
6 | #endif | 10 | #endif |
7 | 11 |
include/asm-v850/bitops.h
1 | /* | 1 | /* |
2 | * include/asm-v850/bitops.h -- Bit operations | 2 | * include/asm-v850/bitops.h -- Bit operations |
3 | * | 3 | * |
4 | * Copyright (C) 2001,02,03,04,05 NEC Electronics Corporation | 4 | * Copyright (C) 2001,02,03,04,05 NEC Electronics Corporation |
5 | * Copyright (C) 2001,02,03,04,05 Miles Bader <miles@gnu.org> | 5 | * Copyright (C) 2001,02,03,04,05 Miles Bader <miles@gnu.org> |
6 | * Copyright (C) 1992 Linus Torvalds. | 6 | * Copyright (C) 1992 Linus Torvalds. |
7 | * | 7 | * |
8 | * This file is subject to the terms and conditions of the GNU General | 8 | * This file is subject to the terms and conditions of the GNU General |
9 | * Public License. See the file COPYING in the main directory of this | 9 | * Public License. See the file COPYING in the main directory of this |
10 | * archive for more details. | 10 | * archive for more details. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #ifndef __V850_BITOPS_H__ | 13 | #ifndef __V850_BITOPS_H__ |
14 | #define __V850_BITOPS_H__ | 14 | #define __V850_BITOPS_H__ |
15 | 15 | ||
16 | #ifndef _LINUX_BITOPS_H | ||
17 | #error only <linux/bitops.h> can be included directly | ||
18 | #endif | ||
16 | 19 | ||
17 | #include <linux/compiler.h> /* unlikely */ | 20 | #include <linux/compiler.h> /* unlikely */ |
18 | #include <asm/byteorder.h> /* swab32 */ | 21 | #include <asm/byteorder.h> /* swab32 */ |
19 | #include <asm/system.h> /* interrupt enable/disable */ | 22 | #include <asm/system.h> /* interrupt enable/disable */ |
20 | 23 | ||
21 | 24 | ||
22 | #ifdef __KERNEL__ | 25 | #ifdef __KERNEL__ |
23 | 26 | ||
24 | #include <asm-generic/bitops/ffz.h> | 27 | #include <asm-generic/bitops/ffz.h> |
25 | 28 | ||
26 | /* | 29 | /* |
27 | * The __ functions are not atomic | 30 | * The __ functions are not atomic |
28 | */ | 31 | */ |
29 | 32 | ||
30 | /* In the following constant-bit-op macros, a "g" constraint is used when | 33 | /* In the following constant-bit-op macros, a "g" constraint is used when |
31 | we really need an integer ("i" constraint). This is to avoid | 34 | we really need an integer ("i" constraint). This is to avoid |
32 | warnings/errors from the compiler in the case where the associated | 35 | warnings/errors from the compiler in the case where the associated |
33 | operand _isn't_ an integer, and shouldn't produce bogus assembly because | 36 | operand _isn't_ an integer, and shouldn't produce bogus assembly because |
34 | use of that form is protected by a guard statement that checks for | 37 | use of that form is protected by a guard statement that checks for |
35 | constants, and should otherwise be removed by the optimizer. This | 38 | constants, and should otherwise be removed by the optimizer. This |
36 | _usually_ works -- however, __builtin_constant_p returns true for a | 39 | _usually_ works -- however, __builtin_constant_p returns true for a |
37 | variable with a known constant value too, and unfortunately gcc will | 40 | variable with a known constant value too, and unfortunately gcc will |
38 | happily put the variable in a register and use the register for the "g" | 41 | happily put the variable in a register and use the register for the "g" |
39 | constraint'd asm operand. To avoid the latter problem, we add a | 42 | constraint'd asm operand. To avoid the latter problem, we add a |
40 | constant offset to the operand and subtract it back in the asm code; | 43 | constant offset to the operand and subtract it back in the asm code; |
41 | forcing gcc to do arithmetic on the value is usually enough to get it | 44 | forcing gcc to do arithmetic on the value is usually enough to get it |
42 | to use a real constant value. This is horrible, and ultimately | 45 | to use a real constant value. This is horrible, and ultimately |
43 | unreliable too, but it seems to work for now (hopefully gcc will offer | 46 | unreliable too, but it seems to work for now (hopefully gcc will offer |
44 | us more control in the future, so we can do a better job). */ | 47 | us more control in the future, so we can do a better job). */ |
45 | 48 | ||
46 | #define __const_bit_op(op, nr, addr) \ | 49 | #define __const_bit_op(op, nr, addr) \ |
47 | ({ __asm__ (op " (%0 - 0x123), %1" \ | 50 | ({ __asm__ (op " (%0 - 0x123), %1" \ |
48 | :: "g" (((nr) & 0x7) + 0x123), \ | 51 | :: "g" (((nr) & 0x7) + 0x123), \ |
49 | "m" (*((char *)(addr) + ((nr) >> 3))) \ | 52 | "m" (*((char *)(addr) + ((nr) >> 3))) \ |
50 | : "memory"); }) | 53 | : "memory"); }) |
51 | #define __var_bit_op(op, nr, addr) \ | 54 | #define __var_bit_op(op, nr, addr) \ |
52 | ({ int __nr = (nr); \ | 55 | ({ int __nr = (nr); \ |
53 | __asm__ (op " %0, [%1]" \ | 56 | __asm__ (op " %0, [%1]" \ |
54 | :: "r" (__nr & 0x7), \ | 57 | :: "r" (__nr & 0x7), \ |
55 | "r" ((char *)(addr) + (__nr >> 3)) \ | 58 | "r" ((char *)(addr) + (__nr >> 3)) \ |
56 | : "memory"); }) | 59 | : "memory"); }) |
57 | #define __bit_op(op, nr, addr) \ | 60 | #define __bit_op(op, nr, addr) \ |
58 | ((__builtin_constant_p (nr) && (unsigned)(nr) <= 0x7FFFF) \ | 61 | ((__builtin_constant_p (nr) && (unsigned)(nr) <= 0x7FFFF) \ |
59 | ? __const_bit_op (op, nr, addr) \ | 62 | ? __const_bit_op (op, nr, addr) \ |
60 | : __var_bit_op (op, nr, addr)) | 63 | : __var_bit_op (op, nr, addr)) |
61 | 64 | ||
62 | #define __set_bit(nr, addr) __bit_op ("set1", nr, addr) | 65 | #define __set_bit(nr, addr) __bit_op ("set1", nr, addr) |
63 | #define __clear_bit(nr, addr) __bit_op ("clr1", nr, addr) | 66 | #define __clear_bit(nr, addr) __bit_op ("clr1", nr, addr) |
64 | #define __change_bit(nr, addr) __bit_op ("not1", nr, addr) | 67 | #define __change_bit(nr, addr) __bit_op ("not1", nr, addr) |
65 | 68 | ||
66 | /* The bit instructions used by `non-atomic' variants are actually atomic. */ | 69 | /* The bit instructions used by `non-atomic' variants are actually atomic. */ |
67 | #define set_bit __set_bit | 70 | #define set_bit __set_bit |
68 | #define clear_bit __clear_bit | 71 | #define clear_bit __clear_bit |
69 | #define change_bit __change_bit | 72 | #define change_bit __change_bit |
70 | 73 | ||
71 | 74 | ||
72 | #define __const_tns_bit_op(op, nr, addr) \ | 75 | #define __const_tns_bit_op(op, nr, addr) \ |
73 | ({ int __tns_res; \ | 76 | ({ int __tns_res; \ |
74 | __asm__ __volatile__ ( \ | 77 | __asm__ __volatile__ ( \ |
75 | "tst1 (%1 - 0x123), %2; setf nz, %0; " op " (%1 - 0x123), %2" \ | 78 | "tst1 (%1 - 0x123), %2; setf nz, %0; " op " (%1 - 0x123), %2" \ |
76 | : "=&r" (__tns_res) \ | 79 | : "=&r" (__tns_res) \ |
77 | : "g" (((nr) & 0x7) + 0x123), \ | 80 | : "g" (((nr) & 0x7) + 0x123), \ |
78 | "m" (*((char *)(addr) + ((nr) >> 3))) \ | 81 | "m" (*((char *)(addr) + ((nr) >> 3))) \ |
79 | : "memory"); \ | 82 | : "memory"); \ |
80 | __tns_res; \ | 83 | __tns_res; \ |
81 | }) | 84 | }) |
82 | #define __var_tns_bit_op(op, nr, addr) \ | 85 | #define __var_tns_bit_op(op, nr, addr) \ |
83 | ({ int __nr = (nr); \ | 86 | ({ int __nr = (nr); \ |
84 | int __tns_res; \ | 87 | int __tns_res; \ |
85 | __asm__ __volatile__ ( \ | 88 | __asm__ __volatile__ ( \ |
86 | "tst1 %1, [%2]; setf nz, %0; " op " %1, [%2]" \ | 89 | "tst1 %1, [%2]; setf nz, %0; " op " %1, [%2]" \ |
87 | : "=&r" (__tns_res) \ | 90 | : "=&r" (__tns_res) \ |
88 | : "r" (__nr & 0x7), \ | 91 | : "r" (__nr & 0x7), \ |
89 | "r" ((char *)(addr) + (__nr >> 3)) \ | 92 | "r" ((char *)(addr) + (__nr >> 3)) \ |
90 | : "memory"); \ | 93 | : "memory"); \ |
91 | __tns_res; \ | 94 | __tns_res; \ |
92 | }) | 95 | }) |
93 | #define __tns_bit_op(op, nr, addr) \ | 96 | #define __tns_bit_op(op, nr, addr) \ |
94 | ((__builtin_constant_p (nr) && (unsigned)(nr) <= 0x7FFFF) \ | 97 | ((__builtin_constant_p (nr) && (unsigned)(nr) <= 0x7FFFF) \ |
95 | ? __const_tns_bit_op (op, nr, addr) \ | 98 | ? __const_tns_bit_op (op, nr, addr) \ |
96 | : __var_tns_bit_op (op, nr, addr)) | 99 | : __var_tns_bit_op (op, nr, addr)) |
97 | #define __tns_atomic_bit_op(op, nr, addr) \ | 100 | #define __tns_atomic_bit_op(op, nr, addr) \ |
98 | ({ int __tns_atomic_res, __tns_atomic_flags; \ | 101 | ({ int __tns_atomic_res, __tns_atomic_flags; \ |
99 | local_irq_save (__tns_atomic_flags); \ | 102 | local_irq_save (__tns_atomic_flags); \ |
100 | __tns_atomic_res = __tns_bit_op (op, nr, addr); \ | 103 | __tns_atomic_res = __tns_bit_op (op, nr, addr); \ |
101 | local_irq_restore (__tns_atomic_flags); \ | 104 | local_irq_restore (__tns_atomic_flags); \ |
102 | __tns_atomic_res; \ | 105 | __tns_atomic_res; \ |
103 | }) | 106 | }) |
104 | 107 | ||
105 | #define __test_and_set_bit(nr, addr) __tns_bit_op ("set1", nr, addr) | 108 | #define __test_and_set_bit(nr, addr) __tns_bit_op ("set1", nr, addr) |
106 | #define test_and_set_bit(nr, addr) __tns_atomic_bit_op ("set1", nr, addr) | 109 | #define test_and_set_bit(nr, addr) __tns_atomic_bit_op ("set1", nr, addr) |
107 | 110 | ||
108 | #define __test_and_clear_bit(nr, addr) __tns_bit_op ("clr1", nr, addr) | 111 | #define __test_and_clear_bit(nr, addr) __tns_bit_op ("clr1", nr, addr) |
109 | #define test_and_clear_bit(nr, addr) __tns_atomic_bit_op ("clr1", nr, addr) | 112 | #define test_and_clear_bit(nr, addr) __tns_atomic_bit_op ("clr1", nr, addr) |
110 | 113 | ||
111 | #define __test_and_change_bit(nr, addr) __tns_bit_op ("not1", nr, addr) | 114 | #define __test_and_change_bit(nr, addr) __tns_bit_op ("not1", nr, addr) |
112 | #define test_and_change_bit(nr, addr) __tns_atomic_bit_op ("not1", nr, addr) | 115 | #define test_and_change_bit(nr, addr) __tns_atomic_bit_op ("not1", nr, addr) |
113 | 116 | ||
114 | 117 | ||
115 | #define __const_test_bit(nr, addr) \ | 118 | #define __const_test_bit(nr, addr) \ |
116 | ({ int __test_bit_res; \ | 119 | ({ int __test_bit_res; \ |
117 | __asm__ __volatile__ ("tst1 (%1 - 0x123), %2; setf nz, %0" \ | 120 | __asm__ __volatile__ ("tst1 (%1 - 0x123), %2; setf nz, %0" \ |
118 | : "=r" (__test_bit_res) \ | 121 | : "=r" (__test_bit_res) \ |
119 | : "g" (((nr) & 0x7) + 0x123), \ | 122 | : "g" (((nr) & 0x7) + 0x123), \ |
120 | "m" (*((const char *)(addr) + ((nr) >> 3)))); \ | 123 | "m" (*((const char *)(addr) + ((nr) >> 3)))); \ |
121 | __test_bit_res; \ | 124 | __test_bit_res; \ |
122 | }) | 125 | }) |
123 | static inline int __test_bit (int nr, const void *addr) | 126 | static inline int __test_bit (int nr, const void *addr) |
124 | { | 127 | { |
125 | int res; | 128 | int res; |
126 | __asm__ __volatile__ ("tst1 %1, [%2]; setf nz, %0" | 129 | __asm__ __volatile__ ("tst1 %1, [%2]; setf nz, %0" |
127 | : "=r" (res) | 130 | : "=r" (res) |
128 | : "r" (nr & 0x7), "r" (addr + (nr >> 3))); | 131 | : "r" (nr & 0x7), "r" (addr + (nr >> 3))); |
129 | return res; | 132 | return res; |
130 | } | 133 | } |
131 | #define test_bit(nr,addr) \ | 134 | #define test_bit(nr,addr) \ |
132 | ((__builtin_constant_p (nr) && (unsigned)(nr) <= 0x7FFFF) \ | 135 | ((__builtin_constant_p (nr) && (unsigned)(nr) <= 0x7FFFF) \ |
133 | ? __const_test_bit ((nr), (addr)) \ | 136 | ? __const_test_bit ((nr), (addr)) \ |
134 | : __test_bit ((nr), (addr))) | 137 | : __test_bit ((nr), (addr))) |
135 | 138 | ||
136 | 139 | ||
137 | /* clear_bit doesn't provide any barrier for the compiler. */ | 140 | /* clear_bit doesn't provide any barrier for the compiler. */ |
138 | #define smp_mb__before_clear_bit() barrier () | 141 | #define smp_mb__before_clear_bit() barrier () |
139 | #define smp_mb__after_clear_bit() barrier () | 142 | #define smp_mb__after_clear_bit() barrier () |
140 | 143 | ||
141 | #include <asm-generic/bitops/ffs.h> | 144 | #include <asm-generic/bitops/ffs.h> |
142 | #include <asm-generic/bitops/fls.h> | 145 | #include <asm-generic/bitops/fls.h> |
143 | #include <asm-generic/bitops/fls64.h> | 146 | #include <asm-generic/bitops/fls64.h> |
144 | #include <asm-generic/bitops/__ffs.h> | 147 | #include <asm-generic/bitops/__ffs.h> |
145 | #include <asm-generic/bitops/find.h> | 148 | #include <asm-generic/bitops/find.h> |
146 | #include <asm-generic/bitops/sched.h> | 149 | #include <asm-generic/bitops/sched.h> |
147 | #include <asm-generic/bitops/hweight.h> | 150 | #include <asm-generic/bitops/hweight.h> |
148 | #include <asm-generic/bitops/lock.h> | 151 | #include <asm-generic/bitops/lock.h> |
149 | 152 | ||
150 | #include <asm-generic/bitops/ext2-non-atomic.h> | 153 | #include <asm-generic/bitops/ext2-non-atomic.h> |
151 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) | 154 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) |
152 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) | 155 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) |
153 | 156 | ||
154 | #include <asm-generic/bitops/minix.h> | 157 | #include <asm-generic/bitops/minix.h> |
155 | 158 | ||
156 | #endif /* __KERNEL__ */ | 159 | #endif /* __KERNEL__ */ |
157 | 160 | ||
158 | #endif /* __V850_BITOPS_H__ */ | 161 | #endif /* __V850_BITOPS_H__ */ |
159 | 162 |
include/asm-x86/bitops_32.h
1 | #ifndef _I386_BITOPS_H | 1 | #ifndef _I386_BITOPS_H |
2 | #define _I386_BITOPS_H | 2 | #define _I386_BITOPS_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Copyright 1992, Linus Torvalds. | 5 | * Copyright 1992, Linus Torvalds. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #ifndef _LINUX_BITOPS_H | ||
9 | #error only <linux/bitops.h> can be included directly | ||
10 | #endif | ||
11 | |||
8 | #include <linux/compiler.h> | 12 | #include <linux/compiler.h> |
9 | #include <asm/alternative.h> | 13 | #include <asm/alternative.h> |
10 | 14 | ||
11 | /* | 15 | /* |
12 | * These have to be done with inline assembly: that way the bit-setting | 16 | * These have to be done with inline assembly: that way the bit-setting |
13 | * is guaranteed to be atomic. All bit operations return 0 if the bit | 17 | * is guaranteed to be atomic. All bit operations return 0 if the bit |
14 | * was cleared before the operation and != 0 if it was not. | 18 | * was cleared before the operation and != 0 if it was not. |
15 | * | 19 | * |
16 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). | 20 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). |
17 | */ | 21 | */ |
18 | 22 | ||
19 | #define ADDR (*(volatile long *) addr) | 23 | #define ADDR (*(volatile long *) addr) |
20 | 24 | ||
21 | /** | 25 | /** |
22 | * set_bit - Atomically set a bit in memory | 26 | * set_bit - Atomically set a bit in memory |
23 | * @nr: the bit to set | 27 | * @nr: the bit to set |
24 | * @addr: the address to start counting from | 28 | * @addr: the address to start counting from |
25 | * | 29 | * |
26 | * This function is atomic and may not be reordered. See __set_bit() | 30 | * This function is atomic and may not be reordered. See __set_bit() |
27 | * if you do not require the atomic guarantees. | 31 | * if you do not require the atomic guarantees. |
28 | * | 32 | * |
29 | * Note: there are no guarantees that this function will not be reordered | 33 | * Note: there are no guarantees that this function will not be reordered |
30 | * on non x86 architectures, so if you are writing portable code, | 34 | * on non x86 architectures, so if you are writing portable code, |
31 | * make sure not to rely on its reordering guarantees. | 35 | * make sure not to rely on its reordering guarantees. |
32 | * | 36 | * |
33 | * Note that @nr may be almost arbitrarily large; this function is not | 37 | * Note that @nr may be almost arbitrarily large; this function is not |
34 | * restricted to acting on a single-word quantity. | 38 | * restricted to acting on a single-word quantity. |
35 | */ | 39 | */ |
36 | static inline void set_bit(int nr, volatile unsigned long * addr) | 40 | static inline void set_bit(int nr, volatile unsigned long * addr) |
37 | { | 41 | { |
38 | __asm__ __volatile__( LOCK_PREFIX | 42 | __asm__ __volatile__( LOCK_PREFIX |
39 | "btsl %1,%0" | 43 | "btsl %1,%0" |
40 | :"+m" (ADDR) | 44 | :"+m" (ADDR) |
41 | :"Ir" (nr)); | 45 | :"Ir" (nr)); |
42 | } | 46 | } |
43 | 47 | ||
44 | /** | 48 | /** |
45 | * __set_bit - Set a bit in memory | 49 | * __set_bit - Set a bit in memory |
46 | * @nr: the bit to set | 50 | * @nr: the bit to set |
47 | * @addr: the address to start counting from | 51 | * @addr: the address to start counting from |
48 | * | 52 | * |
49 | * Unlike set_bit(), this function is non-atomic and may be reordered. | 53 | * Unlike set_bit(), this function is non-atomic and may be reordered. |
50 | * If it's called on the same region of memory simultaneously, the effect | 54 | * If it's called on the same region of memory simultaneously, the effect |
51 | * may be that only one operation succeeds. | 55 | * may be that only one operation succeeds. |
52 | */ | 56 | */ |
53 | static inline void __set_bit(int nr, volatile unsigned long * addr) | 57 | static inline void __set_bit(int nr, volatile unsigned long * addr) |
54 | { | 58 | { |
55 | __asm__( | 59 | __asm__( |
56 | "btsl %1,%0" | 60 | "btsl %1,%0" |
57 | :"+m" (ADDR) | 61 | :"+m" (ADDR) |
58 | :"Ir" (nr)); | 62 | :"Ir" (nr)); |
59 | } | 63 | } |
60 | 64 | ||
61 | /** | 65 | /** |
62 | * clear_bit - Clears a bit in memory | 66 | * clear_bit - Clears a bit in memory |
63 | * @nr: Bit to clear | 67 | * @nr: Bit to clear |
64 | * @addr: Address to start counting from | 68 | * @addr: Address to start counting from |
65 | * | 69 | * |
66 | * clear_bit() is atomic and may not be reordered. However, it does | 70 | * clear_bit() is atomic and may not be reordered. However, it does |
67 | * not contain a memory barrier, so if it is used for locking purposes, | 71 | * not contain a memory barrier, so if it is used for locking purposes, |
68 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | 72 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() |
69 | * in order to ensure changes are visible on other processors. | 73 | * in order to ensure changes are visible on other processors. |
70 | */ | 74 | */ |
71 | static inline void clear_bit(int nr, volatile unsigned long * addr) | 75 | static inline void clear_bit(int nr, volatile unsigned long * addr) |
72 | { | 76 | { |
73 | __asm__ __volatile__( LOCK_PREFIX | 77 | __asm__ __volatile__( LOCK_PREFIX |
74 | "btrl %1,%0" | 78 | "btrl %1,%0" |
75 | :"+m" (ADDR) | 79 | :"+m" (ADDR) |
76 | :"Ir" (nr)); | 80 | :"Ir" (nr)); |
77 | } | 81 | } |
78 | 82 | ||
79 | static inline void __clear_bit(int nr, volatile unsigned long * addr) | 83 | static inline void __clear_bit(int nr, volatile unsigned long * addr) |
80 | { | 84 | { |
81 | __asm__ __volatile__( | 85 | __asm__ __volatile__( |
82 | "btrl %1,%0" | 86 | "btrl %1,%0" |
83 | :"+m" (ADDR) | 87 | :"+m" (ADDR) |
84 | :"Ir" (nr)); | 88 | :"Ir" (nr)); |
85 | } | 89 | } |
86 | #define smp_mb__before_clear_bit() barrier() | 90 | #define smp_mb__before_clear_bit() barrier() |
87 | #define smp_mb__after_clear_bit() barrier() | 91 | #define smp_mb__after_clear_bit() barrier() |
88 | 92 | ||
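As the clear_bit() comment above notes, the barrier macros are what make it usable for unlocking. A short usage sketch; the lock word and bit number below are hypothetical, not part of this header:

/* Illustrative only: release a bit-based lock with the required ordering. */
static unsigned long my_lock_word;              /* hypothetical lock bitmap */
#define MY_LOCK_BIT 0                           /* hypothetical bit number  */

static inline void my_unlock(void)
{
        smp_mb__before_clear_bit();             /* order prior stores first */
        clear_bit(MY_LOCK_BIT, &my_lock_word);
}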
89 | /** | 93 | /** |
90 | * __change_bit - Toggle a bit in memory | 94 | * __change_bit - Toggle a bit in memory |
91 | * @nr: the bit to change | 95 | * @nr: the bit to change |
92 | * @addr: the address to start counting from | 96 | * @addr: the address to start counting from |
93 | * | 97 | * |
94 | * Unlike change_bit(), this function is non-atomic and may be reordered. | 98 | * Unlike change_bit(), this function is non-atomic and may be reordered. |
95 | * If it's called on the same region of memory simultaneously, the effect | 99 | * If it's called on the same region of memory simultaneously, the effect |
96 | * may be that only one operation succeeds. | 100 | * may be that only one operation succeeds. |
97 | */ | 101 | */ |
98 | static inline void __change_bit(int nr, volatile unsigned long * addr) | 102 | static inline void __change_bit(int nr, volatile unsigned long * addr) |
99 | { | 103 | { |
100 | __asm__ __volatile__( | 104 | __asm__ __volatile__( |
101 | "btcl %1,%0" | 105 | "btcl %1,%0" |
102 | :"+m" (ADDR) | 106 | :"+m" (ADDR) |
103 | :"Ir" (nr)); | 107 | :"Ir" (nr)); |
104 | } | 108 | } |
105 | 109 | ||
106 | /** | 110 | /** |
107 | * change_bit - Toggle a bit in memory | 111 | * change_bit - Toggle a bit in memory |
108 | * @nr: Bit to change | 112 | * @nr: Bit to change |
109 | * @addr: Address to start counting from | 113 | * @addr: Address to start counting from |
110 | * | 114 | * |
111 | * change_bit() is atomic and may not be reordered. It may be | 115 | * change_bit() is atomic and may not be reordered. It may be |
112 | * reordered on architectures other than x86. | 116 | * reordered on architectures other than x86. |
113 | * Note that @nr may be almost arbitrarily large; this function is not | 117 | * Note that @nr may be almost arbitrarily large; this function is not |
114 | * restricted to acting on a single-word quantity. | 118 | * restricted to acting on a single-word quantity. |
115 | */ | 119 | */ |
116 | static inline void change_bit(int nr, volatile unsigned long * addr) | 120 | static inline void change_bit(int nr, volatile unsigned long * addr) |
117 | { | 121 | { |
118 | __asm__ __volatile__( LOCK_PREFIX | 122 | __asm__ __volatile__( LOCK_PREFIX |
119 | "btcl %1,%0" | 123 | "btcl %1,%0" |
120 | :"+m" (ADDR) | 124 | :"+m" (ADDR) |
121 | :"Ir" (nr)); | 125 | :"Ir" (nr)); |
122 | } | 126 | } |
123 | 127 | ||
124 | /** | 128 | /** |
125 | * test_and_set_bit - Set a bit and return its old value | 129 | * test_and_set_bit - Set a bit and return its old value |
126 | * @nr: Bit to set | 130 | * @nr: Bit to set |
127 | * @addr: Address to count from | 131 | * @addr: Address to count from |
128 | * | 132 | * |
129 | * This operation is atomic and cannot be reordered. | 133 | * This operation is atomic and cannot be reordered. |
130 | * It may be reordered on architectures other than x86. | 134 | * It may be reordered on architectures other than x86. |
131 | * It also implies a memory barrier. | 135 | * It also implies a memory barrier. |
132 | */ | 136 | */ |
133 | static inline int test_and_set_bit(int nr, volatile unsigned long * addr) | 137 | static inline int test_and_set_bit(int nr, volatile unsigned long * addr) |
134 | { | 138 | { |
135 | int oldbit; | 139 | int oldbit; |
136 | 140 | ||
137 | __asm__ __volatile__( LOCK_PREFIX | 141 | __asm__ __volatile__( LOCK_PREFIX |
138 | "btsl %2,%1\n\tsbbl %0,%0" | 142 | "btsl %2,%1\n\tsbbl %0,%0" |
139 | :"=r" (oldbit),"+m" (ADDR) | 143 | :"=r" (oldbit),"+m" (ADDR) |
140 | :"Ir" (nr) : "memory"); | 144 | :"Ir" (nr) : "memory"); |
141 | return oldbit; | 145 | return oldbit; |
142 | } | 146 | } |
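Because test_and_set_bit() atomically returns the previous value, it is the usual building block for a trylock-style claim. A hedged sketch, with a hypothetical state word:

/* Illustrative only: claim a resource exactly once across CPUs. */
static unsigned long busy_map;                  /* hypothetical state word */

static inline int try_claim(int nr)
{
        /* old value 0 means this caller won the race and now owns bit nr */
        return !test_and_set_bit(nr, &busy_map);
}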
143 | 147 | ||
144 | /** | 148 | /** |
145 | * __test_and_set_bit - Set a bit and return its old value | 149 | * __test_and_set_bit - Set a bit and return its old value |
146 | * @nr: Bit to set | 150 | * @nr: Bit to set |
147 | * @addr: Address to count from | 151 | * @addr: Address to count from |
148 | * | 152 | * |
149 | * This operation is non-atomic and can be reordered. | 153 | * This operation is non-atomic and can be reordered. |
150 | * If two examples of this operation race, one can appear to succeed | 154 | * If two examples of this operation race, one can appear to succeed |
151 | * but actually fail. You must protect multiple accesses with a lock. | 155 | * but actually fail. You must protect multiple accesses with a lock. |
152 | */ | 156 | */ |
153 | static inline int __test_and_set_bit(int nr, volatile unsigned long * addr) | 157 | static inline int __test_and_set_bit(int nr, volatile unsigned long * addr) |
154 | { | 158 | { |
155 | int oldbit; | 159 | int oldbit; |
156 | 160 | ||
157 | __asm__( | 161 | __asm__( |
158 | "btsl %2,%1\n\tsbbl %0,%0" | 162 | "btsl %2,%1\n\tsbbl %0,%0" |
159 | :"=r" (oldbit),"+m" (ADDR) | 163 | :"=r" (oldbit),"+m" (ADDR) |
160 | :"Ir" (nr)); | 164 | :"Ir" (nr)); |
161 | return oldbit; | 165 | return oldbit; |
162 | } | 166 | } |
163 | 167 | ||
164 | /** | 168 | /** |
165 | * test_and_clear_bit - Clear a bit and return its old value | 169 | * test_and_clear_bit - Clear a bit and return its old value |
166 | * @nr: Bit to clear | 170 | * @nr: Bit to clear |
167 | * @addr: Address to count from | 171 | * @addr: Address to count from |
168 | * | 172 | * |
169 | * This operation is atomic and cannot be reordered. | 173 | * This operation is atomic and cannot be reordered. |
170 | * It can be reordered on architectures other than x86. | 174 | * It can be reordered on architectures other than x86. |
171 | * It also implies a memory barrier. | 175 | * It also implies a memory barrier. |
172 | */ | 176 | */ |
173 | static inline int test_and_clear_bit(int nr, volatile unsigned long * addr) | 177 | static inline int test_and_clear_bit(int nr, volatile unsigned long * addr) |
174 | { | 178 | { |
175 | int oldbit; | 179 | int oldbit; |
176 | 180 | ||
177 | __asm__ __volatile__( LOCK_PREFIX | 181 | __asm__ __volatile__( LOCK_PREFIX |
178 | "btrl %2,%1\n\tsbbl %0,%0" | 182 | "btrl %2,%1\n\tsbbl %0,%0" |
179 | :"=r" (oldbit),"+m" (ADDR) | 183 | :"=r" (oldbit),"+m" (ADDR) |
180 | :"Ir" (nr) : "memory"); | 184 | :"Ir" (nr) : "memory"); |
181 | return oldbit; | 185 | return oldbit; |
182 | } | 186 | } |
183 | 187 | ||
184 | /** | 188 | /** |
185 | * __test_and_clear_bit - Clear a bit and return its old value | 189 | * __test_and_clear_bit - Clear a bit and return its old value |
186 | * @nr: Bit to clear | 190 | * @nr: Bit to clear |
187 | * @addr: Address to count from | 191 | * @addr: Address to count from |
188 | * | 192 | * |
189 | * This operation is non-atomic and can be reordered. | 193 | * This operation is non-atomic and can be reordered. |
190 | * If two examples of this operation race, one can appear to succeed | 194 | * If two examples of this operation race, one can appear to succeed |
191 | * but actually fail. You must protect multiple accesses with a lock. | 195 | * but actually fail. You must protect multiple accesses with a lock. |
192 | */ | 196 | */ |
193 | static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) | 197 | static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) |
194 | { | 198 | { |
195 | int oldbit; | 199 | int oldbit; |
196 | 200 | ||
197 | __asm__( | 201 | __asm__( |
198 | "btrl %2,%1\n\tsbbl %0,%0" | 202 | "btrl %2,%1\n\tsbbl %0,%0" |
199 | :"=r" (oldbit),"+m" (ADDR) | 203 | :"=r" (oldbit),"+m" (ADDR) |
200 | :"Ir" (nr)); | 204 | :"Ir" (nr)); |
201 | return oldbit; | 205 | return oldbit; |
202 | } | 206 | } |
203 | 207 | ||
204 | /* WARNING: non atomic and it can be reordered! */ | 208 | /* WARNING: non atomic and it can be reordered! */ |
205 | static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) | 209 | static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) |
206 | { | 210 | { |
207 | int oldbit; | 211 | int oldbit; |
208 | 212 | ||
209 | __asm__ __volatile__( | 213 | __asm__ __volatile__( |
210 | "btcl %2,%1\n\tsbbl %0,%0" | 214 | "btcl %2,%1\n\tsbbl %0,%0" |
211 | :"=r" (oldbit),"+m" (ADDR) | 215 | :"=r" (oldbit),"+m" (ADDR) |
212 | :"Ir" (nr) : "memory"); | 216 | :"Ir" (nr) : "memory"); |
213 | return oldbit; | 217 | return oldbit; |
214 | } | 218 | } |
215 | 219 | ||
216 | /** | 220 | /** |
217 | * test_and_change_bit - Change a bit and return its old value | 221 | * test_and_change_bit - Change a bit and return its old value |
218 | * @nr: Bit to change | 222 | * @nr: Bit to change |
219 | * @addr: Address to count from | 223 | * @addr: Address to count from |
220 | * | 224 | * |
221 | * This operation is atomic and cannot be reordered. | 225 | * This operation is atomic and cannot be reordered. |
222 | * It also implies a memory barrier. | 226 | * It also implies a memory barrier. |
223 | */ | 227 | */ |
224 | static inline int test_and_change_bit(int nr, volatile unsigned long* addr) | 228 | static inline int test_and_change_bit(int nr, volatile unsigned long* addr) |
225 | { | 229 | { |
226 | int oldbit; | 230 | int oldbit; |
227 | 231 | ||
228 | __asm__ __volatile__( LOCK_PREFIX | 232 | __asm__ __volatile__( LOCK_PREFIX |
229 | "btcl %2,%1\n\tsbbl %0,%0" | 233 | "btcl %2,%1\n\tsbbl %0,%0" |
230 | :"=r" (oldbit),"+m" (ADDR) | 234 | :"=r" (oldbit),"+m" (ADDR) |
231 | :"Ir" (nr) : "memory"); | 235 | :"Ir" (nr) : "memory"); |
232 | return oldbit; | 236 | return oldbit; |
233 | } | 237 | } |
234 | 238 | ||
235 | #if 0 /* Fool kernel-doc since it doesn't do macros yet */ | 239 | #if 0 /* Fool kernel-doc since it doesn't do macros yet */ |
236 | /** | 240 | /** |
237 | * test_bit - Determine whether a bit is set | 241 | * test_bit - Determine whether a bit is set |
238 | * @nr: bit number to test | 242 | * @nr: bit number to test |
239 | * @addr: Address to start counting from | 243 | * @addr: Address to start counting from |
240 | */ | 244 | */ |
241 | static int test_bit(int nr, const volatile void * addr); | 245 | static int test_bit(int nr, const volatile void * addr); |
242 | #endif | 246 | #endif |
243 | 247 | ||
244 | static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr) | 248 | static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr) |
245 | { | 249 | { |
246 | return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0; | 250 | return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0; |
247 | } | 251 | } |
248 | 252 | ||
249 | static inline int variable_test_bit(int nr, const volatile unsigned long * addr) | 253 | static inline int variable_test_bit(int nr, const volatile unsigned long * addr) |
250 | { | 254 | { |
251 | int oldbit; | 255 | int oldbit; |
252 | 256 | ||
253 | __asm__ __volatile__( | 257 | __asm__ __volatile__( |
254 | "btl %2,%1\n\tsbbl %0,%0" | 258 | "btl %2,%1\n\tsbbl %0,%0" |
255 | :"=r" (oldbit) | 259 | :"=r" (oldbit) |
256 | :"m" (ADDR),"Ir" (nr)); | 260 | :"m" (ADDR),"Ir" (nr)); |
257 | return oldbit; | 261 | return oldbit; |
258 | } | 262 | } |
259 | 263 | ||
260 | #define test_bit(nr,addr) \ | 264 | #define test_bit(nr,addr) \ |
261 | (__builtin_constant_p(nr) ? \ | 265 | (__builtin_constant_p(nr) ? \ |
262 | constant_test_bit((nr),(addr)) : \ | 266 | constant_test_bit((nr),(addr)) : \ |
263 | variable_test_bit((nr),(addr))) | 267 | variable_test_bit((nr),(addr))) |
264 | 268 | ||
265 | #undef ADDR | 269 | #undef ADDR |
266 | 270 | ||
267 | /** | 271 | /** |
268 | * find_first_zero_bit - find the first zero bit in a memory region | 272 | * find_first_zero_bit - find the first zero bit in a memory region |
269 | * @addr: The address to start the search at | 273 | * @addr: The address to start the search at |
270 | * @size: The maximum size to search | 274 | * @size: The maximum size to search |
271 | * | 275 | * |
272 | * Returns the bit-number of the first zero bit, not the number of the byte | 276 | * Returns the bit-number of the first zero bit, not the number of the byte |
273 | * containing a bit. | 277 | * containing a bit. |
274 | */ | 278 | */ |
275 | static inline int find_first_zero_bit(const unsigned long *addr, unsigned size) | 279 | static inline int find_first_zero_bit(const unsigned long *addr, unsigned size) |
276 | { | 280 | { |
277 | int d0, d1, d2; | 281 | int d0, d1, d2; |
278 | int res; | 282 | int res; |
279 | 283 | ||
280 | if (!size) | 284 | if (!size) |
281 | return 0; | 285 | return 0; |
282 | /* This looks at memory. Mark it volatile to tell gcc not to move it around */ | 286 | /* This looks at memory. Mark it volatile to tell gcc not to move it around */ |
283 | __asm__ __volatile__( | 287 | __asm__ __volatile__( |
284 | "movl $-1,%%eax\n\t" | 288 | "movl $-1,%%eax\n\t" |
285 | "xorl %%edx,%%edx\n\t" | 289 | "xorl %%edx,%%edx\n\t" |
286 | "repe; scasl\n\t" | 290 | "repe; scasl\n\t" |
287 | "je 1f\n\t" | 291 | "je 1f\n\t" |
288 | "xorl -4(%%edi),%%eax\n\t" | 292 | "xorl -4(%%edi),%%eax\n\t" |
289 | "subl $4,%%edi\n\t" | 293 | "subl $4,%%edi\n\t" |
290 | "bsfl %%eax,%%edx\n" | 294 | "bsfl %%eax,%%edx\n" |
291 | "1:\tsubl %%ebx,%%edi\n\t" | 295 | "1:\tsubl %%ebx,%%edi\n\t" |
292 | "shll $3,%%edi\n\t" | 296 | "shll $3,%%edi\n\t" |
293 | "addl %%edi,%%edx" | 297 | "addl %%edi,%%edx" |
294 | :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2) | 298 | :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2) |
295 | :"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory"); | 299 | :"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory"); |
296 | return res; | 300 | return res; |
297 | } | 301 | } |
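The repe; scasl sequence above skips whole 32-bit words that are all ones, then uses bsfl to locate the zero inside the first word that is not. A portable (and much slower) C sketch of the same result, illustrative only (when nothing is found this sketch returns size, whereas the asm variant may report a position past size):

/* Illustrative only: same semantics as the asm version, one bit at a time. */
static inline int find_first_zero_bit_slow(const unsigned long *addr,
                                           unsigned size)
{
        unsigned i;

        for (i = 0; i < size; i++)
                if (!(addr[i / 32] & (1UL << (i % 32))))
                        return i;
        return size;                    /* no zero bit within size */
}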
298 | 302 | ||
299 | /** | 303 | /** |
300 | * find_next_zero_bit - find the next zero bit in a memory region | 304 | * find_next_zero_bit - find the next zero bit in a memory region |
301 | * @addr: The address to base the search on | 305 | * @addr: The address to base the search on |
302 | * @offset: The bitnumber to start searching at | 306 | * @offset: The bitnumber to start searching at |
303 | * @size: The maximum size to search | 307 | * @size: The maximum size to search |
304 | */ | 308 | */ |
305 | int find_next_zero_bit(const unsigned long *addr, int size, int offset); | 309 | int find_next_zero_bit(const unsigned long *addr, int size, int offset); |
306 | 310 | ||
307 | /** | 311 | /** |
308 | * __ffs - find first bit in word. | 312 | * __ffs - find first bit in word. |
309 | * @word: The word to search | 313 | * @word: The word to search |
310 | * | 314 | * |
311 | * Undefined if no bit exists, so code should check against 0 first. | 315 | * Undefined if no bit exists, so code should check against 0 first. |
312 | */ | 316 | */ |
313 | static inline unsigned long __ffs(unsigned long word) | 317 | static inline unsigned long __ffs(unsigned long word) |
314 | { | 318 | { |
315 | __asm__("bsfl %1,%0" | 319 | __asm__("bsfl %1,%0" |
316 | :"=r" (word) | 320 | :"=r" (word) |
317 | :"rm" (word)); | 321 | :"rm" (word)); |
318 | return word; | 322 | return word; |
319 | } | 323 | } |
320 | 324 | ||
321 | /** | 325 | /** |
322 | * find_first_bit - find the first set bit in a memory region | 326 | * find_first_bit - find the first set bit in a memory region |
323 | * @addr: The address to start the search at | 327 | * @addr: The address to start the search at |
324 | * @size: The maximum size to search | 328 | * @size: The maximum size to search |
325 | * | 329 | * |
326 | * Returns the bit-number of the first set bit, not the number of the byte | 330 | * Returns the bit-number of the first set bit, not the number of the byte |
327 | * containing a bit. | 331 | * containing a bit. |
328 | */ | 332 | */ |
329 | static inline unsigned find_first_bit(const unsigned long *addr, unsigned size) | 333 | static inline unsigned find_first_bit(const unsigned long *addr, unsigned size) |
330 | { | 334 | { |
331 | unsigned x = 0; | 335 | unsigned x = 0; |
332 | 336 | ||
333 | while (x < size) { | 337 | while (x < size) { |
334 | unsigned long val = *addr++; | 338 | unsigned long val = *addr++; |
335 | if (val) | 339 | if (val) |
336 | return __ffs(val) + x; | 340 | return __ffs(val) + x; |
337 | x += (sizeof(*addr)<<3); | 341 | x += (sizeof(*addr)<<3); |
338 | } | 342 | } |
339 | return x; | 343 | return x; |
340 | } | 344 | } |
341 | 345 | ||
342 | /** | 346 | /** |
343 | * find_next_bit - find the next set bit in a memory region | 347 | * find_next_bit - find the next set bit in a memory region |
344 | * @addr: The address to base the search on | 348 | * @addr: The address to base the search on |
345 | * @offset: The bitnumber to start searching at | 349 | * @offset: The bitnumber to start searching at |
346 | * @size: The maximum size to search | 350 | * @size: The maximum size to search |
347 | */ | 351 | */ |
348 | int find_next_bit(const unsigned long *addr, int size, int offset); | 352 | int find_next_bit(const unsigned long *addr, int size, int offset); |
349 | 353 | ||
350 | /** | 354 | /** |
351 | * ffz - find first zero in word. | 355 | * ffz - find first zero in word. |
352 | * @word: The word to search | 356 | * @word: The word to search |
353 | * | 357 | * |
354 | * Undefined if no zero exists, so code should check against ~0UL first. | 358 | * Undefined if no zero exists, so code should check against ~0UL first. |
355 | */ | 359 | */ |
356 | static inline unsigned long ffz(unsigned long word) | 360 | static inline unsigned long ffz(unsigned long word) |
357 | { | 361 | { |
358 | __asm__("bsfl %1,%0" | 362 | __asm__("bsfl %1,%0" |
359 | :"=r" (word) | 363 | :"=r" (word) |
360 | :"r" (~word)); | 364 | :"r" (~word)); |
361 | return word; | 365 | return word; |
362 | } | 366 | } |
363 | 367 | ||
364 | #ifdef __KERNEL__ | 368 | #ifdef __KERNEL__ |
365 | 369 | ||
366 | #include <asm-generic/bitops/sched.h> | 370 | #include <asm-generic/bitops/sched.h> |
367 | 371 | ||
368 | /** | 372 | /** |
369 | * ffs - find first bit set | 373 | * ffs - find first bit set |
370 | * @x: the word to search | 374 | * @x: the word to search |
371 | * | 375 | * |
372 | * This is defined the same way as | 376 | * This is defined the same way as |
373 | * the libc and compiler builtin ffs routines, therefore | 377 | * the libc and compiler builtin ffs routines, therefore |
374 | * differs in spirit from the above ffz() (man ffs). | 378 | * differs in spirit from the above ffz() (man ffs). |
375 | */ | 379 | */ |
376 | static inline int ffs(int x) | 380 | static inline int ffs(int x) |
377 | { | 381 | { |
378 | int r; | 382 | int r; |
379 | 383 | ||
380 | __asm__("bsfl %1,%0\n\t" | 384 | __asm__("bsfl %1,%0\n\t" |
381 | "jnz 1f\n\t" | 385 | "jnz 1f\n\t" |
382 | "movl $-1,%0\n" | 386 | "movl $-1,%0\n" |
383 | "1:" : "=r" (r) : "rm" (x)); | 387 | "1:" : "=r" (r) : "rm" (x)); |
384 | return r+1; | 388 | return r+1; |
385 | } | 389 | } |
386 | 390 | ||
387 | /** | 391 | /** |
388 | * fls - find last bit set | 392 | * fls - find last bit set |
389 | * @x: the word to search | 393 | * @x: the word to search |
390 | * | 394 | * |
391 | * This is defined the same way as ffs(). | 395 | * This is defined the same way as ffs(). |
392 | */ | 396 | */ |
393 | static inline int fls(int x) | 397 | static inline int fls(int x) |
394 | { | 398 | { |
395 | int r; | 399 | int r; |
396 | 400 | ||
397 | __asm__("bsrl %1,%0\n\t" | 401 | __asm__("bsrl %1,%0\n\t" |
398 | "jnz 1f\n\t" | 402 | "jnz 1f\n\t" |
399 | "movl $-1,%0\n" | 403 | "movl $-1,%0\n" |
400 | "1:" : "=r" (r) : "rm" (x)); | 404 | "1:" : "=r" (r) : "rm" (x)); |
401 | return r+1; | 405 | return r+1; |
402 | } | 406 | } |
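Both ffs() and fls() above turn the bsfl/bsrl result (forced to -1 when the input is zero) into the 1-based libc convention. A small illustrative self-check of the expected values:

/* Illustrative only: the 1-based convention these helpers follow. */
static inline int ffs_fls_convention_holds(void)
{
        return ffs(0) == 0 && ffs(1) == 1 && ffs(0x80000000) == 32 &&
               fls(0) == 0 && fls(1) == 1 && fls(0x80000000) == 32;
}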
403 | 407 | ||
404 | #include <asm-generic/bitops/hweight.h> | 408 | #include <asm-generic/bitops/hweight.h> |
405 | #include <asm-generic/bitops/lock.h> | 409 | #include <asm-generic/bitops/lock.h> |
406 | 410 | ||
407 | #endif /* __KERNEL__ */ | 411 | #endif /* __KERNEL__ */ |
408 | 412 | ||
409 | #include <asm-generic/bitops/fls64.h> | 413 | #include <asm-generic/bitops/fls64.h> |
410 | 414 | ||
411 | #ifdef __KERNEL__ | 415 | #ifdef __KERNEL__ |
412 | 416 | ||
413 | #include <asm-generic/bitops/ext2-non-atomic.h> | 417 | #include <asm-generic/bitops/ext2-non-atomic.h> |
414 | 418 | ||
415 | #define ext2_set_bit_atomic(lock,nr,addr) \ | 419 | #define ext2_set_bit_atomic(lock,nr,addr) \ |
416 | test_and_set_bit((nr),(unsigned long*)addr) | 420 | test_and_set_bit((nr),(unsigned long*)addr) |
417 | #define ext2_clear_bit_atomic(lock,nr, addr) \ | 421 | #define ext2_clear_bit_atomic(lock,nr, addr) \ |
418 | test_and_clear_bit((nr),(unsigned long*)addr) | 422 | test_and_clear_bit((nr),(unsigned long*)addr) |
419 | 423 | ||
420 | #include <asm-generic/bitops/minix.h> | 424 | #include <asm-generic/bitops/minix.h> |
421 | 425 | ||
422 | #endif /* __KERNEL__ */ | 426 | #endif /* __KERNEL__ */ |
423 | 427 | ||
424 | #endif /* _I386_BITOPS_H */ | 428 | #endif /* _I386_BITOPS_H */ |
425 | 429 |
include/asm-x86/bitops_64.h
1 | #ifndef _X86_64_BITOPS_H | 1 | #ifndef _X86_64_BITOPS_H |
2 | #define _X86_64_BITOPS_H | 2 | #define _X86_64_BITOPS_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Copyright 1992, Linus Torvalds. | 5 | * Copyright 1992, Linus Torvalds. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #ifndef _LINUX_BITOPS_H | ||
9 | #error only <linux/bitops.h> can be included directly | ||
10 | #endif | ||
11 | |||
8 | #include <asm/alternative.h> | 12 | #include <asm/alternative.h> |
9 | 13 | ||
10 | #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1) | 14 | #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1) |
11 | /* Technically wrong, but this avoids compilation errors on some gcc | 15 | /* Technically wrong, but this avoids compilation errors on some gcc |
12 | versions. */ | 16 | versions. */ |
13 | #define ADDR "=m" (*(volatile long *) addr) | 17 | #define ADDR "=m" (*(volatile long *) addr) |
14 | #else | 18 | #else |
15 | #define ADDR "+m" (*(volatile long *) addr) | 19 | #define ADDR "+m" (*(volatile long *) addr) |
16 | #endif | 20 | #endif |
17 | 21 | ||
18 | /** | 22 | /** |
19 | * set_bit - Atomically set a bit in memory | 23 | * set_bit - Atomically set a bit in memory |
20 | * @nr: the bit to set | 24 | * @nr: the bit to set |
21 | * @addr: the address to start counting from | 25 | * @addr: the address to start counting from |
22 | * | 26 | * |
23 | * This function is atomic and may not be reordered. See __set_bit() | 27 | * This function is atomic and may not be reordered. See __set_bit() |
24 | * if you do not require the atomic guarantees. | 28 | * if you do not require the atomic guarantees. |
25 | * Note that @nr may be almost arbitrarily large; this function is not | 29 | * Note that @nr may be almost arbitrarily large; this function is not |
26 | * restricted to acting on a single-word quantity. | 30 | * restricted to acting on a single-word quantity. |
27 | */ | 31 | */ |
28 | static __inline__ void set_bit(int nr, volatile void * addr) | 32 | static __inline__ void set_bit(int nr, volatile void * addr) |
29 | { | 33 | { |
30 | __asm__ __volatile__( LOCK_PREFIX | 34 | __asm__ __volatile__( LOCK_PREFIX |
31 | "btsl %1,%0" | 35 | "btsl %1,%0" |
32 | :ADDR | 36 | :ADDR |
33 | :"dIr" (nr) : "memory"); | 37 | :"dIr" (nr) : "memory"); |
34 | } | 38 | } |
35 | 39 | ||
36 | /** | 40 | /** |
37 | * __set_bit - Set a bit in memory | 41 | * __set_bit - Set a bit in memory |
38 | * @nr: the bit to set | 42 | * @nr: the bit to set |
39 | * @addr: the address to start counting from | 43 | * @addr: the address to start counting from |
40 | * | 44 | * |
41 | * Unlike set_bit(), this function is non-atomic and may be reordered. | 45 | * Unlike set_bit(), this function is non-atomic and may be reordered. |
42 | * If it's called on the same region of memory simultaneously, the effect | 46 | * If it's called on the same region of memory simultaneously, the effect |
43 | * may be that only one operation succeeds. | 47 | * may be that only one operation succeeds. |
44 | */ | 48 | */ |
45 | static __inline__ void __set_bit(int nr, volatile void * addr) | 49 | static __inline__ void __set_bit(int nr, volatile void * addr) |
46 | { | 50 | { |
47 | __asm__ volatile( | 51 | __asm__ volatile( |
48 | "btsl %1,%0" | 52 | "btsl %1,%0" |
49 | :ADDR | 53 | :ADDR |
50 | :"dIr" (nr) : "memory"); | 54 | :"dIr" (nr) : "memory"); |
51 | } | 55 | } |
52 | 56 | ||
53 | /** | 57 | /** |
54 | * clear_bit - Clears a bit in memory | 58 | * clear_bit - Clears a bit in memory |
55 | * @nr: Bit to clear | 59 | * @nr: Bit to clear |
56 | * @addr: Address to start counting from | 60 | * @addr: Address to start counting from |
57 | * | 61 | * |
58 | * clear_bit() is atomic and may not be reordered. However, it does | 62 | * clear_bit() is atomic and may not be reordered. However, it does |
59 | * not contain a memory barrier, so if it is used for locking purposes, | 63 | * not contain a memory barrier, so if it is used for locking purposes, |
60 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | 64 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() |
61 | * in order to ensure changes are visible on other processors. | 65 | * in order to ensure changes are visible on other processors. |
62 | */ | 66 | */ |
63 | static __inline__ void clear_bit(int nr, volatile void * addr) | 67 | static __inline__ void clear_bit(int nr, volatile void * addr) |
64 | { | 68 | { |
65 | __asm__ __volatile__( LOCK_PREFIX | 69 | __asm__ __volatile__( LOCK_PREFIX |
66 | "btrl %1,%0" | 70 | "btrl %1,%0" |
67 | :ADDR | 71 | :ADDR |
68 | :"dIr" (nr)); | 72 | :"dIr" (nr)); |
69 | } | 73 | } |
70 | 74 | ||
71 | static __inline__ void __clear_bit(int nr, volatile void * addr) | 75 | static __inline__ void __clear_bit(int nr, volatile void * addr) |
72 | { | 76 | { |
73 | __asm__ __volatile__( | 77 | __asm__ __volatile__( |
74 | "btrl %1,%0" | 78 | "btrl %1,%0" |
75 | :ADDR | 79 | :ADDR |
76 | :"dIr" (nr)); | 80 | :"dIr" (nr)); |
77 | } | 81 | } |
78 | 82 | ||
79 | #define smp_mb__before_clear_bit() barrier() | 83 | #define smp_mb__before_clear_bit() barrier() |
80 | #define smp_mb__after_clear_bit() barrier() | 84 | #define smp_mb__after_clear_bit() barrier() |
81 | 85 | ||
82 | /** | 86 | /** |
83 | * __change_bit - Toggle a bit in memory | 87 | * __change_bit - Toggle a bit in memory |
84 | * @nr: the bit to change | 88 | * @nr: the bit to change |
85 | * @addr: the address to start counting from | 89 | * @addr: the address to start counting from |
86 | * | 90 | * |
87 | * Unlike change_bit(), this function is non-atomic and may be reordered. | 91 | * Unlike change_bit(), this function is non-atomic and may be reordered. |
88 | * If it's called on the same region of memory simultaneously, the effect | 92 | * If it's called on the same region of memory simultaneously, the effect |
89 | * may be that only one operation succeeds. | 93 | * may be that only one operation succeeds. |
90 | */ | 94 | */ |
91 | static __inline__ void __change_bit(int nr, volatile void * addr) | 95 | static __inline__ void __change_bit(int nr, volatile void * addr) |
92 | { | 96 | { |
93 | __asm__ __volatile__( | 97 | __asm__ __volatile__( |
94 | "btcl %1,%0" | 98 | "btcl %1,%0" |
95 | :ADDR | 99 | :ADDR |
96 | :"dIr" (nr)); | 100 | :"dIr" (nr)); |
97 | } | 101 | } |
98 | 102 | ||
99 | /** | 103 | /** |
100 | * change_bit - Toggle a bit in memory | 104 | * change_bit - Toggle a bit in memory |
101 | * @nr: Bit to change | 105 | * @nr: Bit to change |
102 | * @addr: Address to start counting from | 106 | * @addr: Address to start counting from |
103 | * | 107 | * |
104 | * change_bit() is atomic and may not be reordered. | 108 | * change_bit() is atomic and may not be reordered. |
105 | * Note that @nr may be almost arbitrarily large; this function is not | 109 | * Note that @nr may be almost arbitrarily large; this function is not |
106 | * restricted to acting on a single-word quantity. | 110 | * restricted to acting on a single-word quantity. |
107 | */ | 111 | */ |
108 | static __inline__ void change_bit(int nr, volatile void * addr) | 112 | static __inline__ void change_bit(int nr, volatile void * addr) |
109 | { | 113 | { |
110 | __asm__ __volatile__( LOCK_PREFIX | 114 | __asm__ __volatile__( LOCK_PREFIX |
111 | "btcl %1,%0" | 115 | "btcl %1,%0" |
112 | :ADDR | 116 | :ADDR |
113 | :"dIr" (nr)); | 117 | :"dIr" (nr)); |
114 | } | 118 | } |
115 | 119 | ||
116 | /** | 120 | /** |
117 | * test_and_set_bit - Set a bit and return its old value | 121 | * test_and_set_bit - Set a bit and return its old value |
118 | * @nr: Bit to set | 122 | * @nr: Bit to set |
119 | * @addr: Address to count from | 123 | * @addr: Address to count from |
120 | * | 124 | * |
121 | * This operation is atomic and cannot be reordered. | 125 | * This operation is atomic and cannot be reordered. |
122 | * It also implies a memory barrier. | 126 | * It also implies a memory barrier. |
123 | */ | 127 | */ |
124 | static __inline__ int test_and_set_bit(int nr, volatile void * addr) | 128 | static __inline__ int test_and_set_bit(int nr, volatile void * addr) |
125 | { | 129 | { |
126 | int oldbit; | 130 | int oldbit; |
127 | 131 | ||
128 | __asm__ __volatile__( LOCK_PREFIX | 132 | __asm__ __volatile__( LOCK_PREFIX |
129 | "btsl %2,%1\n\tsbbl %0,%0" | 133 | "btsl %2,%1\n\tsbbl %0,%0" |
130 | :"=r" (oldbit),ADDR | 134 | :"=r" (oldbit),ADDR |
131 | :"dIr" (nr) : "memory"); | 135 | :"dIr" (nr) : "memory"); |
132 | return oldbit; | 136 | return oldbit; |
133 | } | 137 | } |
134 | 138 | ||
135 | /** | 139 | /** |
136 | * __test_and_set_bit - Set a bit and return its old value | 140 | * __test_and_set_bit - Set a bit and return its old value |
137 | * @nr: Bit to set | 141 | * @nr: Bit to set |
138 | * @addr: Address to count from | 142 | * @addr: Address to count from |
139 | * | 143 | * |
140 | * This operation is non-atomic and can be reordered. | 144 | * This operation is non-atomic and can be reordered. |
141 | * If two examples of this operation race, one can appear to succeed | 145 | * If two examples of this operation race, one can appear to succeed |
142 | * but actually fail. You must protect multiple accesses with a lock. | 146 | * but actually fail. You must protect multiple accesses with a lock. |
143 | */ | 147 | */ |
144 | static __inline__ int __test_and_set_bit(int nr, volatile void * addr) | 148 | static __inline__ int __test_and_set_bit(int nr, volatile void * addr) |
145 | { | 149 | { |
146 | int oldbit; | 150 | int oldbit; |
147 | 151 | ||
148 | __asm__( | 152 | __asm__( |
149 | "btsl %2,%1\n\tsbbl %0,%0" | 153 | "btsl %2,%1\n\tsbbl %0,%0" |
150 | :"=r" (oldbit),ADDR | 154 | :"=r" (oldbit),ADDR |
151 | :"dIr" (nr)); | 155 | :"dIr" (nr)); |
152 | return oldbit; | 156 | return oldbit; |
153 | } | 157 | } |
154 | 158 | ||
155 | /** | 159 | /** |
156 | * test_and_clear_bit - Clear a bit and return its old value | 160 | * test_and_clear_bit - Clear a bit and return its old value |
157 | * @nr: Bit to clear | 161 | * @nr: Bit to clear |
158 | * @addr: Address to count from | 162 | * @addr: Address to count from |
159 | * | 163 | * |
160 | * This operation is atomic and cannot be reordered. | 164 | * This operation is atomic and cannot be reordered. |
161 | * It also implies a memory barrier. | 165 | * It also implies a memory barrier. |
162 | */ | 166 | */ |
163 | static __inline__ int test_and_clear_bit(int nr, volatile void * addr) | 167 | static __inline__ int test_and_clear_bit(int nr, volatile void * addr) |
164 | { | 168 | { |
165 | int oldbit; | 169 | int oldbit; |
166 | 170 | ||
167 | __asm__ __volatile__( LOCK_PREFIX | 171 | __asm__ __volatile__( LOCK_PREFIX |
168 | "btrl %2,%1\n\tsbbl %0,%0" | 172 | "btrl %2,%1\n\tsbbl %0,%0" |
169 | :"=r" (oldbit),ADDR | 173 | :"=r" (oldbit),ADDR |
170 | :"dIr" (nr) : "memory"); | 174 | :"dIr" (nr) : "memory"); |
171 | return oldbit; | 175 | return oldbit; |
172 | } | 176 | } |
173 | 177 | ||
174 | /** | 178 | /** |
175 | * __test_and_clear_bit - Clear a bit and return its old value | 179 | * __test_and_clear_bit - Clear a bit and return its old value |
176 | * @nr: Bit to clear | 180 | * @nr: Bit to clear |
177 | * @addr: Address to count from | 181 | * @addr: Address to count from |
178 | * | 182 | * |
179 | * This operation is non-atomic and can be reordered. | 183 | * This operation is non-atomic and can be reordered. |
180 | * If two examples of this operation race, one can appear to succeed | 184 | * If two examples of this operation race, one can appear to succeed |
181 | * but actually fail. You must protect multiple accesses with a lock. | 185 | * but actually fail. You must protect multiple accesses with a lock. |
182 | */ | 186 | */ |
183 | static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) | 187 | static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) |
184 | { | 188 | { |
185 | int oldbit; | 189 | int oldbit; |
186 | 190 | ||
187 | __asm__( | 191 | __asm__( |
188 | "btrl %2,%1\n\tsbbl %0,%0" | 192 | "btrl %2,%1\n\tsbbl %0,%0" |
189 | :"=r" (oldbit),ADDR | 193 | :"=r" (oldbit),ADDR |
190 | :"dIr" (nr)); | 194 | :"dIr" (nr)); |
191 | return oldbit; | 195 | return oldbit; |
192 | } | 196 | } |
193 | 197 | ||
194 | /* WARNING: non atomic and it can be reordered! */ | 198 | /* WARNING: non atomic and it can be reordered! */ |
195 | static __inline__ int __test_and_change_bit(int nr, volatile void * addr) | 199 | static __inline__ int __test_and_change_bit(int nr, volatile void * addr) |
196 | { | 200 | { |
197 | int oldbit; | 201 | int oldbit; |
198 | 202 | ||
199 | __asm__ __volatile__( | 203 | __asm__ __volatile__( |
200 | "btcl %2,%1\n\tsbbl %0,%0" | 204 | "btcl %2,%1\n\tsbbl %0,%0" |
201 | :"=r" (oldbit),ADDR | 205 | :"=r" (oldbit),ADDR |
202 | :"dIr" (nr) : "memory"); | 206 | :"dIr" (nr) : "memory"); |
203 | return oldbit; | 207 | return oldbit; |
204 | } | 208 | } |
205 | 209 | ||
206 | /** | 210 | /** |
207 | * test_and_change_bit - Change a bit and return its old value | 211 | * test_and_change_bit - Change a bit and return its old value |
208 | * @nr: Bit to change | 212 | * @nr: Bit to change |
209 | * @addr: Address to count from | 213 | * @addr: Address to count from |
210 | * | 214 | * |
211 | * This operation is atomic and cannot be reordered. | 215 | * This operation is atomic and cannot be reordered. |
212 | * It also implies a memory barrier. | 216 | * It also implies a memory barrier. |
213 | */ | 217 | */ |
214 | static __inline__ int test_and_change_bit(int nr, volatile void * addr) | 218 | static __inline__ int test_and_change_bit(int nr, volatile void * addr) |
215 | { | 219 | { |
216 | int oldbit; | 220 | int oldbit; |
217 | 221 | ||
218 | __asm__ __volatile__( LOCK_PREFIX | 222 | __asm__ __volatile__( LOCK_PREFIX |
219 | "btcl %2,%1\n\tsbbl %0,%0" | 223 | "btcl %2,%1\n\tsbbl %0,%0" |
220 | :"=r" (oldbit),ADDR | 224 | :"=r" (oldbit),ADDR |
221 | :"dIr" (nr) : "memory"); | 225 | :"dIr" (nr) : "memory"); |
222 | return oldbit; | 226 | return oldbit; |
223 | } | 227 | } |
224 | 228 | ||
225 | #if 0 /* Fool kernel-doc since it doesn't do macros yet */ | 229 | #if 0 /* Fool kernel-doc since it doesn't do macros yet */ |
226 | /** | 230 | /** |
227 | * test_bit - Determine whether a bit is set | 231 | * test_bit - Determine whether a bit is set |
228 | * @nr: bit number to test | 232 | * @nr: bit number to test |
229 | * @addr: Address to start counting from | 233 | * @addr: Address to start counting from |
230 | */ | 234 | */ |
231 | static int test_bit(int nr, const volatile void * addr); | 235 | static int test_bit(int nr, const volatile void * addr); |
232 | #endif | 236 | #endif |
233 | 237 | ||
234 | static __inline__ int constant_test_bit(int nr, const volatile void * addr) | 238 | static __inline__ int constant_test_bit(int nr, const volatile void * addr) |
235 | { | 239 | { |
236 | return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; | 240 | return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; |
237 | } | 241 | } |
238 | 242 | ||
239 | static __inline__ int variable_test_bit(int nr, volatile const void * addr) | 243 | static __inline__ int variable_test_bit(int nr, volatile const void * addr) |
240 | { | 244 | { |
241 | int oldbit; | 245 | int oldbit; |
242 | 246 | ||
243 | __asm__ __volatile__( | 247 | __asm__ __volatile__( |
244 | "btl %2,%1\n\tsbbl %0,%0" | 248 | "btl %2,%1\n\tsbbl %0,%0" |
245 | :"=r" (oldbit) | 249 | :"=r" (oldbit) |
246 | :"m" (*(volatile long *)addr),"dIr" (nr)); | 250 | :"m" (*(volatile long *)addr),"dIr" (nr)); |
247 | return oldbit; | 251 | return oldbit; |
248 | } | 252 | } |
249 | 253 | ||
250 | #define test_bit(nr,addr) \ | 254 | #define test_bit(nr,addr) \ |
251 | (__builtin_constant_p(nr) ? \ | 255 | (__builtin_constant_p(nr) ? \ |
252 | constant_test_bit((nr),(addr)) : \ | 256 | constant_test_bit((nr),(addr)) : \ |
253 | variable_test_bit((nr),(addr))) | 257 | variable_test_bit((nr),(addr))) |
254 | 258 | ||
255 | #undef ADDR | 259 | #undef ADDR |
256 | 260 | ||
257 | extern long find_first_zero_bit(const unsigned long * addr, unsigned long size); | 261 | extern long find_first_zero_bit(const unsigned long * addr, unsigned long size); |
258 | extern long find_next_zero_bit (const unsigned long * addr, long size, long offset); | 262 | extern long find_next_zero_bit (const unsigned long * addr, long size, long offset); |
259 | extern long find_first_bit(const unsigned long * addr, unsigned long size); | 263 | extern long find_first_bit(const unsigned long * addr, unsigned long size); |
260 | extern long find_next_bit(const unsigned long * addr, long size, long offset); | 264 | extern long find_next_bit(const unsigned long * addr, long size, long offset); |
261 | 265 | ||
262 | /* return index of first bit set in val or max when no bit is set */ | 266 | /* return index of first bit set in val or max when no bit is set */ |
263 | static inline long __scanbit(unsigned long val, unsigned long max) | 267 | static inline long __scanbit(unsigned long val, unsigned long max) |
264 | { | 268 | { |
265 | asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max)); | 269 | asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max)); |
266 | return val; | 270 | return val; |
267 | } | 271 | } |
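__scanbit() above pairs bsfq with cmovz so that the destination register falls back to @max when @val is zero. The same contract in plain C, illustrative only:

/* Illustrative only: index of the lowest set bit in val, or max if val == 0. */
static inline long __scanbit_c(unsigned long val, unsigned long max)
{
        long bit;

        if (!val)
                return max;
        for (bit = 0; !(val & 1UL); bit++)
                val >>= 1;
        return bit;
}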
268 | 272 | ||
269 | #define find_first_bit(addr,size) \ | 273 | #define find_first_bit(addr,size) \ |
270 | ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ | 274 | ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ |
271 | (__scanbit(*(unsigned long *)addr,(size))) : \ | 275 | (__scanbit(*(unsigned long *)addr,(size))) : \ |
272 | find_first_bit(addr,size))) | 276 | find_first_bit(addr,size))) |
273 | 277 | ||
274 | #define find_next_bit(addr,size,off) \ | 278 | #define find_next_bit(addr,size,off) \ |
275 | ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ | 279 | ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ |
276 | ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \ | 280 | ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \ |
277 | find_next_bit(addr,size,off))) | 281 | find_next_bit(addr,size,off))) |
278 | 282 | ||
279 | #define find_first_zero_bit(addr,size) \ | 283 | #define find_first_zero_bit(addr,size) \ |
280 | ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ | 284 | ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ |
281 | (__scanbit(~*(unsigned long *)addr,(size))) : \ | 285 | (__scanbit(~*(unsigned long *)addr,(size))) : \ |
282 | find_first_zero_bit(addr,size))) | 286 | find_first_zero_bit(addr,size))) |
283 | 287 | ||
284 | #define find_next_zero_bit(addr,size,off) \ | 288 | #define find_next_zero_bit(addr,size,off) \ |
285 | ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ | 289 | ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ |
286 | ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \ | 290 | ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \ |
287 | find_next_zero_bit(addr,size,off))) | 291 | find_next_zero_bit(addr,size,off))) |
288 | 292 | ||
289 | /* | 293 | /* |
290 | * Find string of zero bits in a bitmap. -1 when not found. | 294 | * Find string of zero bits in a bitmap. -1 when not found. |
291 | */ | 295 | */ |
292 | extern unsigned long | 296 | extern unsigned long |
293 | find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len); | 297 | find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len); |
294 | 298 | ||
295 | static inline void set_bit_string(unsigned long *bitmap, unsigned long i, | 299 | static inline void set_bit_string(unsigned long *bitmap, unsigned long i, |
296 | int len) | 300 | int len) |
297 | { | 301 | { |
298 | unsigned long end = i + len; | 302 | unsigned long end = i + len; |
299 | while (i < end) { | 303 | while (i < end) { |
300 | __set_bit(i, bitmap); | 304 | __set_bit(i, bitmap); |
301 | i++; | 305 | i++; |
302 | } | 306 | } |
303 | } | 307 | } |
304 | 308 | ||
305 | static inline void __clear_bit_string(unsigned long *bitmap, unsigned long i, | 309 | static inline void __clear_bit_string(unsigned long *bitmap, unsigned long i, |
306 | int len) | 310 | int len) |
307 | { | 311 | { |
308 | unsigned long end = i + len; | 312 | unsigned long end = i + len; |
309 | while (i < end) { | 313 | while (i < end) { |
310 | __clear_bit(i, bitmap); | 314 | __clear_bit(i, bitmap); |
311 | i++; | 315 | i++; |
312 | } | 316 | } |
313 | } | 317 | } |
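set_bit_string() and __clear_bit_string() above simply walk a contiguous run of bits; combined with find_next_zero_string() they can back a small run-of-bits allocator. A hedged sketch assuming the documented "-1 when not found" return value; the helper names below are hypothetical:

/* Illustrative only: reserve a run of len bits, release it later. */
static inline long alloc_bit_range(unsigned long *bitmap, long nbits, int len)
{
        unsigned long start = find_next_zero_string(bitmap, 0, nbits, len);

        if (start == -1UL)
                return -1;              /* no free run of that length */
        set_bit_string(bitmap, start, len);
        return start;
}

static inline void free_bit_range(unsigned long *bitmap, unsigned long start,
                                  int len)
{
        __clear_bit_string(bitmap, start, len);
}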
314 | 318 | ||
315 | /** | 319 | /** |
316 | * ffz - find first zero in word. | 320 | * ffz - find first zero in word. |
317 | * @word: The word to search | 321 | * @word: The word to search |
318 | * | 322 | * |
319 | * Undefined if no zero exists, so code should check against ~0UL first. | 323 | * Undefined if no zero exists, so code should check against ~0UL first. |
320 | */ | 324 | */ |
321 | static __inline__ unsigned long ffz(unsigned long word) | 325 | static __inline__ unsigned long ffz(unsigned long word) |
322 | { | 326 | { |
323 | __asm__("bsfq %1,%0" | 327 | __asm__("bsfq %1,%0" |
324 | :"=r" (word) | 328 | :"=r" (word) |
325 | :"r" (~word)); | 329 | :"r" (~word)); |
326 | return word; | 330 | return word; |
327 | } | 331 | } |
328 | 332 | ||
329 | /** | 333 | /** |
330 | * __ffs - find first bit in word. | 334 | * __ffs - find first bit in word. |
331 | * @word: The word to search | 335 | * @word: The word to search |
332 | * | 336 | * |
333 | * Undefined if no bit exists, so code should check against 0 first. | 337 | * Undefined if no bit exists, so code should check against 0 first. |
334 | */ | 338 | */ |
335 | static __inline__ unsigned long __ffs(unsigned long word) | 339 | static __inline__ unsigned long __ffs(unsigned long word) |
336 | { | 340 | { |
337 | __asm__("bsfq %1,%0" | 341 | __asm__("bsfq %1,%0" |
338 | :"=r" (word) | 342 | :"=r" (word) |
339 | :"rm" (word)); | 343 | :"rm" (word)); |
340 | return word; | 344 | return word; |
341 | } | 345 | } |
342 | 346 | ||
343 | /* | 347 | /* |
344 | * __fls: find last bit set. | 348 | * __fls: find last bit set. |
345 | * @word: The word to search | 349 | * @word: The word to search |
346 | * | 350 | * |
347 | * Undefined if no bit exists, so code should check against 0 first. | 351 | * Undefined if no bit exists, so code should check against 0 first. |
348 | */ | 352 | */ |
349 | static __inline__ unsigned long __fls(unsigned long word) | 353 | static __inline__ unsigned long __fls(unsigned long word) |
350 | { | 354 | { |
351 | __asm__("bsrq %1,%0" | 355 | __asm__("bsrq %1,%0" |
352 | :"=r" (word) | 356 | :"=r" (word) |
353 | :"rm" (word)); | 357 | :"rm" (word)); |
354 | return word; | 358 | return word; |
355 | } | 359 | } |
356 | 360 | ||
357 | #ifdef __KERNEL__ | 361 | #ifdef __KERNEL__ |
358 | 362 | ||
359 | #include <asm-generic/bitops/sched.h> | 363 | #include <asm-generic/bitops/sched.h> |
360 | 364 | ||
361 | /** | 365 | /** |
362 | * ffs - find first bit set | 366 | * ffs - find first bit set |
363 | * @x: the word to search | 367 | * @x: the word to search |
364 | * | 368 | * |
365 | * This is defined the same way as | 369 | * This is defined the same way as |
366 | * the libc and compiler builtin ffs routines, therefore | 370 | * the libc and compiler builtin ffs routines, therefore |
367 | * differs in spirit from the above ffz (man ffs). | 371 | * differs in spirit from the above ffz (man ffs). |
368 | */ | 372 | */ |
369 | static __inline__ int ffs(int x) | 373 | static __inline__ int ffs(int x) |
370 | { | 374 | { |
371 | int r; | 375 | int r; |
372 | 376 | ||
373 | __asm__("bsfl %1,%0\n\t" | 377 | __asm__("bsfl %1,%0\n\t" |
374 | "cmovzl %2,%0" | 378 | "cmovzl %2,%0" |
375 | : "=r" (r) : "rm" (x), "r" (-1)); | 379 | : "=r" (r) : "rm" (x), "r" (-1)); |
376 | return r+1; | 380 | return r+1; |
377 | } | 381 | } |
378 | 382 | ||
379 | /** | 383 | /** |
380 | * fls64 - find last bit set in 64 bit word | 384 | * fls64 - find last bit set in 64 bit word |
381 | * @x: the word to search | 385 | * @x: the word to search |
382 | * | 386 | * |
383 | * This is defined the same way as fls. | 387 | * This is defined the same way as fls. |
384 | */ | 388 | */ |
385 | static __inline__ int fls64(__u64 x) | 389 | static __inline__ int fls64(__u64 x) |
386 | { | 390 | { |
387 | if (x == 0) | 391 | if (x == 0) |
388 | return 0; | 392 | return 0; |
389 | return __fls(x) + 1; | 393 | return __fls(x) + 1; |
390 | } | 394 | } |
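fls64() above special-cases zero because __fls() is undefined for a zero input. A tiny illustrative self-check of the expected values:

/* Illustrative only: fls64() follows the same 1-based convention as fls(). */
static inline int fls64_convention_holds(void)
{
        return fls64(0) == 0 && fls64(1) == 1 && fls64(1ULL << 63) == 64;
}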
391 | 395 | ||
392 | /** | 396 | /** |
393 | * fls - find last bit set | 397 | * fls - find last bit set |
394 | * @x: the word to search | 398 | * @x: the word to search |
395 | * | 399 | * |
396 | * This is defined the same way as ffs. | 400 | * This is defined the same way as ffs. |
397 | */ | 401 | */ |
398 | static __inline__ int fls(int x) | 402 | static __inline__ int fls(int x) |
399 | { | 403 | { |
400 | int r; | 404 | int r; |
401 | 405 | ||
402 | __asm__("bsrl %1,%0\n\t" | 406 | __asm__("bsrl %1,%0\n\t" |
403 | "cmovzl %2,%0" | 407 | "cmovzl %2,%0" |
404 | : "=&r" (r) : "rm" (x), "rm" (-1)); | 408 | : "=&r" (r) : "rm" (x), "rm" (-1)); |
405 | return r+1; | 409 | return r+1; |
406 | } | 410 | } |
407 | 411 | ||
408 | #define ARCH_HAS_FAST_MULTIPLIER 1 | 412 | #define ARCH_HAS_FAST_MULTIPLIER 1 |
409 | 413 | ||
410 | #include <asm-generic/bitops/hweight.h> | 414 | #include <asm-generic/bitops/hweight.h> |
411 | #include <asm-generic/bitops/lock.h> | 415 | #include <asm-generic/bitops/lock.h> |
412 | 416 | ||
413 | #endif /* __KERNEL__ */ | 417 | #endif /* __KERNEL__ */ |
414 | 418 | ||
415 | #ifdef __KERNEL__ | 419 | #ifdef __KERNEL__ |
416 | 420 | ||
417 | #include <asm-generic/bitops/ext2-non-atomic.h> | 421 | #include <asm-generic/bitops/ext2-non-atomic.h> |
418 | 422 | ||
419 | #define ext2_set_bit_atomic(lock,nr,addr) \ | 423 | #define ext2_set_bit_atomic(lock,nr,addr) \ |
420 | test_and_set_bit((nr),(unsigned long*)addr) | 424 | test_and_set_bit((nr),(unsigned long*)addr) |
421 | #define ext2_clear_bit_atomic(lock,nr,addr) \ | 425 | #define ext2_clear_bit_atomic(lock,nr,addr) \ |
422 | test_and_clear_bit((nr),(unsigned long*)addr) | 426 | test_and_clear_bit((nr),(unsigned long*)addr) |
423 | 427 | ||
424 | #include <asm-generic/bitops/minix.h> | 428 | #include <asm-generic/bitops/minix.h> |
425 | 429 | ||
426 | #endif /* __KERNEL__ */ | 430 | #endif /* __KERNEL__ */ |
427 | 431 | ||
428 | #endif /* _X86_64_BITOPS_H */ | 432 | #endif /* _X86_64_BITOPS_H */ |
429 | 433 |
include/asm-xtensa/bitops.h
1 | /* | 1 | /* |
2 | * include/asm-xtensa/bitops.h | 2 | * include/asm-xtensa/bitops.h |
3 | * | 3 | * |
4 | * Atomic operations that C can't guarantee us. Useful for resource counting etc. | 4 | * Atomic operations that C can't guarantee us. Useful for resource counting etc. |
5 | * | 5 | * |
6 | * This file is subject to the terms and conditions of the GNU General Public | 6 | * This file is subject to the terms and conditions of the GNU General Public |
7 | * License. See the file "COPYING" in the main directory of this archive | 7 | * License. See the file "COPYING" in the main directory of this archive |
8 | * for more details. | 8 | * for more details. |
9 | * | 9 | * |
10 | * Copyright (C) 2001 - 2007 Tensilica Inc. | 10 | * Copyright (C) 2001 - 2007 Tensilica Inc. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #ifndef _XTENSA_BITOPS_H | 13 | #ifndef _XTENSA_BITOPS_H |
14 | #define _XTENSA_BITOPS_H | 14 | #define _XTENSA_BITOPS_H |
15 | 15 | ||
16 | #ifdef __KERNEL__ | 16 | #ifdef __KERNEL__ |
17 | 17 | ||
18 | #ifndef _LINUX_BITOPS_H | ||
19 | #error only <linux/bitops.h> can be included directly | ||
20 | #endif | ||
21 | |||
18 | #include <asm/processor.h> | 22 | #include <asm/processor.h> |
19 | #include <asm/byteorder.h> | 23 | #include <asm/byteorder.h> |
20 | #include <asm/system.h> | 24 | #include <asm/system.h> |
21 | 25 | ||
22 | #ifdef CONFIG_SMP | 26 | #ifdef CONFIG_SMP |
23 | # error SMP not supported on this architecture | 27 | # error SMP not supported on this architecture |
24 | #endif | 28 | #endif |
25 | 29 | ||
26 | #define smp_mb__before_clear_bit() barrier() | 30 | #define smp_mb__before_clear_bit() barrier() |
27 | #define smp_mb__after_clear_bit() barrier() | 31 | #define smp_mb__after_clear_bit() barrier() |
28 | 32 | ||
29 | #include <asm-generic/bitops/atomic.h> | 33 | #include <asm-generic/bitops/atomic.h> |
30 | #include <asm-generic/bitops/non-atomic.h> | 34 | #include <asm-generic/bitops/non-atomic.h> |
31 | 35 | ||
32 | #if XCHAL_HAVE_NSA | 36 | #if XCHAL_HAVE_NSA |
33 | 37 | ||
34 | static inline unsigned long __cntlz (unsigned long x) | 38 | static inline unsigned long __cntlz (unsigned long x) |
35 | { | 39 | { |
36 | int lz; | 40 | int lz; |
37 | asm ("nsau %0, %1" : "=r" (lz) : "r" (x)); | 41 | asm ("nsau %0, %1" : "=r" (lz) : "r" (x)); |
38 | return lz; | 42 | return lz; |
39 | } | 43 | } |
40 | 44 | ||
41 | /* | 45 | /* |
42 | * ffz: Find first zero in word. Undefined if no zero exists. | 46 | * ffz: Find first zero in word. Undefined if no zero exists. |
43 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). | 47 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). |
44 | */ | 48 | */ |
45 | 49 | ||
46 | static inline int ffz(unsigned long x) | 50 | static inline int ffz(unsigned long x) |
47 | { | 51 | { |
48 | return 31 - __cntlz(~x & -~x); | 52 | return 31 - __cntlz(~x & -~x); |
49 | } | 53 | } |
50 | 54 | ||
51 | /* | 55 | /* |
52 | * __ffs: Find first bit set in word. Return 0 for bit 0 | 56 | * __ffs: Find first bit set in word. Return 0 for bit 0 |
53 | */ | 57 | */ |
54 | 58 | ||
55 | static inline int __ffs(unsigned long x) | 59 | static inline int __ffs(unsigned long x) |
56 | { | 60 | { |
57 | return 31 - __cntlz(x & -x); | 61 | return 31 - __cntlz(x & -x); |
58 | } | 62 | } |
59 | 63 | ||
60 | /* | 64 | /* |
61 | * ffs: Find first bit set in word. This is defined the same way as | 65 | * ffs: Find first bit set in word. This is defined the same way as |
62 | * the libc and compiler builtin ffs routines, therefore | 66 | * the libc and compiler builtin ffs routines, therefore |
63 | * differs in spirit from the above ffz (man ffs). | 67 | * differs in spirit from the above ffz (man ffs). |
64 | */ | 68 | */ |
65 | 69 | ||
66 | static inline int ffs(unsigned long x) | 70 | static inline int ffs(unsigned long x) |
67 | { | 71 | { |
68 | return 32 - __cntlz(x & -x); | 72 | return 32 - __cntlz(x & -x); |
69 | } | 73 | } |
70 | 74 | ||
71 | /* | 75 | /* |
72 | * fls: Find last (most-significant) bit set in word. | 76 | * fls: Find last (most-significant) bit set in word. |
73 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. | 77 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. |
74 | */ | 78 | */ |
75 | 79 | ||
76 | static inline int fls (unsigned int x) | 80 | static inline int fls (unsigned int x) |
77 | { | 81 | { |
78 | return 32 - __cntlz(x); | 82 | return 32 - __cntlz(x); |
79 | } | 83 | } |
80 | 84 | ||
81 | #else | 85 | #else |
82 | 86 | ||
83 | /* Use the generic implementation if we don't have the nsa/nsau instructions. */ | 87 | /* Use the generic implementation if we don't have the nsa/nsau instructions. */ |
84 | 88 | ||
85 | # include <asm-generic/bitops/ffs.h> | 89 | # include <asm-generic/bitops/ffs.h> |
86 | # include <asm-generic/bitops/__ffs.h> | 90 | # include <asm-generic/bitops/__ffs.h> |
87 | # include <asm-generic/bitops/ffz.h> | 91 | # include <asm-generic/bitops/ffz.h> |
88 | # include <asm-generic/bitops/fls.h> | 92 | # include <asm-generic/bitops/fls.h> |
89 | 93 | ||
90 | #endif | 94 | #endif |
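All of the nsau-based helpers above reduce to one count-leading-zeros primitive: isolate the interesting bit with x & -x (or ~x & -~x for ffz) and subtract the leading-zero count from 31 or 32. A rough equivalent using the GCC builtin, illustrative only and not part of the patch (note __builtin_clz(0) is undefined, hence the explicit zero checks):

/* Illustrative only: the same identities expressed with __builtin_clz(). */
static inline int ffs_via_clz(unsigned long x)
{
        return x ? 32 - __builtin_clz(x & -x) : 0;
}

static inline int fls_via_clz(unsigned long x)
{
        return x ? 32 - __builtin_clz(x) : 0;
}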
91 | 95 | ||
92 | #include <asm-generic/bitops/fls64.h> | 96 | #include <asm-generic/bitops/fls64.h> |
93 | #include <asm-generic/bitops/find.h> | 97 | #include <asm-generic/bitops/find.h> |
94 | #include <asm-generic/bitops/ext2-non-atomic.h> | 98 | #include <asm-generic/bitops/ext2-non-atomic.h> |
95 | 99 | ||
96 | #ifdef __XTENSA_EL__ | 100 | #ifdef __XTENSA_EL__ |
97 | # define ext2_set_bit_atomic(lock,nr,addr) \ | 101 | # define ext2_set_bit_atomic(lock,nr,addr) \ |
98 | test_and_set_bit((nr), (unsigned long*)(addr)) | 102 | test_and_set_bit((nr), (unsigned long*)(addr)) |
99 | # define ext2_clear_bit_atomic(lock,nr,addr) \ | 103 | # define ext2_clear_bit_atomic(lock,nr,addr) \ |
100 | test_and_clear_bit((nr), (unsigned long*)(addr)) | 104 | test_and_clear_bit((nr), (unsigned long*)(addr)) |
101 | #elif defined(__XTENSA_EB__) | 105 | #elif defined(__XTENSA_EB__) |
102 | # define ext2_set_bit_atomic(lock,nr,addr) \ | 106 | # define ext2_set_bit_atomic(lock,nr,addr) \ |
103 | test_and_set_bit((nr) ^ 0x18, (unsigned long*)(addr)) | 107 | test_and_set_bit((nr) ^ 0x18, (unsigned long*)(addr)) |
104 | # define ext2_clear_bit_atomic(lock,nr,addr) \ | 108 | # define ext2_clear_bit_atomic(lock,nr,addr) \ |
105 | test_and_clear_bit((nr) ^ 0x18, (unsigned long*)(addr)) | 109 | test_and_clear_bit((nr) ^ 0x18, (unsigned long*)(addr)) |
106 | #else | 110 | #else |
107 | # error processor byte order undefined! | 111 | # error processor byte order undefined! |
108 | #endif | 112 | #endif |
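On the big-endian (__XTENSA_EB__) side the ext2 bit number is XORed with 0x18, i.e. the byte index within the 32-bit word is mirrored while the bit position inside the byte is kept, so little-endian on-disk bit numbering lands on the correct native bit. A small illustrative helper spelling out that remapping:

/* Illustrative only: nr ^ 0x18 mirrors the byte index within the word.
 * ext2 bit 0 -> native bit 24 (lowest-addressed byte on big-endian),
 * ext2 bit 9 -> native bit 17 (bit 1 of the second byte), and so on.
 */
static inline int ext2_nr_to_native_eb(int nr)
{
        int byte = (nr >> 3) ^ 3;               /* mirror byte 0..3 -> 3..0 */

        return (byte << 3) | (nr & 7);          /* identical to nr ^ 0x18   */
}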
109 | 113 | ||
110 | #include <asm-generic/bitops/hweight.h> | 114 | #include <asm-generic/bitops/hweight.h> |
111 | #include <asm-generic/bitops/lock.h> | 115 | #include <asm-generic/bitops/lock.h> |
112 | #include <asm-generic/bitops/sched.h> | 116 | #include <asm-generic/bitops/sched.h> |
113 | #include <asm-generic/bitops/minix.h> | 117 | #include <asm-generic/bitops/minix.h> |
114 | 118 | ||
115 | #endif /* __KERNEL__ */ | 119 | #endif /* __KERNEL__ */ |
116 | 120 | ||
117 | #endif /* _XTENSA_BITOPS_H */ | 121 | #endif /* _XTENSA_BITOPS_H */ |
118 | 122 |