Commit 9c1ee9387c0ce06d573e2d27de10cbc24179941e
Committed by: Linus Torvalds
1 parent: df28f34bf9
Exists in: master and 7 other branches
[PATCH] m68knommu: change addr arg to const in bitops.h/find_next_zero_bit()
Change addr arg to find_next_zero_bit to be a const. Cleans up compiler warning.

Signed-off-by: Greg Ungerer <gerg@uclinux.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
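For context, the warning this cleans up comes from callers that pass a pointer-to-const bitmap into the old non-const prototype. A minimal sketch of such a caller follows; the function and variable names are illustrative, not taken from the patch, and the quoted warning text is only approximate GCC wording.

    /* Hypothetical caller, for illustration only. */
    #include <asm/bitops.h>

    static int first_free_slot(const unsigned long *map, int nbits)
    {
    	/*
    	 * With the old prototype
    	 *     int find_next_zero_bit(void *addr, int size, int offset);
    	 * passing 'map' drops the const qualifier and GCC emits a
    	 * "passing arg 1 ... discards qualifiers from pointer target type"
    	 * style warning.  With 'const void *addr' the call is clean.
    	 */
    	return find_next_zero_bit(map, nbits, 0);
    }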
Showing 1 changed file with 1 addition and 1 deletion (inline diff)
include/asm-m68knommu/bitops.h
1 | #ifndef _M68KNOMMU_BITOPS_H | 1 | #ifndef _M68KNOMMU_BITOPS_H |
2 | #define _M68KNOMMU_BITOPS_H | 2 | #define _M68KNOMMU_BITOPS_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Copyright 1992, Linus Torvalds. | 5 | * Copyright 1992, Linus Torvalds. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/config.h> | 8 | #include <linux/config.h> |
9 | #include <linux/compiler.h> | 9 | #include <linux/compiler.h> |
10 | #include <asm/byteorder.h> /* swab32 */ | 10 | #include <asm/byteorder.h> /* swab32 */ |
11 | #include <asm/system.h> /* save_flags */ | 11 | #include <asm/system.h> /* save_flags */ |
12 | 12 | ||
13 | #ifdef __KERNEL__ | 13 | #ifdef __KERNEL__ |
14 | 14 | ||
15 | /* | 15 | /* |
16 | * Generic ffs(). | 16 | * Generic ffs(). |
17 | */ | 17 | */ |
18 | static inline int ffs(int x) | 18 | static inline int ffs(int x) |
19 | { | 19 | { |
20 | int r = 1; | 20 | int r = 1; |
21 | 21 | ||
22 | if (!x) | 22 | if (!x) |
23 | return 0; | 23 | return 0; |
24 | if (!(x & 0xffff)) { | 24 | if (!(x & 0xffff)) { |
25 | x >>= 16; | 25 | x >>= 16; |
26 | r += 16; | 26 | r += 16; |
27 | } | 27 | } |
28 | if (!(x & 0xff)) { | 28 | if (!(x & 0xff)) { |
29 | x >>= 8; | 29 | x >>= 8; |
30 | r += 8; | 30 | r += 8; |
31 | } | 31 | } |
32 | if (!(x & 0xf)) { | 32 | if (!(x & 0xf)) { |
33 | x >>= 4; | 33 | x >>= 4; |
34 | r += 4; | 34 | r += 4; |
35 | } | 35 | } |
36 | if (!(x & 3)) { | 36 | if (!(x & 3)) { |
37 | x >>= 2; | 37 | x >>= 2; |
38 | r += 2; | 38 | r += 2; |
39 | } | 39 | } |
40 | if (!(x & 1)) { | 40 | if (!(x & 1)) { |
41 | x >>= 1; | 41 | x >>= 1; |
42 | r += 1; | 42 | r += 1; |
43 | } | 43 | } |
44 | return r; | 44 | return r; |
45 | } | 45 | } |
46 | 46 | ||
47 | /* | 47 | /* |
48 | * Generic __ffs(). | 48 | * Generic __ffs(). |
49 | */ | 49 | */ |
50 | static inline int __ffs(int x) | 50 | static inline int __ffs(int x) |
51 | { | 51 | { |
52 | int r = 0; | 52 | int r = 0; |
53 | 53 | ||
54 | if (!x) | 54 | if (!x) |
55 | return 0; | 55 | return 0; |
56 | if (!(x & 0xffff)) { | 56 | if (!(x & 0xffff)) { |
57 | x >>= 16; | 57 | x >>= 16; |
58 | r += 16; | 58 | r += 16; |
59 | } | 59 | } |
60 | if (!(x & 0xff)) { | 60 | if (!(x & 0xff)) { |
61 | x >>= 8; | 61 | x >>= 8; |
62 | r += 8; | 62 | r += 8; |
63 | } | 63 | } |
64 | if (!(x & 0xf)) { | 64 | if (!(x & 0xf)) { |
65 | x >>= 4; | 65 | x >>= 4; |
66 | r += 4; | 66 | r += 4; |
67 | } | 67 | } |
68 | if (!(x & 3)) { | 68 | if (!(x & 3)) { |
69 | x >>= 2; | 69 | x >>= 2; |
70 | r += 2; | 70 | r += 2; |
71 | } | 71 | } |
72 | if (!(x & 1)) { | 72 | if (!(x & 1)) { |
73 | x >>= 1; | 73 | x >>= 1; |
74 | r += 1; | 74 | r += 1; |
75 | } | 75 | } |
76 | return r; | 76 | return r; |
77 | } | 77 | } |
78 | 78 | ||
79 | /* | 79 | /* |
80 | * Every architecture must define this function. It's the fastest | 80 | * Every architecture must define this function. It's the fastest |
81 | * way of searching a 140-bit bitmap where the first 100 bits are | 81 | * way of searching a 140-bit bitmap where the first 100 bits are |
82 | * unlikely to be set. It's guaranteed that at least one of the 140 | 82 | * unlikely to be set. It's guaranteed that at least one of the 140 |
83 | * bits is cleared. | 83 | * bits is cleared. |
84 | */ | 84 | */ |
85 | static inline int sched_find_first_bit(unsigned long *b) | 85 | static inline int sched_find_first_bit(unsigned long *b) |
86 | { | 86 | { |
87 | if (unlikely(b[0])) | 87 | if (unlikely(b[0])) |
88 | return __ffs(b[0]); | 88 | return __ffs(b[0]); |
89 | if (unlikely(b[1])) | 89 | if (unlikely(b[1])) |
90 | return __ffs(b[1]) + 32; | 90 | return __ffs(b[1]) + 32; |
91 | if (unlikely(b[2])) | 91 | if (unlikely(b[2])) |
92 | return __ffs(b[2]) + 64; | 92 | return __ffs(b[2]) + 64; |
93 | if (b[3]) | 93 | if (b[3]) |
94 | return __ffs(b[3]) + 96; | 94 | return __ffs(b[3]) + 96; |
95 | return __ffs(b[4]) + 128; | 95 | return __ffs(b[4]) + 128; |
96 | } | 96 | } |
97 | 97 | ||
98 | /* | 98 | /* |
99 | * ffz = Find First Zero in word. Undefined if no zero exists, | 99 | * ffz = Find First Zero in word. Undefined if no zero exists, |
100 | * so code should check against ~0UL first.. | 100 | * so code should check against ~0UL first.. |
101 | */ | 101 | */ |
102 | static __inline__ unsigned long ffz(unsigned long word) | 102 | static __inline__ unsigned long ffz(unsigned long word) |
103 | { | 103 | { |
104 | unsigned long result = 0; | 104 | unsigned long result = 0; |
105 | 105 | ||
106 | while(word & 1) { | 106 | while(word & 1) { |
107 | result++; | 107 | result++; |
108 | word >>= 1; | 108 | word >>= 1; |
109 | } | 109 | } |
110 | return result; | 110 | return result; |
111 | } | 111 | } |
112 | 112 | ||
113 | 113 | ||
114 | static __inline__ void set_bit(int nr, volatile unsigned long * addr) | 114 | static __inline__ void set_bit(int nr, volatile unsigned long * addr) |
115 | { | 115 | { |
116 | #ifdef CONFIG_COLDFIRE | 116 | #ifdef CONFIG_COLDFIRE |
117 | __asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)" | 117 | __asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)" |
118 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 118 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
119 | : "d" (nr) | 119 | : "d" (nr) |
120 | : "%a0", "cc"); | 120 | : "%a0", "cc"); |
121 | #else | 121 | #else |
122 | __asm__ __volatile__ ("bset %1,%0" | 122 | __asm__ __volatile__ ("bset %1,%0" |
123 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 123 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
124 | : "di" (nr) | 124 | : "di" (nr) |
125 | : "cc"); | 125 | : "cc"); |
126 | #endif | 126 | #endif |
127 | } | 127 | } |
128 | 128 | ||
129 | #define __set_bit(nr, addr) set_bit(nr, addr) | 129 | #define __set_bit(nr, addr) set_bit(nr, addr) |
130 | 130 | ||
131 | /* | 131 | /* |
132 | * clear_bit() doesn't provide any barrier for the compiler. | 132 | * clear_bit() doesn't provide any barrier for the compiler. |
133 | */ | 133 | */ |
134 | #define smp_mb__before_clear_bit() barrier() | 134 | #define smp_mb__before_clear_bit() barrier() |
135 | #define smp_mb__after_clear_bit() barrier() | 135 | #define smp_mb__after_clear_bit() barrier() |
136 | 136 | ||
137 | static __inline__ void clear_bit(int nr, volatile unsigned long * addr) | 137 | static __inline__ void clear_bit(int nr, volatile unsigned long * addr) |
138 | { | 138 | { |
139 | #ifdef CONFIG_COLDFIRE | 139 | #ifdef CONFIG_COLDFIRE |
140 | __asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)" | 140 | __asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)" |
141 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 141 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
142 | : "d" (nr) | 142 | : "d" (nr) |
143 | : "%a0", "cc"); | 143 | : "%a0", "cc"); |
144 | #else | 144 | #else |
145 | __asm__ __volatile__ ("bclr %1,%0" | 145 | __asm__ __volatile__ ("bclr %1,%0" |
146 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 146 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
147 | : "di" (nr) | 147 | : "di" (nr) |
148 | : "cc"); | 148 | : "cc"); |
149 | #endif | 149 | #endif |
150 | } | 150 | } |
151 | 151 | ||
152 | #define __clear_bit(nr, addr) clear_bit(nr, addr) | 152 | #define __clear_bit(nr, addr) clear_bit(nr, addr) |
153 | 153 | ||
154 | static __inline__ void change_bit(int nr, volatile unsigned long * addr) | 154 | static __inline__ void change_bit(int nr, volatile unsigned long * addr) |
155 | { | 155 | { |
156 | #ifdef CONFIG_COLDFIRE | 156 | #ifdef CONFIG_COLDFIRE |
157 | __asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)" | 157 | __asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)" |
158 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 158 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
159 | : "d" (nr) | 159 | : "d" (nr) |
160 | : "%a0", "cc"); | 160 | : "%a0", "cc"); |
161 | #else | 161 | #else |
162 | __asm__ __volatile__ ("bchg %1,%0" | 162 | __asm__ __volatile__ ("bchg %1,%0" |
163 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 163 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
164 | : "di" (nr) | 164 | : "di" (nr) |
165 | : "cc"); | 165 | : "cc"); |
166 | #endif | 166 | #endif |
167 | } | 167 | } |
168 | 168 | ||
169 | #define __change_bit(nr, addr) change_bit(nr, addr) | 169 | #define __change_bit(nr, addr) change_bit(nr, addr) |
170 | 170 | ||
171 | static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr) | 171 | static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr) |
172 | { | 172 | { |
173 | char retval; | 173 | char retval; |
174 | 174 | ||
175 | #ifdef CONFIG_COLDFIRE | 175 | #ifdef CONFIG_COLDFIRE |
176 | __asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0" | 176 | __asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0" |
177 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 177 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
178 | : "d" (nr) | 178 | : "d" (nr) |
179 | : "%a0"); | 179 | : "%a0"); |
180 | #else | 180 | #else |
181 | __asm__ __volatile__ ("bset %2,%1; sne %0" | 181 | __asm__ __volatile__ ("bset %2,%1; sne %0" |
182 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 182 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
183 | : "di" (nr) | 183 | : "di" (nr) |
184 | /* No clobber */); | 184 | /* No clobber */); |
185 | #endif | 185 | #endif |
186 | 186 | ||
187 | return retval; | 187 | return retval; |
188 | } | 188 | } |
189 | 189 | ||
190 | #define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr) | 190 | #define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr) |
191 | 191 | ||
192 | static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr) | 192 | static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr) |
193 | { | 193 | { |
194 | char retval; | 194 | char retval; |
195 | 195 | ||
196 | #ifdef CONFIG_COLDFIRE | 196 | #ifdef CONFIG_COLDFIRE |
197 | __asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0" | 197 | __asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0" |
198 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 198 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
199 | : "d" (nr) | 199 | : "d" (nr) |
200 | : "%a0"); | 200 | : "%a0"); |
201 | #else | 201 | #else |
202 | __asm__ __volatile__ ("bclr %2,%1; sne %0" | 202 | __asm__ __volatile__ ("bclr %2,%1; sne %0" |
203 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 203 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
204 | : "di" (nr) | 204 | : "di" (nr) |
205 | /* No clobber */); | 205 | /* No clobber */); |
206 | #endif | 206 | #endif |
207 | 207 | ||
208 | return retval; | 208 | return retval; |
209 | } | 209 | } |
210 | 210 | ||
211 | #define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr) | 211 | #define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr) |
212 | 212 | ||
213 | static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr) | 213 | static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr) |
214 | { | 214 | { |
215 | char retval; | 215 | char retval; |
216 | 216 | ||
217 | #ifdef CONFIG_COLDFIRE | 217 | #ifdef CONFIG_COLDFIRE |
218 | __asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0" | 218 | __asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0" |
219 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 219 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
220 | : "d" (nr) | 220 | : "d" (nr) |
221 | : "%a0"); | 221 | : "%a0"); |
222 | #else | 222 | #else |
223 | __asm__ __volatile__ ("bchg %2,%1; sne %0" | 223 | __asm__ __volatile__ ("bchg %2,%1; sne %0" |
224 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 224 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
225 | : "di" (nr) | 225 | : "di" (nr) |
226 | /* No clobber */); | 226 | /* No clobber */); |
227 | #endif | 227 | #endif |
228 | 228 | ||
229 | return retval; | 229 | return retval; |
230 | } | 230 | } |
231 | 231 | ||
232 | #define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr) | 232 | #define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr) |
233 | 233 | ||
234 | /* | 234 | /* |
235 | * This routine doesn't need to be atomic. | 235 | * This routine doesn't need to be atomic. |
236 | */ | 236 | */ |
237 | static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr) | 237 | static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr) |
238 | { | 238 | { |
239 | return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; | 239 | return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; |
240 | } | 240 | } |
241 | 241 | ||
242 | static __inline__ int __test_bit(int nr, const volatile unsigned long * addr) | 242 | static __inline__ int __test_bit(int nr, const volatile unsigned long * addr) |
243 | { | 243 | { |
244 | int * a = (int *) addr; | 244 | int * a = (int *) addr; |
245 | int mask; | 245 | int mask; |
246 | 246 | ||
247 | a += nr >> 5; | 247 | a += nr >> 5; |
248 | mask = 1 << (nr & 0x1f); | 248 | mask = 1 << (nr & 0x1f); |
249 | return ((mask & *a) != 0); | 249 | return ((mask & *a) != 0); |
250 | } | 250 | } |
251 | 251 | ||
252 | #define test_bit(nr,addr) \ | 252 | #define test_bit(nr,addr) \ |
253 | (__builtin_constant_p(nr) ? \ | 253 | (__builtin_constant_p(nr) ? \ |
254 | __constant_test_bit((nr),(addr)) : \ | 254 | __constant_test_bit((nr),(addr)) : \ |
255 | __test_bit((nr),(addr))) | 255 | __test_bit((nr),(addr))) |
256 | 256 | ||
257 | #define find_first_zero_bit(addr, size) \ | 257 | #define find_first_zero_bit(addr, size) \ |
258 | find_next_zero_bit((addr), (size), 0) | 258 | find_next_zero_bit((addr), (size), 0) |
259 | #define find_first_bit(addr, size) \ | 259 | #define find_first_bit(addr, size) \ |
260 | find_next_bit((addr), (size), 0) | 260 | find_next_bit((addr), (size), 0) |
261 | 261 | ||
262 | static __inline__ int find_next_zero_bit (void * addr, int size, int offset) | 262 | static __inline__ int find_next_zero_bit (const void * addr, int size, int offset) |
263 | { | 263 | { |
264 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | 264 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); |
265 | unsigned long result = offset & ~31UL; | 265 | unsigned long result = offset & ~31UL; |
266 | unsigned long tmp; | 266 | unsigned long tmp; |
267 | 267 | ||
268 | if (offset >= size) | 268 | if (offset >= size) |
269 | return size; | 269 | return size; |
270 | size -= result; | 270 | size -= result; |
271 | offset &= 31UL; | 271 | offset &= 31UL; |
272 | if (offset) { | 272 | if (offset) { |
273 | tmp = *(p++); | 273 | tmp = *(p++); |
274 | tmp |= ~0UL >> (32-offset); | 274 | tmp |= ~0UL >> (32-offset); |
275 | if (size < 32) | 275 | if (size < 32) |
276 | goto found_first; | 276 | goto found_first; |
277 | if (~tmp) | 277 | if (~tmp) |
278 | goto found_middle; | 278 | goto found_middle; |
279 | size -= 32; | 279 | size -= 32; |
280 | result += 32; | 280 | result += 32; |
281 | } | 281 | } |
282 | while (size & ~31UL) { | 282 | while (size & ~31UL) { |
283 | if (~(tmp = *(p++))) | 283 | if (~(tmp = *(p++))) |
284 | goto found_middle; | 284 | goto found_middle; |
285 | result += 32; | 285 | result += 32; |
286 | size -= 32; | 286 | size -= 32; |
287 | } | 287 | } |
288 | if (!size) | 288 | if (!size) |
289 | return result; | 289 | return result; |
290 | tmp = *p; | 290 | tmp = *p; |
291 | 291 | ||
292 | found_first: | 292 | found_first: |
293 | tmp |= ~0UL >> size; | 293 | tmp |= ~0UL >> size; |
294 | found_middle: | 294 | found_middle: |
295 | return result + ffz(tmp); | 295 | return result + ffz(tmp); |
296 | } | 296 | } |
297 | 297 | ||
298 | /* | 298 | /* |
299 | * Find next one bit in a bitmap reasonably efficiently. | 299 | * Find next one bit in a bitmap reasonably efficiently. |
300 | */ | 300 | */ |
301 | static __inline__ unsigned long find_next_bit(const unsigned long *addr, | 301 | static __inline__ unsigned long find_next_bit(const unsigned long *addr, |
302 | unsigned long size, unsigned long offset) | 302 | unsigned long size, unsigned long offset) |
303 | { | 303 | { |
304 | unsigned int *p = ((unsigned int *) addr) + (offset >> 5); | 304 | unsigned int *p = ((unsigned int *) addr) + (offset >> 5); |
305 | unsigned int result = offset & ~31UL; | 305 | unsigned int result = offset & ~31UL; |
306 | unsigned int tmp; | 306 | unsigned int tmp; |
307 | 307 | ||
308 | if (offset >= size) | 308 | if (offset >= size) |
309 | return size; | 309 | return size; |
310 | size -= result; | 310 | size -= result; |
311 | offset &= 31UL; | 311 | offset &= 31UL; |
312 | if (offset) { | 312 | if (offset) { |
313 | tmp = *p++; | 313 | tmp = *p++; |
314 | tmp &= ~0UL << offset; | 314 | tmp &= ~0UL << offset; |
315 | if (size < 32) | 315 | if (size < 32) |
316 | goto found_first; | 316 | goto found_first; |
317 | if (tmp) | 317 | if (tmp) |
318 | goto found_middle; | 318 | goto found_middle; |
319 | size -= 32; | 319 | size -= 32; |
320 | result += 32; | 320 | result += 32; |
321 | } | 321 | } |
322 | while (size >= 32) { | 322 | while (size >= 32) { |
323 | if ((tmp = *p++) != 0) | 323 | if ((tmp = *p++) != 0) |
324 | goto found_middle; | 324 | goto found_middle; |
325 | result += 32; | 325 | result += 32; |
326 | size -= 32; | 326 | size -= 32; |
327 | } | 327 | } |
328 | if (!size) | 328 | if (!size) |
329 | return result; | 329 | return result; |
330 | tmp = *p; | 330 | tmp = *p; |
331 | 331 | ||
332 | found_first: | 332 | found_first: |
333 | tmp &= ~0UL >> (32 - size); | 333 | tmp &= ~0UL >> (32 - size); |
334 | if (tmp == 0UL) /* Are any bits set? */ | 334 | if (tmp == 0UL) /* Are any bits set? */ |
335 | return result + size; /* Nope. */ | 335 | return result + size; /* Nope. */ |
336 | found_middle: | 336 | found_middle: |
337 | return result + __ffs(tmp); | 337 | return result + __ffs(tmp); |
338 | } | 338 | } |
339 | 339 | ||
340 | /* | 340 | /* |
341 | * hweightN: returns the hamming weight (i.e. the number | 341 | * hweightN: returns the hamming weight (i.e. the number |
342 | * of bits set) of a N-bit word | 342 | * of bits set) of a N-bit word |
343 | */ | 343 | */ |
344 | 344 | ||
345 | #define hweight32(x) generic_hweight32(x) | 345 | #define hweight32(x) generic_hweight32(x) |
346 | #define hweight16(x) generic_hweight16(x) | 346 | #define hweight16(x) generic_hweight16(x) |
347 | #define hweight8(x) generic_hweight8(x) | 347 | #define hweight8(x) generic_hweight8(x) |
348 | 348 | ||
349 | 349 | ||
350 | static __inline__ int ext2_set_bit(int nr, volatile void * addr) | 350 | static __inline__ int ext2_set_bit(int nr, volatile void * addr) |
351 | { | 351 | { |
352 | char retval; | 352 | char retval; |
353 | 353 | ||
354 | #ifdef CONFIG_COLDFIRE | 354 | #ifdef CONFIG_COLDFIRE |
355 | __asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0" | 355 | __asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0" |
356 | : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3]) | 356 | : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3]) |
357 | : "d" (nr) | 357 | : "d" (nr) |
358 | : "%a0"); | 358 | : "%a0"); |
359 | #else | 359 | #else |
360 | __asm__ __volatile__ ("bset %2,%1; sne %0" | 360 | __asm__ __volatile__ ("bset %2,%1; sne %0" |
361 | : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3]) | 361 | : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3]) |
362 | : "di" (nr) | 362 | : "di" (nr) |
363 | /* No clobber */); | 363 | /* No clobber */); |
364 | #endif | 364 | #endif |
365 | 365 | ||
366 | return retval; | 366 | return retval; |
367 | } | 367 | } |
368 | 368 | ||
369 | static __inline__ int ext2_clear_bit(int nr, volatile void * addr) | 369 | static __inline__ int ext2_clear_bit(int nr, volatile void * addr) |
370 | { | 370 | { |
371 | char retval; | 371 | char retval; |
372 | 372 | ||
373 | #ifdef CONFIG_COLDFIRE | 373 | #ifdef CONFIG_COLDFIRE |
374 | __asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0" | 374 | __asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0" |
375 | : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3]) | 375 | : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3]) |
376 | : "d" (nr) | 376 | : "d" (nr) |
377 | : "%a0"); | 377 | : "%a0"); |
378 | #else | 378 | #else |
379 | __asm__ __volatile__ ("bclr %2,%1; sne %0" | 379 | __asm__ __volatile__ ("bclr %2,%1; sne %0" |
380 | : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3]) | 380 | : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3]) |
381 | : "di" (nr) | 381 | : "di" (nr) |
382 | /* No clobber */); | 382 | /* No clobber */); |
383 | #endif | 383 | #endif |
384 | 384 | ||
385 | return retval; | 385 | return retval; |
386 | } | 386 | } |
387 | 387 | ||
388 | #define ext2_set_bit_atomic(lock, nr, addr) \ | 388 | #define ext2_set_bit_atomic(lock, nr, addr) \ |
389 | ({ \ | 389 | ({ \ |
390 | int ret; \ | 390 | int ret; \ |
391 | spin_lock(lock); \ | 391 | spin_lock(lock); \ |
392 | ret = ext2_set_bit((nr), (addr)); \ | 392 | ret = ext2_set_bit((nr), (addr)); \ |
393 | spin_unlock(lock); \ | 393 | spin_unlock(lock); \ |
394 | ret; \ | 394 | ret; \ |
395 | }) | 395 | }) |
396 | 396 | ||
397 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | 397 | #define ext2_clear_bit_atomic(lock, nr, addr) \ |
398 | ({ \ | 398 | ({ \ |
399 | int ret; \ | 399 | int ret; \ |
400 | spin_lock(lock); \ | 400 | spin_lock(lock); \ |
401 | ret = ext2_clear_bit((nr), (addr)); \ | 401 | ret = ext2_clear_bit((nr), (addr)); \ |
402 | spin_unlock(lock); \ | 402 | spin_unlock(lock); \ |
403 | ret; \ | 403 | ret; \ |
404 | }) | 404 | }) |
405 | 405 | ||
406 | static __inline__ int ext2_test_bit(int nr, const volatile void * addr) | 406 | static __inline__ int ext2_test_bit(int nr, const volatile void * addr) |
407 | { | 407 | { |
408 | char retval; | 408 | char retval; |
409 | 409 | ||
410 | #ifdef CONFIG_COLDFIRE | 410 | #ifdef CONFIG_COLDFIRE |
411 | __asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0" | 411 | __asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0" |
412 | : "=d" (retval) | 412 | : "=d" (retval) |
413 | : "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr) | 413 | : "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr) |
414 | : "%a0"); | 414 | : "%a0"); |
415 | #else | 415 | #else |
416 | __asm__ __volatile__ ("btst %2,%1; sne %0" | 416 | __asm__ __volatile__ ("btst %2,%1; sne %0" |
417 | : "=d" (retval) | 417 | : "=d" (retval) |
418 | : "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr) | 418 | : "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr) |
419 | /* No clobber */); | 419 | /* No clobber */); |
420 | #endif | 420 | #endif |
421 | 421 | ||
422 | return retval; | 422 | return retval; |
423 | } | 423 | } |
424 | 424 | ||
425 | #define ext2_find_first_zero_bit(addr, size) \ | 425 | #define ext2_find_first_zero_bit(addr, size) \ |
426 | ext2_find_next_zero_bit((addr), (size), 0) | 426 | ext2_find_next_zero_bit((addr), (size), 0) |
427 | 427 | ||
428 | static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset) | 428 | static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset) |
429 | { | 429 | { |
430 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | 430 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); |
431 | unsigned long result = offset & ~31UL; | 431 | unsigned long result = offset & ~31UL; |
432 | unsigned long tmp; | 432 | unsigned long tmp; |
433 | 433 | ||
434 | if (offset >= size) | 434 | if (offset >= size) |
435 | return size; | 435 | return size; |
436 | size -= result; | 436 | size -= result; |
437 | offset &= 31UL; | 437 | offset &= 31UL; |
438 | if(offset) { | 438 | if(offset) { |
439 | /* We hold the little endian value in tmp, but then the | 439 | /* We hold the little endian value in tmp, but then the |
440 | * shift is illegal. So we could keep a big endian value | 440 | * shift is illegal. So we could keep a big endian value |
441 | * in tmp, like this: | 441 | * in tmp, like this: |
442 | * | 442 | * |
443 | * tmp = __swab32(*(p++)); | 443 | * tmp = __swab32(*(p++)); |
444 | * tmp |= ~0UL >> (32-offset); | 444 | * tmp |= ~0UL >> (32-offset); |
445 | * | 445 | * |
446 | * but this would decrease preformance, so we change the | 446 | * but this would decrease preformance, so we change the |
447 | * shift: | 447 | * shift: |
448 | */ | 448 | */ |
449 | tmp = *(p++); | 449 | tmp = *(p++); |
450 | tmp |= __swab32(~0UL >> (32-offset)); | 450 | tmp |= __swab32(~0UL >> (32-offset)); |
451 | if(size < 32) | 451 | if(size < 32) |
452 | goto found_first; | 452 | goto found_first; |
453 | if(~tmp) | 453 | if(~tmp) |
454 | goto found_middle; | 454 | goto found_middle; |
455 | size -= 32; | 455 | size -= 32; |
456 | result += 32; | 456 | result += 32; |
457 | } | 457 | } |
458 | while(size & ~31UL) { | 458 | while(size & ~31UL) { |
459 | if(~(tmp = *(p++))) | 459 | if(~(tmp = *(p++))) |
460 | goto found_middle; | 460 | goto found_middle; |
461 | result += 32; | 461 | result += 32; |
462 | size -= 32; | 462 | size -= 32; |
463 | } | 463 | } |
464 | if(!size) | 464 | if(!size) |
465 | return result; | 465 | return result; |
466 | tmp = *p; | 466 | tmp = *p; |
467 | 467 | ||
468 | found_first: | 468 | found_first: |
469 | /* tmp is little endian, so we would have to swab the shift, | 469 | /* tmp is little endian, so we would have to swab the shift, |
470 | * see above. But then we have to swab tmp below for ffz, so | 470 | * see above. But then we have to swab tmp below for ffz, so |
471 | * we might as well do this here. | 471 | * we might as well do this here. |
472 | */ | 472 | */ |
473 | return result + ffz(__swab32(tmp) | (~0UL << size)); | 473 | return result + ffz(__swab32(tmp) | (~0UL << size)); |
474 | found_middle: | 474 | found_middle: |
475 | return result + ffz(__swab32(tmp)); | 475 | return result + ffz(__swab32(tmp)); |
476 | } | 476 | } |
477 | 477 | ||
478 | /* Bitmap functions for the minix filesystem. */ | 478 | /* Bitmap functions for the minix filesystem. */ |
479 | #define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) | 479 | #define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) |
480 | #define minix_set_bit(nr,addr) set_bit(nr,addr) | 480 | #define minix_set_bit(nr,addr) set_bit(nr,addr) |
481 | #define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) | 481 | #define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) |
482 | #define minix_test_bit(nr,addr) test_bit(nr,addr) | 482 | #define minix_test_bit(nr,addr) test_bit(nr,addr) |
483 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) | 483 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) |
484 | 484 | ||
485 | /** | 485 | /** |
486 | * hweightN - returns the hamming weight of a N-bit word | 486 | * hweightN - returns the hamming weight of a N-bit word |
487 | * @x: the word to weigh | 487 | * @x: the word to weigh |
488 | * | 488 | * |
489 | * The Hamming Weight of a number is the total number of bits set in it. | 489 | * The Hamming Weight of a number is the total number of bits set in it. |
490 | */ | 490 | */ |
491 | 491 | ||
492 | #define hweight32(x) generic_hweight32(x) | 492 | #define hweight32(x) generic_hweight32(x) |
493 | #define hweight16(x) generic_hweight16(x) | 493 | #define hweight16(x) generic_hweight16(x) |
494 | #define hweight8(x) generic_hweight8(x) | 494 | #define hweight8(x) generic_hweight8(x) |
495 | 495 | ||
496 | #endif /* __KERNEL__ */ | 496 | #endif /* __KERNEL__ */ |
497 | 497 | ||
498 | /* | 498 | /* |
499 | * fls: find last bit set. | 499 | * fls: find last bit set. |
500 | */ | 500 | */ |
501 | #define fls(x) generic_fls(x) | 501 | #define fls(x) generic_fls(x) |
502 | 502 | ||
503 | #endif /* _M68KNOMMU_BITOPS_H */ | 503 | #endif /* _M68KNOMMU_BITOPS_H */ |
504 | 504 |
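As a usage sketch (not part of the commit), the const-qualified prototype lets read-only bitmaps be searched without casts; the names and sizes below are illustrative.

    /* Illustrative scan of a read-only bitmap with the helpers above. */
    #include <asm/bitops.h>

    #define MAP_BITS 96	/* three 32-bit words on m68knommu */

    static const unsigned long ro_map[MAP_BITS / 32] = { ~0UL, 0x0000ffffUL, 0UL };

    static int first_free(void)
    {
    	/* Returns MAP_BITS if no zero bit exists below MAP_BITS. */
    	return find_first_zero_bit(ro_map, MAP_BITS);
    }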