Commit e2268c7129e6cab53a5dac1ab0790a547555e21a

Authored by Akinobu Mita
Committed by Linus Torvalds
1 parent 7e33db4e2e

[PATCH] bitops: sh: use generic bitops

- remove __{,test_and_}{set,clear,change}_bit() and test_bit()
- remove find_{next,first}{,_zero}_bit()
- remove generic_ffs()
- remove generic_hweight{32,16,8}()
- remove sched_find_first_bit()
- remove ext2_{set,clear,test,find_first_zero,find_next_zero}_bit()
- remove ext2_{set,clear}_bit_atomic()
- remove minix_{test_and_set,set,test_and_clear,test,find_first_zero}_bit()
- remove generic_fls()
- remove generic_fls64()

Signed-off-by: Akinobu Mita <mita@miraclelinux.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Kazumoto Kojima <kkojima@rr.iij4u.or.jp>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 2 changed files with 18 additions and 332 deletions
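For context: the hand-rolled sh primitives deleted below are replaced by
the common implementations under asm-generic/bitops/. A minimal sketch of
the generic non-atomic form, assuming the usual word-indexing scheme
(illustrative only, not the header's verbatim code):

	/* Sketch of generic non-atomic bitops over unsigned long words;
	 * illustrative, not asm-generic/bitops/non-atomic.h verbatim. */
	#define BITS_PER_LONG (8 * sizeof(unsigned long))

	static inline void __set_bit(int nr, volatile unsigned long *addr)
	{
		unsigned long mask = 1UL << (nr % BITS_PER_LONG);
		volatile unsigned long *p = addr + nr / BITS_PER_LONG;

		*p |= mask;	/* plain RMW: no atomicity, no barriers */
	}

	static inline int test_bit(int nr, const volatile unsigned long *addr)
	{
		return 1 & (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG));
	}

The visible difference from the removed sh versions is the pointer type:
the generic headers operate on unsigned long * rather than void *, which
is what lets one implementation serve every architecture's bitmaps.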

arch/sh/Kconfig
... ... @@ -21,6 +21,14 @@
21 21 config RWSEM_XCHGADD_ALGORITHM
22 22 bool
23 23  
  24 +config GENERIC_FIND_NEXT_BIT
  25 + bool
  26 + default y
  27 +
  28 +config GENERIC_HWEIGHT
  29 + bool
  30 + default y
  31 +
24 32 config GENERIC_HARDIRQS
25 33 bool
26 34 default y
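The two new symbols switch sh over to the library routines:
GENERIC_FIND_NEXT_BIT builds the shared find_next_bit()/find_next_zero_bit()
in lib/, and GENERIC_HWEIGHT builds the shared population-count helpers.
The parallel-sum algorithm behind the generic hweight32 looks roughly
like this (a sketch of the classic technique, not the lib/ code verbatim):

	/* Count set bits by summing adjacent 1-, 2-, then 4-bit fields,
	 * then adding the four byte counts with one multiply.
	 * A sketch of the classic algorithm, not lib/hweight.c verbatim. */
	static inline unsigned int hweight32_sketch(unsigned int w)
	{
		unsigned int res;

		res = w - ((w >> 1) & 0x55555555);		/* 2-bit sums */
		res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
		res = (res + (res >> 4)) & 0x0f0f0f0f;		/* byte sums */
		return (res * 0x01010101) >> 24;		/* total */
	}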
include/asm-sh/bitops.h
... ... @@ -19,16 +19,6 @@
19 19 local_irq_restore(flags);
20 20 }
21 21  
22   -static __inline__ void __set_bit(int nr, volatile void * addr)
23   -{
24   - int mask;
25   - volatile unsigned int *a = addr;
26   -
27   - a += nr >> 5;
28   - mask = 1 << (nr & 0x1f);
29   - *a |= mask;
30   -}
31   -
32 22 /*
33 23 * clear_bit() doesn't provide any barrier for the compiler.
34 24 */
... ... @@ -47,16 +37,6 @@
47 37 local_irq_restore(flags);
48 38 }
49 39  
50   -static __inline__ void __clear_bit(int nr, volatile void * addr)
51   -{
52   - int mask;
53   - volatile unsigned int *a = addr;
54   -
55   - a += nr >> 5;
56   - mask = 1 << (nr & 0x1f);
57   - *a &= ~mask;
58   -}
59   -
60 40 static __inline__ void change_bit(int nr, volatile void * addr)
61 41 {
62 42 int mask;
... ... @@ -70,16 +50,6 @@
70 50 local_irq_restore(flags);
71 51 }
72 52  
73   -static __inline__ void __change_bit(int nr, volatile void * addr)
74   -{
75   - int mask;
76   - volatile unsigned int *a = addr;
77   -
78   - a += nr >> 5;
79   - mask = 1 << (nr & 0x1f);
80   - *a ^= mask;
81   -}
82   -
83 53 static __inline__ int test_and_set_bit(int nr, volatile void * addr)
84 54 {
85 55 int mask, retval;
... ... @@ -96,19 +66,6 @@
96 66 return retval;
97 67 }
98 68  
99   -static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
100   -{
101   - int mask, retval;
102   - volatile unsigned int *a = addr;
103   -
104   - a += nr >> 5;
105   - mask = 1 << (nr & 0x1f);
106   - retval = (mask & *a) != 0;
107   - *a |= mask;
108   -
109   - return retval;
110   -}
111   -
112 69 static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
113 70 {
114 71 int mask, retval;
... ... @@ -125,19 +82,6 @@
125 82 return retval;
126 83 }
127 84  
128   -static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
129   -{
130   - int mask, retval;
131   - volatile unsigned int *a = addr;
132   -
133   - a += nr >> 5;
134   - mask = 1 << (nr & 0x1f);
135   - retval = (mask & *a) != 0;
136   - *a &= ~mask;
137   -
138   - return retval;
139   -}
140   -
141 85 static __inline__ int test_and_change_bit(int nr, volatile void * addr)
142 86 {
143 87 int mask, retval;
144 88  
... ... @@ -154,24 +98,8 @@
154 98 return retval;
155 99 }
156 100  
157   -static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
158   -{
159   - int mask, retval;
160   - volatile unsigned int *a = addr;
  101 +#include <asm-generic/bitops/non-atomic.h>
161 102  
162   - a += nr >> 5;
163   - mask = 1 << (nr & 0x1f);
164   - retval = (mask & *a) != 0;
165   - *a ^= mask;
166   -
167   - return retval;
168   -}
169   -
170   -static __inline__ int test_bit(int nr, const volatile void *addr)
171   -{
172   - return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
173   -}
174   -
175 103 static __inline__ unsigned long ffz(unsigned long word)
176 104 {
177 105 unsigned long result;
... ... @@ -206,265 +134,15 @@
206 134 return result;
207 135 }
208 136  
209   -/**
210   - * find_next_bit - find the next set bit in a memory region
211   - * @addr: The address to base the search on
212   - * @offset: The bitnumber to start searching at
213   - * @size: The maximum size to search
214   - */
215   -static __inline__ unsigned long find_next_bit(const unsigned long *addr,
216   - unsigned long size, unsigned long offset)
217   -{
218   - unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
219   - unsigned int result = offset & ~31UL;
220   - unsigned int tmp;
221   -
222   - if (offset >= size)
223   - return size;
224   - size -= result;
225   - offset &= 31UL;
226   - if (offset) {
227   - tmp = *p++;
228   - tmp &= ~0UL << offset;
229   - if (size < 32)
230   - goto found_first;
231   - if (tmp)
232   - goto found_middle;
233   - size -= 32;
234   - result += 32;
235   - }
236   - while (size >= 32) {
237   - if ((tmp = *p++) != 0)
238   - goto found_middle;
239   - result += 32;
240   - size -= 32;
241   - }
242   - if (!size)
243   - return result;
244   - tmp = *p;
245   -
246   -found_first:
247   - tmp &= ~0UL >> (32 - size);
248   - if (tmp == 0UL) /* Are any bits set? */
249   - return result + size; /* Nope. */
250   -found_middle:
251   - return result + __ffs(tmp);
252   -}
253   -
254   -/**
255   - * find_first_bit - find the first set bit in a memory region
256   - * @addr: The address to start the search at
257   - * @size: The maximum size to search
258   - *
259   - * Returns the bit-number of the first set bit, not the number of the byte
260   - * containing a bit.
261   - */
262   -#define find_first_bit(addr, size) \
263   - find_next_bit((addr), (size), 0)
264   -
265   -static __inline__ int find_next_zero_bit(const unsigned long *addr, int size, int offset)
266   -{
267   - const unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
268   - unsigned long result = offset & ~31UL;
269   - unsigned long tmp;
270   -
271   - if (offset >= size)
272   - return size;
273   - size -= result;
274   - offset &= 31UL;
275   - if (offset) {
276   - tmp = *(p++);
277   - tmp |= ~0UL >> (32-offset);
278   - if (size < 32)
279   - goto found_first;
280   - if (~tmp)
281   - goto found_middle;
282   - size -= 32;
283   - result += 32;
284   - }
285   - while (size & ~31UL) {
286   - if (~(tmp = *(p++)))
287   - goto found_middle;
288   - result += 32;
289   - size -= 32;
290   - }
291   - if (!size)
292   - return result;
293   - tmp = *p;
294   -
295   -found_first:
296   - tmp |= ~0UL << size;
297   -found_middle:
298   - return result + ffz(tmp);
299   -}
300   -
301   -#define find_first_zero_bit(addr, size) \
302   - find_next_zero_bit((addr), (size), 0)
303   -
304   -/*
305   - * ffs: find first bit set. This is defined the same way as
306   - * the libc and compiler builtin ffs routines, therefore
307   - * differs in spirit from the above ffz (man ffs).
308   - */
309   -
310   -#define ffs(x) generic_ffs(x)
311   -
312   -/*
313   - * hweightN: returns the hamming weight (i.e. the number
314   - * of bits set) of a N-bit word
315   - */
316   -
317   -#define hweight32(x) generic_hweight32(x)
318   -#define hweight16(x) generic_hweight16(x)
319   -#define hweight8(x) generic_hweight8(x)
320   -
321   -/*
322   - * Every architecture must define this function. It's the fastest
323   - * way of searching a 140-bit bitmap where the first 100 bits are
324   - * unlikely to be set. It's guaranteed that at least one of the 140
325   - * bits is cleared.
326   - */
327   -
328   -static inline int sched_find_first_bit(const unsigned long *b)
329   -{
330   - if (unlikely(b[0]))
331   - return __ffs(b[0]);
332   - if (unlikely(b[1]))
333   - return __ffs(b[1]) + 32;
334   - if (unlikely(b[2]))
335   - return __ffs(b[2]) + 64;
336   - if (b[3])
337   - return __ffs(b[3]) + 96;
338   - return __ffs(b[4]) + 128;
339   -}
340   -
341   -#ifdef __LITTLE_ENDIAN__
342   -#define ext2_set_bit(nr, addr) __test_and_set_bit((nr), (addr))
343   -#define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr), (addr))
344   -#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
345   -#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
346   -#define ext2_find_next_zero_bit(addr, size, offset) \
347   - find_next_zero_bit((unsigned long *)(addr), (size), (offset))
348   -#else
349   -static __inline__ int ext2_set_bit(int nr, volatile void * addr)
350   -{
351   - int mask, retval;
352   - volatile unsigned char *ADDR = (unsigned char *) addr;
353   -
354   - ADDR += nr >> 3;
355   - mask = 1 << (nr & 0x07);
356   - retval = (mask & *ADDR) != 0;
357   - *ADDR |= mask;
358   - return retval;
359   -}
360   -
361   -static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
362   -{
363   - int mask, retval;
364   - volatile unsigned char *ADDR = (unsigned char *) addr;
365   -
366   - ADDR += nr >> 3;
367   - mask = 1 << (nr & 0x07);
368   - retval = (mask & *ADDR) != 0;
369   - *ADDR &= ~mask;
370   - return retval;
371   -}
372   -
373   -static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
374   -{
375   - int mask;
376   - const volatile unsigned char *ADDR = (const unsigned char *) addr;
377   -
378   - ADDR += nr >> 3;
379   - mask = 1 << (nr & 0x07);
380   - return ((mask & *ADDR) != 0);
381   -}
382   -
383   -#define ext2_find_first_zero_bit(addr, size) \
384   - ext2_find_next_zero_bit((addr), (size), 0)
385   -
386   -static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
387   -{
388   - unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
389   - unsigned long result = offset & ~31UL;
390   - unsigned long tmp;
391   -
392   - if (offset >= size)
393   - return size;
394   - size -= result;
395   - offset &= 31UL;
396   - if(offset) {
397   - /* We hold the little endian value in tmp, but then the
398   - * shift is illegal. So we could keep a big endian value
399   - * in tmp, like this:
400   - *
401   - * tmp = __swab32(*(p++));
402   - * tmp |= ~0UL >> (32-offset);
403   - *
404   - * but this would decrease performance, so we change the
405   - * shift:
406   - */
407   - tmp = *(p++);
408   - tmp |= __swab32(~0UL >> (32-offset));
409   - if(size < 32)
410   - goto found_first;
411   - if(~tmp)
412   - goto found_middle;
413   - size -= 32;
414   - result += 32;
415   - }
416   - while(size & ~31UL) {
417   - if(~(tmp = *(p++)))
418   - goto found_middle;
419   - result += 32;
420   - size -= 32;
421   - }
422   - if(!size)
423   - return result;
424   - tmp = *p;
425   -
426   -found_first:
427   - /* tmp is little endian, so we would have to swab the shift,
428   - * see above. But then we have to swab tmp below for ffz, so
429   - * we might as well do this here.
430   - */
431   - return result + ffz(__swab32(tmp) | (~0UL << size));
432   -found_middle:
433   - return result + ffz(__swab32(tmp));
434   -}
435   -#endif
436   -
437   -#define ext2_set_bit_atomic(lock, nr, addr) \
438   - ({ \
439   - int ret; \
440   - spin_lock(lock); \
441   - ret = ext2_set_bit((nr), (addr)); \
442   - spin_unlock(lock); \
443   - ret; \
444   - })
445   -
446   -#define ext2_clear_bit_atomic(lock, nr, addr) \
447   - ({ \
448   - int ret; \
449   - spin_lock(lock); \
450   - ret = ext2_clear_bit((nr), (addr)); \
451   - spin_unlock(lock); \
452   - ret; \
453   - })
454   -
455   -/* Bitmap functions for the minix filesystem. */
456   -#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
457   -#define minix_set_bit(nr,addr) __set_bit(nr,addr)
458   -#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
459   -#define minix_test_bit(nr,addr) test_bit(nr,addr)
460   -#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
461   -
462   -/*
463   - * fls: find last bit set.
464   - */
465   -
466   -#define fls(x) generic_fls(x)
467   -#define fls64(x) generic_fls64(x)
  137 +#include <asm-generic/bitops/find.h>
  138 +#include <asm-generic/bitops/ffs.h>
  139 +#include <asm-generic/bitops/hweight.h>
  140 +#include <asm-generic/bitops/sched.h>
  141 +#include <asm-generic/bitops/ext2-non-atomic.h>
  142 +#include <asm-generic/bitops/ext2-atomic.h>
  143 +#include <asm-generic/bitops/minix.h>
  144 +#include <asm-generic/bitops/fls.h>
  145 +#include <asm-generic/bitops/fls64.h>
468 146  
469 147 #endif /* __KERNEL__ */
470 148
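Since the generic headers export the same names, callers see no change
from this conversion. A hypothetical usage sketch (nothing here is from
the patch itself):

	/* Hypothetical caller: the bitops API is unchanged. */
	#include <linux/bitops.h>

	static unsigned long map[4];	/* 128-bit bitmap */

	static int example(void)
	{
		__set_bit(3, map);	/* now asm-generic, was sh-specific */
		if (!test_bit(3, map))
			return -1;
		/* bit 0 is still clear, so find_next_zero_bit() returns 0 */
		return find_next_zero_bit(map, 128, 0) + hweight32(0xf0);
	}

The interrupt-masking atomic set/clear/change family and ffz() stay in
the sh header; everything removed above is now supplied by the generic
includes.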