Commit 61f2e7b0f474225b4226772830ae4b29a3a21f8d
Committed by: Linus Torvalds
1 parent: 3fca5af786
Exists in: master and 4 other branches
bitops: remove minix bitops from asm/bitops.h
The minix bit operations are only used by the minix filesystem and are of no use to other modules, because the byte order of the inode and block bitmaps differs between architectures:

  m68k: big-endian 16-bit indexed bitmaps
  h8300, microblaze, s390, sparc, m68knommu: big-endian 32- or 64-bit indexed bitmaps
  m32r, mips, sh, xtensa: big-endian 32- or 64-bit indexed bitmaps in big-endian mode; little-endian bitmaps in little-endian mode
  others: little-endian bitmaps

In order to move the minix bit operations from asm/bitops.h into architecture-independent code in the minix filesystem, this provides two config options.

CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED is only selected by m68k. CONFIG_MINIX_FS_NATIVE_ENDIAN is selected by the architectures which use native byte order bitmaps (h8300, microblaze, s390, sparc, m68knommu, m32r, mips, sh, xtensa). The architectures which always use little-endian bitmaps select neither option.

Finally, we can remove the minix bit operations from asm/bitops.h for all architectures.

Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Greg Ungerer <gerg@uclinux.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: Andreas Schwab <schwab@linux-m68k.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Michal Simek <monstr@monstr.eu>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Hirokazu Takata <takata@linux-m32r.org>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Cc: Chris Zankel <chris@zankel.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
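For context, the rough shape of the architecture-independent definitions once they live in fs/minix/minix.h is sketched below. This is a sketch based on the commit message, not the exact hunk from this commit: the config option names come from the message above, the minix_* helper names and the *_le helpers appear in the per-architecture headers being changed, and the m68k big-endian 16-bit indexed case is only indicated, not spelled out.

/*
 * Sketch of the architecture-independent minix bit operations that
 * replace the per-architecture definitions.  Based on the description
 * in the commit message; the exact code in fs/minix/minix.h may differ.
 */
#if defined(CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED)

/* m68k: big-endian 16-bit indexed bitmaps; handled by dedicated helpers
 * that walk the bitmap in 16-bit words (omitted in this sketch). */

#elif defined(CONFIG_MINIX_FS_NATIVE_ENDIAN)

/* Bitmaps use native byte order, so the plain non-atomic bitops apply. */
#define minix_test_and_set_bit(nr, addr)	\
	__test_and_set_bit((nr), (unsigned long *)(addr))
#define minix_set_bit(nr, addr)			\
	__set_bit((nr), (unsigned long *)(addr))
#define minix_test_and_clear_bit(nr, addr)	\
	__test_and_clear_bit((nr), (unsigned long *)(addr))
#define minix_test_bit(nr, addr)		\
	test_bit((nr), (unsigned long *)(addr))
#define minix_find_first_zero_bit(addr, size)	\
	find_first_zero_bit((unsigned long *)(addr), (size))

#else

/* Default: little-endian bitmaps, via the generic *_le helpers. */
#define minix_test_and_set_bit(nr, addr)	\
	__test_and_set_bit_le((nr), (unsigned long *)(addr))
#define minix_set_bit(nr, addr)			\
	__set_bit_le((nr), (unsigned long *)(addr))
#define minix_test_and_clear_bit(nr, addr)	\
	__test_and_clear_bit_le((nr), (unsigned long *)(addr))
#define minix_test_bit(nr, addr)		\
	test_bit_le((nr), (unsigned long *)(addr))
#define minix_find_first_zero_bit(addr, size)	\
	find_first_zero_bit_le((addr), (size))

#endif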
Showing 27 changed files with 82 additions and 110 deletions

Inline Diff
- arch/alpha/include/asm/bitops.h
- arch/arm/include/asm/bitops.h
- arch/avr32/include/asm/bitops.h
- arch/blackfin/include/asm/bitops.h
- arch/cris/include/asm/bitops.h
- arch/frv/include/asm/bitops.h
- arch/h8300/include/asm/bitops.h
- arch/ia64/include/asm/bitops.h
- arch/m32r/include/asm/bitops.h
- arch/m68k/include/asm/bitops_mm.h
- arch/m68k/include/asm/bitops_no.h
- arch/mips/include/asm/bitops.h
- arch/mn10300/include/asm/bitops.h
- arch/parisc/include/asm/bitops.h
- arch/powerpc/include/asm/bitops.h
- arch/s390/include/asm/bitops.h
- arch/sh/include/asm/bitops.h
- arch/sparc/include/asm/bitops_32.h
- arch/sparc/include/asm/bitops_64.h
- arch/tile/include/asm/bitops.h
- arch/x86/include/asm/bitops.h
- arch/xtensa/include/asm/bitops.h
- fs/minix/Kconfig
- fs/minix/minix.h
- include/asm-generic/bitops.h
- include/asm-generic/bitops/minix-le.h
- include/asm-generic/bitops/minix.h
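Two of the changed files above, fs/minix/Kconfig and fs/minix/minix.h, carry the filesystem-side half of the change. As a rough illustration of how the minix code ends up consuming the relocated helpers (a sketch, not taken from this diff: the function name, error message, and surrounding logic are illustrative), freeing an inode bit looks roughly like this:

/* Sketch only: shaped after the kind of code in fs/minix/bitmap.c.
 * Assumes the minix_* helpers defined in fs/minix/minix.h (see the
 * sketch after the commit message above). */
#include <linux/kernel.h>
#include <linux/buffer_head.h>
#include "minix.h"	/* hypothetical local include for the helpers */

static void minix_clear_inode_bit(struct buffer_head *bh, unsigned long bit)
{
	/* The on-disk bitmap byte order is now resolved entirely inside
	 * fs/minix/minix.h via the new Kconfig options; callers just use
	 * the minix_* helpers and never touch asm/bitops.h directly. */
	if (!minix_test_and_clear_bit(bit, bh->b_data))
		printk(KERN_WARNING "minix: bit %lu already cleared\n", bit);
	mark_buffer_dirty(bh);
}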
arch/alpha/include/asm/bitops.h
| 1 | #ifndef _ALPHA_BITOPS_H | 1 | #ifndef _ALPHA_BITOPS_H |
| 2 | #define _ALPHA_BITOPS_H | 2 | #define _ALPHA_BITOPS_H |
| 3 | 3 | ||
| 4 | #ifndef _LINUX_BITOPS_H | 4 | #ifndef _LINUX_BITOPS_H |
| 5 | #error only <linux/bitops.h> can be included directly | 5 | #error only <linux/bitops.h> can be included directly |
| 6 | #endif | 6 | #endif |
| 7 | 7 | ||
| 8 | #include <asm/compiler.h> | 8 | #include <asm/compiler.h> |
| 9 | #include <asm/barrier.h> | 9 | #include <asm/barrier.h> |
| 10 | 10 | ||
| 11 | /* | 11 | /* |
| 12 | * Copyright 1994, Linus Torvalds. | 12 | * Copyright 1994, Linus Torvalds. |
| 13 | */ | 13 | */ |
| 14 | 14 | ||
| 15 | /* | 15 | /* |
| 16 | * These have to be done with inline assembly: that way the bit-setting | 16 | * These have to be done with inline assembly: that way the bit-setting |
| 17 | * is guaranteed to be atomic. All bit operations return 0 if the bit | 17 | * is guaranteed to be atomic. All bit operations return 0 if the bit |
| 18 | * was cleared before the operation and != 0 if it was not. | 18 | * was cleared before the operation and != 0 if it was not. |
| 19 | * | 19 | * |
| 20 | * To get proper branch prediction for the main line, we must branch | 20 | * To get proper branch prediction for the main line, we must branch |
| 21 | * forward to code at the end of this object's .text section, then | 21 | * forward to code at the end of this object's .text section, then |
| 22 | * branch back to restart the operation. | 22 | * branch back to restart the operation. |
| 23 | * | 23 | * |
| 24 | * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1). | 24 | * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1). |
| 25 | */ | 25 | */ |
| 26 | 26 | ||
| 27 | static inline void | 27 | static inline void |
| 28 | set_bit(unsigned long nr, volatile void * addr) | 28 | set_bit(unsigned long nr, volatile void * addr) |
| 29 | { | 29 | { |
| 30 | unsigned long temp; | 30 | unsigned long temp; |
| 31 | int *m = ((int *) addr) + (nr >> 5); | 31 | int *m = ((int *) addr) + (nr >> 5); |
| 32 | 32 | ||
| 33 | __asm__ __volatile__( | 33 | __asm__ __volatile__( |
| 34 | "1: ldl_l %0,%3\n" | 34 | "1: ldl_l %0,%3\n" |
| 35 | " bis %0,%2,%0\n" | 35 | " bis %0,%2,%0\n" |
| 36 | " stl_c %0,%1\n" | 36 | " stl_c %0,%1\n" |
| 37 | " beq %0,2f\n" | 37 | " beq %0,2f\n" |
| 38 | ".subsection 2\n" | 38 | ".subsection 2\n" |
| 39 | "2: br 1b\n" | 39 | "2: br 1b\n" |
| 40 | ".previous" | 40 | ".previous" |
| 41 | :"=&r" (temp), "=m" (*m) | 41 | :"=&r" (temp), "=m" (*m) |
| 42 | :"Ir" (1UL << (nr & 31)), "m" (*m)); | 42 | :"Ir" (1UL << (nr & 31)), "m" (*m)); |
| 43 | } | 43 | } |
| 44 | 44 | ||
| 45 | /* | 45 | /* |
| 46 | * WARNING: non atomic version. | 46 | * WARNING: non atomic version. |
| 47 | */ | 47 | */ |
| 48 | static inline void | 48 | static inline void |
| 49 | __set_bit(unsigned long nr, volatile void * addr) | 49 | __set_bit(unsigned long nr, volatile void * addr) |
| 50 | { | 50 | { |
| 51 | int *m = ((int *) addr) + (nr >> 5); | 51 | int *m = ((int *) addr) + (nr >> 5); |
| 52 | 52 | ||
| 53 | *m |= 1 << (nr & 31); | 53 | *m |= 1 << (nr & 31); |
| 54 | } | 54 | } |
| 55 | 55 | ||
| 56 | #define smp_mb__before_clear_bit() smp_mb() | 56 | #define smp_mb__before_clear_bit() smp_mb() |
| 57 | #define smp_mb__after_clear_bit() smp_mb() | 57 | #define smp_mb__after_clear_bit() smp_mb() |
| 58 | 58 | ||
| 59 | static inline void | 59 | static inline void |
| 60 | clear_bit(unsigned long nr, volatile void * addr) | 60 | clear_bit(unsigned long nr, volatile void * addr) |
| 61 | { | 61 | { |
| 62 | unsigned long temp; | 62 | unsigned long temp; |
| 63 | int *m = ((int *) addr) + (nr >> 5); | 63 | int *m = ((int *) addr) + (nr >> 5); |
| 64 | 64 | ||
| 65 | __asm__ __volatile__( | 65 | __asm__ __volatile__( |
| 66 | "1: ldl_l %0,%3\n" | 66 | "1: ldl_l %0,%3\n" |
| 67 | " bic %0,%2,%0\n" | 67 | " bic %0,%2,%0\n" |
| 68 | " stl_c %0,%1\n" | 68 | " stl_c %0,%1\n" |
| 69 | " beq %0,2f\n" | 69 | " beq %0,2f\n" |
| 70 | ".subsection 2\n" | 70 | ".subsection 2\n" |
| 71 | "2: br 1b\n" | 71 | "2: br 1b\n" |
| 72 | ".previous" | 72 | ".previous" |
| 73 | :"=&r" (temp), "=m" (*m) | 73 | :"=&r" (temp), "=m" (*m) |
| 74 | :"Ir" (1UL << (nr & 31)), "m" (*m)); | 74 | :"Ir" (1UL << (nr & 31)), "m" (*m)); |
| 75 | } | 75 | } |
| 76 | 76 | ||
| 77 | static inline void | 77 | static inline void |
| 78 | clear_bit_unlock(unsigned long nr, volatile void * addr) | 78 | clear_bit_unlock(unsigned long nr, volatile void * addr) |
| 79 | { | 79 | { |
| 80 | smp_mb(); | 80 | smp_mb(); |
| 81 | clear_bit(nr, addr); | 81 | clear_bit(nr, addr); |
| 82 | } | 82 | } |
| 83 | 83 | ||
| 84 | /* | 84 | /* |
| 85 | * WARNING: non atomic version. | 85 | * WARNING: non atomic version. |
| 86 | */ | 86 | */ |
| 87 | static __inline__ void | 87 | static __inline__ void |
| 88 | __clear_bit(unsigned long nr, volatile void * addr) | 88 | __clear_bit(unsigned long nr, volatile void * addr) |
| 89 | { | 89 | { |
| 90 | int *m = ((int *) addr) + (nr >> 5); | 90 | int *m = ((int *) addr) + (nr >> 5); |
| 91 | 91 | ||
| 92 | *m &= ~(1 << (nr & 31)); | 92 | *m &= ~(1 << (nr & 31)); |
| 93 | } | 93 | } |
| 94 | 94 | ||
| 95 | static inline void | 95 | static inline void |
| 96 | __clear_bit_unlock(unsigned long nr, volatile void * addr) | 96 | __clear_bit_unlock(unsigned long nr, volatile void * addr) |
| 97 | { | 97 | { |
| 98 | smp_mb(); | 98 | smp_mb(); |
| 99 | __clear_bit(nr, addr); | 99 | __clear_bit(nr, addr); |
| 100 | } | 100 | } |
| 101 | 101 | ||
| 102 | static inline void | 102 | static inline void |
| 103 | change_bit(unsigned long nr, volatile void * addr) | 103 | change_bit(unsigned long nr, volatile void * addr) |
| 104 | { | 104 | { |
| 105 | unsigned long temp; | 105 | unsigned long temp; |
| 106 | int *m = ((int *) addr) + (nr >> 5); | 106 | int *m = ((int *) addr) + (nr >> 5); |
| 107 | 107 | ||
| 108 | __asm__ __volatile__( | 108 | __asm__ __volatile__( |
| 109 | "1: ldl_l %0,%3\n" | 109 | "1: ldl_l %0,%3\n" |
| 110 | " xor %0,%2,%0\n" | 110 | " xor %0,%2,%0\n" |
| 111 | " stl_c %0,%1\n" | 111 | " stl_c %0,%1\n" |
| 112 | " beq %0,2f\n" | 112 | " beq %0,2f\n" |
| 113 | ".subsection 2\n" | 113 | ".subsection 2\n" |
| 114 | "2: br 1b\n" | 114 | "2: br 1b\n" |
| 115 | ".previous" | 115 | ".previous" |
| 116 | :"=&r" (temp), "=m" (*m) | 116 | :"=&r" (temp), "=m" (*m) |
| 117 | :"Ir" (1UL << (nr & 31)), "m" (*m)); | 117 | :"Ir" (1UL << (nr & 31)), "m" (*m)); |
| 118 | } | 118 | } |
| 119 | 119 | ||
| 120 | /* | 120 | /* |
| 121 | * WARNING: non atomic version. | 121 | * WARNING: non atomic version. |
| 122 | */ | 122 | */ |
| 123 | static __inline__ void | 123 | static __inline__ void |
| 124 | __change_bit(unsigned long nr, volatile void * addr) | 124 | __change_bit(unsigned long nr, volatile void * addr) |
| 125 | { | 125 | { |
| 126 | int *m = ((int *) addr) + (nr >> 5); | 126 | int *m = ((int *) addr) + (nr >> 5); |
| 127 | 127 | ||
| 128 | *m ^= 1 << (nr & 31); | 128 | *m ^= 1 << (nr & 31); |
| 129 | } | 129 | } |
| 130 | 130 | ||
| 131 | static inline int | 131 | static inline int |
| 132 | test_and_set_bit(unsigned long nr, volatile void *addr) | 132 | test_and_set_bit(unsigned long nr, volatile void *addr) |
| 133 | { | 133 | { |
| 134 | unsigned long oldbit; | 134 | unsigned long oldbit; |
| 135 | unsigned long temp; | 135 | unsigned long temp; |
| 136 | int *m = ((int *) addr) + (nr >> 5); | 136 | int *m = ((int *) addr) + (nr >> 5); |
| 137 | 137 | ||
| 138 | __asm__ __volatile__( | 138 | __asm__ __volatile__( |
| 139 | #ifdef CONFIG_SMP | 139 | #ifdef CONFIG_SMP |
| 140 | " mb\n" | 140 | " mb\n" |
| 141 | #endif | 141 | #endif |
| 142 | "1: ldl_l %0,%4\n" | 142 | "1: ldl_l %0,%4\n" |
| 143 | " and %0,%3,%2\n" | 143 | " and %0,%3,%2\n" |
| 144 | " bne %2,2f\n" | 144 | " bne %2,2f\n" |
| 145 | " xor %0,%3,%0\n" | 145 | " xor %0,%3,%0\n" |
| 146 | " stl_c %0,%1\n" | 146 | " stl_c %0,%1\n" |
| 147 | " beq %0,3f\n" | 147 | " beq %0,3f\n" |
| 148 | "2:\n" | 148 | "2:\n" |
| 149 | #ifdef CONFIG_SMP | 149 | #ifdef CONFIG_SMP |
| 150 | " mb\n" | 150 | " mb\n" |
| 151 | #endif | 151 | #endif |
| 152 | ".subsection 2\n" | 152 | ".subsection 2\n" |
| 153 | "3: br 1b\n" | 153 | "3: br 1b\n" |
| 154 | ".previous" | 154 | ".previous" |
| 155 | :"=&r" (temp), "=m" (*m), "=&r" (oldbit) | 155 | :"=&r" (temp), "=m" (*m), "=&r" (oldbit) |
| 156 | :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); | 156 | :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); |
| 157 | 157 | ||
| 158 | return oldbit != 0; | 158 | return oldbit != 0; |
| 159 | } | 159 | } |
| 160 | 160 | ||
| 161 | static inline int | 161 | static inline int |
| 162 | test_and_set_bit_lock(unsigned long nr, volatile void *addr) | 162 | test_and_set_bit_lock(unsigned long nr, volatile void *addr) |
| 163 | { | 163 | { |
| 164 | unsigned long oldbit; | 164 | unsigned long oldbit; |
| 165 | unsigned long temp; | 165 | unsigned long temp; |
| 166 | int *m = ((int *) addr) + (nr >> 5); | 166 | int *m = ((int *) addr) + (nr >> 5); |
| 167 | 167 | ||
| 168 | __asm__ __volatile__( | 168 | __asm__ __volatile__( |
| 169 | "1: ldl_l %0,%4\n" | 169 | "1: ldl_l %0,%4\n" |
| 170 | " and %0,%3,%2\n" | 170 | " and %0,%3,%2\n" |
| 171 | " bne %2,2f\n" | 171 | " bne %2,2f\n" |
| 172 | " xor %0,%3,%0\n" | 172 | " xor %0,%3,%0\n" |
| 173 | " stl_c %0,%1\n" | 173 | " stl_c %0,%1\n" |
| 174 | " beq %0,3f\n" | 174 | " beq %0,3f\n" |
| 175 | "2:\n" | 175 | "2:\n" |
| 176 | #ifdef CONFIG_SMP | 176 | #ifdef CONFIG_SMP |
| 177 | " mb\n" | 177 | " mb\n" |
| 178 | #endif | 178 | #endif |
| 179 | ".subsection 2\n" | 179 | ".subsection 2\n" |
| 180 | "3: br 1b\n" | 180 | "3: br 1b\n" |
| 181 | ".previous" | 181 | ".previous" |
| 182 | :"=&r" (temp), "=m" (*m), "=&r" (oldbit) | 182 | :"=&r" (temp), "=m" (*m), "=&r" (oldbit) |
| 183 | :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); | 183 | :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); |
| 184 | 184 | ||
| 185 | return oldbit != 0; | 185 | return oldbit != 0; |
| 186 | } | 186 | } |
| 187 | 187 | ||
| 188 | /* | 188 | /* |
| 189 | * WARNING: non atomic version. | 189 | * WARNING: non atomic version. |
| 190 | */ | 190 | */ |
| 191 | static inline int | 191 | static inline int |
| 192 | __test_and_set_bit(unsigned long nr, volatile void * addr) | 192 | __test_and_set_bit(unsigned long nr, volatile void * addr) |
| 193 | { | 193 | { |
| 194 | unsigned long mask = 1 << (nr & 0x1f); | 194 | unsigned long mask = 1 << (nr & 0x1f); |
| 195 | int *m = ((int *) addr) + (nr >> 5); | 195 | int *m = ((int *) addr) + (nr >> 5); |
| 196 | int old = *m; | 196 | int old = *m; |
| 197 | 197 | ||
| 198 | *m = old | mask; | 198 | *m = old | mask; |
| 199 | return (old & mask) != 0; | 199 | return (old & mask) != 0; |
| 200 | } | 200 | } |
| 201 | 201 | ||
| 202 | static inline int | 202 | static inline int |
| 203 | test_and_clear_bit(unsigned long nr, volatile void * addr) | 203 | test_and_clear_bit(unsigned long nr, volatile void * addr) |
| 204 | { | 204 | { |
| 205 | unsigned long oldbit; | 205 | unsigned long oldbit; |
| 206 | unsigned long temp; | 206 | unsigned long temp; |
| 207 | int *m = ((int *) addr) + (nr >> 5); | 207 | int *m = ((int *) addr) + (nr >> 5); |
| 208 | 208 | ||
| 209 | __asm__ __volatile__( | 209 | __asm__ __volatile__( |
| 210 | #ifdef CONFIG_SMP | 210 | #ifdef CONFIG_SMP |
| 211 | " mb\n" | 211 | " mb\n" |
| 212 | #endif | 212 | #endif |
| 213 | "1: ldl_l %0,%4\n" | 213 | "1: ldl_l %0,%4\n" |
| 214 | " and %0,%3,%2\n" | 214 | " and %0,%3,%2\n" |
| 215 | " beq %2,2f\n" | 215 | " beq %2,2f\n" |
| 216 | " xor %0,%3,%0\n" | 216 | " xor %0,%3,%0\n" |
| 217 | " stl_c %0,%1\n" | 217 | " stl_c %0,%1\n" |
| 218 | " beq %0,3f\n" | 218 | " beq %0,3f\n" |
| 219 | "2:\n" | 219 | "2:\n" |
| 220 | #ifdef CONFIG_SMP | 220 | #ifdef CONFIG_SMP |
| 221 | " mb\n" | 221 | " mb\n" |
| 222 | #endif | 222 | #endif |
| 223 | ".subsection 2\n" | 223 | ".subsection 2\n" |
| 224 | "3: br 1b\n" | 224 | "3: br 1b\n" |
| 225 | ".previous" | 225 | ".previous" |
| 226 | :"=&r" (temp), "=m" (*m), "=&r" (oldbit) | 226 | :"=&r" (temp), "=m" (*m), "=&r" (oldbit) |
| 227 | :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); | 227 | :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); |
| 228 | 228 | ||
| 229 | return oldbit != 0; | 229 | return oldbit != 0; |
| 230 | } | 230 | } |
| 231 | 231 | ||
| 232 | /* | 232 | /* |
| 233 | * WARNING: non atomic version. | 233 | * WARNING: non atomic version. |
| 234 | */ | 234 | */ |
| 235 | static inline int | 235 | static inline int |
| 236 | __test_and_clear_bit(unsigned long nr, volatile void * addr) | 236 | __test_and_clear_bit(unsigned long nr, volatile void * addr) |
| 237 | { | 237 | { |
| 238 | unsigned long mask = 1 << (nr & 0x1f); | 238 | unsigned long mask = 1 << (nr & 0x1f); |
| 239 | int *m = ((int *) addr) + (nr >> 5); | 239 | int *m = ((int *) addr) + (nr >> 5); |
| 240 | int old = *m; | 240 | int old = *m; |
| 241 | 241 | ||
| 242 | *m = old & ~mask; | 242 | *m = old & ~mask; |
| 243 | return (old & mask) != 0; | 243 | return (old & mask) != 0; |
| 244 | } | 244 | } |
| 245 | 245 | ||
| 246 | static inline int | 246 | static inline int |
| 247 | test_and_change_bit(unsigned long nr, volatile void * addr) | 247 | test_and_change_bit(unsigned long nr, volatile void * addr) |
| 248 | { | 248 | { |
| 249 | unsigned long oldbit; | 249 | unsigned long oldbit; |
| 250 | unsigned long temp; | 250 | unsigned long temp; |
| 251 | int *m = ((int *) addr) + (nr >> 5); | 251 | int *m = ((int *) addr) + (nr >> 5); |
| 252 | 252 | ||
| 253 | __asm__ __volatile__( | 253 | __asm__ __volatile__( |
| 254 | #ifdef CONFIG_SMP | 254 | #ifdef CONFIG_SMP |
| 255 | " mb\n" | 255 | " mb\n" |
| 256 | #endif | 256 | #endif |
| 257 | "1: ldl_l %0,%4\n" | 257 | "1: ldl_l %0,%4\n" |
| 258 | " and %0,%3,%2\n" | 258 | " and %0,%3,%2\n" |
| 259 | " xor %0,%3,%0\n" | 259 | " xor %0,%3,%0\n" |
| 260 | " stl_c %0,%1\n" | 260 | " stl_c %0,%1\n" |
| 261 | " beq %0,3f\n" | 261 | " beq %0,3f\n" |
| 262 | #ifdef CONFIG_SMP | 262 | #ifdef CONFIG_SMP |
| 263 | " mb\n" | 263 | " mb\n" |
| 264 | #endif | 264 | #endif |
| 265 | ".subsection 2\n" | 265 | ".subsection 2\n" |
| 266 | "3: br 1b\n" | 266 | "3: br 1b\n" |
| 267 | ".previous" | 267 | ".previous" |
| 268 | :"=&r" (temp), "=m" (*m), "=&r" (oldbit) | 268 | :"=&r" (temp), "=m" (*m), "=&r" (oldbit) |
| 269 | :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); | 269 | :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); |
| 270 | 270 | ||
| 271 | return oldbit != 0; | 271 | return oldbit != 0; |
| 272 | } | 272 | } |
| 273 | 273 | ||
| 274 | /* | 274 | /* |
| 275 | * WARNING: non atomic version. | 275 | * WARNING: non atomic version. |
| 276 | */ | 276 | */ |
| 277 | static __inline__ int | 277 | static __inline__ int |
| 278 | __test_and_change_bit(unsigned long nr, volatile void * addr) | 278 | __test_and_change_bit(unsigned long nr, volatile void * addr) |
| 279 | { | 279 | { |
| 280 | unsigned long mask = 1 << (nr & 0x1f); | 280 | unsigned long mask = 1 << (nr & 0x1f); |
| 281 | int *m = ((int *) addr) + (nr >> 5); | 281 | int *m = ((int *) addr) + (nr >> 5); |
| 282 | int old = *m; | 282 | int old = *m; |
| 283 | 283 | ||
| 284 | *m = old ^ mask; | 284 | *m = old ^ mask; |
| 285 | return (old & mask) != 0; | 285 | return (old & mask) != 0; |
| 286 | } | 286 | } |
| 287 | 287 | ||
| 288 | static inline int | 288 | static inline int |
| 289 | test_bit(int nr, const volatile void * addr) | 289 | test_bit(int nr, const volatile void * addr) |
| 290 | { | 290 | { |
| 291 | return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL; | 291 | return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL; |
| 292 | } | 292 | } |
| 293 | 293 | ||
| 294 | /* | 294 | /* |
| 295 | * ffz = Find First Zero in word. Undefined if no zero exists, | 295 | * ffz = Find First Zero in word. Undefined if no zero exists, |
| 296 | * so code should check against ~0UL first.. | 296 | * so code should check against ~0UL first.. |
| 297 | * | 297 | * |
| 298 | * Do a binary search on the bits. Due to the nature of large | 298 | * Do a binary search on the bits. Due to the nature of large |
| 299 | * constants on the alpha, it is worthwhile to split the search. | 299 | * constants on the alpha, it is worthwhile to split the search. |
| 300 | */ | 300 | */ |
| 301 | static inline unsigned long ffz_b(unsigned long x) | 301 | static inline unsigned long ffz_b(unsigned long x) |
| 302 | { | 302 | { |
| 303 | unsigned long sum, x1, x2, x4; | 303 | unsigned long sum, x1, x2, x4; |
| 304 | 304 | ||
| 305 | x = ~x & -~x; /* set first 0 bit, clear others */ | 305 | x = ~x & -~x; /* set first 0 bit, clear others */ |
| 306 | x1 = x & 0xAA; | 306 | x1 = x & 0xAA; |
| 307 | x2 = x & 0xCC; | 307 | x2 = x & 0xCC; |
| 308 | x4 = x & 0xF0; | 308 | x4 = x & 0xF0; |
| 309 | sum = x2 ? 2 : 0; | 309 | sum = x2 ? 2 : 0; |
| 310 | sum += (x4 != 0) * 4; | 310 | sum += (x4 != 0) * 4; |
| 311 | sum += (x1 != 0); | 311 | sum += (x1 != 0); |
| 312 | 312 | ||
| 313 | return sum; | 313 | return sum; |
| 314 | } | 314 | } |
| 315 | 315 | ||
| 316 | static inline unsigned long ffz(unsigned long word) | 316 | static inline unsigned long ffz(unsigned long word) |
| 317 | { | 317 | { |
| 318 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) | 318 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) |
| 319 | /* Whee. EV67 can calculate it directly. */ | 319 | /* Whee. EV67 can calculate it directly. */ |
| 320 | return __kernel_cttz(~word); | 320 | return __kernel_cttz(~word); |
| 321 | #else | 321 | #else |
| 322 | unsigned long bits, qofs, bofs; | 322 | unsigned long bits, qofs, bofs; |
| 323 | 323 | ||
| 324 | bits = __kernel_cmpbge(word, ~0UL); | 324 | bits = __kernel_cmpbge(word, ~0UL); |
| 325 | qofs = ffz_b(bits); | 325 | qofs = ffz_b(bits); |
| 326 | bits = __kernel_extbl(word, qofs); | 326 | bits = __kernel_extbl(word, qofs); |
| 327 | bofs = ffz_b(bits); | 327 | bofs = ffz_b(bits); |
| 328 | 328 | ||
| 329 | return qofs*8 + bofs; | 329 | return qofs*8 + bofs; |
| 330 | #endif | 330 | #endif |
| 331 | } | 331 | } |
| 332 | 332 | ||
| 333 | /* | 333 | /* |
| 334 | * __ffs = Find First set bit in word. Undefined if no set bit exists. | 334 | * __ffs = Find First set bit in word. Undefined if no set bit exists. |
| 335 | */ | 335 | */ |
| 336 | static inline unsigned long __ffs(unsigned long word) | 336 | static inline unsigned long __ffs(unsigned long word) |
| 337 | { | 337 | { |
| 338 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) | 338 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) |
| 339 | /* Whee. EV67 can calculate it directly. */ | 339 | /* Whee. EV67 can calculate it directly. */ |
| 340 | return __kernel_cttz(word); | 340 | return __kernel_cttz(word); |
| 341 | #else | 341 | #else |
| 342 | unsigned long bits, qofs, bofs; | 342 | unsigned long bits, qofs, bofs; |
| 343 | 343 | ||
| 344 | bits = __kernel_cmpbge(0, word); | 344 | bits = __kernel_cmpbge(0, word); |
| 345 | qofs = ffz_b(bits); | 345 | qofs = ffz_b(bits); |
| 346 | bits = __kernel_extbl(word, qofs); | 346 | bits = __kernel_extbl(word, qofs); |
| 347 | bofs = ffz_b(~bits); | 347 | bofs = ffz_b(~bits); |
| 348 | 348 | ||
| 349 | return qofs*8 + bofs; | 349 | return qofs*8 + bofs; |
| 350 | #endif | 350 | #endif |
| 351 | } | 351 | } |
| 352 | 352 | ||
| 353 | #ifdef __KERNEL__ | 353 | #ifdef __KERNEL__ |
| 354 | 354 | ||
| 355 | /* | 355 | /* |
| 356 | * ffs: find first bit set. This is defined the same way as | 356 | * ffs: find first bit set. This is defined the same way as |
| 357 | * the libc and compiler builtin ffs routines, therefore | 357 | * the libc and compiler builtin ffs routines, therefore |
| 358 | * differs in spirit from the above __ffs. | 358 | * differs in spirit from the above __ffs. |
| 359 | */ | 359 | */ |
| 360 | 360 | ||
| 361 | static inline int ffs(int word) | 361 | static inline int ffs(int word) |
| 362 | { | 362 | { |
| 363 | int result = __ffs(word) + 1; | 363 | int result = __ffs(word) + 1; |
| 364 | return word ? result : 0; | 364 | return word ? result : 0; |
| 365 | } | 365 | } |
| 366 | 366 | ||
| 367 | /* | 367 | /* |
| 368 | * fls: find last bit set. | 368 | * fls: find last bit set. |
| 369 | */ | 369 | */ |
| 370 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) | 370 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) |
| 371 | static inline int fls64(unsigned long word) | 371 | static inline int fls64(unsigned long word) |
| 372 | { | 372 | { |
| 373 | return 64 - __kernel_ctlz(word); | 373 | return 64 - __kernel_ctlz(word); |
| 374 | } | 374 | } |
| 375 | #else | 375 | #else |
| 376 | extern const unsigned char __flsm1_tab[256]; | 376 | extern const unsigned char __flsm1_tab[256]; |
| 377 | 377 | ||
| 378 | static inline int fls64(unsigned long x) | 378 | static inline int fls64(unsigned long x) |
| 379 | { | 379 | { |
| 380 | unsigned long t, a, r; | 380 | unsigned long t, a, r; |
| 381 | 381 | ||
| 382 | t = __kernel_cmpbge (x, 0x0101010101010101UL); | 382 | t = __kernel_cmpbge (x, 0x0101010101010101UL); |
| 383 | a = __flsm1_tab[t]; | 383 | a = __flsm1_tab[t]; |
| 384 | t = __kernel_extbl (x, a); | 384 | t = __kernel_extbl (x, a); |
| 385 | r = a*8 + __flsm1_tab[t] + (x != 0); | 385 | r = a*8 + __flsm1_tab[t] + (x != 0); |
| 386 | 386 | ||
| 387 | return r; | 387 | return r; |
| 388 | } | 388 | } |
| 389 | #endif | 389 | #endif |
| 390 | 390 | ||
| 391 | static inline unsigned long __fls(unsigned long x) | 391 | static inline unsigned long __fls(unsigned long x) |
| 392 | { | 392 | { |
| 393 | return fls64(x) - 1; | 393 | return fls64(x) - 1; |
| 394 | } | 394 | } |
| 395 | 395 | ||
| 396 | static inline int fls(int x) | 396 | static inline int fls(int x) |
| 397 | { | 397 | { |
| 398 | return fls64((unsigned int) x); | 398 | return fls64((unsigned int) x); |
| 399 | } | 399 | } |
| 400 | 400 | ||
| 401 | /* | 401 | /* |
| 402 | * hweightN: returns the hamming weight (i.e. the number | 402 | * hweightN: returns the hamming weight (i.e. the number |
| 403 | * of bits set) of a N-bit word | 403 | * of bits set) of a N-bit word |
| 404 | */ | 404 | */ |
| 405 | 405 | ||
| 406 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) | 406 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) |
| 407 | /* Whee. EV67 can calculate it directly. */ | 407 | /* Whee. EV67 can calculate it directly. */ |
| 408 | static inline unsigned long __arch_hweight64(unsigned long w) | 408 | static inline unsigned long __arch_hweight64(unsigned long w) |
| 409 | { | 409 | { |
| 410 | return __kernel_ctpop(w); | 410 | return __kernel_ctpop(w); |
| 411 | } | 411 | } |
| 412 | 412 | ||
| 413 | static inline unsigned int __arch_hweight32(unsigned int w) | 413 | static inline unsigned int __arch_hweight32(unsigned int w) |
| 414 | { | 414 | { |
| 415 | return __arch_hweight64(w); | 415 | return __arch_hweight64(w); |
| 416 | } | 416 | } |
| 417 | 417 | ||
| 418 | static inline unsigned int __arch_hweight16(unsigned int w) | 418 | static inline unsigned int __arch_hweight16(unsigned int w) |
| 419 | { | 419 | { |
| 420 | return __arch_hweight64(w & 0xffff); | 420 | return __arch_hweight64(w & 0xffff); |
| 421 | } | 421 | } |
| 422 | 422 | ||
| 423 | static inline unsigned int __arch_hweight8(unsigned int w) | 423 | static inline unsigned int __arch_hweight8(unsigned int w) |
| 424 | { | 424 | { |
| 425 | return __arch_hweight64(w & 0xff); | 425 | return __arch_hweight64(w & 0xff); |
| 426 | } | 426 | } |
| 427 | #else | 427 | #else |
| 428 | #include <asm-generic/bitops/arch_hweight.h> | 428 | #include <asm-generic/bitops/arch_hweight.h> |
| 429 | #endif | 429 | #endif |
| 430 | 430 | ||
| 431 | #include <asm-generic/bitops/const_hweight.h> | 431 | #include <asm-generic/bitops/const_hweight.h> |
| 432 | 432 | ||
| 433 | #endif /* __KERNEL__ */ | 433 | #endif /* __KERNEL__ */ |
| 434 | 434 | ||
| 435 | #include <asm-generic/bitops/find.h> | 435 | #include <asm-generic/bitops/find.h> |
| 436 | 436 | ||
| 437 | #ifdef __KERNEL__ | 437 | #ifdef __KERNEL__ |
| 438 | 438 | ||
| 439 | /* | 439 | /* |
| 440 | * Every architecture must define this function. It's the fastest | 440 | * Every architecture must define this function. It's the fastest |
| 441 | * way of searching a 100-bit bitmap. It's guaranteed that at least | 441 | * way of searching a 100-bit bitmap. It's guaranteed that at least |
| 442 | * one of the 100 bits is cleared. | 442 | * one of the 100 bits is cleared. |
| 443 | */ | 443 | */ |
| 444 | static inline unsigned long | 444 | static inline unsigned long |
| 445 | sched_find_first_bit(const unsigned long b[2]) | 445 | sched_find_first_bit(const unsigned long b[2]) |
| 446 | { | 446 | { |
| 447 | unsigned long b0, b1, ofs, tmp; | 447 | unsigned long b0, b1, ofs, tmp; |
| 448 | 448 | ||
| 449 | b0 = b[0]; | 449 | b0 = b[0]; |
| 450 | b1 = b[1]; | 450 | b1 = b[1]; |
| 451 | ofs = (b0 ? 0 : 64); | 451 | ofs = (b0 ? 0 : 64); |
| 452 | tmp = (b0 ? b0 : b1); | 452 | tmp = (b0 ? b0 : b1); |
| 453 | 453 | ||
| 454 | return __ffs(tmp) + ofs; | 454 | return __ffs(tmp) + ofs; |
| 455 | } | 455 | } |
| 456 | 456 | ||
| 457 | #include <asm-generic/bitops/le.h> | 457 | #include <asm-generic/bitops/le.h> |
| 458 | 458 | ||
| 459 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) | 459 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) |
| 460 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) | 460 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) |
| 461 | 461 | ||
| 462 | #include <asm-generic/bitops/minix.h> | ||
| 463 | |||
| 464 | #endif /* __KERNEL__ */ | 462 | #endif /* __KERNEL__ */ |
| 465 | 463 | ||
| 466 | #endif /* _ALPHA_BITOPS_H */ | 464 | #endif /* _ALPHA_BITOPS_H */ |
| 467 | 465 |
arch/arm/include/asm/bitops.h
| 1 | /* | 1 | /* |
| 2 | * Copyright 1995, Russell King. | 2 | * Copyright 1995, Russell King. |
| 3 | * Various bits and pieces copyrights include: | 3 | * Various bits and pieces copyrights include: |
| 4 | * Linus Torvalds (test_bit). | 4 | * Linus Torvalds (test_bit). |
| 5 | * Big endian support: Copyright 2001, Nicolas Pitre | 5 | * Big endian support: Copyright 2001, Nicolas Pitre |
| 6 | * reworked by rmk. | 6 | * reworked by rmk. |
| 7 | * | 7 | * |
| 8 | * bit 0 is the LSB of an "unsigned long" quantity. | 8 | * bit 0 is the LSB of an "unsigned long" quantity. |
| 9 | * | 9 | * |
| 10 | * Please note that the code in this file should never be included | 10 | * Please note that the code in this file should never be included |
| 11 | * from user space. Many of these are not implemented in assembler | 11 | * from user space. Many of these are not implemented in assembler |
| 12 | * since they would be too costly. Also, they require privileged | 12 | * since they would be too costly. Also, they require privileged |
| 13 | * instructions (which are not available from user mode) to ensure | 13 | * instructions (which are not available from user mode) to ensure |
| 14 | * that they are atomic. | 14 | * that they are atomic. |
| 15 | */ | 15 | */ |
| 16 | 16 | ||
| 17 | #ifndef __ASM_ARM_BITOPS_H | 17 | #ifndef __ASM_ARM_BITOPS_H |
| 18 | #define __ASM_ARM_BITOPS_H | 18 | #define __ASM_ARM_BITOPS_H |
| 19 | 19 | ||
| 20 | #ifdef __KERNEL__ | 20 | #ifdef __KERNEL__ |
| 21 | 21 | ||
| 22 | #ifndef _LINUX_BITOPS_H | 22 | #ifndef _LINUX_BITOPS_H |
| 23 | #error only <linux/bitops.h> can be included directly | 23 | #error only <linux/bitops.h> can be included directly |
| 24 | #endif | 24 | #endif |
| 25 | 25 | ||
| 26 | #include <linux/compiler.h> | 26 | #include <linux/compiler.h> |
| 27 | #include <asm/system.h> | 27 | #include <asm/system.h> |
| 28 | 28 | ||
| 29 | #define smp_mb__before_clear_bit() mb() | 29 | #define smp_mb__before_clear_bit() mb() |
| 30 | #define smp_mb__after_clear_bit() mb() | 30 | #define smp_mb__after_clear_bit() mb() |
| 31 | 31 | ||
| 32 | /* | 32 | /* |
| 33 | * These functions are the basis of our bit ops. | 33 | * These functions are the basis of our bit ops. |
| 34 | * | 34 | * |
| 35 | * First, the atomic bitops. These use native endian. | 35 | * First, the atomic bitops. These use native endian. |
| 36 | */ | 36 | */ |
| 37 | static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p) | 37 | static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p) |
| 38 | { | 38 | { |
| 39 | unsigned long flags; | 39 | unsigned long flags; |
| 40 | unsigned long mask = 1UL << (bit & 31); | 40 | unsigned long mask = 1UL << (bit & 31); |
| 41 | 41 | ||
| 42 | p += bit >> 5; | 42 | p += bit >> 5; |
| 43 | 43 | ||
| 44 | raw_local_irq_save(flags); | 44 | raw_local_irq_save(flags); |
| 45 | *p |= mask; | 45 | *p |= mask; |
| 46 | raw_local_irq_restore(flags); | 46 | raw_local_irq_restore(flags); |
| 47 | } | 47 | } |
| 48 | 48 | ||
| 49 | static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p) | 49 | static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p) |
| 50 | { | 50 | { |
| 51 | unsigned long flags; | 51 | unsigned long flags; |
| 52 | unsigned long mask = 1UL << (bit & 31); | 52 | unsigned long mask = 1UL << (bit & 31); |
| 53 | 53 | ||
| 54 | p += bit >> 5; | 54 | p += bit >> 5; |
| 55 | 55 | ||
| 56 | raw_local_irq_save(flags); | 56 | raw_local_irq_save(flags); |
| 57 | *p &= ~mask; | 57 | *p &= ~mask; |
| 58 | raw_local_irq_restore(flags); | 58 | raw_local_irq_restore(flags); |
| 59 | } | 59 | } |
| 60 | 60 | ||
| 61 | static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p) | 61 | static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p) |
| 62 | { | 62 | { |
| 63 | unsigned long flags; | 63 | unsigned long flags; |
| 64 | unsigned long mask = 1UL << (bit & 31); | 64 | unsigned long mask = 1UL << (bit & 31); |
| 65 | 65 | ||
| 66 | p += bit >> 5; | 66 | p += bit >> 5; |
| 67 | 67 | ||
| 68 | raw_local_irq_save(flags); | 68 | raw_local_irq_save(flags); |
| 69 | *p ^= mask; | 69 | *p ^= mask; |
| 70 | raw_local_irq_restore(flags); | 70 | raw_local_irq_restore(flags); |
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | static inline int | 73 | static inline int |
| 74 | ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p) | 74 | ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p) |
| 75 | { | 75 | { |
| 76 | unsigned long flags; | 76 | unsigned long flags; |
| 77 | unsigned int res; | 77 | unsigned int res; |
| 78 | unsigned long mask = 1UL << (bit & 31); | 78 | unsigned long mask = 1UL << (bit & 31); |
| 79 | 79 | ||
| 80 | p += bit >> 5; | 80 | p += bit >> 5; |
| 81 | 81 | ||
| 82 | raw_local_irq_save(flags); | 82 | raw_local_irq_save(flags); |
| 83 | res = *p; | 83 | res = *p; |
| 84 | *p = res | mask; | 84 | *p = res | mask; |
| 85 | raw_local_irq_restore(flags); | 85 | raw_local_irq_restore(flags); |
| 86 | 86 | ||
| 87 | return (res & mask) != 0; | 87 | return (res & mask) != 0; |
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | static inline int | 90 | static inline int |
| 91 | ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p) | 91 | ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p) |
| 92 | { | 92 | { |
| 93 | unsigned long flags; | 93 | unsigned long flags; |
| 94 | unsigned int res; | 94 | unsigned int res; |
| 95 | unsigned long mask = 1UL << (bit & 31); | 95 | unsigned long mask = 1UL << (bit & 31); |
| 96 | 96 | ||
| 97 | p += bit >> 5; | 97 | p += bit >> 5; |
| 98 | 98 | ||
| 99 | raw_local_irq_save(flags); | 99 | raw_local_irq_save(flags); |
| 100 | res = *p; | 100 | res = *p; |
| 101 | *p = res & ~mask; | 101 | *p = res & ~mask; |
| 102 | raw_local_irq_restore(flags); | 102 | raw_local_irq_restore(flags); |
| 103 | 103 | ||
| 104 | return (res & mask) != 0; | 104 | return (res & mask) != 0; |
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | static inline int | 107 | static inline int |
| 108 | ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p) | 108 | ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p) |
| 109 | { | 109 | { |
| 110 | unsigned long flags; | 110 | unsigned long flags; |
| 111 | unsigned int res; | 111 | unsigned int res; |
| 112 | unsigned long mask = 1UL << (bit & 31); | 112 | unsigned long mask = 1UL << (bit & 31); |
| 113 | 113 | ||
| 114 | p += bit >> 5; | 114 | p += bit >> 5; |
| 115 | 115 | ||
| 116 | raw_local_irq_save(flags); | 116 | raw_local_irq_save(flags); |
| 117 | res = *p; | 117 | res = *p; |
| 118 | *p = res ^ mask; | 118 | *p = res ^ mask; |
| 119 | raw_local_irq_restore(flags); | 119 | raw_local_irq_restore(flags); |
| 120 | 120 | ||
| 121 | return (res & mask) != 0; | 121 | return (res & mask) != 0; |
| 122 | } | 122 | } |
| 123 | 123 | ||
| 124 | #include <asm-generic/bitops/non-atomic.h> | 124 | #include <asm-generic/bitops/non-atomic.h> |
| 125 | 125 | ||
| 126 | /* | 126 | /* |
| 127 | * A note about Endian-ness. | 127 | * A note about Endian-ness. |
| 128 | * ------------------------- | 128 | * ------------------------- |
| 129 | * | 129 | * |
| 130 | * When the ARM is put into big endian mode via CR15, the processor | 130 | * When the ARM is put into big endian mode via CR15, the processor |
| 131 | * merely swaps the order of bytes within words, thus: | 131 | * merely swaps the order of bytes within words, thus: |
| 132 | * | 132 | * |
| 133 | * ------------ physical data bus bits ----------- | 133 | * ------------ physical data bus bits ----------- |
| 134 | * D31 ... D24 D23 ... D16 D15 ... D8 D7 ... D0 | 134 | * D31 ... D24 D23 ... D16 D15 ... D8 D7 ... D0 |
| 135 | * little byte 3 byte 2 byte 1 byte 0 | 135 | * little byte 3 byte 2 byte 1 byte 0 |
| 136 | * big byte 0 byte 1 byte 2 byte 3 | 136 | * big byte 0 byte 1 byte 2 byte 3 |
| 137 | * | 137 | * |
| 138 | * This means that reading a 32-bit word at address 0 returns the same | 138 | * This means that reading a 32-bit word at address 0 returns the same |
| 139 | * value irrespective of the endian mode bit. | 139 | * value irrespective of the endian mode bit. |
| 140 | * | 140 | * |
| 141 | * Peripheral devices should be connected with the data bus reversed in | 141 | * Peripheral devices should be connected with the data bus reversed in |
| 142 | * "Big Endian" mode. ARM Application Note 61 is applicable, and is | 142 | * "Big Endian" mode. ARM Application Note 61 is applicable, and is |
| 143 | * available from http://www.arm.com/. | 143 | * available from http://www.arm.com/. |
| 144 | * | 144 | * |
| 145 | * The following assumes that the data bus connectivity for big endian | 145 | * The following assumes that the data bus connectivity for big endian |
| 146 | * mode has been followed. | 146 | * mode has been followed. |
| 147 | * | 147 | * |
| 148 | * Note that bit 0 is defined to be 32-bit word bit 0, not byte 0 bit 0. | 148 | * Note that bit 0 is defined to be 32-bit word bit 0, not byte 0 bit 0. |
| 149 | */ | 149 | */ |
| 150 | 150 | ||
| 151 | /* | 151 | /* |
| 152 | * Native endian assembly bitops. nr = 0 -> word 0 bit 0. | 152 | * Native endian assembly bitops. nr = 0 -> word 0 bit 0. |
| 153 | */ | 153 | */ |
| 154 | extern void _set_bit(int nr, volatile unsigned long * p); | 154 | extern void _set_bit(int nr, volatile unsigned long * p); |
| 155 | extern void _clear_bit(int nr, volatile unsigned long * p); | 155 | extern void _clear_bit(int nr, volatile unsigned long * p); |
| 156 | extern void _change_bit(int nr, volatile unsigned long * p); | 156 | extern void _change_bit(int nr, volatile unsigned long * p); |
| 157 | extern int _test_and_set_bit(int nr, volatile unsigned long * p); | 157 | extern int _test_and_set_bit(int nr, volatile unsigned long * p); |
| 158 | extern int _test_and_clear_bit(int nr, volatile unsigned long * p); | 158 | extern int _test_and_clear_bit(int nr, volatile unsigned long * p); |
| 159 | extern int _test_and_change_bit(int nr, volatile unsigned long * p); | 159 | extern int _test_and_change_bit(int nr, volatile unsigned long * p); |
| 160 | 160 | ||
| 161 | /* | 161 | /* |
| 162 | * Little endian assembly bitops. nr = 0 -> byte 0 bit 0. | 162 | * Little endian assembly bitops. nr = 0 -> byte 0 bit 0. |
| 163 | */ | 163 | */ |
| 164 | extern int _find_first_zero_bit_le(const void * p, unsigned size); | 164 | extern int _find_first_zero_bit_le(const void * p, unsigned size); |
| 165 | extern int _find_next_zero_bit_le(const void * p, int size, int offset); | 165 | extern int _find_next_zero_bit_le(const void * p, int size, int offset); |
| 166 | extern int _find_first_bit_le(const unsigned long *p, unsigned size); | 166 | extern int _find_first_bit_le(const unsigned long *p, unsigned size); |
| 167 | extern int _find_next_bit_le(const unsigned long *p, int size, int offset); | 167 | extern int _find_next_bit_le(const unsigned long *p, int size, int offset); |
| 168 | 168 | ||
| 169 | /* | 169 | /* |
| 170 | * Big endian assembly bitops. nr = 0 -> byte 3 bit 0. | 170 | * Big endian assembly bitops. nr = 0 -> byte 3 bit 0. |
| 171 | */ | 171 | */ |
| 172 | extern int _find_first_zero_bit_be(const void * p, unsigned size); | 172 | extern int _find_first_zero_bit_be(const void * p, unsigned size); |
| 173 | extern int _find_next_zero_bit_be(const void * p, int size, int offset); | 173 | extern int _find_next_zero_bit_be(const void * p, int size, int offset); |
| 174 | extern int _find_first_bit_be(const unsigned long *p, unsigned size); | 174 | extern int _find_first_bit_be(const unsigned long *p, unsigned size); |
| 175 | extern int _find_next_bit_be(const unsigned long *p, int size, int offset); | 175 | extern int _find_next_bit_be(const unsigned long *p, int size, int offset); |
| 176 | 176 | ||
| 177 | #ifndef CONFIG_SMP | 177 | #ifndef CONFIG_SMP |
| 178 | /* | 178 | /* |
| 179 | * The __* form of bitops are non-atomic and may be reordered. | 179 | * The __* form of bitops are non-atomic and may be reordered. |
| 180 | */ | 180 | */ |
| 181 | #define ATOMIC_BITOP(name,nr,p) \ | 181 | #define ATOMIC_BITOP(name,nr,p) \ |
| 182 | (__builtin_constant_p(nr) ? ____atomic_##name(nr, p) : _##name(nr,p)) | 182 | (__builtin_constant_p(nr) ? ____atomic_##name(nr, p) : _##name(nr,p)) |
| 183 | #else | 183 | #else |
| 184 | #define ATOMIC_BITOP(name,nr,p) _##name(nr,p) | 184 | #define ATOMIC_BITOP(name,nr,p) _##name(nr,p) |
| 185 | #endif | 185 | #endif |
| 186 | 186 | ||
| 187 | /* | 187 | /* |
| 188 | * Native endian atomic definitions. | 188 | * Native endian atomic definitions. |
| 189 | */ | 189 | */ |
| 190 | #define set_bit(nr,p) ATOMIC_BITOP(set_bit,nr,p) | 190 | #define set_bit(nr,p) ATOMIC_BITOP(set_bit,nr,p) |
| 191 | #define clear_bit(nr,p) ATOMIC_BITOP(clear_bit,nr,p) | 191 | #define clear_bit(nr,p) ATOMIC_BITOP(clear_bit,nr,p) |
| 192 | #define change_bit(nr,p) ATOMIC_BITOP(change_bit,nr,p) | 192 | #define change_bit(nr,p) ATOMIC_BITOP(change_bit,nr,p) |
| 193 | #define test_and_set_bit(nr,p) ATOMIC_BITOP(test_and_set_bit,nr,p) | 193 | #define test_and_set_bit(nr,p) ATOMIC_BITOP(test_and_set_bit,nr,p) |
| 194 | #define test_and_clear_bit(nr,p) ATOMIC_BITOP(test_and_clear_bit,nr,p) | 194 | #define test_and_clear_bit(nr,p) ATOMIC_BITOP(test_and_clear_bit,nr,p) |
| 195 | #define test_and_change_bit(nr,p) ATOMIC_BITOP(test_and_change_bit,nr,p) | 195 | #define test_and_change_bit(nr,p) ATOMIC_BITOP(test_and_change_bit,nr,p) |
| 196 | 196 | ||
| 197 | #ifndef __ARMEB__ | 197 | #ifndef __ARMEB__ |
| 198 | /* | 198 | /* |
| 199 | * These are the little endian, atomic definitions. | 199 | * These are the little endian, atomic definitions. |
| 200 | */ | 200 | */ |
| 201 | #define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) | 201 | #define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) |
| 202 | #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) | 202 | #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) |
| 203 | #define find_first_bit(p,sz) _find_first_bit_le(p,sz) | 203 | #define find_first_bit(p,sz) _find_first_bit_le(p,sz) |
| 204 | #define find_next_bit(p,sz,off) _find_next_bit_le(p,sz,off) | 204 | #define find_next_bit(p,sz,off) _find_next_bit_le(p,sz,off) |
| 205 | 205 | ||
| 206 | #define WORD_BITOFF_TO_LE(x) ((x)) | 206 | #define WORD_BITOFF_TO_LE(x) ((x)) |
| 207 | 207 | ||
| 208 | #else | 208 | #else |
| 209 | /* | 209 | /* |
| 210 | * These are the big endian, atomic definitions. | 210 | * These are the big endian, atomic definitions. |
| 211 | */ | 211 | */ |
| 212 | #define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz) | 212 | #define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz) |
| 213 | #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off) | 213 | #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off) |
| 214 | #define find_first_bit(p,sz) _find_first_bit_be(p,sz) | 214 | #define find_first_bit(p,sz) _find_first_bit_be(p,sz) |
| 215 | #define find_next_bit(p,sz,off) _find_next_bit_be(p,sz,off) | 215 | #define find_next_bit(p,sz,off) _find_next_bit_be(p,sz,off) |
| 216 | 216 | ||
| 217 | #define WORD_BITOFF_TO_LE(x) ((x) ^ 0x18) | 217 | #define WORD_BITOFF_TO_LE(x) ((x) ^ 0x18) |
| 218 | 218 | ||
| 219 | #endif | 219 | #endif |
| 220 | 220 | ||
| 221 | #if __LINUX_ARM_ARCH__ < 5 | 221 | #if __LINUX_ARM_ARCH__ < 5 |
| 222 | 222 | ||
| 223 | #include <asm-generic/bitops/ffz.h> | 223 | #include <asm-generic/bitops/ffz.h> |
| 224 | #include <asm-generic/bitops/__fls.h> | 224 | #include <asm-generic/bitops/__fls.h> |
| 225 | #include <asm-generic/bitops/__ffs.h> | 225 | #include <asm-generic/bitops/__ffs.h> |
| 226 | #include <asm-generic/bitops/fls.h> | 226 | #include <asm-generic/bitops/fls.h> |
| 227 | #include <asm-generic/bitops/ffs.h> | 227 | #include <asm-generic/bitops/ffs.h> |
| 228 | 228 | ||
| 229 | #else | 229 | #else |
| 230 | 230 | ||
| 231 | static inline int constant_fls(int x) | 231 | static inline int constant_fls(int x) |
| 232 | { | 232 | { |
| 233 | int r = 32; | 233 | int r = 32; |
| 234 | 234 | ||
| 235 | if (!x) | 235 | if (!x) |
| 236 | return 0; | 236 | return 0; |
| 237 | if (!(x & 0xffff0000u)) { | 237 | if (!(x & 0xffff0000u)) { |
| 238 | x <<= 16; | 238 | x <<= 16; |
| 239 | r -= 16; | 239 | r -= 16; |
| 240 | } | 240 | } |
| 241 | if (!(x & 0xff000000u)) { | 241 | if (!(x & 0xff000000u)) { |
| 242 | x <<= 8; | 242 | x <<= 8; |
| 243 | r -= 8; | 243 | r -= 8; |
| 244 | } | 244 | } |
| 245 | if (!(x & 0xf0000000u)) { | 245 | if (!(x & 0xf0000000u)) { |
| 246 | x <<= 4; | 246 | x <<= 4; |
| 247 | r -= 4; | 247 | r -= 4; |
| 248 | } | 248 | } |
| 249 | if (!(x & 0xc0000000u)) { | 249 | if (!(x & 0xc0000000u)) { |
| 250 | x <<= 2; | 250 | x <<= 2; |
| 251 | r -= 2; | 251 | r -= 2; |
| 252 | } | 252 | } |
| 253 | if (!(x & 0x80000000u)) { | 253 | if (!(x & 0x80000000u)) { |
| 254 | x <<= 1; | 254 | x <<= 1; |
| 255 | r -= 1; | 255 | r -= 1; |
| 256 | } | 256 | } |
| 257 | return r; | 257 | return r; |
| 258 | } | 258 | } |
| 259 | 259 | ||
| 260 | /* | 260 | /* |
| 261 | * On ARMv5 and above those functions can be implemented around | 261 | * On ARMv5 and above those functions can be implemented around |
| 262 | * the clz instruction for much better code efficiency. | 262 | * the clz instruction for much better code efficiency. |
| 263 | */ | 263 | */ |
| 264 | 264 | ||
| 265 | static inline int fls(int x) | 265 | static inline int fls(int x) |
| 266 | { | 266 | { |
| 267 | int ret; | 267 | int ret; |
| 268 | 268 | ||
| 269 | if (__builtin_constant_p(x)) | 269 | if (__builtin_constant_p(x)) |
| 270 | return constant_fls(x); | 270 | return constant_fls(x); |
| 271 | 271 | ||
| 272 | asm("clz\t%0, %1" : "=r" (ret) : "r" (x)); | 272 | asm("clz\t%0, %1" : "=r" (ret) : "r" (x)); |
| 273 | ret = 32 - ret; | 273 | ret = 32 - ret; |
| 274 | return ret; | 274 | return ret; |
| 275 | } | 275 | } |
| 276 | 276 | ||
| 277 | #define __fls(x) (fls(x) - 1) | 277 | #define __fls(x) (fls(x) - 1) |
| 278 | #define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); }) | 278 | #define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); }) |
| 279 | #define __ffs(x) (ffs(x) - 1) | 279 | #define __ffs(x) (ffs(x) - 1) |
| 280 | #define ffz(x) __ffs( ~(x) ) | 280 | #define ffz(x) __ffs( ~(x) ) |
| 281 | 281 | ||
| 282 | #endif | 282 | #endif |
| 283 | 283 | ||
| 284 | #include <asm-generic/bitops/fls64.h> | 284 | #include <asm-generic/bitops/fls64.h> |
| 285 | 285 | ||
| 286 | #include <asm-generic/bitops/sched.h> | 286 | #include <asm-generic/bitops/sched.h> |
| 287 | #include <asm-generic/bitops/hweight.h> | 287 | #include <asm-generic/bitops/hweight.h> |
| 288 | #include <asm-generic/bitops/lock.h> | 288 | #include <asm-generic/bitops/lock.h> |
| 289 | 289 | ||
| 290 | static inline void __set_bit_le(int nr, void *addr) | 290 | static inline void __set_bit_le(int nr, void *addr) |
| 291 | { | 291 | { |
| 292 | __set_bit(WORD_BITOFF_TO_LE(nr), addr); | 292 | __set_bit(WORD_BITOFF_TO_LE(nr), addr); |
| 293 | } | 293 | } |
| 294 | 294 | ||
| 295 | static inline void __clear_bit_le(int nr, void *addr) | 295 | static inline void __clear_bit_le(int nr, void *addr) |
| 296 | { | 296 | { |
| 297 | __clear_bit(WORD_BITOFF_TO_LE(nr), addr); | 297 | __clear_bit(WORD_BITOFF_TO_LE(nr), addr); |
| 298 | } | 298 | } |
| 299 | 299 | ||
| 300 | static inline int __test_and_set_bit_le(int nr, void *addr) | 300 | static inline int __test_and_set_bit_le(int nr, void *addr) |
| 301 | { | 301 | { |
| 302 | return __test_and_set_bit(WORD_BITOFF_TO_LE(nr), addr); | 302 | return __test_and_set_bit(WORD_BITOFF_TO_LE(nr), addr); |
| 303 | } | 303 | } |
| 304 | 304 | ||
| 305 | static inline int test_and_set_bit_le(int nr, void *addr) | 305 | static inline int test_and_set_bit_le(int nr, void *addr) |
| 306 | { | 306 | { |
| 307 | return test_and_set_bit(WORD_BITOFF_TO_LE(nr), addr); | 307 | return test_and_set_bit(WORD_BITOFF_TO_LE(nr), addr); |
| 308 | } | 308 | } |
| 309 | 309 | ||
| 310 | static inline int __test_and_clear_bit_le(int nr, void *addr) | 310 | static inline int __test_and_clear_bit_le(int nr, void *addr) |
| 311 | { | 311 | { |
| 312 | return __test_and_clear_bit(WORD_BITOFF_TO_LE(nr), addr); | 312 | return __test_and_clear_bit(WORD_BITOFF_TO_LE(nr), addr); |
| 313 | } | 313 | } |
| 314 | 314 | ||
| 315 | static inline int test_and_clear_bit_le(int nr, void *addr) | 315 | static inline int test_and_clear_bit_le(int nr, void *addr) |
| 316 | { | 316 | { |
| 317 | return test_and_clear_bit(WORD_BITOFF_TO_LE(nr), addr); | 317 | return test_and_clear_bit(WORD_BITOFF_TO_LE(nr), addr); |
| 318 | } | 318 | } |
| 319 | 319 | ||
| 320 | static inline int test_bit_le(int nr, const void *addr) | 320 | static inline int test_bit_le(int nr, const void *addr) |
| 321 | { | 321 | { |
| 322 | return test_bit(WORD_BITOFF_TO_LE(nr), addr); | 322 | return test_bit(WORD_BITOFF_TO_LE(nr), addr); |
| 323 | } | 323 | } |
| 324 | 324 | ||
| 325 | static inline int find_first_zero_bit_le(const void *p, unsigned size) | 325 | static inline int find_first_zero_bit_le(const void *p, unsigned size) |
| 326 | { | 326 | { |
| 327 | return _find_first_zero_bit_le(p, size); | 327 | return _find_first_zero_bit_le(p, size); |
| 328 | } | 328 | } |
| 329 | 329 | ||
| 330 | static inline int find_next_zero_bit_le(const void *p, int size, int offset) | 330 | static inline int find_next_zero_bit_le(const void *p, int size, int offset) |
| 331 | { | 331 | { |
| 332 | return _find_next_zero_bit_le(p, size, offset); | 332 | return _find_next_zero_bit_le(p, size, offset); |
| 333 | } | 333 | } |
| 334 | 334 | ||
| 335 | static inline int find_next_bit_le(const void *p, int size, int offset) | 335 | static inline int find_next_bit_le(const void *p, int size, int offset) |
| 336 | { | 336 | { |
| 337 | return _find_next_bit_le(p, size, offset); | 337 | return _find_next_bit_le(p, size, offset); |
| 338 | } | 338 | } |
| 339 | 339 | ||
| 340 | /* | 340 | /* |
| 341 | * Ext2 is defined to use little-endian byte ordering. | 341 | * Ext2 is defined to use little-endian byte ordering. |
| 342 | */ | 342 | */ |
| 343 | #define ext2_set_bit_atomic(lock, nr, p) \ | 343 | #define ext2_set_bit_atomic(lock, nr, p) \ |
| 344 | test_and_set_bit_le(nr, p) | 344 | test_and_set_bit_le(nr, p) |
| 345 | #define ext2_clear_bit_atomic(lock, nr, p) \ | 345 | #define ext2_clear_bit_atomic(lock, nr, p) \ |
| 346 | test_and_clear_bit_le(nr, p) | 346 | test_and_clear_bit_le(nr, p) |
| 347 | 347 | ||
| 348 | /* | ||
| 349 | * Minix is defined to use little-endian byte ordering. | ||
| 350 | * These do not need to be atomic. | ||
| 351 | */ | ||
| 352 | #define minix_set_bit __set_bit_le | ||
| 353 | #define minix_test_bit test_bit_le | ||
| 354 | #define minix_test_and_set_bit __test_and_set_bit_le | ||
| 355 | #define minix_test_and_clear_bit __test_and_clear_bit_le | ||
| 356 | #define minix_find_first_zero_bit find_first_zero_bit_le | ||
| 357 | |||
| 358 | #endif /* __KERNEL__ */ | 348 | #endif /* __KERNEL__ */ |
| 359 | 349 | ||
| 360 | #endif /* _ARM_BITOPS_H */ | 350 | #endif /* _ARM_BITOPS_H */ |
| 361 | 351 |
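The ARM hunk above keeps WORD_BITOFF_TO_LE(x) defined as ((x) ^ 0x18) for big-endian kernels. As a quick sanity check of why that XOR converts a little-endian-ordered bit index into the native bit number on a byte-swapped 32-bit word (illustration only, ordinary user-space C, not part of this commit):

/* Bit nr of a little-endian-ordered bitmap lives in byte (nr >> 3) at
 * position (nr & 7).  On a big-endian kernel, byte k of a 32-bit word
 * ends up at bits (3 - k) * 8 .. (3 - k) * 8 + 7 of the loaded word.
 * XORing nr with 0x18 flips the two byte-select bits, mapping k to
 * 3 - k while leaving the bit-within-byte and word index untouched. */
#include <assert.h>

static unsigned int le_bit_to_native(unsigned int nr)
{
	unsigned int word = nr >> 5;		/* which 32-bit word      */
	unsigned int byte = (nr >> 3) & 3;	/* byte within that word  */
	unsigned int bit  = nr & 7;		/* bit within that byte   */

	return word * 32 + (3 - byte) * 8 + bit;
}

int main(void)
{
	for (unsigned int nr = 0; nr < 256; nr++)
		assert(le_bit_to_native(nr) == (nr ^ 0x18));
	return 0;
}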
arch/avr32/include/asm/bitops.h
| 1 | /* | 1 | /* |
| 2 | * Copyright (C) 2004-2006 Atmel Corporation | 2 | * Copyright (C) 2004-2006 Atmel Corporation |
| 3 | * | 3 | * |
| 4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
| 5 | * it under the terms of the GNU General Public License version 2 as | 5 | * it under the terms of the GNU General Public License version 2 as |
| 6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
| 7 | */ | 7 | */ |
| 8 | #ifndef __ASM_AVR32_BITOPS_H | 8 | #ifndef __ASM_AVR32_BITOPS_H |
| 9 | #define __ASM_AVR32_BITOPS_H | 9 | #define __ASM_AVR32_BITOPS_H |
| 10 | 10 | ||
| 11 | #ifndef _LINUX_BITOPS_H | 11 | #ifndef _LINUX_BITOPS_H |
| 12 | #error only <linux/bitops.h> can be included directly | 12 | #error only <linux/bitops.h> can be included directly |
| 13 | #endif | 13 | #endif |
| 14 | 14 | ||
| 15 | #include <asm/byteorder.h> | 15 | #include <asm/byteorder.h> |
| 16 | #include <asm/system.h> | 16 | #include <asm/system.h> |
| 17 | 17 | ||
| 18 | /* | 18 | /* |
| 19 | * clear_bit() doesn't provide any barrier for the compiler | 19 | * clear_bit() doesn't provide any barrier for the compiler |
| 20 | */ | 20 | */ |
| 21 | #define smp_mb__before_clear_bit() barrier() | 21 | #define smp_mb__before_clear_bit() barrier() |
| 22 | #define smp_mb__after_clear_bit() barrier() | 22 | #define smp_mb__after_clear_bit() barrier() |
| 23 | 23 | ||
| 24 | /* | 24 | /* |
| 25 | * set_bit - Atomically set a bit in memory | 25 | * set_bit - Atomically set a bit in memory |
| 26 | * @nr: the bit to set | 26 | * @nr: the bit to set |
| 27 | * @addr: the address to start counting from | 27 | * @addr: the address to start counting from |
| 28 | * | 28 | * |
| 29 | * This function is atomic and may not be reordered. See __set_bit() | 29 | * This function is atomic and may not be reordered. See __set_bit() |
| 30 | * if you do not require the atomic guarantees. | 30 | * if you do not require the atomic guarantees. |
| 31 | * | 31 | * |
| 32 | * Note that @nr may be almost arbitrarily large; this function is not | 32 | * Note that @nr may be almost arbitrarily large; this function is not |
| 33 | * restricted to acting on a single-word quantity. | 33 | * restricted to acting on a single-word quantity. |
| 34 | */ | 34 | */ |
| 35 | static inline void set_bit(int nr, volatile void * addr) | 35 | static inline void set_bit(int nr, volatile void * addr) |
| 36 | { | 36 | { |
| 37 | unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG; | 37 | unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG; |
| 38 | unsigned long tmp; | 38 | unsigned long tmp; |
| 39 | 39 | ||
| 40 | if (__builtin_constant_p(nr)) { | 40 | if (__builtin_constant_p(nr)) { |
| 41 | asm volatile( | 41 | asm volatile( |
| 42 | "1: ssrf 5\n" | 42 | "1: ssrf 5\n" |
| 43 | " ld.w %0, %2\n" | 43 | " ld.w %0, %2\n" |
| 44 | " sbr %0, %3\n" | 44 | " sbr %0, %3\n" |
| 45 | " stcond %1, %0\n" | 45 | " stcond %1, %0\n" |
| 46 | " brne 1b" | 46 | " brne 1b" |
| 47 | : "=&r"(tmp), "=o"(*p) | 47 | : "=&r"(tmp), "=o"(*p) |
| 48 | : "m"(*p), "i"(nr) | 48 | : "m"(*p), "i"(nr) |
| 49 | : "cc"); | 49 | : "cc"); |
| 50 | } else { | 50 | } else { |
| 51 | unsigned long mask = 1UL << (nr % BITS_PER_LONG); | 51 | unsigned long mask = 1UL << (nr % BITS_PER_LONG); |
| 52 | asm volatile( | 52 | asm volatile( |
| 53 | "1: ssrf 5\n" | 53 | "1: ssrf 5\n" |
| 54 | " ld.w %0, %2\n" | 54 | " ld.w %0, %2\n" |
| 55 | " or %0, %3\n" | 55 | " or %0, %3\n" |
| 56 | " stcond %1, %0\n" | 56 | " stcond %1, %0\n" |
| 57 | " brne 1b" | 57 | " brne 1b" |
| 58 | : "=&r"(tmp), "=o"(*p) | 58 | : "=&r"(tmp), "=o"(*p) |
| 59 | : "m"(*p), "r"(mask) | 59 | : "m"(*p), "r"(mask) |
| 60 | : "cc"); | 60 | : "cc"); |
| 61 | } | 61 | } |
| 62 | } | 62 | } |
| 63 | 63 | ||
| 64 | /* | 64 | /* |
| 65 | * clear_bit - Clears a bit in memory | 65 | * clear_bit - Clears a bit in memory |
| 66 | * @nr: Bit to clear | 66 | * @nr: Bit to clear |
| 67 | * @addr: Address to start counting from | 67 | * @addr: Address to start counting from |
| 68 | * | 68 | * |
| 69 | * clear_bit() is atomic and may not be reordered. However, it does | 69 | * clear_bit() is atomic and may not be reordered. However, it does |
| 70 | * not contain a memory barrier, so if it is used for locking purposes, | 70 | * not contain a memory barrier, so if it is used for locking purposes, |
| 71 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | 71 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() |
| 72 | * in order to ensure changes are visible on other processors. | 72 | * in order to ensure changes are visible on other processors. |
| 73 | */ | 73 | */ |
| 74 | static inline void clear_bit(int nr, volatile void * addr) | 74 | static inline void clear_bit(int nr, volatile void * addr) |
| 75 | { | 75 | { |
| 76 | unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG; | 76 | unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG; |
| 77 | unsigned long tmp; | 77 | unsigned long tmp; |
| 78 | 78 | ||
| 79 | if (__builtin_constant_p(nr)) { | 79 | if (__builtin_constant_p(nr)) { |
| 80 | asm volatile( | 80 | asm volatile( |
| 81 | "1: ssrf 5\n" | 81 | "1: ssrf 5\n" |
| 82 | " ld.w %0, %2\n" | 82 | " ld.w %0, %2\n" |
| 83 | " cbr %0, %3\n" | 83 | " cbr %0, %3\n" |
| 84 | " stcond %1, %0\n" | 84 | " stcond %1, %0\n" |
| 85 | " brne 1b" | 85 | " brne 1b" |
| 86 | : "=&r"(tmp), "=o"(*p) | 86 | : "=&r"(tmp), "=o"(*p) |
| 87 | : "m"(*p), "i"(nr) | 87 | : "m"(*p), "i"(nr) |
| 88 | : "cc"); | 88 | : "cc"); |
| 89 | } else { | 89 | } else { |
| 90 | unsigned long mask = 1UL << (nr % BITS_PER_LONG); | 90 | unsigned long mask = 1UL << (nr % BITS_PER_LONG); |
| 91 | asm volatile( | 91 | asm volatile( |
| 92 | "1: ssrf 5\n" | 92 | "1: ssrf 5\n" |
| 93 | " ld.w %0, %2\n" | 93 | " ld.w %0, %2\n" |
| 94 | " andn %0, %3\n" | 94 | " andn %0, %3\n" |
| 95 | " stcond %1, %0\n" | 95 | " stcond %1, %0\n" |
| 96 | " brne 1b" | 96 | " brne 1b" |
| 97 | : "=&r"(tmp), "=o"(*p) | 97 | : "=&r"(tmp), "=o"(*p) |
| 98 | : "m"(*p), "r"(mask) | 98 | : "m"(*p), "r"(mask) |
| 99 | : "cc"); | 99 | : "cc"); |
| 100 | } | 100 | } |
| 101 | } | 101 | } |
| 102 | 102 | ||
| 103 | /* | 103 | /* |
| 104 | * change_bit - Toggle a bit in memory | 104 | * change_bit - Toggle a bit in memory |
| 105 | * @nr: Bit to change | 105 | * @nr: Bit to change |
| 106 | * @addr: Address to start counting from | 106 | * @addr: Address to start counting from |
| 107 | * | 107 | * |
| 108 | * change_bit() is atomic and may not be reordered. | 108 | * change_bit() is atomic and may not be reordered. |
| 109 | * Note that @nr may be almost arbitrarily large; this function is not | 109 | * Note that @nr may be almost arbitrarily large; this function is not |
| 110 | * restricted to acting on a single-word quantity. | 110 | * restricted to acting on a single-word quantity. |
| 111 | */ | 111 | */ |
| 112 | static inline void change_bit(int nr, volatile void * addr) | 112 | static inline void change_bit(int nr, volatile void * addr) |
| 113 | { | 113 | { |
| 114 | unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG; | 114 | unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG; |
| 115 | unsigned long mask = 1UL << (nr % BITS_PER_LONG); | 115 | unsigned long mask = 1UL << (nr % BITS_PER_LONG); |
| 116 | unsigned long tmp; | 116 | unsigned long tmp; |
| 117 | 117 | ||
| 118 | asm volatile( | 118 | asm volatile( |
| 119 | "1: ssrf 5\n" | 119 | "1: ssrf 5\n" |
| 120 | " ld.w %0, %2\n" | 120 | " ld.w %0, %2\n" |
| 121 | " eor %0, %3\n" | 121 | " eor %0, %3\n" |
| 122 | " stcond %1, %0\n" | 122 | " stcond %1, %0\n" |
| 123 | " brne 1b" | 123 | " brne 1b" |
| 124 | : "=&r"(tmp), "=o"(*p) | 124 | : "=&r"(tmp), "=o"(*p) |
| 125 | : "m"(*p), "r"(mask) | 125 | : "m"(*p), "r"(mask) |
| 126 | : "cc"); | 126 | : "cc"); |
| 127 | } | 127 | } |
| 128 | 128 | ||
| 129 | /* | 129 | /* |
| 130 | * test_and_set_bit - Set a bit and return its old value | 130 | * test_and_set_bit - Set a bit and return its old value |
| 131 | * @nr: Bit to set | 131 | * @nr: Bit to set |
| 132 | * @addr: Address to count from | 132 | * @addr: Address to count from |
| 133 | * | 133 | * |
| 134 | * This operation is atomic and cannot be reordered. | 134 | * This operation is atomic and cannot be reordered. |
| 135 | * It also implies a memory barrier. | 135 | * It also implies a memory barrier. |
| 136 | */ | 136 | */ |
| 137 | static inline int test_and_set_bit(int nr, volatile void * addr) | 137 | static inline int test_and_set_bit(int nr, volatile void * addr) |
| 138 | { | 138 | { |
| 139 | unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG; | 139 | unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG; |
| 140 | unsigned long mask = 1UL << (nr % BITS_PER_LONG); | 140 | unsigned long mask = 1UL << (nr % BITS_PER_LONG); |
| 141 | unsigned long tmp, old; | 141 | unsigned long tmp, old; |
| 142 | 142 | ||
| 143 | if (__builtin_constant_p(nr)) { | 143 | if (__builtin_constant_p(nr)) { |
| 144 | asm volatile( | 144 | asm volatile( |
| 145 | "1: ssrf 5\n" | 145 | "1: ssrf 5\n" |
| 146 | " ld.w %0, %3\n" | 146 | " ld.w %0, %3\n" |
| 147 | " mov %2, %0\n" | 147 | " mov %2, %0\n" |
| 148 | " sbr %0, %4\n" | 148 | " sbr %0, %4\n" |
| 149 | " stcond %1, %0\n" | 149 | " stcond %1, %0\n" |
| 150 | " brne 1b" | 150 | " brne 1b" |
| 151 | : "=&r"(tmp), "=o"(*p), "=&r"(old) | 151 | : "=&r"(tmp), "=o"(*p), "=&r"(old) |
| 152 | : "m"(*p), "i"(nr) | 152 | : "m"(*p), "i"(nr) |
| 153 | : "memory", "cc"); | 153 | : "memory", "cc"); |
| 154 | } else { | 154 | } else { |
| 155 | asm volatile( | 155 | asm volatile( |
| 156 | "1: ssrf 5\n" | 156 | "1: ssrf 5\n" |
| 157 | " ld.w %2, %3\n" | 157 | " ld.w %2, %3\n" |
| 158 | " or %0, %2, %4\n" | 158 | " or %0, %2, %4\n" |
| 159 | " stcond %1, %0\n" | 159 | " stcond %1, %0\n" |
| 160 | " brne 1b" | 160 | " brne 1b" |
| 161 | : "=&r"(tmp), "=o"(*p), "=&r"(old) | 161 | : "=&r"(tmp), "=o"(*p), "=&r"(old) |
| 162 | : "m"(*p), "r"(mask) | 162 | : "m"(*p), "r"(mask) |
| 163 | : "memory", "cc"); | 163 | : "memory", "cc"); |
| 164 | } | 164 | } |
| 165 | 165 | ||
| 166 | return (old & mask) != 0; | 166 | return (old & mask) != 0; |
| 167 | } | 167 | } |
| 168 | 168 | ||
| 169 | /* | 169 | /* |
| 170 | * test_and_clear_bit - Clear a bit and return its old value | 170 | * test_and_clear_bit - Clear a bit and return its old value |
| 171 | * @nr: Bit to clear | 171 | * @nr: Bit to clear |
| 172 | * @addr: Address to count from | 172 | * @addr: Address to count from |
| 173 | * | 173 | * |
| 174 | * This operation is atomic and cannot be reordered. | 174 | * This operation is atomic and cannot be reordered. |
| 175 | * It also implies a memory barrier. | 175 | * It also implies a memory barrier. |
| 176 | */ | 176 | */ |
| 177 | static inline int test_and_clear_bit(int nr, volatile void * addr) | 177 | static inline int test_and_clear_bit(int nr, volatile void * addr) |
| 178 | { | 178 | { |
| 179 | unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG; | 179 | unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG; |
| 180 | unsigned long mask = 1UL << (nr % BITS_PER_LONG); | 180 | unsigned long mask = 1UL << (nr % BITS_PER_LONG); |
| 181 | unsigned long tmp, old; | 181 | unsigned long tmp, old; |
| 182 | 182 | ||
| 183 | if (__builtin_constant_p(nr)) { | 183 | if (__builtin_constant_p(nr)) { |
| 184 | asm volatile( | 184 | asm volatile( |
| 185 | "1: ssrf 5\n" | 185 | "1: ssrf 5\n" |
| 186 | " ld.w %0, %3\n" | 186 | " ld.w %0, %3\n" |
| 187 | " mov %2, %0\n" | 187 | " mov %2, %0\n" |
| 188 | " cbr %0, %4\n" | 188 | " cbr %0, %4\n" |
| 189 | " stcond %1, %0\n" | 189 | " stcond %1, %0\n" |
| 190 | " brne 1b" | 190 | " brne 1b" |
| 191 | : "=&r"(tmp), "=o"(*p), "=&r"(old) | 191 | : "=&r"(tmp), "=o"(*p), "=&r"(old) |
| 192 | : "m"(*p), "i"(nr) | 192 | : "m"(*p), "i"(nr) |
| 193 | : "memory", "cc"); | 193 | : "memory", "cc"); |
| 194 | } else { | 194 | } else { |
| 195 | asm volatile( | 195 | asm volatile( |
| 196 | "1: ssrf 5\n" | 196 | "1: ssrf 5\n" |
| 197 | " ld.w %0, %3\n" | 197 | " ld.w %0, %3\n" |
| 198 | " mov %2, %0\n" | 198 | " mov %2, %0\n" |
| 199 | " andn %0, %4\n" | 199 | " andn %0, %4\n" |
| 200 | " stcond %1, %0\n" | 200 | " stcond %1, %0\n" |
| 201 | " brne 1b" | 201 | " brne 1b" |
| 202 | : "=&r"(tmp), "=o"(*p), "=&r"(old) | 202 | : "=&r"(tmp), "=o"(*p), "=&r"(old) |
| 203 | : "m"(*p), "r"(mask) | 203 | : "m"(*p), "r"(mask) |
| 204 | : "memory", "cc"); | 204 | : "memory", "cc"); |
| 205 | } | 205 | } |
| 206 | 206 | ||
| 207 | return (old & mask) != 0; | 207 | return (old & mask) != 0; |
| 208 | } | 208 | } |
| 209 | 209 | ||
| 210 | /* | 210 | /* |
| 211 | * test_and_change_bit - Change a bit and return its old value | 211 | * test_and_change_bit - Change a bit and return its old value |
| 212 | * @nr: Bit to change | 212 | * @nr: Bit to change |
| 213 | * @addr: Address to count from | 213 | * @addr: Address to count from |
| 214 | * | 214 | * |
| 215 | * This operation is atomic and cannot be reordered. | 215 | * This operation is atomic and cannot be reordered. |
| 216 | * It also implies a memory barrier. | 216 | * It also implies a memory barrier. |
| 217 | */ | 217 | */ |
| 218 | static inline int test_and_change_bit(int nr, volatile void * addr) | 218 | static inline int test_and_change_bit(int nr, volatile void * addr) |
| 219 | { | 219 | { |
| 220 | unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG; | 220 | unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG; |
| 221 | unsigned long mask = 1UL << (nr % BITS_PER_LONG); | 221 | unsigned long mask = 1UL << (nr % BITS_PER_LONG); |
| 222 | unsigned long tmp, old; | 222 | unsigned long tmp, old; |
| 223 | 223 | ||
| 224 | asm volatile( | 224 | asm volatile( |
| 225 | "1: ssrf 5\n" | 225 | "1: ssrf 5\n" |
| 226 | " ld.w %2, %3\n" | 226 | " ld.w %2, %3\n" |
| 227 | " eor %0, %2, %4\n" | 227 | " eor %0, %2, %4\n" |
| 228 | " stcond %1, %0\n" | 228 | " stcond %1, %0\n" |
| 229 | " brne 1b" | 229 | " brne 1b" |
| 230 | : "=&r"(tmp), "=o"(*p), "=&r"(old) | 230 | : "=&r"(tmp), "=o"(*p), "=&r"(old) |
| 231 | : "m"(*p), "r"(mask) | 231 | : "m"(*p), "r"(mask) |
| 232 | : "memory", "cc"); | 232 | : "memory", "cc"); |
| 233 | 233 | ||
| 234 | return (old & mask) != 0; | 234 | return (old & mask) != 0; |
| 235 | } | 235 | } |
| 236 | 236 | ||
| 237 | #include <asm-generic/bitops/non-atomic.h> | 237 | #include <asm-generic/bitops/non-atomic.h> |
| 238 | 238 | ||
| 239 | /* Find First bit Set */ | 239 | /* Find First bit Set */ |
| 240 | static inline unsigned long __ffs(unsigned long word) | 240 | static inline unsigned long __ffs(unsigned long word) |
| 241 | { | 241 | { |
| 242 | unsigned long result; | 242 | unsigned long result; |
| 243 | 243 | ||
| 244 | asm("brev %1\n\t" | 244 | asm("brev %1\n\t" |
| 245 | "clz %0,%1" | 245 | "clz %0,%1" |
| 246 | : "=r"(result), "=&r"(word) | 246 | : "=r"(result), "=&r"(word) |
| 247 | : "1"(word)); | 247 | : "1"(word)); |
| 248 | return result; | 248 | return result; |
| 249 | } | 249 | } |
| 250 | 250 | ||
| 251 | /* Find First Zero */ | 251 | /* Find First Zero */ |
| 252 | static inline unsigned long ffz(unsigned long word) | 252 | static inline unsigned long ffz(unsigned long word) |
| 253 | { | 253 | { |
| 254 | return __ffs(~word); | 254 | return __ffs(~word); |
| 255 | } | 255 | } |
| 256 | 256 | ||
| 257 | /* Find Last bit Set */ | 257 | /* Find Last bit Set */ |
| 258 | static inline int fls(unsigned long word) | 258 | static inline int fls(unsigned long word) |
| 259 | { | 259 | { |
| 260 | unsigned long result; | 260 | unsigned long result; |
| 261 | 261 | ||
| 262 | asm("clz %0,%1" : "=r"(result) : "r"(word)); | 262 | asm("clz %0,%1" : "=r"(result) : "r"(word)); |
| 263 | return 32 - result; | 263 | return 32 - result; |
| 264 | } | 264 | } |
| 265 | 265 | ||
| 266 | static inline int __fls(unsigned long word) | 266 | static inline int __fls(unsigned long word) |
| 267 | { | 267 | { |
| 268 | return fls(word) - 1; | 268 | return fls(word) - 1; |
| 269 | } | 269 | } |
| 270 | 270 | ||
| 271 | unsigned long find_first_zero_bit(const unsigned long *addr, | 271 | unsigned long find_first_zero_bit(const unsigned long *addr, |
| 272 | unsigned long size); | 272 | unsigned long size); |
| 273 | unsigned long find_next_zero_bit(const unsigned long *addr, | 273 | unsigned long find_next_zero_bit(const unsigned long *addr, |
| 274 | unsigned long size, | 274 | unsigned long size, |
| 275 | unsigned long offset); | 275 | unsigned long offset); |
| 276 | unsigned long find_first_bit(const unsigned long *addr, | 276 | unsigned long find_first_bit(const unsigned long *addr, |
| 277 | unsigned long size); | 277 | unsigned long size); |
| 278 | unsigned long find_next_bit(const unsigned long *addr, | 278 | unsigned long find_next_bit(const unsigned long *addr, |
| 279 | unsigned long size, | 279 | unsigned long size, |
| 280 | unsigned long offset); | 280 | unsigned long offset); |
| 281 | 281 | ||
| 282 | /* | 282 | /* |
| 283 | * ffs: find first bit set. This is defined the same way as | 283 | * ffs: find first bit set. This is defined the same way as |
| 284 | * the libc and compiler builtin ffs routines, therefore | 284 | * the libc and compiler builtin ffs routines, therefore |
| 285 | * differs in spirit from the above ffz (man ffs). | 285 | * differs in spirit from the above ffz (man ffs). |
| 286 | * | 286 | * |
| 287 | * The difference is that bit numbering starts at 1, and if no bit is set, | 287 | * The difference is that bit numbering starts at 1, and if no bit is set, |
| 288 | * the function returns 0. | 288 | * the function returns 0. |
| 289 | */ | 289 | */ |
| 290 | static inline int ffs(unsigned long word) | 290 | static inline int ffs(unsigned long word) |
| 291 | { | 291 | { |
| 292 | if(word == 0) | 292 | if(word == 0) |
| 293 | return 0; | 293 | return 0; |
| 294 | return __ffs(word) + 1; | 294 | return __ffs(word) + 1; |
| 295 | } | 295 | } |
| 296 | 296 | ||
| 297 | #include <asm-generic/bitops/fls64.h> | 297 | #include <asm-generic/bitops/fls64.h> |
| 298 | #include <asm-generic/bitops/sched.h> | 298 | #include <asm-generic/bitops/sched.h> |
| 299 | #include <asm-generic/bitops/hweight.h> | 299 | #include <asm-generic/bitops/hweight.h> |
| 300 | #include <asm-generic/bitops/lock.h> | 300 | #include <asm-generic/bitops/lock.h> |
| 301 | 301 | ||
| 302 | #include <asm-generic/bitops/le.h> | 302 | #include <asm-generic/bitops/le.h> |
| 303 | #include <asm-generic/bitops/ext2-atomic.h> | 303 | #include <asm-generic/bitops/ext2-atomic.h> |
| 304 | #include <asm-generic/bitops/minix-le.h> | ||
| 305 | 304 | ||
| 306 | #endif /* __ASM_AVR32_BITOPS_H */ | 305 | #endif /* __ASM_AVR32_BITOPS_H */ |
| 307 | 306 |
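The bit-search helpers at the end of the avr32 header are all built from one primitive: ffz(x) is just __ffs(~x), and ffs(x) is __ffs(x) + 1 (or 0 when no bit is set). Below is a minimal portable sketch of those relationships, for illustration only; the ex_* names are hypothetical and are not part of this patch.

    /* Illustrative only: portable equivalents of the avr32 bit-search relationships. */
    static inline unsigned long ex_ffs_word(unsigned long word)
    {
            unsigned long bit = 0;

            while (!(word & 1UL)) {         /* like __ffs(): result undefined for word == 0 */
                    word >>= 1;
                    bit++;
            }
            return bit;
    }

    static inline unsigned long ex_ffz(unsigned long word)
    {
            return ex_ffs_word(~word);      /* first zero bit == first set bit of ~word */
    }

    static inline int ex_ffs(unsigned long word)
    {
            return word ? (int)ex_ffs_word(word) + 1 : 0;   /* 1-based; 0 when no bit is set */
    }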
arch/blackfin/include/asm/bitops.h
| 1 | /* | 1 | /* |
| 2 | * Copyright 2004-2009 Analog Devices Inc. | 2 | * Copyright 2004-2009 Analog Devices Inc. |
| 3 | * | 3 | * |
| 4 | * Licensed under the GPL-2 or later. | 4 | * Licensed under the GPL-2 or later. |
| 5 | */ | 5 | */ |
| 6 | 6 | ||
| 7 | #ifndef _BLACKFIN_BITOPS_H | 7 | #ifndef _BLACKFIN_BITOPS_H |
| 8 | #define _BLACKFIN_BITOPS_H | 8 | #define _BLACKFIN_BITOPS_H |
| 9 | 9 | ||
| 10 | #include <linux/compiler.h> | 10 | #include <linux/compiler.h> |
| 11 | 11 | ||
| 12 | #include <asm-generic/bitops/__ffs.h> | 12 | #include <asm-generic/bitops/__ffs.h> |
| 13 | #include <asm-generic/bitops/ffz.h> | 13 | #include <asm-generic/bitops/ffz.h> |
| 14 | #include <asm-generic/bitops/fls.h> | 14 | #include <asm-generic/bitops/fls.h> |
| 15 | #include <asm-generic/bitops/__fls.h> | 15 | #include <asm-generic/bitops/__fls.h> |
| 16 | #include <asm-generic/bitops/fls64.h> | 16 | #include <asm-generic/bitops/fls64.h> |
| 17 | #include <asm-generic/bitops/find.h> | 17 | #include <asm-generic/bitops/find.h> |
| 18 | 18 | ||
| 19 | #ifndef _LINUX_BITOPS_H | 19 | #ifndef _LINUX_BITOPS_H |
| 20 | #error only <linux/bitops.h> can be included directly | 20 | #error only <linux/bitops.h> can be included directly |
| 21 | #endif | 21 | #endif |
| 22 | 22 | ||
| 23 | #include <asm-generic/bitops/sched.h> | 23 | #include <asm-generic/bitops/sched.h> |
| 24 | #include <asm-generic/bitops/ffs.h> | 24 | #include <asm-generic/bitops/ffs.h> |
| 25 | #include <asm-generic/bitops/const_hweight.h> | 25 | #include <asm-generic/bitops/const_hweight.h> |
| 26 | #include <asm-generic/bitops/lock.h> | 26 | #include <asm-generic/bitops/lock.h> |
| 27 | 27 | ||
| 28 | #include <asm-generic/bitops/le.h> | 28 | #include <asm-generic/bitops/le.h> |
| 29 | #include <asm-generic/bitops/ext2-atomic.h> | 29 | #include <asm-generic/bitops/ext2-atomic.h> |
| 30 | #include <asm-generic/bitops/minix.h> | ||
| 31 | 30 | ||
| 32 | #ifndef CONFIG_SMP | 31 | #ifndef CONFIG_SMP |
| 33 | #include <linux/irqflags.h> | 32 | #include <linux/irqflags.h> |
| 34 | 33 | ||
| 35 | /* | 34 | /* |
| 36 | * clear_bit may not imply a memory barrier | 35 | * clear_bit may not imply a memory barrier |
| 37 | */ | 36 | */ |
| 38 | #ifndef smp_mb__before_clear_bit | 37 | #ifndef smp_mb__before_clear_bit |
| 39 | #define smp_mb__before_clear_bit() smp_mb() | 38 | #define smp_mb__before_clear_bit() smp_mb() |
| 40 | #define smp_mb__after_clear_bit() smp_mb() | 39 | #define smp_mb__after_clear_bit() smp_mb() |
| 41 | #endif | 40 | #endif |
| 42 | #include <asm-generic/bitops/atomic.h> | 41 | #include <asm-generic/bitops/atomic.h> |
| 43 | #include <asm-generic/bitops/non-atomic.h> | 42 | #include <asm-generic/bitops/non-atomic.h> |
| 44 | #else | 43 | #else |
| 45 | 44 | ||
| 46 | #include <asm/byteorder.h> /* swab32 */ | 45 | #include <asm/byteorder.h> /* swab32 */ |
| 47 | #include <linux/linkage.h> | 46 | #include <linux/linkage.h> |
| 48 | 47 | ||
| 49 | asmlinkage int __raw_bit_set_asm(volatile unsigned long *addr, int nr); | 48 | asmlinkage int __raw_bit_set_asm(volatile unsigned long *addr, int nr); |
| 50 | 49 | ||
| 51 | asmlinkage int __raw_bit_clear_asm(volatile unsigned long *addr, int nr); | 50 | asmlinkage int __raw_bit_clear_asm(volatile unsigned long *addr, int nr); |
| 52 | 51 | ||
| 53 | asmlinkage int __raw_bit_toggle_asm(volatile unsigned long *addr, int nr); | 52 | asmlinkage int __raw_bit_toggle_asm(volatile unsigned long *addr, int nr); |
| 54 | 53 | ||
| 55 | asmlinkage int __raw_bit_test_set_asm(volatile unsigned long *addr, int nr); | 54 | asmlinkage int __raw_bit_test_set_asm(volatile unsigned long *addr, int nr); |
| 56 | 55 | ||
| 57 | asmlinkage int __raw_bit_test_clear_asm(volatile unsigned long *addr, int nr); | 56 | asmlinkage int __raw_bit_test_clear_asm(volatile unsigned long *addr, int nr); |
| 58 | 57 | ||
| 59 | asmlinkage int __raw_bit_test_toggle_asm(volatile unsigned long *addr, int nr); | 58 | asmlinkage int __raw_bit_test_toggle_asm(volatile unsigned long *addr, int nr); |
| 60 | 59 | ||
| 61 | asmlinkage int __raw_bit_test_asm(const volatile unsigned long *addr, int nr); | 60 | asmlinkage int __raw_bit_test_asm(const volatile unsigned long *addr, int nr); |
| 62 | 61 | ||
| 63 | static inline void set_bit(int nr, volatile unsigned long *addr) | 62 | static inline void set_bit(int nr, volatile unsigned long *addr) |
| 64 | { | 63 | { |
| 65 | volatile unsigned long *a = addr + (nr >> 5); | 64 | volatile unsigned long *a = addr + (nr >> 5); |
| 66 | __raw_bit_set_asm(a, nr & 0x1f); | 65 | __raw_bit_set_asm(a, nr & 0x1f); |
| 67 | } | 66 | } |
| 68 | 67 | ||
| 69 | static inline void clear_bit(int nr, volatile unsigned long *addr) | 68 | static inline void clear_bit(int nr, volatile unsigned long *addr) |
| 70 | { | 69 | { |
| 71 | volatile unsigned long *a = addr + (nr >> 5); | 70 | volatile unsigned long *a = addr + (nr >> 5); |
| 72 | __raw_bit_clear_asm(a, nr & 0x1f); | 71 | __raw_bit_clear_asm(a, nr & 0x1f); |
| 73 | } | 72 | } |
| 74 | 73 | ||
| 75 | static inline void change_bit(int nr, volatile unsigned long *addr) | 74 | static inline void change_bit(int nr, volatile unsigned long *addr) |
| 76 | { | 75 | { |
| 77 | volatile unsigned long *a = addr + (nr >> 5); | 76 | volatile unsigned long *a = addr + (nr >> 5); |
| 78 | __raw_bit_toggle_asm(a, nr & 0x1f); | 77 | __raw_bit_toggle_asm(a, nr & 0x1f); |
| 79 | } | 78 | } |
| 80 | 79 | ||
| 81 | static inline int test_bit(int nr, const volatile unsigned long *addr) | 80 | static inline int test_bit(int nr, const volatile unsigned long *addr) |
| 82 | { | 81 | { |
| 83 | volatile const unsigned long *a = addr + (nr >> 5); | 82 | volatile const unsigned long *a = addr + (nr >> 5); |
| 84 | return __raw_bit_test_asm(a, nr & 0x1f) != 0; | 83 | return __raw_bit_test_asm(a, nr & 0x1f) != 0; |
| 85 | } | 84 | } |
| 86 | 85 | ||
| 87 | static inline int test_and_set_bit(int nr, volatile unsigned long *addr) | 86 | static inline int test_and_set_bit(int nr, volatile unsigned long *addr) |
| 88 | { | 87 | { |
| 89 | volatile unsigned long *a = addr + (nr >> 5); | 88 | volatile unsigned long *a = addr + (nr >> 5); |
| 90 | return __raw_bit_test_set_asm(a, nr & 0x1f); | 89 | return __raw_bit_test_set_asm(a, nr & 0x1f); |
| 91 | } | 90 | } |
| 92 | 91 | ||
| 93 | static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) | 92 | static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) |
| 94 | { | 93 | { |
| 95 | volatile unsigned long *a = addr + (nr >> 5); | 94 | volatile unsigned long *a = addr + (nr >> 5); |
| 96 | return __raw_bit_test_clear_asm(a, nr & 0x1f); | 95 | return __raw_bit_test_clear_asm(a, nr & 0x1f); |
| 97 | } | 96 | } |
| 98 | 97 | ||
| 99 | static inline int test_and_change_bit(int nr, volatile unsigned long *addr) | 98 | static inline int test_and_change_bit(int nr, volatile unsigned long *addr) |
| 100 | { | 99 | { |
| 101 | volatile unsigned long *a = addr + (nr >> 5); | 100 | volatile unsigned long *a = addr + (nr >> 5); |
| 102 | return __raw_bit_test_toggle_asm(a, nr & 0x1f); | 101 | return __raw_bit_test_toggle_asm(a, nr & 0x1f); |
| 103 | } | 102 | } |
| 104 | 103 | ||
| 105 | /* | 104 | /* |
| 106 | * clear_bit() doesn't provide any barrier for the compiler. | 105 | * clear_bit() doesn't provide any barrier for the compiler. |
| 107 | */ | 106 | */ |
| 108 | #define smp_mb__before_clear_bit() barrier() | 107 | #define smp_mb__before_clear_bit() barrier() |
| 109 | #define smp_mb__after_clear_bit() barrier() | 108 | #define smp_mb__after_clear_bit() barrier() |
| 110 | 109 | ||
| 111 | #define test_bit __skip_test_bit | 110 | #define test_bit __skip_test_bit |
| 112 | #include <asm-generic/bitops/non-atomic.h> | 111 | #include <asm-generic/bitops/non-atomic.h> |
| 113 | #undef test_bit | 112 | #undef test_bit |
| 114 | 113 | ||
| 115 | #endif /* CONFIG_SMP */ | 114 | #endif /* CONFIG_SMP */ |
| 116 | 115 | ||
| 117 | /* | 116 | /* |
| 118 | * hweightN: returns the hamming weight (i.e. the number | 117 | * hweightN: returns the hamming weight (i.e. the number |
| 119 | * of bits set) of an N-bit word | 118 | * of bits set) of an N-bit word |
| 120 | */ | 119 | */ |
| 121 | 120 | ||
| 122 | static inline unsigned int __arch_hweight32(unsigned int w) | 121 | static inline unsigned int __arch_hweight32(unsigned int w) |
| 123 | { | 122 | { |
| 124 | unsigned int res; | 123 | unsigned int res; |
| 125 | 124 | ||
| 126 | __asm__ ("%0.l = ONES %1;" | 125 | __asm__ ("%0.l = ONES %1;" |
| 127 | "%0 = %0.l (Z);" | 126 | "%0 = %0.l (Z);" |
| 128 | : "=d" (res) : "d" (w)); | 127 | : "=d" (res) : "d" (w)); |
| 129 | return res; | 128 | return res; |
| 130 | } | 129 | } |
| 131 | 130 | ||
| 132 | static inline unsigned int __arch_hweight64(__u64 w) | 131 | static inline unsigned int __arch_hweight64(__u64 w) |
| 133 | { | 132 | { |
| 134 | return __arch_hweight32((unsigned int)(w >> 32)) + | 133 | return __arch_hweight32((unsigned int)(w >> 32)) + |
| 135 | __arch_hweight32((unsigned int)w); | 134 | __arch_hweight32((unsigned int)w); |
| 136 | } | 135 | } |
| 137 | 136 | ||
| 138 | static inline unsigned int __arch_hweight16(unsigned int w) | 137 | static inline unsigned int __arch_hweight16(unsigned int w) |
| 139 | { | 138 | { |
| 140 | return __arch_hweight32(w & 0xffff); | 139 | return __arch_hweight32(w & 0xffff); |
| 141 | } | 140 | } |
| 142 | 141 | ||
| 143 | static inline unsigned int __arch_hweight8(unsigned int w) | 142 | static inline unsigned int __arch_hweight8(unsigned int w) |
| 144 | { | 143 | { |
| 145 | return __arch_hweight32(w & 0xff); | 144 | return __arch_hweight32(w & 0xff); |
| 146 | } | 145 | } |
| 147 | 146 | ||
| 148 | #endif /* _BLACKFIN_BITOPS_H */ | 147 | #endif /* _BLACKFIN_BITOPS_H */ |
| 149 | 148 |
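__arch_hweight32() above leans on the Blackfin ONES instruction, and the 64-, 16- and 8-bit variants are just splitting or narrowing wrappers around it. A portable sketch of what these helpers compute, for illustration only (the ex_* names are hypothetical and not part of this patch):

    /* Illustrative only: software population count, the value ONES produces. */
    static inline unsigned int ex_hweight32(unsigned int w)
    {
            unsigned int res = 0;

            while (w) {
                    w &= w - 1;     /* clear the lowest set bit */
                    res++;
            }
            return res;
    }

    static inline unsigned int ex_hweight64(unsigned long long w)
    {
            /* same high-word + low-word split as __arch_hweight64() above */
            return ex_hweight32((unsigned int)(w >> 32)) + ex_hweight32((unsigned int)w);
    }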
arch/cris/include/asm/bitops.h
| 1 | /* asm/bitops.h for Linux/CRIS | 1 | /* asm/bitops.h for Linux/CRIS |
| 2 | * | 2 | * |
| 3 | * TODO: asm versions if speed is needed | 3 | * TODO: asm versions if speed is needed |
| 4 | * | 4 | * |
| 5 | * All bit operations return 0 if the bit was cleared before the | 5 | * All bit operations return 0 if the bit was cleared before the |
| 6 | * operation and != 0 if it was not. | 6 | * operation and != 0 if it was not. |
| 7 | * | 7 | * |
| 8 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). | 8 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). |
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | #ifndef _CRIS_BITOPS_H | 11 | #ifndef _CRIS_BITOPS_H |
| 12 | #define _CRIS_BITOPS_H | 12 | #define _CRIS_BITOPS_H |
| 13 | 13 | ||
| 14 | /* Currently this is unsuitable for consumption outside the kernel. */ | 14 | /* Currently this is unsuitable for consumption outside the kernel. */ |
| 15 | #ifdef __KERNEL__ | 15 | #ifdef __KERNEL__ |
| 16 | 16 | ||
| 17 | #ifndef _LINUX_BITOPS_H | 17 | #ifndef _LINUX_BITOPS_H |
| 18 | #error only <linux/bitops.h> can be included directly | 18 | #error only <linux/bitops.h> can be included directly |
| 19 | #endif | 19 | #endif |
| 20 | 20 | ||
| 21 | #include <arch/bitops.h> | 21 | #include <arch/bitops.h> |
| 22 | #include <asm/system.h> | 22 | #include <asm/system.h> |
| 23 | #include <asm/atomic.h> | 23 | #include <asm/atomic.h> |
| 24 | #include <linux/compiler.h> | 24 | #include <linux/compiler.h> |
| 25 | 25 | ||
| 26 | /* | 26 | /* |
| 27 | * set_bit - Atomically set a bit in memory | 27 | * set_bit - Atomically set a bit in memory |
| 28 | * @nr: the bit to set | 28 | * @nr: the bit to set |
| 29 | * @addr: the address to start counting from | 29 | * @addr: the address to start counting from |
| 30 | * | 30 | * |
| 31 | * This function is atomic and may not be reordered. See __set_bit() | 31 | * This function is atomic and may not be reordered. See __set_bit() |
| 32 | * if you do not require the atomic guarantees. | 32 | * if you do not require the atomic guarantees. |
| 33 | * Note that @nr may be almost arbitrarily large; this function is not | 33 | * Note that @nr may be almost arbitrarily large; this function is not |
| 34 | * restricted to acting on a single-word quantity. | 34 | * restricted to acting on a single-word quantity. |
| 35 | */ | 35 | */ |
| 36 | 36 | ||
| 37 | #define set_bit(nr, addr) (void)test_and_set_bit(nr, addr) | 37 | #define set_bit(nr, addr) (void)test_and_set_bit(nr, addr) |
| 38 | 38 | ||
| 39 | /* | 39 | /* |
| 40 | * clear_bit - Clears a bit in memory | 40 | * clear_bit - Clears a bit in memory |
| 41 | * @nr: Bit to clear | 41 | * @nr: Bit to clear |
| 42 | * @addr: Address to start counting from | 42 | * @addr: Address to start counting from |
| 43 | * | 43 | * |
| 44 | * clear_bit() is atomic and may not be reordered. However, it does | 44 | * clear_bit() is atomic and may not be reordered. However, it does |
| 45 | * not contain a memory barrier, so if it is used for locking purposes, | 45 | * not contain a memory barrier, so if it is used for locking purposes, |
| 46 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | 46 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() |
| 47 | * in order to ensure changes are visible on other processors. | 47 | * in order to ensure changes are visible on other processors. |
| 48 | */ | 48 | */ |
| 49 | 49 | ||
| 50 | #define clear_bit(nr, addr) (void)test_and_clear_bit(nr, addr) | 50 | #define clear_bit(nr, addr) (void)test_and_clear_bit(nr, addr) |
| 51 | 51 | ||
| 52 | /* | 52 | /* |
| 53 | * change_bit - Toggle a bit in memory | 53 | * change_bit - Toggle a bit in memory |
| 54 | * @nr: Bit to change | 54 | * @nr: Bit to change |
| 55 | * @addr: Address to start counting from | 55 | * @addr: Address to start counting from |
| 56 | * | 56 | * |
| 57 | * change_bit() is atomic and may not be reordered. | 57 | * change_bit() is atomic and may not be reordered. |
| 58 | * Note that @nr may be almost arbitrarily large; this function is not | 58 | * Note that @nr may be almost arbitrarily large; this function is not |
| 59 | * restricted to acting on a single-word quantity. | 59 | * restricted to acting on a single-word quantity. |
| 60 | */ | 60 | */ |
| 61 | 61 | ||
| 62 | #define change_bit(nr, addr) (void)test_and_change_bit(nr, addr) | 62 | #define change_bit(nr, addr) (void)test_and_change_bit(nr, addr) |
| 63 | 63 | ||
| 64 | /** | 64 | /** |
| 65 | * test_and_set_bit - Set a bit and return its old value | 65 | * test_and_set_bit - Set a bit and return its old value |
| 66 | * @nr: Bit to set | 66 | * @nr: Bit to set |
| 67 | * @addr: Address to count from | 67 | * @addr: Address to count from |
| 68 | * | 68 | * |
| 69 | * This operation is atomic and cannot be reordered. | 69 | * This operation is atomic and cannot be reordered. |
| 70 | * It also implies a memory barrier. | 70 | * It also implies a memory barrier. |
| 71 | */ | 71 | */ |
| 72 | 72 | ||
| 73 | static inline int test_and_set_bit(int nr, volatile unsigned long *addr) | 73 | static inline int test_and_set_bit(int nr, volatile unsigned long *addr) |
| 74 | { | 74 | { |
| 75 | unsigned int mask, retval; | 75 | unsigned int mask, retval; |
| 76 | unsigned long flags; | 76 | unsigned long flags; |
| 77 | unsigned int *adr = (unsigned int *)addr; | 77 | unsigned int *adr = (unsigned int *)addr; |
| 78 | 78 | ||
| 79 | adr += nr >> 5; | 79 | adr += nr >> 5; |
| 80 | mask = 1 << (nr & 0x1f); | 80 | mask = 1 << (nr & 0x1f); |
| 81 | cris_atomic_save(addr, flags); | 81 | cris_atomic_save(addr, flags); |
| 82 | retval = (mask & *adr) != 0; | 82 | retval = (mask & *adr) != 0; |
| 83 | *adr |= mask; | 83 | *adr |= mask; |
| 84 | cris_atomic_restore(addr, flags); | 84 | cris_atomic_restore(addr, flags); |
| 85 | return retval; | 85 | return retval; |
| 86 | } | 86 | } |
| 87 | 87 | ||
| 88 | /* | 88 | /* |
| 89 | * clear_bit() doesn't provide any barrier for the compiler. | 89 | * clear_bit() doesn't provide any barrier for the compiler. |
| 90 | */ | 90 | */ |
| 91 | #define smp_mb__before_clear_bit() barrier() | 91 | #define smp_mb__before_clear_bit() barrier() |
| 92 | #define smp_mb__after_clear_bit() barrier() | 92 | #define smp_mb__after_clear_bit() barrier() |
| 93 | 93 | ||
| 94 | /** | 94 | /** |
| 95 | * test_and_clear_bit - Clear a bit and return its old value | 95 | * test_and_clear_bit - Clear a bit and return its old value |
| 96 | * @nr: Bit to clear | 96 | * @nr: Bit to clear |
| 97 | * @addr: Address to count from | 97 | * @addr: Address to count from |
| 98 | * | 98 | * |
| 99 | * This operation is atomic and cannot be reordered. | 99 | * This operation is atomic and cannot be reordered. |
| 100 | * It also implies a memory barrier. | 100 | * It also implies a memory barrier. |
| 101 | */ | 101 | */ |
| 102 | 102 | ||
| 103 | static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) | 103 | static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) |
| 104 | { | 104 | { |
| 105 | unsigned int mask, retval; | 105 | unsigned int mask, retval; |
| 106 | unsigned long flags; | 106 | unsigned long flags; |
| 107 | unsigned int *adr = (unsigned int *)addr; | 107 | unsigned int *adr = (unsigned int *)addr; |
| 108 | 108 | ||
| 109 | adr += nr >> 5; | 109 | adr += nr >> 5; |
| 110 | mask = 1 << (nr & 0x1f); | 110 | mask = 1 << (nr & 0x1f); |
| 111 | cris_atomic_save(addr, flags); | 111 | cris_atomic_save(addr, flags); |
| 112 | retval = (mask & *adr) != 0; | 112 | retval = (mask & *adr) != 0; |
| 113 | *adr &= ~mask; | 113 | *adr &= ~mask; |
| 114 | cris_atomic_restore(addr, flags); | 114 | cris_atomic_restore(addr, flags); |
| 115 | return retval; | 115 | return retval; |
| 116 | } | 116 | } |
| 117 | 117 | ||
| 118 | /** | 118 | /** |
| 119 | * test_and_change_bit - Change a bit and return its old value | 119 | * test_and_change_bit - Change a bit and return its old value |
| 120 | * @nr: Bit to change | 120 | * @nr: Bit to change |
| 121 | * @addr: Address to count from | 121 | * @addr: Address to count from |
| 122 | * | 122 | * |
| 123 | * This operation is atomic and cannot be reordered. | 123 | * This operation is atomic and cannot be reordered. |
| 124 | * It also implies a memory barrier. | 124 | * It also implies a memory barrier. |
| 125 | */ | 125 | */ |
| 126 | 126 | ||
| 127 | static inline int test_and_change_bit(int nr, volatile unsigned long *addr) | 127 | static inline int test_and_change_bit(int nr, volatile unsigned long *addr) |
| 128 | { | 128 | { |
| 129 | unsigned int mask, retval; | 129 | unsigned int mask, retval; |
| 130 | unsigned long flags; | 130 | unsigned long flags; |
| 131 | unsigned int *adr = (unsigned int *)addr; | 131 | unsigned int *adr = (unsigned int *)addr; |
| 132 | adr += nr >> 5; | 132 | adr += nr >> 5; |
| 133 | mask = 1 << (nr & 0x1f); | 133 | mask = 1 << (nr & 0x1f); |
| 134 | cris_atomic_save(addr, flags); | 134 | cris_atomic_save(addr, flags); |
| 135 | retval = (mask & *adr) != 0; | 135 | retval = (mask & *adr) != 0; |
| 136 | *adr ^= mask; | 136 | *adr ^= mask; |
| 137 | cris_atomic_restore(addr, flags); | 137 | cris_atomic_restore(addr, flags); |
| 138 | return retval; | 138 | return retval; |
| 139 | } | 139 | } |
| 140 | 140 | ||
| 141 | #include <asm-generic/bitops/non-atomic.h> | 141 | #include <asm-generic/bitops/non-atomic.h> |
| 142 | 142 | ||
| 143 | /* | 143 | /* |
| 144 | * Since we define it "external", it collides with the built-in | 144 | * Since we define it "external", it collides with the built-in |
| 145 | * definition, which doesn't have the same semantics. We don't want to | 145 | * definition, which doesn't have the same semantics. We don't want to |
| 146 | * use -fno-builtin, so just hide the name ffs. | 146 | * use -fno-builtin, so just hide the name ffs. |
| 147 | */ | 147 | */ |
| 148 | #define ffs kernel_ffs | 148 | #define ffs kernel_ffs |
| 149 | 149 | ||
| 150 | #include <asm-generic/bitops/fls.h> | 150 | #include <asm-generic/bitops/fls.h> |
| 151 | #include <asm-generic/bitops/__fls.h> | 151 | #include <asm-generic/bitops/__fls.h> |
| 152 | #include <asm-generic/bitops/fls64.h> | 152 | #include <asm-generic/bitops/fls64.h> |
| 153 | #include <asm-generic/bitops/hweight.h> | 153 | #include <asm-generic/bitops/hweight.h> |
| 154 | #include <asm-generic/bitops/find.h> | 154 | #include <asm-generic/bitops/find.h> |
| 155 | #include <asm-generic/bitops/lock.h> | 155 | #include <asm-generic/bitops/lock.h> |
| 156 | 156 | ||
| 157 | #include <asm-generic/bitops/le.h> | 157 | #include <asm-generic/bitops/le.h> |
| 158 | 158 | ||
| 159 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) | 159 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) |
| 160 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) | 160 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) |
| 161 | 161 | ||
| 162 | #include <asm-generic/bitops/minix.h> | ||
| 163 | #include <asm-generic/bitops/sched.h> | 162 | #include <asm-generic/bitops/sched.h> |
| 164 | 163 | ||
| 165 | #endif /* __KERNEL__ */ | 164 | #endif /* __KERNEL__ */ |
| 166 | 165 | ||
| 167 | #endif /* _CRIS_BITOPS_H */ | 166 | #endif /* _CRIS_BITOPS_H */ |
| 168 | 167 |
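CRIS has no atomic read-modify-write instruction here, so each test_and_*() helper brackets a plain load/modify/store with cris_atomic_save()/cris_atomic_restore(), and set_bit()/clear_bit()/change_bit() are simply the test_and_*() forms with the return value discarded. A sketch of that shape, for illustration only; ex_irq_save()/ex_irq_restore() are hypothetical stand-ins, stubbed out so the example is self-contained:

    /* Illustrative only: the interrupt-disable RMW pattern used by the CRIS helpers. */
    static inline unsigned long ex_irq_save(void)
    {
            return 0;       /* a real port would disable local interrupts and return the old state */
    }

    static inline void ex_irq_restore(unsigned long flags)
    {
            (void)flags;    /* a real port would restore the saved interrupt state */
    }

    static inline int ex_test_and_set_bit(int nr, volatile unsigned long *addr)
    {
            unsigned int *adr = (unsigned int *)addr + (nr >> 5);
            unsigned int mask = 1U << (nr & 0x1f);
            unsigned long flags;
            int retval;

            flags = ex_irq_save();
            retval = (*adr & mask) != 0;    /* old value of the bit */
            *adr |= mask;
            ex_irq_restore(flags);
            return retval;
    }

    #define ex_set_bit(nr, addr) ((void)ex_test_and_set_bit(nr, addr))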
arch/frv/include/asm/bitops.h
| 1 | /* bitops.h: bit operations for the Fujitsu FR-V CPUs | 1 | /* bitops.h: bit operations for the Fujitsu FR-V CPUs |
| 2 | * | 2 | * |
| 3 | * For an explanation of how atomic ops work in this arch, see: | 3 | * For an explanation of how atomic ops work in this arch, see: |
| 4 | * Documentation/frv/atomic-ops.txt | 4 | * Documentation/frv/atomic-ops.txt |
| 5 | * | 5 | * |
| 6 | * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. | 6 | * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. |
| 7 | * Written by David Howells (dhowells@redhat.com) | 7 | * Written by David Howells (dhowells@redhat.com) |
| 8 | * | 8 | * |
| 9 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
| 10 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
| 11 | * as published by the Free Software Foundation; either version | 11 | * as published by the Free Software Foundation; either version |
| 12 | * 2 of the License, or (at your option) any later version. | 12 | * 2 of the License, or (at your option) any later version. |
| 13 | */ | 13 | */ |
| 14 | #ifndef _ASM_BITOPS_H | 14 | #ifndef _ASM_BITOPS_H |
| 15 | #define _ASM_BITOPS_H | 15 | #define _ASM_BITOPS_H |
| 16 | 16 | ||
| 17 | #include <linux/compiler.h> | 17 | #include <linux/compiler.h> |
| 18 | #include <asm/byteorder.h> | 18 | #include <asm/byteorder.h> |
| 19 | 19 | ||
| 20 | #ifdef __KERNEL__ | 20 | #ifdef __KERNEL__ |
| 21 | 21 | ||
| 22 | #ifndef _LINUX_BITOPS_H | 22 | #ifndef _LINUX_BITOPS_H |
| 23 | #error only <linux/bitops.h> can be included directly | 23 | #error only <linux/bitops.h> can be included directly |
| 24 | #endif | 24 | #endif |
| 25 | 25 | ||
| 26 | #include <asm-generic/bitops/ffz.h> | 26 | #include <asm-generic/bitops/ffz.h> |
| 27 | 27 | ||
| 28 | /* | 28 | /* |
| 29 | * clear_bit() doesn't provide any barrier for the compiler. | 29 | * clear_bit() doesn't provide any barrier for the compiler. |
| 30 | */ | 30 | */ |
| 31 | #define smp_mb__before_clear_bit() barrier() | 31 | #define smp_mb__before_clear_bit() barrier() |
| 32 | #define smp_mb__after_clear_bit() barrier() | 32 | #define smp_mb__after_clear_bit() barrier() |
| 33 | 33 | ||
| 34 | #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS | 34 | #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS |
| 35 | static inline | 35 | static inline |
| 36 | unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v) | 36 | unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v) |
| 37 | { | 37 | { |
| 38 | unsigned long old, tmp; | 38 | unsigned long old, tmp; |
| 39 | 39 | ||
| 40 | asm volatile( | 40 | asm volatile( |
| 41 | "0: \n" | 41 | "0: \n" |
| 42 | " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ | 42 | " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ |
| 43 | " ckeq icc3,cc7 \n" | 43 | " ckeq icc3,cc7 \n" |
| 44 | " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */ | 44 | " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */ |
| 45 | " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ | 45 | " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ |
| 46 | " and%I3 %1,%3,%2 \n" | 46 | " and%I3 %1,%3,%2 \n" |
| 47 | " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */ | 47 | " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */ |
| 48 | " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */ | 48 | " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */ |
| 49 | " beq icc3,#0,0b \n" | 49 | " beq icc3,#0,0b \n" |
| 50 | : "+U"(*v), "=&r"(old), "=r"(tmp) | 50 | : "+U"(*v), "=&r"(old), "=r"(tmp) |
| 51 | : "NPr"(~mask) | 51 | : "NPr"(~mask) |
| 52 | : "memory", "cc7", "cc3", "icc3" | 52 | : "memory", "cc7", "cc3", "icc3" |
| 53 | ); | 53 | ); |
| 54 | 54 | ||
| 55 | return old; | 55 | return old; |
| 56 | } | 56 | } |
| 57 | 57 | ||
| 58 | static inline | 58 | static inline |
| 59 | unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v) | 59 | unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v) |
| 60 | { | 60 | { |
| 61 | unsigned long old, tmp; | 61 | unsigned long old, tmp; |
| 62 | 62 | ||
| 63 | asm volatile( | 63 | asm volatile( |
| 64 | "0: \n" | 64 | "0: \n" |
| 65 | " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ | 65 | " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ |
| 66 | " ckeq icc3,cc7 \n" | 66 | " ckeq icc3,cc7 \n" |
| 67 | " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */ | 67 | " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */ |
| 68 | " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ | 68 | " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ |
| 69 | " or%I3 %1,%3,%2 \n" | 69 | " or%I3 %1,%3,%2 \n" |
| 70 | " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */ | 70 | " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */ |
| 71 | " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */ | 71 | " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */ |
| 72 | " beq icc3,#0,0b \n" | 72 | " beq icc3,#0,0b \n" |
| 73 | : "+U"(*v), "=&r"(old), "=r"(tmp) | 73 | : "+U"(*v), "=&r"(old), "=r"(tmp) |
| 74 | : "NPr"(mask) | 74 | : "NPr"(mask) |
| 75 | : "memory", "cc7", "cc3", "icc3" | 75 | : "memory", "cc7", "cc3", "icc3" |
| 76 | ); | 76 | ); |
| 77 | 77 | ||
| 78 | return old; | 78 | return old; |
| 79 | } | 79 | } |
| 80 | 80 | ||
| 81 | static inline | 81 | static inline |
| 82 | unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v) | 82 | unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v) |
| 83 | { | 83 | { |
| 84 | unsigned long old, tmp; | 84 | unsigned long old, tmp; |
| 85 | 85 | ||
| 86 | asm volatile( | 86 | asm volatile( |
| 87 | "0: \n" | 87 | "0: \n" |
| 88 | " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ | 88 | " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ |
| 89 | " ckeq icc3,cc7 \n" | 89 | " ckeq icc3,cc7 \n" |
| 90 | " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */ | 90 | " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */ |
| 91 | " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ | 91 | " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ |
| 92 | " xor%I3 %1,%3,%2 \n" | 92 | " xor%I3 %1,%3,%2 \n" |
| 93 | " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */ | 93 | " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */ |
| 94 | " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */ | 94 | " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */ |
| 95 | " beq icc3,#0,0b \n" | 95 | " beq icc3,#0,0b \n" |
| 96 | : "+U"(*v), "=&r"(old), "=r"(tmp) | 96 | : "+U"(*v), "=&r"(old), "=r"(tmp) |
| 97 | : "NPr"(mask) | 97 | : "NPr"(mask) |
| 98 | : "memory", "cc7", "cc3", "icc3" | 98 | : "memory", "cc7", "cc3", "icc3" |
| 99 | ); | 99 | ); |
| 100 | 100 | ||
| 101 | return old; | 101 | return old; |
| 102 | } | 102 | } |
| 103 | 103 | ||
| 104 | #else | 104 | #else |
| 105 | 105 | ||
| 106 | extern unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v); | 106 | extern unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v); |
| 107 | extern unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v); | 107 | extern unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v); |
| 108 | extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v); | 108 | extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v); |
| 109 | 109 | ||
| 110 | #endif | 110 | #endif |
| 111 | 111 | ||
| 112 | #define atomic_clear_mask(mask, v) atomic_test_and_ANDNOT_mask((mask), (v)) | 112 | #define atomic_clear_mask(mask, v) atomic_test_and_ANDNOT_mask((mask), (v)) |
| 113 | #define atomic_set_mask(mask, v) atomic_test_and_OR_mask((mask), (v)) | 113 | #define atomic_set_mask(mask, v) atomic_test_and_OR_mask((mask), (v)) |
| 114 | 114 | ||
| 115 | static inline int test_and_clear_bit(unsigned long nr, volatile void *addr) | 115 | static inline int test_and_clear_bit(unsigned long nr, volatile void *addr) |
| 116 | { | 116 | { |
| 117 | volatile unsigned long *ptr = addr; | 117 | volatile unsigned long *ptr = addr; |
| 118 | unsigned long mask = 1UL << (nr & 31); | 118 | unsigned long mask = 1UL << (nr & 31); |
| 119 | ptr += nr >> 5; | 119 | ptr += nr >> 5; |
| 120 | return (atomic_test_and_ANDNOT_mask(mask, ptr) & mask) != 0; | 120 | return (atomic_test_and_ANDNOT_mask(mask, ptr) & mask) != 0; |
| 121 | } | 121 | } |
| 122 | 122 | ||
| 123 | static inline int test_and_set_bit(unsigned long nr, volatile void *addr) | 123 | static inline int test_and_set_bit(unsigned long nr, volatile void *addr) |
| 124 | { | 124 | { |
| 125 | volatile unsigned long *ptr = addr; | 125 | volatile unsigned long *ptr = addr; |
| 126 | unsigned long mask = 1UL << (nr & 31); | 126 | unsigned long mask = 1UL << (nr & 31); |
| 127 | ptr += nr >> 5; | 127 | ptr += nr >> 5; |
| 128 | return (atomic_test_and_OR_mask(mask, ptr) & mask) != 0; | 128 | return (atomic_test_and_OR_mask(mask, ptr) & mask) != 0; |
| 129 | } | 129 | } |
| 130 | 130 | ||
| 131 | static inline int test_and_change_bit(unsigned long nr, volatile void *addr) | 131 | static inline int test_and_change_bit(unsigned long nr, volatile void *addr) |
| 132 | { | 132 | { |
| 133 | volatile unsigned long *ptr = addr; | 133 | volatile unsigned long *ptr = addr; |
| 134 | unsigned long mask = 1UL << (nr & 31); | 134 | unsigned long mask = 1UL << (nr & 31); |
| 135 | ptr += nr >> 5; | 135 | ptr += nr >> 5; |
| 136 | return (atomic_test_and_XOR_mask(mask, ptr) & mask) != 0; | 136 | return (atomic_test_and_XOR_mask(mask, ptr) & mask) != 0; |
| 137 | } | 137 | } |
| 138 | 138 | ||
| 139 | static inline void clear_bit(unsigned long nr, volatile void *addr) | 139 | static inline void clear_bit(unsigned long nr, volatile void *addr) |
| 140 | { | 140 | { |
| 141 | test_and_clear_bit(nr, addr); | 141 | test_and_clear_bit(nr, addr); |
| 142 | } | 142 | } |
| 143 | 143 | ||
| 144 | static inline void set_bit(unsigned long nr, volatile void *addr) | 144 | static inline void set_bit(unsigned long nr, volatile void *addr) |
| 145 | { | 145 | { |
| 146 | test_and_set_bit(nr, addr); | 146 | test_and_set_bit(nr, addr); |
| 147 | } | 147 | } |
| 148 | 148 | ||
| 149 | static inline void change_bit(unsigned long nr, volatile void *addr) | 149 | static inline void change_bit(unsigned long nr, volatile void *addr) |
| 150 | { | 150 | { |
| 151 | test_and_change_bit(nr, addr); | 151 | test_and_change_bit(nr, addr); |
| 152 | } | 152 | } |
| 153 | 153 | ||
| 154 | static inline void __clear_bit(unsigned long nr, volatile void *addr) | 154 | static inline void __clear_bit(unsigned long nr, volatile void *addr) |
| 155 | { | 155 | { |
| 156 | volatile unsigned long *a = addr; | 156 | volatile unsigned long *a = addr; |
| 157 | int mask; | 157 | int mask; |
| 158 | 158 | ||
| 159 | a += nr >> 5; | 159 | a += nr >> 5; |
| 160 | mask = 1 << (nr & 31); | 160 | mask = 1 << (nr & 31); |
| 161 | *a &= ~mask; | 161 | *a &= ~mask; |
| 162 | } | 162 | } |
| 163 | 163 | ||
| 164 | static inline void __set_bit(unsigned long nr, volatile void *addr) | 164 | static inline void __set_bit(unsigned long nr, volatile void *addr) |
| 165 | { | 165 | { |
| 166 | volatile unsigned long *a = addr; | 166 | volatile unsigned long *a = addr; |
| 167 | int mask; | 167 | int mask; |
| 168 | 168 | ||
| 169 | a += nr >> 5; | 169 | a += nr >> 5; |
| 170 | mask = 1 << (nr & 31); | 170 | mask = 1 << (nr & 31); |
| 171 | *a |= mask; | 171 | *a |= mask; |
| 172 | } | 172 | } |
| 173 | 173 | ||
| 174 | static inline void __change_bit(unsigned long nr, volatile void *addr) | 174 | static inline void __change_bit(unsigned long nr, volatile void *addr) |
| 175 | { | 175 | { |
| 176 | volatile unsigned long *a = addr; | 176 | volatile unsigned long *a = addr; |
| 177 | int mask; | 177 | int mask; |
| 178 | 178 | ||
| 179 | a += nr >> 5; | 179 | a += nr >> 5; |
| 180 | mask = 1 << (nr & 31); | 180 | mask = 1 << (nr & 31); |
| 181 | *a ^= mask; | 181 | *a ^= mask; |
| 182 | } | 182 | } |
| 183 | 183 | ||
| 184 | static inline int __test_and_clear_bit(unsigned long nr, volatile void *addr) | 184 | static inline int __test_and_clear_bit(unsigned long nr, volatile void *addr) |
| 185 | { | 185 | { |
| 186 | volatile unsigned long *a = addr; | 186 | volatile unsigned long *a = addr; |
| 187 | int mask, retval; | 187 | int mask, retval; |
| 188 | 188 | ||
| 189 | a += nr >> 5; | 189 | a += nr >> 5; |
| 190 | mask = 1 << (nr & 31); | 190 | mask = 1 << (nr & 31); |
| 191 | retval = (mask & *a) != 0; | 191 | retval = (mask & *a) != 0; |
| 192 | *a &= ~mask; | 192 | *a &= ~mask; |
| 193 | return retval; | 193 | return retval; |
| 194 | } | 194 | } |
| 195 | 195 | ||
| 196 | static inline int __test_and_set_bit(unsigned long nr, volatile void *addr) | 196 | static inline int __test_and_set_bit(unsigned long nr, volatile void *addr) |
| 197 | { | 197 | { |
| 198 | volatile unsigned long *a = addr; | 198 | volatile unsigned long *a = addr; |
| 199 | int mask, retval; | 199 | int mask, retval; |
| 200 | 200 | ||
| 201 | a += nr >> 5; | 201 | a += nr >> 5; |
| 202 | mask = 1 << (nr & 31); | 202 | mask = 1 << (nr & 31); |
| 203 | retval = (mask & *a) != 0; | 203 | retval = (mask & *a) != 0; |
| 204 | *a |= mask; | 204 | *a |= mask; |
| 205 | return retval; | 205 | return retval; |
| 206 | } | 206 | } |
| 207 | 207 | ||
| 208 | static inline int __test_and_change_bit(unsigned long nr, volatile void *addr) | 208 | static inline int __test_and_change_bit(unsigned long nr, volatile void *addr) |
| 209 | { | 209 | { |
| 210 | volatile unsigned long *a = addr; | 210 | volatile unsigned long *a = addr; |
| 211 | int mask, retval; | 211 | int mask, retval; |
| 212 | 212 | ||
| 213 | a += nr >> 5; | 213 | a += nr >> 5; |
| 214 | mask = 1 << (nr & 31); | 214 | mask = 1 << (nr & 31); |
| 215 | retval = (mask & *a) != 0; | 215 | retval = (mask & *a) != 0; |
| 216 | *a ^= mask; | 216 | *a ^= mask; |
| 217 | return retval; | 217 | return retval; |
| 218 | } | 218 | } |
| 219 | 219 | ||
| 220 | /* | 220 | /* |
| 221 | * This routine doesn't need to be atomic. | 221 | * This routine doesn't need to be atomic. |
| 222 | */ | 222 | */ |
| 223 | static inline int | 223 | static inline int |
| 224 | __constant_test_bit(unsigned long nr, const volatile void *addr) | 224 | __constant_test_bit(unsigned long nr, const volatile void *addr) |
| 225 | { | 225 | { |
| 226 | return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; | 226 | return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; |
| 227 | } | 227 | } |
| 228 | 228 | ||
| 229 | static inline int __test_bit(unsigned long nr, const volatile void *addr) | 229 | static inline int __test_bit(unsigned long nr, const volatile void *addr) |
| 230 | { | 230 | { |
| 231 | int * a = (int *) addr; | 231 | int * a = (int *) addr; |
| 232 | int mask; | 232 | int mask; |
| 233 | 233 | ||
| 234 | a += nr >> 5; | 234 | a += nr >> 5; |
| 235 | mask = 1 << (nr & 0x1f); | 235 | mask = 1 << (nr & 0x1f); |
| 236 | return ((mask & *a) != 0); | 236 | return ((mask & *a) != 0); |
| 237 | } | 237 | } |
| 238 | 238 | ||
| 239 | #define test_bit(nr,addr) \ | 239 | #define test_bit(nr,addr) \ |
| 240 | (__builtin_constant_p(nr) ? \ | 240 | (__builtin_constant_p(nr) ? \ |
| 241 | __constant_test_bit((nr),(addr)) : \ | 241 | __constant_test_bit((nr),(addr)) : \ |
| 242 | __test_bit((nr),(addr))) | 242 | __test_bit((nr),(addr))) |
| 243 | 243 | ||
| 244 | #include <asm-generic/bitops/find.h> | 244 | #include <asm-generic/bitops/find.h> |
| 245 | 245 | ||
| 246 | /** | 246 | /** |
| 247 | * fls - find last bit set | 247 | * fls - find last bit set |
| 248 | * @x: the word to search | 248 | * @x: the word to search |
| 249 | * | 249 | * |
| 250 | * This is defined the same way as ffs: | 250 | * This is defined the same way as ffs: |
| 251 | * - return 32..1 to indicate bit 31..0 most significant bit set | 251 | * - return 32..1 to indicate bit 31..0 most significant bit set |
| 252 | * - return 0 to indicate no bits set | 252 | * - return 0 to indicate no bits set |
| 253 | */ | 253 | */ |
| 254 | #define fls(x) \ | 254 | #define fls(x) \ |
| 255 | ({ \ | 255 | ({ \ |
| 256 | int bit; \ | 256 | int bit; \ |
| 257 | \ | 257 | \ |
| 258 | asm(" subcc %1,gr0,gr0,icc0 \n" \ | 258 | asm(" subcc %1,gr0,gr0,icc0 \n" \ |
| 259 | " ckne icc0,cc4 \n" \ | 259 | " ckne icc0,cc4 \n" \ |
| 260 | " cscan.p %1,gr0,%0 ,cc4,#1 \n" \ | 260 | " cscan.p %1,gr0,%0 ,cc4,#1 \n" \ |
| 261 | " csub %0,%0,%0 ,cc4,#0 \n" \ | 261 | " csub %0,%0,%0 ,cc4,#0 \n" \ |
| 262 | " csub %2,%0,%0 ,cc4,#1 \n" \ | 262 | " csub %2,%0,%0 ,cc4,#1 \n" \ |
| 263 | : "=&r"(bit) \ | 263 | : "=&r"(bit) \ |
| 264 | : "r"(x), "r"(32) \ | 264 | : "r"(x), "r"(32) \ |
| 265 | : "icc0", "cc4" \ | 265 | : "icc0", "cc4" \ |
| 266 | ); \ | 266 | ); \ |
| 267 | \ | 267 | \ |
| 268 | bit; \ | 268 | bit; \ |
| 269 | }) | 269 | }) |
| 270 | 270 | ||
| 271 | /** | 271 | /** |
| 272 | * fls64 - find last bit set in a 64-bit value | 272 | * fls64 - find last bit set in a 64-bit value |
| 273 | * @n: the value to search | 273 | * @n: the value to search |
| 274 | * | 274 | * |
| 275 | * This is defined the same way as ffs: | 275 | * This is defined the same way as ffs: |
| 276 | * - return 64..1 to indicate bit 63..0 most significant bit set | 276 | * - return 64..1 to indicate bit 63..0 most significant bit set |
| 277 | * - return 0 to indicate no bits set | 277 | * - return 0 to indicate no bits set |
| 278 | */ | 278 | */ |
| 279 | static inline __attribute__((const)) | 279 | static inline __attribute__((const)) |
| 280 | int fls64(u64 n) | 280 | int fls64(u64 n) |
| 281 | { | 281 | { |
| 282 | union { | 282 | union { |
| 283 | u64 ll; | 283 | u64 ll; |
| 284 | struct { u32 h, l; }; | 284 | struct { u32 h, l; }; |
| 285 | } _; | 285 | } _; |
| 286 | int bit, x, y; | 286 | int bit, x, y; |
| 287 | 287 | ||
| 288 | _.ll = n; | 288 | _.ll = n; |
| 289 | 289 | ||
| 290 | asm(" subcc.p %3,gr0,gr0,icc0 \n" | 290 | asm(" subcc.p %3,gr0,gr0,icc0 \n" |
| 291 | " subcc %4,gr0,gr0,icc1 \n" | 291 | " subcc %4,gr0,gr0,icc1 \n" |
| 292 | " ckne icc0,cc4 \n" | 292 | " ckne icc0,cc4 \n" |
| 293 | " ckne icc1,cc5 \n" | 293 | " ckne icc1,cc5 \n" |
| 294 | " norcr cc4,cc5,cc6 \n" | 294 | " norcr cc4,cc5,cc6 \n" |
| 295 | " csub.p %0,%0,%0 ,cc6,1 \n" | 295 | " csub.p %0,%0,%0 ,cc6,1 \n" |
| 296 | " orcr cc5,cc4,cc4 \n" | 296 | " orcr cc5,cc4,cc4 \n" |
| 297 | " andcr cc4,cc5,cc4 \n" | 297 | " andcr cc4,cc5,cc4 \n" |
| 298 | " cscan.p %3,gr0,%0 ,cc4,0 \n" | 298 | " cscan.p %3,gr0,%0 ,cc4,0 \n" |
| 299 | " setlos #64,%1 \n" | 299 | " setlos #64,%1 \n" |
| 300 | " cscan.p %4,gr0,%0 ,cc4,1 \n" | 300 | " cscan.p %4,gr0,%0 ,cc4,1 \n" |
| 301 | " setlos #32,%2 \n" | 301 | " setlos #32,%2 \n" |
| 302 | " csub.p %1,%0,%0 ,cc4,0 \n" | 302 | " csub.p %1,%0,%0 ,cc4,0 \n" |
| 303 | " csub %2,%0,%0 ,cc4,1 \n" | 303 | " csub %2,%0,%0 ,cc4,1 \n" |
| 304 | : "=&r"(bit), "=r"(x), "=r"(y) | 304 | : "=&r"(bit), "=r"(x), "=r"(y) |
| 305 | : "0r"(_.h), "r"(_.l) | 305 | : "0r"(_.h), "r"(_.l) |
| 306 | : "icc0", "icc1", "cc4", "cc5", "cc6" | 306 | : "icc0", "icc1", "cc4", "cc5", "cc6" |
| 307 | ); | 307 | ); |
| 308 | return bit; | 308 | return bit; |
| 309 | 309 | ||
| 310 | } | 310 | } |
| 311 | 311 | ||
| 312 | /** | 312 | /** |
| 313 | * ffs - find first bit set | 313 | * ffs - find first bit set |
| 314 | * @x: the word to search | 314 | * @x: the word to search |
| 315 | * | 315 | * |
| 316 | * - return 32..1 to indicate bit 31..0 least significant bit set | 316 | * - return 32..1 to indicate bit 31..0 least significant bit set |
| 317 | * - return 0 to indicate no bits set | 317 | * - return 0 to indicate no bits set |
| 318 | */ | 318 | */ |
| 319 | static inline __attribute__((const)) | 319 | static inline __attribute__((const)) |
| 320 | int ffs(int x) | 320 | int ffs(int x) |
| 321 | { | 321 | { |
| 322 | /* Note: (x & -x) gives us a mask that is the least significant | 322 | /* Note: (x & -x) gives us a mask that is the least significant |
| 323 | * (rightmost) 1-bit of the value in x. | 323 | * (rightmost) 1-bit of the value in x. |
| 324 | */ | 324 | */ |
| 325 | return fls(x & -x); | 325 | return fls(x & -x); |
| 326 | } | 326 | } |
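The x & -x idiom used by ffs() above isolates the least significant set bit (for example 40 & -40 == 8), so fls() of that single-bit value is exactly the 1-based index ffs() must return. A portable sketch of the same construction, for illustration only (the exf_* names are hypothetical and not part of this patch):

    /* Illustrative only: building ffs() from fls() via the x & -x trick. */
    static inline int exf_fls(unsigned int x)
    {
            int bit = 0;

            while (x) {             /* 1-based index of the most significant set bit */
                    x >>= 1;
                    bit++;
            }
            return bit;             /* 0 when x == 0, matching fls() */
    }

    static inline int exf_ffs(int x)
    {
            unsigned int ux = (unsigned int)x;

            /* ux & -ux keeps only the least significant set bit:
             * exf_fls(40 & -40) == exf_fls(8) == 4 == ffs(40) */
            return exf_fls(ux & -ux);
    }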
| 327 | 327 | ||
| 328 | /** | 328 | /** |
| 329 | * __ffs - find first bit set | 329 | * __ffs - find first bit set |
| 330 | * @x: the word to search | 330 | * @x: the word to search |
| 331 | * | 331 | * |
| 332 | * - return 31..0 to indicate bit 31..0 least significant bit set | 332 | * - return 31..0 to indicate bit 31..0 least significant bit set |
| 333 | * - if no bits are set in x, the result is undefined | 333 | * - if no bits are set in x, the result is undefined |
| 334 | */ | 334 | */ |
| 335 | static inline __attribute__((const)) | 335 | static inline __attribute__((const)) |
| 336 | int __ffs(unsigned long x) | 336 | int __ffs(unsigned long x) |
| 337 | { | 337 | { |
| 338 | int bit; | 338 | int bit; |
| 339 | asm("scan %1,gr0,%0" : "=r"(bit) : "r"(x & -x)); | 339 | asm("scan %1,gr0,%0" : "=r"(bit) : "r"(x & -x)); |
| 340 | return 31 - bit; | 340 | return 31 - bit; |
| 341 | } | 341 | } |
| 342 | 342 | ||
| 343 | /** | 343 | /** |
| 344 | * __fls - find last (most-significant) set bit in a long word | 344 | * __fls - find last (most-significant) set bit in a long word |
| 345 | * @word: the word to search | 345 | * @word: the word to search |
| 346 | * | 346 | * |
| 347 | * Undefined if no set bit exists, so code should check against 0 first. | 347 | * Undefined if no set bit exists, so code should check against 0 first. |
| 348 | */ | 348 | */ |
| 349 | static inline unsigned long __fls(unsigned long word) | 349 | static inline unsigned long __fls(unsigned long word) |
| 350 | { | 350 | { |
| 351 | unsigned long bit; | 351 | unsigned long bit; |
| 352 | asm("scan %1,gr0,%0" : "=r"(bit) : "r"(word)); | 352 | asm("scan %1,gr0,%0" : "=r"(bit) : "r"(word)); |
| 353 | return bit; | 353 | return bit; |
| 354 | } | 354 | } |
| 355 | 355 | ||
| 356 | /* | 356 | /* |
| 357 | * special slimline version of fls() for calculating ilog2_u32() | 357 | * special slimline version of fls() for calculating ilog2_u32() |
| 358 | * - note: no protection against n == 0 | 358 | * - note: no protection against n == 0 |
| 359 | */ | 359 | */ |
| 360 | #define ARCH_HAS_ILOG2_U32 | 360 | #define ARCH_HAS_ILOG2_U32 |
| 361 | static inline __attribute__((const)) | 361 | static inline __attribute__((const)) |
| 362 | int __ilog2_u32(u32 n) | 362 | int __ilog2_u32(u32 n) |
| 363 | { | 363 | { |
| 364 | int bit; | 364 | int bit; |
| 365 | asm("scan %1,gr0,%0" : "=r"(bit) : "r"(n)); | 365 | asm("scan %1,gr0,%0" : "=r"(bit) : "r"(n)); |
| 366 | return 31 - bit; | 366 | return 31 - bit; |
| 367 | } | 367 | } |
| 368 | 368 | ||
| 369 | /* | 369 | /* |
| 370 | * special slimline version of fls64() for calculating ilog2_u64() | 370 | * special slimline version of fls64() for calculating ilog2_u64() |
| 371 | * - note: no protection against n == 0 | 371 | * - note: no protection against n == 0 |
| 372 | */ | 372 | */ |
| 373 | #define ARCH_HAS_ILOG2_U64 | 373 | #define ARCH_HAS_ILOG2_U64 |
| 374 | static inline __attribute__((const)) | 374 | static inline __attribute__((const)) |
| 375 | int __ilog2_u64(u64 n) | 375 | int __ilog2_u64(u64 n) |
| 376 | { | 376 | { |
| 377 | union { | 377 | union { |
| 378 | u64 ll; | 378 | u64 ll; |
| 379 | struct { u32 h, l; }; | 379 | struct { u32 h, l; }; |
| 380 | } _; | 380 | } _; |
| 381 | int bit, x, y; | 381 | int bit, x, y; |
| 382 | 382 | ||
| 383 | _.ll = n; | 383 | _.ll = n; |
| 384 | 384 | ||
| 385 | asm(" subcc %3,gr0,gr0,icc0 \n" | 385 | asm(" subcc %3,gr0,gr0,icc0 \n" |
| 386 | " ckeq icc0,cc4 \n" | 386 | " ckeq icc0,cc4 \n" |
| 387 | " cscan.p %3,gr0,%0 ,cc4,0 \n" | 387 | " cscan.p %3,gr0,%0 ,cc4,0 \n" |
| 388 | " setlos #63,%1 \n" | 388 | " setlos #63,%1 \n" |
| 389 | " cscan.p %4,gr0,%0 ,cc4,1 \n" | 389 | " cscan.p %4,gr0,%0 ,cc4,1 \n" |
| 390 | " setlos #31,%2 \n" | 390 | " setlos #31,%2 \n" |
| 391 | " csub.p %1,%0,%0 ,cc4,0 \n" | 391 | " csub.p %1,%0,%0 ,cc4,0 \n" |
| 392 | " csub %2,%0,%0 ,cc4,1 \n" | 392 | " csub %2,%0,%0 ,cc4,1 \n" |
| 393 | : "=&r"(bit), "=r"(x), "=r"(y) | 393 | : "=&r"(bit), "=r"(x), "=r"(y) |
| 394 | : "0r"(_.h), "r"(_.l) | 394 | : "0r"(_.h), "r"(_.l) |
| 395 | : "icc0", "cc4" | 395 | : "icc0", "cc4" |
| 396 | ); | 396 | ); |
| 397 | return bit; | 397 | return bit; |
| 398 | } | 398 | } |
| 399 | 399 | ||
| 400 | #include <asm-generic/bitops/sched.h> | 400 | #include <asm-generic/bitops/sched.h> |
| 401 | #include <asm-generic/bitops/hweight.h> | 401 | #include <asm-generic/bitops/hweight.h> |
| 402 | #include <asm-generic/bitops/lock.h> | 402 | #include <asm-generic/bitops/lock.h> |
| 403 | 403 | ||
| 404 | #include <asm-generic/bitops/le.h> | 404 | #include <asm-generic/bitops/le.h> |
| 405 | 405 | ||
| 406 | #define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit ((nr) ^ 0x18, (addr)) | 406 | #define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit ((nr) ^ 0x18, (addr)) |
| 407 | #define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr) ^ 0x18, (addr)) | 407 | #define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr) ^ 0x18, (addr)) |
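The (nr) ^ 0x18 adjustment is needed because FRV is big-endian while ext2 bitmaps are little-endian on disk: XORing the bit number with 24 flips the two bits that select the byte inside the 32-bit word (byte 0 <-> byte 3, byte 1 <-> byte 2) and leaves the bit-within-byte untouched, so little-endian bit 0 lands on native bit 24. A sketch of the remapping, for illustration only (the helper name is invented):

	static inline int le_to_be32_bitno(int nr)
	{
		/* mirror the byte index within the 32-bit word; nr & 7 is unchanged */
		return nr ^ 0x18;
	}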
| 408 | 408 | ||
| 409 | #include <asm-generic/bitops/minix-le.h> | ||
| 410 | |||
| 411 | #endif /* __KERNEL__ */ | 409 | #endif /* __KERNEL__ */ |
| 412 | 410 | ||
| 413 | #endif /* _ASM_BITOPS_H */ | 411 | #endif /* _ASM_BITOPS_H */ |
| 414 | 412 |
arch/h8300/include/asm/bitops.h
| 1 | #ifndef _H8300_BITOPS_H | 1 | #ifndef _H8300_BITOPS_H |
| 2 | #define _H8300_BITOPS_H | 2 | #define _H8300_BITOPS_H |
| 3 | 3 | ||
| 4 | /* | 4 | /* |
| 5 | * Copyright 1992, Linus Torvalds. | 5 | * Copyright 1992, Linus Torvalds. |
| 6 | * Copyright 2002, Yoshinori Sato | 6 | * Copyright 2002, Yoshinori Sato |
| 7 | */ | 7 | */ |
| 8 | 8 | ||
| 9 | #include <linux/compiler.h> | 9 | #include <linux/compiler.h> |
| 10 | #include <asm/system.h> | 10 | #include <asm/system.h> |
| 11 | 11 | ||
| 12 | #ifdef __KERNEL__ | 12 | #ifdef __KERNEL__ |
| 13 | 13 | ||
| 14 | #ifndef _LINUX_BITOPS_H | 14 | #ifndef _LINUX_BITOPS_H |
| 15 | #error only <linux/bitops.h> can be included directly | 15 | #error only <linux/bitops.h> can be included directly |
| 16 | #endif | 16 | #endif |
| 17 | 17 | ||
| 18 | /* | 18 | /* |
| 19 | * Function prototypes to keep gcc -Wall happy | 19 | * Function prototypes to keep gcc -Wall happy |
| 20 | */ | 20 | */ |
| 21 | 21 | ||
| 22 | /* | 22 | /* |
| 23 | * ffz = Find First Zero in word. Undefined if no zero exists, | 23 | * ffz = Find First Zero in word. Undefined if no zero exists, |
| 24 | * so code should check against ~0UL first.. | 24 | * so code should check against ~0UL first.. |
| 25 | */ | 25 | */ |
| 26 | static __inline__ unsigned long ffz(unsigned long word) | 26 | static __inline__ unsigned long ffz(unsigned long word) |
| 27 | { | 27 | { |
| 28 | unsigned long result; | 28 | unsigned long result; |
| 29 | 29 | ||
| 30 | result = -1; | 30 | result = -1; |
| 31 | __asm__("1:\n\t" | 31 | __asm__("1:\n\t" |
| 32 | "shlr.l %2\n\t" | 32 | "shlr.l %2\n\t" |
| 33 | "adds #1,%0\n\t" | 33 | "adds #1,%0\n\t" |
| 34 | "bcs 1b" | 34 | "bcs 1b" |
| 35 | : "=r" (result) | 35 | : "=r" (result) |
| 36 | : "0" (result),"r" (word)); | 36 | : "0" (result),"r" (word)); |
| 37 | return result; | 37 | return result; |
| 38 | } | 38 | } |
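The loop above keeps shifting right while the carry out of shlr.l is set, i.e. while the low bit of the word is 1, so the count of trailing one bits is the index of the first zero. A readable equivalent, for illustration only (not the H8/300 implementation):

	static inline unsigned long ffz_sketch(unsigned long word)
	{
		unsigned long result = 0;

		/* undefined for word == ~0UL, as noted above */
		while (word & 1) {
			word >>= 1;
			result++;
		}
		return result;
	}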
| 39 | 39 | ||
| 40 | #define H8300_GEN_BITOP_CONST(OP,BIT) \ | 40 | #define H8300_GEN_BITOP_CONST(OP,BIT) \ |
| 41 | case BIT: \ | 41 | case BIT: \ |
| 42 | __asm__(OP " #" #BIT ",@%0"::"r"(b_addr):"memory"); \ | 42 | __asm__(OP " #" #BIT ",@%0"::"r"(b_addr):"memory"); \ |
| 43 | break; | 43 | break; |
| 44 | 44 | ||
| 45 | #define H8300_GEN_BITOP(FNAME,OP) \ | 45 | #define H8300_GEN_BITOP(FNAME,OP) \ |
| 46 | static __inline__ void FNAME(int nr, volatile unsigned long* addr) \ | 46 | static __inline__ void FNAME(int nr, volatile unsigned long* addr) \ |
| 47 | { \ | 47 | { \ |
| 48 | volatile unsigned char *b_addr; \ | 48 | volatile unsigned char *b_addr; \ |
| 49 | b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \ | 49 | b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \ |
| 50 | if (__builtin_constant_p(nr)) { \ | 50 | if (__builtin_constant_p(nr)) { \ |
| 51 | switch(nr & 7) { \ | 51 | switch(nr & 7) { \ |
| 52 | H8300_GEN_BITOP_CONST(OP,0) \ | 52 | H8300_GEN_BITOP_CONST(OP,0) \ |
| 53 | H8300_GEN_BITOP_CONST(OP,1) \ | 53 | H8300_GEN_BITOP_CONST(OP,1) \ |
| 54 | H8300_GEN_BITOP_CONST(OP,2) \ | 54 | H8300_GEN_BITOP_CONST(OP,2) \ |
| 55 | H8300_GEN_BITOP_CONST(OP,3) \ | 55 | H8300_GEN_BITOP_CONST(OP,3) \ |
| 56 | H8300_GEN_BITOP_CONST(OP,4) \ | 56 | H8300_GEN_BITOP_CONST(OP,4) \ |
| 57 | H8300_GEN_BITOP_CONST(OP,5) \ | 57 | H8300_GEN_BITOP_CONST(OP,5) \ |
| 58 | H8300_GEN_BITOP_CONST(OP,6) \ | 58 | H8300_GEN_BITOP_CONST(OP,6) \ |
| 59 | H8300_GEN_BITOP_CONST(OP,7) \ | 59 | H8300_GEN_BITOP_CONST(OP,7) \ |
| 60 | } \ | 60 | } \ |
| 61 | } else { \ | 61 | } else { \ |
| 62 | __asm__(OP " %w0,@%1"::"r"(nr),"r"(b_addr):"memory"); \ | 62 | __asm__(OP " %w0,@%1"::"r"(nr),"r"(b_addr):"memory"); \ |
| 63 | } \ | 63 | } \ |
| 64 | } | 64 | } |
| 65 | 65 | ||
| 66 | /* | 66 | /* |
| 67 | * clear_bit() doesn't provide any barrier for the compiler. | 67 | * clear_bit() doesn't provide any barrier for the compiler. |
| 68 | */ | 68 | */ |
| 69 | #define smp_mb__before_clear_bit() barrier() | 69 | #define smp_mb__before_clear_bit() barrier() |
| 70 | #define smp_mb__after_clear_bit() barrier() | 70 | #define smp_mb__after_clear_bit() barrier() |
| 71 | 71 | ||
| 72 | H8300_GEN_BITOP(set_bit ,"bset") | 72 | H8300_GEN_BITOP(set_bit ,"bset") |
| 73 | H8300_GEN_BITOP(clear_bit ,"bclr") | 73 | H8300_GEN_BITOP(clear_bit ,"bclr") |
| 74 | H8300_GEN_BITOP(change_bit,"bnot") | 74 | H8300_GEN_BITOP(change_bit,"bnot") |
| 75 | #define __set_bit(nr,addr) set_bit((nr),(addr)) | 75 | #define __set_bit(nr,addr) set_bit((nr),(addr)) |
| 76 | #define __clear_bit(nr,addr) clear_bit((nr),(addr)) | 76 | #define __clear_bit(nr,addr) clear_bit((nr),(addr)) |
| 77 | #define __change_bit(nr,addr) change_bit((nr),(addr)) | 77 | #define __change_bit(nr,addr) change_bit((nr),(addr)) |
| 78 | 78 | ||
| 79 | #undef H8300_GEN_BITOP | 79 | #undef H8300_GEN_BITOP |
| 80 | #undef H8300_GEN_BITOP_CONST | 80 | #undef H8300_GEN_BITOP_CONST |
| 81 | 81 | ||
| 82 | static __inline__ int test_bit(int nr, const unsigned long* addr) | 82 | static __inline__ int test_bit(int nr, const unsigned long* addr) |
| 83 | { | 83 | { |
| 84 | return (*((volatile unsigned char *)addr + | 84 | return (*((volatile unsigned char *)addr + |
| 85 | ((nr >> 3) ^ 3)) & (1UL << (nr & 7))) != 0; | 85 | ((nr >> 3) ^ 3)) & (1UL << (nr & 7))) != 0; |
| 86 | } | 86 | } |
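The ((nr >> 3) ^ 3) addressing reflects that the H8/300 is big-endian: within a 32-bit long, the byte holding bits 7..0 sits at offset 3, bits 15..8 at offset 2, and so on, so the byte offset derived from the bit number has to be mirrored inside the word. A small illustration (not from the sources):

	static inline int h8300_byte_for_bit(int nr)
	{
		/* e.g. nr = 0 -> byte 3, nr = 9 -> byte 2, nr = 31 -> byte 0 */
		return (nr >> 3) ^ 3;
	}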
| 87 | 87 | ||
| 88 | #define __test_bit(nr, addr) test_bit(nr, addr) | 88 | #define __test_bit(nr, addr) test_bit(nr, addr) |
| 89 | 89 | ||
| 90 | #define H8300_GEN_TEST_BITOP_CONST_INT(OP,BIT) \ | 90 | #define H8300_GEN_TEST_BITOP_CONST_INT(OP,BIT) \ |
| 91 | case BIT: \ | 91 | case BIT: \ |
| 92 | __asm__("stc ccr,%w1\n\t" \ | 92 | __asm__("stc ccr,%w1\n\t" \ |
| 93 | "orc #0x80,ccr\n\t" \ | 93 | "orc #0x80,ccr\n\t" \ |
| 94 | "bld #" #BIT ",@%4\n\t" \ | 94 | "bld #" #BIT ",@%4\n\t" \ |
| 95 | OP " #" #BIT ",@%4\n\t" \ | 95 | OP " #" #BIT ",@%4\n\t" \ |
| 96 | "rotxl.l %0\n\t" \ | 96 | "rotxl.l %0\n\t" \ |
| 97 | "ldc %w1,ccr" \ | 97 | "ldc %w1,ccr" \ |
| 98 | : "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr) \ | 98 | : "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr) \ |
| 99 | : "0" (retval),"r" (b_addr) \ | 99 | : "0" (retval),"r" (b_addr) \ |
| 100 | : "memory"); \ | 100 | : "memory"); \ |
| 101 | break; | 101 | break; |
| 102 | 102 | ||
| 103 | #define H8300_GEN_TEST_BITOP_CONST(OP,BIT) \ | 103 | #define H8300_GEN_TEST_BITOP_CONST(OP,BIT) \ |
| 104 | case BIT: \ | 104 | case BIT: \ |
| 105 | __asm__("bld #" #BIT ",@%3\n\t" \ | 105 | __asm__("bld #" #BIT ",@%3\n\t" \ |
| 106 | OP " #" #BIT ",@%3\n\t" \ | 106 | OP " #" #BIT ",@%3\n\t" \ |
| 107 | "rotxl.l %0\n\t" \ | 107 | "rotxl.l %0\n\t" \ |
| 108 | : "=r"(retval),"=m"(*b_addr) \ | 108 | : "=r"(retval),"=m"(*b_addr) \ |
| 109 | : "0" (retval),"r" (b_addr) \ | 109 | : "0" (retval),"r" (b_addr) \ |
| 110 | : "memory"); \ | 110 | : "memory"); \ |
| 111 | break; | 111 | break; |
| 112 | 112 | ||
| 113 | #define H8300_GEN_TEST_BITOP(FNNAME,OP) \ | 113 | #define H8300_GEN_TEST_BITOP(FNNAME,OP) \ |
| 114 | static __inline__ int FNNAME(int nr, volatile void * addr) \ | 114 | static __inline__ int FNNAME(int nr, volatile void * addr) \ |
| 115 | { \ | 115 | { \ |
| 116 | int retval = 0; \ | 116 | int retval = 0; \ |
| 117 | char ccrsave; \ | 117 | char ccrsave; \ |
| 118 | volatile unsigned char *b_addr; \ | 118 | volatile unsigned char *b_addr; \ |
| 119 | b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \ | 119 | b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \ |
| 120 | if (__builtin_constant_p(nr)) { \ | 120 | if (__builtin_constant_p(nr)) { \ |
| 121 | switch(nr & 7) { \ | 121 | switch(nr & 7) { \ |
| 122 | H8300_GEN_TEST_BITOP_CONST_INT(OP,0) \ | 122 | H8300_GEN_TEST_BITOP_CONST_INT(OP,0) \ |
| 123 | H8300_GEN_TEST_BITOP_CONST_INT(OP,1) \ | 123 | H8300_GEN_TEST_BITOP_CONST_INT(OP,1) \ |
| 124 | H8300_GEN_TEST_BITOP_CONST_INT(OP,2) \ | 124 | H8300_GEN_TEST_BITOP_CONST_INT(OP,2) \ |
| 125 | H8300_GEN_TEST_BITOP_CONST_INT(OP,3) \ | 125 | H8300_GEN_TEST_BITOP_CONST_INT(OP,3) \ |
| 126 | H8300_GEN_TEST_BITOP_CONST_INT(OP,4) \ | 126 | H8300_GEN_TEST_BITOP_CONST_INT(OP,4) \ |
| 127 | H8300_GEN_TEST_BITOP_CONST_INT(OP,5) \ | 127 | H8300_GEN_TEST_BITOP_CONST_INT(OP,5) \ |
| 128 | H8300_GEN_TEST_BITOP_CONST_INT(OP,6) \ | 128 | H8300_GEN_TEST_BITOP_CONST_INT(OP,6) \ |
| 129 | H8300_GEN_TEST_BITOP_CONST_INT(OP,7) \ | 129 | H8300_GEN_TEST_BITOP_CONST_INT(OP,7) \ |
| 130 | } \ | 130 | } \ |
| 131 | } else { \ | 131 | } else { \ |
| 132 | __asm__("stc ccr,%w1\n\t" \ | 132 | __asm__("stc ccr,%w1\n\t" \ |
| 133 | "orc #0x80,ccr\n\t" \ | 133 | "orc #0x80,ccr\n\t" \ |
| 134 | "btst %w5,@%4\n\t" \ | 134 | "btst %w5,@%4\n\t" \ |
| 135 | OP " %w5,@%4\n\t" \ | 135 | OP " %w5,@%4\n\t" \ |
| 136 | "beq 1f\n\t" \ | 136 | "beq 1f\n\t" \ |
| 137 | "inc.l #1,%0\n" \ | 137 | "inc.l #1,%0\n" \ |
| 138 | "1:\n\t" \ | 138 | "1:\n\t" \ |
| 139 | "ldc %w1,ccr" \ | 139 | "ldc %w1,ccr" \ |
| 140 | : "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr) \ | 140 | : "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr) \ |
| 141 | : "0" (retval),"r" (b_addr),"r"(nr) \ | 141 | : "0" (retval),"r" (b_addr),"r"(nr) \ |
| 142 | : "memory"); \ | 142 | : "memory"); \ |
| 143 | } \ | 143 | } \ |
| 144 | return retval; \ | 144 | return retval; \ |
| 145 | } \ | 145 | } \ |
| 146 | \ | 146 | \ |
| 147 | static __inline__ int __ ## FNNAME(int nr, volatile void * addr) \ | 147 | static __inline__ int __ ## FNNAME(int nr, volatile void * addr) \ |
| 148 | { \ | 148 | { \ |
| 149 | int retval = 0; \ | 149 | int retval = 0; \ |
| 150 | volatile unsigned char *b_addr; \ | 150 | volatile unsigned char *b_addr; \ |
| 151 | b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \ | 151 | b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \ |
| 152 | if (__builtin_constant_p(nr)) { \ | 152 | if (__builtin_constant_p(nr)) { \ |
| 153 | switch(nr & 7) { \ | 153 | switch(nr & 7) { \ |
| 154 | H8300_GEN_TEST_BITOP_CONST(OP,0) \ | 154 | H8300_GEN_TEST_BITOP_CONST(OP,0) \ |
| 155 | H8300_GEN_TEST_BITOP_CONST(OP,1) \ | 155 | H8300_GEN_TEST_BITOP_CONST(OP,1) \ |
| 156 | H8300_GEN_TEST_BITOP_CONST(OP,2) \ | 156 | H8300_GEN_TEST_BITOP_CONST(OP,2) \ |
| 157 | H8300_GEN_TEST_BITOP_CONST(OP,3) \ | 157 | H8300_GEN_TEST_BITOP_CONST(OP,3) \ |
| 158 | H8300_GEN_TEST_BITOP_CONST(OP,4) \ | 158 | H8300_GEN_TEST_BITOP_CONST(OP,4) \ |
| 159 | H8300_GEN_TEST_BITOP_CONST(OP,5) \ | 159 | H8300_GEN_TEST_BITOP_CONST(OP,5) \ |
| 160 | H8300_GEN_TEST_BITOP_CONST(OP,6) \ | 160 | H8300_GEN_TEST_BITOP_CONST(OP,6) \ |
| 161 | H8300_GEN_TEST_BITOP_CONST(OP,7) \ | 161 | H8300_GEN_TEST_BITOP_CONST(OP,7) \ |
| 162 | } \ | 162 | } \ |
| 163 | } else { \ | 163 | } else { \ |
| 164 | __asm__("btst %w4,@%3\n\t" \ | 164 | __asm__("btst %w4,@%3\n\t" \ |
| 165 | OP " %w4,@%3\n\t" \ | 165 | OP " %w4,@%3\n\t" \ |
| 166 | "beq 1f\n\t" \ | 166 | "beq 1f\n\t" \ |
| 167 | "inc.l #1,%0\n" \ | 167 | "inc.l #1,%0\n" \ |
| 168 | "1:" \ | 168 | "1:" \ |
| 169 | : "=r"(retval),"=m"(*b_addr) \ | 169 | : "=r"(retval),"=m"(*b_addr) \ |
| 170 | : "0" (retval),"r" (b_addr),"r"(nr) \ | 170 | : "0" (retval),"r" (b_addr),"r"(nr) \ |
| 171 | : "memory"); \ | 171 | : "memory"); \ |
| 172 | } \ | 172 | } \ |
| 173 | return retval; \ | 173 | return retval; \ |
| 174 | } | 174 | } |
| 175 | 175 | ||
| 176 | H8300_GEN_TEST_BITOP(test_and_set_bit, "bset") | 176 | H8300_GEN_TEST_BITOP(test_and_set_bit, "bset") |
| 177 | H8300_GEN_TEST_BITOP(test_and_clear_bit, "bclr") | 177 | H8300_GEN_TEST_BITOP(test_and_clear_bit, "bclr") |
| 178 | H8300_GEN_TEST_BITOP(test_and_change_bit,"bnot") | 178 | H8300_GEN_TEST_BITOP(test_and_change_bit,"bnot") |
| 179 | #undef H8300_GEN_TEST_BITOP_CONST | 179 | #undef H8300_GEN_TEST_BITOP_CONST |
| 180 | #undef H8300_GEN_TEST_BITOP_CONST_INT | 180 | #undef H8300_GEN_TEST_BITOP_CONST_INT |
| 181 | #undef H8300_GEN_TEST_BITOP | 181 | #undef H8300_GEN_TEST_BITOP |
| 182 | 182 | ||
| 183 | #include <asm-generic/bitops/ffs.h> | 183 | #include <asm-generic/bitops/ffs.h> |
| 184 | 184 | ||
| 185 | static __inline__ unsigned long __ffs(unsigned long word) | 185 | static __inline__ unsigned long __ffs(unsigned long word) |
| 186 | { | 186 | { |
| 187 | unsigned long result; | 187 | unsigned long result; |
| 188 | 188 | ||
| 189 | result = -1; | 189 | result = -1; |
| 190 | __asm__("1:\n\t" | 190 | __asm__("1:\n\t" |
| 191 | "shlr.l %2\n\t" | 191 | "shlr.l %2\n\t" |
| 192 | "adds #1,%0\n\t" | 192 | "adds #1,%0\n\t" |
| 193 | "bcc 1b" | 193 | "bcc 1b" |
| 194 | : "=r" (result) | 194 | : "=r" (result) |
| 195 | : "0"(result),"r"(word)); | 195 | : "0"(result),"r"(word)); |
| 196 | return result; | 196 | return result; |
| 197 | } | 197 | } |
| 198 | 198 | ||
| 199 | #include <asm-generic/bitops/find.h> | 199 | #include <asm-generic/bitops/find.h> |
| 200 | #include <asm-generic/bitops/sched.h> | 200 | #include <asm-generic/bitops/sched.h> |
| 201 | #include <asm-generic/bitops/hweight.h> | 201 | #include <asm-generic/bitops/hweight.h> |
| 202 | #include <asm-generic/bitops/lock.h> | 202 | #include <asm-generic/bitops/lock.h> |
| 203 | #include <asm-generic/bitops/le.h> | 203 | #include <asm-generic/bitops/le.h> |
| 204 | #include <asm-generic/bitops/ext2-atomic.h> | 204 | #include <asm-generic/bitops/ext2-atomic.h> |
| 205 | #include <asm-generic/bitops/minix.h> | ||
| 206 | 205 | ||
| 207 | #endif /* __KERNEL__ */ | 206 | #endif /* __KERNEL__ */ |
| 208 | 207 | ||
| 209 | #include <asm-generic/bitops/fls.h> | 208 | #include <asm-generic/bitops/fls.h> |
| 210 | #include <asm-generic/bitops/__fls.h> | 209 | #include <asm-generic/bitops/__fls.h> |
| 211 | #include <asm-generic/bitops/fls64.h> | 210 | #include <asm-generic/bitops/fls64.h> |
| 212 | 211 | ||
| 213 | #endif /* _H8300_BITOPS_H */ | 212 | #endif /* _H8300_BITOPS_H */ |
| 214 | 213 |
arch/ia64/include/asm/bitops.h
| 1 | #ifndef _ASM_IA64_BITOPS_H | 1 | #ifndef _ASM_IA64_BITOPS_H |
| 2 | #define _ASM_IA64_BITOPS_H | 2 | #define _ASM_IA64_BITOPS_H |
| 3 | 3 | ||
| 4 | /* | 4 | /* |
| 5 | * Copyright (C) 1998-2003 Hewlett-Packard Co | 5 | * Copyright (C) 1998-2003 Hewlett-Packard Co |
| 6 | * David Mosberger-Tang <davidm@hpl.hp.com> | 6 | * David Mosberger-Tang <davidm@hpl.hp.com> |
| 7 | * | 7 | * |
| 8 | * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64 | 8 | * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64 |
| 9 | * O(1) scheduler patch | 9 | * O(1) scheduler patch |
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | #ifndef _LINUX_BITOPS_H | 12 | #ifndef _LINUX_BITOPS_H |
| 13 | #error only <linux/bitops.h> can be included directly | 13 | #error only <linux/bitops.h> can be included directly |
| 14 | #endif | 14 | #endif |
| 15 | 15 | ||
| 16 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
| 17 | #include <linux/types.h> | 17 | #include <linux/types.h> |
| 18 | #include <asm/intrinsics.h> | 18 | #include <asm/intrinsics.h> |
| 19 | 19 | ||
| 20 | /** | 20 | /** |
| 21 | * set_bit - Atomically set a bit in memory | 21 | * set_bit - Atomically set a bit in memory |
| 22 | * @nr: the bit to set | 22 | * @nr: the bit to set |
| 23 | * @addr: the address to start counting from | 23 | * @addr: the address to start counting from |
| 24 | * | 24 | * |
| 25 | * This function is atomic and may not be reordered. See __set_bit() | 25 | * This function is atomic and may not be reordered. See __set_bit() |
| 26 | * if you do not require the atomic guarantees. | 26 | * if you do not require the atomic guarantees. |
| 27 | * Note that @nr may be almost arbitrarily large; this function is not | 27 | * Note that @nr may be almost arbitrarily large; this function is not |
| 28 | * restricted to acting on a single-word quantity. | 28 | * restricted to acting on a single-word quantity. |
| 29 | * | 29 | * |
| 30 | * The address must be (at least) "long" aligned. | 30 | * The address must be (at least) "long" aligned. |
| 31 | * Note that there are drivers (e.g., eepro100) which use these operations to | 31 | * Note that there are drivers (e.g., eepro100) which use these operations to |
| 32 | * operate on hw-defined data-structures, so we can't easily change these | 32 | * operate on hw-defined data-structures, so we can't easily change these |
| 33 | * operations to force a bigger alignment. | 33 | * operations to force a bigger alignment. |
| 34 | * | 34 | * |
| 35 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). | 35 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). |
| 36 | */ | 36 | */ |
| 37 | static __inline__ void | 37 | static __inline__ void |
| 38 | set_bit (int nr, volatile void *addr) | 38 | set_bit (int nr, volatile void *addr) |
| 39 | { | 39 | { |
| 40 | __u32 bit, old, new; | 40 | __u32 bit, old, new; |
| 41 | volatile __u32 *m; | 41 | volatile __u32 *m; |
| 42 | CMPXCHG_BUGCHECK_DECL | 42 | CMPXCHG_BUGCHECK_DECL |
| 43 | 43 | ||
| 44 | m = (volatile __u32 *) addr + (nr >> 5); | 44 | m = (volatile __u32 *) addr + (nr >> 5); |
| 45 | bit = 1 << (nr & 31); | 45 | bit = 1 << (nr & 31); |
| 46 | do { | 46 | do { |
| 47 | CMPXCHG_BUGCHECK(m); | 47 | CMPXCHG_BUGCHECK(m); |
| 48 | old = *m; | 48 | old = *m; |
| 49 | new = old | bit; | 49 | new = old | bit; |
| 50 | } while (cmpxchg_acq(m, old, new) != old); | 50 | } while (cmpxchg_acq(m, old, new) != old); |
| 51 | } | 51 | } |
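Every atomic bitop in this file follows the same retry shape: snapshot the 32-bit word, compute the new value, and loop until a compare-and-exchange with acquire semantics confirms the snapshot was still current. A hedged sketch of that loop using a GCC builtin in place of the ia64 intrinsics (illustration only, not the kernel implementation):

	static inline void set_bit_cas_sketch(int nr, volatile unsigned int *addr)
	{
		volatile unsigned int *m = addr + (nr >> 5);
		unsigned int bit = 1U << (nr & 31);
		unsigned int old;

		do {
			old = *m;
		} while (__sync_val_compare_and_swap(m, old, old | bit) != old);
	}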
| 52 | 52 | ||
| 53 | /** | 53 | /** |
| 54 | * __set_bit - Set a bit in memory | 54 | * __set_bit - Set a bit in memory |
| 55 | * @nr: the bit to set | 55 | * @nr: the bit to set |
| 56 | * @addr: the address to start counting from | 56 | * @addr: the address to start counting from |
| 57 | * | 57 | * |
| 58 | * Unlike set_bit(), this function is non-atomic and may be reordered. | 58 | * Unlike set_bit(), this function is non-atomic and may be reordered. |
| 59 | * If it's called on the same region of memory simultaneously, the effect | 59 | * If it's called on the same region of memory simultaneously, the effect |
| 60 | * may be that only one operation succeeds. | 60 | * may be that only one operation succeeds. |
| 61 | */ | 61 | */ |
| 62 | static __inline__ void | 62 | static __inline__ void |
| 63 | __set_bit (int nr, volatile void *addr) | 63 | __set_bit (int nr, volatile void *addr) |
| 64 | { | 64 | { |
| 65 | *((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31)); | 65 | *((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31)); |
| 66 | } | 66 | } |
| 67 | 67 | ||
| 68 | /* | 68 | /* |
| 69 | * clear_bit() has "acquire" semantics. | 69 | * clear_bit() has "acquire" semantics. |
| 70 | */ | 70 | */ |
| 71 | #define smp_mb__before_clear_bit() smp_mb() | 71 | #define smp_mb__before_clear_bit() smp_mb() |
| 72 | #define smp_mb__after_clear_bit() do { /* skip */; } while (0) | 72 | #define smp_mb__after_clear_bit() do { /* skip */; } while (0) |
| 73 | 73 | ||
| 74 | /** | 74 | /** |
| 75 | * clear_bit - Clears a bit in memory | 75 | * clear_bit - Clears a bit in memory |
| 76 | * @nr: Bit to clear | 76 | * @nr: Bit to clear |
| 77 | * @addr: Address to start counting from | 77 | * @addr: Address to start counting from |
| 78 | * | 78 | * |
| 79 | * clear_bit() is atomic and may not be reordered. However, it does | 79 | * clear_bit() is atomic and may not be reordered. However, it does |
| 80 | * not contain a memory barrier, so if it is used for locking purposes, | 80 | * not contain a memory barrier, so if it is used for locking purposes, |
| 81 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | 81 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() |
| 82 | * in order to ensure changes are visible on other processors. | 82 | * in order to ensure changes are visible on other processors. |
| 83 | */ | 83 | */ |
| 84 | static __inline__ void | 84 | static __inline__ void |
| 85 | clear_bit (int nr, volatile void *addr) | 85 | clear_bit (int nr, volatile void *addr) |
| 86 | { | 86 | { |
| 87 | __u32 mask, old, new; | 87 | __u32 mask, old, new; |
| 88 | volatile __u32 *m; | 88 | volatile __u32 *m; |
| 89 | CMPXCHG_BUGCHECK_DECL | 89 | CMPXCHG_BUGCHECK_DECL |
| 90 | 90 | ||
| 91 | m = (volatile __u32 *) addr + (nr >> 5); | 91 | m = (volatile __u32 *) addr + (nr >> 5); |
| 92 | mask = ~(1 << (nr & 31)); | 92 | mask = ~(1 << (nr & 31)); |
| 93 | do { | 93 | do { |
| 94 | CMPXCHG_BUGCHECK(m); | 94 | CMPXCHG_BUGCHECK(m); |
| 95 | old = *m; | 95 | old = *m; |
| 96 | new = old & mask; | 96 | new = old & mask; |
| 97 | } while (cmpxchg_acq(m, old, new) != old); | 97 | } while (cmpxchg_acq(m, old, new) != old); |
| 98 | } | 98 | } |
| 99 | 99 | ||
| 100 | /** | 100 | /** |
| 101 | * clear_bit_unlock - Clears a bit in memory with release | 101 | * clear_bit_unlock - Clears a bit in memory with release |
| 102 | * @nr: Bit to clear | 102 | * @nr: Bit to clear |
| 103 | * @addr: Address to start counting from | 103 | * @addr: Address to start counting from |
| 104 | * | 104 | * |
| 105 | * clear_bit_unlock() is atomic and may not be reordered. It does | 105 | * clear_bit_unlock() is atomic and may not be reordered. It does |
| 106 | * contain a memory barrier suitable for unlock type operations. | 106 | * contain a memory barrier suitable for unlock type operations. |
| 107 | */ | 107 | */ |
| 108 | static __inline__ void | 108 | static __inline__ void |
| 109 | clear_bit_unlock (int nr, volatile void *addr) | 109 | clear_bit_unlock (int nr, volatile void *addr) |
| 110 | { | 110 | { |
| 111 | __u32 mask, old, new; | 111 | __u32 mask, old, new; |
| 112 | volatile __u32 *m; | 112 | volatile __u32 *m; |
| 113 | CMPXCHG_BUGCHECK_DECL | 113 | CMPXCHG_BUGCHECK_DECL |
| 114 | 114 | ||
| 115 | m = (volatile __u32 *) addr + (nr >> 5); | 115 | m = (volatile __u32 *) addr + (nr >> 5); |
| 116 | mask = ~(1 << (nr & 31)); | 116 | mask = ~(1 << (nr & 31)); |
| 117 | do { | 117 | do { |
| 118 | CMPXCHG_BUGCHECK(m); | 118 | CMPXCHG_BUGCHECK(m); |
| 119 | old = *m; | 119 | old = *m; |
| 120 | new = old & mask; | 120 | new = old & mask; |
| 121 | } while (cmpxchg_rel(m, old, new) != old); | 121 | } while (cmpxchg_rel(m, old, new) != old); |
| 122 | } | 122 | } |
| 123 | 123 | ||
| 124 | /** | 124 | /** |
| 125 | * __clear_bit_unlock - Non-atomically clears a bit in memory with release | 125 | * __clear_bit_unlock - Non-atomically clears a bit in memory with release |
| 126 | * @nr: Bit to clear | 126 | * @nr: Bit to clear |
| 127 | * @addr: Address to start counting from | 127 | * @addr: Address to start counting from |
| 128 | * | 128 | * |
| 129 | * Similarly to clear_bit_unlock, the implementation uses a store | 129 | * Similarly to clear_bit_unlock, the implementation uses a store |
| 130 | * with release semantics. See also arch_spin_unlock(). | 130 | * with release semantics. See also arch_spin_unlock(). |
| 131 | */ | 131 | */ |
| 132 | static __inline__ void | 132 | static __inline__ void |
| 133 | __clear_bit_unlock(int nr, void *addr) | 133 | __clear_bit_unlock(int nr, void *addr) |
| 134 | { | 134 | { |
| 135 | __u32 * const m = (__u32 *) addr + (nr >> 5); | 135 | __u32 * const m = (__u32 *) addr + (nr >> 5); |
| 136 | __u32 const new = *m & ~(1 << (nr & 31)); | 136 | __u32 const new = *m & ~(1 << (nr & 31)); |
| 137 | 137 | ||
| 138 | ia64_st4_rel_nta(m, new); | 138 | ia64_st4_rel_nta(m, new); |
| 139 | } | 139 | } |
| 140 | 140 | ||
| 141 | /** | 141 | /** |
| 142 | * __clear_bit - Clears a bit in memory (non-atomic version) | 142 | * __clear_bit - Clears a bit in memory (non-atomic version) |
| 143 | * @nr: the bit to clear | 143 | * @nr: the bit to clear |
| 144 | * @addr: the address to start counting from | 144 | * @addr: the address to start counting from |
| 145 | * | 145 | * |
| 146 | * Unlike clear_bit(), this function is non-atomic and may be reordered. | 146 | * Unlike clear_bit(), this function is non-atomic and may be reordered. |
| 147 | * If it's called on the same region of memory simultaneously, the effect | 147 | * If it's called on the same region of memory simultaneously, the effect |
| 148 | * may be that only one operation succeeds. | 148 | * may be that only one operation succeeds. |
| 149 | */ | 149 | */ |
| 150 | static __inline__ void | 150 | static __inline__ void |
| 151 | __clear_bit (int nr, volatile void *addr) | 151 | __clear_bit (int nr, volatile void *addr) |
| 152 | { | 152 | { |
| 153 | *((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31)); | 153 | *((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31)); |
| 154 | } | 154 | } |
| 155 | 155 | ||
| 156 | /** | 156 | /** |
| 157 | * change_bit - Toggle a bit in memory | 157 | * change_bit - Toggle a bit in memory |
| 158 | * @nr: Bit to toggle | 158 | * @nr: Bit to toggle |
| 159 | * @addr: Address to start counting from | 159 | * @addr: Address to start counting from |
| 160 | * | 160 | * |
| 161 | * change_bit() is atomic and may not be reordered. | 161 | * change_bit() is atomic and may not be reordered. |
| 162 | * Note that @nr may be almost arbitrarily large; this function is not | 162 | * Note that @nr may be almost arbitrarily large; this function is not |
| 163 | * restricted to acting on a single-word quantity. | 163 | * restricted to acting on a single-word quantity. |
| 164 | */ | 164 | */ |
| 165 | static __inline__ void | 165 | static __inline__ void |
| 166 | change_bit (int nr, volatile void *addr) | 166 | change_bit (int nr, volatile void *addr) |
| 167 | { | 167 | { |
| 168 | __u32 bit, old, new; | 168 | __u32 bit, old, new; |
| 169 | volatile __u32 *m; | 169 | volatile __u32 *m; |
| 170 | CMPXCHG_BUGCHECK_DECL | 170 | CMPXCHG_BUGCHECK_DECL |
| 171 | 171 | ||
| 172 | m = (volatile __u32 *) addr + (nr >> 5); | 172 | m = (volatile __u32 *) addr + (nr >> 5); |
| 173 | bit = (1 << (nr & 31)); | 173 | bit = (1 << (nr & 31)); |
| 174 | do { | 174 | do { |
| 175 | CMPXCHG_BUGCHECK(m); | 175 | CMPXCHG_BUGCHECK(m); |
| 176 | old = *m; | 176 | old = *m; |
| 177 | new = old ^ bit; | 177 | new = old ^ bit; |
| 178 | } while (cmpxchg_acq(m, old, new) != old); | 178 | } while (cmpxchg_acq(m, old, new) != old); |
| 179 | } | 179 | } |
| 180 | 180 | ||
| 181 | /** | 181 | /** |
| 182 | * __change_bit - Toggle a bit in memory | 182 | * __change_bit - Toggle a bit in memory |
| 183 | * @nr: the bit to toggle | 183 | * @nr: the bit to toggle |
| 184 | * @addr: the address to start counting from | 184 | * @addr: the address to start counting from |
| 185 | * | 185 | * |
| 186 | * Unlike change_bit(), this function is non-atomic and may be reordered. | 186 | * Unlike change_bit(), this function is non-atomic and may be reordered. |
| 187 | * If it's called on the same region of memory simultaneously, the effect | 187 | * If it's called on the same region of memory simultaneously, the effect |
| 188 | * may be that only one operation succeeds. | 188 | * may be that only one operation succeeds. |
| 189 | */ | 189 | */ |
| 190 | static __inline__ void | 190 | static __inline__ void |
| 191 | __change_bit (int nr, volatile void *addr) | 191 | __change_bit (int nr, volatile void *addr) |
| 192 | { | 192 | { |
| 193 | *((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31)); | 193 | *((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31)); |
| 194 | } | 194 | } |
| 195 | 195 | ||
| 196 | /** | 196 | /** |
| 197 | * test_and_set_bit - Set a bit and return its old value | 197 | * test_and_set_bit - Set a bit and return its old value |
| 198 | * @nr: Bit to set | 198 | * @nr: Bit to set |
| 199 | * @addr: Address to count from | 199 | * @addr: Address to count from |
| 200 | * | 200 | * |
| 201 | * This operation is atomic and cannot be reordered. | 201 | * This operation is atomic and cannot be reordered. |
| 202 | * It also implies the acquisition side of the memory barrier. | 202 | * It also implies the acquisition side of the memory barrier. |
| 203 | */ | 203 | */ |
| 204 | static __inline__ int | 204 | static __inline__ int |
| 205 | test_and_set_bit (int nr, volatile void *addr) | 205 | test_and_set_bit (int nr, volatile void *addr) |
| 206 | { | 206 | { |
| 207 | __u32 bit, old, new; | 207 | __u32 bit, old, new; |
| 208 | volatile __u32 *m; | 208 | volatile __u32 *m; |
| 209 | CMPXCHG_BUGCHECK_DECL | 209 | CMPXCHG_BUGCHECK_DECL |
| 210 | 210 | ||
| 211 | m = (volatile __u32 *) addr + (nr >> 5); | 211 | m = (volatile __u32 *) addr + (nr >> 5); |
| 212 | bit = 1 << (nr & 31); | 212 | bit = 1 << (nr & 31); |
| 213 | do { | 213 | do { |
| 214 | CMPXCHG_BUGCHECK(m); | 214 | CMPXCHG_BUGCHECK(m); |
| 215 | old = *m; | 215 | old = *m; |
| 216 | new = old | bit; | 216 | new = old | bit; |
| 217 | } while (cmpxchg_acq(m, old, new) != old); | 217 | } while (cmpxchg_acq(m, old, new) != old); |
| 218 | return (old & bit) != 0; | 218 | return (old & bit) != 0; |
| 219 | } | 219 | } |
| 220 | 220 | ||
| 221 | /** | 221 | /** |
| 222 | * test_and_set_bit_lock - Set a bit and return its old value for lock | 222 | * test_and_set_bit_lock - Set a bit and return its old value for lock |
| 223 | * @nr: Bit to set | 223 | * @nr: Bit to set |
| 224 | * @addr: Address to count from | 224 | * @addr: Address to count from |
| 225 | * | 225 | * |
| 226 | * This is the same as test_and_set_bit on ia64 | 226 | * This is the same as test_and_set_bit on ia64 |
| 227 | */ | 227 | */ |
| 228 | #define test_and_set_bit_lock test_and_set_bit | 228 | #define test_and_set_bit_lock test_and_set_bit |
| 229 | 229 | ||
| 230 | /** | 230 | /** |
| 231 | * __test_and_set_bit - Set a bit and return its old value | 231 | * __test_and_set_bit - Set a bit and return its old value |
| 232 | * @nr: Bit to set | 232 | * @nr: Bit to set |
| 233 | * @addr: Address to count from | 233 | * @addr: Address to count from |
| 234 | * | 234 | * |
| 235 | * This operation is non-atomic and can be reordered. | 235 | * This operation is non-atomic and can be reordered. |
| 236 | * If two instances of this operation race, one can appear to succeed | 236 | * If two instances of this operation race, one can appear to succeed |
| 237 | * but actually fail. You must protect multiple accesses with a lock. | 237 | * but actually fail. You must protect multiple accesses with a lock. |
| 238 | */ | 238 | */ |
| 239 | static __inline__ int | 239 | static __inline__ int |
| 240 | __test_and_set_bit (int nr, volatile void *addr) | 240 | __test_and_set_bit (int nr, volatile void *addr) |
| 241 | { | 241 | { |
| 242 | __u32 *p = (__u32 *) addr + (nr >> 5); | 242 | __u32 *p = (__u32 *) addr + (nr >> 5); |
| 243 | __u32 m = 1 << (nr & 31); | 243 | __u32 m = 1 << (nr & 31); |
| 244 | int oldbitset = (*p & m) != 0; | 244 | int oldbitset = (*p & m) != 0; |
| 245 | 245 | ||
| 246 | *p |= m; | 246 | *p |= m; |
| 247 | return oldbitset; | 247 | return oldbitset; |
| 248 | } | 248 | } |
| 249 | 249 | ||
| 250 | /** | 250 | /** |
| 251 | * test_and_clear_bit - Clear a bit and return its old value | 251 | * test_and_clear_bit - Clear a bit and return its old value |
| 252 | * @nr: Bit to clear | 252 | * @nr: Bit to clear |
| 253 | * @addr: Address to count from | 253 | * @addr: Address to count from |
| 254 | * | 254 | * |
| 255 | * This operation is atomic and cannot be reordered. | 255 | * This operation is atomic and cannot be reordered. |
| 256 | * It also implies the acquisition side of the memory barrier. | 256 | * It also implies the acquisition side of the memory barrier. |
| 257 | */ | 257 | */ |
| 258 | static __inline__ int | 258 | static __inline__ int |
| 259 | test_and_clear_bit (int nr, volatile void *addr) | 259 | test_and_clear_bit (int nr, volatile void *addr) |
| 260 | { | 260 | { |
| 261 | __u32 mask, old, new; | 261 | __u32 mask, old, new; |
| 262 | volatile __u32 *m; | 262 | volatile __u32 *m; |
| 263 | CMPXCHG_BUGCHECK_DECL | 263 | CMPXCHG_BUGCHECK_DECL |
| 264 | 264 | ||
| 265 | m = (volatile __u32 *) addr + (nr >> 5); | 265 | m = (volatile __u32 *) addr + (nr >> 5); |
| 266 | mask = ~(1 << (nr & 31)); | 266 | mask = ~(1 << (nr & 31)); |
| 267 | do { | 267 | do { |
| 268 | CMPXCHG_BUGCHECK(m); | 268 | CMPXCHG_BUGCHECK(m); |
| 269 | old = *m; | 269 | old = *m; |
| 270 | new = old & mask; | 270 | new = old & mask; |
| 271 | } while (cmpxchg_acq(m, old, new) != old); | 271 | } while (cmpxchg_acq(m, old, new) != old); |
| 272 | return (old & ~mask) != 0; | 272 | return (old & ~mask) != 0; |
| 273 | } | 273 | } |
| 274 | 274 | ||
| 275 | /** | 275 | /** |
| 276 | * __test_and_clear_bit - Clear a bit and return its old value | 276 | * __test_and_clear_bit - Clear a bit and return its old value |
| 277 | * @nr: Bit to clear | 277 | * @nr: Bit to clear |
| 278 | * @addr: Address to count from | 278 | * @addr: Address to count from |
| 279 | * | 279 | * |
| 280 | * This operation is non-atomic and can be reordered. | 280 | * This operation is non-atomic and can be reordered. |
| 281 | * If two instances of this operation race, one can appear to succeed | 281 | * If two instances of this operation race, one can appear to succeed |
| 282 | * but actually fail. You must protect multiple accesses with a lock. | 282 | * but actually fail. You must protect multiple accesses with a lock. |
| 283 | */ | 283 | */ |
| 284 | static __inline__ int | 284 | static __inline__ int |
| 285 | __test_and_clear_bit(int nr, volatile void * addr) | 285 | __test_and_clear_bit(int nr, volatile void * addr) |
| 286 | { | 286 | { |
| 287 | __u32 *p = (__u32 *) addr + (nr >> 5); | 287 | __u32 *p = (__u32 *) addr + (nr >> 5); |
| 288 | __u32 m = 1 << (nr & 31); | 288 | __u32 m = 1 << (nr & 31); |
| 289 | int oldbitset = (*p & m) != 0; | 289 | int oldbitset = (*p & m) != 0; |
| 290 | 290 | ||
| 291 | *p &= ~m; | 291 | *p &= ~m; |
| 292 | return oldbitset; | 292 | return oldbitset; |
| 293 | } | 293 | } |
| 294 | 294 | ||
| 295 | /** | 295 | /** |
| 296 | * test_and_change_bit - Change a bit and return its old value | 296 | * test_and_change_bit - Change a bit and return its old value |
| 297 | * @nr: Bit to change | 297 | * @nr: Bit to change |
| 298 | * @addr: Address to count from | 298 | * @addr: Address to count from |
| 299 | * | 299 | * |
| 300 | * This operation is atomic and cannot be reordered. | 300 | * This operation is atomic and cannot be reordered. |
| 301 | * It also implies the acquisition side of the memory barrier. | 301 | * It also implies the acquisition side of the memory barrier. |
| 302 | */ | 302 | */ |
| 303 | static __inline__ int | 303 | static __inline__ int |
| 304 | test_and_change_bit (int nr, volatile void *addr) | 304 | test_and_change_bit (int nr, volatile void *addr) |
| 305 | { | 305 | { |
| 306 | __u32 bit, old, new; | 306 | __u32 bit, old, new; |
| 307 | volatile __u32 *m; | 307 | volatile __u32 *m; |
| 308 | CMPXCHG_BUGCHECK_DECL | 308 | CMPXCHG_BUGCHECK_DECL |
| 309 | 309 | ||
| 310 | m = (volatile __u32 *) addr + (nr >> 5); | 310 | m = (volatile __u32 *) addr + (nr >> 5); |
| 311 | bit = (1 << (nr & 31)); | 311 | bit = (1 << (nr & 31)); |
| 312 | do { | 312 | do { |
| 313 | CMPXCHG_BUGCHECK(m); | 313 | CMPXCHG_BUGCHECK(m); |
| 314 | old = *m; | 314 | old = *m; |
| 315 | new = old ^ bit; | 315 | new = old ^ bit; |
| 316 | } while (cmpxchg_acq(m, old, new) != old); | 316 | } while (cmpxchg_acq(m, old, new) != old); |
| 317 | return (old & bit) != 0; | 317 | return (old & bit) != 0; |
| 318 | } | 318 | } |
| 319 | 319 | ||
| 320 | /** | 320 | /** |
| 321 | * __test_and_change_bit - Change a bit and return its old value | 321 | * __test_and_change_bit - Change a bit and return its old value |
| 322 | * @nr: Bit to change | 322 | * @nr: Bit to change |
| 323 | * @addr: Address to count from | 323 | * @addr: Address to count from |
| 324 | * | 324 | * |
| 325 | * This operation is non-atomic and can be reordered. | 325 | * This operation is non-atomic and can be reordered. |
| 326 | */ | 326 | */ |
| 327 | static __inline__ int | 327 | static __inline__ int |
| 328 | __test_and_change_bit (int nr, void *addr) | 328 | __test_and_change_bit (int nr, void *addr) |
| 329 | { | 329 | { |
| 330 | __u32 old, bit = (1 << (nr & 31)); | 330 | __u32 old, bit = (1 << (nr & 31)); |
| 331 | __u32 *m = (__u32 *) addr + (nr >> 5); | 331 | __u32 *m = (__u32 *) addr + (nr >> 5); |
| 332 | 332 | ||
| 333 | old = *m; | 333 | old = *m; |
| 334 | *m = old ^ bit; | 334 | *m = old ^ bit; |
| 335 | return (old & bit) != 0; | 335 | return (old & bit) != 0; |
| 336 | } | 336 | } |
| 337 | 337 | ||
| 338 | static __inline__ int | 338 | static __inline__ int |
| 339 | test_bit (int nr, const volatile void *addr) | 339 | test_bit (int nr, const volatile void *addr) |
| 340 | { | 340 | { |
| 341 | return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31)); | 341 | return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31)); |
| 342 | } | 342 | } |
| 343 | 343 | ||
| 344 | /** | 344 | /** |
| 345 | * ffz - find the first zero bit in a long word | 345 | * ffz - find the first zero bit in a long word |
| 346 | * @x: The long word to find the bit in | 346 | * @x: The long word to find the bit in |
| 347 | * | 347 | * |
| 348 | * Returns the bit-number (0..63) of the first (least significant) zero bit. | 348 | * Returns the bit-number (0..63) of the first (least significant) zero bit. |
| 349 | * Undefined if no zero exists, so code should check against ~0UL first... | 349 | * Undefined if no zero exists, so code should check against ~0UL first... |
| 350 | */ | 350 | */ |
| 351 | static inline unsigned long | 351 | static inline unsigned long |
| 352 | ffz (unsigned long x) | 352 | ffz (unsigned long x) |
| 353 | { | 353 | { |
| 354 | unsigned long result; | 354 | unsigned long result; |
| 355 | 355 | ||
| 356 | result = ia64_popcnt(x & (~x - 1)); | 356 | result = ia64_popcnt(x & (~x - 1)); |
| 357 | return result; | 357 | return result; |
| 358 | } | 358 | } |
| 359 | 359 | ||
| 360 | /** | 360 | /** |
| 361 | * __ffs - find first bit in word. | 361 | * __ffs - find first bit in word. |
| 362 | * @x: The word to search | 362 | * @x: The word to search |
| 363 | * | 363 | * |
| 364 | * Undefined if no bit exists, so code should check against 0 first. | 364 | * Undefined if no bit exists, so code should check against 0 first. |
| 365 | */ | 365 | */ |
| 366 | static __inline__ unsigned long | 366 | static __inline__ unsigned long |
| 367 | __ffs (unsigned long x) | 367 | __ffs (unsigned long x) |
| 368 | { | 368 | { |
| 369 | unsigned long result; | 369 | unsigned long result; |
| 370 | 370 | ||
| 371 | result = ia64_popcnt((x-1) & ~x); | 371 | result = ia64_popcnt((x-1) & ~x); |
| 372 | return result; | 372 | return result; |
| 373 | } | 373 | } |
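Both helpers reduce a bit scan to a population count: x & (~x - 1) keeps exactly the run of trailing one bits below the first zero, and (x - 1) & ~x keeps the run of trailing zero bits below the first one, so the popcount of the mask is the bit index. The same trick with a compiler builtin, shown for illustration only:

	static inline unsigned long ffz_popcount(unsigned long x)	/* undefined for ~0UL */
	{
		return __builtin_popcountl(x & (~x - 1));
	}

	static inline unsigned long ffs0_popcount(unsigned long x)	/* undefined for 0; mirrors __ffs() */
	{
		return __builtin_popcountl((x - 1) & ~x);
	}

For example, ffz_popcount(0xb) masks out 0x3 and returns 2, the first zero bit of 1011b.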
| 374 | 374 | ||
| 375 | #ifdef __KERNEL__ | 375 | #ifdef __KERNEL__ |
| 376 | 376 | ||
| 377 | /* | 377 | /* |
| 378 | * Return bit number of last (most-significant) bit set. Undefined | 378 | * Return bit number of last (most-significant) bit set. Undefined |
| 379 | * for x==0. Bits are numbered from 0..63 (e.g., ia64_fls(9) == 3). | 379 | * for x==0. Bits are numbered from 0..63 (e.g., ia64_fls(9) == 3). |
| 380 | */ | 380 | */ |
| 381 | static inline unsigned long | 381 | static inline unsigned long |
| 382 | ia64_fls (unsigned long x) | 382 | ia64_fls (unsigned long x) |
| 383 | { | 383 | { |
| 384 | long double d = x; | 384 | long double d = x; |
| 385 | long exp; | 385 | long exp; |
| 386 | 386 | ||
| 387 | exp = ia64_getf_exp(d); | 387 | exp = ia64_getf_exp(d); |
| 388 | return exp - 0xffff; | 388 | return exp - 0xffff; |
| 389 | } | 389 | } |
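ia64_fls() avoids any loop: converting to long double and reading the exponent field with getf.exp directly yields the position of the top set bit, and the ia64 extended format has a 64-bit significand, so the conversion is exact for any unsigned long. A portable illustration of the same idea with the C library (not the kernel code; it is only exact where long double carries at least 64 mantissa bits):

	#include <math.h>

	static inline unsigned long fls_via_exponent(unsigned long x)	/* undefined for x == 0 */
	{
		int exp;

		frexpl((long double)x, &exp);	/* x == m * 2^exp with 0.5 <= m < 1 */
		return (unsigned long)(exp - 1);
	}

For instance, fls_via_exponent(9) sees 9 == 0.5625 * 2^4 and returns 3, matching the ia64_fls(9) == 3 example above.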
| 390 | 390 | ||
| 391 | /* | 391 | /* |
| 392 | * Find the last (most significant) bit set. Returns 0 for x==0 and | 392 | * Find the last (most significant) bit set. Returns 0 for x==0 and |
| 393 | * bits are numbered from 1..32 (e.g., fls(9) == 4). | 393 | * bits are numbered from 1..32 (e.g., fls(9) == 4). |
| 394 | */ | 394 | */ |
| 395 | static inline int | 395 | static inline int |
| 396 | fls (int t) | 396 | fls (int t) |
| 397 | { | 397 | { |
| 398 | unsigned long x = t & 0xffffffffu; | 398 | unsigned long x = t & 0xffffffffu; |
| 399 | 399 | ||
| 400 | if (!x) | 400 | if (!x) |
| 401 | return 0; | 401 | return 0; |
| 402 | x |= x >> 1; | 402 | x |= x >> 1; |
| 403 | x |= x >> 2; | 403 | x |= x >> 2; |
| 404 | x |= x >> 4; | 404 | x |= x >> 4; |
| 405 | x |= x >> 8; | 405 | x |= x >> 8; |
| 406 | x |= x >> 16; | 406 | x |= x >> 16; |
| 407 | return ia64_popcnt(x); | 407 | return ia64_popcnt(x); |
| 408 | } | 408 | } |
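Worked instance of the smear-and-count approach above: for x = 0x90 the successive ORs turn the word into 0xff, and popcount(0xff) == 8, which is fls(0x90) with bits numbered from 1.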
| 409 | 409 | ||
| 410 | /* | 410 | /* |
| 411 | * Find the last (most significant) bit set. Undefined for x==0. | 411 | * Find the last (most significant) bit set. Undefined for x==0. |
| 412 | * Bits are numbered from 0..63 (e.g., __fls(9) == 3). | 412 | * Bits are numbered from 0..63 (e.g., __fls(9) == 3). |
| 413 | */ | 413 | */ |
| 414 | static inline unsigned long | 414 | static inline unsigned long |
| 415 | __fls (unsigned long x) | 415 | __fls (unsigned long x) |
| 416 | { | 416 | { |
| 417 | x |= x >> 1; | 417 | x |= x >> 1; |
| 418 | x |= x >> 2; | 418 | x |= x >> 2; |
| 419 | x |= x >> 4; | 419 | x |= x >> 4; |
| 420 | x |= x >> 8; | 420 | x |= x >> 8; |
| 421 | x |= x >> 16; | 421 | x |= x >> 16; |
| 422 | x |= x >> 32; | 422 | x |= x >> 32; |
| 423 | return ia64_popcnt(x) - 1; | 423 | return ia64_popcnt(x) - 1; |
| 424 | } | 424 | } |
| 425 | 425 | ||
| 426 | #include <asm-generic/bitops/fls64.h> | 426 | #include <asm-generic/bitops/fls64.h> |
| 427 | 427 | ||
| 428 | /* | 428 | /* |
| 429 | * ffs: find first bit set. This is defined the same way as the libc and | 429 | * ffs: find first bit set. This is defined the same way as the libc and |
| 430 | * compiler builtin ffs routines, therefore differs in spirit from the above | 430 | * compiler builtin ffs routines, therefore differs in spirit from the above |
| 431 | * ffz (man ffs): it operates on "int" values only and the result value is the | 431 | * ffz (man ffs): it operates on "int" values only and the result value is the |
| 432 | * bit number + 1. ffs(0) is defined to return zero. | 432 | * bit number + 1. ffs(0) is defined to return zero. |
| 433 | */ | 433 | */ |
| 434 | #define ffs(x) __builtin_ffs(x) | 434 | #define ffs(x) __builtin_ffs(x) |
| 435 | 435 | ||
| 436 | /* | 436 | /* |
| 437 | * hweightN: returns the hamming weight (i.e. the number | 437 | * hweightN: returns the hamming weight (i.e. the number |
| 438 | * of bits set) of a N-bit word | 438 | * of bits set) of a N-bit word |
| 439 | */ | 439 | */ |
| 440 | static __inline__ unsigned long __arch_hweight64(unsigned long x) | 440 | static __inline__ unsigned long __arch_hweight64(unsigned long x) |
| 441 | { | 441 | { |
| 442 | unsigned long result; | 442 | unsigned long result; |
| 443 | result = ia64_popcnt(x); | 443 | result = ia64_popcnt(x); |
| 444 | return result; | 444 | return result; |
| 445 | } | 445 | } |
| 446 | 446 | ||
| 447 | #define __arch_hweight32(x) ((unsigned int) __arch_hweight64((x) & 0xfffffffful)) | 447 | #define __arch_hweight32(x) ((unsigned int) __arch_hweight64((x) & 0xfffffffful)) |
| 448 | #define __arch_hweight16(x) ((unsigned int) __arch_hweight64((x) & 0xfffful)) | 448 | #define __arch_hweight16(x) ((unsigned int) __arch_hweight64((x) & 0xfffful)) |
| 449 | #define __arch_hweight8(x) ((unsigned int) __arch_hweight64((x) & 0xfful)) | 449 | #define __arch_hweight8(x) ((unsigned int) __arch_hweight64((x) & 0xfful)) |
| 450 | 450 | ||
| 451 | #include <asm-generic/bitops/const_hweight.h> | 451 | #include <asm-generic/bitops/const_hweight.h> |
| 452 | 452 | ||
| 453 | #endif /* __KERNEL__ */ | 453 | #endif /* __KERNEL__ */ |
| 454 | 454 | ||
| 455 | #include <asm-generic/bitops/find.h> | 455 | #include <asm-generic/bitops/find.h> |
| 456 | 456 | ||
| 457 | #ifdef __KERNEL__ | 457 | #ifdef __KERNEL__ |
| 458 | 458 | ||
| 459 | #include <asm-generic/bitops/le.h> | 459 | #include <asm-generic/bitops/le.h> |
| 460 | 460 | ||
| 461 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) | 461 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) |
| 462 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) | 462 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) |
| 463 | 463 | ||
| 464 | #include <asm-generic/bitops/minix.h> | ||
| 465 | #include <asm-generic/bitops/sched.h> | 464 | #include <asm-generic/bitops/sched.h> |
| 466 | 465 | ||
| 467 | #endif /* __KERNEL__ */ | 466 | #endif /* __KERNEL__ */ |
| 468 | 467 | ||
| 469 | #endif /* _ASM_IA64_BITOPS_H */ | 468 | #endif /* _ASM_IA64_BITOPS_H */ |
| 470 | 469 |
arch/m32r/include/asm/bitops.h
| 1 | #ifndef _ASM_M32R_BITOPS_H | 1 | #ifndef _ASM_M32R_BITOPS_H |
| 2 | #define _ASM_M32R_BITOPS_H | 2 | #define _ASM_M32R_BITOPS_H |
| 3 | 3 | ||
| 4 | /* | 4 | /* |
| 5 | * linux/include/asm-m32r/bitops.h | 5 | * linux/include/asm-m32r/bitops.h |
| 6 | * | 6 | * |
| 7 | * Copyright 1992, Linus Torvalds. | 7 | * Copyright 1992, Linus Torvalds. |
| 8 | * | 8 | * |
| 9 | * M32R version: | 9 | * M32R version: |
| 10 | * Copyright (C) 2001, 2002 Hitoshi Yamamoto | 10 | * Copyright (C) 2001, 2002 Hitoshi Yamamoto |
| 11 | * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org> | 11 | * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org> |
| 12 | */ | 12 | */ |
| 13 | 13 | ||
| 14 | #ifndef _LINUX_BITOPS_H | 14 | #ifndef _LINUX_BITOPS_H |
| 15 | #error only <linux/bitops.h> can be included directly | 15 | #error only <linux/bitops.h> can be included directly |
| 16 | #endif | 16 | #endif |
| 17 | 17 | ||
| 18 | #include <linux/compiler.h> | 18 | #include <linux/compiler.h> |
| 19 | #include <asm/assembler.h> | 19 | #include <asm/assembler.h> |
| 20 | #include <asm/system.h> | 20 | #include <asm/system.h> |
| 21 | #include <asm/byteorder.h> | 21 | #include <asm/byteorder.h> |
| 22 | #include <asm/types.h> | 22 | #include <asm/types.h> |
| 23 | 23 | ||
| 24 | /* | 24 | /* |
| 25 | * These have to be done with inline assembly: that way the bit-setting | 25 | * These have to be done with inline assembly: that way the bit-setting |
| 26 | * is guaranteed to be atomic. All bit operations return 0 if the bit | 26 | * is guaranteed to be atomic. All bit operations return 0 if the bit |
| 27 | * was cleared before the operation and != 0 if it was not. | 27 | * was cleared before the operation and != 0 if it was not. |
| 28 | * | 28 | * |
| 29 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). | 29 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). |
| 30 | */ | 30 | */ |
| 31 | 31 | ||
| 32 | /** | 32 | /** |
| 33 | * set_bit - Atomically set a bit in memory | 33 | * set_bit - Atomically set a bit in memory |
| 34 | * @nr: the bit to set | 34 | * @nr: the bit to set |
| 35 | * @addr: the address to start counting from | 35 | * @addr: the address to start counting from |
| 36 | * | 36 | * |
| 37 | * This function is atomic and may not be reordered. See __set_bit() | 37 | * This function is atomic and may not be reordered. See __set_bit() |
| 38 | * if you do not require the atomic guarantees. | 38 | * if you do not require the atomic guarantees. |
| 39 | * Note that @nr may be almost arbitrarily large; this function is not | 39 | * Note that @nr may be almost arbitrarily large; this function is not |
| 40 | * restricted to acting on a single-word quantity. | 40 | * restricted to acting on a single-word quantity. |
| 41 | */ | 41 | */ |
| 42 | static __inline__ void set_bit(int nr, volatile void * addr) | 42 | static __inline__ void set_bit(int nr, volatile void * addr) |
| 43 | { | 43 | { |
| 44 | __u32 mask; | 44 | __u32 mask; |
| 45 | volatile __u32 *a = addr; | 45 | volatile __u32 *a = addr; |
| 46 | unsigned long flags; | 46 | unsigned long flags; |
| 47 | unsigned long tmp; | 47 | unsigned long tmp; |
| 48 | 48 | ||
| 49 | a += (nr >> 5); | 49 | a += (nr >> 5); |
| 50 | mask = (1 << (nr & 0x1F)); | 50 | mask = (1 << (nr & 0x1F)); |
| 51 | 51 | ||
| 52 | local_irq_save(flags); | 52 | local_irq_save(flags); |
| 53 | __asm__ __volatile__ ( | 53 | __asm__ __volatile__ ( |
| 54 | DCACHE_CLEAR("%0", "r6", "%1") | 54 | DCACHE_CLEAR("%0", "r6", "%1") |
| 55 | M32R_LOCK" %0, @%1; \n\t" | 55 | M32R_LOCK" %0, @%1; \n\t" |
| 56 | "or %0, %2; \n\t" | 56 | "or %0, %2; \n\t" |
| 57 | M32R_UNLOCK" %0, @%1; \n\t" | 57 | M32R_UNLOCK" %0, @%1; \n\t" |
| 58 | : "=&r" (tmp) | 58 | : "=&r" (tmp) |
| 59 | : "r" (a), "r" (mask) | 59 | : "r" (a), "r" (mask) |
| 60 | : "memory" | 60 | : "memory" |
| 61 | #ifdef CONFIG_CHIP_M32700_TS1 | 61 | #ifdef CONFIG_CHIP_M32700_TS1 |
| 62 | , "r6" | 62 | , "r6" |
| 63 | #endif /* CONFIG_CHIP_M32700_TS1 */ | 63 | #endif /* CONFIG_CHIP_M32700_TS1 */ |
| 64 | ); | 64 | ); |
| 65 | local_irq_restore(flags); | 65 | local_irq_restore(flags); |
| 66 | } | 66 | } |
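On m32r the atomicity comes from disabling local interrupts around a LOCK/UNLOCK load-modify-store of the 32-bit word (plus the TS1 cache workaround). Stripped of the assembly, the operation has the following shape; this is a sketch for illustration only, not the real implementation (local_irq_save()/local_irq_restore() are the kernel interfaces already used above):

	static inline void m32r_style_set_bit_sketch(int nr, volatile unsigned int *addr)
	{
		volatile unsigned int *a = addr + (nr >> 5);
		unsigned int mask = 1U << (nr & 0x1f);
		unsigned long flags;

		local_irq_save(flags);
		*a |= mask;		/* done under M32R_LOCK/M32R_UNLOCK in the real code */
		local_irq_restore(flags);
	}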
| 67 | 67 | ||
| 68 | /** | 68 | /** |
| 69 | * clear_bit - Clears a bit in memory | 69 | * clear_bit - Clears a bit in memory |
| 70 | * @nr: Bit to clear | 70 | * @nr: Bit to clear |
| 71 | * @addr: Address to start counting from | 71 | * @addr: Address to start counting from |
| 72 | * | 72 | * |
| 73 | * clear_bit() is atomic and may not be reordered. However, it does | 73 | * clear_bit() is atomic and may not be reordered. However, it does |
| 74 | * not contain a memory barrier, so if it is used for locking purposes, | 74 | * not contain a memory barrier, so if it is used for locking purposes, |
| 75 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | 75 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() |
| 76 | * in order to ensure changes are visible on other processors. | 76 | * in order to ensure changes are visible on other processors. |
| 77 | */ | 77 | */ |
| 78 | static __inline__ void clear_bit(int nr, volatile void * addr) | 78 | static __inline__ void clear_bit(int nr, volatile void * addr) |
| 79 | { | 79 | { |
| 80 | __u32 mask; | 80 | __u32 mask; |
| 81 | volatile __u32 *a = addr; | 81 | volatile __u32 *a = addr; |
| 82 | unsigned long flags; | 82 | unsigned long flags; |
| 83 | unsigned long tmp; | 83 | unsigned long tmp; |
| 84 | 84 | ||
| 85 | a += (nr >> 5); | 85 | a += (nr >> 5); |
| 86 | mask = (1 << (nr & 0x1F)); | 86 | mask = (1 << (nr & 0x1F)); |
| 87 | 87 | ||
| 88 | local_irq_save(flags); | 88 | local_irq_save(flags); |
| 89 | 89 | ||
| 90 | __asm__ __volatile__ ( | 90 | __asm__ __volatile__ ( |
| 91 | DCACHE_CLEAR("%0", "r6", "%1") | 91 | DCACHE_CLEAR("%0", "r6", "%1") |
| 92 | M32R_LOCK" %0, @%1; \n\t" | 92 | M32R_LOCK" %0, @%1; \n\t" |
| 93 | "and %0, %2; \n\t" | 93 | "and %0, %2; \n\t" |
| 94 | M32R_UNLOCK" %0, @%1; \n\t" | 94 | M32R_UNLOCK" %0, @%1; \n\t" |
| 95 | : "=&r" (tmp) | 95 | : "=&r" (tmp) |
| 96 | : "r" (a), "r" (~mask) | 96 | : "r" (a), "r" (~mask) |
| 97 | : "memory" | 97 | : "memory" |
| 98 | #ifdef CONFIG_CHIP_M32700_TS1 | 98 | #ifdef CONFIG_CHIP_M32700_TS1 |
| 99 | , "r6" | 99 | , "r6" |
| 100 | #endif /* CONFIG_CHIP_M32700_TS1 */ | 100 | #endif /* CONFIG_CHIP_M32700_TS1 */ |
| 101 | ); | 101 | ); |
| 102 | local_irq_restore(flags); | 102 | local_irq_restore(flags); |
| 103 | } | 103 | } |
| 104 | 104 | ||
| 105 | #define smp_mb__before_clear_bit() barrier() | 105 | #define smp_mb__before_clear_bit() barrier() |
| 106 | #define smp_mb__after_clear_bit() barrier() | 106 | #define smp_mb__after_clear_bit() barrier() |
| 107 | 107 | ||
| 108 | /** | 108 | /** |
| 109 | * change_bit - Toggle a bit in memory | 109 | * change_bit - Toggle a bit in memory |
| 110 | * @nr: Bit to toggle | 110 | * @nr: Bit to toggle |
| 111 | * @addr: Address to start counting from | 111 | * @addr: Address to start counting from |
| 112 | * | 112 | * |
| 113 | * change_bit() is atomic and may not be reordered. | 113 | * change_bit() is atomic and may not be reordered. |
| 114 | * Note that @nr may be almost arbitrarily large; this function is not | 114 | * Note that @nr may be almost arbitrarily large; this function is not |
| 115 | * restricted to acting on a single-word quantity. | 115 | * restricted to acting on a single-word quantity. |
| 116 | */ | 116 | */ |
| 117 | static __inline__ void change_bit(int nr, volatile void * addr) | 117 | static __inline__ void change_bit(int nr, volatile void * addr) |
| 118 | { | 118 | { |
| 119 | __u32 mask; | 119 | __u32 mask; |
| 120 | volatile __u32 *a = addr; | 120 | volatile __u32 *a = addr; |
| 121 | unsigned long flags; | 121 | unsigned long flags; |
| 122 | unsigned long tmp; | 122 | unsigned long tmp; |
| 123 | 123 | ||
| 124 | a += (nr >> 5); | 124 | a += (nr >> 5); |
| 125 | mask = (1 << (nr & 0x1F)); | 125 | mask = (1 << (nr & 0x1F)); |
| 126 | 126 | ||
| 127 | local_irq_save(flags); | 127 | local_irq_save(flags); |
| 128 | __asm__ __volatile__ ( | 128 | __asm__ __volatile__ ( |
| 129 | DCACHE_CLEAR("%0", "r6", "%1") | 129 | DCACHE_CLEAR("%0", "r6", "%1") |
| 130 | M32R_LOCK" %0, @%1; \n\t" | 130 | M32R_LOCK" %0, @%1; \n\t" |
| 131 | "xor %0, %2; \n\t" | 131 | "xor %0, %2; \n\t" |
| 132 | M32R_UNLOCK" %0, @%1; \n\t" | 132 | M32R_UNLOCK" %0, @%1; \n\t" |
| 133 | : "=&r" (tmp) | 133 | : "=&r" (tmp) |
| 134 | : "r" (a), "r" (mask) | 134 | : "r" (a), "r" (mask) |
| 135 | : "memory" | 135 | : "memory" |
| 136 | #ifdef CONFIG_CHIP_M32700_TS1 | 136 | #ifdef CONFIG_CHIP_M32700_TS1 |
| 137 | , "r6" | 137 | , "r6" |
| 138 | #endif /* CONFIG_CHIP_M32700_TS1 */ | 138 | #endif /* CONFIG_CHIP_M32700_TS1 */ |
| 139 | ); | 139 | ); |
| 140 | local_irq_restore(flags); | 140 | local_irq_restore(flags); |
| 141 | } | 141 | } |
| 142 | 142 | ||
| 143 | /** | 143 | /** |
| 144 | * test_and_set_bit - Set a bit and return its old value | 144 | * test_and_set_bit - Set a bit and return its old value |
| 145 | * @nr: Bit to set | 145 | * @nr: Bit to set |
| 146 | * @addr: Address to count from | 146 | * @addr: Address to count from |
| 147 | * | 147 | * |
| 148 | * This operation is atomic and cannot be reordered. | 148 | * This operation is atomic and cannot be reordered. |
| 149 | * It also implies a memory barrier. | 149 | * It also implies a memory barrier. |
| 150 | */ | 150 | */ |
| 151 | static __inline__ int test_and_set_bit(int nr, volatile void * addr) | 151 | static __inline__ int test_and_set_bit(int nr, volatile void * addr) |
| 152 | { | 152 | { |
| 153 | __u32 mask, oldbit; | 153 | __u32 mask, oldbit; |
| 154 | volatile __u32 *a = addr; | 154 | volatile __u32 *a = addr; |
| 155 | unsigned long flags; | 155 | unsigned long flags; |
| 156 | unsigned long tmp; | 156 | unsigned long tmp; |
| 157 | 157 | ||
| 158 | a += (nr >> 5); | 158 | a += (nr >> 5); |
| 159 | mask = (1 << (nr & 0x1F)); | 159 | mask = (1 << (nr & 0x1F)); |
| 160 | 160 | ||
| 161 | local_irq_save(flags); | 161 | local_irq_save(flags); |
| 162 | __asm__ __volatile__ ( | 162 | __asm__ __volatile__ ( |
| 163 | DCACHE_CLEAR("%0", "%1", "%2") | 163 | DCACHE_CLEAR("%0", "%1", "%2") |
| 164 | M32R_LOCK" %0, @%2; \n\t" | 164 | M32R_LOCK" %0, @%2; \n\t" |
| 165 | "mv %1, %0; \n\t" | 165 | "mv %1, %0; \n\t" |
| 166 | "and %0, %3; \n\t" | 166 | "and %0, %3; \n\t" |
| 167 | "or %1, %3; \n\t" | 167 | "or %1, %3; \n\t" |
| 168 | M32R_UNLOCK" %1, @%2; \n\t" | 168 | M32R_UNLOCK" %1, @%2; \n\t" |
| 169 | : "=&r" (oldbit), "=&r" (tmp) | 169 | : "=&r" (oldbit), "=&r" (tmp) |
| 170 | : "r" (a), "r" (mask) | 170 | : "r" (a), "r" (mask) |
| 171 | : "memory" | 171 | : "memory" |
| 172 | ); | 172 | ); |
| 173 | local_irq_restore(flags); | 173 | local_irq_restore(flags); |
| 174 | 174 | ||
| 175 | return (oldbit != 0); | 175 | return (oldbit != 0); |
| 176 | } | 176 | } |
| 177 | 177 | ||
| 178 | /** | 178 | /** |
| 179 | * test_and_clear_bit - Clear a bit and return its old value | 179 | * test_and_clear_bit - Clear a bit and return its old value |
| 180 | * @nr: Bit to clear | 180 | * @nr: Bit to clear |
| 181 | * @addr: Address to count from | 181 | * @addr: Address to count from |
| 182 | * | 182 | * |
| 183 | * This operation is atomic and cannot be reordered. | 183 | * This operation is atomic and cannot be reordered. |
| 184 | * It also implies a memory barrier. | 184 | * It also implies a memory barrier. |
| 185 | */ | 185 | */ |
| 186 | static __inline__ int test_and_clear_bit(int nr, volatile void * addr) | 186 | static __inline__ int test_and_clear_bit(int nr, volatile void * addr) |
| 187 | { | 187 | { |
| 188 | __u32 mask, oldbit; | 188 | __u32 mask, oldbit; |
| 189 | volatile __u32 *a = addr; | 189 | volatile __u32 *a = addr; |
| 190 | unsigned long flags; | 190 | unsigned long flags; |
| 191 | unsigned long tmp; | 191 | unsigned long tmp; |
| 192 | 192 | ||
| 193 | a += (nr >> 5); | 193 | a += (nr >> 5); |
| 194 | mask = (1 << (nr & 0x1F)); | 194 | mask = (1 << (nr & 0x1F)); |
| 195 | 195 | ||
| 196 | local_irq_save(flags); | 196 | local_irq_save(flags); |
| 197 | 197 | ||
| 198 | __asm__ __volatile__ ( | 198 | __asm__ __volatile__ ( |
| 199 | DCACHE_CLEAR("%0", "%1", "%3") | 199 | DCACHE_CLEAR("%0", "%1", "%3") |
| 200 | M32R_LOCK" %0, @%3; \n\t" | 200 | M32R_LOCK" %0, @%3; \n\t" |
| 201 | "mv %1, %0; \n\t" | 201 | "mv %1, %0; \n\t" |
| 202 | "and %0, %2; \n\t" | 202 | "and %0, %2; \n\t" |
| 203 | "not %2, %2; \n\t" | 203 | "not %2, %2; \n\t" |
| 204 | "and %1, %2; \n\t" | 204 | "and %1, %2; \n\t" |
| 205 | M32R_UNLOCK" %1, @%3; \n\t" | 205 | M32R_UNLOCK" %1, @%3; \n\t" |
| 206 | : "=&r" (oldbit), "=&r" (tmp), "+r" (mask) | 206 | : "=&r" (oldbit), "=&r" (tmp), "+r" (mask) |
| 207 | : "r" (a) | 207 | : "r" (a) |
| 208 | : "memory" | 208 | : "memory" |
| 209 | ); | 209 | ); |
| 210 | local_irq_restore(flags); | 210 | local_irq_restore(flags); |
| 211 | 211 | ||
| 212 | return (oldbit != 0); | 212 | return (oldbit != 0); |
| 213 | } | 213 | } |
| 214 | 214 | ||
| 215 | /** | 215 | /** |
| 216 | * test_and_change_bit - Change a bit and return its old value | 216 | * test_and_change_bit - Change a bit and return its old value |
| 217 | * @nr: Bit to toggle | 217 | * @nr: Bit to toggle |
| 218 | * @addr: Address to count from | 218 | * @addr: Address to count from |
| 219 | * | 219 | * |
| 220 | * This operation is atomic and cannot be reordered. | 220 | * This operation is atomic and cannot be reordered. |
| 221 | * It also implies a memory barrier. | 221 | * It also implies a memory barrier. |
| 222 | */ | 222 | */ |
| 223 | static __inline__ int test_and_change_bit(int nr, volatile void * addr) | 223 | static __inline__ int test_and_change_bit(int nr, volatile void * addr) |
| 224 | { | 224 | { |
| 225 | __u32 mask, oldbit; | 225 | __u32 mask, oldbit; |
| 226 | volatile __u32 *a = addr; | 226 | volatile __u32 *a = addr; |
| 227 | unsigned long flags; | 227 | unsigned long flags; |
| 228 | unsigned long tmp; | 228 | unsigned long tmp; |
| 229 | 229 | ||
| 230 | a += (nr >> 5); | 230 | a += (nr >> 5); |
| 231 | mask = (1 << (nr & 0x1F)); | 231 | mask = (1 << (nr & 0x1F)); |
| 232 | 232 | ||
| 233 | local_irq_save(flags); | 233 | local_irq_save(flags); |
| 234 | __asm__ __volatile__ ( | 234 | __asm__ __volatile__ ( |
| 235 | DCACHE_CLEAR("%0", "%1", "%2") | 235 | DCACHE_CLEAR("%0", "%1", "%2") |
| 236 | M32R_LOCK" %0, @%2; \n\t" | 236 | M32R_LOCK" %0, @%2; \n\t" |
| 237 | "mv %1, %0; \n\t" | 237 | "mv %1, %0; \n\t" |
| 238 | "and %0, %3; \n\t" | 238 | "and %0, %3; \n\t" |
| 239 | "xor %1, %3; \n\t" | 239 | "xor %1, %3; \n\t" |
| 240 | M32R_UNLOCK" %1, @%2; \n\t" | 240 | M32R_UNLOCK" %1, @%2; \n\t" |
| 241 | : "=&r" (oldbit), "=&r" (tmp) | 241 | : "=&r" (oldbit), "=&r" (tmp) |
| 242 | : "r" (a), "r" (mask) | 242 | : "r" (a), "r" (mask) |
| 243 | : "memory" | 243 | : "memory" |
| 244 | ); | 244 | ); |
| 245 | local_irq_restore(flags); | 245 | local_irq_restore(flags); |
| 246 | 246 | ||
| 247 | return (oldbit != 0); | 247 | return (oldbit != 0); |
| 248 | } | 248 | } |
| 249 | 249 | ||
| 250 | #include <asm-generic/bitops/non-atomic.h> | 250 | #include <asm-generic/bitops/non-atomic.h> |
| 251 | #include <asm-generic/bitops/ffz.h> | 251 | #include <asm-generic/bitops/ffz.h> |
| 252 | #include <asm-generic/bitops/__ffs.h> | 252 | #include <asm-generic/bitops/__ffs.h> |
| 253 | #include <asm-generic/bitops/fls.h> | 253 | #include <asm-generic/bitops/fls.h> |
| 254 | #include <asm-generic/bitops/__fls.h> | 254 | #include <asm-generic/bitops/__fls.h> |
| 255 | #include <asm-generic/bitops/fls64.h> | 255 | #include <asm-generic/bitops/fls64.h> |
| 256 | 256 | ||
| 257 | #ifdef __KERNEL__ | 257 | #ifdef __KERNEL__ |
| 258 | 258 | ||
| 259 | #include <asm-generic/bitops/sched.h> | 259 | #include <asm-generic/bitops/sched.h> |
| 260 | #include <asm-generic/bitops/find.h> | 260 | #include <asm-generic/bitops/find.h> |
| 261 | #include <asm-generic/bitops/ffs.h> | 261 | #include <asm-generic/bitops/ffs.h> |
| 262 | #include <asm-generic/bitops/hweight.h> | 262 | #include <asm-generic/bitops/hweight.h> |
| 263 | #include <asm-generic/bitops/lock.h> | 263 | #include <asm-generic/bitops/lock.h> |
| 264 | 264 | ||
| 265 | #endif /* __KERNEL__ */ | 265 | #endif /* __KERNEL__ */ |
| 266 | 266 | ||
| 267 | #ifdef __KERNEL__ | 267 | #ifdef __KERNEL__ |
| 268 | 268 | ||
| 269 | #include <asm-generic/bitops/le.h> | 269 | #include <asm-generic/bitops/le.h> |
| 270 | #include <asm-generic/bitops/ext2-atomic.h> | 270 | #include <asm-generic/bitops/ext2-atomic.h> |
| 271 | #include <asm-generic/bitops/minix.h> | ||
| 272 | 271 | ||
| 273 | #endif /* __KERNEL__ */ | 272 | #endif /* __KERNEL__ */ |
| 274 | 273 | ||
| 275 | #endif /* _ASM_M32R_BITOPS_H */ | 274 | #endif /* _ASM_M32R_BITOPS_H */ |
| 276 | 275 |
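The m32r hunk above leaves the atomic helpers untouched and only drops the trailing <asm-generic/bitops/minix.h> include. For readers not fluent in m32r assembly, the word/mask arithmetic that the LOCK/UNLOCK sequences implement is the ordinary 32-bit-indexed form; what follows is a minimal, non-atomic C sketch of that arithmetic with made-up sketch_* names (illustrative only, not code from the patch):

	/*
	 * Non-atomic sketch of what the m32r LOCK/UNLOCK sequences compute.
	 * Real code must keep the local_irq_save()/M32R_LOCK/M32R_UNLOCK
	 * protection shown in the hunk above.
	 */
	static inline void sketch_clear_bit(int nr, volatile unsigned long *addr)
	{
		volatile unsigned long *word = addr + (nr >> 5);  /* 32-bit word index */
		unsigned long mask = 1UL << (nr & 0x1F);          /* bit within that word */

		*word &= ~mask;                                   /* "and %0, %2" with ~mask */
	}

	static inline int sketch_test_and_set_bit(int nr, volatile unsigned long *addr)
	{
		volatile unsigned long *word = addr + (nr >> 5);
		unsigned long mask = 1UL << (nr & 0x1F);
		unsigned long old = *word;

		*word = old | mask;                               /* "or %1, %3" */
		return (old & mask) != 0;                         /* oldbit != 0 */
	}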
arch/m68k/include/asm/bitops_mm.h
| 1 | #ifndef _M68K_BITOPS_H | 1 | #ifndef _M68K_BITOPS_H |
| 2 | #define _M68K_BITOPS_H | 2 | #define _M68K_BITOPS_H |
| 3 | /* | 3 | /* |
| 4 | * Copyright 1992, Linus Torvalds. | 4 | * Copyright 1992, Linus Torvalds. |
| 5 | * | 5 | * |
| 6 | * This file is subject to the terms and conditions of the GNU General Public | 6 | * This file is subject to the terms and conditions of the GNU General Public |
| 7 | * License. See the file COPYING in the main directory of this archive | 7 | * License. See the file COPYING in the main directory of this archive |
| 8 | * for more details. | 8 | * for more details. |
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | #ifndef _LINUX_BITOPS_H | 11 | #ifndef _LINUX_BITOPS_H |
| 12 | #error only <linux/bitops.h> can be included directly | 12 | #error only <linux/bitops.h> can be included directly |
| 13 | #endif | 13 | #endif |
| 14 | 14 | ||
| 15 | #include <linux/compiler.h> | 15 | #include <linux/compiler.h> |
| 16 | 16 | ||
| 17 | /* | 17 | /* |
| 18 | * Require 68020 or better. | 18 | * Require 68020 or better. |
| 19 | * | 19 | * |
| 20 | * They use the standard big-endian m680x0 bit ordering. | 20 | * They use the standard big-endian m680x0 bit ordering. |
| 21 | */ | 21 | */ |
| 22 | 22 | ||
| 23 | #define test_and_set_bit(nr,vaddr) \ | 23 | #define test_and_set_bit(nr,vaddr) \ |
| 24 | (__builtin_constant_p(nr) ? \ | 24 | (__builtin_constant_p(nr) ? \ |
| 25 | __constant_test_and_set_bit(nr, vaddr) : \ | 25 | __constant_test_and_set_bit(nr, vaddr) : \ |
| 26 | __generic_test_and_set_bit(nr, vaddr)) | 26 | __generic_test_and_set_bit(nr, vaddr)) |
| 27 | 27 | ||
| 28 | #define __test_and_set_bit(nr,vaddr) test_and_set_bit(nr,vaddr) | 28 | #define __test_and_set_bit(nr,vaddr) test_and_set_bit(nr,vaddr) |
| 29 | 29 | ||
| 30 | static inline int __constant_test_and_set_bit(int nr, unsigned long *vaddr) | 30 | static inline int __constant_test_and_set_bit(int nr, unsigned long *vaddr) |
| 31 | { | 31 | { |
| 32 | char *p = (char *)vaddr + (nr ^ 31) / 8; | 32 | char *p = (char *)vaddr + (nr ^ 31) / 8; |
| 33 | char retval; | 33 | char retval; |
| 34 | 34 | ||
| 35 | __asm__ __volatile__ ("bset %2,%1; sne %0" | 35 | __asm__ __volatile__ ("bset %2,%1; sne %0" |
| 36 | : "=d" (retval), "+m" (*p) | 36 | : "=d" (retval), "+m" (*p) |
| 37 | : "di" (nr & 7)); | 37 | : "di" (nr & 7)); |
| 38 | 38 | ||
| 39 | return retval; | 39 | return retval; |
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | static inline int __generic_test_and_set_bit(int nr, unsigned long *vaddr) | 42 | static inline int __generic_test_and_set_bit(int nr, unsigned long *vaddr) |
| 43 | { | 43 | { |
| 44 | char retval; | 44 | char retval; |
| 45 | 45 | ||
| 46 | __asm__ __volatile__ ("bfset %2{%1:#1}; sne %0" | 46 | __asm__ __volatile__ ("bfset %2{%1:#1}; sne %0" |
| 47 | : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory"); | 47 | : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory"); |
| 48 | 48 | ||
| 49 | return retval; | 49 | return retval; |
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | #define set_bit(nr,vaddr) \ | 52 | #define set_bit(nr,vaddr) \ |
| 53 | (__builtin_constant_p(nr) ? \ | 53 | (__builtin_constant_p(nr) ? \ |
| 54 | __constant_set_bit(nr, vaddr) : \ | 54 | __constant_set_bit(nr, vaddr) : \ |
| 55 | __generic_set_bit(nr, vaddr)) | 55 | __generic_set_bit(nr, vaddr)) |
| 56 | 56 | ||
| 57 | #define __set_bit(nr,vaddr) set_bit(nr,vaddr) | 57 | #define __set_bit(nr,vaddr) set_bit(nr,vaddr) |
| 58 | 58 | ||
| 59 | static inline void __constant_set_bit(int nr, volatile unsigned long *vaddr) | 59 | static inline void __constant_set_bit(int nr, volatile unsigned long *vaddr) |
| 60 | { | 60 | { |
| 61 | char *p = (char *)vaddr + (nr ^ 31) / 8; | 61 | char *p = (char *)vaddr + (nr ^ 31) / 8; |
| 62 | __asm__ __volatile__ ("bset %1,%0" | 62 | __asm__ __volatile__ ("bset %1,%0" |
| 63 | : "+m" (*p) : "di" (nr & 7)); | 63 | : "+m" (*p) : "di" (nr & 7)); |
| 64 | } | 64 | } |
| 65 | 65 | ||
| 66 | static inline void __generic_set_bit(int nr, volatile unsigned long *vaddr) | 66 | static inline void __generic_set_bit(int nr, volatile unsigned long *vaddr) |
| 67 | { | 67 | { |
| 68 | __asm__ __volatile__ ("bfset %1{%0:#1}" | 68 | __asm__ __volatile__ ("bfset %1{%0:#1}" |
| 69 | : : "d" (nr^31), "o" (*vaddr) : "memory"); | 69 | : : "d" (nr^31), "o" (*vaddr) : "memory"); |
| 70 | } | 70 | } |
| 71 | 71 | ||
| 72 | #define test_and_clear_bit(nr,vaddr) \ | 72 | #define test_and_clear_bit(nr,vaddr) \ |
| 73 | (__builtin_constant_p(nr) ? \ | 73 | (__builtin_constant_p(nr) ? \ |
| 74 | __constant_test_and_clear_bit(nr, vaddr) : \ | 74 | __constant_test_and_clear_bit(nr, vaddr) : \ |
| 75 | __generic_test_and_clear_bit(nr, vaddr)) | 75 | __generic_test_and_clear_bit(nr, vaddr)) |
| 76 | 76 | ||
| 77 | #define __test_and_clear_bit(nr,vaddr) test_and_clear_bit(nr,vaddr) | 77 | #define __test_and_clear_bit(nr,vaddr) test_and_clear_bit(nr,vaddr) |
| 78 | 78 | ||
| 79 | static inline int __constant_test_and_clear_bit(int nr, unsigned long *vaddr) | 79 | static inline int __constant_test_and_clear_bit(int nr, unsigned long *vaddr) |
| 80 | { | 80 | { |
| 81 | char *p = (char *)vaddr + (nr ^ 31) / 8; | 81 | char *p = (char *)vaddr + (nr ^ 31) / 8; |
| 82 | char retval; | 82 | char retval; |
| 83 | 83 | ||
| 84 | __asm__ __volatile__ ("bclr %2,%1; sne %0" | 84 | __asm__ __volatile__ ("bclr %2,%1; sne %0" |
| 85 | : "=d" (retval), "+m" (*p) | 85 | : "=d" (retval), "+m" (*p) |
| 86 | : "di" (nr & 7)); | 86 | : "di" (nr & 7)); |
| 87 | 87 | ||
| 88 | return retval; | 88 | return retval; |
| 89 | } | 89 | } |
| 90 | 90 | ||
| 91 | static inline int __generic_test_and_clear_bit(int nr, unsigned long *vaddr) | 91 | static inline int __generic_test_and_clear_bit(int nr, unsigned long *vaddr) |
| 92 | { | 92 | { |
| 93 | char retval; | 93 | char retval; |
| 94 | 94 | ||
| 95 | __asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0" | 95 | __asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0" |
| 96 | : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory"); | 96 | : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory"); |
| 97 | 97 | ||
| 98 | return retval; | 98 | return retval; |
| 99 | } | 99 | } |
| 100 | 100 | ||
| 101 | /* | 101 | /* |
| 102 | * clear_bit() doesn't provide any barrier for the compiler. | 102 | * clear_bit() doesn't provide any barrier for the compiler. |
| 103 | */ | 103 | */ |
| 104 | #define smp_mb__before_clear_bit() barrier() | 104 | #define smp_mb__before_clear_bit() barrier() |
| 105 | #define smp_mb__after_clear_bit() barrier() | 105 | #define smp_mb__after_clear_bit() barrier() |
| 106 | 106 | ||
| 107 | #define clear_bit(nr,vaddr) \ | 107 | #define clear_bit(nr,vaddr) \ |
| 108 | (__builtin_constant_p(nr) ? \ | 108 | (__builtin_constant_p(nr) ? \ |
| 109 | __constant_clear_bit(nr, vaddr) : \ | 109 | __constant_clear_bit(nr, vaddr) : \ |
| 110 | __generic_clear_bit(nr, vaddr)) | 110 | __generic_clear_bit(nr, vaddr)) |
| 111 | #define __clear_bit(nr,vaddr) clear_bit(nr,vaddr) | 111 | #define __clear_bit(nr,vaddr) clear_bit(nr,vaddr) |
| 112 | 112 | ||
| 113 | static inline void __constant_clear_bit(int nr, volatile unsigned long *vaddr) | 113 | static inline void __constant_clear_bit(int nr, volatile unsigned long *vaddr) |
| 114 | { | 114 | { |
| 115 | char *p = (char *)vaddr + (nr ^ 31) / 8; | 115 | char *p = (char *)vaddr + (nr ^ 31) / 8; |
| 116 | __asm__ __volatile__ ("bclr %1,%0" | 116 | __asm__ __volatile__ ("bclr %1,%0" |
| 117 | : "+m" (*p) : "di" (nr & 7)); | 117 | : "+m" (*p) : "di" (nr & 7)); |
| 118 | } | 118 | } |
| 119 | 119 | ||
| 120 | static inline void __generic_clear_bit(int nr, volatile unsigned long *vaddr) | 120 | static inline void __generic_clear_bit(int nr, volatile unsigned long *vaddr) |
| 121 | { | 121 | { |
| 122 | __asm__ __volatile__ ("bfclr %1{%0:#1}" | 122 | __asm__ __volatile__ ("bfclr %1{%0:#1}" |
| 123 | : : "d" (nr^31), "o" (*vaddr) : "memory"); | 123 | : : "d" (nr^31), "o" (*vaddr) : "memory"); |
| 124 | } | 124 | } |
| 125 | 125 | ||
| 126 | #define test_and_change_bit(nr,vaddr) \ | 126 | #define test_and_change_bit(nr,vaddr) \ |
| 127 | (__builtin_constant_p(nr) ? \ | 127 | (__builtin_constant_p(nr) ? \ |
| 128 | __constant_test_and_change_bit(nr, vaddr) : \ | 128 | __constant_test_and_change_bit(nr, vaddr) : \ |
| 129 | __generic_test_and_change_bit(nr, vaddr)) | 129 | __generic_test_and_change_bit(nr, vaddr)) |
| 130 | 130 | ||
| 131 | #define __test_and_change_bit(nr,vaddr) test_and_change_bit(nr,vaddr) | 131 | #define __test_and_change_bit(nr,vaddr) test_and_change_bit(nr,vaddr) |
| 132 | #define __change_bit(nr,vaddr) change_bit(nr,vaddr) | 132 | #define __change_bit(nr,vaddr) change_bit(nr,vaddr) |
| 133 | 133 | ||
| 134 | static inline int __constant_test_and_change_bit(int nr, unsigned long *vaddr) | 134 | static inline int __constant_test_and_change_bit(int nr, unsigned long *vaddr) |
| 135 | { | 135 | { |
| 136 | char *p = (char *)vaddr + (nr ^ 31) / 8; | 136 | char *p = (char *)vaddr + (nr ^ 31) / 8; |
| 137 | char retval; | 137 | char retval; |
| 138 | 138 | ||
| 139 | __asm__ __volatile__ ("bchg %2,%1; sne %0" | 139 | __asm__ __volatile__ ("bchg %2,%1; sne %0" |
| 140 | : "=d" (retval), "+m" (*p) | 140 | : "=d" (retval), "+m" (*p) |
| 141 | : "di" (nr & 7)); | 141 | : "di" (nr & 7)); |
| 142 | 142 | ||
| 143 | return retval; | 143 | return retval; |
| 144 | } | 144 | } |
| 145 | 145 | ||
| 146 | static inline int __generic_test_and_change_bit(int nr, unsigned long *vaddr) | 146 | static inline int __generic_test_and_change_bit(int nr, unsigned long *vaddr) |
| 147 | { | 147 | { |
| 148 | char retval; | 148 | char retval; |
| 149 | 149 | ||
| 150 | __asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0" | 150 | __asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0" |
| 151 | : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory"); | 151 | : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory"); |
| 152 | 152 | ||
| 153 | return retval; | 153 | return retval; |
| 154 | } | 154 | } |
| 155 | 155 | ||
| 156 | #define change_bit(nr,vaddr) \ | 156 | #define change_bit(nr,vaddr) \ |
| 157 | (__builtin_constant_p(nr) ? \ | 157 | (__builtin_constant_p(nr) ? \ |
| 158 | __constant_change_bit(nr, vaddr) : \ | 158 | __constant_change_bit(nr, vaddr) : \ |
| 159 | __generic_change_bit(nr, vaddr)) | 159 | __generic_change_bit(nr, vaddr)) |
| 160 | 160 | ||
| 161 | static inline void __constant_change_bit(int nr, unsigned long *vaddr) | 161 | static inline void __constant_change_bit(int nr, unsigned long *vaddr) |
| 162 | { | 162 | { |
| 163 | char *p = (char *)vaddr + (nr ^ 31) / 8; | 163 | char *p = (char *)vaddr + (nr ^ 31) / 8; |
| 164 | __asm__ __volatile__ ("bchg %1,%0" | 164 | __asm__ __volatile__ ("bchg %1,%0" |
| 165 | : "+m" (*p) : "di" (nr & 7)); | 165 | : "+m" (*p) : "di" (nr & 7)); |
| 166 | } | 166 | } |
| 167 | 167 | ||
| 168 | static inline void __generic_change_bit(int nr, unsigned long *vaddr) | 168 | static inline void __generic_change_bit(int nr, unsigned long *vaddr) |
| 169 | { | 169 | { |
| 170 | __asm__ __volatile__ ("bfchg %1{%0:#1}" | 170 | __asm__ __volatile__ ("bfchg %1{%0:#1}" |
| 171 | : : "d" (nr^31), "o" (*vaddr) : "memory"); | 171 | : : "d" (nr^31), "o" (*vaddr) : "memory"); |
| 172 | } | 172 | } |
| 173 | 173 | ||
| 174 | static inline int test_bit(int nr, const unsigned long *vaddr) | 174 | static inline int test_bit(int nr, const unsigned long *vaddr) |
| 175 | { | 175 | { |
| 176 | return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0; | 176 | return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0; |
| 177 | } | 177 | } |
| 178 | 178 | ||
| 179 | static inline int find_first_zero_bit(const unsigned long *vaddr, | 179 | static inline int find_first_zero_bit(const unsigned long *vaddr, |
| 180 | unsigned size) | 180 | unsigned size) |
| 181 | { | 181 | { |
| 182 | const unsigned long *p = vaddr; | 182 | const unsigned long *p = vaddr; |
| 183 | int res = 32; | 183 | int res = 32; |
| 184 | unsigned long num; | 184 | unsigned long num; |
| 185 | 185 | ||
| 186 | if (!size) | 186 | if (!size) |
| 187 | return 0; | 187 | return 0; |
| 188 | 188 | ||
| 189 | size = (size + 31) >> 5; | 189 | size = (size + 31) >> 5; |
| 190 | while (!(num = ~*p++)) { | 190 | while (!(num = ~*p++)) { |
| 191 | if (!--size) | 191 | if (!--size) |
| 192 | goto out; | 192 | goto out; |
| 193 | } | 193 | } |
| 194 | 194 | ||
| 195 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" | 195 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" |
| 196 | : "=d" (res) : "d" (num & -num)); | 196 | : "=d" (res) : "d" (num & -num)); |
| 197 | res ^= 31; | 197 | res ^= 31; |
| 198 | out: | 198 | out: |
| 199 | return ((long)p - (long)vaddr - 4) * 8 + res; | 199 | return ((long)p - (long)vaddr - 4) * 8 + res; |
| 200 | } | 200 | } |
| 201 | 201 | ||
| 202 | static inline int find_next_zero_bit(const unsigned long *vaddr, int size, | 202 | static inline int find_next_zero_bit(const unsigned long *vaddr, int size, |
| 203 | int offset) | 203 | int offset) |
| 204 | { | 204 | { |
| 205 | const unsigned long *p = vaddr + (offset >> 5); | 205 | const unsigned long *p = vaddr + (offset >> 5); |
| 206 | int bit = offset & 31UL, res; | 206 | int bit = offset & 31UL, res; |
| 207 | 207 | ||
| 208 | if (offset >= size) | 208 | if (offset >= size) |
| 209 | return size; | 209 | return size; |
| 210 | 210 | ||
| 211 | if (bit) { | 211 | if (bit) { |
| 212 | unsigned long num = ~*p++ & (~0UL << bit); | 212 | unsigned long num = ~*p++ & (~0UL << bit); |
| 213 | offset -= bit; | 213 | offset -= bit; |
| 214 | 214 | ||
| 215 | /* Look for zero in first longword */ | 215 | /* Look for zero in first longword */ |
| 216 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" | 216 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" |
| 217 | : "=d" (res) : "d" (num & -num)); | 217 | : "=d" (res) : "d" (num & -num)); |
| 218 | if (res < 32) | 218 | if (res < 32) |
| 219 | return offset + (res ^ 31); | 219 | return offset + (res ^ 31); |
| 220 | offset += 32; | 220 | offset += 32; |
| 221 | } | 221 | } |
| 222 | /* No zero yet, search remaining full bytes for a zero */ | 222 | /* No zero yet, search remaining full bytes for a zero */ |
| 223 | res = find_first_zero_bit(p, size - ((long)p - (long)vaddr) * 8); | 223 | res = find_first_zero_bit(p, size - ((long)p - (long)vaddr) * 8); |
| 224 | return offset + res; | 224 | return offset + res; |
| 225 | } | 225 | } |
| 226 | 226 | ||
| 227 | static inline int find_first_bit(const unsigned long *vaddr, unsigned size) | 227 | static inline int find_first_bit(const unsigned long *vaddr, unsigned size) |
| 228 | { | 228 | { |
| 229 | const unsigned long *p = vaddr; | 229 | const unsigned long *p = vaddr; |
| 230 | int res = 32; | 230 | int res = 32; |
| 231 | unsigned long num; | 231 | unsigned long num; |
| 232 | 232 | ||
| 233 | if (!size) | 233 | if (!size) |
| 234 | return 0; | 234 | return 0; |
| 235 | 235 | ||
| 236 | size = (size + 31) >> 5; | 236 | size = (size + 31) >> 5; |
| 237 | while (!(num = *p++)) { | 237 | while (!(num = *p++)) { |
| 238 | if (!--size) | 238 | if (!--size) |
| 239 | goto out; | 239 | goto out; |
| 240 | } | 240 | } |
| 241 | 241 | ||
| 242 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" | 242 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" |
| 243 | : "=d" (res) : "d" (num & -num)); | 243 | : "=d" (res) : "d" (num & -num)); |
| 244 | res ^= 31; | 244 | res ^= 31; |
| 245 | out: | 245 | out: |
| 246 | return ((long)p - (long)vaddr - 4) * 8 + res; | 246 | return ((long)p - (long)vaddr - 4) * 8 + res; |
| 247 | } | 247 | } |
| 248 | 248 | ||
| 249 | static inline int find_next_bit(const unsigned long *vaddr, int size, | 249 | static inline int find_next_bit(const unsigned long *vaddr, int size, |
| 250 | int offset) | 250 | int offset) |
| 251 | { | 251 | { |
| 252 | const unsigned long *p = vaddr + (offset >> 5); | 252 | const unsigned long *p = vaddr + (offset >> 5); |
| 253 | int bit = offset & 31UL, res; | 253 | int bit = offset & 31UL, res; |
| 254 | 254 | ||
| 255 | if (offset >= size) | 255 | if (offset >= size) |
| 256 | return size; | 256 | return size; |
| 257 | 257 | ||
| 258 | if (bit) { | 258 | if (bit) { |
| 259 | unsigned long num = *p++ & (~0UL << bit); | 259 | unsigned long num = *p++ & (~0UL << bit); |
| 260 | offset -= bit; | 260 | offset -= bit; |
| 261 | 261 | ||
| 262 | /* Look for one in first longword */ | 262 | /* Look for one in first longword */ |
| 263 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" | 263 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" |
| 264 | : "=d" (res) : "d" (num & -num)); | 264 | : "=d" (res) : "d" (num & -num)); |
| 265 | if (res < 32) | 265 | if (res < 32) |
| 266 | return offset + (res ^ 31); | 266 | return offset + (res ^ 31); |
| 267 | offset += 32; | 267 | offset += 32; |
| 268 | } | 268 | } |
| 269 | /* No one yet, search remaining full bytes for a one */ | 269 | /* No one yet, search remaining full bytes for a one */ |
| 270 | res = find_first_bit(p, size - ((long)p - (long)vaddr) * 8); | 270 | res = find_first_bit(p, size - ((long)p - (long)vaddr) * 8); |
| 271 | return offset + res; | 271 | return offset + res; |
| 272 | } | 272 | } |
| 273 | 273 | ||
| 274 | /* | 274 | /* |
| 275 | * ffz = Find First Zero in word. Undefined if no zero exists, | 275 | * ffz = Find First Zero in word. Undefined if no zero exists, |
| 276 | * so code should check against ~0UL first.. | 276 | * so code should check against ~0UL first.. |
| 277 | */ | 277 | */ |
| 278 | static inline unsigned long ffz(unsigned long word) | 278 | static inline unsigned long ffz(unsigned long word) |
| 279 | { | 279 | { |
| 280 | int res; | 280 | int res; |
| 281 | 281 | ||
| 282 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" | 282 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" |
| 283 | : "=d" (res) : "d" (~word & -~word)); | 283 | : "=d" (res) : "d" (~word & -~word)); |
| 284 | return res ^ 31; | 284 | return res ^ 31; |
| 285 | } | 285 | } |
| 286 | 286 | ||
| 287 | #ifdef __KERNEL__ | 287 | #ifdef __KERNEL__ |
| 288 | 288 | ||
| 289 | /* | 289 | /* |
| 290 | * ffs: find first bit set. This is defined the same way as | 290 | * ffs: find first bit set. This is defined the same way as |
| 291 | * the libc and compiler builtin ffs routines, therefore | 291 | * the libc and compiler builtin ffs routines, therefore |
| 292 | * differs in spirit from the above ffz (man ffs). | 292 | * differs in spirit from the above ffz (man ffs). |
| 293 | */ | 293 | */ |
| 294 | 294 | ||
| 295 | static inline int ffs(int x) | 295 | static inline int ffs(int x) |
| 296 | { | 296 | { |
| 297 | int cnt; | 297 | int cnt; |
| 298 | 298 | ||
| 299 | asm ("bfffo %1{#0:#0},%0" : "=d" (cnt) : "dm" (x & -x)); | 299 | asm ("bfffo %1{#0:#0},%0" : "=d" (cnt) : "dm" (x & -x)); |
| 300 | 300 | ||
| 301 | return 32 - cnt; | 301 | return 32 - cnt; |
| 302 | } | 302 | } |
| 303 | #define __ffs(x) (ffs(x) - 1) | 303 | #define __ffs(x) (ffs(x) - 1) |
| 304 | 304 | ||
| 305 | /* | 305 | /* |
| 306 | * fls: find last bit set. | 306 | * fls: find last bit set. |
| 307 | */ | 307 | */ |
| 308 | 308 | ||
| 309 | static inline int fls(int x) | 309 | static inline int fls(int x) |
| 310 | { | 310 | { |
| 311 | int cnt; | 311 | int cnt; |
| 312 | 312 | ||
| 313 | asm ("bfffo %1{#0,#0},%0" : "=d" (cnt) : "dm" (x)); | 313 | asm ("bfffo %1{#0,#0},%0" : "=d" (cnt) : "dm" (x)); |
| 314 | 314 | ||
| 315 | return 32 - cnt; | 315 | return 32 - cnt; |
| 316 | } | 316 | } |
| 317 | 317 | ||
| 318 | static inline int __fls(int x) | 318 | static inline int __fls(int x) |
| 319 | { | 319 | { |
| 320 | return fls(x) - 1; | 320 | return fls(x) - 1; |
| 321 | } | 321 | } |
| 322 | 322 | ||
| 323 | #include <asm-generic/bitops/fls64.h> | 323 | #include <asm-generic/bitops/fls64.h> |
| 324 | #include <asm-generic/bitops/sched.h> | 324 | #include <asm-generic/bitops/sched.h> |
| 325 | #include <asm-generic/bitops/hweight.h> | 325 | #include <asm-generic/bitops/hweight.h> |
| 326 | #include <asm-generic/bitops/lock.h> | 326 | #include <asm-generic/bitops/lock.h> |
| 327 | 327 | ||
| 328 | /* Bitmap functions for the minix filesystem */ | ||
| 329 | |||
| 330 | static inline int minix_find_first_zero_bit(const void *vaddr, unsigned size) | ||
| 331 | { | ||
| 332 | const unsigned short *p = vaddr, *addr = vaddr; | ||
| 333 | unsigned short num; | ||
| 334 | |||
| 335 | if (!size) | ||
| 336 | return 0; | ||
| 337 | |||
| 338 | size = (size >> 4) + ((size & 15) > 0); | ||
| 339 | while (*p++ == 0xffff) { | ||
| 340 | if (--size == 0) | ||
| 341 | return (p - addr) << 4; | ||
| 342 | } | ||
| 343 | |||
| 344 | num = *--p; | ||
| 345 | return ((p - addr) << 4) + ffz(num); | ||
| 346 | } | ||
| 347 | |||
| 348 | #define minix_test_and_set_bit(nr, addr) __test_and_set_bit((nr) ^ 16, (unsigned long *)(addr)) | ||
| 349 | #define minix_set_bit(nr,addr) __set_bit((nr) ^ 16, (unsigned long *)(addr)) | ||
| 350 | #define minix_test_and_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 16, (unsigned long *)(addr)) | ||
| 351 | |||
| 352 | static inline int minix_test_bit(int nr, const void *vaddr) | ||
| 353 | { | ||
| 354 | const unsigned short *p = vaddr; | ||
| 355 | return (p[nr >> 4] & (1U << (nr & 15))) != 0; | ||
| 356 | } | ||
| 357 | |||
| 358 | /* Bitmap functions for the little endian bitmap. */ | 328 | /* Bitmap functions for the little endian bitmap. */ |
| 359 | 329 | ||
| 360 | static inline void __set_bit_le(int nr, void *addr) | 330 | static inline void __set_bit_le(int nr, void *addr) |
| 361 | { | 331 | { |
| 362 | __set_bit(nr ^ 24, addr); | 332 | __set_bit(nr ^ 24, addr); |
| 363 | } | 333 | } |
| 364 | 334 | ||
| 365 | static inline void __clear_bit_le(int nr, void *addr) | 335 | static inline void __clear_bit_le(int nr, void *addr) |
| 366 | { | 336 | { |
| 367 | __clear_bit(nr ^ 24, addr); | 337 | __clear_bit(nr ^ 24, addr); |
| 368 | } | 338 | } |
| 369 | 339 | ||
| 370 | static inline int __test_and_set_bit_le(int nr, void *addr) | 340 | static inline int __test_and_set_bit_le(int nr, void *addr) |
| 371 | { | 341 | { |
| 372 | return __test_and_set_bit(nr ^ 24, addr); | 342 | return __test_and_set_bit(nr ^ 24, addr); |
| 373 | } | 343 | } |
| 374 | 344 | ||
| 375 | static inline int test_and_set_bit_le(int nr, void *addr) | 345 | static inline int test_and_set_bit_le(int nr, void *addr) |
| 376 | { | 346 | { |
| 377 | return test_and_set_bit(nr ^ 24, addr); | 347 | return test_and_set_bit(nr ^ 24, addr); |
| 378 | } | 348 | } |
| 379 | 349 | ||
| 380 | static inline int __test_and_clear_bit_le(int nr, void *addr) | 350 | static inline int __test_and_clear_bit_le(int nr, void *addr) |
| 381 | { | 351 | { |
| 382 | return __test_and_clear_bit(nr ^ 24, addr); | 352 | return __test_and_clear_bit(nr ^ 24, addr); |
| 383 | } | 353 | } |
| 384 | 354 | ||
| 385 | static inline int test_and_clear_bit_le(int nr, void *addr) | 355 | static inline int test_and_clear_bit_le(int nr, void *addr) |
| 386 | { | 356 | { |
| 387 | return test_and_clear_bit(nr ^ 24, addr); | 357 | return test_and_clear_bit(nr ^ 24, addr); |
| 388 | } | 358 | } |
| 389 | 359 | ||
| 390 | static inline int test_bit_le(int nr, const void *vaddr) | 360 | static inline int test_bit_le(int nr, const void *vaddr) |
| 391 | { | 361 | { |
| 392 | const unsigned char *p = vaddr; | 362 | const unsigned char *p = vaddr; |
| 393 | return (p[nr >> 3] & (1U << (nr & 7))) != 0; | 363 | return (p[nr >> 3] & (1U << (nr & 7))) != 0; |
| 394 | } | 364 | } |
| 395 | 365 | ||
| 396 | static inline int find_first_zero_bit_le(const void *vaddr, unsigned size) | 366 | static inline int find_first_zero_bit_le(const void *vaddr, unsigned size) |
| 397 | { | 367 | { |
| 398 | const unsigned long *p = vaddr, *addr = vaddr; | 368 | const unsigned long *p = vaddr, *addr = vaddr; |
| 399 | int res; | 369 | int res; |
| 400 | 370 | ||
| 401 | if (!size) | 371 | if (!size) |
| 402 | return 0; | 372 | return 0; |
| 403 | 373 | ||
| 404 | size = (size >> 5) + ((size & 31) > 0); | 374 | size = (size >> 5) + ((size & 31) > 0); |
| 405 | while (*p++ == ~0UL) | 375 | while (*p++ == ~0UL) |
| 406 | { | 376 | { |
| 407 | if (--size == 0) | 377 | if (--size == 0) |
| 408 | return (p - addr) << 5; | 378 | return (p - addr) << 5; |
| 409 | } | 379 | } |
| 410 | 380 | ||
| 411 | --p; | 381 | --p; |
| 412 | for (res = 0; res < 32; res++) | 382 | for (res = 0; res < 32; res++) |
| 413 | if (!test_bit_le(res, p)) | 383 | if (!test_bit_le(res, p)) |
| 414 | break; | 384 | break; |
| 415 | return (p - addr) * 32 + res; | 385 | return (p - addr) * 32 + res; |
| 416 | } | 386 | } |
| 417 | 387 | ||
| 418 | static inline unsigned long find_next_zero_bit_le(const void *addr, | 388 | static inline unsigned long find_next_zero_bit_le(const void *addr, |
| 419 | unsigned long size, unsigned long offset) | 389 | unsigned long size, unsigned long offset) |
| 420 | { | 390 | { |
| 421 | const unsigned long *p = addr; | 391 | const unsigned long *p = addr; |
| 422 | int bit = offset & 31UL, res; | 392 | int bit = offset & 31UL, res; |
| 423 | 393 | ||
| 424 | if (offset >= size) | 394 | if (offset >= size) |
| 425 | return size; | 395 | return size; |
| 426 | 396 | ||
| 427 | p += offset >> 5; | 397 | p += offset >> 5; |
| 428 | 398 | ||
| 429 | if (bit) { | 399 | if (bit) { |
| 430 | offset -= bit; | 400 | offset -= bit; |
| 431 | /* Look for zero in first longword */ | 401 | /* Look for zero in first longword */ |
| 432 | for (res = bit; res < 32; res++) | 402 | for (res = bit; res < 32; res++) |
| 433 | if (!test_bit_le(res, p)) | 403 | if (!test_bit_le(res, p)) |
| 434 | return offset + res; | 404 | return offset + res; |
| 435 | p++; | 405 | p++; |
| 436 | offset += 32; | 406 | offset += 32; |
| 437 | } | 407 | } |
| 438 | /* No zero yet, search remaining full bytes for a zero */ | 408 | /* No zero yet, search remaining full bytes for a zero */ |
| 439 | return offset + find_first_zero_bit_le(p, size - offset); | 409 | return offset + find_first_zero_bit_le(p, size - offset); |
| 440 | } | 410 | } |
| 441 | 411 | ||
| 442 | static inline int find_first_bit_le(const void *vaddr, unsigned size) | 412 | static inline int find_first_bit_le(const void *vaddr, unsigned size) |
| 443 | { | 413 | { |
| 444 | const unsigned long *p = vaddr, *addr = vaddr; | 414 | const unsigned long *p = vaddr, *addr = vaddr; |
| 445 | int res; | 415 | int res; |
| 446 | 416 | ||
| 447 | if (!size) | 417 | if (!size) |
| 448 | return 0; | 418 | return 0; |
| 449 | 419 | ||
| 450 | size = (size >> 5) + ((size & 31) > 0); | 420 | size = (size >> 5) + ((size & 31) > 0); |
| 451 | while (*p++ == 0UL) { | 421 | while (*p++ == 0UL) { |
| 452 | if (--size == 0) | 422 | if (--size == 0) |
| 453 | return (p - addr) << 5; | 423 | return (p - addr) << 5; |
| 454 | } | 424 | } |
| 455 | 425 | ||
| 456 | --p; | 426 | --p; |
| 457 | for (res = 0; res < 32; res++) | 427 | for (res = 0; res < 32; res++) |
| 458 | if (test_bit_le(res, p)) | 428 | if (test_bit_le(res, p)) |
| 459 | break; | 429 | break; |
| 460 | return (p - addr) * 32 + res; | 430 | return (p - addr) * 32 + res; |
| 461 | } | 431 | } |
| 462 | 432 | ||
| 463 | static inline unsigned long find_next_bit_le(const void *addr, | 433 | static inline unsigned long find_next_bit_le(const void *addr, |
| 464 | unsigned long size, unsigned long offset) | 434 | unsigned long size, unsigned long offset) |
| 465 | { | 435 | { |
| 466 | const unsigned long *p = addr; | 436 | const unsigned long *p = addr; |
| 467 | int bit = offset & 31UL, res; | 437 | int bit = offset & 31UL, res; |
| 468 | 438 | ||
| 469 | if (offset >= size) | 439 | if (offset >= size) |
| 470 | return size; | 440 | return size; |
| 471 | 441 | ||
| 472 | p += offset >> 5; | 442 | p += offset >> 5; |
| 473 | 443 | ||
| 474 | if (bit) { | 444 | if (bit) { |
| 475 | offset -= bit; | 445 | offset -= bit; |
| 476 | /* Look for one in first longword */ | 446 | /* Look for one in first longword */ |
| 477 | for (res = bit; res < 32; res++) | 447 | for (res = bit; res < 32; res++) |
| 478 | if (test_bit_le(res, p)) | 448 | if (test_bit_le(res, p)) |
| 479 | return offset + res; | 449 | return offset + res; |
| 480 | p++; | 450 | p++; |
| 481 | offset += 32; | 451 | offset += 32; |
| 482 | } | 452 | } |
| 483 | /* No set bit yet, search remaining full bytes for a set bit */ | 453 | /* No set bit yet, search remaining full bytes for a set bit */ |
| 484 | return offset + find_first_bit_le(p, size - offset); | 454 | return offset + find_first_bit_le(p, size - offset); |
| 485 | } | 455 | } |
| 486 | 456 | ||
| 487 | /* Bitmap functions for the ext2 filesystem. */ | 457 | /* Bitmap functions for the ext2 filesystem. */ |
| 488 | 458 | ||
| 489 | #define ext2_set_bit_atomic(lock, nr, addr) \ | 459 | #define ext2_set_bit_atomic(lock, nr, addr) \ |
| 490 | test_and_set_bit_le(nr, addr) | 460 | test_and_set_bit_le(nr, addr) |
| 491 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | 461 | #define ext2_clear_bit_atomic(lock, nr, addr) \ |
| 492 | test_and_clear_bit_le(nr, addr) | 462 | test_and_clear_bit_le(nr, addr) |
| 493 | 463 | ||
| 494 | #endif /* __KERNEL__ */ | 464 | #endif /* __KERNEL__ */ |
| 495 | 465 | ||
| 496 | #endif /* _M68K_BITOPS_H */ | 466 | #endif /* _M68K_BITOPS_H */ |
| 497 | 467 |
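The block removed from bitops_mm.h above is the m68k-private minix bitmap code: the bitmap is addressed as 16-bit words, and the set/clear macros reach the same bits through the native big-endian long operations via the (nr) ^ 16 word swizzle. Purely to illustrate what those removed helpers computed, here is a rough C equivalent of the 16-bit indexing, using hypothetical sketch_* names; it mirrors the in-memory access on the big-endian host and is not part of the patch:

	/*
	 * Hypothetical sketch of the removed m68k minix helpers.  The bitmap
	 * is treated as an array of 16-bit words; the removed macros obtained
	 * the same bits from the native long operations with the (nr) ^ 16
	 * word swizzle.  Illustration only, not a portable on-disk accessor.
	 */
	static inline int sketch_minix_test_bit(int nr, const void *vaddr)
	{
		const unsigned short *p = vaddr;

		return (p[nr >> 4] & (1U << (nr & 15))) != 0;
	}

	static inline int sketch_minix_test_and_set_bit(int nr, void *vaddr)
	{
		unsigned short *p = (unsigned short *)vaddr + (nr >> 4);
		unsigned short mask = 1U << (nr & 15);
		int old = (*p & mask) != 0;

		*p |= mask;
		return old;
	}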
arch/m68k/include/asm/bitops_no.h
| 1 | #ifndef _M68KNOMMU_BITOPS_H | 1 | #ifndef _M68KNOMMU_BITOPS_H |
| 2 | #define _M68KNOMMU_BITOPS_H | 2 | #define _M68KNOMMU_BITOPS_H |
| 3 | 3 | ||
| 4 | /* | 4 | /* |
| 5 | * Copyright 1992, Linus Torvalds. | 5 | * Copyright 1992, Linus Torvalds. |
| 6 | */ | 6 | */ |
| 7 | 7 | ||
| 8 | #include <linux/compiler.h> | 8 | #include <linux/compiler.h> |
| 9 | #include <asm/byteorder.h> /* swab32 */ | 9 | #include <asm/byteorder.h> /* swab32 */ |
| 10 | 10 | ||
| 11 | #ifdef __KERNEL__ | 11 | #ifdef __KERNEL__ |
| 12 | 12 | ||
| 13 | #ifndef _LINUX_BITOPS_H | 13 | #ifndef _LINUX_BITOPS_H |
| 14 | #error only <linux/bitops.h> can be included directly | 14 | #error only <linux/bitops.h> can be included directly |
| 15 | #endif | 15 | #endif |
| 16 | 16 | ||
| 17 | #if defined (__mcfisaaplus__) || defined (__mcfisac__) | 17 | #if defined (__mcfisaaplus__) || defined (__mcfisac__) |
| 18 | static inline int ffs(unsigned int val) | 18 | static inline int ffs(unsigned int val) |
| 19 | { | 19 | { |
| 20 | if (!val) | 20 | if (!val) |
| 21 | return 0; | 21 | return 0; |
| 22 | 22 | ||
| 23 | asm volatile( | 23 | asm volatile( |
| 24 | "bitrev %0\n\t" | 24 | "bitrev %0\n\t" |
| 25 | "ff1 %0\n\t" | 25 | "ff1 %0\n\t" |
| 26 | : "=d" (val) | 26 | : "=d" (val) |
| 27 | : "0" (val) | 27 | : "0" (val) |
| 28 | ); | 28 | ); |
| 29 | val++; | 29 | val++; |
| 30 | return val; | 30 | return val; |
| 31 | } | 31 | } |
| 32 | 32 | ||
| 33 | static inline int __ffs(unsigned int val) | 33 | static inline int __ffs(unsigned int val) |
| 34 | { | 34 | { |
| 35 | asm volatile( | 35 | asm volatile( |
| 36 | "bitrev %0\n\t" | 36 | "bitrev %0\n\t" |
| 37 | "ff1 %0\n\t" | 37 | "ff1 %0\n\t" |
| 38 | : "=d" (val) | 38 | : "=d" (val) |
| 39 | : "0" (val) | 39 | : "0" (val) |
| 40 | ); | 40 | ); |
| 41 | return val; | 41 | return val; |
| 42 | } | 42 | } |
| 43 | 43 | ||
| 44 | #else | 44 | #else |
| 45 | #include <asm-generic/bitops/ffs.h> | 45 | #include <asm-generic/bitops/ffs.h> |
| 46 | #include <asm-generic/bitops/__ffs.h> | 46 | #include <asm-generic/bitops/__ffs.h> |
| 47 | #endif | 47 | #endif |
| 48 | 48 | ||
| 49 | #include <asm-generic/bitops/sched.h> | 49 | #include <asm-generic/bitops/sched.h> |
| 50 | #include <asm-generic/bitops/ffz.h> | 50 | #include <asm-generic/bitops/ffz.h> |
| 51 | 51 | ||
| 52 | static __inline__ void set_bit(int nr, volatile unsigned long * addr) | 52 | static __inline__ void set_bit(int nr, volatile unsigned long * addr) |
| 53 | { | 53 | { |
| 54 | #ifdef CONFIG_COLDFIRE | 54 | #ifdef CONFIG_COLDFIRE |
| 55 | __asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)" | 55 | __asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)" |
| 56 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 56 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
| 57 | : "d" (nr) | 57 | : "d" (nr) |
| 58 | : "%a0", "cc"); | 58 | : "%a0", "cc"); |
| 59 | #else | 59 | #else |
| 60 | __asm__ __volatile__ ("bset %1,%0" | 60 | __asm__ __volatile__ ("bset %1,%0" |
| 61 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 61 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
| 62 | : "di" (nr) | 62 | : "di" (nr) |
| 63 | : "cc"); | 63 | : "cc"); |
| 64 | #endif | 64 | #endif |
| 65 | } | 65 | } |
| 66 | 66 | ||
| 67 | #define __set_bit(nr, addr) set_bit(nr, addr) | 67 | #define __set_bit(nr, addr) set_bit(nr, addr) |
| 68 | 68 | ||
| 69 | /* | 69 | /* |
| 70 | * clear_bit() doesn't provide any barrier for the compiler. | 70 | * clear_bit() doesn't provide any barrier for the compiler. |
| 71 | */ | 71 | */ |
| 72 | #define smp_mb__before_clear_bit() barrier() | 72 | #define smp_mb__before_clear_bit() barrier() |
| 73 | #define smp_mb__after_clear_bit() barrier() | 73 | #define smp_mb__after_clear_bit() barrier() |
| 74 | 74 | ||
| 75 | static __inline__ void clear_bit(int nr, volatile unsigned long * addr) | 75 | static __inline__ void clear_bit(int nr, volatile unsigned long * addr) |
| 76 | { | 76 | { |
| 77 | #ifdef CONFIG_COLDFIRE | 77 | #ifdef CONFIG_COLDFIRE |
| 78 | __asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)" | 78 | __asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)" |
| 79 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 79 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
| 80 | : "d" (nr) | 80 | : "d" (nr) |
| 81 | : "%a0", "cc"); | 81 | : "%a0", "cc"); |
| 82 | #else | 82 | #else |
| 83 | __asm__ __volatile__ ("bclr %1,%0" | 83 | __asm__ __volatile__ ("bclr %1,%0" |
| 84 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 84 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
| 85 | : "di" (nr) | 85 | : "di" (nr) |
| 86 | : "cc"); | 86 | : "cc"); |
| 87 | #endif | 87 | #endif |
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | #define __clear_bit(nr, addr) clear_bit(nr, addr) | 90 | #define __clear_bit(nr, addr) clear_bit(nr, addr) |
| 91 | 91 | ||
| 92 | static __inline__ void change_bit(int nr, volatile unsigned long * addr) | 92 | static __inline__ void change_bit(int nr, volatile unsigned long * addr) |
| 93 | { | 93 | { |
| 94 | #ifdef CONFIG_COLDFIRE | 94 | #ifdef CONFIG_COLDFIRE |
| 95 | __asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)" | 95 | __asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)" |
| 96 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 96 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
| 97 | : "d" (nr) | 97 | : "d" (nr) |
| 98 | : "%a0", "cc"); | 98 | : "%a0", "cc"); |
| 99 | #else | 99 | #else |
| 100 | __asm__ __volatile__ ("bchg %1,%0" | 100 | __asm__ __volatile__ ("bchg %1,%0" |
| 101 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 101 | : "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
| 102 | : "di" (nr) | 102 | : "di" (nr) |
| 103 | : "cc"); | 103 | : "cc"); |
| 104 | #endif | 104 | #endif |
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | #define __change_bit(nr, addr) change_bit(nr, addr) | 107 | #define __change_bit(nr, addr) change_bit(nr, addr) |
| 108 | 108 | ||
| 109 | static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr) | 109 | static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr) |
| 110 | { | 110 | { |
| 111 | char retval; | 111 | char retval; |
| 112 | 112 | ||
| 113 | #ifdef CONFIG_COLDFIRE | 113 | #ifdef CONFIG_COLDFIRE |
| 114 | __asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0" | 114 | __asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0" |
| 115 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 115 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
| 116 | : "d" (nr) | 116 | : "d" (nr) |
| 117 | : "%a0"); | 117 | : "%a0"); |
| 118 | #else | 118 | #else |
| 119 | __asm__ __volatile__ ("bset %2,%1; sne %0" | 119 | __asm__ __volatile__ ("bset %2,%1; sne %0" |
| 120 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 120 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
| 121 | : "di" (nr) | 121 | : "di" (nr) |
| 122 | /* No clobber */); | 122 | /* No clobber */); |
| 123 | #endif | 123 | #endif |
| 124 | 124 | ||
| 125 | return retval; | 125 | return retval; |
| 126 | } | 126 | } |
| 127 | 127 | ||
| 128 | #define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr) | 128 | #define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr) |
| 129 | 129 | ||
| 130 | static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr) | 130 | static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr) |
| 131 | { | 131 | { |
| 132 | char retval; | 132 | char retval; |
| 133 | 133 | ||
| 134 | #ifdef CONFIG_COLDFIRE | 134 | #ifdef CONFIG_COLDFIRE |
| 135 | __asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0" | 135 | __asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0" |
| 136 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 136 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
| 137 | : "d" (nr) | 137 | : "d" (nr) |
| 138 | : "%a0"); | 138 | : "%a0"); |
| 139 | #else | 139 | #else |
| 140 | __asm__ __volatile__ ("bclr %2,%1; sne %0" | 140 | __asm__ __volatile__ ("bclr %2,%1; sne %0" |
| 141 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 141 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
| 142 | : "di" (nr) | 142 | : "di" (nr) |
| 143 | /* No clobber */); | 143 | /* No clobber */); |
| 144 | #endif | 144 | #endif |
| 145 | 145 | ||
| 146 | return retval; | 146 | return retval; |
| 147 | } | 147 | } |
| 148 | 148 | ||
| 149 | #define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr) | 149 | #define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr) |
| 150 | 150 | ||
| 151 | static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr) | 151 | static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr) |
| 152 | { | 152 | { |
| 153 | char retval; | 153 | char retval; |
| 154 | 154 | ||
| 155 | #ifdef CONFIG_COLDFIRE | 155 | #ifdef CONFIG_COLDFIRE |
| 156 | __asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0" | 156 | __asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0" |
| 157 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 157 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
| 158 | : "d" (nr) | 158 | : "d" (nr) |
| 159 | : "%a0"); | 159 | : "%a0"); |
| 160 | #else | 160 | #else |
| 161 | __asm__ __volatile__ ("bchg %2,%1; sne %0" | 161 | __asm__ __volatile__ ("bchg %2,%1; sne %0" |
| 162 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) | 162 | : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3]) |
| 163 | : "di" (nr) | 163 | : "di" (nr) |
| 164 | /* No clobber */); | 164 | /* No clobber */); |
| 165 | #endif | 165 | #endif |
| 166 | 166 | ||
| 167 | return retval; | 167 | return retval; |
| 168 | } | 168 | } |
| 169 | 169 | ||
| 170 | #define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr) | 170 | #define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr) |
| 171 | 171 | ||
| 172 | /* | 172 | /* |
| 173 | * This routine doesn't need to be atomic. | 173 | * This routine doesn't need to be atomic. |
| 174 | */ | 174 | */ |
| 175 | static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr) | 175 | static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr) |
| 176 | { | 176 | { |
| 177 | return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; | 177 | return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; |
| 178 | } | 178 | } |
| 179 | 179 | ||
| 180 | static __inline__ int __test_bit(int nr, const volatile unsigned long * addr) | 180 | static __inline__ int __test_bit(int nr, const volatile unsigned long * addr) |
| 181 | { | 181 | { |
| 182 | int * a = (int *) addr; | 182 | int * a = (int *) addr; |
| 183 | int mask; | 183 | int mask; |
| 184 | 184 | ||
| 185 | a += nr >> 5; | 185 | a += nr >> 5; |
| 186 | mask = 1 << (nr & 0x1f); | 186 | mask = 1 << (nr & 0x1f); |
| 187 | return ((mask & *a) != 0); | 187 | return ((mask & *a) != 0); |
| 188 | } | 188 | } |
| 189 | 189 | ||
| 190 | #define test_bit(nr,addr) \ | 190 | #define test_bit(nr,addr) \ |
| 191 | (__builtin_constant_p(nr) ? \ | 191 | (__builtin_constant_p(nr) ? \ |
| 192 | __constant_test_bit((nr),(addr)) : \ | 192 | __constant_test_bit((nr),(addr)) : \ |
| 193 | __test_bit((nr),(addr))) | 193 | __test_bit((nr),(addr))) |
| 194 | 194 | ||
| 195 | #include <asm-generic/bitops/find.h> | 195 | #include <asm-generic/bitops/find.h> |
| 196 | #include <asm-generic/bitops/hweight.h> | 196 | #include <asm-generic/bitops/hweight.h> |
| 197 | #include <asm-generic/bitops/lock.h> | 197 | #include <asm-generic/bitops/lock.h> |
| 198 | 198 | ||
| 199 | #define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) | 199 | #define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) |
| 200 | 200 | ||
| 201 | static inline void __set_bit_le(int nr, void *addr) | 201 | static inline void __set_bit_le(int nr, void *addr) |
| 202 | { | 202 | { |
| 203 | __set_bit(nr ^ BITOP_LE_SWIZZLE, addr); | 203 | __set_bit(nr ^ BITOP_LE_SWIZZLE, addr); |
| 204 | } | 204 | } |
| 205 | 205 | ||
| 206 | static inline void __clear_bit_le(int nr, void *addr) | 206 | static inline void __clear_bit_le(int nr, void *addr) |
| 207 | { | 207 | { |
| 208 | __clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); | 208 | __clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); |
| 209 | } | 209 | } |
| 210 | 210 | ||
| 211 | static inline int __test_and_set_bit_le(int nr, volatile void *addr) | 211 | static inline int __test_and_set_bit_le(int nr, volatile void *addr) |
| 212 | { | 212 | { |
| 213 | char retval; | 213 | char retval; |
| 214 | 214 | ||
| 215 | #ifdef CONFIG_COLDFIRE | 215 | #ifdef CONFIG_COLDFIRE |
| 216 | __asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0" | 216 | __asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0" |
| 217 | : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3]) | 217 | : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3]) |
| 218 | : "d" (nr) | 218 | : "d" (nr) |
| 219 | : "%a0"); | 219 | : "%a0"); |
| 220 | #else | 220 | #else |
| 221 | __asm__ __volatile__ ("bset %2,%1; sne %0" | 221 | __asm__ __volatile__ ("bset %2,%1; sne %0" |
| 222 | : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3]) | 222 | : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3]) |
| 223 | : "di" (nr) | 223 | : "di" (nr) |
| 224 | /* No clobber */); | 224 | /* No clobber */); |
| 225 | #endif | 225 | #endif |
| 226 | 226 | ||
| 227 | return retval; | 227 | return retval; |
| 228 | } | 228 | } |
| 229 | 229 | ||
| 230 | static inline int __test_and_clear_bit_le(int nr, volatile void *addr) | 230 | static inline int __test_and_clear_bit_le(int nr, volatile void *addr) |
| 231 | { | 231 | { |
| 232 | char retval; | 232 | char retval; |
| 233 | 233 | ||
| 234 | #ifdef CONFIG_COLDFIRE | 234 | #ifdef CONFIG_COLDFIRE |
| 235 | __asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0" | 235 | __asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0" |
| 236 | : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3]) | 236 | : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3]) |
| 237 | : "d" (nr) | 237 | : "d" (nr) |
| 238 | : "%a0"); | 238 | : "%a0"); |
| 239 | #else | 239 | #else |
| 240 | __asm__ __volatile__ ("bclr %2,%1; sne %0" | 240 | __asm__ __volatile__ ("bclr %2,%1; sne %0" |
| 241 | : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3]) | 241 | : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3]) |
| 242 | : "di" (nr) | 242 | : "di" (nr) |
| 243 | /* No clobber */); | 243 | /* No clobber */); |
| 244 | #endif | 244 | #endif |
| 245 | 245 | ||
| 246 | return retval; | 246 | return retval; |
| 247 | } | 247 | } |
| 248 | 248 | ||
| 249 | #define ext2_set_bit_atomic(lock, nr, addr) \ | 249 | #define ext2_set_bit_atomic(lock, nr, addr) \ |
| 250 | ({ \ | 250 | ({ \ |
| 251 | int ret; \ | 251 | int ret; \ |
| 252 | spin_lock(lock); \ | 252 | spin_lock(lock); \ |
| 253 | ret = __test_and_set_bit_le((nr), (addr)); \ | 253 | ret = __test_and_set_bit_le((nr), (addr)); \ |
| 254 | spin_unlock(lock); \ | 254 | spin_unlock(lock); \ |
| 255 | ret; \ | 255 | ret; \ |
| 256 | }) | 256 | }) |
| 257 | 257 | ||
| 258 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | 258 | #define ext2_clear_bit_atomic(lock, nr, addr) \ |
| 259 | ({ \ | 259 | ({ \ |
| 260 | int ret; \ | 260 | int ret; \ |
| 261 | spin_lock(lock); \ | 261 | spin_lock(lock); \ |
| 262 | ret = __test_and_clear_bit_le((nr), (addr)); \ | 262 | ret = __test_and_clear_bit_le((nr), (addr)); \ |
| 263 | spin_unlock(lock); \ | 263 | spin_unlock(lock); \ |
| 264 | ret; \ | 264 | ret; \ |
| 265 | }) | 265 | }) |
| 266 | 266 | ||
| 267 | static inline int test_bit_le(int nr, const volatile void *addr) | 267 | static inline int test_bit_le(int nr, const volatile void *addr) |
| 268 | { | 268 | { |
| 269 | char retval; | 269 | char retval; |
| 270 | 270 | ||
| 271 | #ifdef CONFIG_COLDFIRE | 271 | #ifdef CONFIG_COLDFIRE |
| 272 | __asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0" | 272 | __asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0" |
| 273 | : "=d" (retval) | 273 | : "=d" (retval) |
| 274 | : "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr) | 274 | : "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr) |
| 275 | : "%a0"); | 275 | : "%a0"); |
| 276 | #else | 276 | #else |
| 277 | __asm__ __volatile__ ("btst %2,%1; sne %0" | 277 | __asm__ __volatile__ ("btst %2,%1; sne %0" |
| 278 | : "=d" (retval) | 278 | : "=d" (retval) |
| 279 | : "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr) | 279 | : "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr) |
| 280 | /* No clobber */); | 280 | /* No clobber */); |
| 281 | #endif | 281 | #endif |
| 282 | 282 | ||
| 283 | return retval; | 283 | return retval; |
| 284 | } | 284 | } |
| 285 | 285 | ||
| 286 | #define find_first_zero_bit_le(addr, size) \ | 286 | #define find_first_zero_bit_le(addr, size) \ |
| 287 | find_next_zero_bit_le((addr), (size), 0) | 287 | find_next_zero_bit_le((addr), (size), 0) |
| 288 | 288 | ||
| 289 | static inline unsigned long find_next_zero_bit_le(void *addr, unsigned long size, unsigned long offset) | 289 | static inline unsigned long find_next_zero_bit_le(void *addr, unsigned long size, unsigned long offset) |
| 290 | { | 290 | { |
| 291 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | 291 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); |
| 292 | unsigned long result = offset & ~31UL; | 292 | unsigned long result = offset & ~31UL; |
| 293 | unsigned long tmp; | 293 | unsigned long tmp; |
| 294 | 294 | ||
| 295 | if (offset >= size) | 295 | if (offset >= size) |
| 296 | return size; | 296 | return size; |
| 297 | size -= result; | 297 | size -= result; |
| 298 | offset &= 31UL; | 298 | offset &= 31UL; |
| 299 | if(offset) { | 299 | if(offset) { |
| 300 | /* We hold the little endian value in tmp, but then the | 300 | /* We hold the little endian value in tmp, but then the |
| 301 | * shift is illegal. So we could keep a big endian value | 301 | * shift is illegal. So we could keep a big endian value |
| 302 | * in tmp, like this: | 302 | * in tmp, like this: |
| 303 | * | 303 | * |
| 304 | * tmp = __swab32(*(p++)); | 304 | * tmp = __swab32(*(p++)); |
| 305 | * tmp |= ~0UL >> (32-offset); | 305 | * tmp |= ~0UL >> (32-offset); |
| 306 | * | 306 | * |
| 307 | * but this would decrease performance, so we change the | 307 | * but this would decrease performance, so we change the |
| 308 | * shift: | 308 | * shift: |
| 309 | */ | 309 | */ |
| 310 | tmp = *(p++); | 310 | tmp = *(p++); |
| 311 | tmp |= __swab32(~0UL >> (32-offset)); | 311 | tmp |= __swab32(~0UL >> (32-offset)); |
| 312 | if(size < 32) | 312 | if(size < 32) |
| 313 | goto found_first; | 313 | goto found_first; |
| 314 | if(~tmp) | 314 | if(~tmp) |
| 315 | goto found_middle; | 315 | goto found_middle; |
| 316 | size -= 32; | 316 | size -= 32; |
| 317 | result += 32; | 317 | result += 32; |
| 318 | } | 318 | } |
| 319 | while(size & ~31UL) { | 319 | while(size & ~31UL) { |
| 320 | if(~(tmp = *(p++))) | 320 | if(~(tmp = *(p++))) |
| 321 | goto found_middle; | 321 | goto found_middle; |
| 322 | result += 32; | 322 | result += 32; |
| 323 | size -= 32; | 323 | size -= 32; |
| 324 | } | 324 | } |
| 325 | if(!size) | 325 | if(!size) |
| 326 | return result; | 326 | return result; |
| 327 | tmp = *p; | 327 | tmp = *p; |
| 328 | 328 | ||
| 329 | found_first: | 329 | found_first: |
| 330 | /* tmp is little endian, so we would have to swab the shift, | 330 | /* tmp is little endian, so we would have to swab the shift, |
| 331 | * see above. But then we have to swab tmp below for ffz, so | 331 | * see above. But then we have to swab tmp below for ffz, so |
| 332 | * we might as well do this here. | 332 | * we might as well do this here. |
| 333 | */ | 333 | */ |
| 334 | return result + ffz(__swab32(tmp) | (~0UL << size)); | 334 | return result + ffz(__swab32(tmp) | (~0UL << size)); |
| 335 | found_middle: | 335 | found_middle: |
| 336 | return result + ffz(__swab32(tmp)); | 336 | return result + ffz(__swab32(tmp)); |
| 337 | } | 337 | } |
| 338 | 338 | ||
| 339 | #include <asm-generic/bitops/minix.h> | ||
| 340 | |||
| 341 | #endif /* __KERNEL__ */ | 339 | #endif /* __KERNEL__ */ |
| 342 | 340 | ||
| 343 | #include <asm-generic/bitops/fls.h> | 341 | #include <asm-generic/bitops/fls.h> |
| 344 | #include <asm-generic/bitops/__fls.h> | 342 | #include <asm-generic/bitops/__fls.h> |
| 345 | #include <asm-generic/bitops/fls64.h> | 343 | #include <asm-generic/bitops/fls64.h> |
| 346 | 344 | ||
| 347 | #endif /* _M68KNOMMU_BITOPS_H */ | 345 | #endif /* _M68KNOMMU_BITOPS_H */ |
| 348 | 346 |
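The byte-swap trick in the m68knommu find_next_zero_bit_le() above keeps each word in memory order while scanning and only applies __swab32() where a little-endian bit index actually has to be computed. A userspace sketch of the idea, assuming a 32-bit word and local stand-ins swab32()/ffz32() rather than kernel interfaces:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t swab32(uint32_t x)
    {
            return (x >> 24) | ((x >> 8) & 0xff00) |
                   ((x << 8) & 0xff0000) | (x << 24);
    }

    /* Index of the first zero bit; assumes at least one bit is zero. */
    static unsigned int ffz32(uint32_t x)
    {
            unsigned int i = 0;

            while (x & 1) {
                    x >>= 1;
                    i++;
            }
            return i;
    }

    int main(void)
    {
            /* A little-endian bitmap with bits 0..9 set occupies bytes
             * ff 03 00 00; loaded as one big-endian word that is 0xff030000. */
            uint32_t word_in_memory_order = 0xff030000u;

            /* Swab once, then ffz yields the little-endian bit index: 10. */
            printf("first zero bit: %u\n", ffz32(swab32(word_in_memory_order)));
            return 0;
    }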
arch/mips/include/asm/bitops.h
| 1 | /* | 1 | /* |
| 2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
| 3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
| 4 | * for more details. | 4 | * for more details. |
| 5 | * | 5 | * |
| 6 | * Copyright (c) 1994 - 1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org) | 6 | * Copyright (c) 1994 - 1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org) |
| 7 | * Copyright (c) 1999, 2000 Silicon Graphics, Inc. | 7 | * Copyright (c) 1999, 2000 Silicon Graphics, Inc. |
| 8 | */ | 8 | */ |
| 9 | #ifndef _ASM_BITOPS_H | 9 | #ifndef _ASM_BITOPS_H |
| 10 | #define _ASM_BITOPS_H | 10 | #define _ASM_BITOPS_H |
| 11 | 11 | ||
| 12 | #ifndef _LINUX_BITOPS_H | 12 | #ifndef _LINUX_BITOPS_H |
| 13 | #error only <linux/bitops.h> can be included directly | 13 | #error only <linux/bitops.h> can be included directly |
| 14 | #endif | 14 | #endif |
| 15 | 15 | ||
| 16 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
| 17 | #include <linux/irqflags.h> | 17 | #include <linux/irqflags.h> |
| 18 | #include <linux/types.h> | 18 | #include <linux/types.h> |
| 19 | #include <asm/barrier.h> | 19 | #include <asm/barrier.h> |
| 20 | #include <asm/bug.h> | 20 | #include <asm/bug.h> |
| 21 | #include <asm/byteorder.h> /* sigh ... */ | 21 | #include <asm/byteorder.h> /* sigh ... */ |
| 22 | #include <asm/cpu-features.h> | 22 | #include <asm/cpu-features.h> |
| 23 | #include <asm/sgidefs.h> | 23 | #include <asm/sgidefs.h> |
| 24 | #include <asm/war.h> | 24 | #include <asm/war.h> |
| 25 | 25 | ||
| 26 | #if _MIPS_SZLONG == 32 | 26 | #if _MIPS_SZLONG == 32 |
| 27 | #define SZLONG_LOG 5 | 27 | #define SZLONG_LOG 5 |
| 28 | #define SZLONG_MASK 31UL | 28 | #define SZLONG_MASK 31UL |
| 29 | #define __LL "ll " | 29 | #define __LL "ll " |
| 30 | #define __SC "sc " | 30 | #define __SC "sc " |
| 31 | #define __INS "ins " | 31 | #define __INS "ins " |
| 32 | #define __EXT "ext " | 32 | #define __EXT "ext " |
| 33 | #elif _MIPS_SZLONG == 64 | 33 | #elif _MIPS_SZLONG == 64 |
| 34 | #define SZLONG_LOG 6 | 34 | #define SZLONG_LOG 6 |
| 35 | #define SZLONG_MASK 63UL | 35 | #define SZLONG_MASK 63UL |
| 36 | #define __LL "lld " | 36 | #define __LL "lld " |
| 37 | #define __SC "scd " | 37 | #define __SC "scd " |
| 38 | #define __INS "dins " | 38 | #define __INS "dins " |
| 39 | #define __EXT "dext " | 39 | #define __EXT "dext " |
| 40 | #endif | 40 | #endif |
| 41 | 41 | ||
| 42 | /* | 42 | /* |
| 43 | * clear_bit() doesn't provide any barrier for the compiler. | 43 | * clear_bit() doesn't provide any barrier for the compiler. |
| 44 | */ | 44 | */ |
| 45 | #define smp_mb__before_clear_bit() smp_mb__before_llsc() | 45 | #define smp_mb__before_clear_bit() smp_mb__before_llsc() |
| 46 | #define smp_mb__after_clear_bit() smp_llsc_mb() | 46 | #define smp_mb__after_clear_bit() smp_llsc_mb() |
| 47 | 47 | ||
| 48 | /* | 48 | /* |
| 49 | * set_bit - Atomically set a bit in memory | 49 | * set_bit - Atomically set a bit in memory |
| 50 | * @nr: the bit to set | 50 | * @nr: the bit to set |
| 51 | * @addr: the address to start counting from | 51 | * @addr: the address to start counting from |
| 52 | * | 52 | * |
| 53 | * This function is atomic and may not be reordered. See __set_bit() | 53 | * This function is atomic and may not be reordered. See __set_bit() |
| 54 | * if you do not require the atomic guarantees. | 54 | * if you do not require the atomic guarantees. |
| 55 | * Note that @nr may be almost arbitrarily large; this function is not | 55 | * Note that @nr may be almost arbitrarily large; this function is not |
| 56 | * restricted to acting on a single-word quantity. | 56 | * restricted to acting on a single-word quantity. |
| 57 | */ | 57 | */ |
| 58 | static inline void set_bit(unsigned long nr, volatile unsigned long *addr) | 58 | static inline void set_bit(unsigned long nr, volatile unsigned long *addr) |
| 59 | { | 59 | { |
| 60 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 60 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
| 61 | unsigned short bit = nr & SZLONG_MASK; | 61 | unsigned short bit = nr & SZLONG_MASK; |
| 62 | unsigned long temp; | 62 | unsigned long temp; |
| 63 | 63 | ||
| 64 | if (kernel_uses_llsc && R10000_LLSC_WAR) { | 64 | if (kernel_uses_llsc && R10000_LLSC_WAR) { |
| 65 | __asm__ __volatile__( | 65 | __asm__ __volatile__( |
| 66 | " .set mips3 \n" | 66 | " .set mips3 \n" |
| 67 | "1: " __LL "%0, %1 # set_bit \n" | 67 | "1: " __LL "%0, %1 # set_bit \n" |
| 68 | " or %0, %2 \n" | 68 | " or %0, %2 \n" |
| 69 | " " __SC "%0, %1 \n" | 69 | " " __SC "%0, %1 \n" |
| 70 | " beqzl %0, 1b \n" | 70 | " beqzl %0, 1b \n" |
| 71 | " .set mips0 \n" | 71 | " .set mips0 \n" |
| 72 | : "=&r" (temp), "=m" (*m) | 72 | : "=&r" (temp), "=m" (*m) |
| 73 | : "ir" (1UL << bit), "m" (*m)); | 73 | : "ir" (1UL << bit), "m" (*m)); |
| 74 | #ifdef CONFIG_CPU_MIPSR2 | 74 | #ifdef CONFIG_CPU_MIPSR2 |
| 75 | } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { | 75 | } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { |
| 76 | do { | 76 | do { |
| 77 | __asm__ __volatile__( | 77 | __asm__ __volatile__( |
| 78 | " " __LL "%0, %1 # set_bit \n" | 78 | " " __LL "%0, %1 # set_bit \n" |
| 79 | " " __INS "%0, %3, %2, 1 \n" | 79 | " " __INS "%0, %3, %2, 1 \n" |
| 80 | " " __SC "%0, %1 \n" | 80 | " " __SC "%0, %1 \n" |
| 81 | : "=&r" (temp), "+m" (*m) | 81 | : "=&r" (temp), "+m" (*m) |
| 82 | : "ir" (bit), "r" (~0)); | 82 | : "ir" (bit), "r" (~0)); |
| 83 | } while (unlikely(!temp)); | 83 | } while (unlikely(!temp)); |
| 84 | #endif /* CONFIG_CPU_MIPSR2 */ | 84 | #endif /* CONFIG_CPU_MIPSR2 */ |
| 85 | } else if (kernel_uses_llsc) { | 85 | } else if (kernel_uses_llsc) { |
| 86 | do { | 86 | do { |
| 87 | __asm__ __volatile__( | 87 | __asm__ __volatile__( |
| 88 | " .set mips3 \n" | 88 | " .set mips3 \n" |
| 89 | " " __LL "%0, %1 # set_bit \n" | 89 | " " __LL "%0, %1 # set_bit \n" |
| 90 | " or %0, %2 \n" | 90 | " or %0, %2 \n" |
| 91 | " " __SC "%0, %1 \n" | 91 | " " __SC "%0, %1 \n" |
| 92 | " .set mips0 \n" | 92 | " .set mips0 \n" |
| 93 | : "=&r" (temp), "+m" (*m) | 93 | : "=&r" (temp), "+m" (*m) |
| 94 | : "ir" (1UL << bit)); | 94 | : "ir" (1UL << bit)); |
| 95 | } while (unlikely(!temp)); | 95 | } while (unlikely(!temp)); |
| 96 | } else { | 96 | } else { |
| 97 | volatile unsigned long *a = addr; | 97 | volatile unsigned long *a = addr; |
| 98 | unsigned long mask; | 98 | unsigned long mask; |
| 99 | unsigned long flags; | 99 | unsigned long flags; |
| 100 | 100 | ||
| 101 | a += nr >> SZLONG_LOG; | 101 | a += nr >> SZLONG_LOG; |
| 102 | mask = 1UL << bit; | 102 | mask = 1UL << bit; |
| 103 | raw_local_irq_save(flags); | 103 | raw_local_irq_save(flags); |
| 104 | *a |= mask; | 104 | *a |= mask; |
| 105 | raw_local_irq_restore(flags); | 105 | raw_local_irq_restore(flags); |
| 106 | } | 106 | } |
| 107 | } | 107 | } |
| 108 | 108 | ||
| 109 | /* | 109 | /* |
| 110 | * clear_bit - Clears a bit in memory | 110 | * clear_bit - Clears a bit in memory |
| 111 | * @nr: Bit to clear | 111 | * @nr: Bit to clear |
| 112 | * @addr: Address to start counting from | 112 | * @addr: Address to start counting from |
| 113 | * | 113 | * |
| 114 | * clear_bit() is atomic and may not be reordered. However, it does | 114 | * clear_bit() is atomic and may not be reordered. However, it does |
| 115 | * not contain a memory barrier, so if it is used for locking purposes, | 115 | * not contain a memory barrier, so if it is used for locking purposes, |
| 116 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | 116 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() |
| 117 | * in order to ensure changes are visible on other processors. | 117 | * in order to ensure changes are visible on other processors. |
| 118 | */ | 118 | */ |
| 119 | static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) | 119 | static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) |
| 120 | { | 120 | { |
| 121 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 121 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
| 122 | unsigned short bit = nr & SZLONG_MASK; | 122 | unsigned short bit = nr & SZLONG_MASK; |
| 123 | unsigned long temp; | 123 | unsigned long temp; |
| 124 | 124 | ||
| 125 | if (kernel_uses_llsc && R10000_LLSC_WAR) { | 125 | if (kernel_uses_llsc && R10000_LLSC_WAR) { |
| 126 | __asm__ __volatile__( | 126 | __asm__ __volatile__( |
| 127 | " .set mips3 \n" | 127 | " .set mips3 \n" |
| 128 | "1: " __LL "%0, %1 # clear_bit \n" | 128 | "1: " __LL "%0, %1 # clear_bit \n" |
| 129 | " and %0, %2 \n" | 129 | " and %0, %2 \n" |
| 130 | " " __SC "%0, %1 \n" | 130 | " " __SC "%0, %1 \n" |
| 131 | " beqzl %0, 1b \n" | 131 | " beqzl %0, 1b \n" |
| 132 | " .set mips0 \n" | 132 | " .set mips0 \n" |
| 133 | : "=&r" (temp), "+m" (*m) | 133 | : "=&r" (temp), "+m" (*m) |
| 134 | : "ir" (~(1UL << bit))); | 134 | : "ir" (~(1UL << bit))); |
| 135 | #ifdef CONFIG_CPU_MIPSR2 | 135 | #ifdef CONFIG_CPU_MIPSR2 |
| 136 | } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { | 136 | } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { |
| 137 | do { | 137 | do { |
| 138 | __asm__ __volatile__( | 138 | __asm__ __volatile__( |
| 139 | " " __LL "%0, %1 # clear_bit \n" | 139 | " " __LL "%0, %1 # clear_bit \n" |
| 140 | " " __INS "%0, $0, %2, 1 \n" | 140 | " " __INS "%0, $0, %2, 1 \n" |
| 141 | " " __SC "%0, %1 \n" | 141 | " " __SC "%0, %1 \n" |
| 142 | : "=&r" (temp), "+m" (*m) | 142 | : "=&r" (temp), "+m" (*m) |
| 143 | : "ir" (bit)); | 143 | : "ir" (bit)); |
| 144 | } while (unlikely(!temp)); | 144 | } while (unlikely(!temp)); |
| 145 | #endif /* CONFIG_CPU_MIPSR2 */ | 145 | #endif /* CONFIG_CPU_MIPSR2 */ |
| 146 | } else if (kernel_uses_llsc) { | 146 | } else if (kernel_uses_llsc) { |
| 147 | do { | 147 | do { |
| 148 | __asm__ __volatile__( | 148 | __asm__ __volatile__( |
| 149 | " .set mips3 \n" | 149 | " .set mips3 \n" |
| 150 | " " __LL "%0, %1 # clear_bit \n" | 150 | " " __LL "%0, %1 # clear_bit \n" |
| 151 | " and %0, %2 \n" | 151 | " and %0, %2 \n" |
| 152 | " " __SC "%0, %1 \n" | 152 | " " __SC "%0, %1 \n" |
| 153 | " .set mips0 \n" | 153 | " .set mips0 \n" |
| 154 | : "=&r" (temp), "+m" (*m) | 154 | : "=&r" (temp), "+m" (*m) |
| 155 | : "ir" (~(1UL << bit))); | 155 | : "ir" (~(1UL << bit))); |
| 156 | } while (unlikely(!temp)); | 156 | } while (unlikely(!temp)); |
| 157 | } else { | 157 | } else { |
| 158 | volatile unsigned long *a = addr; | 158 | volatile unsigned long *a = addr; |
| 159 | unsigned long mask; | 159 | unsigned long mask; |
| 160 | unsigned long flags; | 160 | unsigned long flags; |
| 161 | 161 | ||
| 162 | a += nr >> SZLONG_LOG; | 162 | a += nr >> SZLONG_LOG; |
| 163 | mask = 1UL << bit; | 163 | mask = 1UL << bit; |
| 164 | raw_local_irq_save(flags); | 164 | raw_local_irq_save(flags); |
| 165 | *a &= ~mask; | 165 | *a &= ~mask; |
| 166 | raw_local_irq_restore(flags); | 166 | raw_local_irq_restore(flags); |
| 167 | } | 167 | } |
| 168 | } | 168 | } |
| 169 | 169 | ||
| 170 | /* | 170 | /* |
| 171 | * clear_bit_unlock - Clears a bit in memory | 171 | * clear_bit_unlock - Clears a bit in memory |
| 172 | * @nr: Bit to clear | 172 | * @nr: Bit to clear |
| 173 | * @addr: Address to start counting from | 173 | * @addr: Address to start counting from |
| 174 | * | 174 | * |
| 175 | * clear_bit() is atomic and implies release semantics before the memory | 175 | * clear_bit() is atomic and implies release semantics before the memory |
| 176 | * operation. It can be used for an unlock. | 176 | * operation. It can be used for an unlock. |
| 177 | */ | 177 | */ |
| 178 | static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr) | 178 | static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr) |
| 179 | { | 179 | { |
| 180 | smp_mb__before_clear_bit(); | 180 | smp_mb__before_clear_bit(); |
| 181 | clear_bit(nr, addr); | 181 | clear_bit(nr, addr); |
| 182 | } | 182 | } |
| 183 | 183 | ||
| 184 | /* | 184 | /* |
| 185 | * change_bit - Toggle a bit in memory | 185 | * change_bit - Toggle a bit in memory |
| 186 | * @nr: Bit to change | 186 | * @nr: Bit to change |
| 187 | * @addr: Address to start counting from | 187 | * @addr: Address to start counting from |
| 188 | * | 188 | * |
| 189 | * change_bit() is atomic and may not be reordered. | 189 | * change_bit() is atomic and may not be reordered. |
| 190 | * Note that @nr may be almost arbitrarily large; this function is not | 190 | * Note that @nr may be almost arbitrarily large; this function is not |
| 191 | * restricted to acting on a single-word quantity. | 191 | * restricted to acting on a single-word quantity. |
| 192 | */ | 192 | */ |
| 193 | static inline void change_bit(unsigned long nr, volatile unsigned long *addr) | 193 | static inline void change_bit(unsigned long nr, volatile unsigned long *addr) |
| 194 | { | 194 | { |
| 195 | unsigned short bit = nr & SZLONG_MASK; | 195 | unsigned short bit = nr & SZLONG_MASK; |
| 196 | 196 | ||
| 197 | if (kernel_uses_llsc && R10000_LLSC_WAR) { | 197 | if (kernel_uses_llsc && R10000_LLSC_WAR) { |
| 198 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 198 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
| 199 | unsigned long temp; | 199 | unsigned long temp; |
| 200 | 200 | ||
| 201 | __asm__ __volatile__( | 201 | __asm__ __volatile__( |
| 202 | " .set mips3 \n" | 202 | " .set mips3 \n" |
| 203 | "1: " __LL "%0, %1 # change_bit \n" | 203 | "1: " __LL "%0, %1 # change_bit \n" |
| 204 | " xor %0, %2 \n" | 204 | " xor %0, %2 \n" |
| 205 | " " __SC "%0, %1 \n" | 205 | " " __SC "%0, %1 \n" |
| 206 | " beqzl %0, 1b \n" | 206 | " beqzl %0, 1b \n" |
| 207 | " .set mips0 \n" | 207 | " .set mips0 \n" |
| 208 | : "=&r" (temp), "+m" (*m) | 208 | : "=&r" (temp), "+m" (*m) |
| 209 | : "ir" (1UL << bit)); | 209 | : "ir" (1UL << bit)); |
| 210 | } else if (kernel_uses_llsc) { | 210 | } else if (kernel_uses_llsc) { |
| 211 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 211 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
| 212 | unsigned long temp; | 212 | unsigned long temp; |
| 213 | 213 | ||
| 214 | do { | 214 | do { |
| 215 | __asm__ __volatile__( | 215 | __asm__ __volatile__( |
| 216 | " .set mips3 \n" | 216 | " .set mips3 \n" |
| 217 | " " __LL "%0, %1 # change_bit \n" | 217 | " " __LL "%0, %1 # change_bit \n" |
| 218 | " xor %0, %2 \n" | 218 | " xor %0, %2 \n" |
| 219 | " " __SC "%0, %1 \n" | 219 | " " __SC "%0, %1 \n" |
| 220 | " .set mips0 \n" | 220 | " .set mips0 \n" |
| 221 | : "=&r" (temp), "+m" (*m) | 221 | : "=&r" (temp), "+m" (*m) |
| 222 | : "ir" (1UL << bit)); | 222 | : "ir" (1UL << bit)); |
| 223 | } while (unlikely(!temp)); | 223 | } while (unlikely(!temp)); |
| 224 | } else { | 224 | } else { |
| 225 | volatile unsigned long *a = addr; | 225 | volatile unsigned long *a = addr; |
| 226 | unsigned long mask; | 226 | unsigned long mask; |
| 227 | unsigned long flags; | 227 | unsigned long flags; |
| 228 | 228 | ||
| 229 | a += nr >> SZLONG_LOG; | 229 | a += nr >> SZLONG_LOG; |
| 230 | mask = 1UL << bit; | 230 | mask = 1UL << bit; |
| 231 | raw_local_irq_save(flags); | 231 | raw_local_irq_save(flags); |
| 232 | *a ^= mask; | 232 | *a ^= mask; |
| 233 | raw_local_irq_restore(flags); | 233 | raw_local_irq_restore(flags); |
| 234 | } | 234 | } |
| 235 | } | 235 | } |
| 236 | 236 | ||
| 237 | /* | 237 | /* |
| 238 | * test_and_set_bit - Set a bit and return its old value | 238 | * test_and_set_bit - Set a bit and return its old value |
| 239 | * @nr: Bit to set | 239 | * @nr: Bit to set |
| 240 | * @addr: Address to count from | 240 | * @addr: Address to count from |
| 241 | * | 241 | * |
| 242 | * This operation is atomic and cannot be reordered. | 242 | * This operation is atomic and cannot be reordered. |
| 243 | * It also implies a memory barrier. | 243 | * It also implies a memory barrier. |
| 244 | */ | 244 | */ |
| 245 | static inline int test_and_set_bit(unsigned long nr, | 245 | static inline int test_and_set_bit(unsigned long nr, |
| 246 | volatile unsigned long *addr) | 246 | volatile unsigned long *addr) |
| 247 | { | 247 | { |
| 248 | unsigned short bit = nr & SZLONG_MASK; | 248 | unsigned short bit = nr & SZLONG_MASK; |
| 249 | unsigned long res; | 249 | unsigned long res; |
| 250 | 250 | ||
| 251 | smp_mb__before_llsc(); | 251 | smp_mb__before_llsc(); |
| 252 | 252 | ||
| 253 | if (kernel_uses_llsc && R10000_LLSC_WAR) { | 253 | if (kernel_uses_llsc && R10000_LLSC_WAR) { |
| 254 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 254 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
| 255 | unsigned long temp; | 255 | unsigned long temp; |
| 256 | 256 | ||
| 257 | __asm__ __volatile__( | 257 | __asm__ __volatile__( |
| 258 | " .set mips3 \n" | 258 | " .set mips3 \n" |
| 259 | "1: " __LL "%0, %1 # test_and_set_bit \n" | 259 | "1: " __LL "%0, %1 # test_and_set_bit \n" |
| 260 | " or %2, %0, %3 \n" | 260 | " or %2, %0, %3 \n" |
| 261 | " " __SC "%2, %1 \n" | 261 | " " __SC "%2, %1 \n" |
| 262 | " beqzl %2, 1b \n" | 262 | " beqzl %2, 1b \n" |
| 263 | " and %2, %0, %3 \n" | 263 | " and %2, %0, %3 \n" |
| 264 | " .set mips0 \n" | 264 | " .set mips0 \n" |
| 265 | : "=&r" (temp), "+m" (*m), "=&r" (res) | 265 | : "=&r" (temp), "+m" (*m), "=&r" (res) |
| 266 | : "r" (1UL << bit) | 266 | : "r" (1UL << bit) |
| 267 | : "memory"); | 267 | : "memory"); |
| 268 | } else if (kernel_uses_llsc) { | 268 | } else if (kernel_uses_llsc) { |
| 269 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 269 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
| 270 | unsigned long temp; | 270 | unsigned long temp; |
| 271 | 271 | ||
| 272 | do { | 272 | do { |
| 273 | __asm__ __volatile__( | 273 | __asm__ __volatile__( |
| 274 | " .set mips3 \n" | 274 | " .set mips3 \n" |
| 275 | " " __LL "%0, %1 # test_and_set_bit \n" | 275 | " " __LL "%0, %1 # test_and_set_bit \n" |
| 276 | " or %2, %0, %3 \n" | 276 | " or %2, %0, %3 \n" |
| 277 | " " __SC "%2, %1 \n" | 277 | " " __SC "%2, %1 \n" |
| 278 | " .set mips0 \n" | 278 | " .set mips0 \n" |
| 279 | : "=&r" (temp), "+m" (*m), "=&r" (res) | 279 | : "=&r" (temp), "+m" (*m), "=&r" (res) |
| 280 | : "r" (1UL << bit) | 280 | : "r" (1UL << bit) |
| 281 | : "memory"); | 281 | : "memory"); |
| 282 | } while (unlikely(!res)); | 282 | } while (unlikely(!res)); |
| 283 | 283 | ||
| 284 | res = temp & (1UL << bit); | 284 | res = temp & (1UL << bit); |
| 285 | } else { | 285 | } else { |
| 286 | volatile unsigned long *a = addr; | 286 | volatile unsigned long *a = addr; |
| 287 | unsigned long mask; | 287 | unsigned long mask; |
| 288 | unsigned long flags; | 288 | unsigned long flags; |
| 289 | 289 | ||
| 290 | a += nr >> SZLONG_LOG; | 290 | a += nr >> SZLONG_LOG; |
| 291 | mask = 1UL << bit; | 291 | mask = 1UL << bit; |
| 292 | raw_local_irq_save(flags); | 292 | raw_local_irq_save(flags); |
| 293 | res = (mask & *a); | 293 | res = (mask & *a); |
| 294 | *a |= mask; | 294 | *a |= mask; |
| 295 | raw_local_irq_restore(flags); | 295 | raw_local_irq_restore(flags); |
| 296 | } | 296 | } |
| 297 | 297 | ||
| 298 | smp_llsc_mb(); | 298 | smp_llsc_mb(); |
| 299 | 299 | ||
| 300 | return res != 0; | 300 | return res != 0; |
| 301 | } | 301 | } |
| 302 | 302 | ||
| 303 | /* | 303 | /* |
| 304 | * test_and_set_bit_lock - Set a bit and return its old value | 304 | * test_and_set_bit_lock - Set a bit and return its old value |
| 305 | * @nr: Bit to set | 305 | * @nr: Bit to set |
| 306 | * @addr: Address to count from | 306 | * @addr: Address to count from |
| 307 | * | 307 | * |
| 308 | * This operation is atomic and implies acquire ordering semantics | 308 | * This operation is atomic and implies acquire ordering semantics |
| 309 | * after the memory operation. | 309 | * after the memory operation. |
| 310 | */ | 310 | */ |
| 311 | static inline int test_and_set_bit_lock(unsigned long nr, | 311 | static inline int test_and_set_bit_lock(unsigned long nr, |
| 312 | volatile unsigned long *addr) | 312 | volatile unsigned long *addr) |
| 313 | { | 313 | { |
| 314 | unsigned short bit = nr & SZLONG_MASK; | 314 | unsigned short bit = nr & SZLONG_MASK; |
| 315 | unsigned long res; | 315 | unsigned long res; |
| 316 | 316 | ||
| 317 | if (kernel_uses_llsc && R10000_LLSC_WAR) { | 317 | if (kernel_uses_llsc && R10000_LLSC_WAR) { |
| 318 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 318 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
| 319 | unsigned long temp; | 319 | unsigned long temp; |
| 320 | 320 | ||
| 321 | __asm__ __volatile__( | 321 | __asm__ __volatile__( |
| 322 | " .set mips3 \n" | 322 | " .set mips3 \n" |
| 323 | "1: " __LL "%0, %1 # test_and_set_bit \n" | 323 | "1: " __LL "%0, %1 # test_and_set_bit \n" |
| 324 | " or %2, %0, %3 \n" | 324 | " or %2, %0, %3 \n" |
| 325 | " " __SC "%2, %1 \n" | 325 | " " __SC "%2, %1 \n" |
| 326 | " beqzl %2, 1b \n" | 326 | " beqzl %2, 1b \n" |
| 327 | " and %2, %0, %3 \n" | 327 | " and %2, %0, %3 \n" |
| 328 | " .set mips0 \n" | 328 | " .set mips0 \n" |
| 329 | : "=&r" (temp), "+m" (*m), "=&r" (res) | 329 | : "=&r" (temp), "+m" (*m), "=&r" (res) |
| 330 | : "r" (1UL << bit) | 330 | : "r" (1UL << bit) |
| 331 | : "memory"); | 331 | : "memory"); |
| 332 | } else if (kernel_uses_llsc) { | 332 | } else if (kernel_uses_llsc) { |
| 333 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 333 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
| 334 | unsigned long temp; | 334 | unsigned long temp; |
| 335 | 335 | ||
| 336 | do { | 336 | do { |
| 337 | __asm__ __volatile__( | 337 | __asm__ __volatile__( |
| 338 | " .set mips3 \n" | 338 | " .set mips3 \n" |
| 339 | " " __LL "%0, %1 # test_and_set_bit \n" | 339 | " " __LL "%0, %1 # test_and_set_bit \n" |
| 340 | " or %2, %0, %3 \n" | 340 | " or %2, %0, %3 \n" |
| 341 | " " __SC "%2, %1 \n" | 341 | " " __SC "%2, %1 \n" |
| 342 | " .set mips0 \n" | 342 | " .set mips0 \n" |
| 343 | : "=&r" (temp), "+m" (*m), "=&r" (res) | 343 | : "=&r" (temp), "+m" (*m), "=&r" (res) |
| 344 | : "r" (1UL << bit) | 344 | : "r" (1UL << bit) |
| 345 | : "memory"); | 345 | : "memory"); |
| 346 | } while (unlikely(!res)); | 346 | } while (unlikely(!res)); |
| 347 | 347 | ||
| 348 | res = temp & (1UL << bit); | 348 | res = temp & (1UL << bit); |
| 349 | } else { | 349 | } else { |
| 350 | volatile unsigned long *a = addr; | 350 | volatile unsigned long *a = addr; |
| 351 | unsigned long mask; | 351 | unsigned long mask; |
| 352 | unsigned long flags; | 352 | unsigned long flags; |
| 353 | 353 | ||
| 354 | a += nr >> SZLONG_LOG; | 354 | a += nr >> SZLONG_LOG; |
| 355 | mask = 1UL << bit; | 355 | mask = 1UL << bit; |
| 356 | raw_local_irq_save(flags); | 356 | raw_local_irq_save(flags); |
| 357 | res = (mask & *a); | 357 | res = (mask & *a); |
| 358 | *a |= mask; | 358 | *a |= mask; |
| 359 | raw_local_irq_restore(flags); | 359 | raw_local_irq_restore(flags); |
| 360 | } | 360 | } |
| 361 | 361 | ||
| 362 | smp_llsc_mb(); | 362 | smp_llsc_mb(); |
| 363 | 363 | ||
| 364 | return res != 0; | 364 | return res != 0; |
| 365 | } | 365 | } |
| 366 | /* | 366 | /* |
| 367 | * test_and_clear_bit - Clear a bit and return its old value | 367 | * test_and_clear_bit - Clear a bit and return its old value |
| 368 | * @nr: Bit to clear | 368 | * @nr: Bit to clear |
| 369 | * @addr: Address to count from | 369 | * @addr: Address to count from |
| 370 | * | 370 | * |
| 371 | * This operation is atomic and cannot be reordered. | 371 | * This operation is atomic and cannot be reordered. |
| 372 | * It also implies a memory barrier. | 372 | * It also implies a memory barrier. |
| 373 | */ | 373 | */ |
| 374 | static inline int test_and_clear_bit(unsigned long nr, | 374 | static inline int test_and_clear_bit(unsigned long nr, |
| 375 | volatile unsigned long *addr) | 375 | volatile unsigned long *addr) |
| 376 | { | 376 | { |
| 377 | unsigned short bit = nr & SZLONG_MASK; | 377 | unsigned short bit = nr & SZLONG_MASK; |
| 378 | unsigned long res; | 378 | unsigned long res; |
| 379 | 379 | ||
| 380 | smp_mb__before_llsc(); | 380 | smp_mb__before_llsc(); |
| 381 | 381 | ||
| 382 | if (kernel_uses_llsc && R10000_LLSC_WAR) { | 382 | if (kernel_uses_llsc && R10000_LLSC_WAR) { |
| 383 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 383 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
| 384 | unsigned long temp; | 384 | unsigned long temp; |
| 385 | 385 | ||
| 386 | __asm__ __volatile__( | 386 | __asm__ __volatile__( |
| 387 | " .set mips3 \n" | 387 | " .set mips3 \n" |
| 388 | "1: " __LL "%0, %1 # test_and_clear_bit \n" | 388 | "1: " __LL "%0, %1 # test_and_clear_bit \n" |
| 389 | " or %2, %0, %3 \n" | 389 | " or %2, %0, %3 \n" |
| 390 | " xor %2, %3 \n" | 390 | " xor %2, %3 \n" |
| 391 | " " __SC "%2, %1 \n" | 391 | " " __SC "%2, %1 \n" |
| 392 | " beqzl %2, 1b \n" | 392 | " beqzl %2, 1b \n" |
| 393 | " and %2, %0, %3 \n" | 393 | " and %2, %0, %3 \n" |
| 394 | " .set mips0 \n" | 394 | " .set mips0 \n" |
| 395 | : "=&r" (temp), "+m" (*m), "=&r" (res) | 395 | : "=&r" (temp), "+m" (*m), "=&r" (res) |
| 396 | : "r" (1UL << bit) | 396 | : "r" (1UL << bit) |
| 397 | : "memory"); | 397 | : "memory"); |
| 398 | #ifdef CONFIG_CPU_MIPSR2 | 398 | #ifdef CONFIG_CPU_MIPSR2 |
| 399 | } else if (kernel_uses_llsc && __builtin_constant_p(nr)) { | 399 | } else if (kernel_uses_llsc && __builtin_constant_p(nr)) { |
| 400 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 400 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
| 401 | unsigned long temp; | 401 | unsigned long temp; |
| 402 | 402 | ||
| 403 | do { | 403 | do { |
| 404 | __asm__ __volatile__( | 404 | __asm__ __volatile__( |
| 405 | " " __LL "%0, %1 # test_and_clear_bit \n" | 405 | " " __LL "%0, %1 # test_and_clear_bit \n" |
| 406 | " " __EXT "%2, %0, %3, 1 \n" | 406 | " " __EXT "%2, %0, %3, 1 \n" |
| 407 | " " __INS "%0, $0, %3, 1 \n" | 407 | " " __INS "%0, $0, %3, 1 \n" |
| 408 | " " __SC "%0, %1 \n" | 408 | " " __SC "%0, %1 \n" |
| 409 | : "=&r" (temp), "+m" (*m), "=&r" (res) | 409 | : "=&r" (temp), "+m" (*m), "=&r" (res) |
| 410 | : "ir" (bit) | 410 | : "ir" (bit) |
| 411 | : "memory"); | 411 | : "memory"); |
| 412 | } while (unlikely(!temp)); | 412 | } while (unlikely(!temp)); |
| 413 | #endif | 413 | #endif |
| 414 | } else if (kernel_uses_llsc) { | 414 | } else if (kernel_uses_llsc) { |
| 415 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 415 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
| 416 | unsigned long temp; | 416 | unsigned long temp; |
| 417 | 417 | ||
| 418 | do { | 418 | do { |
| 419 | __asm__ __volatile__( | 419 | __asm__ __volatile__( |
| 420 | " .set mips3 \n" | 420 | " .set mips3 \n" |
| 421 | " " __LL "%0, %1 # test_and_clear_bit \n" | 421 | " " __LL "%0, %1 # test_and_clear_bit \n" |
| 422 | " or %2, %0, %3 \n" | 422 | " or %2, %0, %3 \n" |
| 423 | " xor %2, %3 \n" | 423 | " xor %2, %3 \n" |
| 424 | " " __SC "%2, %1 \n" | 424 | " " __SC "%2, %1 \n" |
| 425 | " .set mips0 \n" | 425 | " .set mips0 \n" |
| 426 | : "=&r" (temp), "+m" (*m), "=&r" (res) | 426 | : "=&r" (temp), "+m" (*m), "=&r" (res) |
| 427 | : "r" (1UL << bit) | 427 | : "r" (1UL << bit) |
| 428 | : "memory"); | 428 | : "memory"); |
| 429 | } while (unlikely(!res)); | 429 | } while (unlikely(!res)); |
| 430 | 430 | ||
| 431 | res = temp & (1UL << bit); | 431 | res = temp & (1UL << bit); |
| 432 | } else { | 432 | } else { |
| 433 | volatile unsigned long *a = addr; | 433 | volatile unsigned long *a = addr; |
| 434 | unsigned long mask; | 434 | unsigned long mask; |
| 435 | unsigned long flags; | 435 | unsigned long flags; |
| 436 | 436 | ||
| 437 | a += nr >> SZLONG_LOG; | 437 | a += nr >> SZLONG_LOG; |
| 438 | mask = 1UL << bit; | 438 | mask = 1UL << bit; |
| 439 | raw_local_irq_save(flags); | 439 | raw_local_irq_save(flags); |
| 440 | res = (mask & *a); | 440 | res = (mask & *a); |
| 441 | *a &= ~mask; | 441 | *a &= ~mask; |
| 442 | raw_local_irq_restore(flags); | 442 | raw_local_irq_restore(flags); |
| 443 | } | 443 | } |
| 444 | 444 | ||
| 445 | smp_llsc_mb(); | 445 | smp_llsc_mb(); |
| 446 | 446 | ||
| 447 | return res != 0; | 447 | return res != 0; |
| 448 | } | 448 | } |
| 449 | 449 | ||
| 450 | /* | 450 | /* |
| 451 | * test_and_change_bit - Change a bit and return its old value | 451 | * test_and_change_bit - Change a bit and return its old value |
| 452 | * @nr: Bit to change | 452 | * @nr: Bit to change |
| 453 | * @addr: Address to count from | 453 | * @addr: Address to count from |
| 454 | * | 454 | * |
| 455 | * This operation is atomic and cannot be reordered. | 455 | * This operation is atomic and cannot be reordered. |
| 456 | * It also implies a memory barrier. | 456 | * It also implies a memory barrier. |
| 457 | */ | 457 | */ |
| 458 | static inline int test_and_change_bit(unsigned long nr, | 458 | static inline int test_and_change_bit(unsigned long nr, |
| 459 | volatile unsigned long *addr) | 459 | volatile unsigned long *addr) |
| 460 | { | 460 | { |
| 461 | unsigned short bit = nr & SZLONG_MASK; | 461 | unsigned short bit = nr & SZLONG_MASK; |
| 462 | unsigned long res; | 462 | unsigned long res; |
| 463 | 463 | ||
| 464 | smp_mb__before_llsc(); | 464 | smp_mb__before_llsc(); |
| 465 | 465 | ||
| 466 | if (kernel_uses_llsc && R10000_LLSC_WAR) { | 466 | if (kernel_uses_llsc && R10000_LLSC_WAR) { |
| 467 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 467 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
| 468 | unsigned long temp; | 468 | unsigned long temp; |
| 469 | 469 | ||
| 470 | __asm__ __volatile__( | 470 | __asm__ __volatile__( |
| 471 | " .set mips3 \n" | 471 | " .set mips3 \n" |
| 472 | "1: " __LL "%0, %1 # test_and_change_bit \n" | 472 | "1: " __LL "%0, %1 # test_and_change_bit \n" |
| 473 | " xor %2, %0, %3 \n" | 473 | " xor %2, %0, %3 \n" |
| 474 | " " __SC "%2, %1 \n" | 474 | " " __SC "%2, %1 \n" |
| 475 | " beqzl %2, 1b \n" | 475 | " beqzl %2, 1b \n" |
| 476 | " and %2, %0, %3 \n" | 476 | " and %2, %0, %3 \n" |
| 477 | " .set mips0 \n" | 477 | " .set mips0 \n" |
| 478 | : "=&r" (temp), "+m" (*m), "=&r" (res) | 478 | : "=&r" (temp), "+m" (*m), "=&r" (res) |
| 479 | : "r" (1UL << bit) | 479 | : "r" (1UL << bit) |
| 480 | : "memory"); | 480 | : "memory"); |
| 481 | } else if (kernel_uses_llsc) { | 481 | } else if (kernel_uses_llsc) { |
| 482 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 482 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
| 483 | unsigned long temp; | 483 | unsigned long temp; |
| 484 | 484 | ||
| 485 | do { | 485 | do { |
| 486 | __asm__ __volatile__( | 486 | __asm__ __volatile__( |
| 487 | " .set mips3 \n" | 487 | " .set mips3 \n" |
| 488 | " " __LL "%0, %1 # test_and_change_bit \n" | 488 | " " __LL "%0, %1 # test_and_change_bit \n" |
| 489 | " xor %2, %0, %3 \n" | 489 | " xor %2, %0, %3 \n" |
| 490 | " " __SC "\t%2, %1 \n" | 490 | " " __SC "\t%2, %1 \n" |
| 491 | " .set mips0 \n" | 491 | " .set mips0 \n" |
| 492 | : "=&r" (temp), "+m" (*m), "=&r" (res) | 492 | : "=&r" (temp), "+m" (*m), "=&r" (res) |
| 493 | : "r" (1UL << bit) | 493 | : "r" (1UL << bit) |
| 494 | : "memory"); | 494 | : "memory"); |
| 495 | } while (unlikely(!res)); | 495 | } while (unlikely(!res)); |
| 496 | 496 | ||
| 497 | res = temp & (1UL << bit); | 497 | res = temp & (1UL << bit); |
| 498 | } else { | 498 | } else { |
| 499 | volatile unsigned long *a = addr; | 499 | volatile unsigned long *a = addr; |
| 500 | unsigned long mask; | 500 | unsigned long mask; |
| 501 | unsigned long flags; | 501 | unsigned long flags; |
| 502 | 502 | ||
| 503 | a += nr >> SZLONG_LOG; | 503 | a += nr >> SZLONG_LOG; |
| 504 | mask = 1UL << bit; | 504 | mask = 1UL << bit; |
| 505 | raw_local_irq_save(flags); | 505 | raw_local_irq_save(flags); |
| 506 | res = (mask & *a); | 506 | res = (mask & *a); |
| 507 | *a ^= mask; | 507 | *a ^= mask; |
| 508 | raw_local_irq_restore(flags); | 508 | raw_local_irq_restore(flags); |
| 509 | } | 509 | } |
| 510 | 510 | ||
| 511 | smp_llsc_mb(); | 511 | smp_llsc_mb(); |
| 512 | 512 | ||
| 513 | return res != 0; | 513 | return res != 0; |
| 514 | } | 514 | } |
| 515 | 515 | ||
| 516 | #include <asm-generic/bitops/non-atomic.h> | 516 | #include <asm-generic/bitops/non-atomic.h> |
| 517 | 517 | ||
| 518 | /* | 518 | /* |
| 519 | * __clear_bit_unlock - Clears a bit in memory | 519 | * __clear_bit_unlock - Clears a bit in memory |
| 520 | * @nr: Bit to clear | 520 | * @nr: Bit to clear |
| 521 | * @addr: Address to start counting from | 521 | * @addr: Address to start counting from |
| 522 | * | 522 | * |
| 523 | * __clear_bit() is non-atomic and implies release semantics before the memory | 523 | * __clear_bit() is non-atomic and implies release semantics before the memory |
| 524 | * operation. It can be used for an unlock if no other CPUs can concurrently | 524 | * operation. It can be used for an unlock if no other CPUs can concurrently |
| 525 | * modify other bits in the word. | 525 | * modify other bits in the word. |
| 526 | */ | 526 | */ |
| 527 | static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr) | 527 | static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr) |
| 528 | { | 528 | { |
| 529 | smp_mb(); | 529 | smp_mb(); |
| 530 | __clear_bit(nr, addr); | 530 | __clear_bit(nr, addr); |
| 531 | } | 531 | } |
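test_and_set_bit_lock() and clear_bit_unlock()/__clear_bit_unlock() above are documented as an acquire/release pair. A rough userspace analogy of that pairing built on C11 atomics (not the kernel helpers; try_lock_bit and unlock_bit are invented names for illustration):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_ulong flags;              /* zero-initialized bitmap word */
    #define BUSY_BIT 0UL

    static int try_lock_bit(void)
    {
            unsigned long old = atomic_fetch_or_explicit(&flags, 1UL << BUSY_BIT,
                                                         memory_order_acquire);

            return !(old & (1UL << BUSY_BIT));      /* 1 if we took the bit */
    }

    static void unlock_bit(void)
    {
            atomic_fetch_and_explicit(&flags, ~(1UL << BUSY_BIT),
                                      memory_order_release);
    }

    int main(void)
    {
            printf("first try:    %d\n", try_lock_bit());    /* 1 */
            printf("second try:   %d\n", try_lock_bit());    /* 0, already held */
            unlock_bit();
            printf("after unlock: %d\n", try_lock_bit());    /* 1 again */
            return 0;
    }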
| 532 | 532 | ||
| 533 | /* | 533 | /* |
| 534 | * Return the bit position (0..63) of the most significant 1 bit in a word | 534 | * Return the bit position (0..63) of the most significant 1 bit in a word |
| 535 | * Returns -1 if no 1 bit exists | 535 | * Returns -1 if no 1 bit exists |
| 536 | */ | 536 | */ |
| 537 | static inline unsigned long __fls(unsigned long word) | 537 | static inline unsigned long __fls(unsigned long word) |
| 538 | { | 538 | { |
| 539 | int num; | 539 | int num; |
| 540 | 540 | ||
| 541 | if (BITS_PER_LONG == 32 && | 541 | if (BITS_PER_LONG == 32 && |
| 542 | __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { | 542 | __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { |
| 543 | __asm__( | 543 | __asm__( |
| 544 | " .set push \n" | 544 | " .set push \n" |
| 545 | " .set mips32 \n" | 545 | " .set mips32 \n" |
| 546 | " clz %0, %1 \n" | 546 | " clz %0, %1 \n" |
| 547 | " .set pop \n" | 547 | " .set pop \n" |
| 548 | : "=r" (num) | 548 | : "=r" (num) |
| 549 | : "r" (word)); | 549 | : "r" (word)); |
| 550 | 550 | ||
| 551 | return 31 - num; | 551 | return 31 - num; |
| 552 | } | 552 | } |
| 553 | 553 | ||
| 554 | if (BITS_PER_LONG == 64 && | 554 | if (BITS_PER_LONG == 64 && |
| 555 | __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) { | 555 | __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) { |
| 556 | __asm__( | 556 | __asm__( |
| 557 | " .set push \n" | 557 | " .set push \n" |
| 558 | " .set mips64 \n" | 558 | " .set mips64 \n" |
| 559 | " dclz %0, %1 \n" | 559 | " dclz %0, %1 \n" |
| 560 | " .set pop \n" | 560 | " .set pop \n" |
| 561 | : "=r" (num) | 561 | : "=r" (num) |
| 562 | : "r" (word)); | 562 | : "r" (word)); |
| 563 | 563 | ||
| 564 | return 63 - num; | 564 | return 63 - num; |
| 565 | } | 565 | } |
| 566 | 566 | ||
| 567 | num = BITS_PER_LONG - 1; | 567 | num = BITS_PER_LONG - 1; |
| 568 | 568 | ||
| 569 | #if BITS_PER_LONG == 64 | 569 | #if BITS_PER_LONG == 64 |
| 570 | if (!(word & (~0ul << 32))) { | 570 | if (!(word & (~0ul << 32))) { |
| 571 | num -= 32; | 571 | num -= 32; |
| 572 | word <<= 32; | 572 | word <<= 32; |
| 573 | } | 573 | } |
| 574 | #endif | 574 | #endif |
| 575 | if (!(word & (~0ul << (BITS_PER_LONG-16)))) { | 575 | if (!(word & (~0ul << (BITS_PER_LONG-16)))) { |
| 576 | num -= 16; | 576 | num -= 16; |
| 577 | word <<= 16; | 577 | word <<= 16; |
| 578 | } | 578 | } |
| 579 | if (!(word & (~0ul << (BITS_PER_LONG-8)))) { | 579 | if (!(word & (~0ul << (BITS_PER_LONG-8)))) { |
| 580 | num -= 8; | 580 | num -= 8; |
| 581 | word <<= 8; | 581 | word <<= 8; |
| 582 | } | 582 | } |
| 583 | if (!(word & (~0ul << (BITS_PER_LONG-4)))) { | 583 | if (!(word & (~0ul << (BITS_PER_LONG-4)))) { |
| 584 | num -= 4; | 584 | num -= 4; |
| 585 | word <<= 4; | 585 | word <<= 4; |
| 586 | } | 586 | } |
| 587 | if (!(word & (~0ul << (BITS_PER_LONG-2)))) { | 587 | if (!(word & (~0ul << (BITS_PER_LONG-2)))) { |
| 588 | num -= 2; | 588 | num -= 2; |
| 589 | word <<= 2; | 589 | word <<= 2; |
| 590 | } | 590 | } |
| 591 | if (!(word & (~0ul << (BITS_PER_LONG-1)))) | 591 | if (!(word & (~0ul << (BITS_PER_LONG-1)))) |
| 592 | num -= 1; | 592 | num -= 1; |
| 593 | return num; | 593 | return num; |
| 594 | } | 594 | } |
| 595 | 595 | ||
| 596 | /* | 596 | /* |
| 597 | * __ffs - find first bit in word. | 597 | * __ffs - find first bit in word. |
| 598 | * @word: The word to search | 598 | * @word: The word to search |
| 599 | * | 599 | * |
| 600 | * Returns 0..SZLONG-1 | 600 | * Returns 0..SZLONG-1 |
| 601 | * Undefined if no bit exists, so code should check against 0 first. | 601 | * Undefined if no bit exists, so code should check against 0 first. |
| 602 | */ | 602 | */ |
| 603 | static inline unsigned long __ffs(unsigned long word) | 603 | static inline unsigned long __ffs(unsigned long word) |
| 604 | { | 604 | { |
| 605 | return __fls(word & -word); | 605 | return __fls(word & -word); |
| 606 | } | 606 | } |
| 607 | 607 | ||
| 608 | /* | 608 | /* |
| 609 | * fls - find last bit set. | 609 | * fls - find last bit set. |
| 610 | * @word: The word to search | 610 | * @word: The word to search |
| 611 | * | 611 | * |
| 612 | * This is defined the same way as ffs. | 612 | * This is defined the same way as ffs. |
| 613 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. | 613 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. |
| 614 | */ | 614 | */ |
| 615 | static inline int fls(int x) | 615 | static inline int fls(int x) |
| 616 | { | 616 | { |
| 617 | int r; | 617 | int r; |
| 618 | 618 | ||
| 619 | if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { | 619 | if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { |
| 620 | __asm__("clz %0, %1" : "=r" (x) : "r" (x)); | 620 | __asm__("clz %0, %1" : "=r" (x) : "r" (x)); |
| 621 | 621 | ||
| 622 | return 32 - x; | 622 | return 32 - x; |
| 623 | } | 623 | } |
| 624 | 624 | ||
| 625 | r = 32; | 625 | r = 32; |
| 626 | if (!x) | 626 | if (!x) |
| 627 | return 0; | 627 | return 0; |
| 628 | if (!(x & 0xffff0000u)) { | 628 | if (!(x & 0xffff0000u)) { |
| 629 | x <<= 16; | 629 | x <<= 16; |
| 630 | r -= 16; | 630 | r -= 16; |
| 631 | } | 631 | } |
| 632 | if (!(x & 0xff000000u)) { | 632 | if (!(x & 0xff000000u)) { |
| 633 | x <<= 8; | 633 | x <<= 8; |
| 634 | r -= 8; | 634 | r -= 8; |
| 635 | } | 635 | } |
| 636 | if (!(x & 0xf0000000u)) { | 636 | if (!(x & 0xf0000000u)) { |
| 637 | x <<= 4; | 637 | x <<= 4; |
| 638 | r -= 4; | 638 | r -= 4; |
| 639 | } | 639 | } |
| 640 | if (!(x & 0xc0000000u)) { | 640 | if (!(x & 0xc0000000u)) { |
| 641 | x <<= 2; | 641 | x <<= 2; |
| 642 | r -= 2; | 642 | r -= 2; |
| 643 | } | 643 | } |
| 644 | if (!(x & 0x80000000u)) { | 644 | if (!(x & 0x80000000u)) { |
| 645 | x <<= 1; | 645 | x <<= 1; |
| 646 | r -= 1; | 646 | r -= 1; |
| 647 | } | 647 | } |
| 648 | return r; | 648 | return r; |
| 649 | } | 649 | } |
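When clz is unavailable, the fls() fallback above narrows in on the top set bit in 16/8/4/2/1 steps; with clz it is simply 32 - clz(x). A portable model of the fallback path (a sketch, not the kernel function itself):

    #include <stdio.h>

    static int fls32(unsigned int x)
    {
            int r = 32;

            if (!x)
                    return 0;
            if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; }
            if (!(x & 0xff000000u)) { x <<= 8;  r -= 8;  }
            if (!(x & 0xf0000000u)) { x <<= 4;  r -= 4;  }
            if (!(x & 0xc0000000u)) { x <<= 2;  r -= 2;  }
            if (!(x & 0x80000000u)) { r -= 1; }
            return r;
    }

    int main(void)
    {
            /* Matches the documented identities: fls(0) = 0, fls(1) = 1,
             * fls(0x80000000) = 32; the last sample prints 24. */
            printf("%d %d %d %d\n",
                   fls32(0), fls32(1), fls32(0x80000000u), fls32(0x00f00000u));
            return 0;
    }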
| 650 | 650 | ||
| 651 | #include <asm-generic/bitops/fls64.h> | 651 | #include <asm-generic/bitops/fls64.h> |
| 652 | 652 | ||
| 653 | /* | 653 | /* |
| 654 | * ffs - find first bit set. | 654 | * ffs - find first bit set. |
| 655 | * @word: The word to search | 655 | * @word: The word to search |
| 656 | * | 656 | * |
| 657 | * This is defined the same way as | 657 | * This is defined the same way as |
| 658 | * the libc and compiler builtin ffs routines, therefore | 658 | * the libc and compiler builtin ffs routines, therefore |
| 659 | * differs in spirit from the above ffz (man ffs). | 659 | * differs in spirit from the above ffz (man ffs). |
| 660 | */ | 660 | */ |
| 661 | static inline int ffs(int word) | 661 | static inline int ffs(int word) |
| 662 | { | 662 | { |
| 663 | if (!word) | 663 | if (!word) |
| 664 | return 0; | 664 | return 0; |
| 665 | 665 | ||
| 666 | return fls(word & -word); | 666 | return fls(word & -word); |
| 667 | } | 667 | } |
| 668 | 668 | ||
| 669 | #include <asm-generic/bitops/ffz.h> | 669 | #include <asm-generic/bitops/ffz.h> |
| 670 | #include <asm-generic/bitops/find.h> | 670 | #include <asm-generic/bitops/find.h> |
| 671 | 671 | ||
| 672 | #ifdef __KERNEL__ | 672 | #ifdef __KERNEL__ |
| 673 | 673 | ||
| 674 | #include <asm-generic/bitops/sched.h> | 674 | #include <asm-generic/bitops/sched.h> |
| 675 | 675 | ||
| 676 | #include <asm/arch_hweight.h> | 676 | #include <asm/arch_hweight.h> |
| 677 | #include <asm-generic/bitops/const_hweight.h> | 677 | #include <asm-generic/bitops/const_hweight.h> |
| 678 | 678 | ||
| 679 | #include <asm-generic/bitops/le.h> | 679 | #include <asm-generic/bitops/le.h> |
| 680 | #include <asm-generic/bitops/ext2-atomic.h> | 680 | #include <asm-generic/bitops/ext2-atomic.h> |
| 681 | #include <asm-generic/bitops/minix.h> | ||
| 682 | 681 | ||
| 683 | #endif /* __KERNEL__ */ | 682 | #endif /* __KERNEL__ */ |
| 684 | 683 | ||
| 685 | #endif /* _ASM_BITOPS_H */ | 684 | #endif /* _ASM_BITOPS_H */ |
| 686 | 685 |
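Every operation in the MIPS header above starts from the same arithmetic: bit nr lives in word nr >> SZLONG_LOG at position nr & SZLONG_MASK. A small standalone sketch of that indexing, assuming the 32-bit values (SZLONG_LOG = 5) rather than anything MIPS-specific:

    #include <stdio.h>

    #define SZLONG_LOG  5
    #define SZLONG_MASK 31UL

    static void locate(unsigned long nr)
    {
            printf("bit %3lu -> word %lu, mask 0x%08lx\n",
                   nr, nr >> SZLONG_LOG, 1UL << (nr & SZLONG_MASK));
    }

    int main(void)
    {
            locate(0);      /* word 0, mask 0x00000001 */
            locate(31);     /* word 0, mask 0x80000000 */
            locate(32);     /* word 1, mask 0x00000001 */
            locate(100);    /* word 3, mask 0x00000010 */
            return 0;
    }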
arch/mn10300/include/asm/bitops.h
| 1 | /* MN10300 bit operations | 1 | /* MN10300 bit operations |
| 2 | * | 2 | * |
| 3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | 3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. |
| 4 | * Written by David Howells (dhowells@redhat.com) | 4 | * Written by David Howells (dhowells@redhat.com) |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU General Public Licence | 7 | * modify it under the terms of the GNU General Public Licence |
| 8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
| 9 | * 2 of the Licence, or (at your option) any later version. | 9 | * 2 of the Licence, or (at your option) any later version. |
| 10 | * | 10 | * |
| 11 | * These have to be done with inline assembly: that way the bit-setting | 11 | * These have to be done with inline assembly: that way the bit-setting |
| 12 | * is guaranteed to be atomic. All bit operations return 0 if the bit | 12 | * is guaranteed to be atomic. All bit operations return 0 if the bit |
| 13 | * was cleared before the operation and != 0 if it was not. | 13 | * was cleared before the operation and != 0 if it was not. |
| 14 | * | 14 | * |
| 15 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). | 15 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). |
| 16 | */ | 16 | */ |
| 17 | #ifndef __ASM_BITOPS_H | 17 | #ifndef __ASM_BITOPS_H |
| 18 | #define __ASM_BITOPS_H | 18 | #define __ASM_BITOPS_H |
| 19 | 19 | ||
| 20 | #include <asm/cpu-regs.h> | 20 | #include <asm/cpu-regs.h> |
| 21 | 21 | ||
| 22 | #define smp_mb__before_clear_bit() barrier() | 22 | #define smp_mb__before_clear_bit() barrier() |
| 23 | #define smp_mb__after_clear_bit() barrier() | 23 | #define smp_mb__after_clear_bit() barrier() |
| 24 | 24 | ||
| 25 | /* | 25 | /* |
| 26 | * set bit | 26 | * set bit |
| 27 | */ | 27 | */ |
| 28 | #define __set_bit(nr, addr) \ | 28 | #define __set_bit(nr, addr) \ |
| 29 | ({ \ | 29 | ({ \ |
| 30 | volatile unsigned char *_a = (unsigned char *)(addr); \ | 30 | volatile unsigned char *_a = (unsigned char *)(addr); \ |
| 31 | const unsigned shift = (nr) & 7; \ | 31 | const unsigned shift = (nr) & 7; \ |
| 32 | _a += (nr) >> 3; \ | 32 | _a += (nr) >> 3; \ |
| 33 | \ | 33 | \ |
| 34 | asm volatile("bset %2,(%1) # set_bit reg" \ | 34 | asm volatile("bset %2,(%1) # set_bit reg" \ |
| 35 | : "=m"(*_a) \ | 35 | : "=m"(*_a) \ |
| 36 | : "a"(_a), "d"(1 << shift), "m"(*_a) \ | 36 | : "a"(_a), "d"(1 << shift), "m"(*_a) \ |
| 37 | : "memory", "cc"); \ | 37 | : "memory", "cc"); \ |
| 38 | }) | 38 | }) |
| 39 | 39 | ||
| 40 | #define set_bit(nr, addr) __set_bit((nr), (addr)) | 40 | #define set_bit(nr, addr) __set_bit((nr), (addr)) |
| 41 | 41 | ||
| 42 | /* | 42 | /* |
| 43 | * clear bit | 43 | * clear bit |
| 44 | */ | 44 | */ |
| 45 | #define ___clear_bit(nr, addr) \ | 45 | #define ___clear_bit(nr, addr) \ |
| 46 | ({ \ | 46 | ({ \ |
| 47 | volatile unsigned char *_a = (unsigned char *)(addr); \ | 47 | volatile unsigned char *_a = (unsigned char *)(addr); \ |
| 48 | const unsigned shift = (nr) & 7; \ | 48 | const unsigned shift = (nr) & 7; \ |
| 49 | _a += (nr) >> 3; \ | 49 | _a += (nr) >> 3; \ |
| 50 | \ | 50 | \ |
| 51 | asm volatile("bclr %2,(%1) # clear_bit reg" \ | 51 | asm volatile("bclr %2,(%1) # clear_bit reg" \ |
| 52 | : "=m"(*_a) \ | 52 | : "=m"(*_a) \ |
| 53 | : "a"(_a), "d"(1 << shift), "m"(*_a) \ | 53 | : "a"(_a), "d"(1 << shift), "m"(*_a) \ |
| 54 | : "memory", "cc"); \ | 54 | : "memory", "cc"); \ |
| 55 | }) | 55 | }) |
| 56 | 56 | ||
| 57 | #define clear_bit(nr, addr) ___clear_bit((nr), (addr)) | 57 | #define clear_bit(nr, addr) ___clear_bit((nr), (addr)) |
| 58 | 58 | ||
| 59 | 59 | ||
| 60 | static inline void __clear_bit(unsigned long nr, volatile void *addr) | 60 | static inline void __clear_bit(unsigned long nr, volatile void *addr) |
| 61 | { | 61 | { |
| 62 | unsigned int *a = (unsigned int *) addr; | 62 | unsigned int *a = (unsigned int *) addr; |
| 63 | int mask; | 63 | int mask; |
| 64 | 64 | ||
| 65 | a += nr >> 5; | 65 | a += nr >> 5; |
| 66 | mask = 1 << (nr & 0x1f); | 66 | mask = 1 << (nr & 0x1f); |
| 67 | *a &= ~mask; | 67 | *a &= ~mask; |
| 68 | } | 68 | } |
| 69 | 69 | ||
| 70 | /* | 70 | /* |
| 71 | * test bit | 71 | * test bit |
| 72 | */ | 72 | */ |
| 73 | static inline int test_bit(unsigned long nr, const volatile void *addr) | 73 | static inline int test_bit(unsigned long nr, const volatile void *addr) |
| 74 | { | 74 | { |
| 75 | return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31)); | 75 | return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31)); |
| 76 | } | 76 | } |
| 77 | 77 | ||
| 78 | /* | 78 | /* |
| 79 | * change bit | 79 | * change bit |
| 80 | */ | 80 | */ |
| 81 | static inline void __change_bit(unsigned long nr, volatile void *addr) | 81 | static inline void __change_bit(unsigned long nr, volatile void *addr) |
| 82 | { | 82 | { |
| 83 | int mask; | 83 | int mask; |
| 84 | unsigned int *a = (unsigned int *) addr; | 84 | unsigned int *a = (unsigned int *) addr; |
| 85 | 85 | ||
| 86 | a += nr >> 5; | 86 | a += nr >> 5; |
| 87 | mask = 1 << (nr & 0x1f); | 87 | mask = 1 << (nr & 0x1f); |
| 88 | *a ^= mask; | 88 | *a ^= mask; |
| 89 | } | 89 | } |
| 90 | 90 | ||
| 91 | extern void change_bit(unsigned long nr, volatile void *addr); | 91 | extern void change_bit(unsigned long nr, volatile void *addr); |
| 92 | 92 | ||
| 93 | /* | 93 | /* |
| 94 | * test and set bit | 94 | * test and set bit |
| 95 | */ | 95 | */ |
| 96 | #define __test_and_set_bit(nr,addr) \ | 96 | #define __test_and_set_bit(nr,addr) \ |
| 97 | ({ \ | 97 | ({ \ |
| 98 | volatile unsigned char *_a = (unsigned char *)(addr); \ | 98 | volatile unsigned char *_a = (unsigned char *)(addr); \ |
| 99 | const unsigned shift = (nr) & 7; \ | 99 | const unsigned shift = (nr) & 7; \ |
| 100 | unsigned epsw; \ | 100 | unsigned epsw; \ |
| 101 | _a += (nr) >> 3; \ | 101 | _a += (nr) >> 3; \ |
| 102 | \ | 102 | \ |
| 103 | asm volatile("bset %3,(%2) # test_set_bit reg\n" \ | 103 | asm volatile("bset %3,(%2) # test_set_bit reg\n" \ |
| 104 | "mov epsw,%1" \ | 104 | "mov epsw,%1" \ |
| 105 | : "=m"(*_a), "=d"(epsw) \ | 105 | : "=m"(*_a), "=d"(epsw) \ |
| 106 | : "a"(_a), "d"(1 << shift), "m"(*_a) \ | 106 | : "a"(_a), "d"(1 << shift), "m"(*_a) \ |
| 107 | : "memory", "cc"); \ | 107 | : "memory", "cc"); \ |
| 108 | \ | 108 | \ |
| 109 | !(epsw & EPSW_FLAG_Z); \ | 109 | !(epsw & EPSW_FLAG_Z); \ |
| 110 | }) | 110 | }) |
| 111 | 111 | ||
| 112 | #define test_and_set_bit(nr, addr) __test_and_set_bit((nr), (addr)) | 112 | #define test_and_set_bit(nr, addr) __test_and_set_bit((nr), (addr)) |
| 113 | 113 | ||
| 114 | /* | 114 | /* |
| 115 | * test and clear bit | 115 | * test and clear bit |
| 116 | */ | 116 | */ |
| 117 | #define __test_and_clear_bit(nr, addr) \ | 117 | #define __test_and_clear_bit(nr, addr) \ |
| 118 | ({ \ | 118 | ({ \ |
| 119 | volatile unsigned char *_a = (unsigned char *)(addr); \ | 119 | volatile unsigned char *_a = (unsigned char *)(addr); \ |
| 120 | const unsigned shift = (nr) & 7; \ | 120 | const unsigned shift = (nr) & 7; \ |
| 121 | unsigned epsw; \ | 121 | unsigned epsw; \ |
| 122 | _a += (nr) >> 3; \ | 122 | _a += (nr) >> 3; \ |
| 123 | \ | 123 | \ |
| 124 | asm volatile("bclr %3,(%2) # test_clear_bit reg\n" \ | 124 | asm volatile("bclr %3,(%2) # test_clear_bit reg\n" \ |
| 125 | "mov epsw,%1" \ | 125 | "mov epsw,%1" \ |
| 126 | : "=m"(*_a), "=d"(epsw) \ | 126 | : "=m"(*_a), "=d"(epsw) \ |
| 127 | : "a"(_a), "d"(1 << shift), "m"(*_a) \ | 127 | : "a"(_a), "d"(1 << shift), "m"(*_a) \ |
| 128 | : "memory", "cc"); \ | 128 | : "memory", "cc"); \ |
| 129 | \ | 129 | \ |
| 130 | !(epsw & EPSW_FLAG_Z); \ | 130 | !(epsw & EPSW_FLAG_Z); \ |
| 131 | }) | 131 | }) |
| 132 | 132 | ||
| 133 | #define test_and_clear_bit(nr, addr) __test_and_clear_bit((nr), (addr)) | 133 | #define test_and_clear_bit(nr, addr) __test_and_clear_bit((nr), (addr)) |
| 134 | 134 | ||
| 135 | /* | 135 | /* |
| 136 | * test and change bit | 136 | * test and change bit |
| 137 | */ | 137 | */ |
| 138 | static inline int __test_and_change_bit(unsigned long nr, volatile void *addr) | 138 | static inline int __test_and_change_bit(unsigned long nr, volatile void *addr) |
| 139 | { | 139 | { |
| 140 | int mask, retval; | 140 | int mask, retval; |
| 141 | unsigned int *a = (unsigned int *)addr; | 141 | unsigned int *a = (unsigned int *)addr; |
| 142 | 142 | ||
| 143 | a += nr >> 5; | 143 | a += nr >> 5; |
| 144 | mask = 1 << (nr & 0x1f); | 144 | mask = 1 << (nr & 0x1f); |
| 145 | retval = (mask & *a) != 0; | 145 | retval = (mask & *a) != 0; |
| 146 | *a ^= mask; | 146 | *a ^= mask; |
| 147 | 147 | ||
| 148 | return retval; | 148 | return retval; |
| 149 | } | 149 | } |
| 150 | 150 | ||
| 151 | extern int test_and_change_bit(unsigned long nr, volatile void *addr); | 151 | extern int test_and_change_bit(unsigned long nr, volatile void *addr); |
| 152 | 152 | ||
| 153 | #include <asm-generic/bitops/lock.h> | 153 | #include <asm-generic/bitops/lock.h> |
| 154 | 154 | ||
| 155 | #ifdef __KERNEL__ | 155 | #ifdef __KERNEL__ |
| 156 | 156 | ||
| 157 | /** | 157 | /** |
| 158 | * __ffs - find first bit set | 158 | * __ffs - find first bit set |
| 159 | * @x: the word to search | 159 | * @x: the word to search |
| 160 | * | 160 | * |
| 161 | * - return 31..0 to indicate which of bits 31..0 is the least significant bit set | 161 | * - return 31..0 to indicate which of bits 31..0 is the least significant bit set |
| 162 | * - if no bits are set in x, the result is undefined | 162 | * - if no bits are set in x, the result is undefined |
| 163 | */ | 163 | */ |
| 164 | static inline __attribute__((const)) | 164 | static inline __attribute__((const)) |
| 165 | unsigned long __ffs(unsigned long x) | 165 | unsigned long __ffs(unsigned long x) |
| 166 | { | 166 | { |
| 167 | int bit; | 167 | int bit; |
| 168 | asm("bsch %2,%0" : "=r"(bit) : "0"(0), "r"(x & -x) : "cc"); | 168 | asm("bsch %2,%0" : "=r"(bit) : "0"(0), "r"(x & -x) : "cc"); |
| 169 | return bit; | 169 | return bit; |
| 170 | } | 170 | } |
| 171 | 171 | ||
| 172 | /* | 172 | /* |
| 173 | * special slimline version of fls() for calculating ilog2_u32() | 173 | * special slimline version of fls() for calculating ilog2_u32() |
| 174 | * - note: no protection against n == 0 | 174 | * - note: no protection against n == 0 |
| 175 | */ | 175 | */ |
| 176 | static inline __attribute__((const)) | 176 | static inline __attribute__((const)) |
| 177 | int __ilog2_u32(u32 n) | 177 | int __ilog2_u32(u32 n) |
| 178 | { | 178 | { |
| 179 | int bit; | 179 | int bit; |
| 180 | asm("bsch %2,%0" : "=r"(bit) : "0"(0), "r"(n) : "cc"); | 180 | asm("bsch %2,%0" : "=r"(bit) : "0"(0), "r"(n) : "cc"); |
| 181 | return bit; | 181 | return bit; |
| 182 | } | 182 | } |
| 183 | 183 | ||
| 184 | /** | 184 | /** |
| 185 | * fls - find last bit set | 185 | * fls - find last bit set |
| 186 | * @x: the word to search | 186 | * @x: the word to search |
| 187 | * | 187 | * |
| 188 | * This is defined the same way as ffs: | 188 | * This is defined the same way as ffs: |
| 189 | * - return 32..1 to indicate bit 31..0 most significant bit set | 189 | * - return 32..1 to indicate bit 31..0 most significant bit set |
| 190 | * - return 0 to indicate no bits set | 190 | * - return 0 to indicate no bits set |
| 191 | */ | 191 | */ |
| 192 | static inline __attribute__((const)) | 192 | static inline __attribute__((const)) |
| 193 | int fls(int x) | 193 | int fls(int x) |
| 194 | { | 194 | { |
| 195 | return (x != 0) ? __ilog2_u32(x) + 1 : 0; | 195 | return (x != 0) ? __ilog2_u32(x) + 1 : 0; |
| 196 | } | 196 | } |
| 197 | 197 | ||
| 198 | /** | 198 | /** |
| 199 | * __fls - find last (most-significant) set bit in a long word | 199 | * __fls - find last (most-significant) set bit in a long word |
| 200 | * @word: the word to search | 200 | * @word: the word to search |
| 201 | * | 201 | * |
| 202 | * Undefined if no set bit exists, so code should check against 0 first. | 202 | * Undefined if no set bit exists, so code should check against 0 first. |
| 203 | */ | 203 | */ |
| 204 | static inline unsigned long __fls(unsigned long word) | 204 | static inline unsigned long __fls(unsigned long word) |
| 205 | { | 205 | { |
| 206 | return __ilog2_u32(word); | 206 | return __ilog2_u32(word); |
| 207 | } | 207 | } |
| 208 | 208 | ||
| 209 | /** | 209 | /** |
| 210 | * ffs - find first bit set | 210 | * ffs - find first bit set |
| 211 | * @x: the word to search | 211 | * @x: the word to search |
| 212 | * | 212 | * |
| 213 | * - return 32..1 to indicate which of bits 31..0 is the least significant bit set | 213 | * - return 32..1 to indicate which of bits 31..0 is the least significant bit set |
| 214 | * - return 0 to indicate no bits set | 214 | * - return 0 to indicate no bits set |
| 215 | */ | 215 | */ |
| 216 | static inline __attribute__((const)) | 216 | static inline __attribute__((const)) |
| 217 | int ffs(int x) | 217 | int ffs(int x) |
| 218 | { | 218 | { |
| 219 | /* Note: (x & -x) gives us a mask that is the least significant | 219 | /* Note: (x & -x) gives us a mask that is the least significant |
| 220 | * (rightmost) 1-bit of the value in x. | 220 | * (rightmost) 1-bit of the value in x. |
| 221 | */ | 221 | */ |
| 222 | return fls(x & -x); | 222 | return fls(x & -x); |
| 223 | } | 223 | } |
| 224 | 224 | ||
| 225 | #include <asm-generic/bitops/ffz.h> | 225 | #include <asm-generic/bitops/ffz.h> |
| 226 | #include <asm-generic/bitops/fls64.h> | 226 | #include <asm-generic/bitops/fls64.h> |
| 227 | #include <asm-generic/bitops/find.h> | 227 | #include <asm-generic/bitops/find.h> |
| 228 | #include <asm-generic/bitops/sched.h> | 228 | #include <asm-generic/bitops/sched.h> |
| 229 | #include <asm-generic/bitops/hweight.h> | 229 | #include <asm-generic/bitops/hweight.h> |
| 230 | 230 | ||
| 231 | #define ext2_set_bit_atomic(lock, nr, addr) \ | 231 | #define ext2_set_bit_atomic(lock, nr, addr) \ |
| 232 | test_and_set_bit((nr), (addr)) | 232 | test_and_set_bit((nr), (addr)) |
| 233 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | 233 | #define ext2_clear_bit_atomic(lock, nr, addr) \ |
| 234 | test_and_clear_bit((nr), (addr)) | 234 | test_and_clear_bit((nr), (addr)) |
| 235 | 235 | ||
| 236 | #include <asm-generic/bitops/le.h> | 236 | #include <asm-generic/bitops/le.h> |
| 237 | #include <asm-generic/bitops/minix-le.h> | ||
| 238 | 237 | ||
| 239 | #endif /* __KERNEL__ */ | 238 | #endif /* __KERNEL__ */ |
| 240 | #endif /* __ASM_BITOPS_H */ | 239 | #endif /* __ASM_BITOPS_H */ |
| 241 | 240 |
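The mn10300 section above documents fls()/ffs() purely through their return conventions and the (x & -x) trick. The following stand-alone C sketch (reference only, not part of this diff; the ref_fls/ref_ffs names are invented here) reproduces those semantics so the conventions can be checked on any host.

/*
 * Reference-only sketch, not part of this patch: portable C versions of the
 * fls()/ffs() conventions documented above.  fls() returns 32..1 for the
 * most significant set bit and 0 when no bit is set; ffs(x) is fls(x & -x),
 * because x & -x isolates the least significant set bit.
 */
#include <assert.h>
#include <stdint.h>

static int ref_fls(uint32_t x)
{
	int bit = 0;

	while (x) {		/* count how far up the highest set bit is, 1-based */
		x >>= 1;
		bit++;
	}
	return bit;
}

static int ref_ffs(uint32_t x)
{
	return ref_fls(x & -x);	/* x & -x keeps only the lowest set bit */
}

int main(void)
{
	assert(ref_fls(0) == 0);
	assert(ref_fls(1) == 1);
	assert(ref_fls(0x80000000u) == 32);
	assert(ref_ffs(0x80000000u) == 32);	/* only bit 31 set */
	assert(ref_ffs(0x00000006u) == 2);	/* lowest set bit is bit 1 */
	return 0;
}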
arch/parisc/include/asm/bitops.h
| 1 | #ifndef _PARISC_BITOPS_H | 1 | #ifndef _PARISC_BITOPS_H |
| 2 | #define _PARISC_BITOPS_H | 2 | #define _PARISC_BITOPS_H |
| 3 | 3 | ||
| 4 | #ifndef _LINUX_BITOPS_H | 4 | #ifndef _LINUX_BITOPS_H |
| 5 | #error only <linux/bitops.h> can be included directly | 5 | #error only <linux/bitops.h> can be included directly |
| 6 | #endif | 6 | #endif |
| 7 | 7 | ||
| 8 | #include <linux/compiler.h> | 8 | #include <linux/compiler.h> |
| 9 | #include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */ | 9 | #include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */ |
| 10 | #include <asm/byteorder.h> | 10 | #include <asm/byteorder.h> |
| 11 | #include <asm/atomic.h> | 11 | #include <asm/atomic.h> |
| 12 | 12 | ||
| 13 | /* | 13 | /* |
| 14 | * HP-PARISC specific bit operations. | 14 | * HP-PARISC specific bit operations. |
| 15 | * For a detailed description of the functions please refer | 15 | * For a detailed description of the functions please refer |
| 16 | * to include/asm-i386/bitops.h or kerneldoc | 16 | * to include/asm-i386/bitops.h or kerneldoc |
| 17 | */ | 17 | */ |
| 18 | 18 | ||
| 19 | #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1)) | 19 | #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1)) |
| 20 | 20 | ||
| 21 | 21 | ||
| 22 | #define smp_mb__before_clear_bit() smp_mb() | 22 | #define smp_mb__before_clear_bit() smp_mb() |
| 23 | #define smp_mb__after_clear_bit() smp_mb() | 23 | #define smp_mb__after_clear_bit() smp_mb() |
| 24 | 24 | ||
| 25 | /* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion | 25 | /* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion |
| 26 | * on use of volatile and __*_bit() (set/clear/change): | 26 | * on use of volatile and __*_bit() (set/clear/change): |
| 27 | * *_bit() want use of volatile. | 27 | * *_bit() want use of volatile. |
| 28 | * __*_bit() are "relaxed" and don't use spinlock or volatile. | 28 | * __*_bit() are "relaxed" and don't use spinlock or volatile. |
| 29 | */ | 29 | */ |
| 30 | 30 | ||
| 31 | static __inline__ void set_bit(int nr, volatile unsigned long * addr) | 31 | static __inline__ void set_bit(int nr, volatile unsigned long * addr) |
| 32 | { | 32 | { |
| 33 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | 33 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); |
| 34 | unsigned long flags; | 34 | unsigned long flags; |
| 35 | 35 | ||
| 36 | addr += (nr >> SHIFT_PER_LONG); | 36 | addr += (nr >> SHIFT_PER_LONG); |
| 37 | _atomic_spin_lock_irqsave(addr, flags); | 37 | _atomic_spin_lock_irqsave(addr, flags); |
| 38 | *addr |= mask; | 38 | *addr |= mask; |
| 39 | _atomic_spin_unlock_irqrestore(addr, flags); | 39 | _atomic_spin_unlock_irqrestore(addr, flags); |
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | static __inline__ void clear_bit(int nr, volatile unsigned long * addr) | 42 | static __inline__ void clear_bit(int nr, volatile unsigned long * addr) |
| 43 | { | 43 | { |
| 44 | unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr)); | 44 | unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr)); |
| 45 | unsigned long flags; | 45 | unsigned long flags; |
| 46 | 46 | ||
| 47 | addr += (nr >> SHIFT_PER_LONG); | 47 | addr += (nr >> SHIFT_PER_LONG); |
| 48 | _atomic_spin_lock_irqsave(addr, flags); | 48 | _atomic_spin_lock_irqsave(addr, flags); |
| 49 | *addr &= mask; | 49 | *addr &= mask; |
| 50 | _atomic_spin_unlock_irqrestore(addr, flags); | 50 | _atomic_spin_unlock_irqrestore(addr, flags); |
| 51 | } | 51 | } |
| 52 | 52 | ||
| 53 | static __inline__ void change_bit(int nr, volatile unsigned long * addr) | 53 | static __inline__ void change_bit(int nr, volatile unsigned long * addr) |
| 54 | { | 54 | { |
| 55 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | 55 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); |
| 56 | unsigned long flags; | 56 | unsigned long flags; |
| 57 | 57 | ||
| 58 | addr += (nr >> SHIFT_PER_LONG); | 58 | addr += (nr >> SHIFT_PER_LONG); |
| 59 | _atomic_spin_lock_irqsave(addr, flags); | 59 | _atomic_spin_lock_irqsave(addr, flags); |
| 60 | *addr ^= mask; | 60 | *addr ^= mask; |
| 61 | _atomic_spin_unlock_irqrestore(addr, flags); | 61 | _atomic_spin_unlock_irqrestore(addr, flags); |
| 62 | } | 62 | } |
| 63 | 63 | ||
| 64 | static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr) | 64 | static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr) |
| 65 | { | 65 | { |
| 66 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | 66 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); |
| 67 | unsigned long old; | 67 | unsigned long old; |
| 68 | unsigned long flags; | 68 | unsigned long flags; |
| 69 | int set; | 69 | int set; |
| 70 | 70 | ||
| 71 | addr += (nr >> SHIFT_PER_LONG); | 71 | addr += (nr >> SHIFT_PER_LONG); |
| 72 | _atomic_spin_lock_irqsave(addr, flags); | 72 | _atomic_spin_lock_irqsave(addr, flags); |
| 73 | old = *addr; | 73 | old = *addr; |
| 74 | set = (old & mask) ? 1 : 0; | 74 | set = (old & mask) ? 1 : 0; |
| 75 | if (!set) | 75 | if (!set) |
| 76 | *addr = old | mask; | 76 | *addr = old | mask; |
| 77 | _atomic_spin_unlock_irqrestore(addr, flags); | 77 | _atomic_spin_unlock_irqrestore(addr, flags); |
| 78 | 78 | ||
| 79 | return set; | 79 | return set; |
| 80 | } | 80 | } |
| 81 | 81 | ||
| 82 | static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr) | 82 | static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr) |
| 83 | { | 83 | { |
| 84 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | 84 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); |
| 85 | unsigned long old; | 85 | unsigned long old; |
| 86 | unsigned long flags; | 86 | unsigned long flags; |
| 87 | int set; | 87 | int set; |
| 88 | 88 | ||
| 89 | addr += (nr >> SHIFT_PER_LONG); | 89 | addr += (nr >> SHIFT_PER_LONG); |
| 90 | _atomic_spin_lock_irqsave(addr, flags); | 90 | _atomic_spin_lock_irqsave(addr, flags); |
| 91 | old = *addr; | 91 | old = *addr; |
| 92 | set = (old & mask) ? 1 : 0; | 92 | set = (old & mask) ? 1 : 0; |
| 93 | if (set) | 93 | if (set) |
| 94 | *addr = old & ~mask; | 94 | *addr = old & ~mask; |
| 95 | _atomic_spin_unlock_irqrestore(addr, flags); | 95 | _atomic_spin_unlock_irqrestore(addr, flags); |
| 96 | 96 | ||
| 97 | return set; | 97 | return set; |
| 98 | } | 98 | } |
| 99 | 99 | ||
| 100 | static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr) | 100 | static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr) |
| 101 | { | 101 | { |
| 102 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | 102 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); |
| 103 | unsigned long oldbit; | 103 | unsigned long oldbit; |
| 104 | unsigned long flags; | 104 | unsigned long flags; |
| 105 | 105 | ||
| 106 | addr += (nr >> SHIFT_PER_LONG); | 106 | addr += (nr >> SHIFT_PER_LONG); |
| 107 | _atomic_spin_lock_irqsave(addr, flags); | 107 | _atomic_spin_lock_irqsave(addr, flags); |
| 108 | oldbit = *addr; | 108 | oldbit = *addr; |
| 109 | *addr = oldbit ^ mask; | 109 | *addr = oldbit ^ mask; |
| 110 | _atomic_spin_unlock_irqrestore(addr, flags); | 110 | _atomic_spin_unlock_irqrestore(addr, flags); |
| 111 | 111 | ||
| 112 | return (oldbit & mask) ? 1 : 0; | 112 | return (oldbit & mask) ? 1 : 0; |
| 113 | } | 113 | } |
| 114 | 114 | ||
| 115 | #include <asm-generic/bitops/non-atomic.h> | 115 | #include <asm-generic/bitops/non-atomic.h> |
| 116 | 116 | ||
| 117 | #ifdef __KERNEL__ | 117 | #ifdef __KERNEL__ |
| 118 | 118 | ||
| 119 | /** | 119 | /** |
| 120 | * __ffs - find first bit in word. returns 0 to "BITS_PER_LONG-1". | 120 | * __ffs - find first bit in word. returns 0 to "BITS_PER_LONG-1". |
| 121 | * @word: The word to search | 121 | * @word: The word to search |
| 122 | * | 122 | * |
| 123 | * __ffs() return is undefined if no bit is set. | 123 | * __ffs() return is undefined if no bit is set. |
| 124 | * | 124 | * |
| 125 | * 32-bit fast __ffs by LaMont Jones "lamont At hp com". | 125 | * 32-bit fast __ffs by LaMont Jones "lamont At hp com". |
| 126 | * 64-bit enhancement by Grant Grundler "grundler At parisc-linux org". | 126 | * 64-bit enhancement by Grant Grundler "grundler At parisc-linux org". |
| 127 | * (with help from willy/jejb to get the semantics right) | 127 | * (with help from willy/jejb to get the semantics right) |
| 128 | * | 128 | * |
| 129 | * This algorithm avoids branches by making use of nullification. | 129 | * This algorithm avoids branches by making use of nullification. |
| 130 | * One side effect of "extr" instructions is it sets PSW[N] bit. | 130 | * One side effect of "extr" instructions is it sets PSW[N] bit. |
| 131 | * How PSW[N] (nullify next insn) gets set is determined by the | 131 | * How PSW[N] (nullify next insn) gets set is determined by the |
| 132 | * "condition" field (eg "<>" or "TR" below) in the extr* insn. | 132 | * "condition" field (eg "<>" or "TR" below) in the extr* insn. |
| 133 | * Only the 1st and one of either the 2nd or 3rd insn will get executed. | 133 | * Only the 1st and one of either the 2nd or 3rd insn will get executed. |
| 134 | * Each set of 3 insn will get executed in 2 cycles on PA8x00 vs 16 or so | 134 | * Each set of 3 insn will get executed in 2 cycles on PA8x00 vs 16 or so |
| 135 | * cycles for each mispredicted branch. | 135 | * cycles for each mispredicted branch. |
| 136 | */ | 136 | */ |
| 137 | 137 | ||
| 138 | static __inline__ unsigned long __ffs(unsigned long x) | 138 | static __inline__ unsigned long __ffs(unsigned long x) |
| 139 | { | 139 | { |
| 140 | unsigned long ret; | 140 | unsigned long ret; |
| 141 | 141 | ||
| 142 | __asm__( | 142 | __asm__( |
| 143 | #ifdef CONFIG_64BIT | 143 | #ifdef CONFIG_64BIT |
| 144 | " ldi 63,%1\n" | 144 | " ldi 63,%1\n" |
| 145 | " extrd,u,*<> %0,63,32,%%r0\n" | 145 | " extrd,u,*<> %0,63,32,%%r0\n" |
| 146 | " extrd,u,*TR %0,31,32,%0\n" /* move top 32-bits down */ | 146 | " extrd,u,*TR %0,31,32,%0\n" /* move top 32-bits down */ |
| 147 | " addi -32,%1,%1\n" | 147 | " addi -32,%1,%1\n" |
| 148 | #else | 148 | #else |
| 149 | " ldi 31,%1\n" | 149 | " ldi 31,%1\n" |
| 150 | #endif | 150 | #endif |
| 151 | " extru,<> %0,31,16,%%r0\n" | 151 | " extru,<> %0,31,16,%%r0\n" |
| 152 | " extru,TR %0,15,16,%0\n" /* xxxx0000 -> 0000xxxx */ | 152 | " extru,TR %0,15,16,%0\n" /* xxxx0000 -> 0000xxxx */ |
| 153 | " addi -16,%1,%1\n" | 153 | " addi -16,%1,%1\n" |
| 154 | " extru,<> %0,31,8,%%r0\n" | 154 | " extru,<> %0,31,8,%%r0\n" |
| 155 | " extru,TR %0,23,8,%0\n" /* 0000xx00 -> 000000xx */ | 155 | " extru,TR %0,23,8,%0\n" /* 0000xx00 -> 000000xx */ |
| 156 | " addi -8,%1,%1\n" | 156 | " addi -8,%1,%1\n" |
| 157 | " extru,<> %0,31,4,%%r0\n" | 157 | " extru,<> %0,31,4,%%r0\n" |
| 158 | " extru,TR %0,27,4,%0\n" /* 000000x0 -> 0000000x */ | 158 | " extru,TR %0,27,4,%0\n" /* 000000x0 -> 0000000x */ |
| 159 | " addi -4,%1,%1\n" | 159 | " addi -4,%1,%1\n" |
| 160 | " extru,<> %0,31,2,%%r0\n" | 160 | " extru,<> %0,31,2,%%r0\n" |
| 161 | " extru,TR %0,29,2,%0\n" /* 0000000y, 1100b -> 0011b */ | 161 | " extru,TR %0,29,2,%0\n" /* 0000000y, 1100b -> 0011b */ |
| 162 | " addi -2,%1,%1\n" | 162 | " addi -2,%1,%1\n" |
| 163 | " extru,= %0,31,1,%%r0\n" /* check last bit */ | 163 | " extru,= %0,31,1,%%r0\n" /* check last bit */ |
| 164 | " addi -1,%1,%1\n" | 164 | " addi -1,%1,%1\n" |
| 165 | : "+r" (x), "=r" (ret) ); | 165 | : "+r" (x), "=r" (ret) ); |
| 166 | return ret; | 166 | return ret; |
| 167 | } | 167 | } |
| 168 | 168 | ||
| 169 | #include <asm-generic/bitops/ffz.h> | 169 | #include <asm-generic/bitops/ffz.h> |
| 170 | 170 | ||
| 171 | /* | 171 | /* |
| 172 | * ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set) | 172 | * ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set) |
| 173 | * This is defined the same way as the libc and compiler builtin | 173 | * This is defined the same way as the libc and compiler builtin |
| 174 | * ffs routines, therefore differs in spirit from the above ffz (man ffs). | 174 | * ffs routines, therefore differs in spirit from the above ffz (man ffs). |
| 175 | */ | 175 | */ |
| 176 | static __inline__ int ffs(int x) | 176 | static __inline__ int ffs(int x) |
| 177 | { | 177 | { |
| 178 | return x ? (__ffs((unsigned long)x) + 1) : 0; | 178 | return x ? (__ffs((unsigned long)x) + 1) : 0; |
| 179 | } | 179 | } |
| 180 | 180 | ||
| 181 | /* | 181 | /* |
| 182 | * fls: find last (most significant) bit set. | 182 | * fls: find last (most significant) bit set. |
| 183 | * fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. | 183 | * fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. |
| 184 | */ | 184 | */ |
| 185 | 185 | ||
| 186 | static __inline__ int fls(int x) | 186 | static __inline__ int fls(int x) |
| 187 | { | 187 | { |
| 188 | int ret; | 188 | int ret; |
| 189 | if (!x) | 189 | if (!x) |
| 190 | return 0; | 190 | return 0; |
| 191 | 191 | ||
| 192 | __asm__( | 192 | __asm__( |
| 193 | " ldi 1,%1\n" | 193 | " ldi 1,%1\n" |
| 194 | " extru,<> %0,15,16,%%r0\n" | 194 | " extru,<> %0,15,16,%%r0\n" |
| 195 | " zdep,TR %0,15,16,%0\n" /* xxxx0000 */ | 195 | " zdep,TR %0,15,16,%0\n" /* xxxx0000 */ |
| 196 | " addi 16,%1,%1\n" | 196 | " addi 16,%1,%1\n" |
| 197 | " extru,<> %0,7,8,%%r0\n" | 197 | " extru,<> %0,7,8,%%r0\n" |
| 198 | " zdep,TR %0,23,24,%0\n" /* xx000000 */ | 198 | " zdep,TR %0,23,24,%0\n" /* xx000000 */ |
| 199 | " addi 8,%1,%1\n" | 199 | " addi 8,%1,%1\n" |
| 200 | " extru,<> %0,3,4,%%r0\n" | 200 | " extru,<> %0,3,4,%%r0\n" |
| 201 | " zdep,TR %0,27,28,%0\n" /* x0000000 */ | 201 | " zdep,TR %0,27,28,%0\n" /* x0000000 */ |
| 202 | " addi 4,%1,%1\n" | 202 | " addi 4,%1,%1\n" |
| 203 | " extru,<> %0,1,2,%%r0\n" | 203 | " extru,<> %0,1,2,%%r0\n" |
| 204 | " zdep,TR %0,29,30,%0\n" /* y0000000 (y&3 = 0) */ | 204 | " zdep,TR %0,29,30,%0\n" /* y0000000 (y&3 = 0) */ |
| 205 | " addi 2,%1,%1\n" | 205 | " addi 2,%1,%1\n" |
| 206 | " extru,= %0,0,1,%%r0\n" | 206 | " extru,= %0,0,1,%%r0\n" |
| 207 | " addi 1,%1,%1\n" /* if y & 8, add 1 */ | 207 | " addi 1,%1,%1\n" /* if y & 8, add 1 */ |
| 208 | : "+r" (x), "=r" (ret) ); | 208 | : "+r" (x), "=r" (ret) ); |
| 209 | 209 | ||
| 210 | return ret; | 210 | return ret; |
| 211 | } | 211 | } |
| 212 | 212 | ||
| 213 | #include <asm-generic/bitops/__fls.h> | 213 | #include <asm-generic/bitops/__fls.h> |
| 214 | #include <asm-generic/bitops/fls64.h> | 214 | #include <asm-generic/bitops/fls64.h> |
| 215 | #include <asm-generic/bitops/hweight.h> | 215 | #include <asm-generic/bitops/hweight.h> |
| 216 | #include <asm-generic/bitops/lock.h> | 216 | #include <asm-generic/bitops/lock.h> |
| 217 | #include <asm-generic/bitops/sched.h> | 217 | #include <asm-generic/bitops/sched.h> |
| 218 | 218 | ||
| 219 | #endif /* __KERNEL__ */ | 219 | #endif /* __KERNEL__ */ |
| 220 | 220 | ||
| 221 | #include <asm-generic/bitops/find.h> | 221 | #include <asm-generic/bitops/find.h> |
| 222 | 222 | ||
| 223 | #ifdef __KERNEL__ | 223 | #ifdef __KERNEL__ |
| 224 | 224 | ||
| 225 | #include <asm-generic/bitops/le.h> | 225 | #include <asm-generic/bitops/le.h> |
| 226 | 226 | ||
| 227 | /* '3' is log2(bits per byte) */ | 227 | /* '3' is log2(bits per byte) */ |
| 228 | #define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3) | 228 | #define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3) |
| 229 | 229 | ||
| 230 | #define ext2_set_bit_atomic(l,nr,addr) \ | 230 | #define ext2_set_bit_atomic(l,nr,addr) \ |
| 231 | test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr) | 231 | test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr) |
| 232 | #define ext2_clear_bit_atomic(l,nr,addr) \ | 232 | #define ext2_clear_bit_atomic(l,nr,addr) \ |
| 233 | test_and_clear_bit( (nr) ^ LE_BYTE_ADDR, (unsigned long *)addr) | 233 | test_and_clear_bit( (nr) ^ LE_BYTE_ADDR, (unsigned long *)addr) |
| 234 | 234 | ||
| 235 | #endif /* __KERNEL__ */ | 235 | #endif /* __KERNEL__ */ |
| 236 | 236 | ||
| 237 | #include <asm-generic/bitops/minix-le.h> | ||
| 238 | |||
| 239 | #endif /* _PARISC_BITOPS_H */ | 237 | #endif /* _PARISC_BITOPS_H */ |
| 240 | 238 |
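The ext2_set_bit_atomic()/ext2_clear_bit_atomic() macros in the parisc header above pass (nr) ^ LE_BYTE_ADDR to the native test_and_* operations: the XOR flips only the byte-index bits of the bit number, so a little-endian, byte-oriented bit number lands on the right bit of the big-endian unsigned long. A minimal sketch of that remapping follows (illustrative only; only the LE_BYTE_ADDR definition is taken from the diff, everything else is invented here).

/*
 * Illustrative sketch, not part of this patch: show where each byte's bit 0
 * ends up after the (nr ^ LE_BYTE_ADDR) swizzle used by parisc's
 * ext2_*_bit_atomic() macros.
 */
#include <stdio.h>

#define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3)	/* 24 on 32-bit, 56 on 64-bit */

int main(void)
{
	unsigned long nr;

	/* walk bit 0 of each byte of one unsigned long */
	for (nr = 0; nr < 8 * sizeof(unsigned long); nr += 8)
		printf("LE bit %2lu -> native bit %2lu in the unsigned long\n",
		       nr, nr ^ LE_BYTE_ADDR);
	return 0;
}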
arch/powerpc/include/asm/bitops.h
| 1 | /* | 1 | /* |
| 2 | * PowerPC atomic bit operations. | 2 | * PowerPC atomic bit operations. |
| 3 | * | 3 | * |
| 4 | * Merged version by David Gibson <david@gibson.dropbear.id.au>. | 4 | * Merged version by David Gibson <david@gibson.dropbear.id.au>. |
| 5 | * Based on ppc64 versions by: Dave Engebretsen, Todd Inglett, Don | 5 | * Based on ppc64 versions by: Dave Engebretsen, Todd Inglett, Don |
| 6 | * Reed, Pat McCarthy, Peter Bergner, Anton Blanchard. They | 6 | * Reed, Pat McCarthy, Peter Bergner, Anton Blanchard. They |
| 7 | * originally took it from the ppc32 code. | 7 | * originally took it from the ppc32 code. |
| 8 | * | 8 | * |
| 9 | * Within a word, bits are numbered LSB first. Lots of places make | 9 | * Within a word, bits are numbered LSB first. Lots of places make |
| 10 | * this assumption by directly testing bits with (val & (1<<nr)). | 10 | * this assumption by directly testing bits with (val & (1<<nr)). |
| 11 | * This can cause confusion for large (> 1 word) bitmaps on a | 11 | * This can cause confusion for large (> 1 word) bitmaps on a |
| 12 | * big-endian system because, unlike little endian, the number of each | 12 | * big-endian system because, unlike little endian, the number of each |
| 13 | * bit depends on the word size. | 13 | * bit depends on the word size. |
| 14 | * | 14 | * |
| 15 | * The bitop functions are defined to work on unsigned longs, so for a | 15 | * The bitop functions are defined to work on unsigned longs, so for a |
| 16 | * ppc64 system the bits end up numbered: | 16 | * ppc64 system the bits end up numbered: |
| 17 | * |63..............0|127............64|191...........128|255...........192| | 17 | * |63..............0|127............64|191...........128|255...........192| |
| 18 | * and on ppc32: | 18 | * and on ppc32: |
| 19 | * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224| | 19 | * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224| |
| 20 | * | 20 | * |
| 21 | * There are a few little-endian macros used mostly for filesystem | 21 | * There are a few little-endian macros used mostly for filesystem |
| 22 | * bitmaps; these work on similar bit array layouts, but | 22 | * bitmaps; these work on similar bit array layouts, but |
| 23 | * byte-oriented: | 23 | * byte-oriented: |
| 24 | * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56| | 24 | * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56| |
| 25 | * | 25 | * |
| 26 | * The main difference is that bits 3-5 (64b) or 3-4 (32b) in the bit | 26 | * The main difference is that bits 3-5 (64b) or 3-4 (32b) in the bit |
| 27 | * number field need to be reversed compared to the big-endian bit | 27 | * number field need to be reversed compared to the big-endian bit |
| 28 | * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b). | 28 | * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b). |
| 29 | * | 29 | * |
| 30 | * This program is free software; you can redistribute it and/or | 30 | * This program is free software; you can redistribute it and/or |
| 31 | * modify it under the terms of the GNU General Public License | 31 | * modify it under the terms of the GNU General Public License |
| 32 | * as published by the Free Software Foundation; either version | 32 | * as published by the Free Software Foundation; either version |
| 33 | * 2 of the License, or (at your option) any later version. | 33 | * 2 of the License, or (at your option) any later version. |
| 34 | */ | 34 | */ |
| 35 | 35 | ||
| 36 | #ifndef _ASM_POWERPC_BITOPS_H | 36 | #ifndef _ASM_POWERPC_BITOPS_H |
| 37 | #define _ASM_POWERPC_BITOPS_H | 37 | #define _ASM_POWERPC_BITOPS_H |
| 38 | 38 | ||
| 39 | #ifdef __KERNEL__ | 39 | #ifdef __KERNEL__ |
| 40 | 40 | ||
| 41 | #ifndef _LINUX_BITOPS_H | 41 | #ifndef _LINUX_BITOPS_H |
| 42 | #error only <linux/bitops.h> can be included directly | 42 | #error only <linux/bitops.h> can be included directly |
| 43 | #endif | 43 | #endif |
| 44 | 44 | ||
| 45 | #include <linux/compiler.h> | 45 | #include <linux/compiler.h> |
| 46 | #include <asm/asm-compat.h> | 46 | #include <asm/asm-compat.h> |
| 47 | #include <asm/synch.h> | 47 | #include <asm/synch.h> |
| 48 | 48 | ||
| 49 | /* | 49 | /* |
| 50 | * clear_bit doesn't imply a memory barrier | 50 | * clear_bit doesn't imply a memory barrier |
| 51 | */ | 51 | */ |
| 52 | #define smp_mb__before_clear_bit() smp_mb() | 52 | #define smp_mb__before_clear_bit() smp_mb() |
| 53 | #define smp_mb__after_clear_bit() smp_mb() | 53 | #define smp_mb__after_clear_bit() smp_mb() |
| 54 | 54 | ||
| 55 | #define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) | 55 | #define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) |
| 56 | #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) | 56 | #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) |
| 57 | #define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) | 57 | #define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) |
| 58 | 58 | ||
| 59 | /* Macro for generating the ***_bits() functions */ | 59 | /* Macro for generating the ***_bits() functions */ |
| 60 | #define DEFINE_BITOP(fn, op, prefix, postfix) \ | 60 | #define DEFINE_BITOP(fn, op, prefix, postfix) \ |
| 61 | static __inline__ void fn(unsigned long mask, \ | 61 | static __inline__ void fn(unsigned long mask, \ |
| 62 | volatile unsigned long *_p) \ | 62 | volatile unsigned long *_p) \ |
| 63 | { \ | 63 | { \ |
| 64 | unsigned long old; \ | 64 | unsigned long old; \ |
| 65 | unsigned long *p = (unsigned long *)_p; \ | 65 | unsigned long *p = (unsigned long *)_p; \ |
| 66 | __asm__ __volatile__ ( \ | 66 | __asm__ __volatile__ ( \ |
| 67 | prefix \ | 67 | prefix \ |
| 68 | "1:" PPC_LLARX(%0,0,%3,0) "\n" \ | 68 | "1:" PPC_LLARX(%0,0,%3,0) "\n" \ |
| 69 | stringify_in_c(op) "%0,%0,%2\n" \ | 69 | stringify_in_c(op) "%0,%0,%2\n" \ |
| 70 | PPC405_ERR77(0,%3) \ | 70 | PPC405_ERR77(0,%3) \ |
| 71 | PPC_STLCX "%0,0,%3\n" \ | 71 | PPC_STLCX "%0,0,%3\n" \ |
| 72 | "bne- 1b\n" \ | 72 | "bne- 1b\n" \ |
| 73 | postfix \ | 73 | postfix \ |
| 74 | : "=&r" (old), "+m" (*p) \ | 74 | : "=&r" (old), "+m" (*p) \ |
| 75 | : "r" (mask), "r" (p) \ | 75 | : "r" (mask), "r" (p) \ |
| 76 | : "cc", "memory"); \ | 76 | : "cc", "memory"); \ |
| 77 | } | 77 | } |
| 78 | 78 | ||
| 79 | DEFINE_BITOP(set_bits, or, "", "") | 79 | DEFINE_BITOP(set_bits, or, "", "") |
| 80 | DEFINE_BITOP(clear_bits, andc, "", "") | 80 | DEFINE_BITOP(clear_bits, andc, "", "") |
| 81 | DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER, "") | 81 | DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER, "") |
| 82 | DEFINE_BITOP(change_bits, xor, "", "") | 82 | DEFINE_BITOP(change_bits, xor, "", "") |
| 83 | 83 | ||
| 84 | static __inline__ void set_bit(int nr, volatile unsigned long *addr) | 84 | static __inline__ void set_bit(int nr, volatile unsigned long *addr) |
| 85 | { | 85 | { |
| 86 | set_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)); | 86 | set_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)); |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | static __inline__ void clear_bit(int nr, volatile unsigned long *addr) | 89 | static __inline__ void clear_bit(int nr, volatile unsigned long *addr) |
| 90 | { | 90 | { |
| 91 | clear_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)); | 91 | clear_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)); |
| 92 | } | 92 | } |
| 93 | 93 | ||
| 94 | static __inline__ void clear_bit_unlock(int nr, volatile unsigned long *addr) | 94 | static __inline__ void clear_bit_unlock(int nr, volatile unsigned long *addr) |
| 95 | { | 95 | { |
| 96 | clear_bits_unlock(BITOP_MASK(nr), addr + BITOP_WORD(nr)); | 96 | clear_bits_unlock(BITOP_MASK(nr), addr + BITOP_WORD(nr)); |
| 97 | } | 97 | } |
| 98 | 98 | ||
| 99 | static __inline__ void change_bit(int nr, volatile unsigned long *addr) | 99 | static __inline__ void change_bit(int nr, volatile unsigned long *addr) |
| 100 | { | 100 | { |
| 101 | change_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)); | 101 | change_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)); |
| 102 | } | 102 | } |
| 103 | 103 | ||
| 104 | /* Like DEFINE_BITOP(), with changes to the arguments to 'op' and the output | 104 | /* Like DEFINE_BITOP(), with changes to the arguments to 'op' and the output |
| 105 | * operands. */ | 105 | * operands. */ |
| 106 | #define DEFINE_TESTOP(fn, op, prefix, postfix, eh) \ | 106 | #define DEFINE_TESTOP(fn, op, prefix, postfix, eh) \ |
| 107 | static __inline__ unsigned long fn( \ | 107 | static __inline__ unsigned long fn( \ |
| 108 | unsigned long mask, \ | 108 | unsigned long mask, \ |
| 109 | volatile unsigned long *_p) \ | 109 | volatile unsigned long *_p) \ |
| 110 | { \ | 110 | { \ |
| 111 | unsigned long old, t; \ | 111 | unsigned long old, t; \ |
| 112 | unsigned long *p = (unsigned long *)_p; \ | 112 | unsigned long *p = (unsigned long *)_p; \ |
| 113 | __asm__ __volatile__ ( \ | 113 | __asm__ __volatile__ ( \ |
| 114 | prefix \ | 114 | prefix \ |
| 115 | "1:" PPC_LLARX(%0,0,%3,eh) "\n" \ | 115 | "1:" PPC_LLARX(%0,0,%3,eh) "\n" \ |
| 116 | stringify_in_c(op) "%1,%0,%2\n" \ | 116 | stringify_in_c(op) "%1,%0,%2\n" \ |
| 117 | PPC405_ERR77(0,%3) \ | 117 | PPC405_ERR77(0,%3) \ |
| 118 | PPC_STLCX "%1,0,%3\n" \ | 118 | PPC_STLCX "%1,0,%3\n" \ |
| 119 | "bne- 1b\n" \ | 119 | "bne- 1b\n" \ |
| 120 | postfix \ | 120 | postfix \ |
| 121 | : "=&r" (old), "=&r" (t) \ | 121 | : "=&r" (old), "=&r" (t) \ |
| 122 | : "r" (mask), "r" (p) \ | 122 | : "r" (mask), "r" (p) \ |
| 123 | : "cc", "memory"); \ | 123 | : "cc", "memory"); \ |
| 124 | return (old & mask); \ | 124 | return (old & mask); \ |
| 125 | } | 125 | } |
| 126 | 126 | ||
| 127 | DEFINE_TESTOP(test_and_set_bits, or, PPC_RELEASE_BARRIER, | 127 | DEFINE_TESTOP(test_and_set_bits, or, PPC_RELEASE_BARRIER, |
| 128 | PPC_ACQUIRE_BARRIER, 0) | 128 | PPC_ACQUIRE_BARRIER, 0) |
| 129 | DEFINE_TESTOP(test_and_set_bits_lock, or, "", | 129 | DEFINE_TESTOP(test_and_set_bits_lock, or, "", |
| 130 | PPC_ACQUIRE_BARRIER, 1) | 130 | PPC_ACQUIRE_BARRIER, 1) |
| 131 | DEFINE_TESTOP(test_and_clear_bits, andc, PPC_RELEASE_BARRIER, | 131 | DEFINE_TESTOP(test_and_clear_bits, andc, PPC_RELEASE_BARRIER, |
| 132 | PPC_ACQUIRE_BARRIER, 0) | 132 | PPC_ACQUIRE_BARRIER, 0) |
| 133 | DEFINE_TESTOP(test_and_change_bits, xor, PPC_RELEASE_BARRIER, | 133 | DEFINE_TESTOP(test_and_change_bits, xor, PPC_RELEASE_BARRIER, |
| 134 | PPC_ACQUIRE_BARRIER, 0) | 134 | PPC_ACQUIRE_BARRIER, 0) |
| 135 | 135 | ||
| 136 | static __inline__ int test_and_set_bit(unsigned long nr, | 136 | static __inline__ int test_and_set_bit(unsigned long nr, |
| 137 | volatile unsigned long *addr) | 137 | volatile unsigned long *addr) |
| 138 | { | 138 | { |
| 139 | return test_and_set_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0; | 139 | return test_and_set_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0; |
| 140 | } | 140 | } |
| 141 | 141 | ||
| 142 | static __inline__ int test_and_set_bit_lock(unsigned long nr, | 142 | static __inline__ int test_and_set_bit_lock(unsigned long nr, |
| 143 | volatile unsigned long *addr) | 143 | volatile unsigned long *addr) |
| 144 | { | 144 | { |
| 145 | return test_and_set_bits_lock(BITOP_MASK(nr), | 145 | return test_and_set_bits_lock(BITOP_MASK(nr), |
| 146 | addr + BITOP_WORD(nr)) != 0; | 146 | addr + BITOP_WORD(nr)) != 0; |
| 147 | } | 147 | } |
| 148 | 148 | ||
| 149 | static __inline__ int test_and_clear_bit(unsigned long nr, | 149 | static __inline__ int test_and_clear_bit(unsigned long nr, |
| 150 | volatile unsigned long *addr) | 150 | volatile unsigned long *addr) |
| 151 | { | 151 | { |
| 152 | return test_and_clear_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0; | 152 | return test_and_clear_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0; |
| 153 | } | 153 | } |
| 154 | 154 | ||
| 155 | static __inline__ int test_and_change_bit(unsigned long nr, | 155 | static __inline__ int test_and_change_bit(unsigned long nr, |
| 156 | volatile unsigned long *addr) | 156 | volatile unsigned long *addr) |
| 157 | { | 157 | { |
| 158 | return test_and_change_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0; | 158 | return test_and_change_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0; |
| 159 | } | 159 | } |
| 160 | 160 | ||
| 161 | #include <asm-generic/bitops/non-atomic.h> | 161 | #include <asm-generic/bitops/non-atomic.h> |
| 162 | 162 | ||
| 163 | static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr) | 163 | static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr) |
| 164 | { | 164 | { |
| 165 | __asm__ __volatile__(PPC_RELEASE_BARRIER "" ::: "memory"); | 165 | __asm__ __volatile__(PPC_RELEASE_BARRIER "" ::: "memory"); |
| 166 | __clear_bit(nr, addr); | 166 | __clear_bit(nr, addr); |
| 167 | } | 167 | } |
| 168 | 168 | ||
| 169 | /* | 169 | /* |
| 170 | * Return the zero-based bit position (LE, not IBM bit numbering) of | 170 | * Return the zero-based bit position (LE, not IBM bit numbering) of |
| 171 | * the most significant 1-bit in a double word. | 171 | * the most significant 1-bit in a double word. |
| 172 | */ | 172 | */ |
| 173 | static __inline__ __attribute__((const)) | 173 | static __inline__ __attribute__((const)) |
| 174 | int __ilog2(unsigned long x) | 174 | int __ilog2(unsigned long x) |
| 175 | { | 175 | { |
| 176 | int lz; | 176 | int lz; |
| 177 | 177 | ||
| 178 | asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x)); | 178 | asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x)); |
| 179 | return BITS_PER_LONG - 1 - lz; | 179 | return BITS_PER_LONG - 1 - lz; |
| 180 | } | 180 | } |
| 181 | 181 | ||
| 182 | static inline __attribute__((const)) | 182 | static inline __attribute__((const)) |
| 183 | int __ilog2_u32(u32 n) | 183 | int __ilog2_u32(u32 n) |
| 184 | { | 184 | { |
| 185 | int bit; | 185 | int bit; |
| 186 | asm ("cntlzw %0,%1" : "=r" (bit) : "r" (n)); | 186 | asm ("cntlzw %0,%1" : "=r" (bit) : "r" (n)); |
| 187 | return 31 - bit; | 187 | return 31 - bit; |
| 188 | } | 188 | } |
| 189 | 189 | ||
| 190 | #ifdef __powerpc64__ | 190 | #ifdef __powerpc64__ |
| 191 | static inline __attribute__((const)) | 191 | static inline __attribute__((const)) |
| 192 | int __ilog2_u64(u64 n) | 192 | int __ilog2_u64(u64 n) |
| 193 | { | 193 | { |
| 194 | int bit; | 194 | int bit; |
| 195 | asm ("cntlzd %0,%1" : "=r" (bit) : "r" (n)); | 195 | asm ("cntlzd %0,%1" : "=r" (bit) : "r" (n)); |
| 196 | return 63 - bit; | 196 | return 63 - bit; |
| 197 | } | 197 | } |
| 198 | #endif | 198 | #endif |
| 199 | 199 | ||
| 200 | /* | 200 | /* |
| 201 | * Determines the bit position of the least significant 0 bit in the | 201 | * Determines the bit position of the least significant 0 bit in the |
| 202 | * specified double word. The returned bit position will be | 202 | * specified double word. The returned bit position will be |
| 203 | * zero-based, starting from the right side (63/31 - 0). | 203 | * zero-based, starting from the right side (63/31 - 0). |
| 204 | */ | 204 | */ |
| 205 | static __inline__ unsigned long ffz(unsigned long x) | 205 | static __inline__ unsigned long ffz(unsigned long x) |
| 206 | { | 206 | { |
| 207 | /* no zero exists anywhere in the 8 byte area. */ | 207 | /* no zero exists anywhere in the 8 byte area. */ |
| 208 | if ((x = ~x) == 0) | 208 | if ((x = ~x) == 0) |
| 209 | return BITS_PER_LONG; | 209 | return BITS_PER_LONG; |
| 210 | 210 | ||
| 211 | /* | 211 | /* |
| 212 | * Calculate the bit position of the least significant '1' bit in x | 212 | * Calculate the bit position of the least significant '1' bit in x |
| 213 | * (since x has been changed this will actually be the least significant | 213 | * (since x has been changed this will actually be the least significant |
| 214 | * '0' bit in the original x). Note: (x & -x) gives us a mask that | 214 | * '0' bit in the original x). Note: (x & -x) gives us a mask that |
| 215 | * is the least significant (RIGHT-most) 1-bit of the value in x. | 215 | * is the least significant (RIGHT-most) 1-bit of the value in x. |
| 216 | */ | 216 | */ |
| 217 | return __ilog2(x & -x); | 217 | return __ilog2(x & -x); |
| 218 | } | 218 | } |
| 219 | 219 | ||
| 220 | static __inline__ int __ffs(unsigned long x) | 220 | static __inline__ int __ffs(unsigned long x) |
| 221 | { | 221 | { |
| 222 | return __ilog2(x & -x); | 222 | return __ilog2(x & -x); |
| 223 | } | 223 | } |
| 224 | 224 | ||
| 225 | /* | 225 | /* |
| 226 | * ffs: find first bit set. This is defined the same way as | 226 | * ffs: find first bit set. This is defined the same way as |
| 227 | * the libc and compiler builtin ffs routines, therefore | 227 | * the libc and compiler builtin ffs routines, therefore |
| 228 | * differs in spirit from the above ffz (man ffs). | 228 | * differs in spirit from the above ffz (man ffs). |
| 229 | */ | 229 | */ |
| 230 | static __inline__ int ffs(int x) | 230 | static __inline__ int ffs(int x) |
| 231 | { | 231 | { |
| 232 | unsigned long i = (unsigned long)x; | 232 | unsigned long i = (unsigned long)x; |
| 233 | return __ilog2(i & -i) + 1; | 233 | return __ilog2(i & -i) + 1; |
| 234 | } | 234 | } |
| 235 | 235 | ||
| 236 | /* | 236 | /* |
| 237 | * fls: find last (most-significant) bit set. | 237 | * fls: find last (most-significant) bit set. |
| 238 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. | 238 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. |
| 239 | */ | 239 | */ |
| 240 | static __inline__ int fls(unsigned int x) | 240 | static __inline__ int fls(unsigned int x) |
| 241 | { | 241 | { |
| 242 | int lz; | 242 | int lz; |
| 243 | 243 | ||
| 244 | asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x)); | 244 | asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x)); |
| 245 | return 32 - lz; | 245 | return 32 - lz; |
| 246 | } | 246 | } |
| 247 | 247 | ||
| 248 | static __inline__ unsigned long __fls(unsigned long x) | 248 | static __inline__ unsigned long __fls(unsigned long x) |
| 249 | { | 249 | { |
| 250 | return __ilog2(x); | 250 | return __ilog2(x); |
| 251 | } | 251 | } |
| 252 | 252 | ||
| 253 | /* | 253 | /* |
| 254 | * 64-bit can do this using one cntlzd (count leading zeroes doubleword) | 254 | * 64-bit can do this using one cntlzd (count leading zeroes doubleword) |
| 255 | * instruction; for 32-bit we use the generic version, which does two | 255 | * instruction; for 32-bit we use the generic version, which does two |
| 256 | * 32-bit fls calls. | 256 | * 32-bit fls calls. |
| 257 | */ | 257 | */ |
| 258 | #ifdef __powerpc64__ | 258 | #ifdef __powerpc64__ |
| 259 | static __inline__ int fls64(__u64 x) | 259 | static __inline__ int fls64(__u64 x) |
| 260 | { | 260 | { |
| 261 | int lz; | 261 | int lz; |
| 262 | 262 | ||
| 263 | asm ("cntlzd %0,%1" : "=r" (lz) : "r" (x)); | 263 | asm ("cntlzd %0,%1" : "=r" (lz) : "r" (x)); |
| 264 | return 64 - lz; | 264 | return 64 - lz; |
| 265 | } | 265 | } |
| 266 | #else | 266 | #else |
| 267 | #include <asm-generic/bitops/fls64.h> | 267 | #include <asm-generic/bitops/fls64.h> |
| 268 | #endif /* __powerpc64__ */ | 268 | #endif /* __powerpc64__ */ |
| 269 | 269 | ||
| 270 | #ifdef CONFIG_PPC64 | 270 | #ifdef CONFIG_PPC64 |
| 271 | unsigned int __arch_hweight8(unsigned int w); | 271 | unsigned int __arch_hweight8(unsigned int w); |
| 272 | unsigned int __arch_hweight16(unsigned int w); | 272 | unsigned int __arch_hweight16(unsigned int w); |
| 273 | unsigned int __arch_hweight32(unsigned int w); | 273 | unsigned int __arch_hweight32(unsigned int w); |
| 274 | unsigned long __arch_hweight64(__u64 w); | 274 | unsigned long __arch_hweight64(__u64 w); |
| 275 | #include <asm-generic/bitops/const_hweight.h> | 275 | #include <asm-generic/bitops/const_hweight.h> |
| 276 | #else | 276 | #else |
| 277 | #include <asm-generic/bitops/hweight.h> | 277 | #include <asm-generic/bitops/hweight.h> |
| 278 | #endif | 278 | #endif |
| 279 | 279 | ||
| 280 | #include <asm-generic/bitops/find.h> | 280 | #include <asm-generic/bitops/find.h> |
| 281 | 281 | ||
| 282 | /* Little-endian versions */ | 282 | /* Little-endian versions */ |
| 283 | 283 | ||
| 284 | static __inline__ int test_bit_le(unsigned long nr, | 284 | static __inline__ int test_bit_le(unsigned long nr, |
| 285 | __const__ void *addr) | 285 | __const__ void *addr) |
| 286 | { | 286 | { |
| 287 | __const__ unsigned char *tmp = (__const__ unsigned char *) addr; | 287 | __const__ unsigned char *tmp = (__const__ unsigned char *) addr; |
| 288 | return (tmp[nr >> 3] >> (nr & 7)) & 1; | 288 | return (tmp[nr >> 3] >> (nr & 7)) & 1; |
| 289 | } | 289 | } |
| 290 | 290 | ||
| 291 | static inline void __set_bit_le(int nr, void *addr) | 291 | static inline void __set_bit_le(int nr, void *addr) |
| 292 | { | 292 | { |
| 293 | __set_bit(nr ^ BITOP_LE_SWIZZLE, addr); | 293 | __set_bit(nr ^ BITOP_LE_SWIZZLE, addr); |
| 294 | } | 294 | } |
| 295 | 295 | ||
| 296 | static inline void __clear_bit_le(int nr, void *addr) | 296 | static inline void __clear_bit_le(int nr, void *addr) |
| 297 | { | 297 | { |
| 298 | __clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); | 298 | __clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); |
| 299 | } | 299 | } |
| 300 | 300 | ||
| 301 | static inline int test_and_set_bit_le(int nr, void *addr) | 301 | static inline int test_and_set_bit_le(int nr, void *addr) |
| 302 | { | 302 | { |
| 303 | return test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr); | 303 | return test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr); |
| 304 | } | 304 | } |
| 305 | 305 | ||
| 306 | static inline int test_and_clear_bit_le(int nr, void *addr) | 306 | static inline int test_and_clear_bit_le(int nr, void *addr) |
| 307 | { | 307 | { |
| 308 | return test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); | 308 | return test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); |
| 309 | } | 309 | } |
| 310 | 310 | ||
| 311 | static inline int __test_and_set_bit_le(int nr, void *addr) | 311 | static inline int __test_and_set_bit_le(int nr, void *addr) |
| 312 | { | 312 | { |
| 313 | return __test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr); | 313 | return __test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr); |
| 314 | } | 314 | } |
| 315 | 315 | ||
| 316 | static inline int __test_and_clear_bit_le(int nr, void *addr) | 316 | static inline int __test_and_clear_bit_le(int nr, void *addr) |
| 317 | { | 317 | { |
| 318 | return __test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); | 318 | return __test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); |
| 319 | } | 319 | } |
| 320 | 320 | ||
| 321 | #define find_first_zero_bit_le(addr, size) \ | 321 | #define find_first_zero_bit_le(addr, size) \ |
| 322 | find_next_zero_bit_le((addr), (size), 0) | 322 | find_next_zero_bit_le((addr), (size), 0) |
| 323 | unsigned long find_next_zero_bit_le(const void *addr, | 323 | unsigned long find_next_zero_bit_le(const void *addr, |
| 324 | unsigned long size, unsigned long offset); | 324 | unsigned long size, unsigned long offset); |
| 325 | 325 | ||
| 326 | unsigned long find_next_bit_le(const void *addr, | 326 | unsigned long find_next_bit_le(const void *addr, |
| 327 | unsigned long size, unsigned long offset); | 327 | unsigned long size, unsigned long offset); |
| 328 | /* Bitmap functions for the ext2 filesystem */ | 328 | /* Bitmap functions for the ext2 filesystem */ |
| 329 | 329 | ||
| 330 | #define ext2_set_bit_atomic(lock, nr, addr) \ | 330 | #define ext2_set_bit_atomic(lock, nr, addr) \ |
| 331 | test_and_set_bit_le((nr), (unsigned long*)addr) | 331 | test_and_set_bit_le((nr), (unsigned long*)addr) |
| 332 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | 332 | #define ext2_clear_bit_atomic(lock, nr, addr) \ |
| 333 | test_and_clear_bit_le((nr), (unsigned long*)addr) | 333 | test_and_clear_bit_le((nr), (unsigned long*)addr) |
| 334 | 334 | ||
| 335 | /* Bitmap functions for the minix filesystem. */ | ||
| 336 | |||
| 337 | #define minix_test_and_set_bit(nr,addr) \ | ||
| 338 | __test_and_set_bit_le(nr, (unsigned long *)addr) | ||
| 339 | #define minix_set_bit(nr,addr) \ | ||
| 340 | __set_bit_le(nr, (unsigned long *)addr) | ||
| 341 | #define minix_test_and_clear_bit(nr,addr) \ | ||
| 342 | __test_and_clear_bit_le(nr, (unsigned long *)addr) | ||
| 343 | #define minix_test_bit(nr,addr) \ | ||
| 344 | test_bit_le(nr, (unsigned long *)addr) | ||
| 345 | |||
| 346 | #define minix_find_first_zero_bit(addr,size) \ | ||
| 347 | find_first_zero_bit_le((unsigned long *)addr, size) | ||
| 348 | |||
| 349 | #include <asm-generic/bitops/sched.h> | 335 | #include <asm-generic/bitops/sched.h> |
| 350 | 336 | ||
| 351 | #endif /* __KERNEL__ */ | 337 | #endif /* __KERNEL__ */ |
| 352 | 338 | ||
| 353 | #endif /* _ASM_POWERPC_BITOPS_H */ | 339 | #endif /* _ASM_POWERPC_BITOPS_H */ |
| 354 | 340 |
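The powerpc minix_* macros deleted above simply forwarded to __set_bit_le(), __test_and_set_bit_le() and friends, i.e. to the BITOP_LE_SWIZZLE-based helpers defined earlier in the same header. A rough stand-alone sketch of what that swizzle does for a set operation on a big-endian machine follows (illustrative only; the sketch_set_bit_le name is invented, and the swizzle here is applied unconditionally, which matches only the big-endian case).

/*
 * Illustrative sketch, not part of this patch: mimic __set_bit_le() on a
 * big-endian host.  The bit number is XOR-ed with BITOP_LE_SWIZZLE so that
 * byte-oriented little-endian bitmaps land on the right bit of a big-endian
 * unsigned long.
 */
#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG - 1) & ~0x7UL)	/* 56 for 64-bit longs */

static void sketch_set_bit_le(unsigned long nr, unsigned long *addr)
{
	unsigned long swizzled = nr ^ BITOP_LE_SWIZZLE;

	addr[swizzled / BITS_PER_LONG] |= 1UL << (swizzled % BITS_PER_LONG);
}

int main(void)
{
	unsigned long word = 0;

	/* LE bit 0 (bit 0 of the lowest-addressed byte) maps to native bit 56
	 * when the long is 64 bits wide and stored big-endian. */
	sketch_set_bit_le(0, &word);
	printf("word after setting LE bit 0: %#lx\n", word);
	return 0;
}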
arch/s390/include/asm/bitops.h
| 1 | #ifndef _S390_BITOPS_H | 1 | #ifndef _S390_BITOPS_H |
| 2 | #define _S390_BITOPS_H | 2 | #define _S390_BITOPS_H |
| 3 | 3 | ||
| 4 | /* | 4 | /* |
| 5 | * include/asm-s390/bitops.h | 5 | * include/asm-s390/bitops.h |
| 6 | * | 6 | * |
| 7 | * S390 version | 7 | * S390 version |
| 8 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | 8 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation |
| 9 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | 9 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) |
| 10 | * | 10 | * |
| 11 | * Derived from "include/asm-i386/bitops.h" | 11 | * Derived from "include/asm-i386/bitops.h" |
| 12 | * Copyright (C) 1992, Linus Torvalds | 12 | * Copyright (C) 1992, Linus Torvalds |
| 13 | * | 13 | * |
| 14 | */ | 14 | */ |
| 15 | 15 | ||
| 16 | #ifdef __KERNEL__ | 16 | #ifdef __KERNEL__ |
| 17 | 17 | ||
| 18 | #ifndef _LINUX_BITOPS_H | 18 | #ifndef _LINUX_BITOPS_H |
| 19 | #error only <linux/bitops.h> can be included directly | 19 | #error only <linux/bitops.h> can be included directly |
| 20 | #endif | 20 | #endif |
| 21 | 21 | ||
| 22 | #include <linux/compiler.h> | 22 | #include <linux/compiler.h> |
| 23 | 23 | ||
| 24 | /* | 24 | /* |
| 25 | * 32 bit bitops format: | 25 | * 32 bit bitops format: |
| 26 | * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr; | 26 | * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr; |
| 27 | * bit 32 is the LSB of *(addr+4). That combined with the | 27 | * bit 32 is the LSB of *(addr+4). That combined with the |
| 28 | * big endian byte order on S390 give the following bit | 28 | * big endian byte order on S390 give the following bit |
| 29 | * order in memory: | 29 | * order in memory: |
| 30 | * 1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \ | 30 | * 1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \ |
| 31 | * 0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00 | 31 | * 0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00 |
| 32 | * after that follows the next long with bit numbers | 32 | * after that follows the next long with bit numbers |
| 33 | * 3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30 | 33 | * 3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30 |
| 34 | * 2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20 | 34 | * 2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20 |
| 35 | * The reason for this bit ordering is the fact that | 35 | * The reason for this bit ordering is the fact that |
| 36 | * in the architecture independent code bit operations | 36 | * in the architecture independent code bit operations |
| 37 | * of the form "flags |= (1 << bitnr)" are used INTERMIXED | 37 | * of the form "flags |= (1 << bitnr)" are used INTERMIXED |
| 38 | * with operations of the form "set_bit(bitnr, flags)". | 38 | * with operations of the form "set_bit(bitnr, flags)". |
| 39 | * | 39 | * |
| 40 | * 64 bit bitops format: | 40 | * 64 bit bitops format: |
| 41 | * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr; | 41 | * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr; |
| 42 | * bit 64 is the LSB of *(addr+8). That combined with the | 42 | * bit 64 is the LSB of *(addr+8). That combined with the |
| 43 | * big endian byte order on S390 give the following bit | 43 | * big endian byte order on S390 give the following bit |
| 44 | * order in memory: | 44 | * order in memory: |
| 45 | * 3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30 | 45 | * 3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30 |
| 46 | * 2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20 | 46 | * 2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20 |
| 47 | * 1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 | 47 | * 1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 |
| 48 | * 0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00 | 48 | * 0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00 |
| 49 | * after that follows the next long with bit numbers | 49 | * after that follows the next long with bit numbers |
| 50 | * 7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70 | 50 | * 7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70 |
| 51 | * 6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60 | 51 | * 6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60 |
| 52 | * 5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50 | 52 | * 5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50 |
| 53 | * 4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40 | 53 | * 4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40 |
| 54 | * The reason for this bit ordering is the fact that | 54 | * The reason for this bit ordering is the fact that |
| 55 | * in the architecture independent code bit operations | 55 | * in the architecture independent code bit operations |
| 56 | * of the form "flags |= (1 << bitnr)" are used INTERMIXED | 56 | * of the form "flags |= (1 << bitnr)" are used INTERMIXED |
| 57 | * with operations of the form "set_bit(bitnr, flags)". | 57 | * with operations of the form "set_bit(bitnr, flags)". |
| 58 | */ | 58 | */ |
| 59 | 59 | ||
| 60 | /* bitmap tables from arch/s390/kernel/bitmap.c */ | 60 | /* bitmap tables from arch/s390/kernel/bitmap.c */ |
| 61 | extern const char _oi_bitmap[]; | 61 | extern const char _oi_bitmap[]; |
| 62 | extern const char _ni_bitmap[]; | 62 | extern const char _ni_bitmap[]; |
| 63 | extern const char _zb_findmap[]; | 63 | extern const char _zb_findmap[]; |
| 64 | extern const char _sb_findmap[]; | 64 | extern const char _sb_findmap[]; |
| 65 | 65 | ||
| 66 | #ifndef __s390x__ | 66 | #ifndef __s390x__ |
| 67 | 67 | ||
| 68 | #define __BITOPS_ALIGN 3 | 68 | #define __BITOPS_ALIGN 3 |
| 69 | #define __BITOPS_WORDSIZE 32 | 69 | #define __BITOPS_WORDSIZE 32 |
| 70 | #define __BITOPS_OR "or" | 70 | #define __BITOPS_OR "or" |
| 71 | #define __BITOPS_AND "nr" | 71 | #define __BITOPS_AND "nr" |
| 72 | #define __BITOPS_XOR "xr" | 72 | #define __BITOPS_XOR "xr" |
| 73 | 73 | ||
| 74 | #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ | 74 | #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ |
| 75 | asm volatile( \ | 75 | asm volatile( \ |
| 76 | " l %0,%2\n" \ | 76 | " l %0,%2\n" \ |
| 77 | "0: lr %1,%0\n" \ | 77 | "0: lr %1,%0\n" \ |
| 78 | __op_string " %1,%3\n" \ | 78 | __op_string " %1,%3\n" \ |
| 79 | " cs %0,%1,%2\n" \ | 79 | " cs %0,%1,%2\n" \ |
| 80 | " jl 0b" \ | 80 | " jl 0b" \ |
| 81 | : "=&d" (__old), "=&d" (__new), \ | 81 | : "=&d" (__old), "=&d" (__new), \ |
| 82 | "=Q" (*(unsigned long *) __addr) \ | 82 | "=Q" (*(unsigned long *) __addr) \ |
| 83 | : "d" (__val), "Q" (*(unsigned long *) __addr) \ | 83 | : "d" (__val), "Q" (*(unsigned long *) __addr) \ |
| 84 | : "cc"); | 84 | : "cc"); |
| 85 | 85 | ||
| 86 | #else /* __s390x__ */ | 86 | #else /* __s390x__ */ |
| 87 | 87 | ||
| 88 | #define __BITOPS_ALIGN 7 | 88 | #define __BITOPS_ALIGN 7 |
| 89 | #define __BITOPS_WORDSIZE 64 | 89 | #define __BITOPS_WORDSIZE 64 |
| 90 | #define __BITOPS_OR "ogr" | 90 | #define __BITOPS_OR "ogr" |
| 91 | #define __BITOPS_AND "ngr" | 91 | #define __BITOPS_AND "ngr" |
| 92 | #define __BITOPS_XOR "xgr" | 92 | #define __BITOPS_XOR "xgr" |
| 93 | 93 | ||
| 94 | #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ | 94 | #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ |
| 95 | asm volatile( \ | 95 | asm volatile( \ |
| 96 | " lg %0,%2\n" \ | 96 | " lg %0,%2\n" \ |
| 97 | "0: lgr %1,%0\n" \ | 97 | "0: lgr %1,%0\n" \ |
| 98 | __op_string " %1,%3\n" \ | 98 | __op_string " %1,%3\n" \ |
| 99 | " csg %0,%1,%2\n" \ | 99 | " csg %0,%1,%2\n" \ |
| 100 | " jl 0b" \ | 100 | " jl 0b" \ |
| 101 | : "=&d" (__old), "=&d" (__new), \ | 101 | : "=&d" (__old), "=&d" (__new), \ |
| 102 | "=Q" (*(unsigned long *) __addr) \ | 102 | "=Q" (*(unsigned long *) __addr) \ |
| 103 | : "d" (__val), "Q" (*(unsigned long *) __addr) \ | 103 | : "d" (__val), "Q" (*(unsigned long *) __addr) \ |
| 104 | : "cc"); | 104 | : "cc"); |
| 105 | 105 | ||
| 106 | #endif /* __s390x__ */ | 106 | #endif /* __s390x__ */ |
| 107 | 107 | ||
| 108 | #define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE) | 108 | #define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE) |
| 109 | #define __BITOPS_BARRIER() asm volatile("" : : : "memory") | 109 | #define __BITOPS_BARRIER() asm volatile("" : : : "memory") |
| 110 | 110 | ||
| 111 | #ifdef CONFIG_SMP | 111 | #ifdef CONFIG_SMP |
| 112 | /* | 112 | /* |
| 113 | * SMP safe set_bit routine based on compare and swap (CS) | 113 | * SMP safe set_bit routine based on compare and swap (CS) |
| 114 | */ | 114 | */ |
| 115 | static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr) | 115 | static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr) |
| 116 | { | 116 | { |
| 117 | unsigned long addr, old, new, mask; | 117 | unsigned long addr, old, new, mask; |
| 118 | 118 | ||
| 119 | addr = (unsigned long) ptr; | 119 | addr = (unsigned long) ptr; |
| 120 | /* calculate address for CS */ | 120 | /* calculate address for CS */ |
| 121 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; | 121 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; |
| 122 | /* make OR mask */ | 122 | /* make OR mask */ |
| 123 | mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); | 123 | mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); |
| 124 | /* Do the atomic update. */ | 124 | /* Do the atomic update. */ |
| 125 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR); | 125 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR); |
| 126 | } | 126 | } |
| 127 | 127 | ||
| 128 | /* | 128 | /* |
| 129 | * SMP safe clear_bit routine based on compare and swap (CS) | 129 | * SMP safe clear_bit routine based on compare and swap (CS) |
| 130 | */ | 130 | */ |
| 131 | static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr) | 131 | static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr) |
| 132 | { | 132 | { |
| 133 | unsigned long addr, old, new, mask; | 133 | unsigned long addr, old, new, mask; |
| 134 | 134 | ||
| 135 | addr = (unsigned long) ptr; | 135 | addr = (unsigned long) ptr; |
| 136 | /* calculate address for CS */ | 136 | /* calculate address for CS */ |
| 137 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; | 137 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; |
| 138 | /* make AND mask */ | 138 | /* make AND mask */ |
| 139 | mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1))); | 139 | mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1))); |
| 140 | /* Do the atomic update. */ | 140 | /* Do the atomic update. */ |
| 141 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND); | 141 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND); |
| 142 | } | 142 | } |
| 143 | 143 | ||
| 144 | /* | 144 | /* |
| 145 | * SMP safe change_bit routine based on compare and swap (CS) | 145 | * SMP safe change_bit routine based on compare and swap (CS) |
| 146 | */ | 146 | */ |
| 147 | static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr) | 147 | static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr) |
| 148 | { | 148 | { |
| 149 | unsigned long addr, old, new, mask; | 149 | unsigned long addr, old, new, mask; |
| 150 | 150 | ||
| 151 | addr = (unsigned long) ptr; | 151 | addr = (unsigned long) ptr; |
| 152 | /* calculate address for CS */ | 152 | /* calculate address for CS */ |
| 153 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; | 153 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; |
| 154 | /* make XOR mask */ | 154 | /* make XOR mask */ |
| 155 | mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); | 155 | mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); |
| 156 | /* Do the atomic update. */ | 156 | /* Do the atomic update. */ |
| 157 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR); | 157 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR); |
| 158 | } | 158 | } |
| 159 | 159 | ||
| 160 | /* | 160 | /* |
| 161 | * SMP safe test_and_set_bit routine based on compare and swap (CS) | 161 | * SMP safe test_and_set_bit routine based on compare and swap (CS) |
| 162 | */ | 162 | */ |
| 163 | static inline int | 163 | static inline int |
| 164 | test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr) | 164 | test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr) |
| 165 | { | 165 | { |
| 166 | unsigned long addr, old, new, mask; | 166 | unsigned long addr, old, new, mask; |
| 167 | 167 | ||
| 168 | addr = (unsigned long) ptr; | 168 | addr = (unsigned long) ptr; |
| 169 | /* calculate address for CS */ | 169 | /* calculate address for CS */ |
| 170 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; | 170 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; |
| 171 | /* make OR/test mask */ | 171 | /* make OR/test mask */ |
| 172 | mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); | 172 | mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); |
| 173 | /* Do the atomic update. */ | 173 | /* Do the atomic update. */ |
| 174 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR); | 174 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR); |
| 175 | __BITOPS_BARRIER(); | 175 | __BITOPS_BARRIER(); |
| 176 | return (old & mask) != 0; | 176 | return (old & mask) != 0; |
| 177 | } | 177 | } |
| 178 | 178 | ||
| 179 | /* | 179 | /* |
| 180 | * SMP safe test_and_clear_bit routine based on compare and swap (CS) | 180 | * SMP safe test_and_clear_bit routine based on compare and swap (CS) |
| 181 | */ | 181 | */ |
| 182 | static inline int | 182 | static inline int |
| 183 | test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr) | 183 | test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr) |
| 184 | { | 184 | { |
| 185 | unsigned long addr, old, new, mask; | 185 | unsigned long addr, old, new, mask; |
| 186 | 186 | ||
| 187 | addr = (unsigned long) ptr; | 187 | addr = (unsigned long) ptr; |
| 188 | /* calculate address for CS */ | 188 | /* calculate address for CS */ |
| 189 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; | 189 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; |
| 190 | /* make AND/test mask */ | 190 | /* make AND/test mask */ |
| 191 | mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1))); | 191 | mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1))); |
| 192 | /* Do the atomic update. */ | 192 | /* Do the atomic update. */ |
| 193 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND); | 193 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND); |
| 194 | __BITOPS_BARRIER(); | 194 | __BITOPS_BARRIER(); |
| 195 | return (old ^ new) != 0; | 195 | return (old ^ new) != 0; |
| 196 | } | 196 | } |
| 197 | 197 | ||
| 198 | /* | 198 | /* |
| 199 | * SMP safe test_and_change_bit routine based on compare and swap (CS) | 199 | * SMP safe test_and_change_bit routine based on compare and swap (CS) |
| 200 | */ | 200 | */ |
| 201 | static inline int | 201 | static inline int |
| 202 | test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr) | 202 | test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr) |
| 203 | { | 203 | { |
| 204 | unsigned long addr, old, new, mask; | 204 | unsigned long addr, old, new, mask; |
| 205 | 205 | ||
| 206 | addr = (unsigned long) ptr; | 206 | addr = (unsigned long) ptr; |
| 207 | /* calculate address for CS */ | 207 | /* calculate address for CS */ |
| 208 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; | 208 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; |
| 209 | /* make XOR/test mask */ | 209 | /* make XOR/test mask */ |
| 210 | mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); | 210 | mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); |
| 211 | /* Do the atomic update. */ | 211 | /* Do the atomic update. */ |
| 212 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR); | 212 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR); |
| 213 | __BITOPS_BARRIER(); | 213 | __BITOPS_BARRIER(); |
| 214 | return (old & mask) != 0; | 214 | return (old & mask) != 0; |
| 215 | } | 215 | } |
| 216 | #endif /* CONFIG_SMP */ | 216 | #endif /* CONFIG_SMP */ |
| 217 | 217 | ||
| 218 | /* | 218 | /* |
| 219 | * fast, non-SMP set_bit routine | 219 | * fast, non-SMP set_bit routine |
| 220 | */ | 220 | */ |
| 221 | static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr) | 221 | static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr) |
| 222 | { | 222 | { |
| 223 | unsigned long addr; | 223 | unsigned long addr; |
| 224 | 224 | ||
| 225 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 225 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
| 226 | asm volatile( | 226 | asm volatile( |
| 227 | " oc %O0(1,%R0),%1" | 227 | " oc %O0(1,%R0),%1" |
| 228 | : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); | 228 | : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); |
| 229 | } | 229 | } |
| 230 | 230 | ||
| 231 | static inline void | 231 | static inline void |
| 232 | __constant_set_bit(const unsigned long nr, volatile unsigned long *ptr) | 232 | __constant_set_bit(const unsigned long nr, volatile unsigned long *ptr) |
| 233 | { | 233 | { |
| 234 | unsigned long addr; | 234 | unsigned long addr; |
| 235 | 235 | ||
| 236 | addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 236 | addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
| 237 | *(unsigned char *) addr |= 1 << (nr & 7); | 237 | *(unsigned char *) addr |= 1 << (nr & 7); |
| 238 | } | 238 | } |
| 239 | 239 | ||
| 240 | #define set_bit_simple(nr,addr) \ | 240 | #define set_bit_simple(nr,addr) \ |
| 241 | (__builtin_constant_p((nr)) ? \ | 241 | (__builtin_constant_p((nr)) ? \ |
| 242 | __constant_set_bit((nr),(addr)) : \ | 242 | __constant_set_bit((nr),(addr)) : \ |
| 243 | __set_bit((nr),(addr)) ) | 243 | __set_bit((nr),(addr)) ) |
| 244 | 244 | ||
| 245 | /* | 245 | /* |
| 246 | * fast, non-SMP clear_bit routine | 246 | * fast, non-SMP clear_bit routine |
| 247 | */ | 247 | */ |
| 248 | static inline void | 248 | static inline void |
| 249 | __clear_bit(unsigned long nr, volatile unsigned long *ptr) | 249 | __clear_bit(unsigned long nr, volatile unsigned long *ptr) |
| 250 | { | 250 | { |
| 251 | unsigned long addr; | 251 | unsigned long addr; |
| 252 | 252 | ||
| 253 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 253 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
| 254 | asm volatile( | 254 | asm volatile( |
| 255 | " nc %O0(1,%R0),%1" | 255 | " nc %O0(1,%R0),%1" |
| 256 | : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" ); | 256 | : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" ); |
| 257 | } | 257 | } |
| 258 | 258 | ||
| 259 | static inline void | 259 | static inline void |
| 260 | __constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr) | 260 | __constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr) |
| 261 | { | 261 | { |
| 262 | unsigned long addr; | 262 | unsigned long addr; |
| 263 | 263 | ||
| 264 | addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 264 | addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
| 265 | *(unsigned char *) addr &= ~(1 << (nr & 7)); | 265 | *(unsigned char *) addr &= ~(1 << (nr & 7)); |
| 266 | } | 266 | } |
| 267 | 267 | ||
| 268 | #define clear_bit_simple(nr,addr) \ | 268 | #define clear_bit_simple(nr,addr) \ |
| 269 | (__builtin_constant_p((nr)) ? \ | 269 | (__builtin_constant_p((nr)) ? \ |
| 270 | __constant_clear_bit((nr),(addr)) : \ | 270 | __constant_clear_bit((nr),(addr)) : \ |
| 271 | __clear_bit((nr),(addr)) ) | 271 | __clear_bit((nr),(addr)) ) |
| 272 | 272 | ||
| 273 | /* | 273 | /* |
| 274 | * fast, non-SMP change_bit routine | 274 | * fast, non-SMP change_bit routine |
| 275 | */ | 275 | */ |
| 276 | static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr) | 276 | static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr) |
| 277 | { | 277 | { |
| 278 | unsigned long addr; | 278 | unsigned long addr; |
| 279 | 279 | ||
| 280 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 280 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
| 281 | asm volatile( | 281 | asm volatile( |
| 282 | " xc %O0(1,%R0),%1" | 282 | " xc %O0(1,%R0),%1" |
| 283 | : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); | 283 | : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); |
| 284 | } | 284 | } |
| 285 | 285 | ||
| 286 | static inline void | 286 | static inline void |
| 287 | __constant_change_bit(const unsigned long nr, volatile unsigned long *ptr) | 287 | __constant_change_bit(const unsigned long nr, volatile unsigned long *ptr) |
| 288 | { | 288 | { |
| 289 | unsigned long addr; | 289 | unsigned long addr; |
| 290 | 290 | ||
| 291 | addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 291 | addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
| 292 | *(unsigned char *) addr ^= 1 << (nr & 7); | 292 | *(unsigned char *) addr ^= 1 << (nr & 7); |
| 293 | } | 293 | } |
| 294 | 294 | ||
| 295 | #define change_bit_simple(nr,addr) \ | 295 | #define change_bit_simple(nr,addr) \ |
| 296 | (__builtin_constant_p((nr)) ? \ | 296 | (__builtin_constant_p((nr)) ? \ |
| 297 | __constant_change_bit((nr),(addr)) : \ | 297 | __constant_change_bit((nr),(addr)) : \ |
| 298 | __change_bit((nr),(addr)) ) | 298 | __change_bit((nr),(addr)) ) |
| 299 | 299 | ||
| 300 | /* | 300 | /* |
| 301 | * fast, non-SMP test_and_set_bit routine | 301 | * fast, non-SMP test_and_set_bit routine |
| 302 | */ | 302 | */ |
| 303 | static inline int | 303 | static inline int |
| 304 | test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr) | 304 | test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr) |
| 305 | { | 305 | { |
| 306 | unsigned long addr; | 306 | unsigned long addr; |
| 307 | unsigned char ch; | 307 | unsigned char ch; |
| 308 | 308 | ||
| 309 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 309 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
| 310 | ch = *(unsigned char *) addr; | 310 | ch = *(unsigned char *) addr; |
| 311 | asm volatile( | 311 | asm volatile( |
| 312 | " oc %O0(1,%R0),%1" | 312 | " oc %O0(1,%R0),%1" |
| 313 | : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) | 313 | : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) |
| 314 | : "cc", "memory"); | 314 | : "cc", "memory"); |
| 315 | return (ch >> (nr & 7)) & 1; | 315 | return (ch >> (nr & 7)) & 1; |
| 316 | } | 316 | } |
| 317 | #define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y) | 317 | #define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y) |
| 318 | 318 | ||
| 319 | /* | 319 | /* |
| 320 | * fast, non-SMP test_and_clear_bit routine | 320 | * fast, non-SMP test_and_clear_bit routine |
| 321 | */ | 321 | */ |
| 322 | static inline int | 322 | static inline int |
| 323 | test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr) | 323 | test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr) |
| 324 | { | 324 | { |
| 325 | unsigned long addr; | 325 | unsigned long addr; |
| 326 | unsigned char ch; | 326 | unsigned char ch; |
| 327 | 327 | ||
| 328 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 328 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
| 329 | ch = *(unsigned char *) addr; | 329 | ch = *(unsigned char *) addr; |
| 330 | asm volatile( | 330 | asm volatile( |
| 331 | " nc %O0(1,%R0),%1" | 331 | " nc %O0(1,%R0),%1" |
| 332 | : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) | 332 | : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) |
| 333 | : "cc", "memory"); | 333 | : "cc", "memory"); |
| 334 | return (ch >> (nr & 7)) & 1; | 334 | return (ch >> (nr & 7)) & 1; |
| 335 | } | 335 | } |
| 336 | #define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y) | 336 | #define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y) |
| 337 | 337 | ||
| 338 | /* | 338 | /* |
| 339 | * fast, non-SMP test_and_change_bit routine | 339 | * fast, non-SMP test_and_change_bit routine |
| 340 | */ | 340 | */ |
| 341 | static inline int | 341 | static inline int |
| 342 | test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr) | 342 | test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr) |
| 343 | { | 343 | { |
| 344 | unsigned long addr; | 344 | unsigned long addr; |
| 345 | unsigned char ch; | 345 | unsigned char ch; |
| 346 | 346 | ||
| 347 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 347 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
| 348 | ch = *(unsigned char *) addr; | 348 | ch = *(unsigned char *) addr; |
| 349 | asm volatile( | 349 | asm volatile( |
| 350 | " xc %O0(1,%R0),%1" | 350 | " xc %O0(1,%R0),%1" |
| 351 | : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) | 351 | : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) |
| 352 | : "cc", "memory"); | 352 | : "cc", "memory"); |
| 353 | return (ch >> (nr & 7)) & 1; | 353 | return (ch >> (nr & 7)) & 1; |
| 354 | } | 354 | } |
| 355 | #define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y) | 355 | #define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y) |
| 356 | 356 | ||
| 357 | #ifdef CONFIG_SMP | 357 | #ifdef CONFIG_SMP |
| 358 | #define set_bit set_bit_cs | 358 | #define set_bit set_bit_cs |
| 359 | #define clear_bit clear_bit_cs | 359 | #define clear_bit clear_bit_cs |
| 360 | #define change_bit change_bit_cs | 360 | #define change_bit change_bit_cs |
| 361 | #define test_and_set_bit test_and_set_bit_cs | 361 | #define test_and_set_bit test_and_set_bit_cs |
| 362 | #define test_and_clear_bit test_and_clear_bit_cs | 362 | #define test_and_clear_bit test_and_clear_bit_cs |
| 363 | #define test_and_change_bit test_and_change_bit_cs | 363 | #define test_and_change_bit test_and_change_bit_cs |
| 364 | #else | 364 | #else |
| 365 | #define set_bit set_bit_simple | 365 | #define set_bit set_bit_simple |
| 366 | #define clear_bit clear_bit_simple | 366 | #define clear_bit clear_bit_simple |
| 367 | #define change_bit change_bit_simple | 367 | #define change_bit change_bit_simple |
| 368 | #define test_and_set_bit test_and_set_bit_simple | 368 | #define test_and_set_bit test_and_set_bit_simple |
| 369 | #define test_and_clear_bit test_and_clear_bit_simple | 369 | #define test_and_clear_bit test_and_clear_bit_simple |
| 370 | #define test_and_change_bit test_and_change_bit_simple | 370 | #define test_and_change_bit test_and_change_bit_simple |
| 371 | #endif | 371 | #endif |
| 372 | 372 | ||
| 373 | 373 | ||
| 374 | /* | 374 | /* |
| 375 | * This routine doesn't need to be atomic. | 375 | * This routine doesn't need to be atomic. |
| 376 | */ | 376 | */ |
| 377 | 377 | ||
| 378 | static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr) | 378 | static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr) |
| 379 | { | 379 | { |
| 380 | unsigned long addr; | 380 | unsigned long addr; |
| 381 | unsigned char ch; | 381 | unsigned char ch; |
| 382 | 382 | ||
| 383 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 383 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
| 384 | ch = *(volatile unsigned char *) addr; | 384 | ch = *(volatile unsigned char *) addr; |
| 385 | return (ch >> (nr & 7)) & 1; | 385 | return (ch >> (nr & 7)) & 1; |
| 386 | } | 386 | } |
| 387 | 387 | ||
| 388 | static inline int | 388 | static inline int |
| 389 | __constant_test_bit(unsigned long nr, const volatile unsigned long *addr) { | 389 | __constant_test_bit(unsigned long nr, const volatile unsigned long *addr) { |
| 390 | return (((volatile char *) addr) | 390 | return (((volatile char *) addr) |
| 391 | [(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7))) != 0; | 391 | [(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7))) != 0; |
| 392 | } | 392 | } |
| 393 | 393 | ||
| 394 | #define test_bit(nr,addr) \ | 394 | #define test_bit(nr,addr) \ |
| 395 | (__builtin_constant_p((nr)) ? \ | 395 | (__builtin_constant_p((nr)) ? \ |
| 396 | __constant_test_bit((nr),(addr)) : \ | 396 | __constant_test_bit((nr),(addr)) : \ |
| 397 | __test_bit((nr),(addr)) ) | 397 | __test_bit((nr),(addr)) ) |
| 398 | 398 | ||
| 399 | /* | 399 | /* |
| 400 | * Optimized find bit helper functions. | 400 | * Optimized find bit helper functions. |
| 401 | */ | 401 | */ |
| 402 | 402 | ||
| 403 | /** | 403 | /** |
| 404 | * __ffz_word_loop - find byte offset of first long != -1UL | 404 | * __ffz_word_loop - find byte offset of first long != -1UL |
| 405 | * @addr: pointer to array of unsigned long | 405 | * @addr: pointer to array of unsigned long |
| 406 | * @size: size of the array in bits | 406 | * @size: size of the array in bits |
| 407 | */ | 407 | */ |
| 408 | static inline unsigned long __ffz_word_loop(const unsigned long *addr, | 408 | static inline unsigned long __ffz_word_loop(const unsigned long *addr, |
| 409 | unsigned long size) | 409 | unsigned long size) |
| 410 | { | 410 | { |
| 411 | typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; | 411 | typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; |
| 412 | unsigned long bytes = 0; | 412 | unsigned long bytes = 0; |
| 413 | 413 | ||
| 414 | asm volatile( | 414 | asm volatile( |
| 415 | #ifndef __s390x__ | 415 | #ifndef __s390x__ |
| 416 | " ahi %1,-1\n" | 416 | " ahi %1,-1\n" |
| 417 | " sra %1,5\n" | 417 | " sra %1,5\n" |
| 418 | " jz 1f\n" | 418 | " jz 1f\n" |
| 419 | "0: c %2,0(%0,%3)\n" | 419 | "0: c %2,0(%0,%3)\n" |
| 420 | " jne 1f\n" | 420 | " jne 1f\n" |
| 421 | " la %0,4(%0)\n" | 421 | " la %0,4(%0)\n" |
| 422 | " brct %1,0b\n" | 422 | " brct %1,0b\n" |
| 423 | "1:\n" | 423 | "1:\n" |
| 424 | #else | 424 | #else |
| 425 | " aghi %1,-1\n" | 425 | " aghi %1,-1\n" |
| 426 | " srag %1,%1,6\n" | 426 | " srag %1,%1,6\n" |
| 427 | " jz 1f\n" | 427 | " jz 1f\n" |
| 428 | "0: cg %2,0(%0,%3)\n" | 428 | "0: cg %2,0(%0,%3)\n" |
| 429 | " jne 1f\n" | 429 | " jne 1f\n" |
| 430 | " la %0,8(%0)\n" | 430 | " la %0,8(%0)\n" |
| 431 | " brct %1,0b\n" | 431 | " brct %1,0b\n" |
| 432 | "1:\n" | 432 | "1:\n" |
| 433 | #endif | 433 | #endif |
| 434 | : "+&a" (bytes), "+&d" (size) | 434 | : "+&a" (bytes), "+&d" (size) |
| 435 | : "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr) | 435 | : "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr) |
| 436 | : "cc" ); | 436 | : "cc" ); |
| 437 | return bytes; | 437 | return bytes; |
| 438 | } | 438 | } |
| 439 | 439 | ||
| 440 | /** | 440 | /** |
| 441 | * __ffs_word_loop - find byte offset of first long != 0UL | 441 | * __ffs_word_loop - find byte offset of first long != 0UL |
| 442 | * @addr: pointer to array of unsigned long | 442 | * @addr: pointer to array of unsigned long |
| 443 | * @size: size of the array in bits | 443 | * @size: size of the array in bits |
| 444 | */ | 444 | */ |
| 445 | static inline unsigned long __ffs_word_loop(const unsigned long *addr, | 445 | static inline unsigned long __ffs_word_loop(const unsigned long *addr, |
| 446 | unsigned long size) | 446 | unsigned long size) |
| 447 | { | 447 | { |
| 448 | typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; | 448 | typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; |
| 449 | unsigned long bytes = 0; | 449 | unsigned long bytes = 0; |
| 450 | 450 | ||
| 451 | asm volatile( | 451 | asm volatile( |
| 452 | #ifndef __s390x__ | 452 | #ifndef __s390x__ |
| 453 | " ahi %1,-1\n" | 453 | " ahi %1,-1\n" |
| 454 | " sra %1,5\n" | 454 | " sra %1,5\n" |
| 455 | " jz 1f\n" | 455 | " jz 1f\n" |
| 456 | "0: c %2,0(%0,%3)\n" | 456 | "0: c %2,0(%0,%3)\n" |
| 457 | " jne 1f\n" | 457 | " jne 1f\n" |
| 458 | " la %0,4(%0)\n" | 458 | " la %0,4(%0)\n" |
| 459 | " brct %1,0b\n" | 459 | " brct %1,0b\n" |
| 460 | "1:\n" | 460 | "1:\n" |
| 461 | #else | 461 | #else |
| 462 | " aghi %1,-1\n" | 462 | " aghi %1,-1\n" |
| 463 | " srag %1,%1,6\n" | 463 | " srag %1,%1,6\n" |
| 464 | " jz 1f\n" | 464 | " jz 1f\n" |
| 465 | "0: cg %2,0(%0,%3)\n" | 465 | "0: cg %2,0(%0,%3)\n" |
| 466 | " jne 1f\n" | 466 | " jne 1f\n" |
| 467 | " la %0,8(%0)\n" | 467 | " la %0,8(%0)\n" |
| 468 | " brct %1,0b\n" | 468 | " brct %1,0b\n" |
| 469 | "1:\n" | 469 | "1:\n" |
| 470 | #endif | 470 | #endif |
| 471 | : "+&a" (bytes), "+&a" (size) | 471 | : "+&a" (bytes), "+&a" (size) |
| 472 | : "d" (0UL), "a" (addr), "m" (*(addrtype *) addr) | 472 | : "d" (0UL), "a" (addr), "m" (*(addrtype *) addr) |
| 473 | : "cc" ); | 473 | : "cc" ); |
| 474 | return bytes; | 474 | return bytes; |
| 475 | } | 475 | } |
| 476 | 476 | ||
| 477 | /** | 477 | /** |
| 478 | * __ffz_word - add number of the first unset bit | 478 | * __ffz_word - add number of the first unset bit |
| 479 | * @nr: base value the bit number is added to | 479 | * @nr: base value the bit number is added to |
| 480 | * @word: the word that is searched for unset bits | 480 | * @word: the word that is searched for unset bits |
| 481 | */ | 481 | */ |
| 482 | static inline unsigned long __ffz_word(unsigned long nr, unsigned long word) | 482 | static inline unsigned long __ffz_word(unsigned long nr, unsigned long word) |
| 483 | { | 483 | { |
| 484 | #ifdef __s390x__ | 484 | #ifdef __s390x__ |
| 485 | if ((word & 0xffffffff) == 0xffffffff) { | 485 | if ((word & 0xffffffff) == 0xffffffff) { |
| 486 | word >>= 32; | 486 | word >>= 32; |
| 487 | nr += 32; | 487 | nr += 32; |
| 488 | } | 488 | } |
| 489 | #endif | 489 | #endif |
| 490 | if ((word & 0xffff) == 0xffff) { | 490 | if ((word & 0xffff) == 0xffff) { |
| 491 | word >>= 16; | 491 | word >>= 16; |
| 492 | nr += 16; | 492 | nr += 16; |
| 493 | } | 493 | } |
| 494 | if ((word & 0xff) == 0xff) { | 494 | if ((word & 0xff) == 0xff) { |
| 495 | word >>= 8; | 495 | word >>= 8; |
| 496 | nr += 8; | 496 | nr += 8; |
| 497 | } | 497 | } |
| 498 | return nr + _zb_findmap[(unsigned char) word]; | 498 | return nr + _zb_findmap[(unsigned char) word]; |
| 499 | } | 499 | } |
| 500 | 500 | ||
| 501 | /** | 501 | /** |
| 502 | * __ffs_word - add number of the first set bit | 502 | * __ffs_word - add number of the first set bit |
| 503 | * @nr: base value the bit number is added to | 503 | * @nr: base value the bit number is added to |
| 504 | * @word: the word that is searched for set bits | 504 | * @word: the word that is searched for set bits |
| 505 | */ | 505 | */ |
| 506 | static inline unsigned long __ffs_word(unsigned long nr, unsigned long word) | 506 | static inline unsigned long __ffs_word(unsigned long nr, unsigned long word) |
| 507 | { | 507 | { |
| 508 | #ifdef __s390x__ | 508 | #ifdef __s390x__ |
| 509 | if ((word & 0xffffffff) == 0) { | 509 | if ((word & 0xffffffff) == 0) { |
| 510 | word >>= 32; | 510 | word >>= 32; |
| 511 | nr += 32; | 511 | nr += 32; |
| 512 | } | 512 | } |
| 513 | #endif | 513 | #endif |
| 514 | if ((word & 0xffff) == 0) { | 514 | if ((word & 0xffff) == 0) { |
| 515 | word >>= 16; | 515 | word >>= 16; |
| 516 | nr += 16; | 516 | nr += 16; |
| 517 | } | 517 | } |
| 518 | if ((word & 0xff) == 0) { | 518 | if ((word & 0xff) == 0) { |
| 519 | word >>= 8; | 519 | word >>= 8; |
| 520 | nr += 8; | 520 | nr += 8; |
| 521 | } | 521 | } |
| 522 | return nr + _sb_findmap[(unsigned char) word]; | 522 | return nr + _sb_findmap[(unsigned char) word]; |
| 523 | } | 523 | } |
| 524 | 524 | ||
| 525 | 525 | ||
| 526 | /** | 526 | /** |
| 527 | * __load_ulong_be - load big endian unsigned long | 527 | * __load_ulong_be - load big endian unsigned long |
| 528 | * @p: pointer to array of unsigned long | 528 | * @p: pointer to array of unsigned long |
| 529 | * @offset: byte offset of source value in the array | 529 | * @offset: byte offset of source value in the array |
| 530 | */ | 530 | */ |
| 531 | static inline unsigned long __load_ulong_be(const unsigned long *p, | 531 | static inline unsigned long __load_ulong_be(const unsigned long *p, |
| 532 | unsigned long offset) | 532 | unsigned long offset) |
| 533 | { | 533 | { |
| 534 | p = (unsigned long *)((unsigned long) p + offset); | 534 | p = (unsigned long *)((unsigned long) p + offset); |
| 535 | return *p; | 535 | return *p; |
| 536 | } | 536 | } |
| 537 | 537 | ||
| 538 | /** | 538 | /** |
| 539 | * __load_ulong_le - load little endian unsigned long | 539 | * __load_ulong_le - load little endian unsigned long |
| 540 | * @p: pointer to array of unsigned long | 540 | * @p: pointer to array of unsigned long |
| 541 | * @offset: byte offset of source value in the array | 541 | * @offset: byte offset of source value in the array |
| 542 | */ | 542 | */ |
| 543 | static inline unsigned long __load_ulong_le(const unsigned long *p, | 543 | static inline unsigned long __load_ulong_le(const unsigned long *p, |
| 544 | unsigned long offset) | 544 | unsigned long offset) |
| 545 | { | 545 | { |
| 546 | unsigned long word; | 546 | unsigned long word; |
| 547 | 547 | ||
| 548 | p = (unsigned long *)((unsigned long) p + offset); | 548 | p = (unsigned long *)((unsigned long) p + offset); |
| 549 | #ifndef __s390x__ | 549 | #ifndef __s390x__ |
| 550 | asm volatile( | 550 | asm volatile( |
| 551 | " ic %0,%O1(%R1)\n" | 551 | " ic %0,%O1(%R1)\n" |
| 552 | " icm %0,2,%O1+1(%R1)\n" | 552 | " icm %0,2,%O1+1(%R1)\n" |
| 553 | " icm %0,4,%O1+2(%R1)\n" | 553 | " icm %0,4,%O1+2(%R1)\n" |
| 554 | " icm %0,8,%O1+3(%R1)" | 554 | " icm %0,8,%O1+3(%R1)" |
| 555 | : "=&d" (word) : "Q" (*p) : "cc"); | 555 | : "=&d" (word) : "Q" (*p) : "cc"); |
| 556 | #else | 556 | #else |
| 557 | asm volatile( | 557 | asm volatile( |
| 558 | " lrvg %0,%1" | 558 | " lrvg %0,%1" |
| 559 | : "=d" (word) : "m" (*p) ); | 559 | : "=d" (word) : "m" (*p) ); |
| 560 | #endif | 560 | #endif |
| 561 | return word; | 561 | return word; |
| 562 | } | 562 | } |
| 563 | 563 | ||
| 564 | /* | 564 | /* |
| 565 | * The various find bit functions. | 565 | * The various find bit functions. |
| 566 | */ | 566 | */ |
| 567 | 567 | ||
| 568 | /* | 568 | /* |
| 569 | * ffz - find first zero in word. | 569 | * ffz - find first zero in word. |
| 570 | * @word: The word to search | 570 | * @word: The word to search |
| 571 | * | 571 | * |
| 572 | * Undefined if no zero exists, so code should check against ~0UL first. | 572 | * Undefined if no zero exists, so code should check against ~0UL first. |
| 573 | */ | 573 | */ |
| 574 | static inline unsigned long ffz(unsigned long word) | 574 | static inline unsigned long ffz(unsigned long word) |
| 575 | { | 575 | { |
| 576 | return __ffz_word(0, word); | 576 | return __ffz_word(0, word); |
| 577 | } | 577 | } |
| 578 | 578 | ||
| 579 | /** | 579 | /** |
| 580 | * __ffs - find first bit in word. | 580 | * __ffs - find first bit in word. |
| 581 | * @word: The word to search | 581 | * @word: The word to search |
| 582 | * | 582 | * |
| 583 | * Undefined if no bit exists, so code should check against 0 first. | 583 | * Undefined if no bit exists, so code should check against 0 first. |
| 584 | */ | 584 | */ |
| 585 | static inline unsigned long __ffs (unsigned long word) | 585 | static inline unsigned long __ffs (unsigned long word) |
| 586 | { | 586 | { |
| 587 | return __ffs_word(0, word); | 587 | return __ffs_word(0, word); |
| 588 | } | 588 | } |
| 589 | 589 | ||
| 590 | /** | 590 | /** |
| 591 | * ffs - find first bit set | 591 | * ffs - find first bit set |
| 592 | * @x: the word to search | 592 | * @x: the word to search |
| 593 | * | 593 | * |
| 594 | * This is defined the same way as | 594 | * This is defined the same way as |
| 595 | * the libc and compiler builtin ffs routines, therefore | 595 | * the libc and compiler builtin ffs routines, therefore |
| 596 | * differs in spirit from the above ffz (man ffs). | 596 | * differs in spirit from the above ffz (man ffs). |
| 597 | */ | 597 | */ |
| 598 | static inline int ffs(int x) | 598 | static inline int ffs(int x) |
| 599 | { | 599 | { |
| 600 | if (!x) | 600 | if (!x) |
| 601 | return 0; | 601 | return 0; |
| 602 | return __ffs_word(1, x); | 602 | return __ffs_word(1, x); |
| 603 | } | 603 | } |
| 604 | 604 | ||
| 605 | /** | 605 | /** |
| 606 | * find_first_zero_bit - find the first zero bit in a memory region | 606 | * find_first_zero_bit - find the first zero bit in a memory region |
| 607 | * @addr: The address to start the search at | 607 | * @addr: The address to start the search at |
| 608 | * @size: The maximum size to search | 608 | * @size: The maximum size to search |
| 609 | * | 609 | * |
| 610 | * Returns the bit-number of the first zero bit, not the number of the byte | 610 | * Returns the bit-number of the first zero bit, not the number of the byte |
| 611 | * containing a bit. | 611 | * containing a bit. |
| 612 | */ | 612 | */ |
| 613 | static inline unsigned long find_first_zero_bit(const unsigned long *addr, | 613 | static inline unsigned long find_first_zero_bit(const unsigned long *addr, |
| 614 | unsigned long size) | 614 | unsigned long size) |
| 615 | { | 615 | { |
| 616 | unsigned long bytes, bits; | 616 | unsigned long bytes, bits; |
| 617 | 617 | ||
| 618 | if (!size) | 618 | if (!size) |
| 619 | return 0; | 619 | return 0; |
| 620 | bytes = __ffz_word_loop(addr, size); | 620 | bytes = __ffz_word_loop(addr, size); |
| 621 | bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes)); | 621 | bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes)); |
| 622 | return (bits < size) ? bits : size; | 622 | return (bits < size) ? bits : size; |
| 623 | } | 623 | } |
| 624 | 624 | ||
| 625 | /** | 625 | /** |
| 626 | * find_first_bit - find the first set bit in a memory region | 626 | * find_first_bit - find the first set bit in a memory region |
| 627 | * @addr: The address to start the search at | 627 | * @addr: The address to start the search at |
| 628 | * @size: The maximum size to search | 628 | * @size: The maximum size to search |
| 629 | * | 629 | * |
| 630 | * Returns the bit-number of the first set bit, not the number of the byte | 630 | * Returns the bit-number of the first set bit, not the number of the byte |
| 631 | * containing a bit. | 631 | * containing a bit. |
| 632 | */ | 632 | */ |
| 633 | static inline unsigned long find_first_bit(const unsigned long * addr, | 633 | static inline unsigned long find_first_bit(const unsigned long * addr, |
| 634 | unsigned long size) | 634 | unsigned long size) |
| 635 | { | 635 | { |
| 636 | unsigned long bytes, bits; | 636 | unsigned long bytes, bits; |
| 637 | 637 | ||
| 638 | if (!size) | 638 | if (!size) |
| 639 | return 0; | 639 | return 0; |
| 640 | bytes = __ffs_word_loop(addr, size); | 640 | bytes = __ffs_word_loop(addr, size); |
| 641 | bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes)); | 641 | bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes)); |
| 642 | return (bits < size) ? bits : size; | 642 | return (bits < size) ? bits : size; |
| 643 | } | 643 | } |
| 644 | 644 | ||
| 645 | /** | 645 | /** |
| 646 | * find_next_zero_bit - find the first zero bit in a memory region | 646 | * find_next_zero_bit - find the first zero bit in a memory region |
| 647 | * @addr: The address to base the search on | 647 | * @addr: The address to base the search on |
| 648 | * @offset: The bitnumber to start searching at | 648 | * @offset: The bitnumber to start searching at |
| 649 | * @size: The maximum size to search | 649 | * @size: The maximum size to search |
| 650 | */ | 650 | */ |
| 651 | static inline int find_next_zero_bit (const unsigned long * addr, | 651 | static inline int find_next_zero_bit (const unsigned long * addr, |
| 652 | unsigned long size, | 652 | unsigned long size, |
| 653 | unsigned long offset) | 653 | unsigned long offset) |
| 654 | { | 654 | { |
| 655 | const unsigned long *p; | 655 | const unsigned long *p; |
| 656 | unsigned long bit, set; | 656 | unsigned long bit, set; |
| 657 | 657 | ||
| 658 | if (offset >= size) | 658 | if (offset >= size) |
| 659 | return size; | 659 | return size; |
| 660 | bit = offset & (__BITOPS_WORDSIZE - 1); | 660 | bit = offset & (__BITOPS_WORDSIZE - 1); |
| 661 | offset -= bit; | 661 | offset -= bit; |
| 662 | size -= offset; | 662 | size -= offset; |
| 663 | p = addr + offset / __BITOPS_WORDSIZE; | 663 | p = addr + offset / __BITOPS_WORDSIZE; |
| 664 | if (bit) { | 664 | if (bit) { |
| 665 | /* | 665 | /* |
| 666 | * __ffz_word returns __BITOPS_WORDSIZE | 666 | * __ffz_word returns __BITOPS_WORDSIZE |
| 667 | * if no zero bit is present in the word. | 667 | * if no zero bit is present in the word. |
| 668 | */ | 668 | */ |
| 669 | set = __ffz_word(bit, *p >> bit); | 669 | set = __ffz_word(bit, *p >> bit); |
| 670 | if (set >= size) | 670 | if (set >= size) |
| 671 | return size + offset; | 671 | return size + offset; |
| 672 | if (set < __BITOPS_WORDSIZE) | 672 | if (set < __BITOPS_WORDSIZE) |
| 673 | return set + offset; | 673 | return set + offset; |
| 674 | offset += __BITOPS_WORDSIZE; | 674 | offset += __BITOPS_WORDSIZE; |
| 675 | size -= __BITOPS_WORDSIZE; | 675 | size -= __BITOPS_WORDSIZE; |
| 676 | p++; | 676 | p++; |
| 677 | } | 677 | } |
| 678 | return offset + find_first_zero_bit(p, size); | 678 | return offset + find_first_zero_bit(p, size); |
| 679 | } | 679 | } |
| 680 | 680 | ||
| 681 | /** | 681 | /** |
| 682 | * find_next_bit - find the first set bit in a memory region | 682 | * find_next_bit - find the first set bit in a memory region |
| 683 | * @addr: The address to base the search on | 683 | * @addr: The address to base the search on |
| 684 | * @offset: The bitnumber to start searching at | 684 | * @offset: The bitnumber to start searching at |
| 685 | * @size: The maximum size to search | 685 | * @size: The maximum size to search |
| 686 | */ | 686 | */ |
| 687 | static inline int find_next_bit (const unsigned long * addr, | 687 | static inline int find_next_bit (const unsigned long * addr, |
| 688 | unsigned long size, | 688 | unsigned long size, |
| 689 | unsigned long offset) | 689 | unsigned long offset) |
| 690 | { | 690 | { |
| 691 | const unsigned long *p; | 691 | const unsigned long *p; |
| 692 | unsigned long bit, set; | 692 | unsigned long bit, set; |
| 693 | 693 | ||
| 694 | if (offset >= size) | 694 | if (offset >= size) |
| 695 | return size; | 695 | return size; |
| 696 | bit = offset & (__BITOPS_WORDSIZE - 1); | 696 | bit = offset & (__BITOPS_WORDSIZE - 1); |
| 697 | offset -= bit; | 697 | offset -= bit; |
| 698 | size -= offset; | 698 | size -= offset; |
| 699 | p = addr + offset / __BITOPS_WORDSIZE; | 699 | p = addr + offset / __BITOPS_WORDSIZE; |
| 700 | if (bit) { | 700 | if (bit) { |
| 701 | /* | 701 | /* |
| 702 | * __ffs_word returns __BITOPS_WORDSIZE | 702 | * __ffs_word returns __BITOPS_WORDSIZE |
| 703 | * if no one bit is present in the word. | 703 | * if no one bit is present in the word. |
| 704 | */ | 704 | */ |
| 705 | set = __ffs_word(0, *p & (~0UL << bit)); | 705 | set = __ffs_word(0, *p & (~0UL << bit)); |
| 706 | if (set >= size) | 706 | if (set >= size) |
| 707 | return size + offset; | 707 | return size + offset; |
| 708 | if (set < __BITOPS_WORDSIZE) | 708 | if (set < __BITOPS_WORDSIZE) |
| 709 | return set + offset; | 709 | return set + offset; |
| 710 | offset += __BITOPS_WORDSIZE; | 710 | offset += __BITOPS_WORDSIZE; |
| 711 | size -= __BITOPS_WORDSIZE; | 711 | size -= __BITOPS_WORDSIZE; |
| 712 | p++; | 712 | p++; |
| 713 | } | 713 | } |
| 714 | return offset + find_first_bit(p, size); | 714 | return offset + find_first_bit(p, size); |
| 715 | } | 715 | } |
| 716 | 716 | ||
| 717 | /* | 717 | /* |
| 718 | * Every architecture must define this function. It's the fastest | 718 | * Every architecture must define this function. It's the fastest |
| 719 | * way of searching a 140-bit bitmap where the first 100 bits are | 719 | * way of searching a 140-bit bitmap where the first 100 bits are |
| 720 | * unlikely to be set. It's guaranteed that at least one of the 140 | 720 | * unlikely to be set. It's guaranteed that at least one of the 140 |
| 721 | * bits is cleared. | 721 | * bits is cleared. |
| 722 | */ | 722 | */ |
| 723 | static inline int sched_find_first_bit(unsigned long *b) | 723 | static inline int sched_find_first_bit(unsigned long *b) |
| 724 | { | 724 | { |
| 725 | return find_first_bit(b, 140); | 725 | return find_first_bit(b, 140); |
| 726 | } | 726 | } |
| 727 | 727 | ||
| 728 | #include <asm-generic/bitops/fls.h> | 728 | #include <asm-generic/bitops/fls.h> |
| 729 | #include <asm-generic/bitops/__fls.h> | 729 | #include <asm-generic/bitops/__fls.h> |
| 730 | #include <asm-generic/bitops/fls64.h> | 730 | #include <asm-generic/bitops/fls64.h> |
| 731 | 731 | ||
| 732 | #include <asm-generic/bitops/hweight.h> | 732 | #include <asm-generic/bitops/hweight.h> |
| 733 | #include <asm-generic/bitops/lock.h> | 733 | #include <asm-generic/bitops/lock.h> |
| 734 | 734 | ||
| 735 | /* | 735 | /* |
| 736 | * ATTENTION: intel byte ordering convention for ext2 and minix !! | 736 | * ATTENTION: intel byte ordering convention for ext2 and minix !! |
| 737 | * bit 0 is the LSB of addr; bit 31 is the MSB of addr; | 737 | * bit 0 is the LSB of addr; bit 31 is the MSB of addr; |
| 738 | * bit 32 is the LSB of (addr+4). | 738 | * bit 32 is the LSB of (addr+4). |
| 739 | * That combined with the little endian byte order of Intel gives the | 739 | * That combined with the little endian byte order of Intel gives the |
| 740 | * following bit order in memory: | 740 | * following bit order in memory: |
| 741 | * 07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \ | 741 | * 07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \ |
| 742 | * 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24 | 742 | * 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24 |
| 743 | */ | 743 | */ |
| 744 | 744 | ||
| 745 | static inline void __set_bit_le(unsigned long nr, void *addr) | 745 | static inline void __set_bit_le(unsigned long nr, void *addr) |
| 746 | { | 746 | { |
| 747 | __set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); | 747 | __set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); |
| 748 | } | 748 | } |
| 749 | 749 | ||
| 750 | static inline void __clear_bit_le(unsigned long nr, void *addr) | 750 | static inline void __clear_bit_le(unsigned long nr, void *addr) |
| 751 | { | 751 | { |
| 752 | __clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); | 752 | __clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); |
| 753 | } | 753 | } |
| 754 | 754 | ||
| 755 | static inline int __test_and_set_bit_le(unsigned long nr, void *addr) | 755 | static inline int __test_and_set_bit_le(unsigned long nr, void *addr) |
| 756 | { | 756 | { |
| 757 | return __test_and_set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); | 757 | return __test_and_set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); |
| 758 | } | 758 | } |
| 759 | 759 | ||
| 760 | static inline int test_and_set_bit_le(unsigned long nr, void *addr) | 760 | static inline int test_and_set_bit_le(unsigned long nr, void *addr) |
| 761 | { | 761 | { |
| 762 | return test_and_set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); | 762 | return test_and_set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); |
| 763 | } | 763 | } |
| 764 | 764 | ||
| 765 | static inline int __test_and_clear_bit_le(unsigned long nr, void *addr) | 765 | static inline int __test_and_clear_bit_le(unsigned long nr, void *addr) |
| 766 | { | 766 | { |
| 767 | return __test_and_clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); | 767 | return __test_and_clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); |
| 768 | } | 768 | } |
| 769 | 769 | ||
| 770 | static inline int test_and_clear_bit_le(unsigned long nr, void *addr) | 770 | static inline int test_and_clear_bit_le(unsigned long nr, void *addr) |
| 771 | { | 771 | { |
| 772 | return test_and_clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); | 772 | return test_and_clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); |
| 773 | } | 773 | } |
| 774 | 774 | ||
| 775 | static inline int test_bit_le(unsigned long nr, const void *addr) | 775 | static inline int test_bit_le(unsigned long nr, const void *addr) |
| 776 | { | 776 | { |
| 777 | return test_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); | 777 | return test_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); |
| 778 | } | 778 | } |
| 779 | 779 | ||
| 780 | static inline int find_first_zero_bit_le(void *vaddr, unsigned int size) | 780 | static inline int find_first_zero_bit_le(void *vaddr, unsigned int size) |
| 781 | { | 781 | { |
| 782 | unsigned long bytes, bits; | 782 | unsigned long bytes, bits; |
| 783 | 783 | ||
| 784 | if (!size) | 784 | if (!size) |
| 785 | return 0; | 785 | return 0; |
| 786 | bytes = __ffz_word_loop(vaddr, size); | 786 | bytes = __ffz_word_loop(vaddr, size); |
| 787 | bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes)); | 787 | bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes)); |
| 788 | return (bits < size) ? bits : size; | 788 | return (bits < size) ? bits : size; |
| 789 | } | 789 | } |
| 790 | 790 | ||
| 791 | static inline int find_next_zero_bit_le(void *vaddr, unsigned long size, | 791 | static inline int find_next_zero_bit_le(void *vaddr, unsigned long size, |
| 792 | unsigned long offset) | 792 | unsigned long offset) |
| 793 | { | 793 | { |
| 794 | unsigned long *addr = vaddr, *p; | 794 | unsigned long *addr = vaddr, *p; |
| 795 | unsigned long bit, set; | 795 | unsigned long bit, set; |
| 796 | 796 | ||
| 797 | if (offset >= size) | 797 | if (offset >= size) |
| 798 | return size; | 798 | return size; |
| 799 | bit = offset & (__BITOPS_WORDSIZE - 1); | 799 | bit = offset & (__BITOPS_WORDSIZE - 1); |
| 800 | offset -= bit; | 800 | offset -= bit; |
| 801 | size -= offset; | 801 | size -= offset; |
| 802 | p = addr + offset / __BITOPS_WORDSIZE; | 802 | p = addr + offset / __BITOPS_WORDSIZE; |
| 803 | if (bit) { | 803 | if (bit) { |
| 804 | /* | 804 | /* |
| 805 | * s390 version of ffz returns __BITOPS_WORDSIZE | 805 | * s390 version of ffz returns __BITOPS_WORDSIZE |
| 806 | * if no zero bit is present in the word. | 806 | * if no zero bit is present in the word. |
| 807 | */ | 807 | */ |
| 808 | set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit); | 808 | set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit); |
| 809 | if (set >= size) | 809 | if (set >= size) |
| 810 | return size + offset; | 810 | return size + offset; |
| 811 | if (set < __BITOPS_WORDSIZE) | 811 | if (set < __BITOPS_WORDSIZE) |
| 812 | return set + offset; | 812 | return set + offset; |
| 813 | offset += __BITOPS_WORDSIZE; | 813 | offset += __BITOPS_WORDSIZE; |
| 814 | size -= __BITOPS_WORDSIZE; | 814 | size -= __BITOPS_WORDSIZE; |
| 815 | p++; | 815 | p++; |
| 816 | } | 816 | } |
| 817 | return offset + find_first_zero_bit_le(p, size); | 817 | return offset + find_first_zero_bit_le(p, size); |
| 818 | } | 818 | } |
| 819 | 819 | ||
| 820 | static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size) | 820 | static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size) |
| 821 | { | 821 | { |
| 822 | unsigned long bytes, bits; | 822 | unsigned long bytes, bits; |
| 823 | 823 | ||
| 824 | if (!size) | 824 | if (!size) |
| 825 | return 0; | 825 | return 0; |
| 826 | bytes = __ffs_word_loop(vaddr, size); | 826 | bytes = __ffs_word_loop(vaddr, size); |
| 827 | bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes)); | 827 | bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes)); |
| 828 | return (bits < size) ? bits : size; | 828 | return (bits < size) ? bits : size; |
| 829 | } | 829 | } |
| 830 | 830 | ||
| 831 | static inline int find_next_bit_le(void *vaddr, unsigned long size, | 831 | static inline int find_next_bit_le(void *vaddr, unsigned long size, |
| 832 | unsigned long offset) | 832 | unsigned long offset) |
| 833 | { | 833 | { |
| 834 | unsigned long *addr = vaddr, *p; | 834 | unsigned long *addr = vaddr, *p; |
| 835 | unsigned long bit, set; | 835 | unsigned long bit, set; |
| 836 | 836 | ||
| 837 | if (offset >= size) | 837 | if (offset >= size) |
| 838 | return size; | 838 | return size; |
| 839 | bit = offset & (__BITOPS_WORDSIZE - 1); | 839 | bit = offset & (__BITOPS_WORDSIZE - 1); |
| 840 | offset -= bit; | 840 | offset -= bit; |
| 841 | size -= offset; | 841 | size -= offset; |
| 842 | p = addr + offset / __BITOPS_WORDSIZE; | 842 | p = addr + offset / __BITOPS_WORDSIZE; |
| 843 | if (bit) { | 843 | if (bit) { |
| 844 | /* | 844 | /* |
| 845 | * s390 version of ffz returns __BITOPS_WORDSIZE | 845 | * s390 version of ffz returns __BITOPS_WORDSIZE |
| 846 | * if no zero bit is present in the word. | 846 | * if no zero bit is present in the word. |
| 847 | */ | 847 | */ |
| 848 | set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit)); | 848 | set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit)); |
| 849 | if (set >= size) | 849 | if (set >= size) |
| 850 | return size + offset; | 850 | return size + offset; |
| 851 | if (set < __BITOPS_WORDSIZE) | 851 | if (set < __BITOPS_WORDSIZE) |
| 852 | return set + offset; | 852 | return set + offset; |
| 853 | offset += __BITOPS_WORDSIZE; | 853 | offset += __BITOPS_WORDSIZE; |
| 854 | size -= __BITOPS_WORDSIZE; | 854 | size -= __BITOPS_WORDSIZE; |
| 855 | p++; | 855 | p++; |
| 856 | } | 856 | } |
| 857 | return offset + find_first_bit_le(p, size); | 857 | return offset + find_first_bit_le(p, size); |
| 858 | } | 858 | } |
| 859 | 859 | ||
| 860 | #define ext2_set_bit_atomic(lock, nr, addr) \ | 860 | #define ext2_set_bit_atomic(lock, nr, addr) \ |
| 861 | test_and_set_bit_le(nr, addr) | 861 | test_and_set_bit_le(nr, addr) |
| 862 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | 862 | #define ext2_clear_bit_atomic(lock, nr, addr) \ |
| 863 | test_and_clear_bit_le(nr, addr) | 863 | test_and_clear_bit_le(nr, addr) |
| 864 | 864 | ||
| 865 | #include <asm-generic/bitops/minix.h> | ||
| 866 | 865 | ||
| 867 | #endif /* __KERNEL__ */ | 866 | #endif /* __KERNEL__ */ |
| 868 | 867 | ||
| 869 | #endif /* _S390_BITOPS_H */ | 868 | #endif /* _S390_BITOPS_H */ |
| 870 | 869 |
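Editor's note on the s390 hunk above: the helpers address bits in big-endian order. The compare-and-swap based routines round the bit number down to a word boundary to get the byte offset of the containing unsigned long, while the byte-wise non-SMP routines XOR the bit number with __BITOPS_WORDSIZE - 8 because bit 0 of a big-endian word lives in that word's last byte. Below is a minimal sketch of that arithmetic, assuming a 64-bit build (__BITOPS_WORDSIZE == 64); the WORDSIZE macro and the example bit number are illustrative only, not kernel code.

	#include <stdio.h>

	#define WORDSIZE 64	/* stands in for __BITOPS_WORDSIZE on 64-bit s390 */

	int main(void)
	{
		unsigned long nr = 67;	/* arbitrary example bit number */

		/* CS-based routines: byte offset of the unsigned long holding bit nr,
		 * plus the mask used in the compare-and-swap loop. */
		unsigned long word_byte = (nr ^ (nr & (WORDSIZE - 1))) >> 3;	/* (67 & ~63) / 8 == 8 */
		unsigned long mask = 1UL << (nr & (WORDSIZE - 1));		/* bit 3 of that word */

		/* Byte-wise non-SMP routines: offset of the byte holding bit nr.
		 * Big-endian, so bit 0 of a word sits in the word's last byte. */
		unsigned long byte_off = (nr ^ (WORDSIZE - 8)) >> 3;		/* (67 ^ 56) >> 3 == 15 */
		unsigned int bit_in_byte = nr & 7;				/* == 3 */

		printf("word byte %lu, mask %#lx, byte %lu, bit %u\n",
		       word_byte, mask, byte_off, bit_in_byte);
		return 0;
	}

For nr == 67 this yields word byte 8, mask 0x8, byte 15 and bit 3, which is exactly what set_bit_cs() and __set_bit() compute from the same expressions in the hunk above.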
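A second note on the *_le accessors and the "intel byte ordering" comment in the same hunk: little-endian (ext2/minix style) bitmaps are emulated on top of the big-endian primitives by XOR-ing the bit number with __BITOPS_WORDSIZE - 8 once more. Since the byte-wise helpers apply the same XOR internally, the two cancel and little-endian bit nr ends up in byte nr / 8, bit nr % 8, as the comment describes. A small self-checking sketch of that cancellation, again assuming a 64-bit word size (WORDSIZE and native_byte() are illustrative, not kernel code):

	#include <assert.h>

	#define WORDSIZE 64	/* stands in for __BITOPS_WORDSIZE on 64-bit s390 */

	/* Byte offset the big-endian byte-wise helpers compute for bit nr. */
	static unsigned long native_byte(unsigned long nr)
	{
		return (nr ^ (WORDSIZE - 8)) >> 3;
	}

	int main(void)
	{
		unsigned long nr;

		for (nr = 0; nr < 128; nr++) {
			unsigned long le_nr = nr ^ (WORDSIZE - 8);	/* what __set_bit_le() passes down */

			assert(native_byte(le_nr) == nr / 8);	/* lands in the little-endian byte */
			assert((le_nr & 7) == (nr & 7));	/* bit within the byte is unchanged */
		}
		return 0;
	}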
arch/sh/include/asm/bitops.h
| 1 | #ifndef __ASM_SH_BITOPS_H | 1 | #ifndef __ASM_SH_BITOPS_H |
| 2 | #define __ASM_SH_BITOPS_H | 2 | #define __ASM_SH_BITOPS_H |
| 3 | 3 | ||
| 4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
| 5 | 5 | ||
| 6 | #ifndef _LINUX_BITOPS_H | 6 | #ifndef _LINUX_BITOPS_H |
| 7 | #error only <linux/bitops.h> can be included directly | 7 | #error only <linux/bitops.h> can be included directly |
| 8 | #endif | 8 | #endif |
| 9 | 9 | ||
| 10 | #include <asm/system.h> | 10 | #include <asm/system.h> |
| 11 | /* For __swab32 */ | 11 | /* For __swab32 */ |
| 12 | #include <asm/byteorder.h> | 12 | #include <asm/byteorder.h> |
| 13 | 13 | ||
| 14 | #ifdef CONFIG_GUSA_RB | 14 | #ifdef CONFIG_GUSA_RB |
| 15 | #include <asm/bitops-grb.h> | 15 | #include <asm/bitops-grb.h> |
| 16 | #elif defined(CONFIG_CPU_SH2A) | 16 | #elif defined(CONFIG_CPU_SH2A) |
| 17 | #include <asm-generic/bitops/atomic.h> | 17 | #include <asm-generic/bitops/atomic.h> |
| 18 | #include <asm/bitops-op32.h> | 18 | #include <asm/bitops-op32.h> |
| 19 | #elif defined(CONFIG_CPU_SH4A) | 19 | #elif defined(CONFIG_CPU_SH4A) |
| 20 | #include <asm/bitops-llsc.h> | 20 | #include <asm/bitops-llsc.h> |
| 21 | #else | 21 | #else |
| 22 | #include <asm-generic/bitops/atomic.h> | 22 | #include <asm-generic/bitops/atomic.h> |
| 23 | #include <asm-generic/bitops/non-atomic.h> | 23 | #include <asm-generic/bitops/non-atomic.h> |
| 24 | #endif | 24 | #endif |
| 25 | 25 | ||
| 26 | /* | 26 | /* |
| 27 | * clear_bit() doesn't provide any barrier for the compiler. | 27 | * clear_bit() doesn't provide any barrier for the compiler. |
| 28 | */ | 28 | */ |
| 29 | #define smp_mb__before_clear_bit() smp_mb() | 29 | #define smp_mb__before_clear_bit() smp_mb() |
| 30 | #define smp_mb__after_clear_bit() smp_mb() | 30 | #define smp_mb__after_clear_bit() smp_mb() |
| 31 | 31 | ||
| 32 | #ifdef CONFIG_SUPERH32 | 32 | #ifdef CONFIG_SUPERH32 |
| 33 | static inline unsigned long ffz(unsigned long word) | 33 | static inline unsigned long ffz(unsigned long word) |
| 34 | { | 34 | { |
| 35 | unsigned long result; | 35 | unsigned long result; |
| 36 | 36 | ||
| 37 | __asm__("1:\n\t" | 37 | __asm__("1:\n\t" |
| 38 | "shlr %1\n\t" | 38 | "shlr %1\n\t" |
| 39 | "bt/s 1b\n\t" | 39 | "bt/s 1b\n\t" |
| 40 | " add #1, %0" | 40 | " add #1, %0" |
| 41 | : "=r" (result), "=r" (word) | 41 | : "=r" (result), "=r" (word) |
| 42 | : "0" (~0L), "1" (word) | 42 | : "0" (~0L), "1" (word) |
| 43 | : "t"); | 43 | : "t"); |
| 44 | return result; | 44 | return result; |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | /** | 47 | /** |
| 48 | * __ffs - find first bit in word. | 48 | * __ffs - find first bit in word. |
| 49 | * @word: The word to search | 49 | * @word: The word to search |
| 50 | * | 50 | * |
| 51 | * Undefined if no bit exists, so code should check against 0 first. | 51 | * Undefined if no bit exists, so code should check against 0 first. |
| 52 | */ | 52 | */ |
| 53 | static inline unsigned long __ffs(unsigned long word) | 53 | static inline unsigned long __ffs(unsigned long word) |
| 54 | { | 54 | { |
| 55 | unsigned long result; | 55 | unsigned long result; |
| 56 | 56 | ||
| 57 | __asm__("1:\n\t" | 57 | __asm__("1:\n\t" |
| 58 | "shlr %1\n\t" | 58 | "shlr %1\n\t" |
| 59 | "bf/s 1b\n\t" | 59 | "bf/s 1b\n\t" |
| 60 | " add #1, %0" | 60 | " add #1, %0" |
| 61 | : "=r" (result), "=r" (word) | 61 | : "=r" (result), "=r" (word) |
| 62 | : "0" (~0L), "1" (word) | 62 | : "0" (~0L), "1" (word) |
| 63 | : "t"); | 63 | : "t"); |
| 64 | return result; | 64 | return result; |
| 65 | } | 65 | } |
| 66 | #else | 66 | #else |
| 67 | static inline unsigned long ffz(unsigned long word) | 67 | static inline unsigned long ffz(unsigned long word) |
| 68 | { | 68 | { |
| 69 | unsigned long result, __d2, __d3; | 69 | unsigned long result, __d2, __d3; |
| 70 | 70 | ||
| 71 | __asm__("gettr tr0, %2\n\t" | 71 | __asm__("gettr tr0, %2\n\t" |
| 72 | "pta $+32, tr0\n\t" | 72 | "pta $+32, tr0\n\t" |
| 73 | "andi %1, 1, %3\n\t" | 73 | "andi %1, 1, %3\n\t" |
| 74 | "beq %3, r63, tr0\n\t" | 74 | "beq %3, r63, tr0\n\t" |
| 75 | "pta $+4, tr0\n" | 75 | "pta $+4, tr0\n" |
| 76 | "0:\n\t" | 76 | "0:\n\t" |
| 77 | "shlri.l %1, 1, %1\n\t" | 77 | "shlri.l %1, 1, %1\n\t" |
| 78 | "addi %0, 1, %0\n\t" | 78 | "addi %0, 1, %0\n\t" |
| 79 | "andi %1, 1, %3\n\t" | 79 | "andi %1, 1, %3\n\t" |
| 80 | "beqi %3, 1, tr0\n" | 80 | "beqi %3, 1, tr0\n" |
| 81 | "1:\n\t" | 81 | "1:\n\t" |
| 82 | "ptabs %2, tr0\n\t" | 82 | "ptabs %2, tr0\n\t" |
| 83 | : "=r" (result), "=r" (word), "=r" (__d2), "=r" (__d3) | 83 | : "=r" (result), "=r" (word), "=r" (__d2), "=r" (__d3) |
| 84 | : "0" (0L), "1" (word)); | 84 | : "0" (0L), "1" (word)); |
| 85 | 85 | ||
| 86 | return result; | 86 | return result; |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | #include <asm-generic/bitops/__ffs.h> | 89 | #include <asm-generic/bitops/__ffs.h> |
| 90 | #endif | 90 | #endif |
| 91 | 91 | ||
| 92 | #include <asm-generic/bitops/find.h> | 92 | #include <asm-generic/bitops/find.h> |
| 93 | #include <asm-generic/bitops/ffs.h> | 93 | #include <asm-generic/bitops/ffs.h> |
| 94 | #include <asm-generic/bitops/hweight.h> | 94 | #include <asm-generic/bitops/hweight.h> |
| 95 | #include <asm-generic/bitops/lock.h> | 95 | #include <asm-generic/bitops/lock.h> |
| 96 | #include <asm-generic/bitops/sched.h> | 96 | #include <asm-generic/bitops/sched.h> |
| 97 | #include <asm-generic/bitops/le.h> | 97 | #include <asm-generic/bitops/le.h> |
| 98 | #include <asm-generic/bitops/ext2-atomic.h> | 98 | #include <asm-generic/bitops/ext2-atomic.h> |
| 99 | #include <asm-generic/bitops/minix.h> | ||
| 100 | #include <asm-generic/bitops/fls.h> | 99 | #include <asm-generic/bitops/fls.h> |
| 101 | #include <asm-generic/bitops/__fls.h> | 100 | #include <asm-generic/bitops/__fls.h> |
| 102 | #include <asm-generic/bitops/fls64.h> | 101 | #include <asm-generic/bitops/fls64.h> |
| 103 | 102 | ||
| 104 | #endif /* __KERNEL__ */ | 103 | #endif /* __KERNEL__ */ |
| 105 | 104 | ||
| 106 | #endif /* __ASM_SH_BITOPS_H */ | 105 | #endif /* __ASM_SH_BITOPS_H */ |
| 107 | 106 |
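Editor's note on the SuperH-32 ffz() above: shlr shifts the low bit of the word into the T flag, bt/s loops while that bit is 1, and the add in the delay slot bumps the result on every pass, starting from -1. The following plain C rendering is offered only as a reading aid (ffz_sketch() is not a kernel function); __ffs() is the same loop except that bf/s keeps iterating while the shifted-out bit is 0.

	#include <assert.h>

	/* C rendering of the SuperH-32 ffz() loop above (illustrative only). */
	static unsigned long ffz_sketch(unsigned long word)
	{
		unsigned long result = ~0UL;	/* the "0" (~0L) input constraint in the asm */
		unsigned long t;

		do {
			t = word & 1;		/* bit that "shlr %1" moves into the T flag */
			word >>= 1;
			result++;		/* delay-slot "add #1, %0", executed every pass */
		} while (t);			/* "bt/s 1b": keep looping while the bit was set */
		return result;
	}

	int main(void)
	{
		assert(ffz_sketch(0x0) == 0);	/* bit 0 already clear */
		assert(ffz_sketch(0x1) == 1);
		assert(ffz_sketch(0xff) == 8);	/* first zero bit is bit 8 */
		return 0;
	}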
arch/sparc/include/asm/bitops_32.h
| 1 | /* | 1 | /* |
| 2 | * bitops.h: Bit string operations on the Sparc. | 2 | * bitops.h: Bit string operations on the Sparc. |
| 3 | * | 3 | * |
| 4 | * Copyright 1995 David S. Miller (davem@caip.rutgers.edu) | 4 | * Copyright 1995 David S. Miller (davem@caip.rutgers.edu) |
| 5 | * Copyright 1996 Eddie C. Dost (ecd@skynet.be) | 5 | * Copyright 1996 Eddie C. Dost (ecd@skynet.be) |
| 6 | * Copyright 2001 Anton Blanchard (anton@samba.org) | 6 | * Copyright 2001 Anton Blanchard (anton@samba.org) |
| 7 | */ | 7 | */ |
| 8 | 8 | ||
| 9 | #ifndef _SPARC_BITOPS_H | 9 | #ifndef _SPARC_BITOPS_H |
| 10 | #define _SPARC_BITOPS_H | 10 | #define _SPARC_BITOPS_H |
| 11 | 11 | ||
| 12 | #include <linux/compiler.h> | 12 | #include <linux/compiler.h> |
| 13 | #include <asm/byteorder.h> | 13 | #include <asm/byteorder.h> |
| 14 | 14 | ||
| 15 | #ifdef __KERNEL__ | 15 | #ifdef __KERNEL__ |
| 16 | 16 | ||
| 17 | #ifndef _LINUX_BITOPS_H | 17 | #ifndef _LINUX_BITOPS_H |
| 18 | #error only <linux/bitops.h> can be included directly | 18 | #error only <linux/bitops.h> can be included directly |
| 19 | #endif | 19 | #endif |
| 20 | 20 | ||
| 21 | extern unsigned long ___set_bit(unsigned long *addr, unsigned long mask); | 21 | extern unsigned long ___set_bit(unsigned long *addr, unsigned long mask); |
| 22 | extern unsigned long ___clear_bit(unsigned long *addr, unsigned long mask); | 22 | extern unsigned long ___clear_bit(unsigned long *addr, unsigned long mask); |
| 23 | extern unsigned long ___change_bit(unsigned long *addr, unsigned long mask); | 23 | extern unsigned long ___change_bit(unsigned long *addr, unsigned long mask); |
| 24 | 24 | ||
| 25 | /* | 25 | /* |
| 26 | * Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0' | 26 | * Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0' |
| 27 | * is in the highest of the four bytes and bit '31' is the high bit | 27 | * is in the highest of the four bytes and bit '31' is the high bit |
| 28 | * within the first byte. Sparc is BIG-Endian. Unless noted otherwise | 28 | * within the first byte. Sparc is BIG-Endian. Unless noted otherwise |
| 29 | * all bit-ops return 0 if bit was previously clear and != 0 otherwise. | 29 | * all bit-ops return 0 if bit was previously clear and != 0 otherwise. |
| 30 | */ | 30 | */ |
| 31 | static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr) | 31 | static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr) |
| 32 | { | 32 | { |
| 33 | unsigned long *ADDR, mask; | 33 | unsigned long *ADDR, mask; |
| 34 | 34 | ||
| 35 | ADDR = ((unsigned long *) addr) + (nr >> 5); | 35 | ADDR = ((unsigned long *) addr) + (nr >> 5); |
| 36 | mask = 1 << (nr & 31); | 36 | mask = 1 << (nr & 31); |
| 37 | 37 | ||
| 38 | return ___set_bit(ADDR, mask) != 0; | 38 | return ___set_bit(ADDR, mask) != 0; |
| 39 | } | 39 | } |
| 40 | 40 | ||
| 41 | static inline void set_bit(unsigned long nr, volatile unsigned long *addr) | 41 | static inline void set_bit(unsigned long nr, volatile unsigned long *addr) |
| 42 | { | 42 | { |
| 43 | unsigned long *ADDR, mask; | 43 | unsigned long *ADDR, mask; |
| 44 | 44 | ||
| 45 | ADDR = ((unsigned long *) addr) + (nr >> 5); | 45 | ADDR = ((unsigned long *) addr) + (nr >> 5); |
| 46 | mask = 1 << (nr & 31); | 46 | mask = 1 << (nr & 31); |
| 47 | 47 | ||
| 48 | (void) ___set_bit(ADDR, mask); | 48 | (void) ___set_bit(ADDR, mask); |
| 49 | } | 49 | } |
| 50 | 50 | ||
| 51 | static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) | 51 | static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) |
| 52 | { | 52 | { |
| 53 | unsigned long *ADDR, mask; | 53 | unsigned long *ADDR, mask; |
| 54 | 54 | ||
| 55 | ADDR = ((unsigned long *) addr) + (nr >> 5); | 55 | ADDR = ((unsigned long *) addr) + (nr >> 5); |
| 56 | mask = 1 << (nr & 31); | 56 | mask = 1 << (nr & 31); |
| 57 | 57 | ||
| 58 | return ___clear_bit(ADDR, mask) != 0; | 58 | return ___clear_bit(ADDR, mask) != 0; |
| 59 | } | 59 | } |
| 60 | 60 | ||
| 61 | static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) | 61 | static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) |
| 62 | { | 62 | { |
| 63 | unsigned long *ADDR, mask; | 63 | unsigned long *ADDR, mask; |
| 64 | 64 | ||
| 65 | ADDR = ((unsigned long *) addr) + (nr >> 5); | 65 | ADDR = ((unsigned long *) addr) + (nr >> 5); |
| 66 | mask = 1 << (nr & 31); | 66 | mask = 1 << (nr & 31); |
| 67 | 67 | ||
| 68 | (void) ___clear_bit(ADDR, mask); | 68 | (void) ___clear_bit(ADDR, mask); |
| 69 | } | 69 | } |
| 70 | 70 | ||
| 71 | static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr) | 71 | static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr) |
| 72 | { | 72 | { |
| 73 | unsigned long *ADDR, mask; | 73 | unsigned long *ADDR, mask; |
| 74 | 74 | ||
| 75 | ADDR = ((unsigned long *) addr) + (nr >> 5); | 75 | ADDR = ((unsigned long *) addr) + (nr >> 5); |
| 76 | mask = 1 << (nr & 31); | 76 | mask = 1 << (nr & 31); |
| 77 | 77 | ||
| 78 | return ___change_bit(ADDR, mask) != 0; | 78 | return ___change_bit(ADDR, mask) != 0; |
| 79 | } | 79 | } |
| 80 | 80 | ||
| 81 | static inline void change_bit(unsigned long nr, volatile unsigned long *addr) | 81 | static inline void change_bit(unsigned long nr, volatile unsigned long *addr) |
| 82 | { | 82 | { |
| 83 | unsigned long *ADDR, mask; | 83 | unsigned long *ADDR, mask; |
| 84 | 84 | ||
| 85 | ADDR = ((unsigned long *) addr) + (nr >> 5); | 85 | ADDR = ((unsigned long *) addr) + (nr >> 5); |
| 86 | mask = 1 << (nr & 31); | 86 | mask = 1 << (nr & 31); |
| 87 | 87 | ||
| 88 | (void) ___change_bit(ADDR, mask); | 88 | (void) ___change_bit(ADDR, mask); |
| 89 | } | 89 | } |
| 90 | 90 | ||
| 91 | #include <asm-generic/bitops/non-atomic.h> | 91 | #include <asm-generic/bitops/non-atomic.h> |
| 92 | 92 | ||
| 93 | #define smp_mb__before_clear_bit() do { } while(0) | 93 | #define smp_mb__before_clear_bit() do { } while(0) |
| 94 | #define smp_mb__after_clear_bit() do { } while(0) | 94 | #define smp_mb__after_clear_bit() do { } while(0) |
| 95 | 95 | ||
| 96 | #include <asm-generic/bitops/ffz.h> | 96 | #include <asm-generic/bitops/ffz.h> |
| 97 | #include <asm-generic/bitops/__ffs.h> | 97 | #include <asm-generic/bitops/__ffs.h> |
| 98 | #include <asm-generic/bitops/sched.h> | 98 | #include <asm-generic/bitops/sched.h> |
| 99 | #include <asm-generic/bitops/ffs.h> | 99 | #include <asm-generic/bitops/ffs.h> |
| 100 | #include <asm-generic/bitops/fls.h> | 100 | #include <asm-generic/bitops/fls.h> |
| 101 | #include <asm-generic/bitops/__fls.h> | 101 | #include <asm-generic/bitops/__fls.h> |
| 102 | #include <asm-generic/bitops/fls64.h> | 102 | #include <asm-generic/bitops/fls64.h> |
| 103 | #include <asm-generic/bitops/hweight.h> | 103 | #include <asm-generic/bitops/hweight.h> |
| 104 | #include <asm-generic/bitops/lock.h> | 104 | #include <asm-generic/bitops/lock.h> |
| 105 | #include <asm-generic/bitops/find.h> | 105 | #include <asm-generic/bitops/find.h> |
| 106 | #include <asm-generic/bitops/le.h> | 106 | #include <asm-generic/bitops/le.h> |
| 107 | #include <asm-generic/bitops/ext2-atomic.h> | 107 | #include <asm-generic/bitops/ext2-atomic.h> |
| 108 | #include <asm-generic/bitops/minix.h> | ||
| 109 | 108 | ||
| 110 | #endif /* __KERNEL__ */ | 109 | #endif /* __KERNEL__ */ |
| 111 | 110 | ||
| 112 | #endif /* defined(_SPARC_BITOPS_H) */ | 111 | #endif /* defined(_SPARC_BITOPS_H) */ |
| 113 | 112 |
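Editor's note on the sparc32 wrappers above: they only do index arithmetic before handing off to the out-of-line ___set_bit()/___clear_bit()/___change_bit() helpers, splitting the bit number into a 32-bit word index (nr >> 5) and an in-word mask (1 << (nr & 31)). A trivial sketch of that split with an example value (illustrative only):

	#include <stdio.h>

	int main(void)
	{
		unsigned long nr = 100;			/* arbitrary example bit number */
		unsigned long word = nr >> 5;		/* 100 / 32 == 3: fourth 32-bit word */
		unsigned long mask = 1UL << (nr & 31);	/* 100 % 32 == 4: mask 0x10 */

		printf("word index %lu, mask %#lx\n", word, mask);
		return 0;
	}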
arch/sparc/include/asm/bitops_64.h
| 1 | /* | 1 | /* |
| 2 | * bitops.h: Bit string operations on the V9. | 2 | * bitops.h: Bit string operations on the V9. |
| 3 | * | 3 | * |
| 4 | * Copyright 1996, 1997 David S. Miller (davem@caip.rutgers.edu) | 4 | * Copyright 1996, 1997 David S. Miller (davem@caip.rutgers.edu) |
| 5 | */ | 5 | */ |
| 6 | 6 | ||
| 7 | #ifndef _SPARC64_BITOPS_H | 7 | #ifndef _SPARC64_BITOPS_H |
| 8 | #define _SPARC64_BITOPS_H | 8 | #define _SPARC64_BITOPS_H |
| 9 | 9 | ||
| 10 | #ifndef _LINUX_BITOPS_H | 10 | #ifndef _LINUX_BITOPS_H |
| 11 | #error only <linux/bitops.h> can be included directly | 11 | #error only <linux/bitops.h> can be included directly |
| 12 | #endif | 12 | #endif |
| 13 | 13 | ||
| 14 | #include <linux/compiler.h> | 14 | #include <linux/compiler.h> |
| 15 | #include <asm/byteorder.h> | 15 | #include <asm/byteorder.h> |
| 16 | 16 | ||
| 17 | extern int test_and_set_bit(unsigned long nr, volatile unsigned long *addr); | 17 | extern int test_and_set_bit(unsigned long nr, volatile unsigned long *addr); |
| 18 | extern int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr); | 18 | extern int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr); |
| 19 | extern int test_and_change_bit(unsigned long nr, volatile unsigned long *addr); | 19 | extern int test_and_change_bit(unsigned long nr, volatile unsigned long *addr); |
| 20 | extern void set_bit(unsigned long nr, volatile unsigned long *addr); | 20 | extern void set_bit(unsigned long nr, volatile unsigned long *addr); |
| 21 | extern void clear_bit(unsigned long nr, volatile unsigned long *addr); | 21 | extern void clear_bit(unsigned long nr, volatile unsigned long *addr); |
| 22 | extern void change_bit(unsigned long nr, volatile unsigned long *addr); | 22 | extern void change_bit(unsigned long nr, volatile unsigned long *addr); |
| 23 | 23 | ||
| 24 | #include <asm-generic/bitops/non-atomic.h> | 24 | #include <asm-generic/bitops/non-atomic.h> |
| 25 | 25 | ||
| 26 | #define smp_mb__before_clear_bit() barrier() | 26 | #define smp_mb__before_clear_bit() barrier() |
| 27 | #define smp_mb__after_clear_bit() barrier() | 27 | #define smp_mb__after_clear_bit() barrier() |
| 28 | 28 | ||
| 29 | #include <asm-generic/bitops/ffz.h> | 29 | #include <asm-generic/bitops/ffz.h> |
| 30 | #include <asm-generic/bitops/__ffs.h> | 30 | #include <asm-generic/bitops/__ffs.h> |
| 31 | #include <asm-generic/bitops/fls.h> | 31 | #include <asm-generic/bitops/fls.h> |
| 32 | #include <asm-generic/bitops/__fls.h> | 32 | #include <asm-generic/bitops/__fls.h> |
| 33 | #include <asm-generic/bitops/fls64.h> | 33 | #include <asm-generic/bitops/fls64.h> |
| 34 | 34 | ||
| 35 | #ifdef __KERNEL__ | 35 | #ifdef __KERNEL__ |
| 36 | 36 | ||
| 37 | #include <asm-generic/bitops/sched.h> | 37 | #include <asm-generic/bitops/sched.h> |
| 38 | #include <asm-generic/bitops/ffs.h> | 38 | #include <asm-generic/bitops/ffs.h> |
| 39 | 39 | ||
| 40 | /* | 40 | /* |
| 41 | * hweightN: returns the hamming weight (i.e. the number | 41 | * hweightN: returns the hamming weight (i.e. the number |
| 42 | * of bits set) of a N-bit word | 42 | * of bits set) of a N-bit word |
| 43 | */ | 43 | */ |
| 44 | 44 | ||
| 45 | #ifdef ULTRA_HAS_POPULATION_COUNT | 45 | #ifdef ULTRA_HAS_POPULATION_COUNT |
| 46 | 46 | ||
| 47 | static inline unsigned int __arch_hweight64(unsigned long w) | 47 | static inline unsigned int __arch_hweight64(unsigned long w) |
| 48 | { | 48 | { |
| 49 | unsigned int res; | 49 | unsigned int res; |
| 50 | 50 | ||
| 51 | __asm__ ("popc %1,%0" : "=r" (res) : "r" (w)); | 51 | __asm__ ("popc %1,%0" : "=r" (res) : "r" (w)); |
| 52 | return res; | 52 | return res; |
| 53 | } | 53 | } |
| 54 | 54 | ||
| 55 | static inline unsigned int __arch_hweight32(unsigned int w) | 55 | static inline unsigned int __arch_hweight32(unsigned int w) |
| 56 | { | 56 | { |
| 57 | unsigned int res; | 57 | unsigned int res; |
| 58 | 58 | ||
| 59 | __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffffffff)); | 59 | __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffffffff)); |
| 60 | return res; | 60 | return res; |
| 61 | } | 61 | } |
| 62 | 62 | ||
| 63 | static inline unsigned int __arch_hweight16(unsigned int w) | 63 | static inline unsigned int __arch_hweight16(unsigned int w) |
| 64 | { | 64 | { |
| 65 | unsigned int res; | 65 | unsigned int res; |
| 66 | 66 | ||
| 67 | __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffff)); | 67 | __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffff)); |
| 68 | return res; | 68 | return res; |
| 69 | } | 69 | } |
| 70 | 70 | ||
| 71 | static inline unsigned int __arch_hweight8(unsigned int w) | 71 | static inline unsigned int __arch_hweight8(unsigned int w) |
| 72 | { | 72 | { |
| 73 | unsigned int res; | 73 | unsigned int res; |
| 74 | 74 | ||
| 75 | __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xff)); | 75 | __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xff)); |
| 76 | return res; | 76 | return res; |
| 77 | } | 77 | } |
| 78 | 78 | ||
| 79 | #else | 79 | #else |
| 80 | 80 | ||
| 81 | #include <asm-generic/bitops/arch_hweight.h> | 81 | #include <asm-generic/bitops/arch_hweight.h> |
| 82 | 82 | ||
| 83 | #endif | 83 | #endif |
| 84 | #include <asm-generic/bitops/const_hweight.h> | 84 | #include <asm-generic/bitops/const_hweight.h> |
| 85 | #include <asm-generic/bitops/lock.h> | 85 | #include <asm-generic/bitops/lock.h> |
| 86 | #endif /* __KERNEL__ */ | 86 | #endif /* __KERNEL__ */ |
| 87 | 87 | ||
| 88 | #include <asm-generic/bitops/find.h> | 88 | #include <asm-generic/bitops/find.h> |
| 89 | 89 | ||
| 90 | #ifdef __KERNEL__ | 90 | #ifdef __KERNEL__ |
| 91 | 91 | ||
| 92 | #include <asm-generic/bitops/le.h> | 92 | #include <asm-generic/bitops/le.h> |
| 93 | 93 | ||
| 94 | #define ext2_set_bit_atomic(lock,nr,addr) \ | 94 | #define ext2_set_bit_atomic(lock,nr,addr) \ |
| 95 | test_and_set_bit((nr) ^ 0x38,(unsigned long *)(addr)) | 95 | test_and_set_bit((nr) ^ 0x38,(unsigned long *)(addr)) |
| 96 | #define ext2_clear_bit_atomic(lock,nr,addr) \ | 96 | #define ext2_clear_bit_atomic(lock,nr,addr) \ |
| 97 | test_and_clear_bit((nr) ^ 0x38,(unsigned long *)(addr)) | 97 | test_and_clear_bit((nr) ^ 0x38,(unsigned long *)(addr)) |
| 98 | 98 | ||
| 99 | #include <asm-generic/bitops/minix.h> | ||
| 100 | |||
| 101 | #endif /* __KERNEL__ */ | 99 | #endif /* __KERNEL__ */ |
| 102 | 100 | ||
| 103 | #endif /* defined(_SPARC64_BITOPS_H) */ | 101 | #endif /* defined(_SPARC64_BITOPS_H) */ |
| 104 | 102 |
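The only byte-order-sensitive piece left in the sparc64 header is the ext2 pair above, which remaps the little-endian bitmap bit number with (nr) ^ 0x38 before using the native big-endian 64-bit bitops. The user-space check below only verifies that index arithmetic; it is illustrative and does not touch the kernel helpers.

#include <assert.h>
#include <stdio.h>

/*
 * Sketch: why the ext2 macros above remap the bit number with ^ 0x38.
 * With big-endian 64-bit words, memory byte k of a word holds native bit
 * positions (7 - k) * 8 .. (7 - k) * 8 + 7.  Flipping bits 3..5 of nr
 * (nr ^ 0x38) swaps byte index k for 7 - k while keeping the bit within
 * the byte, so a native bitop lands on the byte the little-endian bitmap
 * format expects.
 */
int main(void)
{
	for (unsigned int nr = 0; nr < 64; nr++) {
		unsigned int le_byte = nr >> 3;		/* byte in LE bitmap   */
		unsigned int le_bit  = nr & 7;		/* bit within the byte */
		unsigned int native  = nr ^ 0x38;	/* remapped bit index  */
		unsigned int be_byte = 7 - (native >> 3); /* byte the BE word uses */
		unsigned int be_bit  = native & 7;

		assert(be_byte == le_byte && be_bit == le_bit);
	}
	printf("nr ^ 0x38 addresses the same byte/bit for all 64 positions\n");
	return 0;
}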
arch/tile/include/asm/bitops.h
| 1 | /* | 1 | /* |
| 2 | * Copyright 1992, Linus Torvalds. | 2 | * Copyright 1992, Linus Torvalds. |
| 3 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | 3 | * Copyright 2010 Tilera Corporation. All Rights Reserved. |
| 4 | * | 4 | * |
| 5 | * This program is free software; you can redistribute it and/or | 5 | * This program is free software; you can redistribute it and/or |
| 6 | * modify it under the terms of the GNU General Public License | 6 | * modify it under the terms of the GNU General Public License |
| 7 | * as published by the Free Software Foundation, version 2. | 7 | * as published by the Free Software Foundation, version 2. |
| 8 | * | 8 | * |
| 9 | * This program is distributed in the hope that it will be useful, but | 9 | * This program is distributed in the hope that it will be useful, but |
| 10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
| 11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | 11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or |
| 12 | * NON INFRINGEMENT. See the GNU General Public License for | 12 | * NON INFRINGEMENT. See the GNU General Public License for |
| 13 | * more details. | 13 | * more details. |
| 14 | */ | 14 | */ |
| 15 | 15 | ||
| 16 | #ifndef _ASM_TILE_BITOPS_H | 16 | #ifndef _ASM_TILE_BITOPS_H |
| 17 | #define _ASM_TILE_BITOPS_H | 17 | #define _ASM_TILE_BITOPS_H |
| 18 | 18 | ||
| 19 | #include <linux/types.h> | 19 | #include <linux/types.h> |
| 20 | 20 | ||
| 21 | #ifndef _LINUX_BITOPS_H | 21 | #ifndef _LINUX_BITOPS_H |
| 22 | #error only <linux/bitops.h> can be included directly | 22 | #error only <linux/bitops.h> can be included directly |
| 23 | #endif | 23 | #endif |
| 24 | 24 | ||
| 25 | #ifdef __tilegx__ | 25 | #ifdef __tilegx__ |
| 26 | #include <asm/bitops_64.h> | 26 | #include <asm/bitops_64.h> |
| 27 | #else | 27 | #else |
| 28 | #include <asm/bitops_32.h> | 28 | #include <asm/bitops_32.h> |
| 29 | #endif | 29 | #endif |
| 30 | 30 | ||
| 31 | /** | 31 | /** |
| 32 | * __ffs - find first set bit in word | 32 | * __ffs - find first set bit in word |
| 33 | * @word: The word to search | 33 | * @word: The word to search |
| 34 | * | 34 | * |
| 35 | * Undefined if no set bit exists, so code should check against 0 first. | 35 | * Undefined if no set bit exists, so code should check against 0 first. |
| 36 | */ | 36 | */ |
| 37 | static inline unsigned long __ffs(unsigned long word) | 37 | static inline unsigned long __ffs(unsigned long word) |
| 38 | { | 38 | { |
| 39 | return __builtin_ctzl(word); | 39 | return __builtin_ctzl(word); |
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | /** | 42 | /** |
| 43 | * ffz - find first zero bit in word | 43 | * ffz - find first zero bit in word |
| 44 | * @word: The word to search | 44 | * @word: The word to search |
| 45 | * | 45 | * |
| 46 | * Undefined if no zero exists, so code should check against ~0UL first. | 46 | * Undefined if no zero exists, so code should check against ~0UL first. |
| 47 | */ | 47 | */ |
| 48 | static inline unsigned long ffz(unsigned long word) | 48 | static inline unsigned long ffz(unsigned long word) |
| 49 | { | 49 | { |
| 50 | return __builtin_ctzl(~word); | 50 | return __builtin_ctzl(~word); |
| 51 | } | 51 | } |
| 52 | 52 | ||
| 53 | /** | 53 | /** |
| 54 | * __fls - find last set bit in word | 54 | * __fls - find last set bit in word |
| 55 | * @word: The word to search | 55 | * @word: The word to search |
| 56 | * | 56 | * |
| 57 | * Undefined if no set bit exists, so code should check against 0 first. | 57 | * Undefined if no set bit exists, so code should check against 0 first. |
| 58 | */ | 58 | */ |
| 59 | static inline unsigned long __fls(unsigned long word) | 59 | static inline unsigned long __fls(unsigned long word) |
| 60 | { | 60 | { |
| 61 | return (sizeof(word) * 8) - 1 - __builtin_clzl(word); | 61 | return (sizeof(word) * 8) - 1 - __builtin_clzl(word); |
| 62 | } | 62 | } |
| 63 | 63 | ||
| 64 | /** | 64 | /** |
| 65 | * ffs - find first set bit in word | 65 | * ffs - find first set bit in word |
| 66 | * @x: the word to search | 66 | * @x: the word to search |
| 67 | * | 67 | * |
| 68 | * This is defined the same way as the libc and compiler builtin ffs | 68 | * This is defined the same way as the libc and compiler builtin ffs |
| 69 | * routines, therefore differs in spirit from the other bitops. | 69 | * routines, therefore differs in spirit from the other bitops. |
| 70 | * | 70 | * |
| 71 | * ffs(value) returns 0 if value is 0 or the position of the first | 71 | * ffs(value) returns 0 if value is 0 or the position of the first |
| 72 | * set bit if value is nonzero. The first (least significant) bit | 72 | * set bit if value is nonzero. The first (least significant) bit |
| 73 | * is at position 1. | 73 | * is at position 1. |
| 74 | */ | 74 | */ |
| 75 | static inline int ffs(int x) | 75 | static inline int ffs(int x) |
| 76 | { | 76 | { |
| 77 | return __builtin_ffs(x); | 77 | return __builtin_ffs(x); |
| 78 | } | 78 | } |
| 79 | 79 | ||
| 80 | /** | 80 | /** |
| 81 | * fls - find last set bit in word | 81 | * fls - find last set bit in word |
| 82 | * @x: the word to search | 82 | * @x: the word to search |
| 83 | * | 83 | * |
| 84 | * This is defined in a similar way as the libc and compiler builtin | 84 | * This is defined in a similar way as the libc and compiler builtin |
| 85 | * ffs, but returns the position of the most significant set bit. | 85 | * ffs, but returns the position of the most significant set bit. |
| 86 | * | 86 | * |
| 87 | * fls(value) returns 0 if value is 0 or the position of the last | 87 | * fls(value) returns 0 if value is 0 or the position of the last |
| 88 | * set bit if value is nonzero. The last (most significant) bit is | 88 | * set bit if value is nonzero. The last (most significant) bit is |
| 89 | * at position 32. | 89 | * at position 32. |
| 90 | */ | 90 | */ |
| 91 | static inline int fls(int x) | 91 | static inline int fls(int x) |
| 92 | { | 92 | { |
| 93 | return (sizeof(int) * 8) - __builtin_clz(x); | 93 | return (sizeof(int) * 8) - __builtin_clz(x); |
| 94 | } | 94 | } |
| 95 | 95 | ||
| 96 | static inline int fls64(__u64 w) | 96 | static inline int fls64(__u64 w) |
| 97 | { | 97 | { |
| 98 | return (sizeof(__u64) * 8) - __builtin_clzll(w); | 98 | return (sizeof(__u64) * 8) - __builtin_clzll(w); |
| 99 | } | 99 | } |
| 100 | 100 | ||
| 101 | static inline unsigned int __arch_hweight32(unsigned int w) | 101 | static inline unsigned int __arch_hweight32(unsigned int w) |
| 102 | { | 102 | { |
| 103 | return __builtin_popcount(w); | 103 | return __builtin_popcount(w); |
| 104 | } | 104 | } |
| 105 | 105 | ||
| 106 | static inline unsigned int __arch_hweight16(unsigned int w) | 106 | static inline unsigned int __arch_hweight16(unsigned int w) |
| 107 | { | 107 | { |
| 108 | return __builtin_popcount(w & 0xffff); | 108 | return __builtin_popcount(w & 0xffff); |
| 109 | } | 109 | } |
| 110 | 110 | ||
| 111 | static inline unsigned int __arch_hweight8(unsigned int w) | 111 | static inline unsigned int __arch_hweight8(unsigned int w) |
| 112 | { | 112 | { |
| 113 | return __builtin_popcount(w & 0xff); | 113 | return __builtin_popcount(w & 0xff); |
| 114 | } | 114 | } |
| 115 | 115 | ||
| 116 | static inline unsigned long __arch_hweight64(__u64 w) | 116 | static inline unsigned long __arch_hweight64(__u64 w) |
| 117 | { | 117 | { |
| 118 | return __builtin_popcountll(w); | 118 | return __builtin_popcountll(w); |
| 119 | } | 119 | } |
| 120 | 120 | ||
| 121 | #include <asm-generic/bitops/const_hweight.h> | 121 | #include <asm-generic/bitops/const_hweight.h> |
| 122 | #include <asm-generic/bitops/lock.h> | 122 | #include <asm-generic/bitops/lock.h> |
| 123 | #include <asm-generic/bitops/find.h> | 123 | #include <asm-generic/bitops/find.h> |
| 124 | #include <asm-generic/bitops/sched.h> | 124 | #include <asm-generic/bitops/sched.h> |
| 125 | #include <asm-generic/bitops/le.h> | 125 | #include <asm-generic/bitops/le.h> |
| 126 | #include <asm-generic/bitops/minix.h> | ||
| 127 | 126 | ||
| 128 | #endif /* _ASM_TILE_BITOPS_H */ | 127 | #endif /* _ASM_TILE_BITOPS_H */ |
| 129 | 128 |
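The tile header above defines its bit-search and population-count helpers entirely in terms of GCC builtins. Here is a user-space sketch of the same definitions; it is illustrative only, and __builtin_ctzl()/__builtin_clzl() are undefined for 0, which is why the kernel-doc above tells callers of __ffs()/__fls()/ffz() to check against 0 (or ~0UL) first.

#include <stdio.h>

/* Sketch of the builtin-based helpers above, runnable in user space.
 * Inputs of zero are excluded for the __ffs/__fls forms, per the
 * kernel-doc; ffs() and popcount handle zero on their own. */
static unsigned long sketch___ffs(unsigned long word)	/* word != 0 */
{
	return __builtin_ctzl(word);
}

static unsigned long sketch___fls(unsigned long word)	/* word != 0 */
{
	return (sizeof(word) * 8) - 1 - __builtin_clzl(word);
}

int main(void)
{
	printf("ffs(0)=%d ffs(0x10)=%d\n", __builtin_ffs(0), __builtin_ffs(0x10));
	printf("__ffs(0x10)=%lu __fls(0x10)=%lu\n",
	       sketch___ffs(0x10), sketch___fls(0x10));
	printf("hweight32(0xf0f0)=%d\n", __builtin_popcount(0xf0f0));
	return 0;
}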
arch/x86/include/asm/bitops.h
| 1 | #ifndef _ASM_X86_BITOPS_H | 1 | #ifndef _ASM_X86_BITOPS_H |
| 2 | #define _ASM_X86_BITOPS_H | 2 | #define _ASM_X86_BITOPS_H |
| 3 | 3 | ||
| 4 | /* | 4 | /* |
| 5 | * Copyright 1992, Linus Torvalds. | 5 | * Copyright 1992, Linus Torvalds. |
| 6 | * | 6 | * |
| 7 | * Note: inlines with more than a single statement should be marked | 7 | * Note: inlines with more than a single statement should be marked |
| 8 | * __always_inline to avoid problems with older gcc's inlining heuristics. | 8 | * __always_inline to avoid problems with older gcc's inlining heuristics. |
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | #ifndef _LINUX_BITOPS_H | 11 | #ifndef _LINUX_BITOPS_H |
| 12 | #error only <linux/bitops.h> can be included directly | 12 | #error only <linux/bitops.h> can be included directly |
| 13 | #endif | 13 | #endif |
| 14 | 14 | ||
| 15 | #include <linux/compiler.h> | 15 | #include <linux/compiler.h> |
| 16 | #include <asm/alternative.h> | 16 | #include <asm/alternative.h> |
| 17 | 17 | ||
| 18 | /* | 18 | /* |
| 19 | * These have to be done with inline assembly: that way the bit-setting | 19 | * These have to be done with inline assembly: that way the bit-setting |
| 20 | * is guaranteed to be atomic. All bit operations return 0 if the bit | 20 | * is guaranteed to be atomic. All bit operations return 0 if the bit |
| 21 | * was cleared before the operation and != 0 if it was not. | 21 | * was cleared before the operation and != 0 if it was not. |
| 22 | * | 22 | * |
| 23 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). | 23 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). |
| 24 | */ | 24 | */ |
| 25 | 25 | ||
| 26 | #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1) | 26 | #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1) |
| 27 | /* Technically wrong, but this avoids compilation errors on some gcc | 27 | /* Technically wrong, but this avoids compilation errors on some gcc |
| 28 | versions. */ | 28 | versions. */ |
| 29 | #define BITOP_ADDR(x) "=m" (*(volatile long *) (x)) | 29 | #define BITOP_ADDR(x) "=m" (*(volatile long *) (x)) |
| 30 | #else | 30 | #else |
| 31 | #define BITOP_ADDR(x) "+m" (*(volatile long *) (x)) | 31 | #define BITOP_ADDR(x) "+m" (*(volatile long *) (x)) |
| 32 | #endif | 32 | #endif |
| 33 | 33 | ||
| 34 | #define ADDR BITOP_ADDR(addr) | 34 | #define ADDR BITOP_ADDR(addr) |
| 35 | 35 | ||
| 36 | /* | 36 | /* |
| 37 | * We do the locked ops that don't return the old value as | 37 | * We do the locked ops that don't return the old value as |
| 38 | * a mask operation on a byte. | 38 | * a mask operation on a byte. |
| 39 | */ | 39 | */ |
| 40 | #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) | 40 | #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) |
| 41 | #define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3)) | 41 | #define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3)) |
| 42 | #define CONST_MASK(nr) (1 << ((nr) & 7)) | 42 | #define CONST_MASK(nr) (1 << ((nr) & 7)) |
| 43 | 43 | ||
| 44 | /** | 44 | /** |
| 45 | * set_bit - Atomically set a bit in memory | 45 | * set_bit - Atomically set a bit in memory |
| 46 | * @nr: the bit to set | 46 | * @nr: the bit to set |
| 47 | * @addr: the address to start counting from | 47 | * @addr: the address to start counting from |
| 48 | * | 48 | * |
| 49 | * This function is atomic and may not be reordered. See __set_bit() | 49 | * This function is atomic and may not be reordered. See __set_bit() |
| 50 | * if you do not require the atomic guarantees. | 50 | * if you do not require the atomic guarantees. |
| 51 | * | 51 | * |
| 52 | * Note: there are no guarantees that this function will not be reordered | 52 | * Note: there are no guarantees that this function will not be reordered |
| 53 | * on non x86 architectures, so if you are writing portable code, | 53 | * on non x86 architectures, so if you are writing portable code, |
| 54 | * make sure not to rely on its reordering guarantees. | 54 | * make sure not to rely on its reordering guarantees. |
| 55 | * | 55 | * |
| 56 | * Note that @nr may be almost arbitrarily large; this function is not | 56 | * Note that @nr may be almost arbitrarily large; this function is not |
| 57 | * restricted to acting on a single-word quantity. | 57 | * restricted to acting on a single-word quantity. |
| 58 | */ | 58 | */ |
| 59 | static __always_inline void | 59 | static __always_inline void |
| 60 | set_bit(unsigned int nr, volatile unsigned long *addr) | 60 | set_bit(unsigned int nr, volatile unsigned long *addr) |
| 61 | { | 61 | { |
| 62 | if (IS_IMMEDIATE(nr)) { | 62 | if (IS_IMMEDIATE(nr)) { |
| 63 | asm volatile(LOCK_PREFIX "orb %1,%0" | 63 | asm volatile(LOCK_PREFIX "orb %1,%0" |
| 64 | : CONST_MASK_ADDR(nr, addr) | 64 | : CONST_MASK_ADDR(nr, addr) |
| 65 | : "iq" ((u8)CONST_MASK(nr)) | 65 | : "iq" ((u8)CONST_MASK(nr)) |
| 66 | : "memory"); | 66 | : "memory"); |
| 67 | } else { | 67 | } else { |
| 68 | asm volatile(LOCK_PREFIX "bts %1,%0" | 68 | asm volatile(LOCK_PREFIX "bts %1,%0" |
| 69 | : BITOP_ADDR(addr) : "Ir" (nr) : "memory"); | 69 | : BITOP_ADDR(addr) : "Ir" (nr) : "memory"); |
| 70 | } | 70 | } |
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | /** | 73 | /** |
| 74 | * __set_bit - Set a bit in memory | 74 | * __set_bit - Set a bit in memory |
| 75 | * @nr: the bit to set | 75 | * @nr: the bit to set |
| 76 | * @addr: the address to start counting from | 76 | * @addr: the address to start counting from |
| 77 | * | 77 | * |
| 78 | * Unlike set_bit(), this function is non-atomic and may be reordered. | 78 | * Unlike set_bit(), this function is non-atomic and may be reordered. |
| 79 | * If it's called on the same region of memory simultaneously, the effect | 79 | * If it's called on the same region of memory simultaneously, the effect |
| 80 | * may be that only one operation succeeds. | 80 | * may be that only one operation succeeds. |
| 81 | */ | 81 | */ |
| 82 | static inline void __set_bit(int nr, volatile unsigned long *addr) | 82 | static inline void __set_bit(int nr, volatile unsigned long *addr) |
| 83 | { | 83 | { |
| 84 | asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory"); | 84 | asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory"); |
| 85 | } | 85 | } |
| 86 | 86 | ||
| 87 | /** | 87 | /** |
| 88 | * clear_bit - Clears a bit in memory | 88 | * clear_bit - Clears a bit in memory |
| 89 | * @nr: Bit to clear | 89 | * @nr: Bit to clear |
| 90 | * @addr: Address to start counting from | 90 | * @addr: Address to start counting from |
| 91 | * | 91 | * |
| 92 | * clear_bit() is atomic and may not be reordered. However, it does | 92 | * clear_bit() is atomic and may not be reordered. However, it does |
| 93 | * not contain a memory barrier, so if it is used for locking purposes, | 93 | * not contain a memory barrier, so if it is used for locking purposes, |
| 94 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | 94 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() |
| 95 | * in order to ensure changes are visible on other processors. | 95 | * in order to ensure changes are visible on other processors. |
| 96 | */ | 96 | */ |
| 97 | static __always_inline void | 97 | static __always_inline void |
| 98 | clear_bit(int nr, volatile unsigned long *addr) | 98 | clear_bit(int nr, volatile unsigned long *addr) |
| 99 | { | 99 | { |
| 100 | if (IS_IMMEDIATE(nr)) { | 100 | if (IS_IMMEDIATE(nr)) { |
| 101 | asm volatile(LOCK_PREFIX "andb %1,%0" | 101 | asm volatile(LOCK_PREFIX "andb %1,%0" |
| 102 | : CONST_MASK_ADDR(nr, addr) | 102 | : CONST_MASK_ADDR(nr, addr) |
| 103 | : "iq" ((u8)~CONST_MASK(nr))); | 103 | : "iq" ((u8)~CONST_MASK(nr))); |
| 104 | } else { | 104 | } else { |
| 105 | asm volatile(LOCK_PREFIX "btr %1,%0" | 105 | asm volatile(LOCK_PREFIX "btr %1,%0" |
| 106 | : BITOP_ADDR(addr) | 106 | : BITOP_ADDR(addr) |
| 107 | : "Ir" (nr)); | 107 | : "Ir" (nr)); |
| 108 | } | 108 | } |
| 109 | } | 109 | } |
| 110 | 110 | ||
| 111 | /* | 111 | /* |
| 112 | * clear_bit_unlock - Clears a bit in memory | 112 | * clear_bit_unlock - Clears a bit in memory |
| 113 | * @nr: Bit to clear | 113 | * @nr: Bit to clear |
| 114 | * @addr: Address to start counting from | 114 | * @addr: Address to start counting from |
| 115 | * | 115 | * |
| 116 | * clear_bit() is atomic and implies release semantics before the memory | 116 | * clear_bit() is atomic and implies release semantics before the memory |
| 117 | * operation. It can be used for an unlock. | 117 | * operation. It can be used for an unlock. |
| 118 | */ | 118 | */ |
| 119 | static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr) | 119 | static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr) |
| 120 | { | 120 | { |
| 121 | barrier(); | 121 | barrier(); |
| 122 | clear_bit(nr, addr); | 122 | clear_bit(nr, addr); |
| 123 | } | 123 | } |
| 124 | 124 | ||
| 125 | static inline void __clear_bit(int nr, volatile unsigned long *addr) | 125 | static inline void __clear_bit(int nr, volatile unsigned long *addr) |
| 126 | { | 126 | { |
| 127 | asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); | 127 | asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); |
| 128 | } | 128 | } |
| 129 | 129 | ||
| 130 | /* | 130 | /* |
| 131 | * __clear_bit_unlock - Clears a bit in memory | 131 | * __clear_bit_unlock - Clears a bit in memory |
| 132 | * @nr: Bit to clear | 132 | * @nr: Bit to clear |
| 133 | * @addr: Address to start counting from | 133 | * @addr: Address to start counting from |
| 134 | * | 134 | * |
| 135 | * __clear_bit() is non-atomic and implies release semantics before the memory | 135 | * __clear_bit() is non-atomic and implies release semantics before the memory |
| 136 | * operation. It can be used for an unlock if no other CPUs can concurrently | 136 | * operation. It can be used for an unlock if no other CPUs can concurrently |
| 137 | * modify other bits in the word. | 137 | * modify other bits in the word. |
| 138 | * | 138 | * |
| 139 | * No memory barrier is required here, because x86 cannot reorder stores past | 139 | * No memory barrier is required here, because x86 cannot reorder stores past |
| 140 | * older loads. Same principle as spin_unlock. | 140 | * older loads. Same principle as spin_unlock. |
| 141 | */ | 141 | */ |
| 142 | static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr) | 142 | static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr) |
| 143 | { | 143 | { |
| 144 | barrier(); | 144 | barrier(); |
| 145 | __clear_bit(nr, addr); | 145 | __clear_bit(nr, addr); |
| 146 | } | 146 | } |
| 147 | 147 | ||
| 148 | #define smp_mb__before_clear_bit() barrier() | 148 | #define smp_mb__before_clear_bit() barrier() |
| 149 | #define smp_mb__after_clear_bit() barrier() | 149 | #define smp_mb__after_clear_bit() barrier() |
| 150 | 150 | ||
| 151 | /** | 151 | /** |
| 152 | * __change_bit - Toggle a bit in memory | 152 | * __change_bit - Toggle a bit in memory |
| 153 | * @nr: the bit to change | 153 | * @nr: the bit to change |
| 154 | * @addr: the address to start counting from | 154 | * @addr: the address to start counting from |
| 155 | * | 155 | * |
| 156 | * Unlike change_bit(), this function is non-atomic and may be reordered. | 156 | * Unlike change_bit(), this function is non-atomic and may be reordered. |
| 157 | * If it's called on the same region of memory simultaneously, the effect | 157 | * If it's called on the same region of memory simultaneously, the effect |
| 158 | * may be that only one operation succeeds. | 158 | * may be that only one operation succeeds. |
| 159 | */ | 159 | */ |
| 160 | static inline void __change_bit(int nr, volatile unsigned long *addr) | 160 | static inline void __change_bit(int nr, volatile unsigned long *addr) |
| 161 | { | 161 | { |
| 162 | asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); | 162 | asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); |
| 163 | } | 163 | } |
| 164 | 164 | ||
| 165 | /** | 165 | /** |
| 166 | * change_bit - Toggle a bit in memory | 166 | * change_bit - Toggle a bit in memory |
| 167 | * @nr: Bit to change | 167 | * @nr: Bit to change |
| 168 | * @addr: Address to start counting from | 168 | * @addr: Address to start counting from |
| 169 | * | 169 | * |
| 170 | * change_bit() is atomic and may not be reordered. | 170 | * change_bit() is atomic and may not be reordered. |
| 171 | * Note that @nr may be almost arbitrarily large; this function is not | 171 | * Note that @nr may be almost arbitrarily large; this function is not |
| 172 | * restricted to acting on a single-word quantity. | 172 | * restricted to acting on a single-word quantity. |
| 173 | */ | 173 | */ |
| 174 | static inline void change_bit(int nr, volatile unsigned long *addr) | 174 | static inline void change_bit(int nr, volatile unsigned long *addr) |
| 175 | { | 175 | { |
| 176 | if (IS_IMMEDIATE(nr)) { | 176 | if (IS_IMMEDIATE(nr)) { |
| 177 | asm volatile(LOCK_PREFIX "xorb %1,%0" | 177 | asm volatile(LOCK_PREFIX "xorb %1,%0" |
| 178 | : CONST_MASK_ADDR(nr, addr) | 178 | : CONST_MASK_ADDR(nr, addr) |
| 179 | : "iq" ((u8)CONST_MASK(nr))); | 179 | : "iq" ((u8)CONST_MASK(nr))); |
| 180 | } else { | 180 | } else { |
| 181 | asm volatile(LOCK_PREFIX "btc %1,%0" | 181 | asm volatile(LOCK_PREFIX "btc %1,%0" |
| 182 | : BITOP_ADDR(addr) | 182 | : BITOP_ADDR(addr) |
| 183 | : "Ir" (nr)); | 183 | : "Ir" (nr)); |
| 184 | } | 184 | } |
| 185 | } | 185 | } |
| 186 | 186 | ||
| 187 | /** | 187 | /** |
| 188 | * test_and_set_bit - Set a bit and return its old value | 188 | * test_and_set_bit - Set a bit and return its old value |
| 189 | * @nr: Bit to set | 189 | * @nr: Bit to set |
| 190 | * @addr: Address to count from | 190 | * @addr: Address to count from |
| 191 | * | 191 | * |
| 192 | * This operation is atomic and cannot be reordered. | 192 | * This operation is atomic and cannot be reordered. |
| 193 | * It also implies a memory barrier. | 193 | * It also implies a memory barrier. |
| 194 | */ | 194 | */ |
| 195 | static inline int test_and_set_bit(int nr, volatile unsigned long *addr) | 195 | static inline int test_and_set_bit(int nr, volatile unsigned long *addr) |
| 196 | { | 196 | { |
| 197 | int oldbit; | 197 | int oldbit; |
| 198 | 198 | ||
| 199 | asm volatile(LOCK_PREFIX "bts %2,%1\n\t" | 199 | asm volatile(LOCK_PREFIX "bts %2,%1\n\t" |
| 200 | "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); | 200 | "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); |
| 201 | 201 | ||
| 202 | return oldbit; | 202 | return oldbit; |
| 203 | } | 203 | } |
| 204 | 204 | ||
| 205 | /** | 205 | /** |
| 206 | * test_and_set_bit_lock - Set a bit and return its old value for lock | 206 | * test_and_set_bit_lock - Set a bit and return its old value for lock |
| 207 | * @nr: Bit to set | 207 | * @nr: Bit to set |
| 208 | * @addr: Address to count from | 208 | * @addr: Address to count from |
| 209 | * | 209 | * |
| 210 | * This is the same as test_and_set_bit on x86. | 210 | * This is the same as test_and_set_bit on x86. |
| 211 | */ | 211 | */ |
| 212 | static __always_inline int | 212 | static __always_inline int |
| 213 | test_and_set_bit_lock(int nr, volatile unsigned long *addr) | 213 | test_and_set_bit_lock(int nr, volatile unsigned long *addr) |
| 214 | { | 214 | { |
| 215 | return test_and_set_bit(nr, addr); | 215 | return test_and_set_bit(nr, addr); |
| 216 | } | 216 | } |
| 217 | 217 | ||
| 218 | /** | 218 | /** |
| 219 | * __test_and_set_bit - Set a bit and return its old value | 219 | * __test_and_set_bit - Set a bit and return its old value |
| 220 | * @nr: Bit to set | 220 | * @nr: Bit to set |
| 221 | * @addr: Address to count from | 221 | * @addr: Address to count from |
| 222 | * | 222 | * |
| 223 | * This operation is non-atomic and can be reordered. | 223 | * This operation is non-atomic and can be reordered. |
| 224 | * If two examples of this operation race, one can appear to succeed | 224 | * If two examples of this operation race, one can appear to succeed |
| 225 | * but actually fail. You must protect multiple accesses with a lock. | 225 | * but actually fail. You must protect multiple accesses with a lock. |
| 226 | */ | 226 | */ |
| 227 | static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) | 227 | static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) |
| 228 | { | 228 | { |
| 229 | int oldbit; | 229 | int oldbit; |
| 230 | 230 | ||
| 231 | asm("bts %2,%1\n\t" | 231 | asm("bts %2,%1\n\t" |
| 232 | "sbb %0,%0" | 232 | "sbb %0,%0" |
| 233 | : "=r" (oldbit), ADDR | 233 | : "=r" (oldbit), ADDR |
| 234 | : "Ir" (nr)); | 234 | : "Ir" (nr)); |
| 235 | return oldbit; | 235 | return oldbit; |
| 236 | } | 236 | } |
| 237 | 237 | ||
| 238 | /** | 238 | /** |
| 239 | * test_and_clear_bit - Clear a bit and return its old value | 239 | * test_and_clear_bit - Clear a bit and return its old value |
| 240 | * @nr: Bit to clear | 240 | * @nr: Bit to clear |
| 241 | * @addr: Address to count from | 241 | * @addr: Address to count from |
| 242 | * | 242 | * |
| 243 | * This operation is atomic and cannot be reordered. | 243 | * This operation is atomic and cannot be reordered. |
| 244 | * It also implies a memory barrier. | 244 | * It also implies a memory barrier. |
| 245 | */ | 245 | */ |
| 246 | static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) | 246 | static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) |
| 247 | { | 247 | { |
| 248 | int oldbit; | 248 | int oldbit; |
| 249 | 249 | ||
| 250 | asm volatile(LOCK_PREFIX "btr %2,%1\n\t" | 250 | asm volatile(LOCK_PREFIX "btr %2,%1\n\t" |
| 251 | "sbb %0,%0" | 251 | "sbb %0,%0" |
| 252 | : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); | 252 | : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); |
| 253 | 253 | ||
| 254 | return oldbit; | 254 | return oldbit; |
| 255 | } | 255 | } |
| 256 | 256 | ||
| 257 | /** | 257 | /** |
| 258 | * __test_and_clear_bit - Clear a bit and return its old value | 258 | * __test_and_clear_bit - Clear a bit and return its old value |
| 259 | * @nr: Bit to clear | 259 | * @nr: Bit to clear |
| 260 | * @addr: Address to count from | 260 | * @addr: Address to count from |
| 261 | * | 261 | * |
| 262 | * This operation is non-atomic and can be reordered. | 262 | * This operation is non-atomic and can be reordered. |
| 263 | * If two examples of this operation race, one can appear to succeed | 263 | * If two examples of this operation race, one can appear to succeed |
| 264 | * but actually fail. You must protect multiple accesses with a lock. | 264 | * but actually fail. You must protect multiple accesses with a lock. |
| 265 | */ | 265 | */ |
| 266 | static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) | 266 | static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) |
| 267 | { | 267 | { |
| 268 | int oldbit; | 268 | int oldbit; |
| 269 | 269 | ||
| 270 | asm volatile("btr %2,%1\n\t" | 270 | asm volatile("btr %2,%1\n\t" |
| 271 | "sbb %0,%0" | 271 | "sbb %0,%0" |
| 272 | : "=r" (oldbit), ADDR | 272 | : "=r" (oldbit), ADDR |
| 273 | : "Ir" (nr)); | 273 | : "Ir" (nr)); |
| 274 | return oldbit; | 274 | return oldbit; |
| 275 | } | 275 | } |
| 276 | 276 | ||
| 277 | /* WARNING: non atomic and it can be reordered! */ | 277 | /* WARNING: non atomic and it can be reordered! */ |
| 278 | static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) | 278 | static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) |
| 279 | { | 279 | { |
| 280 | int oldbit; | 280 | int oldbit; |
| 281 | 281 | ||
| 282 | asm volatile("btc %2,%1\n\t" | 282 | asm volatile("btc %2,%1\n\t" |
| 283 | "sbb %0,%0" | 283 | "sbb %0,%0" |
| 284 | : "=r" (oldbit), ADDR | 284 | : "=r" (oldbit), ADDR |
| 285 | : "Ir" (nr) : "memory"); | 285 | : "Ir" (nr) : "memory"); |
| 286 | 286 | ||
| 287 | return oldbit; | 287 | return oldbit; |
| 288 | } | 288 | } |
| 289 | 289 | ||
| 290 | /** | 290 | /** |
| 291 | * test_and_change_bit - Change a bit and return its old value | 291 | * test_and_change_bit - Change a bit and return its old value |
| 292 | * @nr: Bit to change | 292 | * @nr: Bit to change |
| 293 | * @addr: Address to count from | 293 | * @addr: Address to count from |
| 294 | * | 294 | * |
| 295 | * This operation is atomic and cannot be reordered. | 295 | * This operation is atomic and cannot be reordered. |
| 296 | * It also implies a memory barrier. | 296 | * It also implies a memory barrier. |
| 297 | */ | 297 | */ |
| 298 | static inline int test_and_change_bit(int nr, volatile unsigned long *addr) | 298 | static inline int test_and_change_bit(int nr, volatile unsigned long *addr) |
| 299 | { | 299 | { |
| 300 | int oldbit; | 300 | int oldbit; |
| 301 | 301 | ||
| 302 | asm volatile(LOCK_PREFIX "btc %2,%1\n\t" | 302 | asm volatile(LOCK_PREFIX "btc %2,%1\n\t" |
| 303 | "sbb %0,%0" | 303 | "sbb %0,%0" |
| 304 | : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); | 304 | : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); |
| 305 | 305 | ||
| 306 | return oldbit; | 306 | return oldbit; |
| 307 | } | 307 | } |
| 308 | 308 | ||
| 309 | static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) | 309 | static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) |
| 310 | { | 310 | { |
| 311 | return ((1UL << (nr % BITS_PER_LONG)) & | 311 | return ((1UL << (nr % BITS_PER_LONG)) & |
| 312 | (addr[nr / BITS_PER_LONG])) != 0; | 312 | (addr[nr / BITS_PER_LONG])) != 0; |
| 313 | } | 313 | } |
| 314 | 314 | ||
| 315 | static inline int variable_test_bit(int nr, volatile const unsigned long *addr) | 315 | static inline int variable_test_bit(int nr, volatile const unsigned long *addr) |
| 316 | { | 316 | { |
| 317 | int oldbit; | 317 | int oldbit; |
| 318 | 318 | ||
| 319 | asm volatile("bt %2,%1\n\t" | 319 | asm volatile("bt %2,%1\n\t" |
| 320 | "sbb %0,%0" | 320 | "sbb %0,%0" |
| 321 | : "=r" (oldbit) | 321 | : "=r" (oldbit) |
| 322 | : "m" (*(unsigned long *)addr), "Ir" (nr)); | 322 | : "m" (*(unsigned long *)addr), "Ir" (nr)); |
| 323 | 323 | ||
| 324 | return oldbit; | 324 | return oldbit; |
| 325 | } | 325 | } |
| 326 | 326 | ||
| 327 | #if 0 /* Fool kernel-doc since it doesn't do macros yet */ | 327 | #if 0 /* Fool kernel-doc since it doesn't do macros yet */ |
| 328 | /** | 328 | /** |
| 329 | * test_bit - Determine whether a bit is set | 329 | * test_bit - Determine whether a bit is set |
| 330 | * @nr: bit number to test | 330 | * @nr: bit number to test |
| 331 | * @addr: Address to start counting from | 331 | * @addr: Address to start counting from |
| 332 | */ | 332 | */ |
| 333 | static int test_bit(int nr, const volatile unsigned long *addr); | 333 | static int test_bit(int nr, const volatile unsigned long *addr); |
| 334 | #endif | 334 | #endif |
| 335 | 335 | ||
| 336 | #define test_bit(nr, addr) \ | 336 | #define test_bit(nr, addr) \ |
| 337 | (__builtin_constant_p((nr)) \ | 337 | (__builtin_constant_p((nr)) \ |
| 338 | ? constant_test_bit((nr), (addr)) \ | 338 | ? constant_test_bit((nr), (addr)) \ |
| 339 | : variable_test_bit((nr), (addr))) | 339 | : variable_test_bit((nr), (addr))) |
| 340 | 340 | ||
| 341 | /** | 341 | /** |
| 342 | * __ffs - find first set bit in word | 342 | * __ffs - find first set bit in word |
| 343 | * @word: The word to search | 343 | * @word: The word to search |
| 344 | * | 344 | * |
| 345 | * Undefined if no bit exists, so code should check against 0 first. | 345 | * Undefined if no bit exists, so code should check against 0 first. |
| 346 | */ | 346 | */ |
| 347 | static inline unsigned long __ffs(unsigned long word) | 347 | static inline unsigned long __ffs(unsigned long word) |
| 348 | { | 348 | { |
| 349 | asm("bsf %1,%0" | 349 | asm("bsf %1,%0" |
| 350 | : "=r" (word) | 350 | : "=r" (word) |
| 351 | : "rm" (word)); | 351 | : "rm" (word)); |
| 352 | return word; | 352 | return word; |
| 353 | } | 353 | } |
| 354 | 354 | ||
| 355 | /** | 355 | /** |
| 356 | * ffz - find first zero bit in word | 356 | * ffz - find first zero bit in word |
| 357 | * @word: The word to search | 357 | * @word: The word to search |
| 358 | * | 358 | * |
| 359 | * Undefined if no zero exists, so code should check against ~0UL first. | 359 | * Undefined if no zero exists, so code should check against ~0UL first. |
| 360 | */ | 360 | */ |
| 361 | static inline unsigned long ffz(unsigned long word) | 361 | static inline unsigned long ffz(unsigned long word) |
| 362 | { | 362 | { |
| 363 | asm("bsf %1,%0" | 363 | asm("bsf %1,%0" |
| 364 | : "=r" (word) | 364 | : "=r" (word) |
| 365 | : "r" (~word)); | 365 | : "r" (~word)); |
| 366 | return word; | 366 | return word; |
| 367 | } | 367 | } |
| 368 | 368 | ||
| 369 | /* | 369 | /* |
| 370 | * __fls: find last set bit in word | 370 | * __fls: find last set bit in word |
| 371 | * @word: The word to search | 371 | * @word: The word to search |
| 372 | * | 372 | * |
| 373 | * Undefined if no set bit exists, so code should check against 0 first. | 373 | * Undefined if no set bit exists, so code should check against 0 first. |
| 374 | */ | 374 | */ |
| 375 | static inline unsigned long __fls(unsigned long word) | 375 | static inline unsigned long __fls(unsigned long word) |
| 376 | { | 376 | { |
| 377 | asm("bsr %1,%0" | 377 | asm("bsr %1,%0" |
| 378 | : "=r" (word) | 378 | : "=r" (word) |
| 379 | : "rm" (word)); | 379 | : "rm" (word)); |
| 380 | return word; | 380 | return word; |
| 381 | } | 381 | } |
| 382 | 382 | ||
| 383 | #ifdef __KERNEL__ | 383 | #ifdef __KERNEL__ |
| 384 | /** | 384 | /** |
| 385 | * ffs - find first set bit in word | 385 | * ffs - find first set bit in word |
| 386 | * @x: the word to search | 386 | * @x: the word to search |
| 387 | * | 387 | * |
| 388 | * This is defined the same way as the libc and compiler builtin ffs | 388 | * This is defined the same way as the libc and compiler builtin ffs |
| 389 | * routines, therefore differs in spirit from the other bitops. | 389 | * routines, therefore differs in spirit from the other bitops. |
| 390 | * | 390 | * |
| 391 | * ffs(value) returns 0 if value is 0 or the position of the first | 391 | * ffs(value) returns 0 if value is 0 or the position of the first |
| 392 | * set bit if value is nonzero. The first (least significant) bit | 392 | * set bit if value is nonzero. The first (least significant) bit |
| 393 | * is at position 1. | 393 | * is at position 1. |
| 394 | */ | 394 | */ |
| 395 | static inline int ffs(int x) | 395 | static inline int ffs(int x) |
| 396 | { | 396 | { |
| 397 | int r; | 397 | int r; |
| 398 | #ifdef CONFIG_X86_CMOV | 398 | #ifdef CONFIG_X86_CMOV |
| 399 | asm("bsfl %1,%0\n\t" | 399 | asm("bsfl %1,%0\n\t" |
| 400 | "cmovzl %2,%0" | 400 | "cmovzl %2,%0" |
| 401 | : "=r" (r) : "rm" (x), "r" (-1)); | 401 | : "=r" (r) : "rm" (x), "r" (-1)); |
| 402 | #else | 402 | #else |
| 403 | asm("bsfl %1,%0\n\t" | 403 | asm("bsfl %1,%0\n\t" |
| 404 | "jnz 1f\n\t" | 404 | "jnz 1f\n\t" |
| 405 | "movl $-1,%0\n" | 405 | "movl $-1,%0\n" |
| 406 | "1:" : "=r" (r) : "rm" (x)); | 406 | "1:" : "=r" (r) : "rm" (x)); |
| 407 | #endif | 407 | #endif |
| 408 | return r + 1; | 408 | return r + 1; |
| 409 | } | 409 | } |
| 410 | 410 | ||
| 411 | /** | 411 | /** |
| 412 | * fls - find last set bit in word | 412 | * fls - find last set bit in word |
| 413 | * @x: the word to search | 413 | * @x: the word to search |
| 414 | * | 414 | * |
| 415 | * This is defined in a similar way as the libc and compiler builtin | 415 | * This is defined in a similar way as the libc and compiler builtin |
| 416 | * ffs, but returns the position of the most significant set bit. | 416 | * ffs, but returns the position of the most significant set bit. |
| 417 | * | 417 | * |
| 418 | * fls(value) returns 0 if value is 0 or the position of the last | 418 | * fls(value) returns 0 if value is 0 or the position of the last |
| 419 | * set bit if value is nonzero. The last (most significant) bit is | 419 | * set bit if value is nonzero. The last (most significant) bit is |
| 420 | * at position 32. | 420 | * at position 32. |
| 421 | */ | 421 | */ |
| 422 | static inline int fls(int x) | 422 | static inline int fls(int x) |
| 423 | { | 423 | { |
| 424 | int r; | 424 | int r; |
| 425 | #ifdef CONFIG_X86_CMOV | 425 | #ifdef CONFIG_X86_CMOV |
| 426 | asm("bsrl %1,%0\n\t" | 426 | asm("bsrl %1,%0\n\t" |
| 427 | "cmovzl %2,%0" | 427 | "cmovzl %2,%0" |
| 428 | : "=&r" (r) : "rm" (x), "rm" (-1)); | 428 | : "=&r" (r) : "rm" (x), "rm" (-1)); |
| 429 | #else | 429 | #else |
| 430 | asm("bsrl %1,%0\n\t" | 430 | asm("bsrl %1,%0\n\t" |
| 431 | "jnz 1f\n\t" | 431 | "jnz 1f\n\t" |
| 432 | "movl $-1,%0\n" | 432 | "movl $-1,%0\n" |
| 433 | "1:" : "=r" (r) : "rm" (x)); | 433 | "1:" : "=r" (r) : "rm" (x)); |
| 434 | #endif | 434 | #endif |
| 435 | return r + 1; | 435 | return r + 1; |
| 436 | } | 436 | } |
| 437 | #endif /* __KERNEL__ */ | 437 | #endif /* __KERNEL__ */ |
| 438 | 438 | ||
| 439 | #undef ADDR | 439 | #undef ADDR |
| 440 | 440 | ||
| 441 | #ifdef __KERNEL__ | 441 | #ifdef __KERNEL__ |
| 442 | 442 | ||
| 443 | #include <asm-generic/bitops/find.h> | 443 | #include <asm-generic/bitops/find.h> |
| 444 | 444 | ||
| 445 | #include <asm-generic/bitops/sched.h> | 445 | #include <asm-generic/bitops/sched.h> |
| 446 | 446 | ||
| 447 | #define ARCH_HAS_FAST_MULTIPLIER 1 | 447 | #define ARCH_HAS_FAST_MULTIPLIER 1 |
| 448 | 448 | ||
| 449 | #include <asm/arch_hweight.h> | 449 | #include <asm/arch_hweight.h> |
| 450 | 450 | ||
| 451 | #include <asm-generic/bitops/const_hweight.h> | 451 | #include <asm-generic/bitops/const_hweight.h> |
| 452 | 452 | ||
| 453 | #endif /* __KERNEL__ */ | 453 | #endif /* __KERNEL__ */ |
| 454 | 454 | ||
| 455 | #include <asm-generic/bitops/fls64.h> | 455 | #include <asm-generic/bitops/fls64.h> |
| 456 | 456 | ||
| 457 | #ifdef __KERNEL__ | 457 | #ifdef __KERNEL__ |
| 458 | 458 | ||
| 459 | #include <asm-generic/bitops/le.h> | 459 | #include <asm-generic/bitops/le.h> |
| 460 | 460 | ||
| 461 | #define ext2_set_bit_atomic(lock, nr, addr) \ | 461 | #define ext2_set_bit_atomic(lock, nr, addr) \ |
| 462 | test_and_set_bit((nr), (unsigned long *)(addr)) | 462 | test_and_set_bit((nr), (unsigned long *)(addr)) |
| 463 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | 463 | #define ext2_clear_bit_atomic(lock, nr, addr) \ |
| 464 | test_and_clear_bit((nr), (unsigned long *)(addr)) | 464 | test_and_clear_bit((nr), (unsigned long *)(addr)) |
| 465 | 465 | ||
| 466 | #include <asm-generic/bitops/minix.h> | ||
| 467 | |||
| 468 | #endif /* __KERNEL__ */ | 466 | #endif /* __KERNEL__ */ |
| 469 | #endif /* _ASM_X86_BITOPS_H */ | 467 | #endif /* _ASM_X86_BITOPS_H */ |
| 470 | 468 |
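For reference, the x86 ffs()/fls() above use bsf/bsr and then either a cmov or a branch to substitute -1 when the input is zero, so that r + 1 yields 0. A portable user-space sketch of that pattern follows; it is illustrative only, with __builtin_ctz/__builtin_clz standing in for the bsf/bsr instructions.

#include <limits.h>
#include <stdio.h>

/* Portable sketch of the bsf/cmov pattern in the x86 ffs()/fls() above:
 * find the bit index, substitute -1 for a zero input, then add 1. */
static int sketch_ffs(int x)
{
	int r = x ? (int)__builtin_ctz((unsigned int)x) : -1;
	return r + 1;
}

static int sketch_fls(int x)
{
	int r = x ? 31 - (int)__builtin_clz((unsigned int)x) : -1;
	return r + 1;
}

int main(void)
{
	printf("ffs(0)=%d ffs(0x1000)=%d\n", sketch_ffs(0), sketch_ffs(0x1000));
	printf("fls(0)=%d fls(INT_MIN)=%d\n", sketch_fls(0), sketch_fls(INT_MIN));
	return 0;
}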
arch/xtensa/include/asm/bitops.h
| 1 | /* | 1 | /* |
| 2 | * include/asm-xtensa/bitops.h | 2 | * include/asm-xtensa/bitops.h |
| 3 | * | 3 | * |
| 4 | * Atomic operations that C can't guarantee us. Useful for resource counting etc. | 4 | * Atomic operations that C can't guarantee us. Useful for resource counting etc. |
| 5 | * | 5 | * |
| 6 | * This file is subject to the terms and conditions of the GNU General Public | 6 | * This file is subject to the terms and conditions of the GNU General Public |
| 7 | * License. See the file "COPYING" in the main directory of this archive | 7 | * License. See the file "COPYING" in the main directory of this archive |
| 8 | * for more details. | 8 | * for more details. |
| 9 | * | 9 | * |
| 10 | * Copyright (C) 2001 - 2007 Tensilica Inc. | 10 | * Copyright (C) 2001 - 2007 Tensilica Inc. |
| 11 | */ | 11 | */ |
| 12 | 12 | ||
| 13 | #ifndef _XTENSA_BITOPS_H | 13 | #ifndef _XTENSA_BITOPS_H |
| 14 | #define _XTENSA_BITOPS_H | 14 | #define _XTENSA_BITOPS_H |
| 15 | 15 | ||
| 16 | #ifdef __KERNEL__ | 16 | #ifdef __KERNEL__ |
| 17 | 17 | ||
| 18 | #ifndef _LINUX_BITOPS_H | 18 | #ifndef _LINUX_BITOPS_H |
| 19 | #error only <linux/bitops.h> can be included directly | 19 | #error only <linux/bitops.h> can be included directly |
| 20 | #endif | 20 | #endif |
| 21 | 21 | ||
| 22 | #include <asm/processor.h> | 22 | #include <asm/processor.h> |
| 23 | #include <asm/byteorder.h> | 23 | #include <asm/byteorder.h> |
| 24 | #include <asm/system.h> | 24 | #include <asm/system.h> |
| 25 | 25 | ||
| 26 | #ifdef CONFIG_SMP | 26 | #ifdef CONFIG_SMP |
| 27 | # error SMP not supported on this architecture | 27 | # error SMP not supported on this architecture |
| 28 | #endif | 28 | #endif |
| 29 | 29 | ||
| 30 | #define smp_mb__before_clear_bit() barrier() | 30 | #define smp_mb__before_clear_bit() barrier() |
| 31 | #define smp_mb__after_clear_bit() barrier() | 31 | #define smp_mb__after_clear_bit() barrier() |
| 32 | 32 | ||
| 33 | #include <asm-generic/bitops/atomic.h> | 33 | #include <asm-generic/bitops/atomic.h> |
| 34 | #include <asm-generic/bitops/non-atomic.h> | 34 | #include <asm-generic/bitops/non-atomic.h> |
| 35 | 35 | ||
| 36 | #if XCHAL_HAVE_NSA | 36 | #if XCHAL_HAVE_NSA |
| 37 | 37 | ||
| 38 | static inline unsigned long __cntlz (unsigned long x) | 38 | static inline unsigned long __cntlz (unsigned long x) |
| 39 | { | 39 | { |
| 40 | int lz; | 40 | int lz; |
| 41 | asm ("nsau %0, %1" : "=r" (lz) : "r" (x)); | 41 | asm ("nsau %0, %1" : "=r" (lz) : "r" (x)); |
| 42 | return lz; | 42 | return lz; |
| 43 | } | 43 | } |
| 44 | 44 | ||
| 45 | /* | 45 | /* |
| 46 | * ffz: Find first zero in word. Undefined if no zero exists. | 46 | * ffz: Find first zero in word. Undefined if no zero exists. |
| 47 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). | 47 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). |
| 48 | */ | 48 | */ |
| 49 | 49 | ||
| 50 | static inline int ffz(unsigned long x) | 50 | static inline int ffz(unsigned long x) |
| 51 | { | 51 | { |
| 52 | return 31 - __cntlz(~x & -~x); | 52 | return 31 - __cntlz(~x & -~x); |
| 53 | } | 53 | } |
| 54 | 54 | ||
| 55 | /* | 55 | /* |
| 56 | * __ffs: Find first bit set in word. Return 0 for bit 0 | 56 | * __ffs: Find first bit set in word. Return 0 for bit 0 |
| 57 | */ | 57 | */ |
| 58 | 58 | ||
| 59 | static inline int __ffs(unsigned long x) | 59 | static inline int __ffs(unsigned long x) |
| 60 | { | 60 | { |
| 61 | return 31 - __cntlz(x & -x); | 61 | return 31 - __cntlz(x & -x); |
| 62 | } | 62 | } |
| 63 | 63 | ||
| 64 | /* | 64 | /* |
| 65 | * ffs: Find first bit set in word. This is defined the same way as | 65 | * ffs: Find first bit set in word. This is defined the same way as |
| 66 | * the libc and compiler builtin ffs routines, therefore | 66 | * the libc and compiler builtin ffs routines, therefore |
| 67 | * differs in spirit from the above ffz (man ffs). | 67 | * differs in spirit from the above ffz (man ffs). |
| 68 | */ | 68 | */ |
| 69 | 69 | ||
| 70 | static inline int ffs(unsigned long x) | 70 | static inline int ffs(unsigned long x) |
| 71 | { | 71 | { |
| 72 | return 32 - __cntlz(x & -x); | 72 | return 32 - __cntlz(x & -x); |
| 73 | } | 73 | } |
| 74 | 74 | ||
| 75 | /* | 75 | /* |
| 76 | * fls: Find last (most-significant) bit set in word. | 76 | * fls: Find last (most-significant) bit set in word. |
| 77 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. | 77 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. |
| 78 | */ | 78 | */ |
| 79 | 79 | ||
| 80 | static inline int fls (unsigned int x) | 80 | static inline int fls (unsigned int x) |
| 81 | { | 81 | { |
| 82 | return 32 - __cntlz(x); | 82 | return 32 - __cntlz(x); |
| 83 | } | 83 | } |
| 84 | 84 | ||
| 85 | /** | 85 | /** |
| 86 | * __fls - find last (most-significant) set bit in a long word | 86 | * __fls - find last (most-significant) set bit in a long word |
| 87 | * @word: the word to search | 87 | * @word: the word to search |
| 88 | * | 88 | * |
| 89 | * Undefined if no set bit exists, so code should check against 0 first. | 89 | * Undefined if no set bit exists, so code should check against 0 first. |
| 90 | */ | 90 | */ |
| 91 | static inline unsigned long __fls(unsigned long word) | 91 | static inline unsigned long __fls(unsigned long word) |
| 92 | { | 92 | { |
| 93 | return 31 - __cntlz(word); | 93 | return 31 - __cntlz(word); |
| 94 | } | 94 | } |
| 95 | #else | 95 | #else |
| 96 | 96 | ||
| 97 | /* Use the generic implementation if we don't have the nsa/nsau instructions. */ | 97 | /* Use the generic implementation if we don't have the nsa/nsau instructions. */ |
| 98 | 98 | ||
| 99 | # include <asm-generic/bitops/ffs.h> | 99 | # include <asm-generic/bitops/ffs.h> |
| 100 | # include <asm-generic/bitops/__ffs.h> | 100 | # include <asm-generic/bitops/__ffs.h> |
| 101 | # include <asm-generic/bitops/ffz.h> | 101 | # include <asm-generic/bitops/ffz.h> |
| 102 | # include <asm-generic/bitops/fls.h> | 102 | # include <asm-generic/bitops/fls.h> |
| 103 | # include <asm-generic/bitops/__fls.h> | 103 | # include <asm-generic/bitops/__fls.h> |
| 104 | 104 | ||
| 105 | #endif | 105 | #endif |
| 106 | 106 | ||
| 107 | #include <asm-generic/bitops/fls64.h> | 107 | #include <asm-generic/bitops/fls64.h> |
| 108 | #include <asm-generic/bitops/find.h> | 108 | #include <asm-generic/bitops/find.h> |
| 109 | #include <asm-generic/bitops/le.h> | 109 | #include <asm-generic/bitops/le.h> |
| 110 | 110 | ||
| 111 | #ifdef __XTENSA_EL__ | 111 | #ifdef __XTENSA_EL__ |
| 112 | # define ext2_set_bit_atomic(lock,nr,addr) \ | 112 | # define ext2_set_bit_atomic(lock,nr,addr) \ |
| 113 | test_and_set_bit((nr), (unsigned long*)(addr)) | 113 | test_and_set_bit((nr), (unsigned long*)(addr)) |
| 114 | # define ext2_clear_bit_atomic(lock,nr,addr) \ | 114 | # define ext2_clear_bit_atomic(lock,nr,addr) \ |
| 115 | test_and_clear_bit((nr), (unsigned long*)(addr)) | 115 | test_and_clear_bit((nr), (unsigned long*)(addr)) |
| 116 | #elif defined(__XTENSA_EB__) | 116 | #elif defined(__XTENSA_EB__) |
| 117 | # define ext2_set_bit_atomic(lock,nr,addr) \ | 117 | # define ext2_set_bit_atomic(lock,nr,addr) \ |
| 118 | test_and_set_bit((nr) ^ 0x18, (unsigned long*)(addr)) | 118 | test_and_set_bit((nr) ^ 0x18, (unsigned long*)(addr)) |
| 119 | # define ext2_clear_bit_atomic(lock,nr,addr) \ | 119 | # define ext2_clear_bit_atomic(lock,nr,addr) \ |
| 120 | test_and_clear_bit((nr) ^ 0x18, (unsigned long*)(addr)) | 120 | test_and_clear_bit((nr) ^ 0x18, (unsigned long*)(addr)) |
| 121 | #else | 121 | #else |
| 122 | # error processor byte order undefined! | 122 | # error processor byte order undefined! |
| 123 | #endif | 123 | #endif |
| 124 | 124 | ||
| 125 | #include <asm-generic/bitops/hweight.h> | 125 | #include <asm-generic/bitops/hweight.h> |
| 126 | #include <asm-generic/bitops/lock.h> | 126 | #include <asm-generic/bitops/lock.h> |
| 127 | #include <asm-generic/bitops/sched.h> | 127 | #include <asm-generic/bitops/sched.h> |
| 128 | #include <asm-generic/bitops/minix.h> | ||
| 129 | 128 | ||
| 130 | #endif /* __KERNEL__ */ | 129 | #endif /* __KERNEL__ */ |
| 131 | 130 | ||
| 132 | #endif /* _XTENSA_BITOPS_H */ | 131 | #endif /* _XTENSA_BITOPS_H */ |
| 133 | 132 |
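The xtensa helpers above are all built on NSAU, a count-leading-zeros instruction: x & -x isolates the lowest set bit, so 31 - nsau(x & -x) is its index, and applying the same trick to ~x finds the first zero bit. The user-space check below verifies those identities; it is illustrative only, with __builtin_clz standing in for the nsau instruction.

#include <assert.h>
#include <stdio.h>

/* Sketch of the NSAU-based identities above, checked against ctz. */
static int sketch___ffs(unsigned int x)		/* x != 0 */
{
	return 31 - __builtin_clz(x & -x);
}

static int sketch_ffz(unsigned int x)		/* x != ~0u */
{
	return 31 - __builtin_clz(~x & -~x);
}

int main(void)
{
	for (unsigned int x = 1; x < (1u << 16); x++) {
		assert(sketch___ffs(x) == __builtin_ctz(x));	/* first set bit  */
		assert(sketch_ffz(~x) == __builtin_ctz(x));	/* first zero bit */
	}
	printf("NSAU-style __ffs/ffz identities hold\n");
	return 0;
}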
fs/minix/Kconfig
| 1 | config MINIX_FS | 1 | config MINIX_FS |
| 2 | tristate "Minix file system support" | 2 | tristate "Minix file system support" |
| 3 | depends on BLOCK | 3 | depends on BLOCK |
| 4 | help | 4 | help |
| 5 | Minix is a simple operating system used in many classes about OS's. | 5 | Minix is a simple operating system used in many classes about OS's. |
| 6 | The minix file system (method to organize files on a hard disk | 6 | The minix file system (method to organize files on a hard disk |
| 7 | partition or a floppy disk) was the original file system for Linux, | 7 | partition or a floppy disk) was the original file system for Linux, |
| 8 | but has been superseded by the second extended file system ext2fs. | 8 | but has been superseded by the second extended file system ext2fs. |
| 9 | You don't want to use the minix file system on your hard disk | 9 | You don't want to use the minix file system on your hard disk |
| 10 | because of certain built-in restrictions, but it is sometimes found | 10 | because of certain built-in restrictions, but it is sometimes found |
| 11 | on older Linux floppy disks. This option will enlarge your kernel | 11 | on older Linux floppy disks. This option will enlarge your kernel |
| 12 | by about 28 KB. If unsure, say N. | 12 | by about 28 KB. If unsure, say N. |
| 13 | 13 | ||
| 14 | To compile this file system support as a module, choose M here: the | 14 | To compile this file system support as a module, choose M here: the |
| 15 | module will be called minix. Note that the file system of your root | 15 | module will be called minix. Note that the file system of your root |
| 16 | partition (the one containing the directory /) cannot be compiled as | 16 | partition (the one containing the directory /) cannot be compiled as |
| 17 | a module. | 17 | a module. |
| 18 | |||
| 19 | config MINIX_FS_NATIVE_ENDIAN | ||
| 20 | def_bool MINIX_FS | ||
| 21 | depends on H8300 || M32R || MICROBLAZE || MIPS || S390 || SUPERH || SPARC || XTENSA || (M68K && !MMU) | ||
| 22 | |||
| 23 | config MINIX_FS_BIG_ENDIAN_16BIT_INDEXED | ||
| 24 | def_bool MINIX_FS | ||
| 25 | depends on M68K && MMU | ||
| 18 | 26 |
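The two options added above only record which bitmap flavor an architecture uses; the filesystem side (presumably in fs/minix/minix.h, whose listing below is cut off before the relevant hunk) can then pick its bitmap accessors at compile time. Below is a hedged sketch of what such a dispatch can look like, for the test-and-set case only; the macro name, the ^ 16 halfword swap for m68k, and the use of __test_and_set_bit_le() from asm-generic/bitops/le.h are assumptions for illustration, not a reproduction of the actual hunk.

/*
 * Illustrative sketch only (kernel context assumed, not the literal
 * patch hunk): selecting a minix bitmap accessor from the new Kconfig
 * symbols.  The ^ 16 transform shown here is an assumption.
 */
#if defined(CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED)
/* m68k with MMU: big-endian bitmaps indexed in 16-bit units */
#define minix_test_and_set_bit(nr, addr)	\
	__test_and_set_bit((nr) ^ 16, (unsigned long *)(addr))
#elif defined(CONFIG_MINIX_FS_NATIVE_ENDIAN)
/* native byte order: the plain non-atomic bitop works directly */
#define minix_test_and_set_bit(nr, addr)	\
	__test_and_set_bit((nr), (unsigned long *)(addr))
#else
/* everyone else: on-disk bitmaps are little-endian */
#define minix_test_and_set_bit(nr, addr)	\
	__test_and_set_bit_le((nr), (unsigned long *)(addr))
#endif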
fs/minix/minix.h
| 1 | #ifndef FS_MINIX_H | 1 | #ifndef FS_MINIX_H |
| 2 | #define FS_MINIX_H | 2 | #define FS_MINIX_H |
| 3 | 3 | ||
| 4 | #include <linux/fs.h> | 4 | #include <linux/fs.h> |
| 5 | #include <linux/pagemap.h> | 5 | #include <linux/pagemap.h> |
| 6 | #include <linux/minix_fs.h> | 6 | #include <linux/minix_fs.h> |
| 7 | 7 | ||
| 8 | #define INODE_VERSION(inode) minix_sb(inode->i_sb)->s_version | 8 | #define INODE_VERSION(inode) minix_sb(inode->i_sb)->s_version |
| 9 | #define MINIX_V1 0x0001 /* original minix fs */ | 9 | #define MINIX_V1 0x0001 /* original minix fs */ |
| 10 | #define MINIX_V2 0x0002 /* minix V2 fs */ | 10 | #define MINIX_V2 0x0002 /* minix V2 fs */ |
| 11 | #define MINIX_V3 0x0003 /* minix V3 fs */ | 11 | #define MINIX_V3 0x0003 /* minix V3 fs */ |
| 12 | 12 | ||
| 13 | /* | 13 | /* |
| 14 | * minix fs inode data in memory | 14 | * minix fs inode data in memory |
| 15 | */ | 15 | */ |
| 16 | struct minix_inode_info { | 16 | struct minix_inode_info { |
| 17 | union { | 17 | union { |
| 18 | __u16 i1_data[16]; | 18 | __u16 i1_data[16]; |
| 19 | __u32 i2_data[16]; | 19 | __u32 i2_data[16]; |
| 20 | } u; | 20 | } u; |
| 21 | struct inode vfs_inode; | 21 | struct inode vfs_inode; |
| 22 | }; | 22 | }; |
| 23 | 23 | ||
| 24 | /* | 24 | /* |
| 25 | * minix super-block data in memory | 25 | * minix super-block data in memory |
| 26 | */ | 26 | */ |
| 27 | struct minix_sb_info { | 27 | struct minix_sb_info { |
| 28 | unsigned long s_ninodes; | 28 | unsigned long s_ninodes; |
| 29 | unsigned long s_nzones; | 29 | unsigned long s_nzones; |
| 30 | unsigned long s_imap_blocks; | 30 | unsigned long s_imap_blocks; |
| 31 | unsigned long s_zmap_blocks; | 31 | unsigned long s_zmap_blocks; |
| 32 | unsigned long s_firstdatazone; | 32 | unsigned long s_firstdatazone; |
| 33 | unsigned long s_log_zone_size; | 33 | unsigned long s_log_zone_size; |
| 34 | unsigned long s_max_size; | 34 | unsigned long s_max_size; |
| 35 | int s_dirsize; | 35 | int s_dirsize; |
| 36 | int s_namelen; | 36 | int s_namelen; |
| 37 | int s_link_max; | 37 | int s_link_max; |
| 38 | struct buffer_head ** s_imap; | 38 | struct buffer_head ** s_imap; |
| 39 | struct buffer_head ** s_zmap; | 39 | struct buffer_head ** s_zmap; |
| 40 | struct buffer_head * s_sbh; | 40 | struct buffer_head * s_sbh; |
| 41 | struct minix_super_block * s_ms; | 41 | struct minix_super_block * s_ms; |
| 42 | unsigned short s_mount_state; | 42 | unsigned short s_mount_state; |
| 43 | unsigned short s_version; | 43 | unsigned short s_version; |
| 44 | }; | 44 | }; |
| 45 | 45 | ||
| 46 | extern struct inode *minix_iget(struct super_block *, unsigned long); | 46 | extern struct inode *minix_iget(struct super_block *, unsigned long); |
| 47 | extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, struct buffer_head **); | 47 | extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, struct buffer_head **); |
| 48 | extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **); | 48 | extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **); |
| 49 | extern struct inode * minix_new_inode(const struct inode *, int, int *); | 49 | extern struct inode * minix_new_inode(const struct inode *, int, int *); |
| 50 | extern void minix_free_inode(struct inode * inode); | 50 | extern void minix_free_inode(struct inode * inode); |
| 51 | extern unsigned long minix_count_free_inodes(struct minix_sb_info *sbi); | 51 | extern unsigned long minix_count_free_inodes(struct minix_sb_info *sbi); |
| 52 | extern int minix_new_block(struct inode * inode); | 52 | extern int minix_new_block(struct inode * inode); |
| 53 | extern void minix_free_block(struct inode *inode, unsigned long block); | 53 | extern void minix_free_block(struct inode *inode, unsigned long block); |
| 54 | extern unsigned long minix_count_free_blocks(struct minix_sb_info *sbi); | 54 | extern unsigned long minix_count_free_blocks(struct minix_sb_info *sbi); |
| 55 | extern int minix_getattr(struct vfsmount *, struct dentry *, struct kstat *); | 55 | extern int minix_getattr(struct vfsmount *, struct dentry *, struct kstat *); |
| 56 | extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len); | 56 | extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len); |
| 57 | 57 | ||
| 58 | extern void V1_minix_truncate(struct inode *); | 58 | extern void V1_minix_truncate(struct inode *); |
| 59 | extern void V2_minix_truncate(struct inode *); | 59 | extern void V2_minix_truncate(struct inode *); |
| 60 | extern void minix_truncate(struct inode *); | 60 | extern void minix_truncate(struct inode *); |
| 61 | extern void minix_set_inode(struct inode *, dev_t); | 61 | extern void minix_set_inode(struct inode *, dev_t); |
| 62 | extern int V1_minix_get_block(struct inode *, long, struct buffer_head *, int); | 62 | extern int V1_minix_get_block(struct inode *, long, struct buffer_head *, int); |
| 63 | extern int V2_minix_get_block(struct inode *, long, struct buffer_head *, int); | 63 | extern int V2_minix_get_block(struct inode *, long, struct buffer_head *, int); |
| 64 | extern unsigned V1_minix_blocks(loff_t, struct super_block *); | 64 | extern unsigned V1_minix_blocks(loff_t, struct super_block *); |
| 65 | extern unsigned V2_minix_blocks(loff_t, struct super_block *); | 65 | extern unsigned V2_minix_blocks(loff_t, struct super_block *); |
| 66 | 66 | ||
| 67 | extern struct minix_dir_entry *minix_find_entry(struct dentry*, struct page**); | 67 | extern struct minix_dir_entry *minix_find_entry(struct dentry*, struct page**); |
| 68 | extern int minix_add_link(struct dentry*, struct inode*); | 68 | extern int minix_add_link(struct dentry*, struct inode*); |
| 69 | extern int minix_delete_entry(struct minix_dir_entry*, struct page*); | 69 | extern int minix_delete_entry(struct minix_dir_entry*, struct page*); |
| 70 | extern int minix_make_empty(struct inode*, struct inode*); | 70 | extern int minix_make_empty(struct inode*, struct inode*); |
| 71 | extern int minix_empty_dir(struct inode*); | 71 | extern int minix_empty_dir(struct inode*); |
| 72 | extern void minix_set_link(struct minix_dir_entry*, struct page*, struct inode*); | 72 | extern void minix_set_link(struct minix_dir_entry*, struct page*, struct inode*); |
| 73 | extern struct minix_dir_entry *minix_dotdot(struct inode*, struct page**); | 73 | extern struct minix_dir_entry *minix_dotdot(struct inode*, struct page**); |
| 74 | extern ino_t minix_inode_by_name(struct dentry*); | 74 | extern ino_t minix_inode_by_name(struct dentry*); |
| 75 | 75 | ||
| 76 | extern const struct inode_operations minix_file_inode_operations; | 76 | extern const struct inode_operations minix_file_inode_operations; |
| 77 | extern const struct inode_operations minix_dir_inode_operations; | 77 | extern const struct inode_operations minix_dir_inode_operations; |
| 78 | extern const struct file_operations minix_file_operations; | 78 | extern const struct file_operations minix_file_operations; |
| 79 | extern const struct file_operations minix_dir_operations; | 79 | extern const struct file_operations minix_dir_operations; |
| 80 | 80 | ||
| 81 | static inline struct minix_sb_info *minix_sb(struct super_block *sb) | 81 | static inline struct minix_sb_info *minix_sb(struct super_block *sb) |
| 82 | { | 82 | { |
| 83 | return sb->s_fs_info; | 83 | return sb->s_fs_info; |
| 84 | } | 84 | } |
| 85 | 85 | ||
| 86 | static inline struct minix_inode_info *minix_i(struct inode *inode) | 86 | static inline struct minix_inode_info *minix_i(struct inode *inode) |
| 87 | { | 87 | { |
| 88 | return list_entry(inode, struct minix_inode_info, vfs_inode); | 88 | return list_entry(inode, struct minix_inode_info, vfs_inode); |
| 89 | } | 89 | } |
| 90 | 90 | ||
| 91 | #if defined(CONFIG_MINIX_FS_NATIVE_ENDIAN) && \ | ||
| 92 | defined(CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED) | ||
| 93 | |||
| 94 | #error Minix file system byte order broken | ||
| 95 | |||
| 96 | #elif defined(CONFIG_MINIX_FS_NATIVE_ENDIAN) | ||
| 97 | |||
| 98 | /* | ||
| 99 | * big-endian 32 or 64 bit indexed bitmaps on big-endian system or | ||
| 100 | * little-endian bitmaps on little-endian system | ||
| 101 | */ | ||
| 102 | |||
| 103 | #define minix_test_and_set_bit(nr, addr) \ | ||
| 104 | __test_and_set_bit((nr), (unsigned long *)(addr)) | ||
| 105 | #define minix_set_bit(nr, addr) \ | ||
| 106 | __set_bit((nr), (unsigned long *)(addr)) | ||
| 107 | #define minix_test_and_clear_bit(nr, addr) \ | ||
| 108 | __test_and_clear_bit((nr), (unsigned long *)(addr)) | ||
| 109 | #define minix_test_bit(nr, addr) \ | ||
| 110 | test_bit((nr), (unsigned long *)(addr)) | ||
| 111 | #define minix_find_first_zero_bit(addr, size) \ | ||
| 112 | find_first_zero_bit((unsigned long *)(addr), (size)) | ||
| 113 | |||
| 114 | #elif defined(CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED) | ||
| 115 | |||
| 116 | /* | ||
| 117 | * big-endian 16bit indexed bitmaps | ||
| 118 | */ | ||
| 119 | |||
| 120 | static inline int minix_find_first_zero_bit(const void *vaddr, unsigned size) | ||
| 121 | { | ||
| 122 | const unsigned short *p = vaddr, *addr = vaddr; | ||
| 123 | unsigned short num; | ||
| 124 | |||
| 125 | if (!size) | ||
| 126 | return 0; | ||
| 127 | |||
| 128 | size = (size >> 4) + ((size & 15) > 0); | ||
| 129 | while (*p++ == 0xffff) { | ||
| 130 | if (--size == 0) | ||
| 131 | return (p - addr) << 4; | ||
| 132 | } | ||
| 133 | |||
| 134 | num = *--p; | ||
| 135 | return ((p - addr) << 4) + ffz(num); | ||
| 136 | } | ||
| 137 | |||
| 138 | #define minix_test_and_set_bit(nr, addr) \ | ||
| 139 | __test_and_set_bit((nr) ^ 16, (unsigned long *)(addr)) | ||
| 140 | #define minix_set_bit(nr, addr) \ | ||
| 141 | __set_bit((nr) ^ 16, (unsigned long *)(addr)) | ||
| 142 | #define minix_test_and_clear_bit(nr, addr) \ | ||
| 143 | __test_and_clear_bit((nr) ^ 16, (unsigned long *)(addr)) | ||
| 144 | |||
| 145 | static inline int minix_test_bit(int nr, const void *vaddr) | ||
| 146 | { | ||
| 147 | const unsigned short *p = vaddr; | ||
| 148 | return (p[nr >> 4] & (1U << (nr & 15))) != 0; | ||
| 149 | } | ||
| 150 | |||
| 151 | #else | ||
| 152 | |||
| 153 | /* | ||
| 154 | * little-endian bitmaps | ||
| 155 | */ | ||
| 156 | |||
| 157 | #define minix_test_and_set_bit __test_and_set_bit_le | ||
| 158 | #define minix_set_bit __set_bit_le | ||
| 159 | #define minix_test_and_clear_bit __test_and_clear_bit_le | ||
| 160 | #define minix_test_bit test_bit_le | ||
| 161 | #define minix_find_first_zero_bit find_first_zero_bit_le | ||
| 162 | |||
| 163 | #endif | ||
| 164 | |||
| 91 | #endif /* FS_MINIX_H */ | 165 | #endif /* FS_MINIX_H */ |
| 92 | 166 |
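The wrappers above are consumed by the minix allocation code. The function below is a simplified sketch of a zone-allocation loop built on them, loosely modeled on fs/minix/bitmap.c but not the actual kernel code: the name minix_alloc_zone_sketch() is hypothetical, and the spinlock around the bitmap scan and the upper-bound check against s_nzones are omitted for brevity.

/*
 * Illustrative only: scan each zone-bitmap block with the minix_*
 * wrappers defined in this header and claim the first free bit.
 * Assumes <linux/buffer_head.h>, as fs/minix/bitmap.c already includes.
 */
static int minix_alloc_zone_sketch(struct super_block *sb)
{
	struct minix_sb_info *sbi = minix_sb(sb);
	unsigned bits = sb->s_blocksize * 8;	/* bits per bitmap block */
	unsigned long i;

	for (i = 0; i < sbi->s_zmap_blocks; i++) {
		struct buffer_head *bh = sbi->s_zmap[i];
		unsigned j = minix_find_first_zero_bit(bh->b_data, bits);

		if (j < bits && !minix_test_and_set_bit(j, bh->b_data)) {
			mark_buffer_dirty(bh);
			/* translate (bitmap block, bit) into a zone number */
			return i * bits + j + sbi->s_firstdatazone - 1;
		}
	}
	return 0;	/* no free zone */
}

Because the caller only ever goes through minix_find_first_zero_bit() and minix_test_and_set_bit(), moving the layout decision into this header leaves the allocation logic untouched.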
include/asm-generic/bitops.h
| 1 | #ifndef __ASM_GENERIC_BITOPS_H | 1 | #ifndef __ASM_GENERIC_BITOPS_H |
| 2 | #define __ASM_GENERIC_BITOPS_H | 2 | #define __ASM_GENERIC_BITOPS_H |
| 3 | 3 | ||
| 4 | /* | 4 | /* |
| 5 | * For the benefit of those who are trying to port Linux to another | 5 | * For the benefit of those who are trying to port Linux to another |
| 6 | * architecture, here are some C-language equivalents. You should | 6 | * architecture, here are some C-language equivalents. You should |
| 7 | * recode these in the native assembly language, if at all possible. | 7 | * recode these in the native assembly language, if at all possible. |
| 8 | * | 8 | * |
| 9 | * C language equivalents written by Theodore Ts'o, 9/26/92 | 9 | * C language equivalents written by Theodore Ts'o, 9/26/92 |
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | #include <linux/irqflags.h> | 12 | #include <linux/irqflags.h> |
| 13 | #include <linux/compiler.h> | 13 | #include <linux/compiler.h> |
| 14 | 14 | ||
| 15 | /* | 15 | /* |
| 16 | * clear_bit may not imply a memory barrier | 16 | * clear_bit may not imply a memory barrier |
| 17 | */ | 17 | */ |
| 18 | #ifndef smp_mb__before_clear_bit | 18 | #ifndef smp_mb__before_clear_bit |
| 19 | #define smp_mb__before_clear_bit() smp_mb() | 19 | #define smp_mb__before_clear_bit() smp_mb() |
| 20 | #define smp_mb__after_clear_bit() smp_mb() | 20 | #define smp_mb__after_clear_bit() smp_mb() |
| 21 | #endif | 21 | #endif |
| 22 | 22 | ||
| 23 | #include <asm-generic/bitops/__ffs.h> | 23 | #include <asm-generic/bitops/__ffs.h> |
| 24 | #include <asm-generic/bitops/ffz.h> | 24 | #include <asm-generic/bitops/ffz.h> |
| 25 | #include <asm-generic/bitops/fls.h> | 25 | #include <asm-generic/bitops/fls.h> |
| 26 | #include <asm-generic/bitops/__fls.h> | 26 | #include <asm-generic/bitops/__fls.h> |
| 27 | #include <asm-generic/bitops/fls64.h> | 27 | #include <asm-generic/bitops/fls64.h> |
| 28 | #include <asm-generic/bitops/find.h> | 28 | #include <asm-generic/bitops/find.h> |
| 29 | 29 | ||
| 30 | #ifndef _LINUX_BITOPS_H | 30 | #ifndef _LINUX_BITOPS_H |
| 31 | #error only <linux/bitops.h> can be included directly | 31 | #error only <linux/bitops.h> can be included directly |
| 32 | #endif | 32 | #endif |
| 33 | 33 | ||
| 34 | #include <asm-generic/bitops/sched.h> | 34 | #include <asm-generic/bitops/sched.h> |
| 35 | #include <asm-generic/bitops/ffs.h> | 35 | #include <asm-generic/bitops/ffs.h> |
| 36 | #include <asm-generic/bitops/hweight.h> | 36 | #include <asm-generic/bitops/hweight.h> |
| 37 | #include <asm-generic/bitops/lock.h> | 37 | #include <asm-generic/bitops/lock.h> |
| 38 | 38 | ||
| 39 | #include <asm-generic/bitops/atomic.h> | 39 | #include <asm-generic/bitops/atomic.h> |
| 40 | #include <asm-generic/bitops/non-atomic.h> | 40 | #include <asm-generic/bitops/non-atomic.h> |
| 41 | #include <asm-generic/bitops/le.h> | 41 | #include <asm-generic/bitops/le.h> |
| 42 | #include <asm-generic/bitops/ext2-atomic.h> | 42 | #include <asm-generic/bitops/ext2-atomic.h> |
| 43 | #include <asm-generic/bitops/minix.h> | ||
| 44 | 43 | ||
| 45 | #endif /* __ASM_GENERIC_BITOPS_H */ | 44 | #endif /* __ASM_GENERIC_BITOPS_H */ |
| 46 | 45 |
include/asm-generic/bitops/minix-le.h
| 1 | #ifndef _ASM_GENERIC_BITOPS_MINIX_LE_H_ | File was deleted | |
| 2 | #define _ASM_GENERIC_BITOPS_MINIX_LE_H_ | ||
| 3 | |||
| 4 | #define minix_test_and_set_bit(nr,addr) \ | ||
| 5 | __test_and_set_bit_le((nr), (unsigned long *)(addr)) | ||
| 6 | #define minix_set_bit(nr,addr) \ | ||
| 7 | __set_bit_le((nr), (unsigned long *)(addr)) | ||
| 8 | #define minix_test_and_clear_bit(nr,addr) \ | ||
| 9 | __test_and_clear_bit_le((nr), (unsigned long *)(addr)) | ||
| 10 | #define minix_test_bit(nr,addr) \ | ||
| 11 | test_bit_le((nr), (unsigned long *)(addr)) | ||
| 12 | #define minix_find_first_zero_bit(addr,size) \ | ||
| 13 | find_first_zero_bit_le((unsigned long *)(addr), (size)) | ||
| 14 | |||
| 15 | #endif /* _ASM_GENERIC_BITOPS_MINIX_LE_H_ */ | ||
| 16 |
include/asm-generic/bitops/minix.h
| 1 | #ifndef _ASM_GENERIC_BITOPS_MINIX_H_ | File was deleted | |
| 2 | #define _ASM_GENERIC_BITOPS_MINIX_H_ | ||
| 3 | |||
| 4 | #define minix_test_and_set_bit(nr,addr) \ | ||
| 5 | __test_and_set_bit((nr),(unsigned long *)(addr)) | ||
| 6 | #define minix_set_bit(nr,addr) \ | ||
| 7 | __set_bit((nr),(unsigned long *)(addr)) | ||
| 8 | #define minix_test_and_clear_bit(nr,addr) \ | ||
| 9 | __test_and_clear_bit((nr),(unsigned long *)(addr)) | ||
| 10 | #define minix_test_bit(nr,addr) \ | ||
| 11 | test_bit((nr),(unsigned long *)(addr)) | ||
| 12 | #define minix_find_first_zero_bit(addr,size) \ | ||
| 13 | find_first_zero_bit((unsigned long *)(addr),(size)) | ||
| 14 | |||
| 15 | #endif /* _ASM_GENERIC_BITOPS_MINIX_H_ */ | ||
| 16 |
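With both generic headers gone, every layout variant now lives in fs/minix/minix.h as shown above. As a closing sanity check, the userspace sketch below reimplements the big-endian 16-bit indexed scan added there, to make its return-value semantics concrete; ffz16() and find_first_zero_16() are stand-in names, with ffz16() doing the job of the kernel's ffz().

/* Userspace sketch, not kernel code: mirrors the 16-bit indexed helper above. */
#include <stdio.h>

static unsigned ffz16(unsigned short v)		/* first zero bit, like ffz() */
{
	unsigned i;

	for (i = 0; i < 16; i++)
		if (!(v & (1u << i)))
			return i;
	return 16;
}

static int find_first_zero_16(const unsigned short *addr, unsigned size)
{
	const unsigned short *p = addr;
	unsigned short num;

	if (!size)
		return 0;

	size = (size >> 4) + ((size & 15) > 0);
	while (*p++ == 0xffff) {
		if (--size == 0)
			return (p - addr) << 4;		/* every bit is set */
	}

	num = *--p;
	return ((p - addr) << 4) + ffz16(num);
}

int main(void)
{
	unsigned short part_full[2] = { 0xffff, 0xfffd };	/* halfword 1, bit 1 clear */
	unsigned short all_full[2]  = { 0xffff, 0xffff };

	printf("%d\n", find_first_zero_16(part_full, 32));	/* prints 17 */
	printf("%d\n", find_first_zero_16(all_full, 32));	/* prints 32 */
	return 0;
}

The first call returns 17 (halfword 1, bit 1 is the first clear bit), and the second returns 32, equal to the size argument, which is how callers detect a completely full bitmap.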