Commit 171d809df1896c1022f9778cd2788be6c255a7dc

Authored by Greg Ungerer
1 parent f941f5caa0

m68k: merge mmu and non-mmu bitops.h

The following patch merges the mmu and non-mmu versions of the m68k
bitops.h files. There is a good deal of difference between the two
files, but none of it is actually an mmu-specific difference. It is
all about the specific m68k/ColdFire variant we are targeting, so it
makes a lot of sense to merge these into a single bitops.h.

There are a number of ways this code could be factored. The approach
I have taken here is to keep the various versions of each macro/function
type together, which means some ifdefery around each one to select the
right implementation for each CPU type (sketched below).
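
For illustration, this is the shape the set_bit() case takes in the
merged file; the helpers it names are the inline-asm functions in the
diff below, one per instruction form:

#if defined(CONFIG_COLDFIRE)
#define set_bit(nr, vaddr)	bset_reg_set_bit(nr, vaddr)	/* bset, register operand only */
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define set_bit(nr, vaddr)	bset_mem_set_bit(nr, vaddr)	/* bset, memory operand */
#else	/* 68020+ can also use the bfset bitfield instruction */
#define set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bset_mem_set_bit(nr, vaddr) : \
				bfset_mem_set_bit(nr, vaddr))
#endif

Every other bit operation (clear, change, and the test-and variants)
follows the same three-way pattern.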

I have added comments in a couple of appropriate places to make it
clear which differences we are dealing with, specifically the
instruction and addressing mode differences between the CPU types
(a worked example of the bit addressing follows).
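
On the addressing: Linux numbers bit 0 as the least significant bit of
the first 32-bit word, but the m68k is big-endian, so that bit lives in
the last byte of the word. That is where the (nr ^ 31) / 8 byte offset,
the nr & 7 bit-within-byte, and the nr ^ 31 bitfield offset used
throughout the patch come from. A small host-side illustration (plain
C, not part of the patch):

#include <stdio.h>

/* Illustration only: where bit 'nr' of a 32-bit big-endian word lives,
 * using the same arithmetic as the helpers in this patch. */
static void where_is_bit(int nr)
{
	int byte = (nr ^ 31) / 8;	/* byte offset from the start of the long */
	int bit  = nr & 7;		/* bit position within that byte (0 = LSB) */

	printf("bit %2d -> byte %d, bit %d (bitfield offset %d)\n",
	       nr, byte, bit, nr ^ 31);
}

int main(void)
{
	where_is_bit(0);	/* byte 3, bit 0: LSB of a big-endian long */
	where_is_bit(8);	/* byte 2, bit 0 */
	where_is_bit(31);	/* byte 0, bit 7: MSB of a big-endian long */
	return 0;
}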

The merged form keeps the same underlying optimizations for each CPU
type for all the general bit clear/set/change and find-bit operations.
It does switch to using the generic le operations, though, instead of
any local variants (see the sketch below).
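
The local le variants being dropped did the endian swizzle by hand
(nr ^ 24 in bitops_mm.h, nr ^ BITOP_LE_SWIZZLE in bitops_no.h). On a
32-bit big-endian machine the generic <asm-generic/bitops/le.h> helpers
come down to the same thing; roughly, for the big-endian case (a sketch,
not the verbatim header):

#define BITOP_LE_SWIZZLE	((BITS_PER_LONG - 1) & ~0x7)	/* 24 on 32-bit m68k */

static inline int test_bit_le(int nr, const void *addr)
{
	return test_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline void __set_bit_le(int nr, void *addr)
{
	__set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}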

Build tested on ColdFire, 68328, 68360 (which is cpu32) and 68020+.
Run tested on ColdFire and ARAnyM.

Signed-off-by: Greg Ungerer <gerg@uclinux.org>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>

Showing 5 changed files with 535 additions and 837 deletions

... ... @@ -41,6 +41,10 @@
41 41 config ZONE_DMA
42 42 bool
43 43 default y
  44 +
  45 +config CPU_HAS_NO_BITFIELDS
  46 + bool
  47 +
44 48 config HZ
45 49 int
46 50 default 1000 if CLEOPATRA
arch/m68k/Kconfig.nommu
... ... @@ -16,6 +16,7 @@
16 16  
17 17 config M68000
18 18 bool
  19 + select CPU_HAS_NO_BITFIELDS
19 20 help
20 21 The Freescale (was Motorola) 68000 CPU is the first generation of
21 22 the well known M68K family of processors. The CPU core as well as
... ... @@ -25,6 +26,7 @@
25 26  
26 27 config MCPU32
27 28 bool
  29 + select CPU_HAS_NO_BITFIELDS
28 30 help
29 31 The Freescale (was then Motorola) CPU32 is a CPU core that is
30 32 based on the 68020 processor. For the most part it is used in
... ... @@ -34,6 +36,7 @@
34 36 bool
35 37 select GENERIC_GPIO
36 38 select ARCH_REQUIRE_GPIOLIB
  39 + select CPU_HAS_NO_BITFIELDS
37 40 help
38 41 The Freescale ColdFire family of processors is a modern derivitive
39 42 of the 68000 processor family. They are mainly targeted at embedded
arch/m68k/include/asm/bitops.h
1   -#ifdef __uClinux__
2   -#include "bitops_no.h"
  1 +#ifndef _M68K_BITOPS_H
  2 +#define _M68K_BITOPS_H
  3 +/*
  4 + * Copyright 1992, Linus Torvalds.
  5 + *
  6 + * This file is subject to the terms and conditions of the GNU General Public
  7 + * License. See the file COPYING in the main directory of this archive
  8 + * for more details.
  9 + */
  10 +
  11 +#ifndef _LINUX_BITOPS_H
  12 +#error only <linux/bitops.h> can be included directly
  13 +#endif
  14 +
  15 +#include <linux/compiler.h>
  16 +
  17 +/*
  18 + * Bit access functions vary across the ColdFire and 68k families.
  19 + * So we will break them out here, and then macro in the ones we want.
  20 + *
  21 + * ColdFire - supports standard bset/bclr/bchg with register operand only
  22 + * 68000 - supports standard bset/bclr/bchg with memory operand
  23 + * >= 68020 - also supports the bfset/bfclr/bfchg instructions
  24 + *
  25 + * Although it is possible to use only the bset/bclr/bchg with register
  26 + * operands on all platforms you end up with larger generated code.
  27 + * So we use the best form possible on a given platform.
  28 + */
  29 +
  30 +static inline void bset_reg_set_bit(int nr, volatile unsigned long *vaddr)
  31 +{
  32 + char *p = (char *)vaddr + (nr ^ 31) / 8;
  33 +
  34 + __asm__ __volatile__ ("bset %1,(%0)"
  35 + :
  36 + : "a" (p), "di" (nr & 7)
  37 + : "memory");
  38 +}
  39 +
  40 +static inline void bset_mem_set_bit(int nr, volatile unsigned long *vaddr)
  41 +{
  42 + char *p = (char *)vaddr + (nr ^ 31) / 8;
  43 +
  44 + __asm__ __volatile__ ("bset %1,%0"
  45 + : "+m" (*p)
  46 + : "di" (nr & 7));
  47 +}
  48 +
  49 +static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
  50 +{
  51 + __asm__ __volatile__ ("bfset %1{%0:#1}"
  52 + :
  53 + : "d" (nr ^ 31), "o" (*vaddr)
  54 + : "memory");
  55 +}
  56 +
  57 +#if defined(CONFIG_COLDFIRE)
  58 +#define set_bit(nr, vaddr) bset_reg_set_bit(nr, vaddr)
  59 +#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
  60 +#define set_bit(nr, vaddr) bset_mem_set_bit(nr, vaddr)
3 61 #else
4   -#include "bitops_mm.h"
  62 +#define set_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
  63 + bset_mem_set_bit(nr, vaddr) : \
  64 + bfset_mem_set_bit(nr, vaddr))
5 65 #endif
  66 +
  67 +#define __set_bit(nr, vaddr) set_bit(nr, vaddr)
  68 +
  69 +
  70 +/*
  71 + * clear_bit() doesn't provide any barrier for the compiler.
  72 + */
  73 +#define smp_mb__before_clear_bit() barrier()
  74 +#define smp_mb__after_clear_bit() barrier()
  75 +
  76 +static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
  77 +{
  78 + char *p = (char *)vaddr + (nr ^ 31) / 8;
  79 +
  80 + __asm__ __volatile__ ("bclr %1,(%0)"
  81 + :
  82 + : "a" (p), "di" (nr & 7)
  83 + : "memory");
  84 +}
  85 +
  86 +static inline void bclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
  87 +{
  88 + char *p = (char *)vaddr + (nr ^ 31) / 8;
  89 +
  90 + __asm__ __volatile__ ("bclr %1,%0"
  91 + : "+m" (*p)
  92 + : "di" (nr & 7));
  93 +}
  94 +
  95 +static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
  96 +{
  97 + __asm__ __volatile__ ("bfclr %1{%0:#1}"
  98 + :
  99 + : "d" (nr ^ 31), "o" (*vaddr)
  100 + : "memory");
  101 +}
  102 +
  103 +#if defined(CONFIG_COLDFIRE)
  104 +#define clear_bit(nr, vaddr) bclr_reg_clear_bit(nr, vaddr)
  105 +#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
  106 +#define clear_bit(nr, vaddr) bclr_mem_clear_bit(nr, vaddr)
  107 +#else
  108 +#define clear_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
  109 + bclr_mem_clear_bit(nr, vaddr) : \
  110 + bfclr_mem_clear_bit(nr, vaddr))
  111 +#endif
  112 +
  113 +#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)
  114 +
  115 +
  116 +static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
  117 +{
  118 + char *p = (char *)vaddr + (nr ^ 31) / 8;
  119 +
  120 + __asm__ __volatile__ ("bchg %1,(%0)"
  121 + :
  122 + : "a" (p), "di" (nr & 7)
  123 + : "memory");
  124 +}
  125 +
  126 +static inline void bchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
  127 +{
  128 + char *p = (char *)vaddr + (nr ^ 31) / 8;
  129 +
  130 + __asm__ __volatile__ ("bchg %1,%0"
  131 + : "+m" (*p)
  132 + : "di" (nr & 7));
  133 +}
  134 +
  135 +static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
  136 +{
  137 + __asm__ __volatile__ ("bfchg %1{%0:#1}"
  138 + :
  139 + : "d" (nr ^ 31), "o" (*vaddr)
  140 + : "memory");
  141 +}
  142 +
  143 +#if defined(CONFIG_COLDFIRE)
  144 +#define change_bit(nr, vaddr) bchg_reg_change_bit(nr, vaddr)
  145 +#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
  146 +#define change_bit(nr, vaddr) bchg_mem_change_bit(nr, vaddr)
  147 +#else
  148 +#define change_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
  149 + bchg_mem_change_bit(nr, vaddr) : \
  150 + bfchg_mem_change_bit(nr, vaddr))
  151 +#endif
  152 +
  153 +#define __change_bit(nr, vaddr) change_bit(nr, vaddr)
  154 +
  155 +
  156 +static inline int test_bit(int nr, const unsigned long *vaddr)
  157 +{
  158 + return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
  159 +}
  160 +
  161 +
  162 +static inline int bset_reg_test_and_set_bit(int nr,
  163 + volatile unsigned long *vaddr)
  164 +{
  165 + char *p = (char *)vaddr + (nr ^ 31) / 8;
  166 + char retval;
  167 +
  168 + __asm__ __volatile__ ("bset %2,(%1); sne %0"
  169 + : "=d" (retval)
  170 + : "a" (p), "di" (nr & 7)
  171 + : "memory");
  172 + return retval;
  173 +}
  174 +
  175 +static inline int bset_mem_test_and_set_bit(int nr,
  176 + volatile unsigned long *vaddr)
  177 +{
  178 + char *p = (char *)vaddr + (nr ^ 31) / 8;
  179 + char retval;
  180 +
  181 + __asm__ __volatile__ ("bset %2,%1; sne %0"
  182 + : "=d" (retval), "+m" (*p)
  183 + : "di" (nr & 7));
  184 + return retval;
  185 +}
  186 +
  187 +static inline int bfset_mem_test_and_set_bit(int nr,
  188 + volatile unsigned long *vaddr)
  189 +{
  190 + char retval;
  191 +
  192 + __asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
  193 + : "=d" (retval)
  194 + : "d" (nr ^ 31), "o" (*vaddr)
  195 + : "memory");
  196 + return retval;
  197 +}
  198 +
  199 +#if defined(CONFIG_COLDFIRE)
  200 +#define test_and_set_bit(nr, vaddr) bset_reg_test_and_set_bit(nr, vaddr)
  201 +#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
  202 +#define test_and_set_bit(nr, vaddr) bset_mem_test_and_set_bit(nr, vaddr)
  203 +#else
  204 +#define test_and_set_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
  205 + bset_mem_test_and_set_bit(nr, vaddr) : \
  206 + bfset_mem_test_and_set_bit(nr, vaddr))
  207 +#endif
  208 +
  209 +#define __test_and_set_bit(nr, vaddr) test_and_set_bit(nr, vaddr)
  210 +
  211 +
  212 +static inline int bclr_reg_test_and_clear_bit(int nr,
  213 + volatile unsigned long *vaddr)
  214 +{
  215 + char *p = (char *)vaddr + (nr ^ 31) / 8;
  216 + char retval;
  217 +
  218 + __asm__ __volatile__ ("bclr %2,(%1); sne %0"
  219 + : "=d" (retval)
  220 + : "a" (p), "di" (nr & 7)
  221 + : "memory");
  222 + return retval;
  223 +}
  224 +
  225 +static inline int bclr_mem_test_and_clear_bit(int nr,
  226 + volatile unsigned long *vaddr)
  227 +{
  228 + char *p = (char *)vaddr + (nr ^ 31) / 8;
  229 + char retval;
  230 +
  231 + __asm__ __volatile__ ("bclr %2,%1; sne %0"
  232 + : "=d" (retval), "+m" (*p)
  233 + : "di" (nr & 7));
  234 + return retval;
  235 +}
  236 +
  237 +static inline int bfclr_mem_test_and_clear_bit(int nr,
  238 + volatile unsigned long *vaddr)
  239 +{
  240 + char retval;
  241 +
  242 + __asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
  243 + : "=d" (retval)
  244 + : "d" (nr ^ 31), "o" (*vaddr)
  245 + : "memory");
  246 + return retval;
  247 +}
  248 +
  249 +#if defined(CONFIG_COLDFIRE)
  250 +#define test_and_clear_bit(nr, vaddr) bclr_reg_test_and_clear_bit(nr, vaddr)
  251 +#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
  252 +#define test_and_clear_bit(nr, vaddr) bclr_mem_test_and_clear_bit(nr, vaddr)
  253 +#else
  254 +#define test_and_clear_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
  255 + bclr_mem_test_and_clear_bit(nr, vaddr) : \
  256 + bfclr_mem_test_and_clear_bit(nr, vaddr))
  257 +#endif
  258 +
  259 +#define __test_and_clear_bit(nr, vaddr) test_and_clear_bit(nr, vaddr)
  260 +
  261 +
  262 +static inline int bchg_reg_test_and_change_bit(int nr,
  263 + volatile unsigned long *vaddr)
  264 +{
  265 + char *p = (char *)vaddr + (nr ^ 31) / 8;
  266 + char retval;
  267 +
  268 + __asm__ __volatile__ ("bchg %2,(%1); sne %0"
  269 + : "=d" (retval)
  270 + : "a" (p), "di" (nr & 7)
  271 + : "memory");
  272 + return retval;
  273 +}
  274 +
  275 +static inline int bchg_mem_test_and_change_bit(int nr,
  276 + volatile unsigned long *vaddr)
  277 +{
  278 + char *p = (char *)vaddr + (nr ^ 31) / 8;
  279 + char retval;
  280 +
  281 + __asm__ __volatile__ ("bchg %2,%1; sne %0"
  282 + : "=d" (retval), "+m" (*p)
  283 + : "di" (nr & 7));
  284 + return retval;
  285 +}
  286 +
  287 +static inline int bfchg_mem_test_and_change_bit(int nr,
  288 + volatile unsigned long *vaddr)
  289 +{
  290 + char retval;
  291 +
  292 + __asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
  293 + : "=d" (retval)
  294 + : "d" (nr ^ 31), "o" (*vaddr)
  295 + : "memory");
  296 + return retval;
  297 +}
  298 +
  299 +#if defined(CONFIG_COLDFIRE)
  300 +#define test_and_change_bit(nr, vaddr) bchg_reg_test_and_change_bit(nr, vaddr)
  301 +#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
  302 +#define test_and_change_bit(nr, vaddr) bchg_mem_test_and_change_bit(nr, vaddr)
  303 +#else
  304 +#define test_and_change_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
  305 + bchg_mem_test_and_change_bit(nr, vaddr) : \
  306 + bfchg_mem_test_and_change_bit(nr, vaddr))
  307 +#endif
  308 +
  309 +#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)
  310 +
  311 +
  312 +/*
  313 + * The true 68020 and more advanced processors support the "bfffo"
  314 + * instruction for finding bits. ColdFire and simple 68000 parts
  315 + * (including CPU32) do not support this. They simply use the generic
  316 + * functions.
  317 + */
  318 +#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
  319 +#include <asm-generic/bitops/find.h>
  320 +#include <asm-generic/bitops/ffz.h>
  321 +#else
  322 +
  323 +static inline int find_first_zero_bit(const unsigned long *vaddr,
  324 + unsigned size)
  325 +{
  326 + const unsigned long *p = vaddr;
  327 + int res = 32;
  328 + unsigned int words;
  329 + unsigned long num;
  330 +
  331 + if (!size)
  332 + return 0;
  333 +
  334 + words = (size + 31) >> 5;
  335 + while (!(num = ~*p++)) {
  336 + if (!--words)
  337 + goto out;
  338 + }
  339 +
  340 + __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
  341 + : "=d" (res) : "d" (num & -num));
  342 + res ^= 31;
  343 +out:
  344 + res += ((long)p - (long)vaddr - 4) * 8;
  345 + return res < size ? res : size;
  346 +}
  347 +#define find_first_zero_bit find_first_zero_bit
  348 +
  349 +static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
  350 + int offset)
  351 +{
  352 + const unsigned long *p = vaddr + (offset >> 5);
  353 + int bit = offset & 31UL, res;
  354 +
  355 + if (offset >= size)
  356 + return size;
  357 +
  358 + if (bit) {
  359 + unsigned long num = ~*p++ & (~0UL << bit);
  360 + offset -= bit;
  361 +
  362 + /* Look for zero in first longword */
  363 + __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
  364 + : "=d" (res) : "d" (num & -num));
  365 + if (res < 32) {
  366 + offset += res ^ 31;
  367 + return offset < size ? offset : size;
  368 + }
  369 + offset += 32;
  370 +
  371 + if (offset >= size)
  372 + return size;
  373 + }
  374 + /* No zero yet, search remaining full bytes for a zero */
  375 + return offset + find_first_zero_bit(p, size - offset);
  376 +}
  377 +#define find_next_zero_bit find_next_zero_bit
  378 +
  379 +static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
  380 +{
  381 + const unsigned long *p = vaddr;
  382 + int res = 32;
  383 + unsigned int words;
  384 + unsigned long num;
  385 +
  386 + if (!size)
  387 + return 0;
  388 +
  389 + words = (size + 31) >> 5;
  390 + while (!(num = *p++)) {
  391 + if (!--words)
  392 + goto out;
  393 + }
  394 +
  395 + __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
  396 + : "=d" (res) : "d" (num & -num));
  397 + res ^= 31;
  398 +out:
  399 + res += ((long)p - (long)vaddr - 4) * 8;
  400 + return res < size ? res : size;
  401 +}
  402 +#define find_first_bit find_first_bit
  403 +
  404 +static inline int find_next_bit(const unsigned long *vaddr, int size,
  405 + int offset)
  406 +{
  407 + const unsigned long *p = vaddr + (offset >> 5);
  408 + int bit = offset & 31UL, res;
  409 +
  410 + if (offset >= size)
  411 + return size;
  412 +
  413 + if (bit) {
  414 + unsigned long num = *p++ & (~0UL << bit);
  415 + offset -= bit;
  416 +
  417 + /* Look for one in first longword */
  418 + __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
  419 + : "=d" (res) : "d" (num & -num));
  420 + if (res < 32) {
  421 + offset += res ^ 31;
  422 + return offset < size ? offset : size;
  423 + }
  424 + offset += 32;
  425 +
  426 + if (offset >= size)
  427 + return size;
  428 + }
  429 + /* No one yet, search remaining full bytes for a one */
  430 + return offset + find_first_bit(p, size - offset);
  431 +}
  432 +#define find_next_bit find_next_bit
  433 +
  434 +/*
  435 + * ffz = Find First Zero in word. Undefined if no zero exists,
  436 + * so code should check against ~0UL first..
  437 + */
  438 +static inline unsigned long ffz(unsigned long word)
  439 +{
  440 + int res;
  441 +
  442 + __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
  443 + : "=d" (res) : "d" (~word & -~word));
  444 + return res ^ 31;
  445 +}
  446 +
  447 +#endif
  448 +
  449 +#ifdef __KERNEL__
  450 +
  451 +#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
  452 +
  453 +/*
  454 + * The newer ColdFire family members support a "bitrev" instruction
  455 + * and we can use that to implement a fast ffs. Older Coldfire parts,
  456 + * and normal 68000 parts don't have anything special, so we use the
  457 + * generic functions for those.
  458 + */
  459 +#if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
  460 + !defined(CONFIG_M68000) && !defined(CONFIG_MCPU32)
  461 +static inline int __ffs(int x)
  462 +{
  463 + __asm__ __volatile__ ("bitrev %0; ff1 %0"
  464 + : "=d" (x)
  465 + : "0" (x));
  466 + return x;
  467 +}
  468 +
  469 +static inline int ffs(int x)
  470 +{
  471 + if (!x)
  472 + return 0;
  473 + return __ffs(x) + 1;
  474 +}
  475 +
  476 +#else
  477 +#include <asm-generic/bitops/ffs.h>
  478 +#include <asm-generic/bitops/__ffs.h>
  479 +#endif
  480 +
  481 +#include <asm-generic/bitops/fls.h>
  482 +#include <asm-generic/bitops/__fls.h>
  483 +
  484 +#else
  485 +
  486 +/*
  487 + * ffs: find first bit set. This is defined the same way as
  488 + * the libc and compiler builtin ffs routines, therefore
  489 + * differs in spirit from the above ffz (man ffs).
  490 + */
  491 +static inline int ffs(int x)
  492 +{
  493 + int cnt;
  494 +
  495 + __asm__ ("bfffo %1{#0:#0},%0"
  496 + : "=d" (cnt)
  497 + : "dm" (x & -x));
  498 + return 32 - cnt;
  499 +}
  500 +#define __ffs(x) (ffs(x) - 1)
  501 +
  502 +/*
  503 + * fls: find last bit set.
  504 + */
  505 +static inline int fls(int x)
  506 +{
  507 + int cnt;
  508 +
  509 + __asm__ ("bfffo %1{#0,#0},%0"
  510 + : "=d" (cnt)
  511 + : "dm" (x));
  512 + return 32 - cnt;
  513 +}
  514 +
  515 +static inline int __fls(int x)
  516 +{
  517 + return fls(x) - 1;
  518 +}
  519 +
  520 +#endif
  521 +
  522 +#include <asm-generic/bitops/ext2-atomic.h>
  523 +#include <asm-generic/bitops/le.h>
  524 +#include <asm-generic/bitops/fls64.h>
  525 +#include <asm-generic/bitops/sched.h>
  526 +#include <asm-generic/bitops/hweight.h>
  527 +#include <asm-generic/bitops/lock.h>
  528 +#endif /* __KERNEL__ */
  529 +
  530 +#endif /* _M68K_BITOPS_H */
arch/m68k/include/asm/bitops_mm.h
1   -#ifndef _M68K_BITOPS_H
2   -#define _M68K_BITOPS_H
3   -/*
4   - * Copyright 1992, Linus Torvalds.
5   - *
6   - * This file is subject to the terms and conditions of the GNU General Public
7   - * License. See the file COPYING in the main directory of this archive
8   - * for more details.
9   - */
10   -
11   -#ifndef _LINUX_BITOPS_H
12   -#error only <linux/bitops.h> can be included directly
13   -#endif
14   -
15   -#include <linux/compiler.h>
16   -
17   -/*
18   - * Require 68020 or better.
19   - *
20   - * They use the standard big-endian m680x0 bit ordering.
21   - */
22   -
23   -#define test_and_set_bit(nr,vaddr) \
24   - (__builtin_constant_p(nr) ? \
25   - __constant_test_and_set_bit(nr, vaddr) : \
26   - __generic_test_and_set_bit(nr, vaddr))
27   -
28   -#define __test_and_set_bit(nr,vaddr) test_and_set_bit(nr,vaddr)
29   -
30   -static inline int __constant_test_and_set_bit(int nr, unsigned long *vaddr)
31   -{
32   - char *p = (char *)vaddr + (nr ^ 31) / 8;
33   - char retval;
34   -
35   - __asm__ __volatile__ ("bset %2,%1; sne %0"
36   - : "=d" (retval), "+m" (*p)
37   - : "di" (nr & 7));
38   -
39   - return retval;
40   -}
41   -
42   -static inline int __generic_test_and_set_bit(int nr, unsigned long *vaddr)
43   -{
44   - char retval;
45   -
46   - __asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
47   - : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");
48   -
49   - return retval;
50   -}
51   -
52   -#define set_bit(nr,vaddr) \
53   - (__builtin_constant_p(nr) ? \
54   - __constant_set_bit(nr, vaddr) : \
55   - __generic_set_bit(nr, vaddr))
56   -
57   -#define __set_bit(nr,vaddr) set_bit(nr,vaddr)
58   -
59   -static inline void __constant_set_bit(int nr, volatile unsigned long *vaddr)
60   -{
61   - char *p = (char *)vaddr + (nr ^ 31) / 8;
62   - __asm__ __volatile__ ("bset %1,%0"
63   - : "+m" (*p) : "di" (nr & 7));
64   -}
65   -
66   -static inline void __generic_set_bit(int nr, volatile unsigned long *vaddr)
67   -{
68   - __asm__ __volatile__ ("bfset %1{%0:#1}"
69   - : : "d" (nr^31), "o" (*vaddr) : "memory");
70   -}
71   -
72   -#define test_and_clear_bit(nr,vaddr) \
73   - (__builtin_constant_p(nr) ? \
74   - __constant_test_and_clear_bit(nr, vaddr) : \
75   - __generic_test_and_clear_bit(nr, vaddr))
76   -
77   -#define __test_and_clear_bit(nr,vaddr) test_and_clear_bit(nr,vaddr)
78   -
79   -static inline int __constant_test_and_clear_bit(int nr, unsigned long *vaddr)
80   -{
81   - char *p = (char *)vaddr + (nr ^ 31) / 8;
82   - char retval;
83   -
84   - __asm__ __volatile__ ("bclr %2,%1; sne %0"
85   - : "=d" (retval), "+m" (*p)
86   - : "di" (nr & 7));
87   -
88   - return retval;
89   -}
90   -
91   -static inline int __generic_test_and_clear_bit(int nr, unsigned long *vaddr)
92   -{
93   - char retval;
94   -
95   - __asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
96   - : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");
97   -
98   - return retval;
99   -}
100   -
101   -/*
102   - * clear_bit() doesn't provide any barrier for the compiler.
103   - */
104   -#define smp_mb__before_clear_bit() barrier()
105   -#define smp_mb__after_clear_bit() barrier()
106   -
107   -#define clear_bit(nr,vaddr) \
108   - (__builtin_constant_p(nr) ? \
109   - __constant_clear_bit(nr, vaddr) : \
110   - __generic_clear_bit(nr, vaddr))
111   -#define __clear_bit(nr,vaddr) clear_bit(nr,vaddr)
112   -
113   -static inline void __constant_clear_bit(int nr, volatile unsigned long *vaddr)
114   -{
115   - char *p = (char *)vaddr + (nr ^ 31) / 8;
116   - __asm__ __volatile__ ("bclr %1,%0"
117   - : "+m" (*p) : "di" (nr & 7));
118   -}
119   -
120   -static inline void __generic_clear_bit(int nr, volatile unsigned long *vaddr)
121   -{
122   - __asm__ __volatile__ ("bfclr %1{%0:#1}"
123   - : : "d" (nr^31), "o" (*vaddr) : "memory");
124   -}
125   -
126   -#define test_and_change_bit(nr,vaddr) \
127   - (__builtin_constant_p(nr) ? \
128   - __constant_test_and_change_bit(nr, vaddr) : \
129   - __generic_test_and_change_bit(nr, vaddr))
130   -
131   -#define __test_and_change_bit(nr,vaddr) test_and_change_bit(nr,vaddr)
132   -#define __change_bit(nr,vaddr) change_bit(nr,vaddr)
133   -
134   -static inline int __constant_test_and_change_bit(int nr, unsigned long *vaddr)
135   -{
136   - char *p = (char *)vaddr + (nr ^ 31) / 8;
137   - char retval;
138   -
139   - __asm__ __volatile__ ("bchg %2,%1; sne %0"
140   - : "=d" (retval), "+m" (*p)
141   - : "di" (nr & 7));
142   -
143   - return retval;
144   -}
145   -
146   -static inline int __generic_test_and_change_bit(int nr, unsigned long *vaddr)
147   -{
148   - char retval;
149   -
150   - __asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
151   - : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");
152   -
153   - return retval;
154   -}
155   -
156   -#define change_bit(nr,vaddr) \
157   - (__builtin_constant_p(nr) ? \
158   - __constant_change_bit(nr, vaddr) : \
159   - __generic_change_bit(nr, vaddr))
160   -
161   -static inline void __constant_change_bit(int nr, unsigned long *vaddr)
162   -{
163   - char *p = (char *)vaddr + (nr ^ 31) / 8;
164   - __asm__ __volatile__ ("bchg %1,%0"
165   - : "+m" (*p) : "di" (nr & 7));
166   -}
167   -
168   -static inline void __generic_change_bit(int nr, unsigned long *vaddr)
169   -{
170   - __asm__ __volatile__ ("bfchg %1{%0:#1}"
171   - : : "d" (nr^31), "o" (*vaddr) : "memory");
172   -}
173   -
174   -static inline int test_bit(int nr, const unsigned long *vaddr)
175   -{
176   - return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
177   -}
178   -
179   -static inline int find_first_zero_bit(const unsigned long *vaddr,
180   - unsigned size)
181   -{
182   - const unsigned long *p = vaddr;
183   - int res = 32;
184   - unsigned int words;
185   - unsigned long num;
186   -
187   - if (!size)
188   - return 0;
189   -
190   - words = (size + 31) >> 5;
191   - while (!(num = ~*p++)) {
192   - if (!--words)
193   - goto out;
194   - }
195   -
196   - __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
197   - : "=d" (res) : "d" (num & -num));
198   - res ^= 31;
199   -out:
200   - res += ((long)p - (long)vaddr - 4) * 8;
201   - return res < size ? res : size;
202   -}
203   -#define find_first_zero_bit find_first_zero_bit
204   -
205   -static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
206   - int offset)
207   -{
208   - const unsigned long *p = vaddr + (offset >> 5);
209   - int bit = offset & 31UL, res;
210   -
211   - if (offset >= size)
212   - return size;
213   -
214   - if (bit) {
215   - unsigned long num = ~*p++ & (~0UL << bit);
216   - offset -= bit;
217   -
218   - /* Look for zero in first longword */
219   - __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
220   - : "=d" (res) : "d" (num & -num));
221   - if (res < 32) {
222   - offset += res ^ 31;
223   - return offset < size ? offset : size;
224   - }
225   - offset += 32;
226   -
227   - if (offset >= size)
228   - return size;
229   - }
230   - /* No zero yet, search remaining full bytes for a zero */
231   - return offset + find_first_zero_bit(p, size - offset);
232   -}
233   -#define find_next_zero_bit find_next_zero_bit
234   -
235   -static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
236   -{
237   - const unsigned long *p = vaddr;
238   - int res = 32;
239   - unsigned int words;
240   - unsigned long num;
241   -
242   - if (!size)
243   - return 0;
244   -
245   - words = (size + 31) >> 5;
246   - while (!(num = *p++)) {
247   - if (!--words)
248   - goto out;
249   - }
250   -
251   - __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
252   - : "=d" (res) : "d" (num & -num));
253   - res ^= 31;
254   -out:
255   - res += ((long)p - (long)vaddr - 4) * 8;
256   - return res < size ? res : size;
257   -}
258   -#define find_first_bit find_first_bit
259   -
260   -static inline int find_next_bit(const unsigned long *vaddr, int size,
261   - int offset)
262   -{
263   - const unsigned long *p = vaddr + (offset >> 5);
264   - int bit = offset & 31UL, res;
265   -
266   - if (offset >= size)
267   - return size;
268   -
269   - if (bit) {
270   - unsigned long num = *p++ & (~0UL << bit);
271   - offset -= bit;
272   -
273   - /* Look for one in first longword */
274   - __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
275   - : "=d" (res) : "d" (num & -num));
276   - if (res < 32) {
277   - offset += res ^ 31;
278   - return offset < size ? offset : size;
279   - }
280   - offset += 32;
281   -
282   - if (offset >= size)
283   - return size;
284   - }
285   - /* No one yet, search remaining full bytes for a one */
286   - return offset + find_first_bit(p, size - offset);
287   -}
288   -#define find_next_bit find_next_bit
289   -
290   -/*
291   - * ffz = Find First Zero in word. Undefined if no zero exists,
292   - * so code should check against ~0UL first..
293   - */
294   -static inline unsigned long ffz(unsigned long word)
295   -{
296   - int res;
297   -
298   - __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
299   - : "=d" (res) : "d" (~word & -~word));
300   - return res ^ 31;
301   -}
302   -
303   -#ifdef __KERNEL__
304   -
305   -/*
306   - * ffs: find first bit set. This is defined the same way as
307   - * the libc and compiler builtin ffs routines, therefore
308   - * differs in spirit from the above ffz (man ffs).
309   - */
310   -
311   -static inline int ffs(int x)
312   -{
313   - int cnt;
314   -
315   - asm ("bfffo %1{#0:#0},%0" : "=d" (cnt) : "dm" (x & -x));
316   -
317   - return 32 - cnt;
318   -}
319   -#define __ffs(x) (ffs(x) - 1)
320   -
321   -/*
322   - * fls: find last bit set.
323   - */
324   -
325   -static inline int fls(int x)
326   -{
327   - int cnt;
328   -
329   - asm ("bfffo %1{#0,#0},%0" : "=d" (cnt) : "dm" (x));
330   -
331   - return 32 - cnt;
332   -}
333   -
334   -static inline int __fls(int x)
335   -{
336   - return fls(x) - 1;
337   -}
338   -
339   -#include <asm-generic/bitops/fls64.h>
340   -#include <asm-generic/bitops/sched.h>
341   -#include <asm-generic/bitops/hweight.h>
342   -#include <asm-generic/bitops/lock.h>
343   -
344   -/* Bitmap functions for the little endian bitmap. */
345   -
346   -static inline void __set_bit_le(int nr, void *addr)
347   -{
348   - __set_bit(nr ^ 24, addr);
349   -}
350   -
351   -static inline void __clear_bit_le(int nr, void *addr)
352   -{
353   - __clear_bit(nr ^ 24, addr);
354   -}
355   -
356   -static inline int __test_and_set_bit_le(int nr, void *addr)
357   -{
358   - return __test_and_set_bit(nr ^ 24, addr);
359   -}
360   -
361   -static inline int test_and_set_bit_le(int nr, void *addr)
362   -{
363   - return test_and_set_bit(nr ^ 24, addr);
364   -}
365   -
366   -static inline int __test_and_clear_bit_le(int nr, void *addr)
367   -{
368   - return __test_and_clear_bit(nr ^ 24, addr);
369   -}
370   -
371   -static inline int test_and_clear_bit_le(int nr, void *addr)
372   -{
373   - return test_and_clear_bit(nr ^ 24, addr);
374   -}
375   -
376   -static inline int test_bit_le(int nr, const void *vaddr)
377   -{
378   - const unsigned char *p = vaddr;
379   - return (p[nr >> 3] & (1U << (nr & 7))) != 0;
380   -}
381   -
382   -static inline int find_first_zero_bit_le(const void *vaddr, unsigned size)
383   -{
384   - const unsigned long *p = vaddr, *addr = vaddr;
385   - int res = 0;
386   - unsigned int words;
387   -
388   - if (!size)
389   - return 0;
390   -
391   - words = (size >> 5) + ((size & 31) > 0);
392   - while (*p++ == ~0UL) {
393   - if (--words == 0)
394   - goto out;
395   - }
396   -
397   - --p;
398   - for (res = 0; res < 32; res++)
399   - if (!test_bit_le(res, p))
400   - break;
401   -out:
402   - res += (p - addr) * 32;
403   - return res < size ? res : size;
404   -}
405   -#define find_first_zero_bit_le find_first_zero_bit_le
406   -
407   -static inline unsigned long find_next_zero_bit_le(const void *addr,
408   - unsigned long size, unsigned long offset)
409   -{
410   - const unsigned long *p = addr;
411   - int bit = offset & 31UL, res;
412   -
413   - if (offset >= size)
414   - return size;
415   -
416   - p += offset >> 5;
417   -
418   - if (bit) {
419   - offset -= bit;
420   - /* Look for zero in first longword */
421   - for (res = bit; res < 32; res++)
422   - if (!test_bit_le(res, p)) {
423   - offset += res;
424   - return offset < size ? offset : size;
425   - }
426   - p++;
427   - offset += 32;
428   -
429   - if (offset >= size)
430   - return size;
431   - }
432   - /* No zero yet, search remaining full bytes for a zero */
433   - return offset + find_first_zero_bit_le(p, size - offset);
434   -}
435   -#define find_next_zero_bit_le find_next_zero_bit_le
436   -
437   -static inline int find_first_bit_le(const void *vaddr, unsigned size)
438   -{
439   - const unsigned long *p = vaddr, *addr = vaddr;
440   - int res = 0;
441   - unsigned int words;
442   -
443   - if (!size)
444   - return 0;
445   -
446   - words = (size >> 5) + ((size & 31) > 0);
447   - while (*p++ == 0UL) {
448   - if (--words == 0)
449   - goto out;
450   - }
451   -
452   - --p;
453   - for (res = 0; res < 32; res++)
454   - if (test_bit_le(res, p))
455   - break;
456   -out:
457   - res += (p - addr) * 32;
458   - return res < size ? res : size;
459   -}
460   -#define find_first_bit_le find_first_bit_le
461   -
462   -static inline unsigned long find_next_bit_le(const void *addr,
463   - unsigned long size, unsigned long offset)
464   -{
465   - const unsigned long *p = addr;
466   - int bit = offset & 31UL, res;
467   -
468   - if (offset >= size)
469   - return size;
470   -
471   - p += offset >> 5;
472   -
473   - if (bit) {
474   - offset -= bit;
475   - /* Look for one in first longword */
476   - for (res = bit; res < 32; res++)
477   - if (test_bit_le(res, p)) {
478   - offset += res;
479   - return offset < size ? offset : size;
480   - }
481   - p++;
482   - offset += 32;
483   -
484   - if (offset >= size)
485   - return size;
486   - }
487   - /* No set bit yet, search remaining full bytes for a set bit */
488   - return offset + find_first_bit_le(p, size - offset);
489   -}
490   -#define find_next_bit_le find_next_bit_le
491   -
492   -/* Bitmap functions for the ext2 filesystem. */
493   -
494   -#define ext2_set_bit_atomic(lock, nr, addr) \
495   - test_and_set_bit_le(nr, addr)
496   -#define ext2_clear_bit_atomic(lock, nr, addr) \
497   - test_and_clear_bit_le(nr, addr)
498   -
499   -#endif /* __KERNEL__ */
500   -
501   -#endif /* _M68K_BITOPS_H */
arch/m68k/include/asm/bitops_no.h
1   -#ifndef _M68KNOMMU_BITOPS_H
2   -#define _M68KNOMMU_BITOPS_H
3   -
4   -/*
5   - * Copyright 1992, Linus Torvalds.
6   - */
7   -
8   -#include <linux/compiler.h>
9   -#include <asm/byteorder.h> /* swab32 */
10   -
11   -#ifdef __KERNEL__
12   -
13   -#ifndef _LINUX_BITOPS_H
14   -#error only <linux/bitops.h> can be included directly
15   -#endif
16   -
17   -#if defined (__mcfisaaplus__) || defined (__mcfisac__)
18   -static inline int ffs(unsigned int val)
19   -{
20   - if (!val)
21   - return 0;
22   -
23   - asm volatile(
24   - "bitrev %0\n\t"
25   - "ff1 %0\n\t"
26   - : "=d" (val)
27   - : "0" (val)
28   - );
29   - val++;
30   - return val;
31   -}
32   -
33   -static inline int __ffs(unsigned int val)
34   -{
35   - asm volatile(
36   - "bitrev %0\n\t"
37   - "ff1 %0\n\t"
38   - : "=d" (val)
39   - : "0" (val)
40   - );
41   - return val;
42   -}
43   -
44   -#else
45   -#include <asm-generic/bitops/ffs.h>
46   -#include <asm-generic/bitops/__ffs.h>
47   -#endif
48   -
49   -#include <asm-generic/bitops/sched.h>
50   -#include <asm-generic/bitops/ffz.h>
51   -
52   -static __inline__ void set_bit(int nr, volatile unsigned long * addr)
53   -{
54   -#ifdef CONFIG_COLDFIRE
55   - __asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
56   - : "+m" (((volatile char *)addr)[(nr^31) >> 3])
57   - : "d" (nr)
58   - : "%a0", "cc");
59   -#else
60   - __asm__ __volatile__ ("bset %1,%0"
61   - : "+m" (((volatile char *)addr)[(nr^31) >> 3])
62   - : "di" (nr)
63   - : "cc");
64   -#endif
65   -}
66   -
67   -#define __set_bit(nr, addr) set_bit(nr, addr)
68   -
69   -/*
70   - * clear_bit() doesn't provide any barrier for the compiler.
71   - */
72   -#define smp_mb__before_clear_bit() barrier()
73   -#define smp_mb__after_clear_bit() barrier()
74   -
75   -static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
76   -{
77   -#ifdef CONFIG_COLDFIRE
78   - __asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
79   - : "+m" (((volatile char *)addr)[(nr^31) >> 3])
80   - : "d" (nr)
81   - : "%a0", "cc");
82   -#else
83   - __asm__ __volatile__ ("bclr %1,%0"
84   - : "+m" (((volatile char *)addr)[(nr^31) >> 3])
85   - : "di" (nr)
86   - : "cc");
87   -#endif
88   -}
89   -
90   -#define __clear_bit(nr, addr) clear_bit(nr, addr)
91   -
92   -static __inline__ void change_bit(int nr, volatile unsigned long * addr)
93   -{
94   -#ifdef CONFIG_COLDFIRE
95   - __asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
96   - : "+m" (((volatile char *)addr)[(nr^31) >> 3])
97   - : "d" (nr)
98   - : "%a0", "cc");
99   -#else
100   - __asm__ __volatile__ ("bchg %1,%0"
101   - : "+m" (((volatile char *)addr)[(nr^31) >> 3])
102   - : "di" (nr)
103   - : "cc");
104   -#endif
105   -}
106   -
107   -#define __change_bit(nr, addr) change_bit(nr, addr)
108   -
109   -static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
110   -{
111   - char retval;
112   -
113   -#ifdef CONFIG_COLDFIRE
114   - __asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
115   - : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
116   - : "d" (nr)
117   - : "%a0");
118   -#else
119   - __asm__ __volatile__ ("bset %2,%1; sne %0"
120   - : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
121   - : "di" (nr)
122   - /* No clobber */);
123   -#endif
124   -
125   - return retval;
126   -}
127   -
128   -#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
129   -
130   -static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
131   -{
132   - char retval;
133   -
134   -#ifdef CONFIG_COLDFIRE
135   - __asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
136   - : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
137   - : "d" (nr)
138   - : "%a0");
139   -#else
140   - __asm__ __volatile__ ("bclr %2,%1; sne %0"
141   - : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
142   - : "di" (nr)
143   - /* No clobber */);
144   -#endif
145   -
146   - return retval;
147   -}
148   -
149   -#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)
150   -
151   -static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
152   -{
153   - char retval;
154   -
155   -#ifdef CONFIG_COLDFIRE
156   - __asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0"
157   - : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
158   - : "d" (nr)
159   - : "%a0");
160   -#else
161   - __asm__ __volatile__ ("bchg %2,%1; sne %0"
162   - : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
163   - : "di" (nr)
164   - /* No clobber */);
165   -#endif
166   -
167   - return retval;
168   -}
169   -
170   -#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)
171   -
172   -/*
173   - * This routine doesn't need to be atomic.
174   - */
175   -static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr)
176   -{
177   - return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
178   -}
179   -
180   -static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
181   -{
182   - int * a = (int *) addr;
183   - int mask;
184   -
185   - a += nr >> 5;
186   - mask = 1 << (nr & 0x1f);
187   - return ((mask & *a) != 0);
188   -}
189   -
190   -#define test_bit(nr,addr) \
191   -(__builtin_constant_p(nr) ? \
192   - __constant_test_bit((nr),(addr)) : \
193   - __test_bit((nr),(addr)))
194   -
195   -#include <asm-generic/bitops/find.h>
196   -#include <asm-generic/bitops/hweight.h>
197   -#include <asm-generic/bitops/lock.h>
198   -
199   -#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
200   -
201   -static inline void __set_bit_le(int nr, void *addr)
202   -{
203   - __set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
204   -}
205   -
206   -static inline void __clear_bit_le(int nr, void *addr)
207   -{
208   - __clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
209   -}
210   -
211   -static inline int __test_and_set_bit_le(int nr, volatile void *addr)
212   -{
213   - char retval;
214   -
215   -#ifdef CONFIG_COLDFIRE
216   - __asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
217   - : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
218   - : "d" (nr)
219   - : "%a0");
220   -#else
221   - __asm__ __volatile__ ("bset %2,%1; sne %0"
222   - : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
223   - : "di" (nr)
224   - /* No clobber */);
225   -#endif
226   -
227   - return retval;
228   -}
229   -
230   -static inline int __test_and_clear_bit_le(int nr, volatile void *addr)
231   -{
232   - char retval;
233   -
234   -#ifdef CONFIG_COLDFIRE
235   - __asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
236   - : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
237   - : "d" (nr)
238   - : "%a0");
239   -#else
240   - __asm__ __volatile__ ("bclr %2,%1; sne %0"
241   - : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
242   - : "di" (nr)
243   - /* No clobber */);
244   -#endif
245   -
246   - return retval;
247   -}
248   -
249   -#include <asm-generic/bitops/ext2-atomic.h>
250   -
251   -static inline int test_bit_le(int nr, const volatile void *addr)
252   -{
253   - char retval;
254   -
255   -#ifdef CONFIG_COLDFIRE
256   - __asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
257   - : "=d" (retval)
258   - : "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
259   - : "%a0");
260   -#else
261   - __asm__ __volatile__ ("btst %2,%1; sne %0"
262   - : "=d" (retval)
263   - : "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr)
264   - /* No clobber */);
265   -#endif
266   -
267   - return retval;
268   -}
269   -
270   -#define find_first_zero_bit_le(addr, size) \
271   - find_next_zero_bit_le((addr), (size), 0)
272   -
273   -static inline unsigned long find_next_zero_bit_le(void *addr, unsigned long size, unsigned long offset)
274   -{
275   - unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
276   - unsigned long result = offset & ~31UL;
277   - unsigned long tmp;
278   -
279   - if (offset >= size)
280   - return size;
281   - size -= result;
282   - offset &= 31UL;
283   - if(offset) {
284   - /* We hold the little endian value in tmp, but then the
285   - * shift is illegal. So we could keep a big endian value
286   - * in tmp, like this:
287   - *
288   - * tmp = __swab32(*(p++));
289   - * tmp |= ~0UL >> (32-offset);
290   - *
291   - * but this would decrease performance, so we change the
292   - * shift:
293   - */
294   - tmp = *(p++);
295   - tmp |= __swab32(~0UL >> (32-offset));
296   - if(size < 32)
297   - goto found_first;
298   - if(~tmp)
299   - goto found_middle;
300   - size -= 32;
301   - result += 32;
302   - }
303   - while(size & ~31UL) {
304   - if(~(tmp = *(p++)))
305   - goto found_middle;
306   - result += 32;
307   - size -= 32;
308   - }
309   - if(!size)
310   - return result;
311   - tmp = *p;
312   -
313   -found_first:
314   - /* tmp is little endian, so we would have to swab the shift,
315   - * see above. But then we have to swab tmp below for ffz, so
316   - * we might as well do this here.
317   - */
318   - return result + ffz(__swab32(tmp) | (~0UL << size));
319   -found_middle:
320   - return result + ffz(__swab32(tmp));
321   -}
322   -#define find_next_zero_bit_le find_next_zero_bit_le
323   -
324   -extern unsigned long find_next_bit_le(const void *addr,
325   - unsigned long size, unsigned long offset);
326   -
327   -#endif /* __KERNEL__ */
328   -
329   -#include <asm-generic/bitops/fls.h>
330   -#include <asm-generic/bitops/__fls.h>
331   -#include <asm-generic/bitops/fls64.h>
332   -
333   -#endif /* _M68KNOMMU_BITOPS_H */