Blame view

include/asm-xtensa/byteorder.h 2.39 KB
9a8fd5589   Chris Zankel   [PATCH] xtensa: A...
1
2
3
4
5
6
7
8
9
10
11
12
  /*
   * include/asm-xtensa/byteorder.h
   *
   * This file is subject to the terms and conditions of the GNU General Public
   * License.  See the file "COPYING" in the main directory of this archive
   * for more details.
   *
   * Copyright (C) 2001 - 2005 Tensilica Inc.
   */
  
  #ifndef _XTENSA_BYTEORDER_H
  #define _XTENSA_BYTEORDER_H
9a8fd5589   Chris Zankel   [PATCH] xtensa: A...
13
  #include <asm/types.h>
de4f6e5b4   Chris Zankel   [XTENSA] clean-up...
14
  #include <linux/compiler.h>
9a8fd5589   Chris Zankel   [PATCH] xtensa: A...
15

fd43fe19b   Chris Zankel   [PATCH] xtensa: f...
16
  static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
9a8fd5589   Chris Zankel   [PATCH] xtensa: A...
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
  {
      __u32 res;
      /* instruction sequence from Xtensa ISA release 2/2000 */
      __asm__("ssai     8           
  \t"
  	    "srli     %0, %1, 16  
  \t"
  	    "src      %0, %0, %1  
  \t"
  	    "src      %0, %0, %0  
  \t"
  	    "src      %0, %1, %0  
  "
  	    : "=&a" (res)
  	    : "a" (x)
  	    );
      return res;
  }
fd43fe19b   Chris Zankel   [PATCH] xtensa: f...
35
  static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x)
9a8fd5589   Chris Zankel   [PATCH] xtensa: A...
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
  {
      /* Given that 'short' values are signed (i.e., can be negative),
       * we cannot assume that the upper 16-bits of the register are
       * zero.  We are careful to mask values after shifting.
       */
  
      /* There exists an anomaly between xt-gcc and xt-xcc.  xt-gcc
       * inserts an extui instruction after putting this function inline
       * to ensure that it uses only the least-significant 16 bits of
       * the result.  xt-xcc doesn't use an extui, but assumes the
       * __asm__ macro follows convention that the upper 16 bits of an
       * 'unsigned short' result are still zero.  This macro doesn't
       * follow convention; indeed, it leaves garbage in the upport 16
       * bits of the register.
  
       * Declaring the temporary variables 'res' and 'tmp' to be 32-bit
       * types while the return type of the function is a 16-bit type
       * forces both compilers to insert exactly one extui instruction
       * (or equivalent) to mask off the upper 16 bits. */
  
      __u32 res;
      __u32 tmp;
  
      __asm__("extui    %1, %2, 8, 8
  \t"
  	    "slli     %0, %2, 8   
  \t"
  	    "or       %0, %0, %1  
  "
  	    : "=&a" (res), "=&a" (tmp)
  	    : "a" (x)
  	    );
  
      return res;
  }
  
/* Hook the arch-specific swab implementations into the generic
 * linux/byteorder machinery, which calls __arch__swab{16,32} when
 * defined instead of its C fallbacks. */
#define __arch__swab32(x) ___arch__swab32(x)
#define __arch__swab16(x) ___arch__swab16(x)

#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
/* Advertise 64-bit swab support; __SWAB_64_THRU_32__ tells the
 * generic code to build swab64 from two 32-bit swaps, since there is
 * no dedicated 64-bit swap sequence above. */
#  define __BYTEORDER_HAS_U64__
#  define __SWAB_64_THRU_32__
#endif

/* Xtensa cores are configurable as either endianness; the compiler
 * predefines exactly one of __XTENSA_EL__ / __XTENSA_EB__. */
#ifdef __XTENSA_EL__
# include <linux/byteorder/little_endian.h>
#elif defined(__XTENSA_EB__)
# include <linux/byteorder/big_endian.h>
#else
# error processor byte order undefined!
#endif
de4f6e5b4   Chris Zankel   [XTENSA] clean-up...
87
  #endif /* _XTENSA_BYTEORDER_H */