Blame view

arch/sh/math-emu/sfp-util.h 2.37 KB
4b565680d   Takashi YOSHII   sh: math-emu support
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
  /*
   * These are copied from glibc/stdlib/longlong.h
   */
  
  /*
   * add_ssaaaa(sh, sl, ah, al, bh, bl):
   * double-word addition, (sh:sl) = (ah:al) + (bh:bl), propagating the
   * carry out of the low word into the high word.  The low word is
   * stored last so that (sl) may alias (al)/(bl).
   */
  #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
    do {                                                                \
      UWtype __sum = (al) + (bl);                                       \
      UWtype __carry = __sum < (al); /* unsigned wrap => carry out */   \
      (sh) = (ah) + (bh) + __carry;                                     \
      (sl) = __sum;                                                     \
    } while (0)
  
  /*
   * sub_ddmmss(sh, sl, ah, al, bh, bl):
   * double-word subtraction, (sh:sl) = (ah:al) - (bh:bl), propagating
   * the borrow out of the low word into the high word.  The low word is
   * stored last so that (sl) may alias (al)/(bl).
   */
  #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
    do {                                                                \
      UWtype __diff = (al) - (bl);                                      \
      UWtype __borrow = __diff > (al); /* unsigned wrap => borrow */    \
      (sh) = (ah) - (bh) - __borrow;                                    \
      (sl) = __diff;                                                    \
    } while (0)
  
  /*
   * umul_ppmm(w1, w0, u, v):
   * 32x32 -> 64 bit unsigned multiply, (w1:w0) = u * v, using the SH
   * "dmulu.l" instruction.  The result lands in the MACL (low word) and
   * MACH (high word) system registers; the two "sts" instructions copy
   * them into the outputs, and both registers are listed as clobbered.
   *
   * Fix: the asm template must be a single C string literal with "\n\t"
   * separators and "\" line continuations — a string literal cannot
   * span physical lines, so the previous multi-line form did not parse.
   */
  #define umul_ppmm(w1, w0, u, v) \
    __asm__ ("dmulu.l %2,%3\n\tsts	macl,%1\n\tsts	mach,%0"	\
  	: "=r" ((u32)(w1)), "=r" ((u32)(w0))	\
  	:  "r" ((u32)(u)),   "r" ((u32)(v))	\
  	: "macl", "mach")
  
  /*
   * Half-word decomposition helpers (glibc longlong.h conventions):
   *   __ll_B           - the "digit" base, 2^(W_TYPE_SIZE/2)
   *   __ll_lowpart(t)  - low half of the word t
   *   __ll_highpart(t) - high half of the word t
   * Used by udiv_qrnnd below to do schoolbook division in base __ll_B.
   */
  #define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
  #define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
  #define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
  
  /*
   * udiv_qrnnd(q, r, n1, n0, d):
   * divide the two-word dividend (n1:n0) by the single word d, giving
   * quotient q and remainder r.  Works digit-by-digit in base __ll_B
   * (half words): each half-word quotient digit is estimated from a
   * division by the divisor's high half (__d1) and then corrected
   * downward at most twice — the two nested "if" corrections below.
   *
   * NOTE(review): glibc's longlong.h documents preconditions for this
   * macro — d normalized (most significant bit set) and n1 < d so the
   * quotient fits in one word; presumably the math-emu callers ensure
   * that, but confirm — this file does not show it.
   */
  #define udiv_qrnnd(q, r, n1, n0, d) \
    do {									\
      UWtype __d1, __d0, __q1, __q0;					\
      UWtype __r1, __r0, __m;						\
      __d1 = __ll_highpart (d);						\
      __d0 = __ll_lowpart (d);						\
  									\
      __r1 = (n1) % __d1;							\
      __q1 = (n1) / __d1;							\
      __m = (UWtype) __q1 * __d0;						\
      __r1 = __r1 * __ll_B | __ll_highpart (n0);				\
      if (__r1 < __m)							\
        {									\
  	__q1--, __r1 += (d);						\
  	if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
  	  if (__r1 < __m)						\
  	    __q1--, __r1 += (d);					\
        }									\
      __r1 -= __m;							\
  									\
      __r0 = __r1 % __d1;							\
      __q0 = __r1 / __d1;							\
      __m = (UWtype) __q0 * __d0;						\
      __r0 = __r0 * __ll_B | __ll_lowpart (n0);				\
      if (__r0 < __m)							\
        {									\
  	__q0--, __r0 += (d);						\
  	if (__r0 >= (d))						\
  	  if (__r0 < __m)						\
  	    __q0--, __r0 += (d);					\
        }									\
      __r0 -= __m;							\
  									\
      (q) = (UWtype) __q1 * __ll_B | __q0;				\
      (r) = __r0;								\
    } while (0)
  
  #define abort()	return 0
13da9e200   Linus Torvalds   Revert "endian: #...
71
72
  
  #define __BYTE_ORDER __LITTLE_ENDIAN