/* include/linux/math64.h */
  #ifndef _LINUX_MATH64_H
  #define _LINUX_MATH64_H
  
  #include <linux/types.h>
  #include <asm/div64.h>
  
  #if BITS_PER_LONG == 64
c2853c8df   Alex Shi   include/linux/mat...
8
9
  #define div64_long(x, y) div64_s64((x), (y))
  #define div64_ul(x, y)   div64_u64((x), (y))
f910381a5   Sasha Levin   math: Introduce d...
10

2418f4f28   Roman Zippel   introduce explici...
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
  /**
   * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
   *
   * This is commonly provided by 32bit archs to provide an optimized 64bit
   * divide.
   */
  static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
  {
  	*remainder = dividend % divisor;
  	return dividend / divisor;
  }
  
  /**
   * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
   */
  static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
  {
  	*remainder = dividend % divisor;
  	return dividend / divisor;
  }
6f6d6a1a6   Roman Zippel   rename div64_64 t...
31
  /**
eb18cba78   Mike Snitzer   math64: New separ...
32
33
34
35
36
37
38
39
40
   * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
   */
  static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
  {
  	*remainder = dividend % divisor;
  	return dividend / divisor;
  }
  
  /**
6f6d6a1a6   Roman Zippel   rename div64_64 t...
41
42
43
44
45
46
   * div64_u64 - unsigned 64bit divide with 64bit divisor
   */
  static inline u64 div64_u64(u64 dividend, u64 divisor)
  {
  	return dividend / divisor;
  }
658716d19   Brian Behlendorf   div64_u64(): impr...
47
48
49
50
51
52
53
  /**
   * div64_s64 - signed 64bit divide with 64bit divisor
   */
  static inline s64 div64_s64(s64 dividend, s64 divisor)
  {
  	return dividend / divisor;
  }
2418f4f28   Roman Zippel   introduce explici...
54
  #elif BITS_PER_LONG == 32
c2853c8df   Alex Shi   include/linux/mat...
55
56
  #define div64_long(x, y) div_s64((x), (y))
  #define div64_ul(x, y)   div_u64((x), (y))
f910381a5   Sasha Levin   math: Introduce d...
57

2418f4f28   Roman Zippel   introduce explici...
58
59
60
61
62
63
64
65
66
67
68
  #ifndef div_u64_rem
  static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
  {
  	*remainder = do_div(dividend, divisor);
  	return dividend;
  }
  #endif
  
  #ifndef div_s64_rem
  extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
  #endif
eb18cba78   Mike Snitzer   math64: New separ...
69
70
71
  #ifndef div64_u64_rem
  extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
  #endif
6f6d6a1a6   Roman Zippel   rename div64_64 t...
72
  #ifndef div64_u64
f30021341   Stanislaw Gruszka   Revert "math64: N...
73
  extern u64 div64_u64(u64 dividend, u64 divisor);
6f6d6a1a6   Roman Zippel   rename div64_64 t...
74
  #endif
658716d19   Brian Behlendorf   div64_u64(): impr...
75
76
77
  #ifndef div64_s64
  extern s64 div64_s64(s64 dividend, s64 divisor);
  #endif
2418f4f28   Roman Zippel   introduce explici...
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
  #endif /* BITS_PER_LONG */
  
  /**
   * div_u64 - unsigned 64bit divide with 32bit divisor
   *
   * This is the most common 64bit divide and should be used if possible,
   * as many 32bit archs can optimize this variant better than a full 64bit
   * divide.
   */
  #ifndef div_u64
  static inline u64 div_u64(u64 dividend, u32 divisor)
  {
  	u32 remainder;
  	return div_u64_rem(dividend, divisor, &remainder);
  }
  #endif
  
  /**
   * div_s64 - signed 64bit divide with 32bit divisor
   */
  #ifndef div_s64
  static inline s64 div_s64(s64 dividend, s32 divisor)
  {
  	s32 remainder;
  	return div_s64_rem(dividend, divisor, &remainder);
  }
  #endif
f595ec964   Jeremy Fitzhardinge   common implementa...
105
  u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
d5e181f78   Jeremy Fitzhardinge   add an inlined ve...
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
/*
 * __iter_div_u64_rem - divide by repeated subtraction.
 *
 * Returns the quotient and stores the remainder through @remainder.
 * Runs one loop iteration per unit of quotient, so callers are expected
 * to use this only when the quotient is known to be small.
 */
static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/* The following asm() prevents the compiler from
		   optimising this loop into a modulo operation.  */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	/* Loop exits with dividend < divisor: that is the remainder. */
	*remainder = dividend;

	return ret;
}
#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_shr
/**
 * mul_u64_u32_shr - multiply a u64 by a u32, shift the 128-bit product
 * right by @shift and truncate the result to 64 bits.
 */
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */
35181e86d   Haozhong Zhang   KVM: x86: Add a c...
132
133
134
135
136
137
  #ifndef mul_u64_u64_shr
  static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
  {
  	return (u64)(((unsigned __int128)a * mul) >> shift);
  }
  #endif /* mul_u64_u64_shr */
be5e610c0   Peter Zijlstra   math64: Add mul_u...
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
  #else
  
  #ifndef mul_u64_u32_shr
  static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
  {
  	u32 ah, al;
  	u64 ret;
  
  	al = a;
  	ah = a >> 32;
  
  	ret = ((u64)al * mul) >> shift;
  	if (ah)
  		ret += ((u64)ah * mul) << (32 - shift);
  
  	return ret;
  }
  #endif /* mul_u64_u32_shr */
#ifndef mul_u64_u64_shr
/*
 * Generic 128-bit multiply-then-shift built from four 32x32->64 partial
 * products, for configurations without __int128.
 *
 * NOTE(review): for shift > 64 the final "shift & 63" wraps (e.g.
 * shift == 65 behaves like shift == 1 on the high half); callers
 * presumably pass shift <= 64 — confirm before relying on larger shifts.
 */
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} rl, rm, rn, rh, a0, b0;
	u64 c;

	a0.ll = a;
	b0.ll = b;

	/* Four partial products of the 32-bit halves of a and b. */
	rl.ll = (u64)a0.l.low * b0.l.low;
	rm.ll = (u64)a0.l.low * b0.l.high;
	rn.ll = (u64)a0.l.high * b0.l.low;
	rh.ll = (u64)a0.l.high * b0.l.high;

	/*
	 * Each of these lines computes a 64-bit intermediate result into "c",
	 * starting at bits 32-95.  The low 32-bits go into the result of the
	 * multiplication, the high 32-bits are carried into the next step.
	 */
	rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
	rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
	rh.l.high = (c >> 32) + rh.l.high;

	/*
	 * The 128-bit result of the multiplication is in rl.ll and rh.ll,
	 * shift it right and throw away the high part of the result.
	 */
	/* shift == 0 must not reach the (64 - shift) shift below: that
	   would be a 64-bit shift of a u64, which is undefined. */
	if (shift == 0)
		return rl.ll;
	if (shift < 64)
		return (rl.ll >> shift) | (rh.ll << (64 - shift));
	return rh.ll >> (shift & 63);
}
#endif /* mul_u64_u64_shr */
  #endif
#ifndef mul_u64_u32_div
/*
 * mul_u64_u32_div - compute (a * mul) / divisor without __int128.
 *
 * Builds the up-to-96-bit product from two 32x32->64 partial products,
 * then divides high half first with do_div(), feeding each step's
 * remainder into the next (do_div divides in place and returns the
 * remainder).
 *
 * NOTE(review): the high-half quotient is kept only as rh.l.low, so the
 * overall result is silently truncated if it does not fit in 64 bits —
 * confirm callers guarantee this.
 */
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} u, rl, rh;

	u.ll = a;
	rl.ll = (u64)u.l.low * mul;
	/* cannot overflow: (2^32-1)^2 + (2^32-1) < 2^64 */
	rh.ll = (u64)u.l.high * mul + rl.l.high;

	/* Bits 32-63 of the result will be in rh.l.low. */
	rl.l.high = do_div(rh.ll, divisor);

	/* Bits 0-31 of the result will be in rl.l.low.	*/
	do_div(rl.ll, divisor);

	rl.l.high = rh.l.low;
	return rl.ll;
}
#endif /* mul_u64_u32_div */
  #endif /* _LINUX_MATH64_H */