Commit 658716d19f8f155c67d4677ba68034b8e492dfbe

Authored by Brian Behlendorf
Committed by Linus Torvalds
1 parent 5d051decfc

div64_u64(): improve precision on 32bit platforms

The current implementation of div64_u64 for 32bit systems returns an
approximately correct result when the divisor exceeds 32bits.  Since doing
64bit division using 32bit hardware is a long-since-solved problem we just
use one of the existing proven methods.

Additionally, add a div64_s64 function to correctly handle doing signed
64bit division.

Addresses https://bugzilla.redhat.com/show_bug.cgi?id=616105

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Ben Woodard <bwoodard@llnl.gov>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Mark Grondona <mgrondona@llnl.gov>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 3 changed files with 59 additions and 10 deletions Side-by-side Diff

include/linux/kernel.h
... ... @@ -173,6 +173,11 @@
173 173 (__x < 0) ? -__x : __x; \
174 174 })
175 175  
  176 +#define abs64(x) ({ \
  177 + s64 __x = (x); \
  178 + (__x < 0) ? -__x : __x; \
  179 + })
  180 +
176 181 #ifdef CONFIG_PROVE_LOCKING
177 182 void might_fault(void);
178 183 #else
include/linux/math64.h
... ... @@ -35,6 +35,14 @@
35 35 return dividend / divisor;
36 36 }
37 37  
  38 +/**
  39 + * div64_s64 - signed 64bit divide with 64bit divisor
  40 + */
  41 +static inline s64 div64_s64(s64 dividend, s64 divisor)
  42 +{
  43 + return dividend / divisor;
  44 +}
  45 +
38 46 #elif BITS_PER_LONG == 32
39 47  
40 48 #ifndef div_u64_rem
... ... @@ -51,6 +59,10 @@
51 59  
52 60 #ifndef div64_u64
53 61 extern u64 div64_u64(u64 dividend, u64 divisor);
  62 +#endif
  63 +
  64 +#ifndef div64_s64
  65 +extern s64 div64_s64(s64 dividend, s64 divisor);
54 66 #endif
55 67  
56 68 #endif /* BITS_PER_LONG */
lib/div64.c
... ... @@ -77,24 +77,56 @@
77 77 EXPORT_SYMBOL(div_s64_rem);
78 78 #endif
79 79  
80   -/* 64bit divisor, dividend and result. dynamic precision */
  80 +/**
  81 + * div64_u64 - unsigned 64bit divide with 64bit divisor
  82 + * @dividend: 64bit dividend
  83 + * @divisor: 64bit divisor
  84 + *
  85 + * This implementation is a modified version of the algorithm proposed
  86 + * by the book 'Hacker's Delight'. The original source and full proof
  87 + * can be found here and is available for use without restriction.
  88 + *
  89 + * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c'
  90 + */
81 91 #ifndef div64_u64
82 92 u64 div64_u64(u64 dividend, u64 divisor)
83 93 {
84   - u32 high, d;
  94 + u32 high = divisor >> 32;
  95 + u64 quot;
85 96  
86   - high = divisor >> 32;
87   - if (high) {
88   - unsigned int shift = fls(high);
  97 + if (high == 0) {
  98 + quot = div_u64(dividend, divisor);
  99 + } else {
  100 + int n = 1 + fls(high);
  101 + quot = div_u64(dividend >> n, divisor >> n);
89 102  
90   - d = divisor >> shift;
91   - dividend >>= shift;
92   - } else
93   - d = divisor;
  103 + if (quot != 0)
  104 + quot--;
  105 + if ((dividend - quot * divisor) >= divisor)
  106 + quot++;
  107 + }
94 108  
95   - return div_u64(dividend, d);
  109 + return quot;
96 110 }
97 111 EXPORT_SYMBOL(div64_u64);
  112 +#endif
  113 +
  114 +/**
  115 + * div64_s64 - signed 64bit divide with 64bit divisor
  116 + * @dividend: 64bit dividend
  117 + * @divisor: 64bit divisor
  118 + */
  119 +#ifndef div64_s64
  120 +s64 div64_s64(s64 dividend, s64 divisor)
  121 +{
  122 + s64 quot, t;
  123 +
  124 + quot = div64_u64(abs64(dividend), abs64(divisor));
  125 + t = (dividend ^ divisor) >> 63;
  126 +
  127 + return (quot ^ t) - t;
  128 +}
  129 +EXPORT_SYMBOL(div64_s64);
98 130 #endif
99 131  
100 132 #endif /* BITS_PER_LONG == 32 */