Commit 4e701482d1d7b90c358e2bd244bb71623f767120

Authored by Matthew Wilcox
Committed by Linus Torvalds
1 parent d9ae90ac4b

hash: add explicit u32 and u64 versions of hash

The 32-bit version is more efficient (and apparently gives better hash
results than the 64-bit version), so users who are only hashing a 32-bit
quantity can now opt to use the 32-bit version explicitly, rather than
promoting to a long.

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Cc: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
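
The practical effect for callers: a 32-bit key no longer has to be widened to unsigned long just to be hashed. A minimal sketch of a caller (my_bucket and MY_HASH_BITS are hypothetical names, assuming a kernel build with this version of <linux/hash.h>):

    #include <linux/hash.h>

    #define MY_HASH_BITS 8	/* hypothetical table: 2^8 = 256 buckets */

    static unsigned int my_bucket(u32 key)
    {
    	/* Before this commit: hash_long(key, MY_HASH_BITS), which
    	 * promotes key to unsigned long (64 bits on 64-bit kernels).
    	 * Now the 32-bit mix can be requested explicitly. */
    	return hash_32(key, MY_HASH_BITS);
    }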

Showing 1 changed file with 27 additions and 15 deletions

include/linux/hash.h
@@ -1,6 +1,6 @@
 #ifndef _LINUX_HASH_H
 #define _LINUX_HASH_H
-/* Fast hashing routine for a long.
+/* Fast hashing routine for ints, longs and pointers.
    (C) 2002 William Lee Irwin III, IBM */
 
 /*
@@ -13,23 +13,30 @@
  * them can use shifts and additions instead of multiplications for
  * machines where multiplications are slow.
  */
-#if BITS_PER_LONG == 32
+
+#include <asm/types.h>
+
 /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
-#define GOLDEN_RATIO_PRIME 0x9e370001UL
-#elif BITS_PER_LONG == 64
+#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
 /* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
-#define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
+#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL
+
+#if BITS_PER_LONG == 32
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32
+#define hash_long(val, bits) hash_32(val, bits)
+#elif BITS_PER_LONG == 64
+#define hash_long(val, bits) hash_64(val, bits)
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64
 #else
-#error Define GOLDEN_RATIO_PRIME for your wordsize.
+#error Wordsize not 32 or 64
 #endif
 
-static inline unsigned long hash_long(unsigned long val, unsigned int bits)
+static inline u64 hash_64(u64 val, unsigned int bits)
 {
-	unsigned long hash = val;
+	u64 hash = val;
 
-#if BITS_PER_LONG == 64
 	/* Sigh, gcc can't optimise this alone like it does for 32 bits. */
-	unsigned long n = hash;
+	u64 n = hash;
 	n <<= 18;
 	hash -= n;
 	n <<= 33;
@@ -42,15 +49,20 @@
 	hash += n;
 	n <<= 2;
 	hash += n;
-#else
+
+	/* High bits are more random, so use them. */
+	return hash >> (64 - bits);
+}
+
+static inline u32 hash_32(u32 val, unsigned int bits)
+{
 	/* On some cpus multiply is faster, on others gcc will do shifts */
-	hash *= GOLDEN_RATIO_PRIME;
-#endif
+	u32 hash = val * GOLDEN_RATIO_PRIME_32;
 
 	/* High bits are more random, so use them. */
-	return hash >> (BITS_PER_LONG - bits);
+	return hash >> (32 - bits);
 }
-
+
 static inline unsigned long hash_ptr(void *ptr, unsigned int bits)
 {
 	return hash_long((unsigned long)ptr, bits);
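
The bit-sparse prime is what makes the shift-and-add variant possible: multiplication modulo 2^64 distributes over the signed power-of-two terms listed in the header comment, so the multiply and the shift sequence compute the same product. A userspace sketch that checks this identity (plain C, not kernel code; the test value is arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    #define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL

    int main(void)
    {
    	uint64_t val = 0xdeadbeefcafef00dULL;	/* arbitrary test key */
    	/* Direct multiply, as hash_32() does with its 32-bit prime. */
    	uint64_t mul = val * GOLDEN_RATIO_PRIME_64;
    	/* Same product built from the comment's decomposition:
    	 * 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 (all mod 2^64). */
    	uint64_t sum = (val << 63) + (val << 61) - (val << 57)
    		     + (val << 54) - (val << 51) - (val << 18) + val;
    	printf("%s\n", mul == sum ? "match" : "mismatch");	/* prints "match" */
    	return 0;
    }

Since the identity holds modulo 2^64 for any input, gcc is free to pick whichever form is cheaper on the target, which is exactly the trade-off the "On some cpus multiply is faster" comment describes for the 32-bit path.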