Commit f682cefa5ad204d3bfaa54a58046c66d2d035ac1

Authored by Changli Gao
Committed by David S. Miller
1 parent 6623e3b24a

netfilter: fix the race when initializing nf_ct_expect_hash_rnd

Since nf_ct_expect_dst_hash() may be called without nf_conntrack_lock
held, nf_ct_expect_hash_rnd must be initialized atomically.
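
The old code seeded the hash on first use behind a plain flag, so two
CPUs entering nf_ct_expect_dst_hash() concurrently could each call
get_random_bytes() and compute hashes against different seeds. A
minimal userspace sketch of the hazard (illustrative names, not the
kernel's code):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	static uint32_t hash_rnd;	/* shared seed */
	static int hash_rnd_initted;	/* plain flag, no atomicity */

	static uint32_t dst_hash(uint32_t key)
	{
		if (!hash_rnd_initted) {	/* two CPUs may both see 0 here... */
			hash_rnd = (uint32_t)random();	/* ...and write different seeds */
			hash_rnd_initted = 1;
		}
		/* An entry hashed with the first seed lands in a bucket that
		 * lookups will never probe once the second seed is written. */
		return key ^ hash_rnd;
	}

	int main(void)
	{
		srandom(1);
		printf("%u\n", dst_hash(0xdeadbeef));
		return 0;
	}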

In this patch, nf_ct_expect_hash_rnd is dropped and nf_conntrack_hash_rnd
is used in its place; the seed's lazy, cmpxchg()-based initialization is
factored out into a shared helper, init_nf_conntrack_hash_rnd().
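
The cmpxchg() in init_nf_conntrack_hash_rnd() closes the window: every
racing CPU draws a candidate seed, but only the first compare-and-swap
from 0 wins, and the do/while loop keeps 0 reserved as the "not yet
seeded" sentinel. The same pattern in portable C11 atomics (a sketch,
not the kernel code):

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	static _Atomic uint32_t hash_rnd;	/* 0 == not yet initialized */

	static void init_hash_rnd(void)
	{
		uint32_t expected = 0, seed;

		do {
			seed = (uint32_t)random();	/* stand-in for get_random_bytes() */
		} while (!seed);			/* keep 0 reserved as the sentinel */

		/* Exactly one caller swaps 0 -> seed; every loser's candidate
		 * is discarded, so all readers agree on a single seed. */
		atomic_compare_exchange_strong(&hash_rnd, &expected, seed);
	}

	int main(void)
	{
		srandom(1);
		if (!atomic_load(&hash_rnd))
			init_hash_rnd();
		printf("seed = %u\n", atomic_load(&hash_rnd));
		return 0;
	}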

Signed-off-by: Changli Gao <xiaosuo@gmail.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 3 changed files with 22 additions and 20 deletions

include/net/netfilter/nf_conntrack.h
... ... @@ -298,6 +298,8 @@
298 298 extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
299 299 extern unsigned int nf_conntrack_htable_size;
300 300 extern unsigned int nf_conntrack_max;
  301 +extern unsigned int nf_conntrack_hash_rnd;
  302 +void init_nf_conntrack_hash_rnd(void);
301 303  
302 304 #define NF_CT_STAT_INC(net, count) \
303 305 __this_cpu_inc((net)->ct.stat->count)
net/netfilter/nf_conntrack_core.c
... ... @@ -65,7 +65,7 @@
65 65 DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
66 66 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
67 67  
68   -static unsigned int nf_conntrack_hash_rnd __read_mostly;
  68 +unsigned int nf_conntrack_hash_rnd __read_mostly;
69 69  
70 70 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
71 71 {
... ... @@ -596,6 +596,21 @@
596 596 return dropped;
597 597 }
598 598  
  599 +void init_nf_conntrack_hash_rnd(void)
  600 +{
  601 + unsigned int rand;
  602 +
  603 + /*
  604 + * Why not initialize nf_conntrack_hash_rnd in an init() function?
  605 + * Because there isn't enough entropy at boot time, so we defer
  606 + * the initialization until as late as possible.
  607 + */
  608 + do {
  609 + get_random_bytes(&rand, sizeof(rand));
  610 + } while (!rand);
  611 + cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
  612 +}
  613 +
599 614 static struct nf_conn *
600 615 __nf_conntrack_alloc(struct net *net, u16 zone,
601 616 const struct nf_conntrack_tuple *orig,
... ... @@ -605,18 +620,7 @@
605 620 struct nf_conn *ct;
606 621  
607 622 if (unlikely(!nf_conntrack_hash_rnd)) {
608   - unsigned int rand;
609   -
610   - /*
611   - * Why not initialize nf_conntrack_rnd in a "init()" function ?
612   - * Because there isn't enough entropy when system initializing,
613   - * and we initialize it as late as possible.
614   - */
615   - do {
616   - get_random_bytes(&rand, sizeof(rand));
617   - } while (!rand);
618   - cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
619   -
  623 + init_nf_conntrack_hash_rnd();
620 624 /* recompute the hash as nf_conntrack_hash_rnd is initialized */
621 625 hash = hash_conntrack_raw(orig, zone);
622 626 }
net/netfilter/nf_conntrack_expect.c
... ... @@ -32,9 +32,7 @@
32 32 unsigned int nf_ct_expect_hsize __read_mostly;
33 33 EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);
34 34  
35   -static unsigned int nf_ct_expect_hash_rnd __read_mostly;
36 35 unsigned int nf_ct_expect_max __read_mostly;
37   -static int nf_ct_expect_hash_rnd_initted __read_mostly;
38 36  
39 37 static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
40 38  
41 39  
... ... @@ -77,15 +75,13 @@
77 75 {
78 76 unsigned int hash;
79 77  
80   - if (unlikely(!nf_ct_expect_hash_rnd_initted)) {
81   - get_random_bytes(&nf_ct_expect_hash_rnd,
82   - sizeof(nf_ct_expect_hash_rnd));
83   - nf_ct_expect_hash_rnd_initted = 1;
  78 + if (unlikely(!nf_conntrack_hash_rnd)) {
  79 + init_nf_conntrack_hash_rnd();
84 80 }
85 81  
86 82 hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
87 83 (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
88   - (__force __u16)tuple->dst.u.all) ^ nf_ct_expect_hash_rnd);
  84 + (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
89 85 return ((u64)hash * nf_ct_expect_hsize) >> 32;
90 86 }
91 87
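
One detail worth noting in nf_ct_expect_dst_hash(): rather than a
modulo, the final line maps the 32-bit jhash onto a bucket with a
multiply-shift, ((u64)hash * nf_ct_expect_hsize) >> 32, which yields a
value in [0, hsize) without a divide. A standalone sketch of that
mapping (hypothetical values, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	/* hash / 2^32 is a fraction in [0, 1), so hash * nbuckets / 2^32
	 * scales it to an index in [0, nbuckets). */
	static uint32_t bucket_of(uint32_t hash, uint32_t nbuckets)
	{
		return (uint32_t)(((uint64_t)hash * nbuckets) >> 32);
	}

	int main(void)
	{
		printf("%u\n", bucket_of(0xdeadbeef, 256));	/* prints 222 */
		return 0;
	}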