Blame view
net/netfilter/nf_conntrack_core.c
62.5 KB
9fb9cbb10 [NETFILTER]: Add ... |
1 2 3 4 5 |
/* Connection state tracking for netfilter. This is separated from, but required by, the NAT layer; it can also be used by an iptables extension. */ /* (C) 1999-2001 Paul `Rusty' Russell |
dc808fe28 [NETFILTER] nf_co... |
6 |
* (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> |
9fb9cbb10 [NETFILTER]: Add ... |
7 |
* (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org> |
f229f6ce4 netfilter: add my... |
8 |
* (C) 2005-2012 Patrick McHardy <kaber@trash.net> |
9fb9cbb10 [NETFILTER]: Add ... |
9 10 11 12 |
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. |
9fb9cbb10 [NETFILTER]: Add ... |
13 |
*/ |
ccd63c20f netfilter: nf_con... |
14 |
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
9fb9cbb10 [NETFILTER]: Add ... |
15 16 17 |
#include <linux/types.h> #include <linux/netfilter.h> #include <linux/module.h> |
d43c36dc6 headers: remove s... |
18 |
#include <linux/sched.h> |
9fb9cbb10 [NETFILTER]: Add ... |
19 20 21 22 23 24 25 26 27 28 29 30 31 32 |
#include <linux/skbuff.h> #include <linux/proc_fs.h> #include <linux/vmalloc.h> #include <linux/stddef.h> #include <linux/slab.h> #include <linux/random.h> #include <linux/jhash.h> #include <linux/err.h> #include <linux/percpu.h> #include <linux/moduleparam.h> #include <linux/notifier.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/socket.h> |
d7fe0f241 [PATCH] severing ... |
33 |
#include <linux/mm.h> |
d696c7bda netfilter: nf_con... |
34 |
#include <linux/nsproxy.h> |
ea781f197 netfilter: nf_con... |
35 |
#include <linux/rculist_nulls.h> |
9fb9cbb10 [NETFILTER]: Add ... |
36 |
|
9fb9cbb10 [NETFILTER]: Add ... |
37 |
#include <net/netfilter/nf_conntrack.h> |
605dcad6c [NETFILTER]: nf_c... |
38 |
#include <net/netfilter/nf_conntrack_l4proto.h> |
77ab9cff0 [NETFILTER]: nf_c... |
39 |
#include <net/netfilter/nf_conntrack_expect.h> |
9fb9cbb10 [NETFILTER]: Add ... |
40 |
#include <net/netfilter/nf_conntrack_helper.h> |
41d73ec05 netfilter: nf_con... |
41 |
#include <net/netfilter/nf_conntrack_seqadj.h> |
9fb9cbb10 [NETFILTER]: Add ... |
42 |
#include <net/netfilter/nf_conntrack_core.h> |
ecfab2c9f [NETFILTER]: nf_c... |
43 |
#include <net/netfilter/nf_conntrack_extend.h> |
584015727 netfilter: accoun... |
44 |
#include <net/netfilter/nf_conntrack_acct.h> |
a0891aa6a netfilter: conntr... |
45 |
#include <net/netfilter/nf_conntrack_ecache.h> |
5d0aa2ccd netfilter: nf_con... |
46 |
#include <net/netfilter/nf_conntrack_zones.h> |
a992ca2a0 netfilter: nf_con... |
47 |
#include <net/netfilter/nf_conntrack_timestamp.h> |
dd7050724 netfilter: nf_ct_... |
48 |
#include <net/netfilter/nf_conntrack_timeout.h> |
c539f0171 netfilter: add co... |
49 |
#include <net/netfilter/nf_conntrack_labels.h> |
48b1de4c1 netfilter: add SY... |
50 |
#include <net/netfilter/nf_conntrack_synproxy.h> |
e6a7d3c04 netfilter: ctnetl... |
51 |
#include <net/netfilter/nf_nat.h> |
e17b666a4 netfilter: nf_con... |
52 |
#include <net/netfilter/nf_nat_core.h> |
493763684 netfilter: nf_con... |
53 |
#include <net/netfilter/nf_nat_helper.h> |
1b8c8a9f6 netfilter: conntr... |
54 |
#include <net/netns/hash.h> |
6816d931c netfilter: conntr... |
55 |
#include <net/ip.h> |
9fb9cbb10 [NETFILTER]: Add ... |
56 |
|
e2a750070 netfilter: conntr... |
57 |
#include "nf_internals.h" |
93bb0ceb7 netfilter: conntr... |
58 59 |
__cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS]; EXPORT_SYMBOL_GPL(nf_conntrack_locks); |
9fb9cbb10 [NETFILTER]: Add ... |
60 |
|
ca7433df3 netfilter: conntr... |
61 62 |
__cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock); EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock); |
56d52d489 netfilter: conntr... |
63 64 |
struct hlist_nulls_head *nf_conntrack_hash __read_mostly; EXPORT_SYMBOL_GPL(nf_conntrack_hash); |
/* Per-instance state for the conntrack garbage-collection delayed work
 * (single instance: conntrack_gc_work, declared below).
 */
struct conntrack_gc_work {
	struct delayed_work dwork;
	u32 last_bucket;	/* NOTE(review): presumably the bucket where the
				 * previous scan stopped — worker body not visible
				 * in this chunk, confirm there.
				 */
	bool exiting;		/* NOTE(review): presumably set at shutdown to stop
				 * rescheduling — confirm against the worker.
				 */
	bool early_drop;	/* request aggressive eviction (table pressure) */
	long next_gc_run;	/* delay before next scan — in jiffies, TODO confirm */
};
0c5366b3a netfilter: conntr... |
72 |
static __read_mostly struct kmem_cache *nf_conntrack_cachep; |
b16c29191 netfilter: nf_con... |
73 |
static __read_mostly spinlock_t nf_conntrack_locks_all_lock; |
70d72b7e0 netfilter: conntr... |
74 |
static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock); |
b16c29191 netfilter: nf_con... |
75 |
static __read_mostly bool nf_conntrack_locks_all; |
e0df8cae6 netfilter: conntr... |
76 |
/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */ |
e5072053b netfilter: conntr... |
77 78 79 80 81 |
#define GC_MAX_BUCKETS_DIV 128u /* upper bound of full table scan */ #define GC_MAX_SCAN_JIFFIES (16u * HZ) /* desired ratio of entries found to be expired */ #define GC_EVICT_RATIO 50u |
b87a2f919 netfilter: conntr... |
82 83 |
static struct conntrack_gc_work conntrack_gc_work; |
/* Take one per-bucket conntrack lock, cooperating with the global
 * "locks_all" exclusion set up by nf_conntrack_all_lock() below.
 */
void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
{
	/* 1) Acquire the lock */
	spin_lock(lock);

	/* 2) read nf_conntrack_locks_all, with ACQUIRE semantics
	 * It pairs with the smp_store_release() in nf_conntrack_all_unlock()
	 */
	if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false))
		return;

	/* fast path failed, unlock */
	spin_unlock(lock);

	/* Slow path 1) get global lock */
	spin_lock(&nf_conntrack_locks_all_lock);

	/* Slow path 2) get the lock we want */
	spin_lock(lock);

	/* Slow path 3) release the global lock */
	spin_unlock(&nf_conntrack_locks_all_lock);
}
EXPORT_SYMBOL_GPL(nf_conntrack_lock);
/* Release the pair of bucket locks taken by nf_conntrack_double_lock().
 * Indices are reduced modulo CONNTRACK_LOCKS, mirroring the lock side.
 */
static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
{
	h1 %= CONNTRACK_LOCKS;
	h2 %= CONNTRACK_LOCKS;
	spin_unlock(&nf_conntrack_locks[h1]);
	if (h1 != h2)
		spin_unlock(&nf_conntrack_locks[h2]);
}

/* return true if we need to recompute hashes (in case hash table was resized) */
static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
				     unsigned int h2, unsigned int sequence)
{
	h1 %= CONNTRACK_LOCKS;
	h2 %= CONNTRACK_LOCKS;
	/* Always lock the lower index first to avoid ABBA deadlock between
	 * two callers locking the same pair in opposite order.
	 */
	if (h1 <= h2) {
		nf_conntrack_lock(&nf_conntrack_locks[h1]);
		if (h1 != h2)
			spin_lock_nested(&nf_conntrack_locks[h2],
					 SINGLE_DEPTH_NESTING);
	} else {
		nf_conntrack_lock(&nf_conntrack_locks[h2]);
		spin_lock_nested(&nf_conntrack_locks[h1],
				 SINGLE_DEPTH_NESTING);
	}
	/* The table may have been resized while we waited for the locks:
	 * caller must then recompute the hashes and retry.
	 */
	if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
		nf_conntrack_double_unlock(h1, h2);
		return true;
	}
	return false;
}

/* Exclude all per-bucket lock holders (used around global table changes). */
static void nf_conntrack_all_lock(void)
{
	int i;

	spin_lock(&nf_conntrack_locks_all_lock);

	nf_conntrack_locks_all = true;

	for (i = 0; i < CONNTRACK_LOCKS; i++) {
		spin_lock(&nf_conntrack_locks[i]);

		/* This spin_unlock provides the "release" to ensure that
		 * nf_conntrack_locks_all==true is visible to everyone that
		 * acquired spin_lock(&nf_conntrack_locks[]).
		 */
		spin_unlock(&nf_conntrack_locks[i]);
	}
}

static void nf_conntrack_all_unlock(void)
{
	/* All prior stores must be complete before we clear
	 * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
	 * might observe the false value but not the entire
	 * critical section.
	 * It pairs with the smp_load_acquire() in nf_conntrack_lock()
	 */
	smp_store_release(&nf_conntrack_locks_all, false);
	spin_unlock(&nf_conntrack_locks_all_lock);
}
unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

unsigned int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);

/* Bumped around hash table replacement; readers use
 * read_seqcount_begin/retry (see nf_conntrack_double_lock above).
 */
seqcount_t nf_conntrack_generation __read_mostly;
/* Random hash seed, initialized lazily on first use. */
static unsigned int nf_conntrack_hash_rnd __read_mostly;

/* Full (unscaled) 32-bit hash of a tuple, mixed with the netns to keep
 * hash chains from being shared across namespaces.
 */
static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
			      const struct net *net)
{
	unsigned int n;
	u32 seed;

	get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));

	/* The direction must be ignored, so we hash everything up to the
	 * destination ports (which is a multiple of 4) and treat the last
	 * three bytes manually.
	 */
	seed = nf_conntrack_hash_rnd ^ net_hash_mix(net);
	n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
	return jhash2((u32 *)tuple, n, seed ^
		      (((__force __u16)tuple->dst.u.all << 16) |
		      tuple->dst.protonum));
}

/* Map a raw hash onto the current table size. */
static u32 scale_hash(u32 hash)
{
	return reciprocal_scale(hash, nf_conntrack_htable_size);
}

/* As hash_conntrack(), but against an explicitly supplied table size. */
static u32 __hash_conntrack(const struct net *net,
			    const struct nf_conntrack_tuple *tuple,
			    unsigned int size)
{
	return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
}

/* Bucket index of @tuple in the current conntrack hash table. */
static u32 hash_conntrack(const struct net *net,
			  const struct nf_conntrack_tuple *tuple)
{
	return scale_hash(hash_conntrack_raw(tuple, net));
}
/* Fill @tuple from the packet: L3 addresses at @nhoff, L4 ports (or
 * protocol-specific fields via l4proto->pkt_to_tuple) at @dataoff.
 * Returns false if the needed headers cannot be read from the skb.
 * An unknown l3num returns true with only l3num/protonum/dir set.
 */
static bool
nf_ct_get_tuple(const struct sk_buff *skb,
		unsigned int nhoff,
		unsigned int dataoff,
		u_int16_t l3num,
		u_int8_t protonum,
		struct net *net,
		struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_l4proto *l4proto)
{
	unsigned int size;
	const __be32 *ap;
	__be32 _addrs[8];
	struct {
		__be16 sport;
		__be16 dport;
	} _inet_hdr, *inet_hdr;

	memset(tuple, 0, sizeof(*tuple));

	tuple->src.l3num = l3num;
	switch (l3num) {
	case NFPROTO_IPV4:
		nhoff += offsetof(struct iphdr, saddr);
		size = 2 * sizeof(__be32);
		break;
	case NFPROTO_IPV6:
		nhoff += offsetof(struct ipv6hdr, saddr);
		size = sizeof(_addrs);
		break;
	default:
		return true;
	}

	ap = skb_header_pointer(skb, nhoff, size, _addrs);
	if (!ap)
		return false;

	switch (l3num) {
	case NFPROTO_IPV4:
		tuple->src.u3.ip = ap[0];
		tuple->dst.u3.ip = ap[1];
		break;
	case NFPROTO_IPV6:
		memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6));
		memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6));
		break;
	}

	tuple->dst.protonum = protonum;
	tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	if (unlikely(l4proto->pkt_to_tuple))
		return l4proto->pkt_to_tuple(skb, dataoff, net, tuple);

	/* Actually only need first 4 bytes to get ports. */
	inet_hdr = skb_header_pointer(skb, dataoff, sizeof(_inet_hdr), &_inet_hdr);
	if (!inet_hdr)
		return false;

	tuple->src.u.udp.port = inet_hdr->sport;
	tuple->dst.u.udp.port = inet_hdr->dport;
	return true;
}
6816d931c netfilter: conntr... |
271 272 273 274 |
static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, u_int8_t *protonum) { int dataoff = -1; |
6816d931c netfilter: conntr... |
275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 |
const struct iphdr *iph; struct iphdr _iph; iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph); if (!iph) return -1; /* Conntrack defragments packets, we might still see fragments * inside ICMP packets though. */ if (iph->frag_off & htons(IP_OFFSET)) return -1; dataoff = nhoff + (iph->ihl << 2); *protonum = iph->protocol; /* Check bogus IP headers */ if (dataoff > skb->len) { pr_debug("bogus IPv4 packet: nhoff %u, ihl %u, skblen %u ", nhoff, iph->ihl << 2, skb->len); return -1; } |
6816d931c netfilter: conntr... |
298 299 |
return dataoff; } |
#if IS_ENABLED(CONFIG_IPV6)
/* Locate the upper-layer header of an IPv6 packet starting at @nhoff,
 * skipping extension headers.  Returns the L4 offset and stores the next
 * header value in *@protonum, or -1 on unreadable/fragmented packets.
 */
static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
			    u8 *protonum)
{
	int protoff = -1;
	unsigned int extoff = nhoff + sizeof(struct ipv6hdr);
	__be16 frag_off;
	u8 nexthdr;

	if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr),
			  &nexthdr, sizeof(nexthdr)) != 0) {
		/* Restored missing trailing newlines in both debug messages
		 * (kernel printk convention).
		 */
		pr_debug("can't get nexthdr\n");
		return -1;
	}
	protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off);
	/*
	 * (protoff == skb->len) means the packet has not data, just
	 * IPv6 and possibly extensions headers, but it is tracked anyway
	 */
	if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
		pr_debug("can't find proto in pkt\n");
		return -1;
	}

	*protonum = nexthdr;
	return protoff;
}
#endif
/* Dispatch to the per-family L4 locator above; unknown families report
 * protocol 0 and failure.
 */
static int get_l4proto(const struct sk_buff *skb,
		       unsigned int nhoff, u8 pf, u8 *l4num)
{
	switch (pf) {
	case NFPROTO_IPV4:
		return ipv4_get_l4proto(skb, nhoff, l4num);
#if IS_ENABLED(CONFIG_IPV6)
	case NFPROTO_IPV6:
		return ipv6_get_l4proto(skb, nhoff, l4num);
#endif
	default:
		*l4num = 0;
		break;
	}
	return -1;
}

/* Public helper: parse the packet headers and build a conntrack tuple.
 * Returns false when the packet cannot be parsed (bad headers, fragment).
 * Runs under RCU because the l4proto lookup is RCU-protected.
 */
bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
		       u_int16_t l3num, struct net *net,
		       struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_l4proto *l4proto;
	u8 protonum;
	int protoff;
	int ret;

	rcu_read_lock();

	protoff = get_l4proto(skb, nhoff, l3num, &protonum);
	if (protoff <= 0) {
		rcu_read_unlock();
		return false;
	}

	l4proto = __nf_ct_l4proto_find(l3num, protonum);

	ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple,
			      l4proto);

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
/* Build the reply-direction tuple for @orig into @inverse: addresses and
 * ports swapped, direction flipped, protocol fields preserved.  Protocols
 * with special inversion rules provide l4proto->invert_tuple.
 */
bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
		   const struct nf_conntrack_tuple *orig,
		   const struct nf_conntrack_l4proto *l4proto)
{
	memset(inverse, 0, sizeof(*inverse));

	inverse->src.l3num = orig->src.l3num;

	switch (orig->src.l3num) {
	case NFPROTO_IPV4:
		inverse->src.u3.ip = orig->dst.u3.ip;
		inverse->dst.u3.ip = orig->src.u3.ip;
		break;
	case NFPROTO_IPV6:
		inverse->src.u3.in6 = orig->dst.u3.in6;
		inverse->dst.u3.in6 = orig->src.u3.in6;
		break;
	default:
		break;
	}

	inverse->dst.dir = !orig->dst.dir;

	inverse->dst.protonum = orig->dst.protonum;

	if (unlikely(l4proto->invert_tuple))
		return l4proto->invert_tuple(inverse, orig);

	/* Default inversion: swap the generic port/id fields. */
	inverse->src.u.all = orig->dst.u.all;
	inverse->dst.u.all = orig->src.u.all;
	return true;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
9fb9cbb10 [NETFILTER]: Add ... |
405 |
|
9fb9cbb10 [NETFILTER]: Add ... |
406 407 408 |
static void clean_from_lists(struct nf_conn *ct) { |
0d53778e8 [NETFILTER]: Conv... |
409 410 |
pr_debug("clean_from_lists(%p) ", ct); |
ea781f197 netfilter: nf_con... |
411 412 |
hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode); |
9fb9cbb10 [NETFILTER]: Add ... |
413 414 |
/* Destroy all pending expectations */ |
c1d10adb4 [NETFILTER]: Add ... |
415 |
nf_ct_remove_expectations(ct); |
9fb9cbb10 [NETFILTER]: Add ... |
416 |
} |
/* must be called with local_bh_disable */
static void nf_ct_add_to_dying_list(struct nf_conn *ct)
{
	struct ct_pcpu *pcpu;

	/* add this conntrack to the (per cpu) dying list */
	ct->cpu = smp_processor_id();
	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

	spin_lock(&pcpu->lock);
	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
			     &pcpu->dying);
	spin_unlock(&pcpu->lock);
}

/* must be called with local_bh_disable */
static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
{
	struct ct_pcpu *pcpu;

	/* add this conntrack to the (per cpu) unconfirmed list */
	ct->cpu = smp_processor_id();
	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

	spin_lock(&pcpu->lock);
	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
			     &pcpu->unconfirmed);
	spin_unlock(&pcpu->lock);
}

/* must be called with local_bh_disable */
static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
{
	struct ct_pcpu *pcpu;

	/* We overload first tuple to link into unconfirmed or dying list.*/
	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

	spin_lock(&pcpu->lock);
	BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	spin_unlock(&pcpu->lock);
}
303223092 netfilter: guaran... |
460 |
#define NFCT_ALIGN(len) (((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK) |
0838aa7fc netfilter: fix ne... |
461 |
/* Released via destroy_conntrack() */ |
308ac9143 netfilter: nf_con... |
462 463 464 |
struct nf_conn *nf_ct_tmpl_alloc(struct net *net, const struct nf_conntrack_zone *zone, gfp_t flags) |
0838aa7fc netfilter: fix ne... |
465 |
{ |
303223092 netfilter: guaran... |
466 |
struct nf_conn *tmpl, *p; |
0838aa7fc netfilter: fix ne... |
467 |
|
303223092 netfilter: guaran... |
468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 |
if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) { tmpl = kzalloc(sizeof(*tmpl) + NFCT_INFOMASK, flags); if (!tmpl) return NULL; p = tmpl; tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p); if (tmpl != p) { tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p); tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p; } } else { tmpl = kzalloc(sizeof(*tmpl), flags); if (!tmpl) return NULL; } |
0838aa7fc netfilter: fix ne... |
484 485 486 |
tmpl->status = IPS_TEMPLATE; write_pnet(&tmpl->ct_net, net); |
6c8dee984 netfilter: move z... |
487 |
nf_ct_zone_add(tmpl, zone); |
0838aa7fc netfilter: fix ne... |
488 489 490 |
atomic_set(&tmpl->ct_general.use, 0); return tmpl; |
0838aa7fc netfilter: fix ne... |
491 492 |
} EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc); |
/* Free a template conntrack allocated by nf_ct_tmpl_alloc(), undoing the
 * manual alignment offset (proto.tmpl_padto) when it was applied.
 */
void nf_ct_tmpl_free(struct nf_conn *tmpl)
{
	nf_ct_ext_destroy(tmpl);
	nf_ct_ext_free(tmpl);

	if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK)
		/* kfree() needs the original (unaligned) allocation address */
		kfree((char *)tmpl - tmpl->proto.tmpl_padto);
	else
		kfree(tmpl);
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
0838aa7fc netfilter: fix ne... |
504 |
|
9fb9cbb10 [NETFILTER]: Add ... |
505 506 507 508 |
static void destroy_conntrack(struct nf_conntrack *nfct) { struct nf_conn *ct = (struct nf_conn *)nfct; |
b3480fe05 netfilter: conntr... |
509 |
const struct nf_conntrack_l4proto *l4proto; |
9fb9cbb10 [NETFILTER]: Add ... |
510 |
|
0d53778e8 [NETFILTER]: Conv... |
511 512 |
pr_debug("destroy_conntrack(%p) ", ct); |
44d6e2f27 net: Replace NF_C... |
513 |
WARN_ON(atomic_read(&nfct->use) != 0); |
9fb9cbb10 [NETFILTER]: Add ... |
514 |
|
0838aa7fc netfilter: fix ne... |
515 516 517 518 |
if (unlikely(nf_ct_is_template(ct))) { nf_ct_tmpl_free(ct); return; } |
5e8fbe2ac [NETFILTER]: nf_c... |
519 |
l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); |
4b4ceb9db netfilter: conntr... |
520 |
if (l4proto->destroy) |
605dcad6c [NETFILTER]: nf_c... |
521 |
l4proto->destroy(ct); |
9fb9cbb10 [NETFILTER]: Add ... |
522 |
|
ca7433df3 netfilter: conntr... |
523 |
local_bh_disable(); |
9fb9cbb10 [NETFILTER]: Add ... |
524 525 526 |
/* Expectations will have been removed in clean_from_lists, * except TFTP can create an expectation on the first packet, * before connection is in the list, so we need to clean here, |
ca7433df3 netfilter: conntr... |
527 528 |
* too. */ |
c1d10adb4 [NETFILTER]: Add ... |
529 |
nf_ct_remove_expectations(ct); |
9fb9cbb10 [NETFILTER]: Add ... |
530 |
|
b7779d06f netfilter: conntr... |
531 |
nf_ct_del_from_dying_or_unconfirmed_list(ct); |
9fb9cbb10 [NETFILTER]: Add ... |
532 |
|
ca7433df3 netfilter: conntr... |
533 |
local_bh_enable(); |
9fb9cbb10 [NETFILTER]: Add ... |
534 535 536 |
if (ct->master) nf_ct_put(ct->master); |
0d53778e8 [NETFILTER]: Conv... |
537 538 |
pr_debug("destroy_conntrack: returning ct=%p to slab ", ct); |
9fb9cbb10 [NETFILTER]: Add ... |
539 540 |
nf_conntrack_free(ct); } |
/* Move @ct from the main hash table onto the per-cpu dying list.
 * Recomputes the two bucket hashes until they are stable against a
 * concurrent table resize (nf_conntrack_double_lock reports retries).
 */
static void nf_ct_delete_from_lists(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);
	unsigned int hash, reply_hash;
	unsigned int sequence;

	nf_ct_helper_destroy(ct);

	local_bh_disable();
	do {
		sequence = read_seqcount_begin(&nf_conntrack_generation);
		hash = hash_conntrack(net,
				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		reply_hash = hash_conntrack(net,
					    &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

	clean_from_lists(ct);
	nf_conntrack_double_unlock(hash, reply_hash);

	nf_ct_add_to_dying_list(ct);

	local_bh_enable();
}
dd7669a92 netfilter: conntr... |
563 |
|
/* Kill a conntrack entry: mark it dying, stamp the stop time, emit the
 * DESTROY event and unhash it.  Returns false if the entry was already
 * dying or the destroy event could not be delivered (redelivery then
 * owns the final nf_ct_put).
 */
bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
{
	struct nf_conn_tstamp *tstamp;

	/* Only one caller wins the right to tear the entry down. */
	if (test_and_set_bit(IPS_DYING_BIT, &ct->status))
		return false;

	tstamp = nf_conn_tstamp_find(ct);
	if (tstamp && tstamp->stop == 0)
		tstamp->stop = ktime_get_real_ns();

	if (nf_conntrack_event_report(IPCT_DESTROY, ct,
				      portid, report) < 0) {
		/* destroy event was not delivered. nf_ct_put will
		 * be done by event cache worker on redelivery.
		 */
		nf_ct_delete_from_lists(ct);
		nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
		return false;
	}

	nf_conntrack_ecache_work(nf_ct_net(ct));
	nf_ct_delete_from_lists(ct);
	nf_ct_put(ct);
	return true;
}
EXPORT_SYMBOL_GPL(nf_ct_delete);
/* Does hash entry @h match (tuple, zone, netns)?  Only confirmed
 * entries count, see comment below.
 */
static inline bool
nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
		const struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_zone *zone,
		const struct net *net)
{
	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

	/* A conntrack can be recreated with the equal tuple,
	 * so we need to check that the conntrack is confirmed
	 */
	return nf_ct_tuple_equal(tuple, &h->tuple) &&
	       nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
	       nf_ct_is_confirmed(ct) &&
	       net_eq(net, nf_ct_net(ct));
}

/* Full identity comparison of two conntrack entries: both direction
 * tuples, both zone directions, and the owning netns.
 */
static inline bool nf_ct_match(const struct nf_conn *ct1,
			       const struct nf_conn *ct2)
{
	return nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				 &ct2->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
	       nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_REPLY].tuple,
				 &ct2->tuplehash[IP_CT_DIR_REPLY].tuple) &&
	       nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_ORIGINAL) &&
	       nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_REPLY) &&
	       net_eq(nf_ct_net(ct1), nf_ct_net(ct2));
}

/* caller must hold rcu readlock and none of the nf_conntrack_locks */
static void nf_ct_gc_expired(struct nf_conn *ct)
{
	/* Entry may already be on its way out: only act if we can still
	 * take a reference.
	 */
	if (!atomic_inc_not_zero(&ct->ct_general.use))
		return;

	if (nf_ct_should_gc(ct))
		nf_ct_kill(ct);

	nf_ct_put(ct);
}
/*
 * Warning :
 * - Caller must take a reference on returned object
 *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
 */
static struct nf_conntrack_tuple_hash *
____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
		      const struct nf_conntrack_tuple *tuple, u32 hash)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_head *ct_hash;
	struct hlist_nulls_node *n;
	unsigned int bucket, hsize;

begin:
	/* Table pointer and size are re-read on every restart so a
	 * concurrent resize is handled.
	 */
	nf_conntrack_get_ht(&ct_hash, &hsize);
	bucket = reciprocal_scale(hash, hsize);

	hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
		struct nf_conn *ct;

		ct = nf_ct_tuplehash_to_ctrack(h);
		if (nf_ct_is_expired(ct)) {
			/* opportunistic garbage collection during lookup */
			nf_ct_gc_expired(ct);
			continue;
		}

		if (nf_ct_is_dying(ct))
			continue;

		if (nf_ct_key_equal(h, tuple, zone, net))
			return h;
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(n) != bucket) {
		NF_CT_STAT_INC_ATOMIC(net, search_restart);
		goto begin;
	}

	return NULL;
}

/* Find a connection corresponding to a tuple. */
static struct nf_conntrack_tuple_hash *
__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
			const struct nf_conntrack_tuple *tuple, u32 hash)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	rcu_read_lock();
begin:
	h = ____nf_conntrack_find(net, zone, tuple, hash);
	if (h) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (unlikely(nf_ct_is_dying(ct) ||
			     !atomic_inc_not_zero(&ct->ct_general.use)))
			h = NULL;
		else {
			/* Entry may have been recycled between lookup and
			 * refcount grab: re-check the key and retry if so.
			 */
			if (unlikely(!nf_ct_key_equal(h, tuple, zone, net))) {
				nf_ct_put(ct);
				goto begin;
			}
		}
	}
	rcu_read_unlock();

	return h;
}

/* Exported lookup: computes the raw hash, then defers to the helper
 * above which returns a referenced entry (or NULL).
 */
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
		      const struct nf_conntrack_tuple *tuple)
{
	return __nf_conntrack_find_get(net, zone, tuple,
				       hash_conntrack_raw(tuple, net));
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
/* Link both directions of @ct into the global hash table.  Caller must
 * hold the relevant bucket locks (see nf_conntrack_double_lock() users).
 */
static void __nf_conntrack_hash_insert(struct nf_conn *ct, unsigned int hash,
				       unsigned int reply_hash)
{
	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
			   &nf_conntrack_hash[hash]);
	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
			   &nf_conntrack_hash[reply_hash]);
}
/* Insert @ct into the hash table unless an entry with a clashing tuple
 * (either direction) already exists.  Returns 0 on success, -EEXIST if
 * the tuple is already taken.  Caller must hold a reference to @ct.
 */
int nf_conntrack_hash_check_insert(struct nf_conn *ct)
{
	const struct nf_conntrack_zone *zone;
	struct net *net = nf_ct_net(ct);
	unsigned int hash, reply_hash;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int sequence;

	zone = nf_ct_zone(ct);

	local_bh_disable();
	/* Recompute the bucket indices until they are stable against a
	 * concurrent table resize (nf_conntrack_generation seqcount), then
	 * lock both buckets.
	 */
	do {
		sequence = read_seqcount_begin(&nf_conntrack_generation);
		hash = hash_conntrack(net,
				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		reply_hash = hash_conntrack(net,
					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

	/* See if there's one in the list already, including reverse */
	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				    zone, net))
			goto out;

	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
				    zone, net))
			goto out;

	/* Publish all prior initialisation stores before the refcount makes
	 * the object findable by lockless readers.
	 */
	smp_wmb();
	/* The caller holds a reference to this object */
	atomic_set(&ct->ct_general.use, 2);
	__nf_conntrack_hash_insert(ct, hash, reply_hash);
	nf_conntrack_double_unlock(hash, reply_hash);
	NF_CT_STAT_INC(net, insert);
	local_bh_enable();
	return 0;

out:
	nf_conntrack_double_unlock(hash, reply_hash);
	NF_CT_STAT_INC(net, insert_failed);
	local_bh_enable();
	return -EEXIST;
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
c1d10adb4 [NETFILTER]: Add ... |
766 |
|
ba76738c0 netfilter: conntr... |
767 768 769 770 771 772 773 774 775 776 777 778 779 780 |
static inline void nf_ct_acct_update(struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int len) { struct nf_conn_acct *acct; acct = nf_conn_acct_find(ct); if (acct) { struct nf_conn_counter *counter = acct->counter; atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets); atomic64_add(len, &counter[CTINFO2DIR(ctinfo)].bytes); } } |
/* Fold the byte count accumulated on @loser_ct into @ct's accounting.
 * Used when an insertion clash is resolved in favour of @ct.
 */
static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
			     const struct nf_conn *loser_ct)
{
	struct nf_conn_acct *acct;

	acct = nf_conn_acct_find(loser_ct);
	if (acct) {
		struct nf_conn_counter *counter = acct->counter;
		unsigned int bytes;

		/* u32 should be fine since we must have seen one packet. */
		bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);
		nf_ct_acct_update(ct, ctinfo, bytes);
	}
}

/* Resolve race on insertion if this protocol allows this. */
static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
			       enum ip_conntrack_info ctinfo,
			       struct nf_conntrack_tuple_hash *h)
{
	/* This is the conntrack entry already in hashes that won race. */
	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
	const struct nf_conntrack_l4proto *l4proto;
	enum ip_conntrack_info oldinfo;
	struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo);

	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (l4proto->allow_clash &&
	    !nf_ct_is_dying(ct) &&
	    atomic_inc_not_zero(&ct->ct_general.use)) {
		/* Adopt the winner only if NAT has not rewritten it yet, or
		 * if both entries describe the same NATed flow; otherwise
		 * drop our reference and fall through to NF_DROP.
		 */
		if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
		    nf_ct_match(ct, loser_ct)) {
			/* Merge the loser's accounting, release it, and point
			 * the skb at the entry that won the race.
			 */
			nf_ct_acct_merge(ct, ctinfo, loser_ct);
			nf_conntrack_put(&loser_ct->ct_general);
			nf_ct_set(skb, ct, oldinfo);
			return NF_ACCEPT;
		}
		nf_ct_put(ct);
	}
	NF_CT_STAT_INC(net, drop);
	return NF_DROP;
}
/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
	const struct nf_conntrack_zone *zone;
	unsigned int hash, reply_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct nf_conn_tstamp *tstamp;
	struct hlist_nulls_node *n;
	enum ip_conntrack_info ctinfo;
	struct net *net;
	unsigned int sequence;
	int ret = NF_DROP;

	ct = nf_ct_get(skb, &ctinfo);
	net = nf_ct_net(ct);

	/* ipt_REJECT uses nf_conntrack_attach to attach related
	   ICMP/TCP RST packets in other direction.  Actual packet
	   which created connection will be IP_CT_NEW or for an
	   expected connection, IP_CT_RELATED. */
	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
		return NF_ACCEPT;

	zone = nf_ct_zone(ct);
	local_bh_disable();

	/* Recompute bucket indices until stable against a concurrent table
	 * resize, then take both bucket locks.
	 */
	do {
		sequence = read_seqcount_begin(&nf_conntrack_generation);
		/* reuse the hash saved before */
		hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
		hash = scale_hash(hash);
		reply_hash = hash_conntrack(net,
					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

	/* We're not in hash table, and we refuse to set up related
	 * connections for unconfirmed conns.  But packet copies and
	 * REJECT will give spurious warnings here.
	 */

	/* Another skb with the same unconfirmed conntrack may
	 * win the race. This may happen for bridge(br_flood)
	 * or broadcast/multicast packets do skb_clone with
	 * unconfirmed conntrack.
	 */
	if (unlikely(nf_ct_is_confirmed(ct))) {
		WARN_ON_ONCE(1);
		nf_conntrack_double_unlock(hash, reply_hash);
		local_bh_enable();
		return NF_DROP;
	}

	pr_debug("Confirming conntrack %p\n", ct);
	/* We have to check the DYING flag after unlink to prevent
	 * a race against nf_ct_get_next_corpse() possibly called from
	 * user context, else we insert an already 'dead' hash, blocking
	 * further use of that particular connection -JM.
	 */
	nf_ct_del_from_dying_or_unconfirmed_list(ct);

	if (unlikely(nf_ct_is_dying(ct))) {
		nf_ct_add_to_dying_list(ct);
		goto dying;
	}

	/* See if there's one in the list already, including reverse:
	   NAT could have grabbed it without realizing, since we're
	   not in the hash.  If there is, we lost race. */
	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				    zone, net))
			goto out;

	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
				    zone, net))
			goto out;

	/* Timer relative to confirmation time, not original
	   setting time, otherwise we'd get timer wrap in
	   weird delay cases. */
	ct->timeout += nfct_time_stamp;
	atomic_inc(&ct->ct_general.use);
	ct->status |= IPS_CONFIRMED;

	/* set conntrack timestamp, if enabled. */
	tstamp = nf_conn_tstamp_find(ct);
	if (tstamp) {
		if (skb->tstamp == 0)
			__net_timestamp(skb);

		tstamp->start = ktime_to_ns(skb->tstamp);
	}
	/* Since the lookup is lockless, hash insertion must be done after
	 * starting the timer and setting the CONFIRMED bit. The RCU barriers
	 * guarantee that no other CPU can find the conntrack before the above
	 * stores are visible.
	 */
	__nf_conntrack_hash_insert(ct, hash, reply_hash);
	nf_conntrack_double_unlock(hash, reply_hash);
	local_bh_enable();

	/* Deliver events only after the entry is visible in the table. */
	help = nfct_help(ct);
	if (help && help->helper)
		nf_conntrack_event_cache(IPCT_HELPER, ct);

	nf_conntrack_event_cache(master_ct(ct) ?
				 IPCT_RELATED : IPCT_NEW, ct);
	return NF_ACCEPT;

out:
	nf_ct_add_to_dying_list(ct);
	/* We lost the race against another inserter; let the l4 protocol
	 * decide whether the clash can be resolved in its favour.
	 */
	ret = nf_ct_resolve_clash(net, skb, ctinfo, h);
dying:
	nf_conntrack_double_unlock(hash, reply_hash);
	NF_CT_STAT_INC(net, insert_failed);
	local_bh_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
			 const struct nf_conn *ignored_conntrack)
{
	struct net *net = nf_ct_net(ignored_conntrack);
	const struct nf_conntrack_zone *zone;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_head *ct_hash;
	unsigned int hash, hsize;
	struct hlist_nulls_node *n;
	struct nf_conn *ct;

	zone = nf_ct_zone(ignored_conntrack);

	rcu_read_lock();
 begin:
	/* Table pointer and size must be re-read on every restart: a resize
	 * may have replaced them.
	 */
	nf_conntrack_get_ht(&ct_hash, &hsize);
	hash = __hash_conntrack(net, tuple, hsize);

	hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);

		if (ct == ignored_conntrack)
			continue;

		/* Lazily reap entries whose timeout has already passed. */
		if (nf_ct_is_expired(ct)) {
			nf_ct_gc_expired(ct);
			continue;
		}

		if (nf_ct_key_equal(h, tuple, zone, net)) {
			/* Tuple is taken already, so caller will need to find
			 * a new source port to use.
			 *
			 * Only exception:
			 * If the *original tuples* are identical, then both
			 * conntracks refer to the same flow.
			 * This is a rare situation, it can occur e.g. when
			 * more than one UDP packet is sent from same socket
			 * in different threads.
			 *
			 * Let nf_ct_resolve_clash() deal with this later.
			 */
			if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
					      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
				continue;

			NF_CT_STAT_INC_ATOMIC(net, found);
			rcu_read_unlock();
			return 1;
		}
	}

	/* nulls-list end marker mismatch means the chain was moved by a
	 * resize while we walked it; restart the scan.
	 */
	if (get_nulls_value(n) != hash) {
		NF_CT_STAT_INC_ATOMIC(net, search_restart);
		goto begin;
	}

	rcu_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
9fb9cbb10 [NETFILTER]: Add ... |
1010 |
|
/* Number of hash buckets scanned per early-drop attempt. */
#define NF_CT_EVICTION_RANGE	8

/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static unsigned int early_drop_list(struct net *net,
				    struct hlist_nulls_head *head)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int drops = 0;
	struct nf_conn *tmp;

	hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
		tmp = nf_ct_tuplehash_to_ctrack(h);

		/* Hardware-offloaded entries are never eviction candidates. */
		if (test_bit(IPS_OFFLOAD_BIT, &tmp->status))
			continue;

		if (nf_ct_is_expired(tmp)) {
			nf_ct_gc_expired(tmp);
			continue;
		}

		/* Skip assured, foreign-netns, and already-dying entries. */
		if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
		    !net_eq(nf_ct_net(tmp), net) ||
		    nf_ct_is_dying(tmp))
			continue;

		if (!atomic_inc_not_zero(&tmp->ct_general.use))
			continue;

		/* kill only if still in same netns -- might have moved due to
		 * SLAB_TYPESAFE_BY_RCU rules.
		 *
		 * We steal the timer reference. If that fails timer has
		 * already fired or someone else deleted it. Just drop ref
		 * and move to next entry.
		 */
		if (net_eq(nf_ct_net(tmp), net) &&
		    nf_ct_is_confirmed(tmp) &&
		    nf_ct_delete(tmp, 0, 0))
			drops++;

		nf_ct_put(tmp);
	}

	return drops;
}
/* Evict an unassured entry to make room when the table is full.  Scans up
 * to NF_CT_EVICTION_RANGE buckets starting from the bucket derived from
 * @hash; returns true as soon as at least one entry was dropped.
 */
static noinline int early_drop(struct net *net, unsigned int hash)
{
	unsigned int i, bucket;

	for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
		struct hlist_nulls_head *ct_hash;
		unsigned int hsize, drops;

		rcu_read_lock();
		/* Re-read table under RCU each round; a resize may occur
		 * between iterations.
		 */
		nf_conntrack_get_ht(&ct_hash, &hsize);
		if (!i)
			bucket = reciprocal_scale(hash, hsize);
		else
			bucket = (bucket + 1) % hsize;

		drops = early_drop_list(net, &ct_hash[bucket]);
		rcu_read_unlock();

		if (drops) {
			NF_CT_STAT_ADD_ATOMIC(net, early_drop, drops);
			return true;
		}
	}

	return false;
}
/* GC must ignore entries that are not yet in the hash table or are
 * already on their way out.
 */
static bool gc_worker_skip_ct(const struct nf_conn *ct)
{
	return !nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct);
}

/* An entry may be early-dropped if it is not assured, or if its l4
 * protocol explicitly allows it via ->can_early_drop().
 */
static bool gc_worker_can_early_drop(const struct nf_conn *ct)
{
	const struct nf_conntrack_l4proto *l4proto;

	if (!test_bit(IPS_ASSURED_BIT, &ct->status))
		return true;

	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (l4proto->can_early_drop && l4proto->can_early_drop(ct))
		return true;

	return false;
}
#define	DAY	(86400 * HZ)

/* Set an arbitrary timeout large enough not to ever expire, this save
 * us a check for the IPS_OFFLOAD_BIT from the packet path via
 * nf_ct_is_expired().
 */
static void nf_ct_offload_timeout(struct nf_conn *ct)
{
	/* Only bump when less than half a day remains, so the GC worker
	 * does not rewrite the timeout on every scan.
	 */
	if (nf_ct_expires(ct) < DAY / 2)
		ct->timeout = nfct_time_stamp + DAY;
}
/* Periodic garbage-collection worker: walks a slice of the hash table,
 * reaps expired entries, refreshes offloaded ones, and (when early_drop
 * is requested and the table is near capacity) kills droppable entries.
 * Reschedules itself with an interval adapted to the observed expiry
 * ratio.
 */
static void gc_worker(struct work_struct *work)
{
	unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
	unsigned int i, goal, buckets = 0, expired_count = 0;
	unsigned int nf_conntrack_max95 = 0;
	struct conntrack_gc_work *gc_work;
	unsigned int ratio, scanned = 0;
	unsigned long next_run;

	gc_work = container_of(work, struct conntrack_gc_work, dwork.work);

	goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV;
	i = gc_work->last_bucket;
	/* Early-drop mode kicks in once the table is at >= 95% capacity. */
	if (gc_work->early_drop)
		nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;

	do {
		struct nf_conntrack_tuple_hash *h;
		struct hlist_nulls_head *ct_hash;
		struct hlist_nulls_node *n;
		unsigned int hashsz;
		struct nf_conn *tmp;

		i++;
		rcu_read_lock();

		nf_conntrack_get_ht(&ct_hash, &hashsz);
		if (i >= hashsz)
			i = 0;

		hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
			struct net *net;

			tmp = nf_ct_tuplehash_to_ctrack(h);

			scanned++;
			if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
				nf_ct_offload_timeout(tmp);
				continue;
			}

			if (nf_ct_is_expired(tmp)) {
				nf_ct_gc_expired(tmp);
				expired_count++;
				continue;
			}

			if (nf_conntrack_max95 == 0 || gc_worker_skip_ct(tmp))
				continue;

			net = nf_ct_net(tmp);
			if (atomic_read(&net->ct.count) < nf_conntrack_max95)
				continue;

			/* need to take reference to avoid possible races */
			if (!atomic_inc_not_zero(&tmp->ct_general.use))
				continue;

			/* Re-check under the reference: the entry may have
			 * become unconfirmed/dying meanwhile.
			 */
			if (gc_worker_skip_ct(tmp)) {
				nf_ct_put(tmp);
				continue;
			}

			if (gc_worker_can_early_drop(tmp))
				nf_ct_kill(tmp);

			nf_ct_put(tmp);
		}

		/* could check get_nulls_value() here and restart if ct
		 * was moved to another chain.  But given gc is best-effort
		 * we will just continue with next hash slot.
		 */
		rcu_read_unlock();
		cond_resched();
	} while (++buckets < goal);

	if (gc_work->exiting)
		return;

	/*
	 * Eviction will normally happen from the packet path, and not
	 * from this gc worker.
	 *
	 * This worker is only here to reap expired entries when system went
	 * idle after a busy period.
	 *
	 * The heuristics below are supposed to balance conflicting goals:
	 *
	 * 1. Minimize time until we notice a stale entry
	 * 2. Maximize scan intervals to not waste cycles
	 *
	 * Normally, expire ratio will be close to 0.
	 *
	 * As soon as a sizeable fraction of the entries have expired
	 * increase scan frequency.
	 */
	ratio = scanned ? expired_count * 100 / scanned : 0;
	if (ratio > GC_EVICT_RATIO) {
		gc_work->next_gc_run = min_interval;
	} else {
		unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;

		BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);

		gc_work->next_gc_run += min_interval;
		if (gc_work->next_gc_run > max)
			gc_work->next_gc_run = max;
	}

	next_run = gc_work->next_gc_run;
	gc_work->last_bucket = i;
	/* One-shot: early-drop mode must be re-armed by the allocator. */
	gc_work->early_drop = false;
	queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
}

/* One-time initialisation of the deferrable GC work item. */
static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
{
	INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
	gc_work->next_gc_run = HZ;
	gc_work->exiting = false;
}
/* Allocate and minimally initialise a conntrack entry for @orig/@repl in
 * @net/@zone.  @hash is stashed in the reply-direction pprev slot for
 * reuse at confirm time.  Returns ERR_PTR(-ENOMEM) when the table is
 * full and early_drop() could not make room, or on allocation failure.
 */
static struct nf_conn *
__nf_conntrack_alloc(struct net *net,
		     const struct nf_conntrack_zone *zone,
		     const struct nf_conntrack_tuple *orig,
		     const struct nf_conntrack_tuple *repl,
		     gfp_t gfp, u32 hash)
{
	struct nf_conn *ct;

	/* We don't want any race condition at early drop stage */
	atomic_inc(&net->ct.count);

	if (nf_conntrack_max &&
	    unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
		if (!early_drop(net, hash)) {
			/* Ask the GC worker to start early-dropping too. */
			if (!conntrack_gc_work.early_drop)
				conntrack_gc_work.early_drop = true;
			atomic_dec(&net->ct.count);
			net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	/*
	 * Do not use kmem_cache_zalloc(), as this cache uses
	 * SLAB_TYPESAFE_BY_RCU.
	 */
	ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
	if (ct == NULL)
		goto out;

	spin_lock_init(&ct->lock);
	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
	ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
	/* save hash for reusing when confirming */
	*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
	ct->status = 0;
	write_pnet(&ct->ct_net, net);
	/* Zero only the region between __nfct_init_offset and proto; the
	 * fields before it were set explicitly above.
	 */
	memset(&ct->__nfct_init_offset[0], 0,
	       offsetof(struct nf_conn, proto) -
	       offsetof(struct nf_conn, __nfct_init_offset[0]));

	nf_ct_zone_add(ct, zone);

	/* Because we use RCU lookups, we set ct_general.use to zero before
	 * this is inserted in any list.
	 */
	atomic_set(&ct->ct_general.use, 0);
	return ct;
out:
	atomic_dec(&net->ct.count);
	return ERR_PTR(-ENOMEM);
}
99f07e91b netfilter: save t... |
1281 |
|
308ac9143 netfilter: nf_con... |
1282 1283 |
struct nf_conn *nf_conntrack_alloc(struct net *net, const struct nf_conntrack_zone *zone, |
99f07e91b netfilter: save t... |
1284 1285 1286 1287 1288 1289 |
const struct nf_conntrack_tuple *orig, const struct nf_conntrack_tuple *repl, gfp_t gfp) { return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0); } |
13b183391 [NETFILTER]: nf_c... |
1290 |
EXPORT_SYMBOL_GPL(nf_conntrack_alloc); |
9fb9cbb10 [NETFILTER]: Add ... |
1291 |
|
/* Release a conntrack entry back to the SLAB_TYPESAFE_BY_RCU cache and
 * drop it from the per-netns count.
 */
void nf_conntrack_free(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	/* A freed object has refcnt == 0, that's
	 * the golden rule for SLAB_TYPESAFE_BY_RCU
	 */
	WARN_ON(atomic_read(&ct->ct_general.use) != 0);

	nf_ct_ext_destroy(ct);
	nf_ct_ext_free(ct);
	kmem_cache_free(nf_conntrack_cachep, ct);
	/* Order the free above before the counter decrement below. */
	smp_mb__before_atomic();
	atomic_dec(&net->ct.count);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);
9fb9cbb10 [NETFILTER]: Add ... |
1307 |
|
c539f0171 netfilter: add co... |
1308 |
|
/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static noinline struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net, struct nf_conn *tmpl,
	       const struct nf_conntrack_tuple *tuple,
	       const struct nf_conntrack_l4proto *l4proto,
	       struct sk_buff *skb,
	       unsigned int dataoff, u32 hash)
{
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct nf_conntrack_tuple repl_tuple;
	struct nf_conntrack_ecache *ecache;
	struct nf_conntrack_expect *exp = NULL;
	const struct nf_conntrack_zone *zone;
	struct nf_conn_timeout *timeout_ext;
	struct nf_conntrack_zone tmp;

	/* Derive the reply-direction tuple; if the l4proto cannot invert
	 * this tuple, the packet stays untracked (NULL == no conntrack). */
	if (!nf_ct_invert_tuple(&repl_tuple, tuple, l4proto)) {
		pr_debug("Can't invert tuple.\n");
		return NULL;
	}

	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
	/* On allocation failure an ERR_PTR is propagated to the caller,
	 * cast to the tuple-hash pointer type. */
	ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
				  hash);
	if (IS_ERR(ct))
		return (struct nf_conntrack_tuple_hash *)ct;

	/* Carry synproxy state over from the template conntrack, if any;
	 * on failure release the half-initialized entry. */
	if (!nf_ct_add_synproxy(ct, tmpl)) {
		nf_conntrack_free(ct);
		return ERR_PTR(-ENOMEM);
	}

	timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;

	/* Let the l4 tracker initialize per-protocol state; rejection
	 * here means the packet is not a valid connection start. */
	if (!l4proto->new(ct, skb, dataoff)) {
		nf_conntrack_free(ct);
		pr_debug("can't track with proto module\n");
		return NULL;
	}

	if (timeout_ext)
		nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
				      GFP_ATOMIC);

	/* Attach the optional extensions before the entry becomes visible. */
	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
	nf_ct_labels_ext_add(ct);

	/* Inherit event masks from the template, if one was supplied. */
	ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
	nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
				 ecache ? ecache->expmask : 0,
			     GFP_ATOMIC);

	/* Expectation lookup and unconfirmed-list insertion run with BHs
	 * off; the expectation table is guarded by nf_conntrack_expect_lock. */
	local_bh_disable();
	if (net->ct.expect_count) {
		spin_lock(&nf_conntrack_expect_lock);
		exp = nf_ct_find_expectation(net, zone, tuple);
		if (exp) {
			pr_debug("expectation arrives ct=%p exp=%p\n",
				 ct, exp);
			/* Welcome, Mr. Bond.  We've been expecting you... */
			__set_bit(IPS_EXPECTED_BIT, &ct->status);
			/* exp->master safe, refcnt bumped in nf_ct_find_expectation */
			ct->master = exp->master;
			if (exp->helper) {
				help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
				if (help)
					rcu_assign_pointer(help->helper, exp->helper);
			}

#ifdef CONFIG_NF_CONNTRACK_MARK
			ct->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
			ct->secmark = exp->master->secmark;
#endif
			NF_CT_STAT_INC(net, expect_new);
		}
		spin_unlock(&nf_conntrack_expect_lock);
	}
	/* No expectation matched: try to assign a helper by tuple. */
	if (!exp)
		__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);

	/* Now it is inserted into the unconfirmed list, bump refcount */
	nf_conntrack_get(&ct->ct_general);
	nf_ct_add_to_unconfirmed_list(ct);

	local_bh_enable();

	/* Run the expectation callback outside the BH-off section and
	 * drop the reference taken by nf_ct_find_expectation(). */
	if (exp) {
		if (exp->expectfn)
			exp->expectfn(ct, exp);
		nf_ct_expect_put(exp);
	}

	return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}
/* On success, returns 0, sets skb->_nfct | ctinfo */
static int
resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
		  struct sk_buff *skb,
		  unsigned int dataoff,
		  u_int16_t l3num,
		  u_int8_t protonum,
		  const struct nf_conntrack_l4proto *l4proto)
{
	const struct nf_conntrack_zone *zone;
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	enum ip_conntrack_info ctinfo;
	struct nf_conntrack_zone tmp;
	struct nf_conn *ct;
	u32 hash;

	/* Extract the 5-tuple; failure leaves the skb untracked (ret 0). */
	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
			     dataoff, l3num, protonum, net, &tuple, l4proto)) {
		pr_debug("Can't get tuple\n");
		return 0;
	}

	/* look for tuple match */
	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
	hash = hash_conntrack_raw(&tuple, net);
	h = __nf_conntrack_find_get(net, zone, &tuple, hash);
	if (!h) {
		/* No match: create a fresh (unconfirmed) conntrack.
		 * NULL means "don't track"; ERR_PTR is an allocation error
		 * propagated as a negative return. */
		h = init_conntrack(net, tmpl, &tuple, l4proto,
				   skb, dataoff, hash);
		if (!h)
			return 0;
		if (IS_ERR(h))
			return PTR_ERR(h);
	}
	ct = nf_ct_tuplehash_to_ctrack(h);

	/* It exists; we have (non-exclusive) reference. */
	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
		ctinfo = IP_CT_ESTABLISHED_REPLY;
	} else {
		/* Once we've had two way comms, always ESTABLISHED. */
		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
			pr_debug("normal packet for %p\n", ct);
			ctinfo = IP_CT_ESTABLISHED;
		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
			pr_debug("related packet for %p\n", ct);
			ctinfo = IP_CT_RELATED;
		} else {
			pr_debug("new packet for %p\n", ct);
			ctinfo = IP_CT_NEW;
		}
	}
	nf_ct_set(skb, ct, ctinfo);
	return 0;
}

/* Main conntrack hook entry point: associate skb with a conntrack and
 * run the l4 tracker state machine; returns an NF_* verdict. */
unsigned int
nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
		struct sk_buff *skb)
{
	const struct nf_conntrack_l4proto *l4proto;
	struct nf_conn *ct, *tmpl;
	enum ip_conntrack_info ctinfo;
	u_int8_t protonum;
	int dataoff, ret;

	tmpl = nf_ct_get(skb, &ctinfo);
	if (tmpl || ctinfo == IP_CT_UNTRACKED) {
		/* Previously seen (loopback or untracked)?  Ignore. */
		if ((tmpl && !nf_ct_is_template(tmpl)) ||
		     ctinfo == IP_CT_UNTRACKED) {
			NF_CT_STAT_INC_ATOMIC(net, ignore);
			return NF_ACCEPT;
		}
		/* Template conntrack: detach it so resolve_normal_ct() can
		 * attach the real entry; tmpl is released at "out". */
		skb->_nfct = 0;
	}

	/* rcu_read_lock()ed by nf_hook_thresh */
	dataoff = get_l4proto(skb, skb_network_offset(skb), pf, &protonum);
	if (dataoff <= 0) {
		pr_debug("not prepared to track yet or error occurred\n");
		NF_CT_STAT_INC_ATOMIC(net, error);
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		ret = NF_ACCEPT;
		goto out;
	}

	l4proto = __nf_ct_l4proto_find(pf, protonum);

	/* It may be an special packet, error, unclean...
	 * inverse of the return code tells to the netfilter
	 * core what to do with the packet. */
	if (l4proto->error != NULL) {
		ret = l4proto->error(net, tmpl, skb, dataoff, pf, hooknum);
		if (ret <= 0) {
			NF_CT_STAT_INC_ATOMIC(net, error);
			NF_CT_STAT_INC_ATOMIC(net, invalid);
			ret = -ret;
			goto out;
		}
		/* ICMP[v6] protocol trackers may assign one conntrack. */
		if (skb->_nfct)
			goto out;
	}
repeat:
	ret = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
				l4proto);
	if (ret < 0) {
		/* Too stressed to deal. */
		NF_CT_STAT_INC_ATOMIC(net, drop);
		ret = NF_DROP;
		goto out;
	}

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct) {
		/* Not valid part of a connection */
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		ret = NF_ACCEPT;
		goto out;
	}

	ret = l4proto->packet(ct, skb, dataoff, ctinfo);
	if (ret <= 0) {
		/* Invalid: inverse of the return code tells
		 * the netfilter core what to do */
		pr_debug("nf_conntrack_in: Can't track with proto module\n");
		/* Drop the reference taken by resolve_normal_ct() and
		 * detach the conntrack from the skb. */
		nf_conntrack_put(&ct->ct_general);
		skb->_nfct = 0;
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		if (ret == -NF_DROP)
			NF_CT_STAT_INC_ATOMIC(net, drop);
		/* Special case: TCP tracker reports an attempt to reopen a
		 * closed/aborted connection. We have to go back and create a
		 * fresh conntrack.
		 */
		if (ret == -NF_REPEAT)
			goto repeat;
		ret = -ret;
		goto out;
	}

	/* First packet seen in the reply direction: emit the REPLY event
	 * exactly once (test_and_set_bit guards against duplicates). */
	if (ctinfo == IP_CT_ESTABLISHED_REPLY &&
	    !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
		nf_conntrack_event_cache(IPCT_REPLY, ct);
out:
	if (tmpl)
		nf_ct_put(tmpl);

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);
9fb9cbb10 [NETFILTER]: Add ... |
1561 |
|
5f2b4c900 [NETFILTER]: nf_c... |
1562 1563 |
bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse, const struct nf_conntrack_tuple *orig) |
9fb9cbb10 [NETFILTER]: Add ... |
1564 |
{ |
5f2b4c900 [NETFILTER]: nf_c... |
1565 |
bool ret; |
923f4902f [NETFILTER]: nf_c... |
1566 1567 1568 |
rcu_read_lock(); ret = nf_ct_invert_tuple(inverse, orig, |
923f4902f [NETFILTER]: nf_c... |
1569 1570 1571 1572 |
__nf_ct_l4proto_find(orig->src.l3num, orig->dst.protonum)); rcu_read_unlock(); return ret; |
9fb9cbb10 [NETFILTER]: Add ... |
1573 |
} |
13b183391 [NETFILTER]: nf_c... |
1574 |
EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr); |
9fb9cbb10 [NETFILTER]: Add ... |
1575 |
|
/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
   implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
			      const struct nf_conntrack_tuple *newreply)
{
	struct nf_conn_help *help = nfct_help(ct);

	/* Should be unconfirmed, so not in hash table yet */
	WARN_ON(nf_ct_is_confirmed(ct));

	pr_debug("Altering reply tuple of %p to ", ct);
	nf_ct_dump_tuple(newreply);

	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
	/* Keep the current helper if this conntrack was expected or
	 * already has pending expectations of its own. */
	if (ct->master || (help && !hlist_empty(&help->expectations)))
		return;

	/* Reply tuple changed: re-run helper assignment under RCU. */
	rcu_read_lock();
	__nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
5b1158e90 [NETFILTER]: Add ... |
1597 |
|
9fb9cbb10 [NETFILTER]: Add ... |
1598 1599 1600 1601 1602 1603 1604 |
/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */ void __nf_ct_refresh_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo, const struct sk_buff *skb, unsigned long extra_jiffies, int do_acct) { |
44d6e2f27 net: Replace NF_C... |
1605 |
WARN_ON(!skb); |
9fb9cbb10 [NETFILTER]: Add ... |
1606 |
|
997ae831a [NETFILTER]: conn... |
1607 |
/* Only update if this is not a fixed timeout */ |
47d950454 [NETFILTER]: nf_c... |
1608 1609 |
if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) goto acct; |
997ae831a [NETFILTER]: conn... |
1610 |
|
9fb9cbb10 [NETFILTER]: Add ... |
1611 |
/* If not in hash table, timer will not be active yet */ |
f330a7fdb netfilter: conntr... |
1612 1613 |
if (nf_ct_is_confirmed(ct)) extra_jiffies += nfct_time_stamp; |
9fb9cbb10 [NETFILTER]: Add ... |
1614 |
|
f330a7fdb netfilter: conntr... |
1615 |
ct->timeout = extra_jiffies; |
47d950454 [NETFILTER]: nf_c... |
1616 |
acct: |
ba76738c0 netfilter: conntr... |
1617 1618 |
if (do_acct) nf_ct_acct_update(ct, ctinfo, skb->len); |
9fb9cbb10 [NETFILTER]: Add ... |
1619 |
} |
13b183391 [NETFILTER]: nf_c... |
1620 |
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct); |
9fb9cbb10 [NETFILTER]: Add ... |
1621 |
|
ad66713f5 netfilter: remove... |
1622 1623 1624 |
bool nf_ct_kill_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo, const struct sk_buff *skb) |
51091764f netfilter: nf_con... |
1625 |
{ |
ad66713f5 netfilter: remove... |
1626 |
nf_ct_acct_update(ct, ctinfo, skb->len); |
584015727 netfilter: accoun... |
1627 |
|
f330a7fdb netfilter: conntr... |
1628 |
return nf_ct_delete(ct, 0, 0); |
51091764f netfilter: nf_con... |
1629 |
} |
ad66713f5 netfilter: remove... |
1630 |
EXPORT_SYMBOL_GPL(nf_ct_kill_acct); |
51091764f netfilter: nf_con... |
1631 |
|
c0cd11566 net:netfilter: us... |
1632 |
#if IS_ENABLED(CONFIG_NF_CT_NETLINK) |
c1d10adb4 [NETFILTER]: Add ... |
1633 1634 1635 |
#include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nfnetlink_conntrack.h> |
57b47a53e [NET]: sem2mutex ... |
1636 |
#include <linux/mutex.h> |
c1d10adb4 [NETFILTER]: Add ... |
1637 1638 1639 |
/* Generic function for tcp/udp/sctp/dccp and alike. This needs to be * in ip_conntrack_core, since we don't want the protocols to autoload * or depend on ctnetlink */ |
fdf708322 [NETFILTER]: nfne... |
1640 |
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, |
c1d10adb4 [NETFILTER]: Add ... |
1641 1642 |
const struct nf_conntrack_tuple *tuple) { |
bae65be89 nf_conntrack_core... |
1643 1644 1645 |
if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) || nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port)) goto nla_put_failure; |
c1d10adb4 [NETFILTER]: Add ... |
1646 |
return 0; |
df6fb868d [NETFILTER]: nfne... |
1647 |
nla_put_failure: |
c1d10adb4 [NETFILTER]: Add ... |
1648 1649 |
return -1; } |
fdf708322 [NETFILTER]: nfne... |
1650 |
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr); |
c1d10adb4 [NETFILTER]: Add ... |
1651 |
|
/* Netlink attribute policy for the port-based l4 protocols: both
 * source and destination port are 16-bit attributes. */
const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
	[CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
c1d10adb4 [NETFILTER]: Add ... |
1657 |
|
fdf708322 [NETFILTER]: nfne... |
1658 |
int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[], |
c1d10adb4 [NETFILTER]: Add ... |
1659 1660 |
struct nf_conntrack_tuple *t) { |
df6fb868d [NETFILTER]: nfne... |
1661 |
if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT]) |
c1d10adb4 [NETFILTER]: Add ... |
1662 |
return -EINVAL; |
77236b6e3 [NETFILTER]: ctne... |
1663 1664 |
t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]); t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]); |
c1d10adb4 [NETFILTER]: Add ... |
1665 1666 1667 |
return 0; } |
fdf708322 [NETFILTER]: nfne... |
1668 |
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple); |
5c0de29d0 netfilter: nf_con... |
1669 |
|
/* Return the netlink payload size of the port tuple attributes.
 * Computed lazily on first call and cached; the unsynchronized write
 * is benign since every caller computes the same value. */
unsigned int nf_ct_port_nlattr_tuple_size(void)
{
	static unsigned int size __read_mostly;

	if (!size)
		size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);

	return size;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
c1d10adb4 [NETFILTER]: Add ... |
1680 |
#endif |
9fb9cbb10 [NETFILTER]: Add ... |
1681 |
/* Used by ipt_REJECT and ip6t_REJECT. */ |
312a0c16c netfilter: nf_con... |
1682 |
static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb) |
9fb9cbb10 [NETFILTER]: Add ... |
1683 1684 1685 1686 1687 1688 1689 |
{ struct nf_conn *ct; enum ip_conntrack_info ctinfo; /* This ICMP is in reverse direction to the packet which caused it */ ct = nf_ct_get(skb, &ctinfo); if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) |
fb0488337 netfilter: add mo... |
1690 |
ctinfo = IP_CT_RELATED_REPLY; |
9fb9cbb10 [NETFILTER]: Add ... |
1691 1692 1693 1694 |
else ctinfo = IP_CT_RELATED; /* Attach to new skbuff, and increment count */ |
c74454fad netfilter: add an... |
1695 |
nf_ct_set(nskb, ct, ctinfo); |
cb9c68363 skbuff: add and u... |
1696 |
nf_conntrack_get(skb_nfct(nskb)); |
9fb9cbb10 [NETFILTER]: Add ... |
1697 |
} |
/* Re-resolve the conntrack for an unconfirmed skb whose tuple clashes
 * with an existing entry, and replay any NAT mangling the old entry
 * had already applied.  Returns 0 on success/no-op, -1 on error. */
static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
{
	const struct nf_conntrack_l4proto *l4proto;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	enum ip_conntrack_info ctinfo;
	struct nf_nat_hook *nat_hook;
	unsigned int status;
	struct nf_conn *ct;
	int dataoff;
	u16 l3num;
	u8 l4num;

	/* Nothing to do for untracked or already-confirmed entries. */
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct || nf_ct_is_confirmed(ct))
		return 0;

	l3num = nf_ct_l3num(ct);

	dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num);
	if (dataoff <= 0)
		return -1;

	l4proto = nf_ct_l4proto_find_get(l3num, l4num);

	if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
			     l4num, net, &tuple, l4proto))
		return -1;

	/* Undo NAT in the lookup tuple: use the pre-NAT (original
	 * direction) addresses/ports so the table lookup can match. */
	if (ct->status & IPS_SRC_NAT) {
		memcpy(tuple.src.u3.all,
		       ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.all,
		       sizeof(tuple.src.u3.all));
		tuple.src.u.all =
			ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all;
	}

	if (ct->status & IPS_DST_NAT) {
		memcpy(tuple.dst.u3.all,
		       ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.all,
		       sizeof(tuple.dst.u3.all));
		tuple.dst.u.all =
			ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all;
	}

	h = nf_conntrack_find_get(net, nf_ct_zone(ct), &tuple);
	if (!h)
		return 0;

	/* Store status bits of the conntrack that is clashing to re-do NAT
	 * mangling according to what it has been done already to this packet.
	 */
	status = ct->status;

	/* Swap the skb's conntrack: drop the clashing entry's reference
	 * and attach the one found in the table. */
	nf_ct_put(ct);
	ct = nf_ct_tuplehash_to_ctrack(h);
	nf_ct_set(skb, ct, ctinfo);

	nat_hook = rcu_dereference(nf_nat_hook);
	if (!nat_hook)
		return 0;

	/* Re-apply SNAT/DNAT mangling through the NAT hook; a NF_DROP
	 * verdict from either manipulation aborts the update. */
	if (status & IPS_SRC_NAT &&
	    nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_SRC,
				IP_CT_DIR_ORIGINAL) == NF_DROP)
		return -1;

	if (status & IPS_DST_NAT &&
	    nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_DST,
				IP_CT_DIR_ORIGINAL) == NF_DROP)
		return -1;

	return 0;
}
/* Copy the skb's conntrack tuple into @dst_tuple.  Uses the attached
 * conntrack when present; otherwise parses the packet (IPv4 only --
 * NFPROTO_IPV4 is hard-coded below) and looks the tuple up in the
 * default zone.  Returns true if a tuple was produced. */
static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
				       const struct sk_buff *skb)
{
	const struct nf_conntrack_tuple *src_tuple;
	const struct nf_conntrack_tuple_hash *hash;
	struct nf_conntrack_tuple srctuple;
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	/* Fast path: a conntrack is already attached to the skb. */
	ct = nf_ct_get(skb, &ctinfo);
	if (ct) {
		src_tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
		memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
		return true;
	}

	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
			       NFPROTO_IPV4, dev_net(skb->dev),
			       &srctuple))
		return false;

	hash = nf_conntrack_find_get(dev_net(skb->dev),
				     &nf_ct_zone_dflt,
				     &srctuple);
	if (!hash)
		return false;

	/* The lookup may have matched either direction; copy the tuple
	 * for the opposite direction of the one that matched. */
	ct = nf_ct_tuplehash_to_ctrack(hash);
	src_tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
	memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
	nf_ct_put(ct);

	return true;
}
/* Bring out ya dead! */
/* Scan the hash table from *bucket onward and return the first
 * ORIGINAL-direction conntrack for which @iter returns nonzero, with
 * its refcount raised; NULL when the table is exhausted.  *bucket is
 * advanced so the caller can resume the scan. */
static struct nf_conn *
get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
		void *data, unsigned int *bucket)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct hlist_nulls_node *n;
	spinlock_t *lockp;

	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
		/* Buckets share CONNTRACK_LOCKS spinlocks; take the one
		 * covering this bucket with BHs disabled. */
		lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
		local_bh_disable();
		nf_conntrack_lock(lockp);
		/* Re-check under the lock: the table may have been resized
		 * (shrunk) since the loop condition was evaluated. */
		if (*bucket < nf_conntrack_htable_size) {
			hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
				/* Each conntrack is linked once per direction;
				 * visit it only via its ORIGINAL entry. */
				if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
					continue;
				ct = nf_ct_tuplehash_to_ctrack(h);
				if (iter(ct, data))
					goto found;
			}
		}
		spin_unlock(lockp);
		local_bh_enable();
		cond_resched();
	}

	return NULL;
found:
	/* Take a reference before dropping the bucket lock so the entry
	 * cannot be freed under the caller. */
	atomic_inc(&ct->ct_general.use);
	spin_unlock(lockp);
	local_bh_enable();
	return ct;
}
/* Walk the whole conntrack table and delete every entry @iter accepts.
 * The scan restarts from bucket 0 if a table resize is observed via
 * the nf_conntrack_generation seqcount. */
static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
				  void *data, u32 portid, int report)
{
	unsigned int bucket = 0, sequence;
	struct nf_conn *ct;

	might_sleep();

	for (;;) {
		sequence = read_seqcount_begin(&nf_conntrack_generation);

		while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
			/* Time to push up daises... */

			nf_ct_delete(ct, portid, report);
			/* Drop the reference get_next_corpse() took. */
			nf_ct_put(ct);
			cond_resched();
		}

		/* Table was resized while we scanned: start over. */
		if (!read_seqcount_retry(&nf_conntrack_generation, sequence))
			break;
		bucket = 0;
	}
}

/* Closure passed through nf_ct_iterate_cleanup() to restrict a
 * caller's iterator to a single network namespace. */
struct iter_data {
	int (*iter)(struct nf_conn *i, void *data);
	void *data;
	struct net *net;
};

/* Wrapper iterator: skip conntracks belonging to other netns, then
 * delegate to the caller-supplied iterator. */
static int iter_net_only(struct nf_conn *i, void *data)
{
	struct iter_data *d = data;

	if (!net_eq(d->net, nf_ct_net(i)))
		return 0;

	return d->iter(i, d->data);
}
/* Mark every entry on each CPU's unconfirmed list as dying so it will
 * not be promoted into the main hash table. */
static void __nf_ct_unconfirmed_destroy(struct net *net)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct nf_conntrack_tuple_hash *h;
		struct hlist_nulls_node *n;
		struct ct_pcpu *pcpu;

		pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);

		/* The per-cpu unconfirmed list is protected by its own
		 * lock, taken with BHs disabled. */
		spin_lock_bh(&pcpu->lock);
		hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
			struct nf_conn *ct;

			ct = nf_ct_tuplehash_to_ctrack(h);

			/* we cannot call iter() on unconfirmed list, the
			 * owning cpu can reallocate ct->ext at any time.
			 */
			set_bit(IPS_DYING_BIT, &ct->status);
		}
		spin_unlock_bh(&pcpu->lock);
		cond_resched();
	}
}
84657984c netfilter: add an... |
1905 1906 1907 1908 1909 1910 |
void nf_ct_unconfirmed_destroy(struct net *net) { might_sleep(); if (atomic_read(&net->ct.count) > 0) { __nf_ct_unconfirmed_destroy(net); |
e2a750070 netfilter: conntr... |
1911 |
nf_queue_nf_hook_drop(net); |
84657984c netfilter: add an... |
1912 1913 1914 1915 |
synchronize_net(); } } EXPORT_SYMBOL_GPL(nf_ct_unconfirmed_destroy); |
9fd6452d6 netfilter: conntr... |
1916 1917 1918 |
void nf_ct_iterate_cleanup_net(struct net *net, int (*iter)(struct nf_conn *i, void *data), void *data, u32 portid, int report) |
9fb9cbb10 [NETFILTER]: Add ... |
1919 |
{ |
2843fb699 netfilter: conntr... |
1920 |
struct iter_data d; |
9fb9cbb10 [NETFILTER]: Add ... |
1921 |
|
d93c6258e netfilter: conntr... |
1922 |
might_sleep(); |
88b68bc52 netfilter: conntr... |
1923 1924 |
if (atomic_read(&net->ct.count) == 0) return; |
2843fb699 netfilter: conntr... |
1925 1926 1927 |
d.iter = iter; d.data = data; d.net = net; |
2843fb699 netfilter: conntr... |
1928 1929 1930 |
nf_ct_iterate_cleanup(iter_net_only, &d, portid, report); } EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net); |
9fb9cbb10 [NETFILTER]: Add ... |
1931 |
|
/**
 * nf_ct_iterate_destroy - destroy unconfirmed conntracks and iterate table
 * @iter: callback to invoke for each conntrack
 * @data: data to pass to @iter
 *
 * Like nf_ct_iterate_cleanup, but first marks conntracks on the
 * unconfirmed list as dying (so they will not be inserted into
 * main table).
 *
 * Can only be called in module exit path.
 */
void
nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
{
	struct net *net;

	/* Hold net_rwsem so the namespace list is stable while we walk it. */
	down_read(&net_rwsem);
	for_each_net(net) {
		if (atomic_read(&net->ct.count) == 0)
			continue;
		__nf_ct_unconfirmed_destroy(net);
		nf_queue_nf_hook_drop(net);
	}
	up_read(&net_rwsem);

	/* Need to wait for netns cleanup worker to finish, if its
	 * running -- it might have deleted a net namespace from
	 * the global list, so our __nf_ct_unconfirmed_destroy() might
	 * not have affected all namespaces.
	 */
	net_ns_barrier();

	/* a conntrack could have been unlinked from unconfirmed list
	 * before we grabbed pcpu lock in __nf_ct_unconfirmed_destroy().
	 * This makes sure its inserted into conntrack table.
	 */
	synchronize_net();

	nf_ct_iterate_cleanup(iter, data, 0, 0);
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy);
9fb9cbb10 [NETFILTER]: Add ... |
1971 |
|
/* Iterator: select every conntrack in the given netns (@data is the
 * struct net pointer) for deletion. */
static int kill_all(struct nf_conn *i, void *data)
{
	return net_eq(nf_ct_net(i), data);
}
f94161c1b netfilter: nf_con... |
1976 |
/* First phase of module unload: stop new work from being generated.
 * The gc worker is told to exit and the attach hook used by e.g. the
 * REJECT target is detached.
 */
void nf_conntrack_cleanup_start(void)
{
	/* gc worker checks this flag and stops rescheduling itself */
	conntrack_gc_work.exiting = true;
	RCU_INIT_POINTER(ip_ct_attach, NULL);
}

/* Final phase of module unload: tear down all global state.
 * The fini calls below run in the reverse order of the corresponding
 * *_init() calls in nf_conntrack_init_start() -- keep them in sync.
 */
void nf_conntrack_cleanup_end(void)
{
	RCU_INIT_POINTER(nf_ct_hook, NULL);
	/* wait for a possibly still-running gc pass to finish */
	cancel_delayed_work_sync(&conntrack_gc_work.dwork);
	kvfree(nf_conntrack_hash);

	nf_conntrack_proto_fini();
	nf_conntrack_seqadj_fini();
	nf_conntrack_labels_fini();
	nf_conntrack_helper_fini();
	nf_conntrack_timeout_fini();
	nf_conntrack_ecache_fini();
	nf_conntrack_tstamp_fini();
	nf_conntrack_acct_fini();
	kmem_cache_destroy(nf_conntrack_cachep);
}
9fb9cbb10 [NETFILTER]: Add ... |
2000 |
|
f94161c1b netfilter: nf_con... |
2001 2002 2003 2004 2005 |
/*
 * Mishearing the voices in his head, our hero wonders how he's
 * supposed to kill the mall.
 */
/* Per-netns cleanup for a single namespace: wrap it in a one-entry
 * list and reuse the batched list variant below.
 */
void nf_conntrack_cleanup_net(struct net *net)
{
	LIST_HEAD(single);

	list_add(&net->exit_list, &single);
	nf_conntrack_cleanup_net_list(&single);
}

/* Batched per-netns cleanup: kill every conntrack belonging to the
 * namespaces on @net_exit_list, wait until their refcounts drop to
 * zero, then release the per-netns resources.
 */
void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
{
	int busy;
	struct net *net;

	/*
	 * This makes sure all current packets have passed through
	 *  netfilter framework.  Roll on, two-stage module
	 *  delete...
	 */
	synchronize_net();
i_see_dead_people:
	busy = 0;
	list_for_each_entry(net, net_exit_list, exit_list) {
		nf_ct_iterate_cleanup(kill_all, net, 0, 0);
		/* in-flight skbs may still hold references; retry until
		 * every conntrack of this namespace is really gone
		 */
		if (atomic_read(&net->ct.count) != 0)
			busy = 1;
	}
	if (busy) {
		schedule();
		goto i_see_dead_people;
	}

	list_for_each_entry(net, net_exit_list, exit_list) {
		nf_conntrack_proto_pernet_fini(net);
		nf_conntrack_helper_pernet_fini(net);
		nf_conntrack_ecache_pernet_fini(net);
		nf_conntrack_tstamp_pernet_fini(net);
		nf_conntrack_acct_pernet_fini(net);
		nf_conntrack_expect_pernet_fini(net);
		free_percpu(net->ct.stat);
		free_percpu(net->ct.pcpu_lists);
	}
}
d862a6622 netfilter: nf_con... |
2045 |
void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls) |
9fb9cbb10 [NETFILTER]: Add ... |
2046 |
{ |
ea781f197 netfilter: nf_con... |
2047 2048 |
struct hlist_nulls_head *hash; unsigned int nr_slots, i; |
9fb9cbb10 [NETFILTER]: Add ... |
2049 |
|
9cc1c73ad netfilter: conntr... |
2050 2051 |
if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head))) return NULL; |
ea781f197 netfilter: nf_con... |
2052 2053 |
BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head)); nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head)); |
9cc1c73ad netfilter: conntr... |
2054 |
|
285189c78 netfilter: use kv... |
2055 2056 |
hash = kvmalloc_array(nr_slots, sizeof(struct hlist_nulls_head), GFP_KERNEL | __GFP_ZERO); |
9fb9cbb10 [NETFILTER]: Add ... |
2057 |
|
ea781f197 netfilter: nf_con... |
2058 2059 2060 |
if (hash && nulls) for (i = 0; i < nr_slots; i++) INIT_HLIST_NULLS_HEAD(&hash[i], i); |
9fb9cbb10 [NETFILTER]: Add ... |
2061 2062 2063 |
return hash; } |
ac565e5fc [NETFILTER]: nf_c... |
2064 |
EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable); |
9fb9cbb10 [NETFILTER]: Add ... |
2065 |
|
3183ab899 netfilter: conntr... |
2066 |
/* Resize the global conntrack hash table to @hashsize buckets and
 * rehash every existing entry into the new table.
 * Returns 0 on success (or if the size is already @hashsize),
 * -EINVAL for a zero size, -ENOMEM if the new table cannot be
 * allocated.
 */
int nf_conntrack_hash_resize(unsigned int hashsize)
{
	int i, bucket;
	unsigned int old_size;
	struct hlist_nulls_head *hash, *old_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	if (!hashsize)
		return -EINVAL;

	/* allocate before taking any locks; may round hashsize up */
	hash = nf_ct_alloc_hashtable(&hashsize, 1);
	if (!hash)
		return -ENOMEM;

	old_size = nf_conntrack_htable_size;
	if (old_size == hashsize) {
		kvfree(hash);
		return 0;
	}

	/* stop all packet processing and mark a generation change so
	 * concurrent lookups notice the table swap
	 */
	local_bh_disable();
	nf_conntrack_all_lock();
	write_seqcount_begin(&nf_conntrack_generation);

	/* Lookups in the old hash might happen in parallel, which means we
	 * might get false negatives during connection lookup. New connections
	 * created because of a false negative won't make it into the hash
	 * though since that required taking the locks.
	 */

	for (i = 0; i < nf_conntrack_htable_size; i++) {
		while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
			h = hlist_nulls_entry(nf_conntrack_hash[i].first,
					      struct nf_conntrack_tuple_hash, hnnode);
			ct = nf_ct_tuplehash_to_ctrack(h);
			hlist_nulls_del_rcu(&h->hnnode);
			/* bucket index depends on the new table size */
			bucket = __hash_conntrack(nf_ct_net(ct),
						  &h->tuple, hashsize);
			hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
		}
	}
	/* NOTE(review): old_size is re-read here under the lock; the
	 * earlier read was only used for the quick equality check.
	 */
	old_size = nf_conntrack_htable_size;
	old_hash = nf_conntrack_hash;

	nf_conntrack_hash = hash;
	nf_conntrack_htable_size = hashsize;

	write_seqcount_end(&nf_conntrack_generation);
	nf_conntrack_all_unlock();
	local_bh_enable();

	/* wait for RCU readers of the old table before freeing it */
	synchronize_net();
	kvfree(old_hash);
	return 0;
}
3183ab899 netfilter: conntr... |
2119 |
|
e4dca7b7a treewide: Fix fun... |
2120 |
int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp) |
3183ab899 netfilter: conntr... |
2121 2122 2123 2124 2125 2126 2127 2128 |
{ unsigned int hashsize; int rc; if (current->nsproxy->net_ns != &init_net) return -EOPNOTSUPP; /* On boot, we can set this without any fancy locking. */ |
2045cdfa1 netfilter: nf_con... |
2129 |
if (!nf_conntrack_hash) |
3183ab899 netfilter: conntr... |
2130 2131 2132 2133 2134 2135 2136 2137 |
return param_set_uint(val, kp); rc = kstrtouint(val, 0, &hashsize); if (rc) return rc; return nf_conntrack_hash_resize(hashsize); } |
fae718dda [NETFILTER]: nf_c... |
2138 |
EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize); |
9fb9cbb10 [NETFILTER]: Add ... |
2139 |
|
ab71632c4 netfilter: conntr... |
2140 |
/* Compile-time upper bound on the per-conntrack extension area.  Only
 * used by the BUILD_BUG_ON() in nf_conntrack_init_start(): struct
 * nf_ct_ext stores offsets and lengths in u8, so the sum of all
 * possible extensions must fit in 255 bytes.
 */
static __always_inline unsigned int total_extension_size(void)
{
	/* remember to add new extensions below */
	BUILD_BUG_ON(NF_CT_EXT_NUM > 9);

	return sizeof(struct nf_ct_ext) +
	       sizeof(struct nf_conn_help)
#if IS_ENABLED(CONFIG_NF_NAT)
		+ sizeof(struct nf_conn_nat)
#endif
		+ sizeof(struct nf_conn_seqadj)
		+ sizeof(struct nf_conn_acct)
#ifdef CONFIG_NF_CONNTRACK_EVENTS
		+ sizeof(struct nf_conntrack_ecache)
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
		+ sizeof(struct nf_conn_tstamp)
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
		+ sizeof(struct nf_conn_timeout)
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
		+ sizeof(struct nf_conn_labels)
#endif
#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
		+ sizeof(struct nf_conn_synproxy)
#endif
	;
};
f94161c1b netfilter: nf_con... |
2169 |
/* Global (non-pernet) initialization of the conntrack subsystem:
 * sizes and allocates the hash table, creates the nf_conn slab cache,
 * brings up all extension subsystems and starts the gc worker.
 * On error, everything already initialized is unwound in reverse
 * order via the goto chain.  Returns 0 or a negative errno.
 */
int nf_conntrack_init_start(void)
{
	int max_factor = 8;
	int ret = -ENOMEM;
	int i;

	/* struct nf_ct_ext uses u8 to store offsets/size */
	BUILD_BUG_ON(total_extension_size() > 255u);

	seqcount_init(&nf_conntrack_generation);

	for (i = 0; i < CONNTRACK_LOCKS; i++)
		spin_lock_init(&nf_conntrack_locks[i]);

	if (!nf_conntrack_htable_size) {
		/* Idea from tcp.c: use 1/16384 of memory.
		 * On i386: 32MB machine has 512 buckets.
		 * >= 1GB machines have 16384 buckets.
		 * >= 4GB machines have 65536 buckets.
		 */
		nf_conntrack_htable_size
			= (((totalram_pages << PAGE_SHIFT) / 16384)
			   / sizeof(struct hlist_head));
		if (totalram_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
			nf_conntrack_htable_size = 65536;
		else if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
			nf_conntrack_htable_size = 16384;
		if (nf_conntrack_htable_size < 32)
			nf_conntrack_htable_size = 32;

		/* Use a max. factor of four by default to get the same max as
		 * with the old struct list_heads. When a table size is given
		 * we use the old value of 8 to avoid reducing the max.
		 * entries. */
		max_factor = 4;
	}

	nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1);
	if (!nf_conntrack_hash)
		return -ENOMEM;

	nf_conntrack_max = max_factor * nf_conntrack_htable_size;

	/* SLAB_TYPESAFE_BY_RCU: freed nf_conn objects may be reused
	 * while RCU readers still look at them; lookups revalidate.
	 */
	nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
						sizeof(struct nf_conn),
						NFCT_INFOMASK + 1,
						SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
	if (!nf_conntrack_cachep)
		goto err_cachep;

	/* subsystem bring-up; nf_conntrack_cleanup_end() must tear these
	 * down in exactly the reverse order
	 */
	ret = nf_conntrack_expect_init();
	if (ret < 0)
		goto err_expect;

	ret = nf_conntrack_acct_init();
	if (ret < 0)
		goto err_acct;

	ret = nf_conntrack_tstamp_init();
	if (ret < 0)
		goto err_tstamp;

	ret = nf_conntrack_ecache_init();
	if (ret < 0)
		goto err_ecache;

	ret = nf_conntrack_timeout_init();
	if (ret < 0)
		goto err_timeout;

	ret = nf_conntrack_helper_init();
	if (ret < 0)
		goto err_helper;

	ret = nf_conntrack_labels_init();
	if (ret < 0)
		goto err_labels;

	ret = nf_conntrack_seqadj_init();
	if (ret < 0)
		goto err_seqadj;

	ret = nf_conntrack_proto_init();
	if (ret < 0)
		goto err_proto;

	/* kick off periodic garbage collection */
	conntrack_gc_work_init(&conntrack_gc_work);
	queue_delayed_work(system_power_efficient_wq, &conntrack_gc_work.dwork, HZ);

	return 0;

err_proto:
	nf_conntrack_seqadj_fini();
err_seqadj:
	nf_conntrack_labels_fini();
err_labels:
	nf_conntrack_helper_fini();
err_helper:
	nf_conntrack_timeout_fini();
err_timeout:
	nf_conntrack_ecache_fini();
err_ecache:
	nf_conntrack_tstamp_fini();
err_tstamp:
	nf_conntrack_acct_fini();
err_acct:
	nf_conntrack_expect_fini();
err_expect:
	kmem_cache_destroy(nf_conntrack_cachep);
err_cachep:
	kvfree(nf_conntrack_hash);
	return ret;
}
1f4b24397 netfilter: add st... |
2268 |
/* Hook ops published through the global nf_ct_hook pointer so that
 * netfilter core code can reach conntrack without a hard symbol
 * dependency on this module.
 */
static struct nf_ct_hook nf_conntrack_hook = {
	.update		= nf_conntrack_update,
	.destroy	= destroy_conntrack,
	.get_tuple_skb  = nf_conntrack_get_tuple_skb,
};
f94161c1b netfilter: nf_con... |
2273 2274 2275 2276 |
/* Last step of module init: publish the attach callback and the
 * conntrack hook ops via RCU pointers once everything is set up.
 */
void nf_conntrack_init_end(void)
{
	/* For use by REJECT target */
	RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
	RCU_INIT_POINTER(nf_ct_hook, &nf_conntrack_hook);
}
8cc20198c netfilter: nf_con... |
2279 2280 2281 2282 2283 |
/*
 * We need to use special "null" values, not used in hash table
 */
/* nulls markers for the per-cpu unconfirmed/dying lists and for
 * template conntracks; chosen above any valid hash bucket index
 * so list walkers can tell which list an entry ended up on.
 */
#define UNCONFIRMED_NULLS_VAL	((1<<30)+0)
#define DYING_NULLS_VAL		((1<<30)+1)
#define TEMPLATE_NULLS_VAL	((1<<30)+2)
|
f94161c1b netfilter: nf_con... |
2286 |
/* Per-netns initialization of conntrack state: per-cpu lists and
 * stats, then the pernet parts of each extension subsystem.  On
 * error the goto chain unwinds in reverse order.  Returns 0 or a
 * negative errno.
 */
int nf_conntrack_init_net(struct net *net)
{
	int ret = -ENOMEM;
	int cpu;

	BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER);
	atomic_set(&net->ct.count, 0);

	net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
	if (!net->ct.pcpu_lists)
		goto err_stat;

	/* each cpu gets its own lock plus unconfirmed and dying lists,
	 * tagged with distinct nulls values
	 */
	for_each_possible_cpu(cpu) {
		struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);

		spin_lock_init(&pcpu->lock);
		INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
		INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
	}

	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
	if (!net->ct.stat)
		goto err_pcpu_lists;

	ret = nf_conntrack_expect_pernet_init(net);
	if (ret < 0)
		goto err_expect;

	ret = nf_conntrack_acct_pernet_init(net);
	if (ret < 0)
		goto err_acct;

	ret = nf_conntrack_tstamp_pernet_init(net);
	if (ret < 0)
		goto err_tstamp;

	ret = nf_conntrack_ecache_pernet_init(net);
	if (ret < 0)
		goto err_ecache;

	ret = nf_conntrack_helper_pernet_init(net);
	if (ret < 0)
		goto err_helper;

	ret = nf_conntrack_proto_pernet_init(net);
	if (ret < 0)
		goto err_proto;

	return 0;

err_proto:
	nf_conntrack_helper_pernet_fini(net);
err_helper:
	nf_conntrack_ecache_pernet_fini(net);
err_ecache:
	nf_conntrack_tstamp_pernet_fini(net);
err_tstamp:
	nf_conntrack_acct_pernet_fini(net);
err_acct:
	nf_conntrack_expect_pernet_fini(net);
err_expect:
	free_percpu(net->ct.stat);
err_pcpu_lists:
	free_percpu(net->ct.pcpu_lists);
err_stat:
	return ret;
}