Blame view
net/ipv6/reassembly.c
19.4 KB
1da177e4c
|
1 2 |
/* * IPv6 fragment reassembly |
1ab1457c4
|
3 |
* Linux INET6 implementation |
1da177e4c
|
4 5 |
* * Authors: |
1ab1457c4
|
6 |
* Pedro Roque <roque@di.fc.ul.pt> |
1da177e4c
|
7 |
* |
1da177e4c
|
8 9 10 11 12 13 14 |
* Based on: net/ipv4/ip_fragment.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ |
1ab1457c4
|
15 16 |
/* * Fixes: |
1da177e4c
|
17 18 19 20 21 22 23 24 25 26 27 |
* Andi Kleen Make it work with multiple hosts. * More RFC compliance. * * Horst von Brand Add missing #include <linux/string.h> * Alexey Kuznetsov SMP races, threading, cleanup. * Patrick McHardy LRU queue of frag heads for evictor. * Mitsuru KANDA @USAGI Register inet6_protocol{}. * David Stevens and * YOSHIFUJI,H. @USAGI Always remove fragment header to * calculate ICV correctly. */ |
5a3da1fe9
|
28 29 |
#define pr_fmt(fmt) "IPv6: " fmt |
1da177e4c
|
30 31 32 33 34 35 36 37 38 39 40 41 42 43 |
#include <linux/errno.h> #include <linux/types.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/jiffies.h> #include <linux/net.h> #include <linux/list.h> #include <linux/netdevice.h> #include <linux/in6.h> #include <linux/ipv6.h> #include <linux/icmpv6.h> #include <linux/random.h> #include <linux/jhash.h> |
f61944efd
|
44 |
#include <linux/skbuff.h> |
5a0e3ad6a
|
45 |
#include <linux/slab.h> |
bc3b2d7fb
|
46 |
#include <linux/export.h> |
1da177e4c
|
47 48 49 50 51 |
#include <net/sock.h> #include <net/snmp.h> #include <net/ipv6.h> |
a11d206d0
|
52 |
#include <net/ip6_route.h> |
1da177e4c
|
53 54 55 56 57 |
#include <net/protocol.h> #include <net/transp_v6.h> #include <net/rawv6.h> #include <net/ndisc.h> #include <net/addrconf.h> |
5ab11c98d
|
58 |
#include <net/inet_frag.h> |
eec2e6185
|
59 |
#include <net/inet_ecn.h> |
1da177e4c
|
60 |
|
d4ad4d22e
|
61 |
static const char ip6_frag_cache_name[] = "ip6-frags"; |
/* Per-fragment reassembly state, overlaid on skb->cb.  The IPv6 input
 * path's inet6_skb_parm must stay first so IP6CB() remains valid.
 */
struct ip6frag_skb_cb {
	struct inet6_skb_parm	h;
	int			offset;	/* this fragment's byte offset in the datagram */
};

#define FRAG6_CB(skb)	((struct ip6frag_skb_cb *)((skb)->cb))
1da177e4c
|
67 |
|
fc08c2581
|
68 |
static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h) |
eec2e6185
|
69 70 71 |
{ return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK); } |
1da177e4c
|
72 |
|
7eb95156d
|
73 |
static struct inet_frags ip6_frags; |
1da177e4c
|
74 |
|
f61944efd
|
75 76 |
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev); |
f6596f9d2
|
77 78 79 80 |
/* * callers should be careful not to use the hash value outside the ipfrag_lock * as doing so could race with ipfrag_hash_rnd being recalculated. */ |
b1190570b
|
81 82 |
static unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr, const struct in6_addr *daddr) |
1da177e4c
|
83 |
{ |
b1190570b
|
84 |
net_get_random_once(&ip6_frags.rnd, sizeof(ip6_frags.rnd)); |
fb3cfe6e7
|
85 86 |
return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr), (__force u32)id, ip6_frags.rnd); |
1da177e4c
|
87 |
} |
36c777821
|
88 |
static unsigned int ip6_hashfn(const struct inet_frag_queue *q) |
1da177e4c
|
89 |
{ |
36c777821
|
90 |
const struct frag_queue *fq; |
1da177e4c
|
91 |
|
321a3a99e
|
92 |
fq = container_of(q, struct frag_queue, q); |
b1190570b
|
93 |
return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr); |
1da177e4c
|
94 |
} |
36c777821
|
95 |
bool ip6_frag_match(const struct inet_frag_queue *q, const void *a) |
abd6523d1
|
96 |
{ |
36c777821
|
97 98 |
const struct frag_queue *fq; const struct ip6_create_arg *arg = a; |
abd6523d1
|
99 100 |
fq = container_of(q, struct frag_queue, q); |
cbc264cac
|
101 102 103 |
return fq->id == arg->id && fq->user == arg->user && ipv6_addr_equal(&fq->saddr, arg->src) && |
264640fc2
|
104 105 106 107 |
ipv6_addr_equal(&fq->daddr, arg->dst) && (arg->iif == fq->iif || !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL))); |
abd6523d1
|
108 109 |
} EXPORT_SYMBOL(ip6_frag_match); |
36c777821
|
110 |
void ip6_frag_init(struct inet_frag_queue *q, const void *a) |
1da177e4c
|
111 |
{ |
c6fda2822
|
112 |
struct frag_queue *fq = container_of(q, struct frag_queue, q); |
36c777821
|
113 |
const struct ip6_create_arg *arg = a; |
c6fda2822
|
114 115 |
fq->id = arg->id; |
0b5ccb2ee
|
116 |
fq->user = arg->user; |
4e3fd7a06
|
117 118 |
fq->saddr = *arg->src; fq->daddr = *arg->dst; |
eec2e6185
|
119 |
fq->ecn = arg->ecn; |
1da177e4c
|
120 |
} |
c6fda2822
|
121 |
EXPORT_SYMBOL(ip6_frag_init); |
1da177e4c
|
122 |
|
/*
 * Expire one reassembly queue: kill it, count the failure, and (unless
 * we are merely evicting under memory pressure, or the first fragment
 * never arrived) send an ICMPv6 "fragment reassembly time exceeded".
 *
 * Takes fq->q.lock; drops the caller's queue reference on return.
 */
void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
			   struct inet_frags *frags)
{
	struct net_device *dev = NULL;

	spin_lock(&fq->q.lock);

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto out;

	inet_frag_kill(&fq->q, frags);

	/* RCU protects the ifindex -> device lookup; the device may be
	 * unregistering concurrently.
	 */
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, fq->iif);
	if (!dev)
		goto out_rcu_unlock;

	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);

	/* Memory-pressure eviction is not a timeout: no ICMP error. */
	if (inet_frag_evicting(&fq->q))
		goto out_rcu_unlock;

	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);

	/* Don't send error if the first segment did not arrive. */
	if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !fq->q.fragments)
		goto out_rcu_unlock;

	/* But use as source device on which LAST ARRIVED
	 * segment was received. And do not use fq->dev
	 * pointer directly, device might already disappeared.
	 */
	fq->q.fragments->dev = dev;
	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);

out_rcu_unlock:
	rcu_read_unlock();
out:
	spin_unlock(&fq->q.lock);
	inet_frag_put(&fq->q, frags);
}
EXPORT_SYMBOL(ip6_expire_frag_queue);

/* Timer callback installed as ip6_frags.frag_expire: recover the queue
 * and its netns from the timer argument and expire it.
 */
static void ip6_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
	net = container_of(fq->q.net, struct net, ipv6.frags);

	ip6_expire_frag_queue(net, fq, &ip6_frags);
}
/*
 * Look up (or create) the reassembly queue for a fragment with the
 * given id/addresses.  Returns a referenced queue, or NULL on failure
 * (inet_frag_find may return NULL or an ERR_PTR when the hash bucket
 * overflows; both are reported via inet_frag_maybe_warn_overflow).
 */
static struct frag_queue *
fq_find(struct net *net, __be32 id, const struct in6_addr *src,
	const struct in6_addr *dst, int iif, u8 ecn)
{
	struct inet_frag_queue *q;
	struct ip6_create_arg arg;
	unsigned int hash;

	arg.id = id;
	arg.user = IP6_DEFRAG_LOCAL_DELIVER;
	arg.src = src;
	arg.dst = dst;
	arg.iif = iif;
	arg.ecn = ecn;

	hash = inet6_hash_frag(id, src, dst);

	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct frag_queue, q);
}
/*
 * Queue one fragment into reassembly queue @fq.
 *
 * Validates the fragment's offset/length, enforces the RFC 2460 8-byte
 * alignment rule for non-final fragments (ICMPv6 parameter problem on
 * violation), silently discards the whole queue on any overlap
 * (RFC 5722), inserts the skb into the offset-sorted fragment list and,
 * once both first and last fragments are in and fq->q.meat equals the
 * datagram length, calls ip6_frag_reasm().
 *
 * Called with fq->q.lock held (see ipv6_frag_rcv).  The skb is always
 * consumed.  Returns ip6_frag_reasm()'s result when reassembly fires,
 * -1 otherwise.
 */
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			  struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;
	struct net *net = dev_net(skb_dst(skb)->dev);
	u8 ecn;

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto err;

	/* Fragment offset is in units of 8 bytes; low 3 bits are flags. */
	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}

	ecn = ip6_frag_ecn(ipv6_hdr(skb));

	/* The fragment header is about to be pulled; subtract its bytes
	 * from a CHECKSUM_COMPLETE checksum so it stays consistent.
	 */
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.flags |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.flags & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev || FRAG6_CB(prev)->offset < offset) {
		/* Common in-order case: append at the tail. */
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* RFC5722, Section 4, amended by Errata ID : 3089
	 *                          When reassembling an IPv6 datagram, if
	 *   one or more its constituent fragments is determined to be an
	 *   overlapping fragment, the entire datagram (and any constituent
	 *   fragments) MUST be silently discarded.
	 */

	/* Check for overlap with preceding fragment. */
	if (prev &&
	    (FRAG6_CB(prev)->offset + prev->len) > offset)
		goto discard_fq;

	/* Look for overlap with succeeding segment. */
	if (next && FRAG6_CB(next)->offset < end)
		goto discard_fq;

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;	/* bytes accumulated so far */
	fq->ecn |= ecn;
	add_frag_mem_limit(fq->q.net, skb->truesize);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.flags |= INET_FRAG_FIRST_IN;
	}

	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		int res;
		unsigned long orefdst = skb->_skb_refdst;

		/* Park the dst reference across reassembly and restore
		 * it afterwards.
		 */
		skb->_skb_refdst = 0UL;
		res = ip6_frag_reasm(fq, prev, dev);
		skb->_skb_refdst = orefdst;
		return res;
	}

	skb_dst_drop(skb);
	return -1;

discard_fq:
	inet_frag_kill(&fq->q, &ip6_frags);
err:
	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
			 IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}

/*
 *	Check if this packet is complete.
 *	Returns NULL on failure by any reason, and pointer
 *	to current nexthdr field in reassembled frame.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 */
/*
 * Glue the queued fragments into a single skb, strip the fragment
 * header, fix up the IPv6 payload length / ECN, and hand the result
 * back to the caller (ipv6_frag_rcv returns it to the input path).
 *
 * @prev is the fragment preceding the one that completed the queue, so
 * the just-received skb can be morphed into the head.  Returns 1 on
 * success, -1 on failure.  Called with fq->q.lock held.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
	struct sk_buff *fp, *head = fq->q.fragments;
	int    payload_len;
	unsigned int nhoff;
	int sum_truesize;
	u8 ecn;

	inet_frag_kill(&fq->q, &ip6_frags);

	/* 0xff marks an invalid ECN combination across fragments. */
	ecn = ip_frag_ecn_table[fq->ecn];
	if (unlikely(ecn == 0xff))
		goto out_fail;

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		consume_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG6_CB(head)->offset != 0);

	/* Unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(fq->q.net, clone->truesize);
	}

	/* We have to remove fragment header from datagram and to relocate
	 * header in order to calculate ICV correctly.
	 */
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));

	/* Fold every remaining fragment into head, coalescing when the
	 * skb layout allows, otherwise chaining via frag_list.
	 */
	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);

		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	sub_frag_mem_limit(fq->q.net, sum_truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
	IP6CB(head)->nhoff = nhoff;
	IP6CB(head)->flags |= IP6SKB_FRAGMENTED;

	/* Yes, and fold redundant checksum back. 8) */
	skb_postpush_rcsum(head, skb_network_header(head),
			   skb_network_header_len(head));

	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;
	return 1;

out_oversize:
	net_dbg_ratelimited("ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}
/*
 * Protocol handler for the IPv6 fragment extension header
 * (registered via frag_protocol).
 *
 * Handles three cases: an already-reassembled skb (fail), an "atomic"
 * fragment with offset 0 and no MF bit (pass through, header skipped),
 * and a genuine fragment (queued under the per-queue lock).
 * Return value follows extension-header handler convention: >= 0 to
 * continue parsing, -1 when the skb was consumed.
 */
static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net = dev_net(skb_dst(skb)->dev);

	if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
		goto fail_hdr;

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len == 0)
		goto fail_hdr;

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr))))
		goto fail_hdr;

	/* pskb_may_pull may have reallocated; refetch the header. */
	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	/* 0xFFF9 = offset bits | MF; zero means offset 0 and no MF. */
	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
		return 1;
	}

	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
		     skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
	if (fq) {
		int ret;

		spin_lock(&fq->q.lock);

		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

		spin_unlock(&fq->q.lock);
		inet_frag_put(&fq->q, &ip6_frags);
		return ret;
	}

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;

fail_hdr:
	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
			 IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
	return -1;
}
/* Handler registered for IPPROTO_FRAGMENT in ipv6_frag_init(). */
static const struct inet6_protocol frag_protocol = {
	.handler	= ipv6_frag_rcv,
	.flags		= INET6_PROTO_NOPOLICY,
};
#ifdef CONFIG_SYSCTL
static int zero;	/* lower bound for ip6frag_low_thresh */

/* Per-namespace knobs; .data/.extra pointers are re-targeted to the
 * child netns in ip6_frags_ns_sysctl_register().
 */
static struct ctl_table ip6_frags_ns_ctl_table[] = {
	{
		.procname	= "ip6frag_high_thresh",
		.data		= &init_net.ipv6.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &init_net.ipv6.frags.low_thresh
	},
	{
		.procname	= "ip6frag_low_thresh",
		.data		= &init_net.ipv6.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &init_net.ipv6.frags.high_thresh
	},
	{
		.procname	= "ip6frag_time",
		.data		= &init_net.ipv6.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

/* secret interval has been deprecated */
static int ip6_frags_secret_interval_unused;

/* Global (init_net only) knobs, kept for ABI compatibility. */
static struct ctl_table ip6_frags_ctl_table[] = {
	{
		.procname	= "ip6frag_secret_interval",
		.data		= &ip6_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

/* Register the per-namespace table, duplicating it for child netns so
 * each namespace gets independent threshold/timeout storage.
 */
static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip6_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
		if (!table)
			goto err_alloc;

		table[0].data = &net->ipv6.frags.high_thresh;
		table[0].extra1 = &net->ipv6.frags.low_thresh;
		table[0].extra2 = &init_net.ipv6.frags.high_thresh;
		table[1].data = &net->ipv6.frags.low_thresh;
		table[1].extra2 = &net->ipv6.frags.high_thresh;
		table[2].data = &net->ipv6.frags.timeout;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	hdr = register_net_sysctl(net, "net/ipv6", table);
	if (!hdr)
		goto err_reg;

	net->ipv6.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
	/* init_net used the static table; only duplicates are freed. */
	if (!net_eq(net, &init_net))
		kfree(table);
}

static struct ctl_table_header *ip6_ctl_header;

static int ip6_frags_sysctl_register(void)
{
	ip6_ctl_header = register_net_sysctl(&init_net, "net/ipv6",
					     ip6_frags_ctl_table);
	return ip6_ctl_header == NULL ? -ENOMEM : 0;
}

static void ip6_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(ip6_ctl_header);
}
#else
/* !CONFIG_SYSCTL: no-op stubs so callers need no #ifdefs. */
static int ip6_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static void ip6_frags_ns_sysctl_unregister(struct net *net)
{
}

static int ip6_frags_sysctl_register(void)
{
	return 0;
}

static void ip6_frags_sysctl_unregister(void)
{
}
#endif
7d460db95
|
676 |
|
/* Per-namespace init: seed default thresholds/timeout, then bring up
 * the frag accounting and the namespace's sysctl table.  On sysctl
 * failure the frag state is torn down again so exit is never needed.
 */
static int __net_init ipv6_frags_init_net(struct net *net)
{
	int res;

	net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
	net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
	net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;

	res = inet_frags_init_net(&net->ipv6.frags);
	if (res)
		return res;
	res = ip6_frags_ns_sysctl_register(net);
	if (res)
		inet_frags_uninit_net(&net->ipv6.frags);
	return res;
}
/* Per-namespace teardown: mirror of ipv6_frags_init_net(). */
static void __net_exit ipv6_frags_exit_net(struct net *net)
{
	ip6_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
}

static struct pernet_operations ip6_frags_ops = {
	.init = ipv6_frags_init_net,
	.exit = ipv6_frags_exit_net,
};
853cbbaaa
|
702 |
int __init ipv6_frag_init(void) |
1da177e4c
|
703 |
{ |
853cbbaaa
|
704 |
int ret; |
1da177e4c
|
705 |
|
853cbbaaa
|
706 707 708 |
ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT); if (ret) goto out; |
e71e0349e
|
709 |
|
7d291ebb8
|
710 711 712 |
ret = ip6_frags_sysctl_register(); if (ret) goto err_sysctl; |
0002c630c
|
713 714 715 |
ret = register_pernet_subsys(&ip6_frags_ops); if (ret) goto err_pernet; |
8d8354d2f
|
716 |
|
321a3a99e
|
717 |
ip6_frags.hashfn = ip6_hashfn; |
c6fda2822
|
718 |
ip6_frags.constructor = ip6_frag_init; |
c95477090
|
719 |
ip6_frags.destructor = NULL; |
1e4b82873
|
720 |
ip6_frags.qsize = sizeof(struct frag_queue); |
abd6523d1
|
721 |
ip6_frags.match = ip6_frag_match; |
e521db9d7
|
722 |
ip6_frags.frag_expire = ip6_frag_expire; |
d4ad4d22e
|
723 724 725 726 |
ip6_frags.frags_cache_name = ip6_frag_cache_name; ret = inet_frags_init(&ip6_frags); if (ret) goto err_pernet; |
853cbbaaa
|
727 728 |
out: return ret; |
0002c630c
|
729 730 |
err_pernet: |
7d291ebb8
|
731 732 |
ip6_frags_sysctl_unregister(); err_sysctl: |
0002c630c
|
733 734 |
inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT); goto out; |
853cbbaaa
|
735 736 737 738 739 |
} void ipv6_frag_exit(void) { inet_frags_fini(&ip6_frags); |
7d291ebb8
|
740 |
ip6_frags_sysctl_unregister(); |
81566e832
|
741 |
unregister_pernet_subsys(&ip6_frags_ops); |
853cbbaaa
|
742 |
inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT); |
1da177e4c
|
743 |
} |