net/ipv4/udp_offload.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	UDPv4 GSO support
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/protocol.h>
#include <net/inet_common.h>

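/* Segment an skb that carries an inner GSO payload inside a UDP tunnel
 * header: strip the outer headers, segment the inner packet with the
 * caller-supplied callback, then rebuild the outer headers and the outer
 * UDP checksum on every resulting segment.
 */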
  static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
  	netdev_features_t features,
  	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
  					     netdev_features_t features),
	__be16 new_protocol, bool is_ipv6)
{
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	bool remcsum, need_csum, offload_csum, gso_partial;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct udphdr *uh = udp_hdr(skb);
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int udp_offset, outer_hlen;
	__wsum partial;
	bool need_ipsec;
  
  	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
  		goto out;
	/* Adjust partial header checksum to negate old length.
	 * We cannot rely on the value contained in uh->len as it is
	 * possible that the actual value exceeds the boundaries of the
	 * 16 bit length field due to the header being added outside of an
	 * IP or IPv6 frame that was already limited to 64K - 1.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)
		partial = (__force __wsum)uh->len;
	else
		partial = (__force __wsum)htonl(skb->len);
	partial = csum_sub(csum_unfold(uh->check), partial);

	/* setup inner skb. */
	skb->encapsulation = 0;
	SKB_GSO_CB(skb)->encap_level = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = new_protocol;
  
	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	skb->encap_hdr_csum = need_csum;

	remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
	skb->remcsum_offload = remcsum;

	need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));
	/* Try to offload checksum if possible */
	offload_csum = !!(need_csum &&
			  !need_ipsec &&
			  (skb->dev->features &
			   (is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) :
				      (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM))));

  	features &= skb->dev->hw_enc_features;
  	/* The only checksum offload we care about from here on out is the
  	 * outer one so strip the existing checksum feature flags and
  	 * instead set the flag based on our outer checksum offload value.
  	 */
	if (remcsum) {
		features &= ~NETIF_F_CSUM_MASK;
		if (!need_csum || offload_csum)
			features |= NETIF_F_HW_CSUM;
	}
	/* segment inner packet. */
  	segs = gso_inner_segment(skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}
	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
	outer_hlen = skb_tnl_header_len(skb);
	udp_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		unsigned int len;

		if (remcsum)
			skb->ip_summed = CHECKSUM_NONE;

		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;

		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, udp_offset);
		len = skb->len - udp_offset;
		uh = udp_hdr(skb);

		/* If we are only performing partial GSO the inner header
		 * will be using a length value equal to only one MSS sized
		 * segment instead of the entire frame.
		 */
		if (gso_partial && skb_is_gso(skb)) {
  			uh->len = htons(skb_shinfo(skb)->gso_size +
  					SKB_GSO_CB(skb)->data_offset +
  					skb->head - (unsigned char *)uh);
  		} else {
  			uh->len = htons(len);
  		}

		if (!need_csum)
			continue;
		uh->check = ~csum_fold(csum_add(partial,
				       (__force __wsum)htonl(len)));

		if (skb->encapsulation || !offload_csum) {
			uh->check = gso_make_checksum(skb, ~uh->check);
			if (uh->check == 0)
				uh->check = CSUM_MANGLED_0;
		} else {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_transport_header(skb) - skb->head;
			skb->csum_offset = offsetof(struct udphdr, check);
		}
  	} while ((skb = skb->next));
  out:
  	return segs;
  }
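/* Entry point for UDP tunnel segmentation: pick the inner segmentation
 * callback based on whether the encapsulated payload starts with an
 * Ethernet header or an IP protocol, then defer to
 * __skb_udp_tunnel_segment().
 */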
  struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
  				       netdev_features_t features,
  				       bool is_ipv6)
  {
  	__be16 protocol = skb->protocol;
  	const struct net_offload **offloads;
  	const struct net_offload *ops;
  	struct sk_buff *segs = ERR_PTR(-EINVAL);
  	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
  					     netdev_features_t features);
  
  	rcu_read_lock();
  
  	switch (skb->inner_protocol_type) {
  	case ENCAP_TYPE_ETHER:
  		protocol = skb->inner_protocol;
  		gso_inner_segment = skb_mac_gso_segment;
  		break;
  	case ENCAP_TYPE_IPPROTO:
  		offloads = is_ipv6 ? inet6_offloads : inet_offloads;
  		ops = rcu_dereference(offloads[skb->inner_ipproto]);
  		if (!ops || !ops->callbacks.gso_segment)
  			goto out_unlock;
  		gso_inner_segment = ops->callbacks.gso_segment;
  		break;
  	default:
  		goto out_unlock;
  	}
  
  	segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
					protocol, is_ipv6);

out_unlock:
	rcu_read_unlock();

	return segs;
}
EXPORT_SYMBOL(skb_udp_tunnel_segment);

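/* Split a fraglist GRO packet back into its original frames via
 * skb_segment_list(), then restore the head skb's UDP length to cover a
 * single MSS.
 */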
  static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
  					      netdev_features_t features)
  {
  	unsigned int mss = skb_shinfo(skb)->gso_size;
  
  	skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
  	if (IS_ERR(skb))
  		return skb;
  
  	udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss);
  
  	return skb;
  }
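/* Segment a UDP GSO (SKB_GSO_UDP_L4) packet into MSS sized datagrams,
 * rewriting each segment's UDP length and checksum, and transferring
 * wmem accounting from the original skb when it is socket-owned.
 */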
  struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
				  netdev_features_t features)
{
	struct sock *sk = gso_skb->sk;
	unsigned int sum_truesize = 0;
	struct sk_buff *segs, *seg;
	struct udphdr *uh;
	unsigned int mss;
	bool copy_dtor;
	__sum16 check;
	__be16 newlen;

	if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)
		return __udp_gso_segment_list(gso_skb, features);
	mss = skb_shinfo(gso_skb)->gso_size;
	if (gso_skb->len <= sizeof(*uh) + mss)
		return ERR_PTR(-EINVAL);
	skb_pull(gso_skb, sizeof(*uh));
	/* clear destructor to avoid skb_segment assigning it to tail */
	copy_dtor = gso_skb->destructor == sock_wfree;
	if (copy_dtor)
		gso_skb->destructor = NULL;

	segs = skb_segment(gso_skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		if (copy_dtor)
			gso_skb->destructor = sock_wfree;
		return segs;
	}

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;
	seg = segs;
	uh = udp_hdr(seg);

	/* preserve TX timestamp flags and TS key for first segment */
	skb_shinfo(seg)->tskey = skb_shinfo(gso_skb)->tskey;
	skb_shinfo(seg)->tx_flags |=
			(skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP);
	/* compute checksum adjustment based on old length versus new */
	newlen = htons(sizeof(*uh) + mss);
	check = csum16_add(csum16_sub(uh->check, uh->len), newlen);
	for (;;) {
		if (copy_dtor) {
			seg->destructor = sock_wfree;
			seg->sk = sk;
			sum_truesize += seg->truesize;
		}

		if (!seg->next)
			break;

		uh->len = newlen;
		uh->check = check;

		if (seg->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(seg, ~check);
		else
			uh->check = gso_make_checksum(seg, ~check) ? :
				    CSUM_MANGLED_0;
		seg = seg->next;
		uh = udp_hdr(seg);
	}
	/* last packet can be partial gso_size, account for that in checksum */
	newlen = htons(skb_tail_pointer(seg) - skb_transport_header(seg) +
		       seg->data_len);
	check = csum16_add(csum16_sub(uh->check, uh->len), newlen);

	uh->len = newlen;
	uh->check = check;
	if (seg->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(seg, ~check);
	else
		uh->check = gso_make_checksum(seg, ~check) ? : CSUM_MANGLED_0;
	/* update refcount for the packet */
	if (copy_dtor) {
		int delta = sum_truesize - gso_skb->truesize;

		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test()
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
	}
	return segs;
}
EXPORT_SYMBOL_GPL(__udp_gso_segment);
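/* .gso_segment callback for IPPROTO_UDP: tunnel skbs take the UDP
 * tunnel path, SKB_GSO_UDP_L4 skbs go through __udp_gso_segment(), and
 * legacy UFO skbs are checksummed in software before being split into
 * IP fragments by skb_segment().
 */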
  static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
  					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	__wsum csum;
	struct udphdr *uh;
	struct iphdr *iph;

	if (skb->encapsulation &&
	    (skb_shinfo(skb)->gso_type &
	     (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
		segs = skb_udp_tunnel_segment(skb, features, false);
		goto out;
	}
	if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4)))
		goto out;
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto out;
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
		return __udp_gso_segment(skb, features);

  	mss = skb_shinfo(skb)->gso_size;
  	if (unlikely(skb->len <= mss))
  		goto out;
  
  	/* Do software UFO. Complete and fill in the UDP checksum as
  	 * HW cannot do checksum of UDP packets sent as multiple
  	 * IP fragments.
  	 */

  	uh = udp_hdr(skb);
  	iph = ip_hdr(skb);
  
  	uh->check = 0;
  	csum = skb_checksum(skb, 0, skb->len, 0);
  	uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
  	if (uh->check == 0)
  		uh->check = CSUM_MANGLED_0;
  
  	skb->ip_summed = CHECKSUM_UNNECESSARY;
  
  	/* If there is no outer header we can fake a checksum offload
  	 * due to the fact that we have already done the checksum in
  	 * software prior to segmenting the frame.
  	 */
  	if (!skb->encap_hdr_csum)
  		features |= NETIF_F_HW_CSUM;
  
  	/* Fragment the skb. IP headers of the fragments are updated in
  	 * inet_gso_segment()
  	 */
  	segs = skb_segment(skb, features);
  out:
  	return segs;
  }
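/* GRO for plain (non-tunneled) UDP flows: coalesce packets of the same
 * flow either onto a frag_list (NETIF_F_GRO_FRAGLIST) or into a regular
 * GRO packet, bounded by UDP_GRO_CNT_MAX segments.
 */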
  #define UDP_GRO_CNT_MAX 64
  static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
  					       struct sk_buff *skb)
  {
	struct udphdr *uh = udp_gro_udphdr(skb);
	struct sk_buff *pp = NULL;
	struct udphdr *uh2;
	struct sk_buff *p;
	unsigned int ulen;
	int ret = 0;

	/* requires non zero csum, for symmetry with GSO */
	if (!uh->check) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}
	/* Do not deal with padded or malicious packets, sorry ! */
	ulen = ntohs(uh->len);
	if (ulen <= sizeof(*uh) || ulen != skb_gro_len(skb)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}
	/* pull encapsulating udp header */
	skb_gro_pull(skb, sizeof(struct udphdr));

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = udp_hdr(p);

		/* Match ports only, as csum is always non zero */
		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		if (NAPI_GRO_CB(skb)->is_flist != NAPI_GRO_CB(p)->is_flist) {
			NAPI_GRO_CB(skb)->flush = 1;
			return p;
		}
		/* Terminate the flow on len mismatch or if it grows "too much".
		 * Under small packet flood the GRO count could otherwise grow a
		 * lot, leading to excessive truesize values.
		 * On len mismatch merge the first packet shorter than gso_size,
		 * otherwise complete the GRO packet.
		 */
  		if (ulen > ntohs(uh2->len)) {
  			pp = p;
  		} else {
  			if (NAPI_GRO_CB(skb)->is_flist) {
  				if (!pskb_may_pull(skb, skb_gro_offset(skb))) {
  					NAPI_GRO_CB(skb)->flush = 1;
  					return NULL;
  				}
  				if ((skb->ip_summed != p->ip_summed) ||
  				    (skb->csum_level != p->csum_level)) {
  					NAPI_GRO_CB(skb)->flush = 1;
  					return NULL;
  				}
  				ret = skb_gro_receive_list(p, skb);
  			} else {
  				skb_gro_postpull_rcsum(skb, uh,
  						       sizeof(struct udphdr));
  
  				ret = skb_gro_receive(p, skb);
  			}
  		}
  
  		if (ret || ulen != ntohs(uh2->len) ||
  		    NAPI_GRO_CB(p)->count >= UDP_GRO_CNT_MAX)
  			pp = p;
  
  		return pp;
  	}
  
  	/* mismatch, but we never need to flush */
  	return NULL;
  }
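/* Protocol-independent UDP GRO entry point, shared by IPv4 and IPv6:
 * route the packet either to the plain UDP segment coalescing above or
 * to a tunnel socket's gro_receive callback when one is registered.
 */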
  struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct udphdr *uh, struct sock *sk)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct udphdr *uh2;
	unsigned int off = skb_gro_offset(skb);
	int flush = 1;
	NAPI_GRO_CB(skb)->is_flist = 0;
	if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
		NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled : 1;

	if ((sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist) {
		pp = call_gro_receive(udp_gro_receive_segment, head, skb);
		return pp;
	}
	if (!sk || NAPI_GRO_CB(skb)->encap_mark ||
	    (skb->ip_summed != CHECKSUM_PARTIAL &&
	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
	     !NAPI_GRO_CB(skb)->csum_valid) ||
	    !udp_sk(sk)->gro_receive)
		goto out;

  	/* mark that this skb passed once through the tunnel gro layer */
  	NAPI_GRO_CB(skb)->encap_mark = 1;

  	flush = 0;
  	list_for_each_entry(p, head, list) {
  		if (!NAPI_GRO_CB(p)->same_flow)
  			continue;
  
		uh2 = (struct udphdr *)(p->data + off);
  
		/* Match ports, and checksums must be either both zero
		 * or both nonzero.
		 */
  		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) ||
  		    (!uh->check ^ !uh2->check)) {
  			NAPI_GRO_CB(p)->same_flow = 0;
  			continue;
  		}
  	}
  
  	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
  	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
  	pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);

  out:
  	skb_gro_flush_final(skb, pp, flush);
  	return pp;
  }
  EXPORT_SYMBOL(udp_gro_receive);

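/* Look up the IPv4 UDP socket matching a packet still held in the GRO
 * layer, so that socket state can steer its GRO handling.
 */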
  static struct sock *udp4_gro_lookup_skb(struct sk_buff *skb, __be16 sport,
  					__be16 dport)
  {
  	const struct iphdr *iph = skb_gro_network_header(skb);
  
  	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
  				 iph->daddr, dport, inet_iif(skb),
  				 inet_sdif(skb), &udp_table, NULL);
  }
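/* IPv4 .gro_receive callback: validate the UDP checksum (converting to
 * CHECKSUM_COMPLETE where possible), look up an encapsulation socket if
 * needed, then defer to udp_gro_receive().
 */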
  INDIRECT_CALLABLE_SCOPE
  struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct udphdr *uh = udp_gro_udphdr(skb);
	struct sock *sk = NULL;
	struct sk_buff *pp;

	if (unlikely(!uh))
		goto flush;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (NAPI_GRO_CB(skb)->flush)
		goto skip;

	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
						 inet_gro_compute_pseudo))
		goto flush;
	else if (uh->check)
  		skb_gro_checksum_try_convert(skb, IPPROTO_UDP,
  					     inet_gro_compute_pseudo);
  skip:
  	NAPI_GRO_CB(skb)->is_ipv6 = 0;
  	rcu_read_lock();
  
  	if (static_branch_unlikely(&udp_encap_needed_key))
  		sk = udp4_gro_lookup_skb(skb, uh->source, uh->dest);
  	pp = udp_gro_receive(head, skb, uh, sk);
  	rcu_read_unlock();
  	return pp;
  
  flush:
  	NAPI_GRO_CB(skb)->flush = 1;
  	return NULL;
  }
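/* Finish GRO for a coalesced plain UDP packet: mark it as SKB_GSO_UDP_L4
 * so that it can be resegmented (or delivered as GSO) further up the
 * stack.
 */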
  static int udp_gro_complete_segment(struct sk_buff *skb)
  {
  	struct udphdr *uh = udp_hdr(skb);
  
  	skb->csum_start = (unsigned char *)uh - skb->head;
  	skb->csum_offset = offsetof(struct udphdr, check);
  	skb->ip_summed = CHECKSUM_PARTIAL;
  
  	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
  	skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
  	return 0;
  }
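/* Shared gro_complete for IPv4/IPv6: fix up the outer UDP length, then
 * either invoke a tunnel socket's gro_complete callback or finalize the
 * packet as plain UDP GSO.
 */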
  int udp_gro_complete(struct sk_buff *skb, int nhoff,
  		     udp_lookup_t lookup)
{
	__be16 newlen = htons(skb->len - nhoff);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
	int err = -ENOSYS;
	struct sock *sk;

	uh->len = newlen;

	rcu_read_lock();
	sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
				udp4_lib_lookup_skb, skb, uh->source, uh->dest);
	if (sk && udp_sk(sk)->gro_complete) {
		skb_shinfo(skb)->gso_type = uh->check ? SKB_GSO_UDP_TUNNEL_CSUM
					: SKB_GSO_UDP_TUNNEL;

		/* Set encapsulation before calling into inner gro_complete()
		 * functions to make them set up the inner offsets.
		 */
		skb->encapsulation = 1;
  		err = udp_sk(sk)->gro_complete(sk, skb,
  				nhoff + sizeof(struct udphdr));
  	} else {
  		err = udp_gro_complete_segment(skb);
  	}
  	rcu_read_unlock();
  
  	if (skb->remcsum_offload)
  		skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;
  	return err;
  }
  EXPORT_SYMBOL(udp_gro_complete);

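/* IPv4 .gro_complete callback: for fraglist packets just fill in the GSO
 * metadata; otherwise seed the outer UDP pseudo-header checksum and call
 * udp_gro_complete().
 */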
  INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
	if (NAPI_GRO_CB(skb)->is_flist) {
		uh->len = htons(skb->len - nhoff);

		skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4);
		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

		if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
			if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
				skb->csum_level++;
		} else {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = 0;
		}

		return 0;
	}
	if (uh->check)
		uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
					  iph->daddr, 0);
	return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
}
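/* Offload callbacks registered for IPPROTO_UDP by udpv4_offload_init()
 * during IPv4 stack initialization.
 */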
  static const struct net_offload udpv4_offload = {
  	.callbacks = {
		.gso_segment = udp4_ufo_fragment,
		.gro_receive  =	udp4_gro_receive,
		.gro_complete =	udp4_gro_complete,
	},
};

int __init udpv4_offload_init(void)
{
	return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}