net/ipv4/tcp_offload.c

  // SPDX-License-Identifier: GPL-2.0-or-later
  /*
   *	IPV4 GSO/GRO offload support
   *	Linux INET implementation
   *
   *	TCPv4 GSO/GRO support
   */
  #include <linux/indirect_call_wrapper.h>
  #include <linux/skbuff.h>
  #include <net/tcp.h>
  #include <net/protocol.h>
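
/* Walk the segment list and move the software timestamp request onto the
 * segment whose sequence range covers ts_seq (the tskey recorded on the
 * original GSO skb), so exactly one segment carries the tstamp flag.
 */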
  static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
  			   unsigned int seq, unsigned int mss)
  {
  	while (skb) {
  		if (before(ts_seq, seq + mss)) {
  			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
  			skb_shinfo(skb)->tskey = ts_seq;
  			return;
  		}
  
  		skb = skb->next;
  		seq += mss;
  	}
  }
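
/* IPv4 entry point for TCP segmentation offload: verify the skb really is
 * TCPv4 GSO with a complete TCP header, rebuild the pseudo-header checksum
 * if the stack has not provided one, then hand off to tcp_gso_segment().
 */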
  static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
  					netdev_features_t features)
  {
  	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
  		return ERR_PTR(-EINVAL);
  	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
  		return ERR_PTR(-EINVAL);
  
  	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
  		const struct iphdr *iph = ip_hdr(skb);
  		struct tcphdr *th = tcp_hdr(skb);
  
		/* Set up the checksum pseudo-header; the stack is normally
		 * expected to have done this already.
		 */
  
  		th->check = 0;
  		skb->ip_summed = CHECKSUM_PARTIAL;
  		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
  	}
  
  	return tcp_gso_segment(skb, features);
  }
  struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
  				netdev_features_t features)
  {
  	struct sk_buff *segs = ERR_PTR(-EINVAL);
  	unsigned int sum_truesize = 0;
  	struct tcphdr *th;
  	unsigned int thlen;
  	unsigned int seq;
  	__be32 delta;
  	unsigned int oldlen;
  	unsigned int mss;
  	struct sk_buff *gso_skb = skb;
  	__sum16 newcheck;
  	bool ooo_okay, copy_destructor;
  	th = tcp_hdr(skb);
  	thlen = th->doff * 4;
  	if (thlen < sizeof(*th))
  		goto out;
  
  	if (!pskb_may_pull(skb, thlen))
  		goto out;
  
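	/* Keep the one's complement of the original length so the TCP
	 * checksum can be updated incrementally (RFC 1624) once the new
	 * per-segment length is known, instead of being recomputed.
	 */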
  	oldlen = (u16)~skb->len;
  	__skb_pull(skb, thlen);
  	mss = skb_shinfo(skb)->gso_size;
  	if (unlikely(skb->len <= mss))
  		goto out;
  
  	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
  		/* Packet is from an untrusted source, reset gso_segs. */
  
  		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
  
  		segs = NULL;
  		goto out;
  	}
  
  	copy_destructor = gso_skb->destructor == tcp_wfree;
  	ooo_okay = gso_skb->ooo_okay;
  	/* All segments but the first should have ooo_okay cleared */
  	skb->ooo_okay = 0;
  
  	segs = skb_segment(skb, features);
  	if (IS_ERR(segs))
  		goto out;
  
  	/* Only first segment might have ooo_okay set */
  	segs->ooo_okay = ooo_okay;
  	/* GSO partial and frag_list segmentation only requires splitting
  	 * the frame into an MSS multiple and possibly a remainder, both
  	 * cases return a GSO skb. So update the mss now.
  	 */
  	if (skb_is_gso(segs))
  		mss *= skb_shinfo(segs)->gso_segs;
  	delta = htonl(oldlen + (thlen + mss));
  
  	skb = segs;
  	th = tcp_hdr(skb);
  	seq = ntohl(th->seq);
  	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
  		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);
  	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
  					       (__force u32)delta));
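
	/* Fix up all segments but the last: clear FIN/PSH, install the
	 * incrementally updated checksum, and advance each successor's
	 * sequence number by mss while clearing its CWR bit.
	 */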
  	while (skb->next) {
  		th->fin = th->psh = 0;
  		th->check = newcheck;
  		if (skb->ip_summed == CHECKSUM_PARTIAL)
  			gso_reset_checksum(skb, ~th->check);
  		else
  			th->check = gso_make_checksum(skb, ~th->check);
  
  		seq += mss;
  		if (copy_destructor) {
  			skb->destructor = gso_skb->destructor;
  			skb->sk = gso_skb->sk;
  			sum_truesize += skb->truesize;
  		}
  		skb = skb->next;
  		th = tcp_hdr(skb);
  
  		th->seq = htonl(seq);
  		th->cwr = 0;
  	}
  
	/* The following permits TCP Small Queues to work well with GSO:
	 * the callback to the TCP stack is invoked when the last fragment
	 * is freed at TX completion, not right now when gso_skb is freed
	 * by the GSO engine.
	 */
  	if (copy_destructor) {
  		int delta;
  		swap(gso_skb->sk, skb->sk);
  		swap(gso_skb->destructor, skb->destructor);
  		sum_truesize += skb->truesize;
  		delta = sum_truesize - gso_skb->truesize;
  		/* In some pathological cases, delta can be negative.
  		 * We need to either use refcount_add() or refcount_sub_and_test()
  		 */
  		if (likely(delta >= 0))
  			refcount_add(delta, &skb->sk->sk_wmem_alloc);
  		else
  			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
  	}
  
  	delta = htonl(oldlen + (skb_tail_pointer(skb) -
  				skb_transport_header(skb)) +
  		      skb->data_len);
  	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
  				(__force u32)delta));
  	if (skb->ip_summed == CHECKSUM_PARTIAL)
  		gso_reset_checksum(skb, ~th->check);
  	else
  		th->check = gso_make_checksum(skb, ~th->check);
  out:
  	return segs;
  }

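/* Core TCP GRO receive path: parse the TCP header, look for a held packet
 * of the same flow (matching source and destination ports), accumulate
 * 'flush' conditions that forbid merging, and coalesce the payloads with
 * skb_gro_receive() when allowed.
 */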
  struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
  {
  	struct sk_buff *pp = NULL;
  	struct sk_buff *p;
  	struct tcphdr *th;
  	struct tcphdr *th2;
  	unsigned int len;
  	unsigned int thlen;
  	__be32 flags;
  	unsigned int mss = 1;
  	unsigned int hlen;
  	unsigned int off;
  	int flush = 1;
  	int i;
  
  	off = skb_gro_offset(skb);
  	hlen = off + sizeof(*th);
  	th = skb_gro_header_fast(skb, off);
  	if (skb_gro_header_hard(skb, hlen)) {
  		th = skb_gro_header_slow(skb, hlen, off);
  		if (unlikely(!th))
  			goto out;
  	}
  
  	thlen = th->doff * 4;
  	if (thlen < sizeof(*th))
  		goto out;
  
  	hlen = off + thlen;
  	if (skb_gro_header_hard(skb, hlen)) {
  		th = skb_gro_header_slow(skb, hlen, off);
  		if (unlikely(!th))
  			goto out;
  	}
  
  	skb_gro_pull(skb, thlen);
  
  	len = skb_gro_len(skb);
  	flags = tcp_flag_word(th);
  	list_for_each_entry(p, head, list) {
  		if (!NAPI_GRO_CB(p)->same_flow)
  			continue;
  
  		th2 = tcp_hdr(p);
  
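		/* th->source and th->dest are adjacent 16-bit fields, so one
		 * 32-bit compare of &th->source checks both ports at once.
		 */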
  		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
  			NAPI_GRO_CB(p)->same_flow = 0;
  			continue;
  		}
  
  		goto found;
  	}
  	p = NULL;
  	goto out_check_final;
  
  found:
	/* Include the IP ID check below from the innermost IP hdr */
  	flush = NAPI_GRO_CB(p)->flush;
  	flush |= (__force int)(flags & TCP_FLAG_CWR);
  	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
  		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
  	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
  	for (i = sizeof(*th); i < thlen; i += 4)
  		flush |= *(u32 *)((u8 *)th + i) ^
  			 *(u32 *)((u8 *)th2 + i);
	/* When we receive our second frame we can make a decision on whether
	 * we continue this flow as an atomic flow with a fixed ID or use an
	 * incrementing ID.
	 */
  	if (NAPI_GRO_CB(p)->flush_id != 1 ||
  	    NAPI_GRO_CB(p)->count != 1 ||
  	    !NAPI_GRO_CB(p)->is_atomic)
  		flush |= NAPI_GRO_CB(p)->flush_id;
  	else
  		NAPI_GRO_CB(p)->is_atomic = false;
  	mss = skb_shinfo(p)->gso_size;
  
  	flush |= (len - 1) >= mss;
  	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
  #ifdef CONFIG_TLS_DEVICE
  	flush |= p->decrypted ^ skb->decrypted;
  #endif

  	if (flush || skb_gro_receive(p, skb)) {
  		mss = 1;
  		goto out_check_final;
  	}
  	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
  
  out_check_final:
  	flush = len < mss;
  	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
  					TCP_FLAG_RST | TCP_FLAG_SYN |
  					TCP_FLAG_FIN));
  
  	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
  		pp = p;
  
  out:
  	NAPI_GRO_CB(skb)->flush |= (flush != 0);
  
  	return pp;
  }
  
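/* Finalize a merged GRO packet: point csum_start/csum_offset at the TCP
 * checksum field for CHECKSUM_PARTIAL completion and record the number of
 * coalesced segments so the packet can be resegmented by GSO if needed.
 */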
  int tcp_gro_complete(struct sk_buff *skb)
  {
  	struct tcphdr *th = tcp_hdr(skb);
  	skb->csum_start = (unsigned char *)th - skb->head;
  	skb->csum_offset = offsetof(struct tcphdr, check);
  	skb->ip_summed = CHECKSUM_PARTIAL;
  
  	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
  
  	if (th->cwr)
  		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
  
  	return 0;
  }
  EXPORT_SYMBOL(tcp_gro_complete);
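
/* IPv4 wrapper around tcp_gro_receive(): unless the packet is already
 * marked for flushing, validate the TCP checksum against the IPv4
 * pseudo-header before attempting to coalesce.
 */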
  INDIRECT_CALLABLE_SCOPE
  struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
  {
  	/* Don't bother verifying checksum if we're going to flush anyway. */
  	if (!NAPI_GRO_CB(skb)->flush &&
  	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
  				      inet_gro_compute_pseudo)) {
  		NAPI_GRO_CB(skb)->flush = 1;
  		return NULL;
  	}
  
  	return tcp_gro_receive(head, skb);
  }
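
/* IPv4 GRO completion: restore the pseudo-header checksum for the merged
 * packet and mark it TCPv4 GSO (with a fixed IP ID when the flow was
 * atomic) before the generic tcp_gro_complete() finishes the job.
 */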
  INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
  {
  	const struct iphdr *iph = ip_hdr(skb);
  	struct tcphdr *th = tcp_hdr(skb);
  	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
  				  iph->daddr, 0);
  	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

  	if (NAPI_GRO_CB(skb)->is_atomic)
  		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
  	return tcp_gro_complete(skb);
  }
  
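/* Offload callbacks registered for IPPROTO_TCP; the IPv4 stack dispatches
 * TCP GSO and GRO processing through this table.
 */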
  static const struct net_offload tcpv4_offload = {
  	.callbacks = {
  		.gso_segment	=	tcp4_gso_segment,
  		.gro_receive	=	tcp4_gro_receive,
  		.gro_complete	=	tcp4_gro_complete,
  	},
  };
  
  int __init tcpv4_offload_init(void)
  {
  	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
  }