net/tls/tls_device.c

  /* Copyright (c) 2018, Mellanox Technologies All rights reserved.
   *
   * This software is available to you under a choice of one of two
   * licenses.  You may choose to be licensed under the terms of the GNU
   * General Public License (GPL) Version 2, available from the file
   * COPYING in the main directory of this source tree, or the
   * OpenIB.org BSD license below:
   *
   *     Redistribution and use in source and binary forms, with or
   *     without modification, are permitted provided that the following
   *     conditions are met:
   *
   *      - Redistributions of source code must retain the above
   *        copyright notice, this list of conditions and the following
   *        disclaimer.
   *
   *      - Redistributions in binary form must reproduce the above
   *        copyright notice, this list of conditions and the following
   *        disclaimer in the documentation and/or other materials
   *        provided with the distribution.
   *
   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
   * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
   * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
   * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
   * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
   * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
   * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
   * SOFTWARE.
   */
  
  #include <crypto/aead.h>
  #include <linux/highmem.h>
  #include <linux/module.h>
  #include <linux/netdevice.h>
  #include <net/dst.h>
  #include <net/inet_connection_sock.h>
  #include <net/tcp.h>
  #include <net/tls.h>
#include "trace.h"

  /* device_offload_lock is used to synchronize tls_dev_add
   * against NETDEV_DOWN notifications.
   */
  static DECLARE_RWSEM(device_offload_lock);
  
  static void tls_device_gc_task(struct work_struct *work);
  
  static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
  static LIST_HEAD(tls_device_gc_list);
  static LIST_HEAD(tls_device_list);
  static DEFINE_SPINLOCK(tls_device_lock);
  
  static void tls_device_free_ctx(struct tls_context *ctx)
  {
	if (ctx->tx_conf == TLS_HW) {
		kfree(tls_offload_ctx_tx(ctx));
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
	}

	if (ctx->rx_conf == TLS_HW)
		kfree(tls_offload_ctx_rx(ctx));

	tls_ctx_free(NULL, ctx);
  }
  
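/* Deferred teardown: tls_dev_del() may sleep, so contexts queued by
 * tls_device_queue_ctx_destruction() are unhooked from the device here,
 * in workqueue (process) context, rather than in the socket destructor.
 */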
  static void tls_device_gc_task(struct work_struct *work)
  {
  	struct tls_context *ctx, *tmp;
  	unsigned long flags;
  	LIST_HEAD(gc_list);
  
  	spin_lock_irqsave(&tls_device_lock, flags);
  	list_splice_init(&tls_device_gc_list, &gc_list);
  	spin_unlock_irqrestore(&tls_device_lock, flags);
  
  	list_for_each_entry_safe(ctx, tmp, &gc_list, list) {
  		struct net_device *netdev = ctx->netdev;
		if (netdev && ctx->tx_conf == TLS_HW) {
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
			dev_put(netdev);
			ctx->netdev = NULL;
  		}
  
  		list_del(&ctx->list);
  		tls_device_free_ctx(ctx);
  	}
  }
  
  static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
  {
  	unsigned long flags;
  
  	spin_lock_irqsave(&tls_device_lock, flags);
  	list_move_tail(&ctx->list, &tls_device_gc_list);
  
  	/* schedule_work inside the spinlock
  	 * to make sure tls_device_down waits for that work.
  	 */
  	schedule_work(&tls_device_gc_work);
  
  	spin_unlock_irqrestore(&tls_device_lock, flags);
  }
  
  /* We assume that the socket is already connected */
  static struct net_device *get_netdev_for_sock(struct sock *sk)
  {
  	struct dst_entry *dst = sk_dst_get(sk);
  	struct net_device *netdev = NULL;
  
  	if (likely(dst)) {
  		netdev = dst->dev;
  		dev_hold(netdev);
  	}
  
  	dst_release(dst);
  
  	return netdev;
  }
  
  static void destroy_record(struct tls_record_info *record)
  {
	int i;

	for (i = 0; i < record->num_frags; i++)
		__skb_frag_unref(&record->frags[i]);
	kfree(record);
}

static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
  {
  	struct tls_record_info *info, *temp;
  
  	list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
  		list_del(&info->list);
  		destroy_record(info);
  	}
  
  	offload_ctx->retransmit_hint = NULL;
  }
  
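/* clean_acked_data hook, invoked from TCP as the ACKed sequence advances:
 * every record whose end_seq is now acknowledged can be freed without
 * waiting for the socket to close.
 */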
  static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
  {
  	struct tls_context *tls_ctx = tls_get_ctx(sk);
  	struct tls_record_info *info, *temp;
	struct tls_offload_context_tx *ctx;
	u64 deleted_records = 0;
	unsigned long flags;

	if (!tls_ctx)
		return;
	ctx = tls_offload_ctx_tx(tls_ctx);

	spin_lock_irqsave(&ctx->lock, flags);
	info = ctx->retransmit_hint;
	if (info && !before(acked_seq, info->end_seq))
		ctx->retransmit_hint = NULL;
  
  	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
  		if (before(acked_seq, info->end_seq))
  			break;
  		list_del(&info->list);
  
  		destroy_record(info);
  		deleted_records++;
  	}
  
  	ctx->unacked_record_sn += deleted_records;
  	spin_unlock_irqrestore(&ctx->lock, flags);
  }
  
  /* At this point, there should be no references on this
   * socket and no in-flight SKBs associated with this
   * socket, so it is safe to free all the resources.
   */
void tls_device_sk_destruct(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);

	tls_ctx->sk_destruct(sk);

	if (tls_ctx->tx_conf == TLS_HW) {
		if (ctx->open_record)
			destroy_record(ctx->open_record);
		delete_all_records(ctx);
		crypto_free_aead(ctx->aead_send);
		clean_acked_data_disable(inet_csk(sk));
	}

	if (refcount_dec_and_test(&tls_ctx->refcount))
		tls_device_queue_ctx_destruction(tls_ctx);
}
EXPORT_SYMBOL_GPL(tls_device_sk_destruct);

  void tls_device_free_resources_tx(struct sock *sk)
  {
  	struct tls_context *tls_ctx = tls_get_ctx(sk);
  
  	tls_free_partial_record(sk, tls_ctx);
  }
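/* Called by drivers that lost their TX crypto state: schedules a resync so
 * the next tls_push_record() hands the current (seq, rec_seq) pair back to
 * the device via tls_device_resync_tx().
 */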
  void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq)
  {
  	struct tls_context *tls_ctx = tls_get_ctx(sk);
  
  	trace_tls_device_tx_resync_req(sk, got_seq, exp_seq);
  	WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
  }
  EXPORT_SYMBOL_GPL(tls_offload_tx_resync_request);

static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
				 u32 seq)
{
	struct net_device *netdev;
	struct sk_buff *skb;
	int err = 0;
  	u8 *rcd_sn;
  
  	skb = tcp_write_queue_tail(sk);
  	if (skb)
  		TCP_SKB_CB(skb)->eor = 1;
  
  	rcd_sn = tls_ctx->tx.rec_seq;
	trace_tls_device_tx_resync_send(sk, seq, rcd_sn);

	down_read(&device_offload_lock);
	netdev = tls_ctx->netdev;
	if (netdev)
		err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
							 rcd_sn,
							 TLS_OFFLOAD_CTX_DIR_TX);
	up_read(&device_offload_lock);
	if (err)
		return;

	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
}

  static void tls_append_frag(struct tls_record_info *record,
  			    struct page_frag *pfrag,
  			    int size)
  {
  	skb_frag_t *frag;
  
  	frag = &record->frags[record->num_frags - 1];
	if (skb_frag_page(frag) == pfrag->page &&
	    skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) {
		skb_frag_size_add(frag, size);
	} else {
		++frag;
		__skb_frag_set_page(frag, pfrag->page);
		skb_frag_off_set(frag, pfrag->offset);
		skb_frag_size_set(frag, size);
  		++record->num_frags;
  		get_page(pfrag->page);
  	}
  
  	pfrag->offset += size;
  	record->len += size;
  }
  
  static int tls_push_record(struct sock *sk,
  			   struct tls_context *ctx,
			   struct tls_offload_context_tx *offload_ctx,
			   struct tls_record_info *record,
			   int flags)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	struct tcp_sock *tp = tcp_sk(sk);
	skb_frag_t *frag;
	int i;

	record->end_seq = tp->write_seq + record->len;
	list_add_tail_rcu(&record->list, &offload_ctx->records_list);
	offload_ctx->open_record = NULL;

	if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
		tls_device_resync_tx(sk, ctx, tp->write_seq);

	tls_advance_record_sn(sk, prot, &ctx->tx);

	for (i = 0; i < record->num_frags; i++) {
		frag = &record->frags[i];
		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));
		sk_mem_charge(sk, skb_frag_size(frag));
  		get_page(skb_frag_page(frag));
  	}
  	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);
  
  	/* all ready, send */
  	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
  }
  static int tls_device_record_close(struct sock *sk,
  				   struct tls_context *ctx,
  				   struct tls_record_info *record,
  				   struct page_frag *pfrag,
  				   unsigned char record_type)
  {
  	struct tls_prot_info *prot = &ctx->prot_info;
  	int ret;
  
  	/* append tag
  	 * device will fill in the tag, we just need to append a placeholder
  	 * use socket memory to improve coalescing (re-using a single buffer
  	 * increases frag count)
  	 * if we can't allocate memory now, steal some back from data
  	 */
  	if (likely(skb_page_frag_refill(prot->tag_size, pfrag,
  					sk->sk_allocation))) {
  		ret = 0;
  		tls_append_frag(record, pfrag, prot->tag_size);
  	} else {
  		ret = prot->tag_size;
  		if (record->len <= prot->overhead_size)
  			return -ENOMEM;
  	}
  
  	/* fill prepend */
  	tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
  			 record->len - prot->overhead_size,
  			 record_type, prot->version);
  	return ret;
  }
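
/* Start a new open record: reserve prepend_size bytes in the page frag for
 * the TLS header, which is only written at record close time.
 */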
static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
				 struct page_frag *pfrag,
				 size_t prepend_size)
{
	struct tls_record_info *record;
	skb_frag_t *frag;

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record)
		return -ENOMEM;

	frag = &record->frags[0];
	__skb_frag_set_page(frag, pfrag->page);
	skb_frag_off_set(frag, pfrag->offset);
  	skb_frag_size_set(frag, prepend_size);
  
  	get_page(pfrag->page);
  	pfrag->offset += prepend_size;
  
  	record->num_frags = 1;
  	record->len = prepend_size;
  	offload_ctx->open_record = record;
  	return 0;
  }
  
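/* Ensure there is an open record and that the socket's page frag has room
 * for more payload; on allocation failure signal memory pressure and shrink
 * sndbuf so the caller ends up in sk_stream_wait_memory().
 */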
  static int tls_do_allocation(struct sock *sk,
			     struct tls_offload_context_tx *offload_ctx,
			     struct page_frag *pfrag,
			     size_t prepend_size)
{
	int ret;

	if (!offload_ctx->open_record) {
		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
						   sk->sk_allocation))) {
			READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
  			sk_stream_moderate_sndbuf(sk);
  			return -ENOMEM;
  		}
  
  		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
  		if (ret)
  			return ret;
  
  		if (pfrag->size > pfrag->offset)
  			return 0;
  	}
  
  	if (!sk_page_frag_refill(sk, pfrag))
  		return -ENOMEM;
  
  	return 0;
  }
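/* Copy from the iterator using mostly non-temporal stores: a small head
 * copy aligns the destination to SMP_CACHE_BYTES, whole cache lines go
 * through copy_from_iter_nocache(), and the tail is copied normally.
 */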
  static int tls_device_copy_data(void *addr, size_t bytes, struct iov_iter *i)
  {
  	size_t pre_copy, nocache;
  
  	pre_copy = ~((unsigned long)addr - 1) & (SMP_CACHE_BYTES - 1);
  	if (pre_copy) {
  		pre_copy = min(pre_copy, bytes);
  		if (copy_from_iter(addr, pre_copy, i) != pre_copy)
  			return -EFAULT;
  		bytes -= pre_copy;
  		addr += pre_copy;
  	}
  
  	nocache = round_down(bytes, SMP_CACHE_BYTES);
  	if (copy_from_iter_nocache(addr, nocache, i) != nocache)
  		return -EFAULT;
  	bytes -= nocache;
  	addr += nocache;
  
  	if (bytes && copy_from_iter(addr, bytes, i) != bytes)
  		return -EFAULT;
  
  	return 0;
  }
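/* Common TX path for sendmsg/sendpage: fill the open record from @msg_iter
 * and close/push records once they hit TLS_MAX_PAYLOAD_SIZE or run out of
 * frags (MAX_SKB_FRAGS - 1, leaving room for the tag frag appended at
 * record close).
 */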
  static int tls_push_data(struct sock *sk,
  			 struct iov_iter *msg_iter,
  			 size_t size, int flags,
  			 unsigned char record_type)
  {
  	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	struct tls_record_info *record = ctx->open_record;
	int tls_push_record_flags;
	struct page_frag *pfrag;
	size_t orig_size = size;
	u32 max_open_record_len;
	bool more = false;
	bool done = false;
	int copy, rc = 0;
	long timeo;

	if (flags &
	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
  		return -EOPNOTSUPP;

	if (unlikely(sk->sk_err))
		return -sk->sk_err;

	flags |= MSG_SENDPAGE_DECRYPTED;
	tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if (tls_is_partially_sent_record(tls_ctx)) {
		rc = tls_push_partial_record(sk, tls_ctx, flags);
		if (rc < 0)
			return rc;
	}

	pfrag = sk_page_frag(sk);

	/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
	 * we need to leave room for an authentication tag.
	 */
	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
			      prot->prepend_size;
	do {
		rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size);
		if (unlikely(rc)) {
  			rc = sk_stream_wait_memory(sk, &timeo);
  			if (!rc)
  				continue;
  
  			record = ctx->open_record;
  			if (!record)
  				break;
  handle_error:
  			if (record_type != TLS_RECORD_TYPE_DATA) {
  				/* avoid sending partial
  				 * record with type !=
  				 * application_data
  				 */
  				size = orig_size;
  				destroy_record(record);
  				ctx->open_record = NULL;
			} else if (record->len > prot->prepend_size) {
  				goto last_record;
  			}
  
  			break;
  		}
  
  		record = ctx->open_record;
  		copy = min_t(size_t, size, (pfrag->size - pfrag->offset));
  		copy = min_t(size_t, copy, (max_open_record_len - record->len));
		rc = tls_device_copy_data(page_address(pfrag->page) +
					  pfrag->offset, copy, msg_iter);
		if (rc)
			goto handle_error;

  		tls_append_frag(record, pfrag, copy);
  
  		size -= copy;
  		if (!size) {
  last_record:
  			tls_push_record_flags = flags;
			if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
				more = true;
  				break;
  			}
  
  			done = true;
  		}
  
  		if (done || record->len >= max_open_record_len ||
  		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
  			rc = tls_device_record_close(sk, tls_ctx, record,
  						     pfrag, record_type);
  			if (rc) {
  				if (rc > 0) {
  					size += rc;
  				} else {
  					size = orig_size;
  					destroy_record(record);
  					ctx->open_record = NULL;
  					break;
  				}
  			}

			rc = tls_push_record(sk,
					     tls_ctx,
					     ctx,
					     record,
					     tls_push_record_flags);
  			if (rc < 0)
  				break;
  		}
  	} while (!done);
	tls_ctx->pending_open_record_frags = more;

  	if (orig_size - size > 0)
  		rc = orig_size - size;
  
  	return rc;
  }
  
  int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
  {
  	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int rc;

	mutex_lock(&tls_ctx->tx_lock);
  	lock_sock(sk);
  
  	if (unlikely(msg->msg_controllen)) {
  		rc = tls_proccess_cmsg(sk, msg, &record_type);
  		if (rc)
  			goto out;
  	}
  
  	rc = tls_push_data(sk, &msg->msg_iter, size,
  			   msg->msg_flags, record_type);
  
  out:
  	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);

  	return rc;
  }
  
  int tls_device_sendpage(struct sock *sk, struct page *page,
  			int offset, size_t size, int flags)
  {
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct iov_iter	msg_iter;
	char *kaddr;
  	struct kvec iov;
  	int rc;
  
  	if (flags & MSG_SENDPAGE_NOTLAST)
  		flags |= MSG_MORE;

	mutex_lock(&tls_ctx->tx_lock);
  	lock_sock(sk);
  
  	if (flags & MSG_OOB) {
		rc = -EOPNOTSUPP;
		goto out;
	}

	kaddr = kmap(page);
  	iov.iov_base = kaddr + offset;
  	iov.iov_len = size;
	iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
  	rc = tls_push_data(sk, &msg_iter, size,
  			   flags, TLS_RECORD_TYPE_DATA);
  	kunmap(page);
  
  out:
  	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);

	return rc;
}

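/* Look up the record containing TCP sequence @seq for retransmission and
 * return its record serial number through @p_record_sn; retransmit_hint
 * caches the last hit. Runs under RCU as drivers call it from the datapath.
 */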
  struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
  				       u32 seq, u64 *p_record_sn)
  {
  	u64 record_sn = context->hint_record_sn;
	struct tls_record_info *info, *last;
  
  	info = context->retransmit_hint;
  	if (!info ||
  	    before(seq, info->end_seq - info->len)) {
  		/* if retransmit_hint is irrelevant start
		 * from the beginning of the list
  		 */
		info = list_first_entry_or_null(&context->records_list,
						struct tls_record_info, list);
		if (!info)
			return NULL;
  		/* send the start_marker record if seq number is before the
  		 * tls offload start marker sequence number. This record is
  		 * required to handle TCP packets which are before TLS offload
  		 * started.
  		 *  And if it's not start marker, look if this seq number
  		 * belongs to the list.
  		 */
  		if (likely(!tls_record_is_start_marker(info))) {
  			/* we have the first record, get the last record to see
  			 * if this seq number belongs to the list.
  			 */
  			last = list_last_entry(&context->records_list,
  					       struct tls_record_info, list);
  
  			if (!between(seq, tls_record_start_seq(info),
  				     last->end_seq))
  				return NULL;
  		}
		record_sn = context->unacked_record_sn;
	}

  	/* We just need the _rcu for the READ_ONCE() */
  	rcu_read_lock();
  	list_for_each_entry_from_rcu(info, &context->records_list, list) {
  		if (before(seq, info->end_seq)) {
  			if (!context->retransmit_hint ||
  			    after(info->end_seq,
  				  context->retransmit_hint->end_seq)) {
  				context->hint_record_sn = record_sn;
  				context->retransmit_hint = info;
  			}
  			*p_record_sn = record_sn;
			goto exit_rcu_unlock;
		}
		record_sn++;
	}
	info = NULL;

  exit_rcu_unlock:
  	rcu_read_unlock();
  	return info;
}
EXPORT_SYMBOL(tls_get_record);

static int tls_device_push_pending_record(struct sock *sk, int flags)
{
	struct iov_iter	msg_iter;

	iov_iter_kvec(&msg_iter, WRITE, NULL, 0, 0);
	return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
}

  void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
  {
	if (tls_is_partially_sent_record(ctx)) {
		gfp_t sk_allocation = sk->sk_allocation;

		WARN_ON_ONCE(sk->sk_write_pending);
		sk->sk_allocation = GFP_ATOMIC;
		tls_push_partial_record(sk, ctx,
					MSG_DONTWAIT | MSG_NOSIGNAL |
					MSG_SENDPAGE_DECRYPTED);
		sk->sk_allocation = sk_allocation;
	}
  }

static void tls_device_resync_rx(struct tls_context *tls_ctx,
				 struct sock *sk, u32 seq, u8 *rcd_sn)
{
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
  	struct net_device *netdev;
  
  	if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
  		return;

	trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);

	netdev = READ_ONCE(tls_ctx->netdev);
	if (netdev)
		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
						   TLS_OFFLOAD_CTX_DIR_RX);
	clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
}

  static bool
  tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
			   s64 resync_req, u32 *seq, u16 *rcd_delta)
  {
  	u32 is_async = resync_req & RESYNC_REQ_ASYNC;
  	u32 req_seq = resync_req >> 32;
  	u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
	u16 i;

	*rcd_delta = 0;
  
  	if (is_async) {
  		/* shouldn't get to wraparound:
  		 * too long in async stage, something bad happened
  		 */
  		if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
  			return false;
  		/* asynchronous stage: log all headers seq such that
  		 * req_seq <= seq <= end_seq, and wait for real resync request
  		 */
		if (before(*seq, req_seq))
			return false;
		if (!after(*seq, req_end) &&
		    resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
			resync_async->log[resync_async->loglen++] = *seq;
		resync_async->rcd_delta++;

  		return false;
  	}
  
  	/* synchronous stage: check against the logged entries and
  	 * proceed to check the next entries if no match was found
  	 */
  	for (i = 0; i < resync_async->loglen; i++)
  		if (req_seq == resync_async->log[i] &&
  		    atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) {
  			*rcd_delta = resync_async->rcd_delta - i;
			*seq = req_seq;
			resync_async->loglen = 0;
			resync_async->rcd_delta = 0;
			return true;
		}
  
  	resync_async->loglen = 0;
  	resync_async->rcd_delta = 0;
  
  	if (req_seq == *seq &&
  	    atomic64_try_cmpxchg(&resync_async->req,
  				 &resync_req, 0))
  		return true;
  
  	return false;
  }
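/* Called for every full record received at TCP sequence @seq: depending on
 * rx_ctx->resync_type, decide whether to send the device a fresh
 * (seq, record serial number) pair via tls_device_resync_rx().
 */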
  void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
  {
  	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx;
	u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
	u32 sock_data, is_req_pending;
	struct tls_prot_info *prot;
	s64 resync_req;
	u16 rcd_delta;
  	u32 req_seq;
  
  	if (tls_ctx->rx_conf != TLS_HW)
  		return;

	prot = &tls_ctx->prot_info;
	rx_ctx = tls_offload_ctx_rx(tls_ctx);
  	memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
  
  	switch (rx_ctx->resync_type) {
  	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
  		resync_req = atomic64_read(&rx_ctx->resync_req);
  		req_seq = resync_req >> 32;
  		seq += TLS_HEADER_SIZE - 1;
		is_req_pending = resync_req;

  		if (likely(!is_req_pending) || req_seq != seq ||
  		    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
  			return;
  		break;
  	case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
  		if (likely(!rx_ctx->resync_nh_do_now))
  			return;
  
  		/* head of next rec is already in, note that the sock_inq will
  		 * include the currently parsed message when called from parser
  		 */
  		sock_data = tcp_inq(sk);
  		if (sock_data > rcd_len) {
  			trace_tls_device_rx_resync_nh_delay(sk, sock_data,
  							    rcd_len);
			return;
		}
  
  		rx_ctx->resync_nh_do_now = 0;
  		seq += rcd_len;
  		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
  		break;
  	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC:
  		resync_req = atomic64_read(&rx_ctx->resync_async->req);
  		is_req_pending = resync_req;
  		if (likely(!is_req_pending))
  			return;
  
  		if (!tls_device_rx_resync_async(rx_ctx->resync_async,
						resync_req, &seq, &rcd_delta))
			return;
		tls_bigint_subtract(rcd_sn, rcd_delta);
  		break;
  	}
  
  	tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
  }
  
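/* CORE_NEXT_HINT mode: the stack picks resync points itself. After enough
 * consecutive decryption failures (the target doubles up to
 * TLS_DEVICE_RESYNC_NH_MAX_IVAL), offer the device the next record boundary.
 */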
  static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
  					   struct tls_offload_context_rx *ctx,
  					   struct sock *sk, struct sk_buff *skb)
  {
  	struct strp_msg *rxm;
  
  	/* device will request resyncs by itself based on stream scan */
  	if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
  		return;
  	/* already scheduled */
  	if (ctx->resync_nh_do_now)
  		return;
  	/* seen decrypted fragments since last fully-failed record */
  	if (ctx->resync_nh_reset) {
  		ctx->resync_nh_reset = 0;
  		ctx->resync_nh.decrypted_failed = 1;
  		ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
  		return;
  	}
  
  	if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
  		return;
  
  	/* doing resync, bump the next target in case it fails */
  	if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
  		ctx->resync_nh.decrypted_tgt *= 2;
  	else
  		ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;
  
  	rxm = strp_msg(skb);
  
  	/* head of next rec is already in, parser will sync for us */
  	if (tcp_inq(sk) > rxm->full_len) {
		trace_tls_device_rx_resync_nh_schedule(sk);
  		ctx->resync_nh_do_now = 1;
  	} else {
  		struct tls_prot_info *prot = &tls_ctx->prot_info;
  		u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
  
  		memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
  		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
  
  		tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
  				     rcd_sn);
  	}
  }
  
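/* The device decrypted only part of the record. Running the AES-GCM
 * decrypt (a CTR keystream XOR) over the mixed skb re-encrypts the parts
 * that were already plaintext; writing those back over the fragments marked
 * decrypted leaves the whole record as ciphertext for the SW path. The
 * -EBADMSG tag failure from decrypt_skb() is expected and ignored.
 */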
  static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
  {
  	struct strp_msg *rxm = strp_msg(skb);
	int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
  	struct sk_buff *skb_iter, *unused;
  	struct scatterlist sg[1];
  	char *orig_buf, *buf;
  
  	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE +
  			   TLS_CIPHER_AES_GCM_128_IV_SIZE, sk->sk_allocation);
  	if (!orig_buf)
  		return -ENOMEM;
  	buf = orig_buf;
  
  	nsg = skb_cow_data(skb, 0, &unused);
  	if (unlikely(nsg < 0)) {
  		err = nsg;
  		goto free_buf;
  	}
  
  	sg_init_table(sg, 1);
  	sg_set_buf(&sg[0], buf,
  		   rxm->full_len + TLS_HEADER_SIZE +
  		   TLS_CIPHER_AES_GCM_128_IV_SIZE);
	err = skb_copy_bits(skb, offset, buf,
			    TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);
	if (err)
		goto free_buf;
  
  	/* We are interested only in the decrypted data not the auth */
  	err = decrypt_skb(sk, skb, sg);
  	if (err != -EBADMSG)
  		goto free_buf;
  	else
  		err = 0;
	data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;

	if (skb_pagelen(skb) > offset) {
		copy = min_t(int, skb_pagelen(skb) - offset, data_len);

  		if (skb->decrypted) {
  			err = skb_store_bits(skb, offset, buf, copy);
  			if (err)
  				goto free_buf;
  		}

  		offset += copy;
  		buf += copy;
  	}

	pos = skb_pagelen(skb);
	skb_walk_frags(skb, skb_iter) {
  		int frag_pos;
  
  		/* Practically all frags must belong to msg if reencrypt
  		 * is needed with current strparser and coalescing logic,
  		 * but strparser may "get optimized", so let's be safe.
  		 */
  		if (pos + skb_iter->len <= offset)
  			goto done_with_frag;
  		if (pos >= data_len + rxm->offset)
  			break;
  
  		frag_pos = offset - pos;
  		copy = min_t(int, skb_iter->len - frag_pos,
  			     data_len + rxm->offset - offset);

  		if (skb_iter->decrypted) {
  			err = skb_store_bits(skb_iter, frag_pos, buf, copy);
  			if (err)
  				goto free_buf;
  		}
  
  		offset += copy;
  		buf += copy;
  done_with_frag:
  		pos += skb_iter->len;
  	}
  
  free_buf:
  	kfree(orig_buf);
  	return err;
  }
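/* Per-record RX dispatch: fully decrypted records skip SW crypto entirely,
 * fully encrypted ones may schedule a resync, and mixed records are
 * normalized back to ciphertext via tls_device_reencrypt().
 */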
  int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
  			 struct sk_buff *skb, struct strp_msg *rxm)
  {
  	struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
  	int is_decrypted = skb->decrypted;
  	int is_encrypted = !is_decrypted;
  	struct sk_buff *skb_iter;
  	/* Check if all the data is decrypted already */
  	skb_walk_frags(skb, skb_iter) {
  		is_decrypted &= skb_iter->decrypted;
  		is_encrypted &= !skb_iter->decrypted;
  	}
  	trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
  				   tls_ctx->rx.rec_seq, rxm->full_len,
  				   is_encrypted, is_decrypted);
  	ctx->sw.decrypted |= is_decrypted;
  	/* Return immediately if the record is either entirely plaintext or
  	 * entirely ciphertext. Otherwise handle reencrypt partially decrypted
  	 * record.
  	 */
  	if (is_decrypted) {
  		ctx->resync_nh_reset = 1;
  		return 0;
  	}
  	if (is_encrypted) {
  		tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
  		return 0;
  	}
  
  	ctx->resync_nh_reset = 1;
  	return tls_device_reencrypt(sk, skb);
}

  static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
  			      struct net_device *netdev)
  {
  	if (sk->sk_destruct != tls_device_sk_destruct) {
  		refcount_set(&ctx->refcount, 1);
  		dev_hold(netdev);
  		ctx->netdev = netdev;
  		spin_lock_irq(&tls_device_lock);
  		list_add_tail(&ctx->list, &tls_device_list);
  		spin_unlock_irq(&tls_device_lock);
  
  		ctx->sk_destruct = sk->sk_destruct;
		smp_store_release(&sk->sk_destruct, tls_device_sk_destruct);
  	}
  }
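/* Install TX offload on a connected socket: validate crypto parameters,
 * seed the record list with a zero-length start marker at the current
 * write_seq, then program the key into the device with tls_dev_add() while
 * holding device_offload_lock against NETDEV_DOWN.
 */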
  int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
  {
  	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_record_info *start_marker_record;
	struct tls_offload_context_tx *offload_ctx;
  	struct tls_crypto_info *crypto_info;
  	struct net_device *netdev;
  	char *iv, *rec_seq;
  	struct sk_buff *skb;
	__be64 rcd_sn;
	int rc;
  
  	if (!ctx)
		return -EINVAL;

  	if (ctx->priv_ctx_tx)
  		return -EEXIST;
  
  	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
  	if (!start_marker_record)
  		return -ENOMEM;
  	offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
  	if (!offload_ctx) {
  		rc = -ENOMEM;
  		goto free_marker_record;
  	}
  	crypto_info = &ctx->crypto_send.info;
  	if (crypto_info->version != TLS_1_2_VERSION) {
  		rc = -EOPNOTSUPP;
  		goto free_offload_ctx;
  	}
  	switch (crypto_info->cipher_type) {
  	case TLS_CIPHER_AES_GCM_128:
  		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
  		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
  		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
  		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
  		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
  		rec_seq =
  		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
  		break;
  	default:
  		rc = -EINVAL;
  		goto free_offload_ctx;
  	}
  	/* Sanity-check the rec_seq_size for stack allocations */
  	if (rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
  		rc = -EINVAL;
  		goto free_offload_ctx;
  	}
  	prot->version = crypto_info->version;
  	prot->cipher_type = crypto_info->cipher_type;
  	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
  	prot->tag_size = tag_size;
  	prot->overhead_size = prot->prepend_size + prot->tag_size;
  	prot->iv_size = iv_size;
  	ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
  			     GFP_KERNEL);
  	if (!ctx->tx.iv) {
  		rc = -ENOMEM;
  		goto free_offload_ctx;
  	}
  
  	memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
	prot->rec_seq_size = rec_seq_size;
  	ctx->tx.rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
  	if (!ctx->tx.rec_seq) {
  		rc = -ENOMEM;
  		goto free_iv;
  	}
  
  	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
  	if (rc)
  		goto free_rec_seq;
  
  	/* start at rec_seq - 1 to account for the start marker record */
  	memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
  	offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;
  
  	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
  	start_marker_record->len = 0;
  	start_marker_record->num_frags = 0;
  
  	INIT_LIST_HEAD(&offload_ctx->records_list);
  	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
  	spin_lock_init(&offload_ctx->lock);
  	sg_init_table(offload_ctx->sg_tx_data,
  		      ARRAY_SIZE(offload_ctx->sg_tx_data));
  
  	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
  	ctx->push_pending_record = tls_device_push_pending_record;
  
  	/* TLS offload is greatly simplified if we don't send
  	 * SKBs where only part of the payload needs to be encrypted.
  	 * So mark the last skb in the write queue as end of record.
  	 */
  	skb = tcp_write_queue_tail(sk);
  	if (skb)
  		TCP_SKB_CB(skb)->eor = 1;
  	netdev = get_netdev_for_sock(sk);
  	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
  		rc = -EINVAL;
		goto disable_cad;
  	}
  
  	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
  		rc = -EOPNOTSUPP;
  		goto release_netdev;
  	}
  
  	/* Avoid offloading if the device is down
  	 * We don't want to offload new flows after
  	 * the NETDEV_DOWN event
	 *
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
	 * handler thus protecting from the device going down before
	 * ctx was added to tls_device_list.
	 */
  	down_read(&device_offload_lock);
  	if (!(netdev->flags & IFF_UP)) {
  		rc = -EINVAL;
		goto release_lock;
  	}
  
  	ctx->priv_ctx_tx = offload_ctx;
  	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
  					     &ctx->crypto_send.info,
  					     tcp_sk(sk)->write_seq);
  	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX,
  				     tcp_sk(sk)->write_seq, rec_seq, rc);
  	if (rc)
  		goto release_lock;

  	tls_device_attach(ctx, sk, netdev);
  	up_read(&device_offload_lock);

  	/* following this assignment tls_is_sk_tx_device_offloaded
  	 * will return true and the context might be accessed
  	 * by the netdev's xmit function.
  	 */
  	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
  	dev_put(netdev);
  
  	return 0;

  release_lock:
  	up_read(&device_offload_lock);
  release_netdev:
  	dev_put(netdev);
  disable_cad:
  	clean_acked_data_disable(inet_csk(sk));
  	crypto_free_aead(offload_ctx->aead_send);
  free_rec_seq:
  	kfree(ctx->tx.rec_seq);
  free_iv:
  	kfree(ctx->tx.iv);
  free_offload_ctx:
  	kfree(offload_ctx);
  	ctx->priv_ctx_tx = NULL;
  free_marker_record:
  	kfree(start_marker_record);
  	return rc;
  }
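/* Install RX offload: the SW RX context set up by tls_set_sw_offload()
 * remains in place as the fallback, while the device decrypts in-line and
 * marks the skbs it handled with skb->decrypted.
 */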
  int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
  {
  	struct tls12_crypto_info_aes_gcm_128 *info;
  	struct tls_offload_context_rx *context;
  	struct net_device *netdev;
  	int rc = 0;
  	if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
  		return -EOPNOTSUPP;
  	netdev = get_netdev_for_sock(sk);
  	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
  		return -EINVAL;
  	}
  
  	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
  		rc = -EOPNOTSUPP;
  		goto release_netdev;
  	}
  
  	/* Avoid offloading if the device is down
  	 * We don't want to offload new flows after
  	 * the NETDEV_DOWN event
	 *
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
	 * handler thus protecting from the device going down before
	 * ctx was added to tls_device_list.
  	 */
  	down_read(&device_offload_lock);
  	if (!(netdev->flags & IFF_UP)) {
  		rc = -EINVAL;
  		goto release_lock;
  	}
  
  	context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
  	if (!context) {
  		rc = -ENOMEM;
  		goto release_lock;
  	}
  	context->resync_nh_reset = 1;
  
  	ctx->priv_ctx_rx = context;
  	rc = tls_set_sw_offload(sk, ctx, 0);
  	if (rc)
  		goto release_ctx;
  
  	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
  					     &ctx->crypto_recv.info,
  					     tcp_sk(sk)->copied_seq);
  	info = (void *)&ctx->crypto_recv.info;
  	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_RX,
  				     tcp_sk(sk)->copied_seq, info->rec_seq, rc);
  	if (rc)
  		goto free_sw_resources;
  
  	tls_device_attach(ctx, sk, netdev);
  	up_read(&device_offload_lock);
  
  	dev_put(netdev);
  
  	return 0;
  
  free_sw_resources:
  	up_read(&device_offload_lock);
  	tls_sw_free_resources_rx(sk);
  	down_read(&device_offload_lock);
  release_ctx:
  	ctx->priv_ctx_rx = NULL;
  release_lock:
  	up_read(&device_offload_lock);
  release_netdev:
  	dev_put(netdev);
  	return rc;
  }
  
  void tls_device_offload_cleanup_rx(struct sock *sk)
  {
  	struct tls_context *tls_ctx = tls_get_ctx(sk);
  	struct net_device *netdev;
  
  	down_read(&device_offload_lock);
  	netdev = tls_ctx->netdev;
  	if (!netdev)
  		goto out;
  	netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
  					TLS_OFFLOAD_CTX_DIR_RX);
  
  	if (tls_ctx->tx_conf != TLS_HW) {
  		dev_put(netdev);
  		tls_ctx->netdev = NULL;
  	} else {
  		set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
  	}
  out:
  	up_read(&device_offload_lock);
  	tls_sw_release_resources_rx(sk);
  }
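/* NETDEV_DOWN handler: move every context bound to this netdev off
 * tls_device_list, tear down its HW state under the device_offload_lock
 * write lock, and leave the sockets to the SW fallback.
 */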
  static int tls_device_down(struct net_device *netdev)
  {
  	struct tls_context *ctx, *tmp;
  	unsigned long flags;
  	LIST_HEAD(list);
  
  	/* Request a write lock to block new offload attempts */
  	down_write(&device_offload_lock);
  
  	spin_lock_irqsave(&tls_device_lock, flags);
  	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
  		if (ctx->netdev != netdev ||
  		    !refcount_inc_not_zero(&ctx->refcount))
  			continue;
  
  		list_move(&ctx->list, &list);
  	}
  	spin_unlock_irqrestore(&tls_device_lock, flags);
  
  	list_for_each_entry_safe(ctx, tmp, &list, list)	{
  		if (ctx->tx_conf == TLS_HW)
  			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
  							TLS_OFFLOAD_CTX_DIR_TX);
  		if (ctx->rx_conf == TLS_HW &&
  		    !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
  			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
  							TLS_OFFLOAD_CTX_DIR_RX);
  		WRITE_ONCE(ctx->netdev, NULL);
  		smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
  		while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
  			usleep_range(10, 200);
  		dev_put(netdev);
  		list_del_init(&ctx->list);
  
  		if (refcount_dec_and_test(&ctx->refcount))
  			tls_device_free_ctx(ctx);
  	}
  
  	up_write(&device_offload_lock);
  
  	flush_work(&tls_device_gc_work);
  
  	return NOTIFY_DONE;
  }
  
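/* For illustration (the foo_* names are hypothetical), a driver offering
 * kTLS offload is expected to publish:
 *
 *	static const struct tlsdev_ops foo_tlsdev_ops = {
 *		.tls_dev_add	= foo_ktls_add,
 *		.tls_dev_del	= foo_ktls_del,
 *		.tls_dev_resync	= foo_ktls_resync,
 *	};
 *	netdev->tlsdev_ops = &foo_tlsdev_ops;
 *
 * tls_dev_add and tls_dev_del are required below; tls_dev_resync is
 * required only when NETIF_F_HW_TLS_RX is advertised.
 */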
  static int tls_dev_event(struct notifier_block *this, unsigned long event,
  			 void *ptr)
  {
  	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
  	if (!dev->tlsdev_ops &&
  	    !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
  		return NOTIFY_DONE;
  
  	switch (event) {
  	case NETDEV_REGISTER:
  	case NETDEV_FEAT_CHANGE:
  		if ((dev->features & NETIF_F_HW_TLS_RX) &&
  		    !dev->tlsdev_ops->tls_dev_resync)
  			return NOTIFY_BAD;
		if (dev->tlsdev_ops &&
  		     dev->tlsdev_ops->tls_dev_add &&
  		     dev->tlsdev_ops->tls_dev_del)
  			return NOTIFY_DONE;
  		else
  			return NOTIFY_BAD;
  	case NETDEV_DOWN:
  		return tls_device_down(dev);
  	}
  	return NOTIFY_DONE;
  }
  
  static struct notifier_block tls_dev_notifier = {
  	.notifier_call	= tls_dev_event,
  };
  
  void __init tls_device_init(void)
  {
  	register_netdevice_notifier(&tls_dev_notifier);
  }
  
  void __exit tls_device_cleanup(void)
  {
  	unregister_netdevice_notifier(&tls_dev_notifier);
  	flush_work(&tls_device_gc_work);
  	clean_acked_data_flush();
  }