Blame view

crypto/ablkcipher.c 13 KB
b5b7f0886   Herbert Xu   [CRYPTO] api: Add...
1
2
  /*
   * Asynchronous block chaining cipher operations.
c4ede64a6   Richard Hartmann   crypto: ablkciphe...
3
   *
b5b7f0886   Herbert Xu   [CRYPTO] api: Add...
4
5
6
7
8
9
10
   * This is the asynchronous version of blkcipher.c indicating completion
   * via a callback.
   *
   * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
   *
   * This program is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License as published by the Free
c4ede64a6   Richard Hartmann   crypto: ablkciphe...
11
   * Software Foundation; either version 2 of the License, or (at your option)
b5b7f0886   Herbert Xu   [CRYPTO] api: Add...
12
13
14
   * any later version.
   *
   */
378f4f51f   Herbert Xu   [CRYPTO] skcipher...
15
16
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>
#include "internal.h"
bf06099db   David S. Miller   crypto: skcipher ...
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
/*
 * Deferred copy-back record for the slow walk path: "data" holds "len"
 * bytes in a bounce buffer that still need to be written out to the
 * destination scatterlist position saved in "dst".
 */
struct ablkcipher_buffer {
	struct list_head	entry;	/* link in ablkcipher_walk->buffers */
	struct scatter_walk	dst;	/* destination to flush the data to */
	unsigned int		len;	/* number of valid bytes at "data" */
	void			*data;	/* bounce buffer holding the bytes */
};
  
  enum {
  	ABLKCIPHER_WALK_SLOW = 1 << 0,
  };
  
/* Flush one queued bounce buffer back into its destination scatterlist. */
static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}
  
  void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
  {
  	struct ablkcipher_buffer *p, *tmp;
  
  	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
  		ablkcipher_buffer_write(p);
  		list_del(&p->entry);
  		kfree(p);
  	}
  }
  EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);
  
  static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
  					  struct ablkcipher_buffer *p)
  {
  	p->dst = walk->out;
  	list_add_tail(&p->entry, &walk->buffers);
  }
  
  /* Get a spot of the specified length that does not straddle a page.
   * The caller needs to ensure that there is enough space for this operation.
   */
  static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
  {
  	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
a861afbc9   Joshua I. James   crypto: ablkciphe...
67

bf06099db   David S. Miller   crypto: skcipher ...
68
69
  	return max(start, end_page);
  }
68432fd16   Eric Biggers   crypto: ablkciphe...
70
71
/*
 * Advance the output walk past "n" bytes that were processed via a
 * bounce buffer, stepping across as many scatterlist entries as the
 * span covers.  The data itself is copied back later, from
 * __ablkcipher_walk_complete().
 */
static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
					unsigned int n)
{
	for (;;) {
		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

		if (len_this_page > n)
			len_this_page = n;
		/* NOTE(review): advances by the full remaining "n"; any
		 * overshoot appears to be discarded when scatterwalk_start()
		 * below resets the position to the next sg entry — confirm. */
		scatterwalk_advance(&walk->out, n);
		if (n == len_this_page)
			break;
		n -= len_this_page;
		scatterwalk_start(&walk->out, sg_next(walk->out.sg));
	}
}
68432fd16   Eric Biggers   crypto: ablkciphe...
85
86
  static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
  					unsigned int n)
bf06099db   David S. Miller   crypto: skcipher ...
87
88
89
  {
  	scatterwalk_advance(&walk->in, n);
  	scatterwalk_advance(&walk->out, n);
bf06099db   David S. Miller   crypto: skcipher ...
90
91
92
93
94
95
96
97
98
  }
  
static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk);

/*
 * Finish one step of the walk.  "err" is either a negative error code,
 * or the number of bytes the cipher left unprocessed (>= 0).  Returns 0
 * once the whole request has been consumed, a negative error code on
 * failure, or the result of setting up the next step otherwise.
 */
int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int n; /* bytes processed */
	bool more;

	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
		/* in-place fast path: just advance both walks */
		ablkcipher_done_fast(walk, n);
	} else {
		if (WARN_ON(err)) {
			/* unexpected case; didn't process all bytes */
			err = -EINVAL;
			goto finish;
		}
		ablkcipher_done_slow(walk, n);
	}

	/* second argument 1 marks the output side as written */
	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		crypto_yield(req->base.flags);
		return ablkcipher_walk_next(req, walk);
	}

	err = 0;
finish:
	walk->nbytes = 0;
	/* if the IV was bounced into an aligned copy, propagate it back */
	if (walk->iv != req->info)
		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
	kfree(walk->iv_buffer);
	return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
  
/*
 * Slow path: the current chunk is misaligned or straddles a page, so
 * allocate a bounce buffer, gather the input into it and queue it for
 * copy-back to the destination after processing.  Returns 0 on success
 * or the result of ablkcipher_walk_done() on allocation failure.
 */
static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk,
				       unsigned int bsize,
				       unsigned int alignmask,
				       void **src_p, void **dst_p)
{
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
	struct ablkcipher_buffer *p;
	void *src, *dst, *base;
	unsigned int n;

	/* header plus enough slack for an aligned, non-page-straddling spot */
	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
	n += (aligned_bsize * 3 - (alignmask + 1) +
	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

	p = kmalloc(n, GFP_ATOMIC);
	if (!p)
		return ablkcipher_walk_done(req, walk, -ENOMEM);

	base = p + 1;	/* payload area starts right after the header */

	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
	src = dst = ablkcipher_get_spot(dst, bsize);	/* in-place buffer */

	p->len = bsize;
	p->data = dst;

	/* gather the (possibly scattered) input into the bounce buffer */
	scatterwalk_copychunks(src, &walk->in, bsize, 0);

	ablkcipher_queue_write(walk, p);

	walk->nbytes = bsize;
	walk->flags |= ABLKCIPHER_WALK_SLOW;

	*src_p = src;
	*dst_p = dst;

	return 0;
}
  
/*
 * The caller's IV does not satisfy the cipher's alignmask: allocate a
 * scratch buffer, place a correctly aligned (and non-page-straddling)
 * copy of the IV in it, and point walk->iv at that copy.
 */
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
				     struct crypto_tfm *tfm,
				     unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	/* worst-case room for two aligned block spots plus the IV itself */
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->iv_buffer)
		return -ENOMEM;

	/* skip past two block-sized spots, then find a spot for the IV */
	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}
  
  static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
  				       struct ablkcipher_walk *walk)
  {
  	walk->src.page = scatterwalk_page(&walk->in);
  	walk->src.offset = offset_in_page(walk->in.offset);
  	walk->dst.page = scatterwalk_page(&walk->out);
  	walk->dst.offset = offset_in_page(walk->out.offset);
  
  	return 0;
  }
  
/*
 * Set up the next segment of the walk, choosing between the in-place
 * fast path and the bounce-buffer slow path.
 */
static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask, bsize, n;
	void *src, *dst;
	int err;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	n = walk->total;
	/* a trailing partial block cannot be processed */
	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return ablkcipher_walk_done(req, walk, -EINVAL);
	}

	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
	src = dst = NULL;

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	/* short or misaligned data must go through a bounce buffer */
	if (n < bsize ||
	    !scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
					   &src, &dst);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
	if (err >= 0) {
		/* express the bounce buffer in page/offset form */
		walk->src.page = virt_to_page(src);
		walk->dst.page = virt_to_page(dst);
		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
	}

	return err;
}
  
/*
 * Initialise the walk state for a request and kick off the first step.
 * Returns 0 immediately for an empty request.
 */
static int ablkcipher_walk_first(struct ablkcipher_request *req,
				 struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	/* walking must not be attempted from hard interrupt context */
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = req->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->iv_buffer = NULL;
	/* bounce the IV into an aligned copy if the caller's is misaligned */
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = ablkcipher_copy_iv(walk, tfm, alignmask);

		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);

	return ablkcipher_walk_next(req, walk);
}
  
/*
 * Begin a physical-address walk over the request's scatterlists,
 * stepping in units of the algorithm's block size.
 */
int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk)
{
	walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
	return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
791b4d5f7   Herbert Xu   [CRYPTO] api: Add...
292
293
  static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
  			    unsigned int keylen)
ca7c39385   Sebastian Siewior   [CRYPTO] api: Han...
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
  {
  	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
  	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
  	int ret;
  	u8 *buffer, *alignbuffer;
  	unsigned long absize;
  
  	absize = keylen + alignmask;
  	buffer = kmalloc(absize, GFP_ATOMIC);
  	if (!buffer)
  		return -ENOMEM;
  
  	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
  	memcpy(alignbuffer, key, keylen);
  	ret = cipher->setkey(tfm, alignbuffer, keylen);
068171767   Sebastian Siewior   [CRYPTO] api: fix...
309
  	memset(alignbuffer, 0, keylen);
ca7c39385   Sebastian Siewior   [CRYPTO] api: Han...
310
311
312
  	kfree(buffer);
  	return ret;
  }
b5b7f0886   Herbert Xu   [CRYPTO] api: Add...
313
314
315
316
/*
 * Common ->setkey() entry point: validate the key length, then dispatch
 * to the algorithm directly or via the unaligned bounce-buffer helper.
 * The length check must stay before the dispatch — setkey_unaligned()
 * does not re-validate keylen.
 */
static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	/* copy through an aligned bounce buffer if the key is misaligned */
	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}
  
/* Context size for an ablkcipher transform is exactly what the algorithm
 * declares; type and mask are unused here. */
static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					      u32 mask)
{
	return alg->cra_ctxsize;
}
  
  static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
  				      u32 mask)
  {
  	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
  	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
  
  	if (alg->ivsize > PAGE_SIZE / 8)
  		return -EINVAL;
  
  	crt->setkey = setkey;
  	crt->encrypt = alg->encrypt;
  	crt->decrypt = alg->decrypt;
ecfc43292   Herbert Xu   [CRYPTO] skcipher...
346
  	crt->base = __crypto_ablkcipher_cast(tfm);
b5b7f0886   Herbert Xu   [CRYPTO] api: Add...
347
348
349
350
  	crt->ivsize = alg->ivsize;
  
  	return 0;
  }
3acc84739   Herbert Xu   crypto: algapi - ...
351
  #ifdef CONFIG_NET
29ffc8764   Steffen Klassert   crypto: Add users...
352
353
354
  static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
  {
  	struct crypto_report_blkcipher rblkcipher;
9a5467bf7   Mathias Krause   crypto: user - fi...
355
356
357
  	strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
  	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
  		sizeof(rblkcipher.geniv));
29db27723   Stafford Horne   crypto: skcipher ...
358
  	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
29ffc8764   Steffen Klassert   crypto: Add users...
359
360
361
362
363
  
  	rblkcipher.blocksize = alg->cra_blocksize;
  	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
  	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
  	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;
6662df33f   David S. Miller   crypto: Stop usin...
364
365
366
  	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
  		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
  		goto nla_put_failure;
29ffc8764   Steffen Klassert   crypto: Add users...
367
368
369
370
371
  	return 0;
  
  nla_put_failure:
  	return -EMSGSIZE;
  }
3acc84739   Herbert Xu   crypto: algapi - ...
372
373
374
375
376
377
  #else
  static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
  {
  	return -ENOSYS;
  }
  #endif
29ffc8764   Steffen Klassert   crypto: Add users...
378

b5b7f0886   Herbert Xu   [CRYPTO] api: Add...
379
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
/* /proc/crypto formatter for ablkcipher algorithms (PROC_FS builds only). */
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : ablkcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
}
  
/* crypto_type glue wiring the ablkcipher interface into the crypto API. */
const struct crypto_type crypto_ablkcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_ablkcipher_show,
#endif
	.report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
61da88e2b   Herbert Xu   [CRYPTO] skcipher...
411
412
413
414
415
416
417
418
  static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
  				      u32 mask)
  {
  	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
  	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
  
  	if (alg->ivsize > PAGE_SIZE / 8)
  		return -EINVAL;
ecfc43292   Herbert Xu   [CRYPTO] skcipher...
419
420
  	crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
  		      alg->setkey : setkey;
61da88e2b   Herbert Xu   [CRYPTO] skcipher...
421
422
  	crt->encrypt = alg->encrypt;
  	crt->decrypt = alg->decrypt;
ecfc43292   Herbert Xu   [CRYPTO] skcipher...
423
  	crt->base = __crypto_ablkcipher_cast(tfm);
61da88e2b   Herbert Xu   [CRYPTO] skcipher...
424
425
426
427
  	crt->ivsize = alg->ivsize;
  
  	return 0;
  }
3acc84739   Herbert Xu   crypto: algapi - ...
428
  #ifdef CONFIG_NET
3e29c1095   Steffen Klassert   crypto: Add users...
429
430
431
  static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
  {
  	struct crypto_report_blkcipher rblkcipher;
9a5467bf7   Mathias Krause   crypto: user - fi...
432
433
434
  	strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
  	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
  		sizeof(rblkcipher.geniv));
29db27723   Stafford Horne   crypto: skcipher ...
435
  	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
3e29c1095   Steffen Klassert   crypto: Add users...
436
437
438
439
440
  
  	rblkcipher.blocksize = alg->cra_blocksize;
  	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
  	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
  	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;
6662df33f   David S. Miller   crypto: Stop usin...
441
442
443
  	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
  		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
  		goto nla_put_failure;
3e29c1095   Steffen Klassert   crypto: Add users...
444
445
446
447
448
  	return 0;
  
  nla_put_failure:
  	return -EMSGSIZE;
  }
3acc84739   Herbert Xu   crypto: algapi - ...
449
450
451
452
453
454
  #else
  static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
  {
  	return -ENOSYS;
  }
  #endif
3e29c1095   Steffen Klassert   crypto: Add users...
455

61da88e2b   Herbert Xu   [CRYPTO] skcipher...
456
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
/* /proc/crypto formatter for givcipher algorithms (PROC_FS builds only). */
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : givcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
}
  
/* crypto_type glue wiring the givcipher interface into the crypto API. */
const struct crypto_type crypto_givcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_givcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_givcipher_show,
#endif
	.report = crypto_givcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);