Blame view

crypto/ablkcipher.c 12.9 KB
b5b7f0886   Herbert Xu   [CRYPTO] api: Add...
1
2
  /*
   * Asynchronous block chaining cipher operations.
c4ede64a6   Richard Hartmann   crypto: ablkciphe...
3
   *
b5b7f0886   Herbert Xu   [CRYPTO] api: Add...
4
5
6
7
8
9
10
   * This is the asynchronous version of blkcipher.c indicating completion
   * via a callback.
   *
   * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
   *
   * This program is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License as published by the Free
c4ede64a6   Richard Hartmann   crypto: ablkciphe...
11
   * Software Foundation; either version 2 of the License, or (at your option)
b5b7f0886   Herbert Xu   [CRYPTO] api: Add...
12
13
14
   * any later version.
   *
   */
378f4f51f   Herbert Xu   [CRYPTO] skcipher...
15
16
  #include <crypto/internal/skcipher.h>
  #include <linux/err.h>
791b4d5f7   Herbert Xu   [CRYPTO] api: Add...
17
  #include <linux/kernel.h>
791b4d5f7   Herbert Xu   [CRYPTO] api: Add...
18
  #include <linux/slab.h>
b5b7f0886   Herbert Xu   [CRYPTO] api: Add...
19
  #include <linux/seq_file.h>
29ffc8764   Steffen Klassert   crypto: Add users...
20
21
  #include <linux/cryptouser.h>
  #include <net/netlink.h>
b5b7f0886   Herbert Xu   [CRYPTO] api: Add...
22

bf06099db   David S. Miller   crypto: skcipher ...
23
  #include <crypto/scatterwalk.h>
378f4f51f   Herbert Xu   [CRYPTO] skcipher...
24
  #include "internal.h"
bf06099db   David S. Miller   crypto: skcipher ...
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
/*
 * Deferred write-back of data processed in a bounce buffer during a
 * "slow" walk step; queued on walk->buffers and flushed by
 * __ablkcipher_walk_complete().
 */
struct ablkcipher_buffer {
	struct list_head	entry;	/* link in walk->buffers */
	struct scatter_walk	dst;	/* destination position to copy to */
	unsigned int		len;	/* number of bytes to copy */
	void			*data;	/* bounce buffer holding the result */
};

/* walk->flags bits */
enum {
	ABLKCIPHER_WALK_SLOW = 1 << 0,	/* current step uses a bounce buffer */
};
  
/* Flush one queued bounce buffer to its final scatterlist position. */
static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}
  
  void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
  {
  	struct ablkcipher_buffer *p, *tmp;
  
  	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
  		ablkcipher_buffer_write(p);
  		list_del(&p->entry);
  		kfree(p);
  	}
  }
  EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);
  
/*
 * Queue @p for write-back at the current output position; it is flushed
 * later by __ablkcipher_walk_complete().
 */
static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
					  struct ablkcipher_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}
  
  /* Get a spot of the specified length that does not straddle a page.
   * The caller needs to ensure that there is enough space for this operation.
   */
  static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
  {
  	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
a861afbc9   Joshua I. James   crypto: ablkciphe...
66

bf06099db   David S. Miller   crypto: skcipher ...
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
  	return max(start, end_page);
  }
  
  static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
  						unsigned int bsize)
  {
  	unsigned int n = bsize;
  
  	for (;;) {
  		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
  
  		if (len_this_page > n)
  			len_this_page = n;
  		scatterwalk_advance(&walk->out, n);
  		if (n == len_this_page)
  			break;
  		n -= len_this_page;
5be4d4c94   Cristian Stoica   crypto: replace s...
84
  		scatterwalk_start(&walk->out, sg_next(walk->out.sg));
bf06099db   David S. Miller   crypto: skcipher ...
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
  	}
  
  	return bsize;
  }
  
/*
 * Finish an in-place ("fast") step: both walks simply advance by the
 * @n bytes that were processed directly in the scatterlists.
 */
static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
						unsigned int n)
{
	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}
  
  static int ablkcipher_walk_next(struct ablkcipher_request *req,
  				struct ablkcipher_walk *walk);
  
  int ablkcipher_walk_done(struct ablkcipher_request *req,
  			 struct ablkcipher_walk *walk, int err)
  {
  	struct crypto_tfm *tfm = req->base.tfm;
  	unsigned int nbytes = 0;
  
  	if (likely(err >= 0)) {
  		unsigned int n = walk->nbytes - err;
  
  		if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
  			n = ablkcipher_done_fast(walk, n);
  		else if (WARN_ON(err)) {
  			err = -EINVAL;
  			goto err;
  		} else
  			n = ablkcipher_done_slow(walk, n);
  
  		nbytes = walk->total - n;
  		err = 0;
  	}
  
  	scatterwalk_done(&walk->in, 0, nbytes);
  	scatterwalk_done(&walk->out, 1, nbytes);
  
  err:
  	walk->total = nbytes;
  	walk->nbytes = nbytes;
  
  	if (nbytes) {
  		crypto_yield(req->base.flags);
  		return ablkcipher_walk_next(req, walk);
  	}
  
  	if (walk->iv != req->info)
  		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
33c7c0fb2   Davidlohr Bueso   crypto: skcipher ...
137
  	kfree(walk->iv_buffer);
bf06099db   David S. Miller   crypto: skcipher ...
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
  
  	return err;
  }
  EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
  
  static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
  				       struct ablkcipher_walk *walk,
  				       unsigned int bsize,
  				       unsigned int alignmask,
  				       void **src_p, void **dst_p)
  {
  	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
  	struct ablkcipher_buffer *p;
  	void *src, *dst, *base;
  	unsigned int n;
  
  	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
  	n += (aligned_bsize * 3 - (alignmask + 1) +
  	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));
  
  	p = kmalloc(n, GFP_ATOMIC);
  	if (!p)
2716fbf63   Jiri Slaby   crypto: skcipher ...
160
  		return ablkcipher_walk_done(req, walk, -ENOMEM);
bf06099db   David S. Miller   crypto: skcipher ...
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
  
  	base = p + 1;
  
  	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
  	src = dst = ablkcipher_get_spot(dst, bsize);
  
  	p->len = bsize;
  	p->data = dst;
  
  	scatterwalk_copychunks(src, &walk->in, bsize, 0);
  
  	ablkcipher_queue_write(walk, p);
  
  	walk->nbytes = bsize;
  	walk->flags |= ABLKCIPHER_WALK_SLOW;
  
  	*src_p = src;
  	*dst_p = dst;
  
  	return 0;
  }
  
/*
 * Bounce the IV into a freshly allocated, alignmask-aligned buffer that
 * does not straddle a page boundary.  On success walk->iv points at the
 * aligned copy (backed by walk->iv_buffer, freed in ablkcipher_walk_done()).
 */
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
				     struct crypto_tfm *tfm,
				     unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	/* Worst-case room for two aligned blocks plus the IV, minus the
	 * alignment slack kmalloc's return already provides. */
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->iv_buffer)
		return -ENOMEM;

	/* Skip past two aligned block-sized spots, then place the IV in a
	 * non-page-straddling spot of its own. */
	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}
  
/*
 * Fast path: expose the current in/out positions directly as
 * page/offset pairs — no bounce copy needed.
 */
static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk)
{
	walk->src.page = scatterwalk_page(&walk->in);
	walk->src.offset = offset_in_page(walk->in.offset);
	walk->dst.page = scatterwalk_page(&walk->out);
	walk->dst.offset = offset_in_page(walk->out.offset);

	return 0;
}
  
/*
 * Set up the next walk step.  Chooses the fast path (direct scatterlist
 * access) when both walks have at least a block of contiguous, properly
 * aligned data, otherwise falls back to a bounce buffer via
 * ablkcipher_next_slow().
 */
static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask, bsize, n;
	void *src, *dst;
	int err;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	n = walk->total;
	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
		/* Less than one block remaining: bad request length. */
		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return ablkcipher_walk_done(req, walk, -EINVAL);
	}

	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
	src = dst = NULL;

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (n < bsize ||
	    !scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
					   &src, &dst);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
	if (err >= 0) {
		/* Record page/offset of the bounce buffer for phys walks. */
		walk->src.page = virt_to_page(src);
		walk->dst.page = virt_to_page(dst);
		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
	}

	return err;
}
  
  static int ablkcipher_walk_first(struct ablkcipher_request *req,
  				 struct ablkcipher_walk *walk)
  {
  	struct crypto_tfm *tfm = req->base.tfm;
  	unsigned int alignmask;
  
  	alignmask = crypto_tfm_alg_alignmask(tfm);
  	if (WARN_ON_ONCE(in_irq()))
  		return -EDEADLK;
70d906bc1   Jason A. Donenfeld   crypto: skcipher ...
273
  	walk->iv = req->info;
bf06099db   David S. Miller   crypto: skcipher ...
274
275
276
277
278
  	walk->nbytes = walk->total;
  	if (unlikely(!walk->total))
  		return 0;
  
  	walk->iv_buffer = NULL;
bf06099db   David S. Miller   crypto: skcipher ...
279
280
  	if (unlikely(((unsigned long)walk->iv & alignmask))) {
  		int err = ablkcipher_copy_iv(walk, tfm, alignmask);
a861afbc9   Joshua I. James   crypto: ablkciphe...
281

bf06099db   David S. Miller   crypto: skcipher ...
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
  		if (err)
  			return err;
  	}
  
  	scatterwalk_start(&walk->in, walk->in.sg);
  	scatterwalk_start(&walk->out, walk->out.sg);
  
  	return ablkcipher_walk_next(req, walk);
  }
  
/*
 * Start a physical-address walk over @req's src/dst scatterlists;
 * positions are exposed as page/offset pairs in walk->src / walk->dst.
 */
int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk)
{
	walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
	return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
791b4d5f7   Herbert Xu   [CRYPTO] api: Add...
299
300
  static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
  			    unsigned int keylen)
ca7c39385   Sebastian Siewior   [CRYPTO] api: Han...
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
  {
  	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
  	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
  	int ret;
  	u8 *buffer, *alignbuffer;
  	unsigned long absize;
  
  	absize = keylen + alignmask;
  	buffer = kmalloc(absize, GFP_ATOMIC);
  	if (!buffer)
  		return -ENOMEM;
  
  	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
  	memcpy(alignbuffer, key, keylen);
  	ret = cipher->setkey(tfm, alignbuffer, keylen);
068171767   Sebastian Siewior   [CRYPTO] api: fix...
316
  	memset(alignbuffer, 0, keylen);
ca7c39385   Sebastian Siewior   [CRYPTO] api: Han...
317
318
319
  	kfree(buffer);
  	return ret;
  }
b5b7f0886   Herbert Xu   [CRYPTO] api: Add...
320
321
322
323
  static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
  		  unsigned int keylen)
  {
  	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
ca7c39385   Sebastian Siewior   [CRYPTO] api: Han...
324
  	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
b5b7f0886   Herbert Xu   [CRYPTO] api: Add...
325
326
327
328
329
  
  	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
  		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
  		return -EINVAL;
  	}
ca7c39385   Sebastian Siewior   [CRYPTO] api: Han...
330
331
  	if ((unsigned long)key & alignmask)
  		return setkey_unaligned(tfm, key, keylen);
b5b7f0886   Herbert Xu   [CRYPTO] api: Add...
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
  	return cipher->setkey(tfm, key, keylen);
  }
  
/* Context size of an ablkcipher instance is just what the alg declares. */
static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					      u32 mask)
{
	return alg->cra_ctxsize;
}
  
  static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
  				      u32 mask)
  {
  	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
  	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
  
  	if (alg->ivsize > PAGE_SIZE / 8)
  		return -EINVAL;
  
  	crt->setkey = setkey;
  	crt->encrypt = alg->encrypt;
  	crt->decrypt = alg->decrypt;
ecfc43292   Herbert Xu   [CRYPTO] skcipher...
353
  	crt->base = __crypto_ablkcipher_cast(tfm);
b5b7f0886   Herbert Xu   [CRYPTO] api: Add...
354
355
356
357
  	crt->ivsize = alg->ivsize;
  
  	return 0;
  }
3acc84739   Herbert Xu   crypto: algapi - ...
358
  #ifdef CONFIG_NET
29ffc8764   Steffen Klassert   crypto: Add users...
359
360
361
  static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
  {
  	struct crypto_report_blkcipher rblkcipher;
9a5467bf7   Mathias Krause   crypto: user - fi...
362
363
364
  	strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
  	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
  		sizeof(rblkcipher.geniv));
29ffc8764   Steffen Klassert   crypto: Add users...
365
366
367
368
369
  
  	rblkcipher.blocksize = alg->cra_blocksize;
  	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
  	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
  	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;
6662df33f   David S. Miller   crypto: Stop usin...
370
371
372
  	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
  		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
  		goto nla_put_failure;
29ffc8764   Steffen Klassert   crypto: Add users...
373
374
375
376
377
  	return 0;
  
  nla_put_failure:
  	return -EMSGSIZE;
  }
3acc84739   Herbert Xu   crypto: algapi - ...
378
379
380
381
382
383
  #else
  static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
  {
  	return -ENOSYS;
  }
  #endif
29ffc8764   Steffen Klassert   crypto: Add users...
384

b5b7f0886   Herbert Xu   [CRYPTO] api: Add...
385
386
387
388
389
390
391
392
  static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
  	__attribute__ ((unused));
  static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
  {
  	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;
  
  	seq_printf(m, "type         : ablkcipher
  ");
189ed66e9   Herbert Xu   [CRYPTO] api: Sho...
393
394
395
  	seq_printf(m, "async        : %s
  ", alg->cra_flags & CRYPTO_ALG_ASYNC ?
  					     "yes" : "no");
b5b7f0886   Herbert Xu   [CRYPTO] api: Add...
396
397
398
399
400
401
402
403
  	seq_printf(m, "blocksize    : %u
  ", alg->cra_blocksize);
  	seq_printf(m, "min keysize  : %u
  ", ablkcipher->min_keysize);
  	seq_printf(m, "max keysize  : %u
  ", ablkcipher->max_keysize);
  	seq_printf(m, "ivsize       : %u
  ", ablkcipher->ivsize);
23508e11a   Herbert Xu   [CRYPTO] skcipher...
404
405
  	seq_printf(m, "geniv        : %s
  ", ablkcipher->geniv ?: "<default>");
b5b7f0886   Herbert Xu   [CRYPTO] api: Add...
406
407
408
409
410
411
412
413
  }
  
  const struct crypto_type crypto_ablkcipher_type = {
  	.ctxsize = crypto_ablkcipher_ctxsize,
  	.init = crypto_init_ablkcipher_ops,
  #ifdef CONFIG_PROC_FS
  	.show = crypto_ablkcipher_show,
  #endif
29ffc8764   Steffen Klassert   crypto: Add users...
414
  	.report = crypto_ablkcipher_report,
b5b7f0886   Herbert Xu   [CRYPTO] api: Add...
415
416
  };
  EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
61da88e2b   Herbert Xu   [CRYPTO] skcipher...
417
418
419
420
421
422
423
424
  static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
  				      u32 mask)
  {
  	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
  	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
  
  	if (alg->ivsize > PAGE_SIZE / 8)
  		return -EINVAL;
ecfc43292   Herbert Xu   [CRYPTO] skcipher...
425
426
  	crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
  		      alg->setkey : setkey;
61da88e2b   Herbert Xu   [CRYPTO] skcipher...
427
428
  	crt->encrypt = alg->encrypt;
  	crt->decrypt = alg->decrypt;
ecfc43292   Herbert Xu   [CRYPTO] skcipher...
429
  	crt->base = __crypto_ablkcipher_cast(tfm);
61da88e2b   Herbert Xu   [CRYPTO] skcipher...
430
431
432
433
  	crt->ivsize = alg->ivsize;
  
  	return 0;
  }
3acc84739   Herbert Xu   crypto: algapi - ...
434
  #ifdef CONFIG_NET
3e29c1095   Steffen Klassert   crypto: Add users...
435
436
437
  static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
  {
  	struct crypto_report_blkcipher rblkcipher;
9a5467bf7   Mathias Krause   crypto: user - fi...
438
439
440
  	strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
  	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
  		sizeof(rblkcipher.geniv));
3e29c1095   Steffen Klassert   crypto: Add users...
441
442
443
444
445
  
  	rblkcipher.blocksize = alg->cra_blocksize;
  	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
  	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
  	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;
6662df33f   David S. Miller   crypto: Stop usin...
446
447
448
  	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
  		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
  		goto nla_put_failure;
3e29c1095   Steffen Klassert   crypto: Add users...
449
450
451
452
453
  	return 0;
  
  nla_put_failure:
  	return -EMSGSIZE;
  }
3acc84739   Herbert Xu   crypto: algapi - ...
454
455
456
457
458
459
  #else
  static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
  {
  	return -ENOSYS;
  }
  #endif
3e29c1095   Steffen Klassert   crypto: Add users...
460

61da88e2b   Herbert Xu   [CRYPTO] skcipher...
461
462
463
464
465
466
467
468
  static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
  	__attribute__ ((unused));
  static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
  {
  	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;
  
  	seq_printf(m, "type         : givcipher
  ");
189ed66e9   Herbert Xu   [CRYPTO] api: Sho...
469
470
471
  	seq_printf(m, "async        : %s
  ", alg->cra_flags & CRYPTO_ALG_ASYNC ?
  					     "yes" : "no");
61da88e2b   Herbert Xu   [CRYPTO] skcipher...
472
473
474
475
476
477
478
479
  	seq_printf(m, "blocksize    : %u
  ", alg->cra_blocksize);
  	seq_printf(m, "min keysize  : %u
  ", ablkcipher->min_keysize);
  	seq_printf(m, "max keysize  : %u
  ", ablkcipher->max_keysize);
  	seq_printf(m, "ivsize       : %u
  ", ablkcipher->ivsize);
23508e11a   Herbert Xu   [CRYPTO] skcipher...
480
481
  	seq_printf(m, "geniv        : %s
  ", ablkcipher->geniv ?: "<built-in>");
61da88e2b   Herbert Xu   [CRYPTO] skcipher...
482
483
484
485
486
487
488
489
  }
  
  const struct crypto_type crypto_givcipher_type = {
  	.ctxsize = crypto_ablkcipher_ctxsize,
  	.init = crypto_init_givcipher_ops,
  #ifdef CONFIG_PROC_FS
  	.show = crypto_givcipher_show,
  #endif
3e29c1095   Steffen Klassert   crypto: Add users...
490
  	.report = crypto_givcipher_report,
61da88e2b   Herbert Xu   [CRYPTO] skcipher...
491
492
  };
  EXPORT_SYMBOL_GPL(crypto_givcipher_type);