crypto/blkcipher.c

  /*
   * Block chaining cipher operations.
   * 
   * Generic encrypt/decrypt wrapper for ciphers, handles operations across
   * multiple page boundaries by using temporary blocks.  In user context,
   * the kernel is given a chance to schedule us once per page.
   *
   * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
   *
   * This program is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License as published by the Free
   * Software Foundation; either version 2 of the License, or (at your option) 
   * any later version.
   *
   */
  #include <crypto/internal/skcipher.h>
  #include <crypto/scatterwalk.h>
  #include <linux/errno.h>
  #include <linux/hardirq.h>
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/scatterlist.h>
  #include <linux/seq_file.h>
  #include <linux/slab.h>
  #include <linux/string.h>
  #include <linux/cryptouser.h>
  #include <net/netlink.h>
  
  #include "internal.h"
  
  enum {
  	BLKCIPHER_WALK_PHYS = 1 << 0,
  	BLKCIPHER_WALK_SLOW = 1 << 1,
  	BLKCIPHER_WALK_COPY = 1 << 2,
  	BLKCIPHER_WALK_DIFF = 1 << 3,
  };
  
  static int blkcipher_walk_next(struct blkcipher_desc *desc,
  			       struct blkcipher_walk *walk);
  static int blkcipher_walk_first(struct blkcipher_desc *desc,
  				struct blkcipher_walk *walk);
  
  static inline void blkcipher_map_src(struct blkcipher_walk *walk)
  {
  	walk->src.virt.addr = scatterwalk_map(&walk->in, 0);
  }
  
  static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
  {
  	walk->dst.virt.addr = scatterwalk_map(&walk->out, 1);
  }
  
  static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
  {
  	scatterwalk_unmap(walk->src.virt.addr, 0);
  }
  
  static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
  {
  	scatterwalk_unmap(walk->dst.virt.addr, 1);
  }
  /* Get a spot of the specified length that does not straddle a page.
   * The caller needs to ensure that there is enough space for this operation.
   */
  static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
  {
  	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
  	return max(start, end_page);
  }
  
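/*
 * Slow-path completion: the result was written to the aligned bounce
 * buffer set up by blkcipher_next_slow(), so copy one block back out
 * to the destination scatterlist.
 */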
  static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
  					       struct blkcipher_walk *walk,
  					       unsigned int bsize)
  {
  	u8 *addr;
  	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
  
  	addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
  	addr = blkcipher_get_spot(addr, bsize);
  	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
  	return bsize;
  }
  
  static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
  					       unsigned int n)
  {
  	if (walk->flags & BLKCIPHER_WALK_COPY) {
  		blkcipher_map_dst(walk);
  		memcpy(walk->dst.virt.addr, walk->page, n);
  		blkcipher_unmap_dst(walk);
  	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
  		if (walk->flags & BLKCIPHER_WALK_DIFF)
  			blkcipher_unmap_dst(walk);
  		blkcipher_unmap_src(walk);
  	}
  
  	scatterwalk_advance(&walk->in, n);
  	scatterwalk_advance(&walk->out, n);
  
  	return n;
  }
  
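/*
 * Finish the current step of a walk.  A non-negative @err is the number
 * of bytes the caller left unprocessed in this chunk; the walk advances
 * past the processed data and maps the next chunk, or tears everything
 * down (copying the IV back and freeing buffers) once nothing remains.
 */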
  int blkcipher_walk_done(struct blkcipher_desc *desc,
  			struct blkcipher_walk *walk, int err)
  {
  	struct crypto_blkcipher *tfm = desc->tfm;
  	unsigned int nbytes = 0;
  
  	if (likely(err >= 0)) {
  		unsigned int n = walk->nbytes - err;
  
  		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
  			n = blkcipher_done_fast(walk, n);
  		else if (WARN_ON(err)) {
  			err = -EINVAL;
  			goto err;
  		} else
  			n = blkcipher_done_slow(tfm, walk, n);
  
  		nbytes = walk->total - n;
  		err = 0;
  	}
  
  	scatterwalk_done(&walk->in, 0, nbytes);
  	scatterwalk_done(&walk->out, 1, nbytes);
  err:
  	walk->total = nbytes;
  	walk->nbytes = nbytes;
  
  	if (nbytes) {
  		crypto_yield(desc->flags);
  		return blkcipher_walk_next(desc, walk);
  	}
  
  	if (walk->iv != desc->info)
  		memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
  	if (walk->buffer != walk->page)
  		kfree(walk->buffer);
  	if (walk->page)
  		free_page((unsigned long)walk->page);
  
  	return err;
  }
  EXPORT_SYMBOL_GPL(blkcipher_walk_done);
  
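/*
 * Slow path: fewer than @bsize contiguous bytes are available in the
 * current scatterlist entries, so gather a full block into an aligned
 * bounce buffer and point src and dst at spots within it.
 */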
  static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
  				      struct blkcipher_walk *walk,
  				      unsigned int bsize,
  				      unsigned int alignmask)
  {
  	unsigned int n;
  	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
  
  	if (walk->buffer)
  		goto ok;
  
  	walk->buffer = walk->page;
  	if (walk->buffer)
  		goto ok;
  	n = aligned_bsize * 3 - (alignmask + 1) +
  	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
  	walk->buffer = kmalloc(n, GFP_ATOMIC);
  	if (!walk->buffer)
  		return blkcipher_walk_done(desc, walk, -ENOMEM);
  
  ok:
  	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
  					  alignmask + 1);
  	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
  	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
  						 aligned_bsize, bsize);
  
  	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
  
  	walk->nbytes = bsize;
  	walk->flags |= BLKCIPHER_WALK_SLOW;
  
  	return 0;
  }
  
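/*
 * Copy path: the source or destination is not aligned for this cipher,
 * so bounce the chunk through the preallocated page and process it in
 * place there; blkcipher_done_fast() copies the result back out.
 */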
  static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
  {
  	u8 *tmp = walk->page;
  
  	blkcipher_map_src(walk);
  	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
  	blkcipher_unmap_src(walk);
  
  	walk->src.virt.addr = tmp;
  	walk->dst.virt.addr = tmp;
  
  	return 0;
  }
  
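/*
 * Fast path: map the source page directly and, if the destination is
 * not the same page and offset, map it separately as well; the cipher
 * then works on the scatterlist data in place.
 */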
  static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
  				      struct blkcipher_walk *walk)
  {
  	unsigned long diff;
  
  	walk->src.phys.page = scatterwalk_page(&walk->in);
  	walk->src.phys.offset = offset_in_page(walk->in.offset);
  	walk->dst.phys.page = scatterwalk_page(&walk->out);
  	walk->dst.phys.offset = offset_in_page(walk->out.offset);
  
  	if (walk->flags & BLKCIPHER_WALK_PHYS)
  		return 0;
  
  	diff = walk->src.phys.offset - walk->dst.phys.offset;
  	diff |= walk->src.virt.page - walk->dst.virt.page;
  
  	blkcipher_map_src(walk);
  	walk->dst.virt.addr = walk->src.virt.addr;
  
  	if (diff) {
  		walk->flags |= BLKCIPHER_WALK_DIFF;
  		blkcipher_map_dst(walk);
  	}
  
  	return 0;
  }
  
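/*
 * Set up the next chunk of the walk, choosing between the fast, copy
 * and slow paths depending on alignment and on how much contiguous
 * data the scatterlists provide.
 */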
  static int blkcipher_walk_next(struct blkcipher_desc *desc,
  			       struct blkcipher_walk *walk)
  {
  	struct crypto_blkcipher *tfm = desc->tfm;
  	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
  	unsigned int bsize;
  	unsigned int n;
  	int err;
  
  	n = walk->total;
  	if (unlikely(n < crypto_blkcipher_blocksize(tfm))) {
  		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
  		return blkcipher_walk_done(desc, walk, -EINVAL);
  	}
  
  	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
  			 BLKCIPHER_WALK_DIFF);
  	if (!scatterwalk_aligned(&walk->in, alignmask) ||
  	    !scatterwalk_aligned(&walk->out, alignmask)) {
  		walk->flags |= BLKCIPHER_WALK_COPY;
  		if (!walk->page) {
  			walk->page = (void *)__get_free_page(GFP_ATOMIC);
  			if (!walk->page)
  				n = 0;
  		}
  	}
  	bsize = min(walk->blocksize, n);
  	n = scatterwalk_clamp(&walk->in, n);
  	n = scatterwalk_clamp(&walk->out, n);
  
  	if (unlikely(n < bsize)) {
  		err = blkcipher_next_slow(desc, walk, bsize, alignmask);
  		goto set_phys_lowmem;
  	}
  
  	walk->nbytes = n;
  	if (walk->flags & BLKCIPHER_WALK_COPY) {
  		err = blkcipher_next_copy(walk);
  		goto set_phys_lowmem;
  	}
  
  	return blkcipher_next_fast(desc, walk);
  
  set_phys_lowmem:
  	if (walk->flags & BLKCIPHER_WALK_PHYS) {
  		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
  		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
  		walk->src.phys.offset &= PAGE_SIZE - 1;
  		walk->dst.phys.offset &= PAGE_SIZE - 1;
  	}
  	return err;
  }
  
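/*
 * The caller's IV is not sufficiently aligned for this cipher, so
 * allocate a buffer and continue the walk with an aligned copy.
 */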
  static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
  				    struct crypto_blkcipher *tfm,
  				    unsigned int alignmask)
  {
  	unsigned bs = walk->blocksize;
  	unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
  	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
  	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
  			    (alignmask + 1);
  	u8 *iv;
  
  	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
  	walk->buffer = kmalloc(size, GFP_ATOMIC);
  	if (!walk->buffer)
  		return -ENOMEM;
  
  	iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
  	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
  	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
  	iv = blkcipher_get_spot(iv, ivsize);
  
  	walk->iv = memcpy(iv, walk->iv, ivsize);
  	return 0;
  }
  
  int blkcipher_walk_virt(struct blkcipher_desc *desc,
  			struct blkcipher_walk *walk)
  {
  	walk->flags &= ~BLKCIPHER_WALK_PHYS;
  	walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
  	return blkcipher_walk_first(desc, walk);
  }
  EXPORT_SYMBOL_GPL(blkcipher_walk_virt);
  
  int blkcipher_walk_phys(struct blkcipher_desc *desc,
  			struct blkcipher_walk *walk)
  {
  	walk->flags |= BLKCIPHER_WALK_PHYS;
  	walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
  	return blkcipher_walk_first(desc, walk);
  }
  EXPORT_SYMBOL_GPL(blkcipher_walk_phys);
  
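/*
 * Common entry point for all walk flavours: refuse to run in hard IRQ
 * context, copy a misaligned IV aside if necessary and map the first
 * chunk.
 */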
  static int blkcipher_walk_first(struct blkcipher_desc *desc,
  				struct blkcipher_walk *walk)
  {
  	struct crypto_blkcipher *tfm = desc->tfm;
  	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
  	if (WARN_ON_ONCE(in_irq()))
  		return -EDEADLK;
  	walk->nbytes = walk->total;
  	if (unlikely(!walk->total))
  		return 0;
  
  	walk->buffer = NULL;
  	walk->iv = desc->info;
  	if (unlikely(((unsigned long)walk->iv & alignmask))) {
  		int err = blkcipher_copy_iv(walk, tfm, alignmask);
  		if (err)
  			return err;
  	}
  
  	scatterwalk_start(&walk->in, walk->in.sg);
  	scatterwalk_start(&walk->out, walk->out.sg);
  	walk->page = NULL;
  
  	return blkcipher_walk_next(desc, walk);
  }
  int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
  			      struct blkcipher_walk *walk,
  			      unsigned int blocksize)
  {
  	walk->flags &= ~BLKCIPHER_WALK_PHYS;
  	walk->blocksize = blocksize;
  	return blkcipher_walk_first(desc, walk);
  }
  EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);
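
/*
 * Illustrative sketch (not part of the original file): roughly how a
 * simple mode implementation drives the walk API above.  The function
 * name and the one_block() helper are invented for the example; real
 * users of this pattern are the mode templates such as crypto/ecb.c.
 */
#if 0
static int example_crypt(struct blkcipher_desc *desc,
			 struct scatterlist *dst, struct scatterlist *src,
			 unsigned int nbytes)
{
	unsigned int bsize = crypto_blkcipher_blocksize(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		u8 *wsrc = walk.src.virt.addr;
		u8 *wdst = walk.dst.virt.addr;

		do {
			one_block(desc->tfm, wdst, wsrc);	/* stand-in */
			wsrc += bsize;
			wdst += bsize;
		} while ((nbytes -= bsize) >= bsize);

		/* Report the unprocessed tail; the walk maps the next chunk. */
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
#endif
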
  static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
  			    unsigned int keylen)
  {
  	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
  	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
  	int ret;
  	u8 *buffer, *alignbuffer;
  	unsigned long absize;
  
  	absize = keylen + alignmask;
  	buffer = kmalloc(absize, GFP_ATOMIC);
  	if (!buffer)
  		return -ENOMEM;
  
  	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
  	memcpy(alignbuffer, key, keylen);
  	ret = cipher->setkey(tfm, alignbuffer, keylen);
  	memset(alignbuffer, 0, keylen);
  	kfree(buffer);
  	return ret;
  }
  static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
  {
  	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
  	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
  
  	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
  		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
  		return -EINVAL;
  	}
  	if ((unsigned long)key & alignmask)
  		return setkey_unaligned(tfm, key, keylen);
  	return cipher->setkey(tfm, key, keylen);
  }
  static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
  			unsigned int keylen)
  {
  	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
  }
  
  static int async_encrypt(struct ablkcipher_request *req)
  {
  	struct crypto_tfm *tfm = req->base.tfm;
  	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
  	struct blkcipher_desc desc = {
  		.tfm = __crypto_blkcipher_cast(tfm),
  		.info = req->info,
  		.flags = req->base.flags,
  	};
  
  
  	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
  }
  
  static int async_decrypt(struct ablkcipher_request *req)
  {
  	struct crypto_tfm *tfm = req->base.tfm;
  	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
  	struct blkcipher_desc desc = {
  		.tfm = __crypto_blkcipher_cast(tfm),
  		.info = req->info,
  		.flags = req->base.flags,
  	};
  
  	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
  }
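
/*
 * Reserve room for the IV after the context when the transform is
 * instantiated as a synchronous blkcipher.
 */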
  static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
  					     u32 mask)
  {
  	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
  	unsigned int len = alg->cra_ctxsize;
  	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
  	    cipher->ivsize) {
  		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
  		len += cipher->ivsize;
  	}
  
  	return len;
  }
  static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
  {
  	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
  	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
  
  	crt->setkey = async_setkey;
  	crt->encrypt = async_encrypt;
  	crt->decrypt = async_decrypt;
  	if (!alg->ivsize) {
  		crt->givencrypt = skcipher_null_givencrypt;
  		crt->givdecrypt = skcipher_null_givdecrypt;
  	}
  	crt->base = __crypto_ablkcipher_cast(tfm);
  	crt->ivsize = alg->ivsize;
  
  	return 0;
  }
  
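/*
 * Synchronous case: the IV lives in the extra space reserved by
 * crypto_blkcipher_ctxsize(), directly after the aligned context.
 */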
  static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
  {
  	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
  	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
  	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
  	unsigned long addr;
  	crt->setkey = setkey;
  	crt->encrypt = alg->encrypt;
  	crt->decrypt = alg->decrypt;
  
  	addr = (unsigned long)crypto_tfm_ctx(tfm);
  	addr = ALIGN(addr, align);
  	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
  	crt->iv = (void *)addr;
  
  	return 0;
  }
  static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
  {
  	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
  
  	if (alg->ivsize > PAGE_SIZE / 8)
  		return -EINVAL;
  	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
  		return crypto_init_blkcipher_ops_sync(tfm);
  	else
  		return crypto_init_blkcipher_ops_async(tfm);
  }
  #ifdef CONFIG_NET
  static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
  {
  	struct crypto_report_blkcipher rblkcipher;
  
  	snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "blkcipher");
  	snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
  		 alg->cra_blkcipher.geniv ?: "<default>");
  
  	rblkcipher.blocksize = alg->cra_blocksize;
  	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
  	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
  	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;
  
  	NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
  		sizeof(struct crypto_report_blkcipher), &rblkcipher);
  
  	return 0;
  
  nla_put_failure:
  	return -EMSGSIZE;
  }
  #else
  static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
  {
  	return -ENOSYS;
  }
  #endif

  static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
  	__attribute__ ((unused));
  static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
  {
  	seq_printf(m, "type         : blkcipher
  ");
  	seq_printf(m, "blocksize    : %u
  ", alg->cra_blocksize);
  	seq_printf(m, "min keysize  : %u
  ", alg->cra_blkcipher.min_keysize);
  	seq_printf(m, "max keysize  : %u
  ", alg->cra_blkcipher.max_keysize);
  	seq_printf(m, "ivsize       : %u
  ", alg->cra_blkcipher.ivsize);
23508e11a   Herbert Xu   [CRYPTO] skcipher...
520
521
522
  	seq_printf(m, "geniv        : %s
  ", alg->cra_blkcipher.geniv ?:
  					     "<default>");
  }
  
  const struct crypto_type crypto_blkcipher_type = {
  	.ctxsize = crypto_blkcipher_ctxsize,
  	.init = crypto_init_blkcipher_ops,
  #ifdef CONFIG_PROC_FS
  	.show = crypto_blkcipher_show,
  #endif
  	.report = crypto_blkcipher_report,
  };
  EXPORT_SYMBOL_GPL(crypto_blkcipher_type);
  static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
  				const char *name, u32 type, u32 mask)
  {
  	struct crypto_alg *alg;
  	int err;
  
  	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;
  
  	alg = crypto_alg_mod_lookup(name, type, mask);
  	if (IS_ERR(alg))
  		return PTR_ERR(alg);
  
  	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
  	crypto_mod_put(alg);
  	return err;
  }
  
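/*
 * Build a template instance that wraps an existing block cipher with an
 * IV generator: grab the underlying algorithm, copy its parameters into
 * a new ablkcipher instance and mark it with CRYPTO_ALG_GENIV.
 */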
  struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
  					     struct rtattr **tb, u32 type,
  					     u32 mask)
  {
  	struct {
  		int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
  			      unsigned int keylen);
  		int (*encrypt)(struct ablkcipher_request *req);
  		int (*decrypt)(struct ablkcipher_request *req);
  
  		unsigned int min_keysize;
  		unsigned int max_keysize;
  		unsigned int ivsize;
  
  		const char *geniv;
  	} balg;
  	const char *name;
  	struct crypto_skcipher_spawn *spawn;
  	struct crypto_attr_type *algt;
  	struct crypto_instance *inst;
  	struct crypto_alg *alg;
  	int err;
  
  	algt = crypto_get_attr_type(tb);
  	err = PTR_ERR(algt);
  	if (IS_ERR(algt))
  		return ERR_PTR(err);
  
  	if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
  	    algt->mask)
  		return ERR_PTR(-EINVAL);
  
  	name = crypto_attr_alg_name(tb[1]);
  	err = PTR_ERR(name);
  	if (IS_ERR(name))
  		return ERR_PTR(err);
  
  	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
  	if (!inst)
  		return ERR_PTR(-ENOMEM);
  
  	spawn = crypto_instance_ctx(inst);
  
  	/* Ignore async algorithms if necessary. */
  	mask |= crypto_requires_sync(algt->type, algt->mask);
  
  	crypto_set_skcipher_spawn(spawn, inst);
  	err = crypto_grab_nivcipher(spawn, name, type, mask);
  	if (err)
  		goto err_free_inst;
  
  	alg = crypto_skcipher_spawn_alg(spawn);
  
  	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
  	    CRYPTO_ALG_TYPE_BLKCIPHER) {
  		balg.ivsize = alg->cra_blkcipher.ivsize;
  		balg.min_keysize = alg->cra_blkcipher.min_keysize;
  		balg.max_keysize = alg->cra_blkcipher.max_keysize;
  
  		balg.setkey = async_setkey;
  		balg.encrypt = async_encrypt;
  		balg.decrypt = async_decrypt;
  
  		balg.geniv = alg->cra_blkcipher.geniv;
  	} else {
  		balg.ivsize = alg->cra_ablkcipher.ivsize;
  		balg.min_keysize = alg->cra_ablkcipher.min_keysize;
  		balg.max_keysize = alg->cra_ablkcipher.max_keysize;
  
  		balg.setkey = alg->cra_ablkcipher.setkey;
  		balg.encrypt = alg->cra_ablkcipher.encrypt;
  		balg.decrypt = alg->cra_ablkcipher.decrypt;
  
  		balg.geniv = alg->cra_ablkcipher.geniv;
  	}
  
  	err = -EINVAL;
  	if (!balg.ivsize)
  		goto err_drop_alg;
  
  	/*
  	 * This is only true if we're constructing an algorithm with its
  	 * default IV generator.  For the default generator we elide the
  	 * template name and double-check the IV generator.
  	 */
  	if (algt->mask & CRYPTO_ALG_GENIV) {
  		if (!balg.geniv)
  			balg.geniv = crypto_default_geniv(alg);
  		err = -EAGAIN;
  		if (strcmp(tmpl->name, balg.geniv))
  			goto err_drop_alg;
  
  		memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
  		memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
  		       CRYPTO_MAX_ALG_NAME);
  	} else {
  		err = -ENAMETOOLONG;
  		if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
  			     "%s(%s)", tmpl->name, alg->cra_name) >=
  		    CRYPTO_MAX_ALG_NAME)
  			goto err_drop_alg;
  		if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
  			     "%s(%s)", tmpl->name, alg->cra_driver_name) >=
  		    CRYPTO_MAX_ALG_NAME)
  			goto err_drop_alg;
  	}
  
  	inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
  	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
  	inst->alg.cra_priority = alg->cra_priority;
  	inst->alg.cra_blocksize = alg->cra_blocksize;
  	inst->alg.cra_alignmask = alg->cra_alignmask;
  	inst->alg.cra_type = &crypto_givcipher_type;
  
  	inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
  	inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
  	inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
  	inst->alg.cra_ablkcipher.geniv = balg.geniv;
  
  	inst->alg.cra_ablkcipher.setkey = balg.setkey;
  	inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
  	inst->alg.cra_ablkcipher.decrypt = balg.decrypt;
  
  out:
  	return inst;
  
  err_drop_alg:
  	crypto_drop_skcipher(spawn);
  err_free_inst:
  	kfree(inst);
  	inst = ERR_PTR(err);
  	goto out;
  }
  EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);
  
  void skcipher_geniv_free(struct crypto_instance *inst)
  {
  	crypto_drop_skcipher(crypto_instance_ctx(inst));
  	kfree(inst);
  }
  EXPORT_SYMBOL_GPL(skcipher_geniv_free);
  
  int skcipher_geniv_init(struct crypto_tfm *tfm)
  {
  	struct crypto_instance *inst = (void *)tfm->__crt_alg;
  	struct crypto_ablkcipher *cipher;
  
  	cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
  	if (IS_ERR(cipher))
  		return PTR_ERR(cipher);
  
  	tfm->crt_ablkcipher.base = cipher;
  	tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);
  
  	return 0;
  }
  EXPORT_SYMBOL_GPL(skcipher_geniv_init);
  
  void skcipher_geniv_exit(struct crypto_tfm *tfm)
  {
  	crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
  }
  EXPORT_SYMBOL_GPL(skcipher_geniv_exit);
  MODULE_LICENSE("GPL");
  MODULE_DESCRIPTION("Generic block chaining cipher type");