crypto/blkcipher.c

  /*
   * Block chaining cipher operations.
   *
   * Generic encrypt/decrypt wrapper for ciphers, handles operations across
   * multiple page boundaries by using temporary blocks.  In user context,
   * the kernel is given a chance to schedule us once per page.
   *
   * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
   *
   * This program is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License as published by the Free
   * Software Foundation; either version 2 of the License, or (at your option)
   * any later version.
   *
   */
  #include <crypto/aead.h>
  #include <crypto/internal/skcipher.h>
  #include <crypto/scatterwalk.h>
  #include <linux/errno.h>
  #include <linux/hardirq.h>
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/seq_file.h>
  #include <linux/slab.h>
  #include <linux/string.h>
  #include <linux/cryptouser.h>
  #include <linux/compiler.h>
  #include <net/netlink.h>
  
  #include "internal.h"
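
/*
 * Typical use of the walk interface below, as seen from a cipher mode
 * implementation.  This is only an illustrative sketch: "crypt_blocks"
 * stands in for a hypothetical per-mode helper and "bsize" for the
 * cipher block size; neither is defined in this file.
 *
 *	struct blkcipher_walk walk;
 *	int err;
 *
 *	blkcipher_walk_init(&walk, dst, src, nbytes);
 *	err = blkcipher_walk_virt(desc, &walk);
 *	while (walk.nbytes) {
 *		unsigned int n = walk.nbytes;
 *
 *		crypt_blocks(desc, walk.dst.virt.addr, walk.src.virt.addr,
 *			     n - (n % bsize));
 *		err = blkcipher_walk_done(desc, &walk, n % bsize);
 *	}
 *	return err;
 */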
  
  enum {
  	BLKCIPHER_WALK_PHYS = 1 << 0,
  	BLKCIPHER_WALK_SLOW = 1 << 1,
  	BLKCIPHER_WALK_COPY = 1 << 2,
  	BLKCIPHER_WALK_DIFF = 1 << 3,
  };
  
  static int blkcipher_walk_next(struct blkcipher_desc *desc,
  			       struct blkcipher_walk *walk);
  static int blkcipher_walk_first(struct blkcipher_desc *desc,
  				struct blkcipher_walk *walk);
  
  static inline void blkcipher_map_src(struct blkcipher_walk *walk)
  {
  	walk->src.virt.addr = scatterwalk_map(&walk->in);
  }
  
  static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
  {
  	walk->dst.virt.addr = scatterwalk_map(&walk->out);
  }
  
  static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
  {
  	scatterwalk_unmap(walk->src.virt.addr);
  }
  
  static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
  {
  	scatterwalk_unmap(walk->dst.virt.addr);
  }
  /* Get a spot of the specified length that does not straddle a page.
   * The caller needs to ensure that there is enough space for this operation.
   */
  static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
  {
  	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
  	return max(start, end_page);
  }
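
/*
 * Slow-path completion: the block was processed in the aligned bounce
 * buffer, so copy the result back out to the destination scatterlist.
 */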
  static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
  				       unsigned int bsize)
  {
  	u8 *addr;

  	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
  	addr = blkcipher_get_spot(addr, bsize);
  	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
  }
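
/*
 * Fast-path completion: flush the in-place copy back to the destination
 * (COPY case), drop any mappings taken for the virtual-address walk, and
 * advance both scatterwalks past the bytes just processed.
 */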
  static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
  				       unsigned int n)
  {
  	if (walk->flags & BLKCIPHER_WALK_COPY) {
  		blkcipher_map_dst(walk);
  		memcpy(walk->dst.virt.addr, walk->page, n);
  		blkcipher_unmap_dst(walk);
  	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
  		if (walk->flags & BLKCIPHER_WALK_DIFF)
  			blkcipher_unmap_dst(walk);
  		blkcipher_unmap_src(walk);
  	}
  
  	scatterwalk_advance(&walk->in, n);
  	scatterwalk_advance(&walk->out, n);
  }
  
  int blkcipher_walk_done(struct blkcipher_desc *desc,
  			struct blkcipher_walk *walk, int err)
  {
  	unsigned int n; /* bytes processed */
  	bool more;

  	if (unlikely(err < 0))
  		goto finish;

  	n = walk->nbytes - err;
  	walk->total -= n;
  	more = (walk->total != 0);

  	if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
  		blkcipher_done_fast(walk, n);
  	} else {
  		if (WARN_ON(err)) {
  			/* unexpected case; didn't process all bytes */
  			err = -EINVAL;
  			goto finish;
  		}
  		blkcipher_done_slow(walk, n);
  	}
  	scatterwalk_done(&walk->in, 0, more);
  	scatterwalk_done(&walk->out, 1, more);

  	if (more) {
  		crypto_yield(desc->flags);
  		return blkcipher_walk_next(desc, walk);
  	}
  	err = 0;
  finish:
  	walk->nbytes = 0;
  	if (walk->iv != desc->info)
  		memcpy(desc->info, walk->iv, walk->ivsize);
  	if (walk->buffer != walk->page)
  		kfree(walk->buffer);
  	if (walk->page)
  		free_page((unsigned long)walk->page);
  	return err;
  }
  EXPORT_SYMBOL_GPL(blkcipher_walk_done);
  
  static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
  				      struct blkcipher_walk *walk,
  				      unsigned int bsize,
  				      unsigned int alignmask)
  {
  	unsigned int n;
  	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
  
  	if (walk->buffer)
  		goto ok;
  
  	walk->buffer = walk->page;
  	if (walk->buffer)
  		goto ok;
  	n = aligned_bsize * 3 - (alignmask + 1) +
  	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
  	walk->buffer = kmalloc(n, GFP_ATOMIC);
  	if (!walk->buffer)
  		return blkcipher_walk_done(desc, walk, -ENOMEM);
  
  ok:
  	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
  					  alignmask + 1);
  	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
  	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
  						 aligned_bsize, bsize);
  
  	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
  
  	walk->nbytes = bsize;
  	walk->flags |= BLKCIPHER_WALK_SLOW;
  
  	return 0;
  }
  
  static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
  {
  	u8 *tmp = walk->page;
  
  	blkcipher_map_src(walk);
  	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
  	blkcipher_unmap_src(walk);
  
  	walk->src.virt.addr = tmp;
  	walk->dst.virt.addr = tmp;
  
  	return 0;
  }
  
  static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
  				      struct blkcipher_walk *walk)
  {
  	unsigned long diff;
  
  	walk->src.phys.page = scatterwalk_page(&walk->in);
  	walk->src.phys.offset = offset_in_page(walk->in.offset);
  	walk->dst.phys.page = scatterwalk_page(&walk->out);
  	walk->dst.phys.offset = offset_in_page(walk->out.offset);
  
  	if (walk->flags & BLKCIPHER_WALK_PHYS)
  		return 0;
  
  	diff = walk->src.phys.offset - walk->dst.phys.offset;
  	diff |= walk->src.virt.page - walk->dst.virt.page;
  
  	blkcipher_map_src(walk);
  	walk->dst.virt.addr = walk->src.virt.addr;
  
  	if (diff) {
  		walk->flags |= BLKCIPHER_WALK_DIFF;
  		blkcipher_map_dst(walk);
  	}
  
  	return 0;
  }
  
  static int blkcipher_walk_next(struct blkcipher_desc *desc,
  			       struct blkcipher_walk *walk)
  {
  	unsigned int bsize;
  	unsigned int n;
  	int err;
  
  	n = walk->total;
  	if (unlikely(n < walk->cipher_blocksize)) {
  		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
  		return blkcipher_walk_done(desc, walk, -EINVAL);
  	}
  	bsize = min(walk->walk_blocksize, n);
  	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
  			 BLKCIPHER_WALK_DIFF);
  	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
  	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {
  		walk->flags |= BLKCIPHER_WALK_COPY;
  		if (!walk->page) {
  			walk->page = (void *)__get_free_page(GFP_ATOMIC);
  			if (!walk->page)
  				n = 0;
  		}
  	}
  
  	n = scatterwalk_clamp(&walk->in, n);
  	n = scatterwalk_clamp(&walk->out, n);
  
  	if (unlikely(n < bsize)) {
  		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
  		goto set_phys_lowmem;
  	}
  
  	walk->nbytes = n;
  	if (walk->flags & BLKCIPHER_WALK_COPY) {
  		err = blkcipher_next_copy(walk);
  		goto set_phys_lowmem;
  	}
  
  	return blkcipher_next_fast(desc, walk);
  
  set_phys_lowmem:
  	if (walk->flags & BLKCIPHER_WALK_PHYS) {
  		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
  		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
  		walk->src.phys.offset &= PAGE_SIZE - 1;
  		walk->dst.phys.offset &= PAGE_SIZE - 1;
  	}
  	return err;
  }
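
/*
 * The caller's IV does not satisfy the algorithm's alignment requirement:
 * allocate a buffer with room for the slow-path bounce blocks plus an
 * aligned IV, and point walk->iv at an aligned copy inside it.
 */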
  static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
  {
  	unsigned bs = walk->walk_blocksize;
  	unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
  	unsigned int size = aligned_bs * 2 +
  			    walk->ivsize + max(aligned_bs, walk->ivsize) -
  			    (walk->alignmask + 1);
  	u8 *iv;
  	size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
  	walk->buffer = kmalloc(size, GFP_ATOMIC);
  	if (!walk->buffer)
  		return -ENOMEM;
  	iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
  	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
  	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
  	iv = blkcipher_get_spot(iv, walk->ivsize);

  	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
  	return 0;
  }
  
  int blkcipher_walk_virt(struct blkcipher_desc *desc,
  			struct blkcipher_walk *walk)
  {
  	walk->flags &= ~BLKCIPHER_WALK_PHYS;
  	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
  	walk->cipher_blocksize = walk->walk_blocksize;
  	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
  	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
  	return blkcipher_walk_first(desc, walk);
  }
  EXPORT_SYMBOL_GPL(blkcipher_walk_virt);
  
  int blkcipher_walk_phys(struct blkcipher_desc *desc,
  			struct blkcipher_walk *walk)
  {
  	walk->flags |= BLKCIPHER_WALK_PHYS;
  	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
  	walk->cipher_blocksize = walk->walk_blocksize;
  	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
  	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
  	return blkcipher_walk_first(desc, walk);
  }
  EXPORT_SYMBOL_GPL(blkcipher_walk_phys);
  
  static int blkcipher_walk_first(struct blkcipher_desc *desc,
  				struct blkcipher_walk *walk)
  {
  	if (WARN_ON_ONCE(in_irq()))
  		return -EDEADLK;
  	walk->iv = desc->info;
  	walk->nbytes = walk->total;
  	if (unlikely(!walk->total))
  		return 0;
  
  	walk->buffer = NULL;
  	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
  		int err = blkcipher_copy_iv(walk);
  		if (err)
  			return err;
  	}
  
  	scatterwalk_start(&walk->in, walk->in.sg);
  	scatterwalk_start(&walk->out, walk->out.sg);
  	walk->page = NULL;
  
  	return blkcipher_walk_next(desc, walk);
  }
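
/*
 * Like blkcipher_walk_virt(), but walk in chunks of a caller-supplied
 * block size rather than the cipher's own, for modes (CTR, for example)
 * that process data in different units.
 */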
  int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
  			      struct blkcipher_walk *walk,
  			      unsigned int blocksize)
  {
  	walk->flags &= ~BLKCIPHER_WALK_PHYS;
  	walk->walk_blocksize = blocksize;
  	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
  	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
  	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
  	return blkcipher_walk_first(desc, walk);
  }
  EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);
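
/*
 * Like blkcipher_walk_virt_block(), but take the block size, IV size and
 * alignment mask from an AEAD transform, so AEAD implementations can
 * drive the blkcipher walker over their data.
 */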
  int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
  				   struct blkcipher_walk *walk,
  				   struct crypto_aead *tfm,
  				   unsigned int blocksize)
  {
  	walk->flags &= ~BLKCIPHER_WALK_PHYS;
  	walk->walk_blocksize = blocksize;
  	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
  	walk->ivsize = crypto_aead_ivsize(tfm);
  	walk->alignmask = crypto_aead_alignmask(tfm);
  	return blkcipher_walk_first(desc, walk);
  }
  EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);
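
/*
 * The key supplied by the caller does not satisfy the algorithm's
 * alignment requirement: copy it into an aligned temporary buffer, run
 * ->setkey() on the copy, then wipe and free it.
 */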
  static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
  			    unsigned int keylen)
  {
  	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
  	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
  	int ret;
  	u8 *buffer, *alignbuffer;
  	unsigned long absize;
  
  	absize = keylen + alignmask;
  	buffer = kmalloc(absize, GFP_ATOMIC);
  	if (!buffer)
  		return -ENOMEM;
  
  	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
  	memcpy(alignbuffer, key, keylen);
  	ret = cipher->setkey(tfm, alignbuffer, keylen);
  	memset(alignbuffer, 0, keylen);
  	kfree(buffer);
  	return ret;
  }
  static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
  {
  	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
  	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
  
  	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
  		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
  		return -EINVAL;
  	}
  	if ((unsigned long)key & alignmask)
  		return setkey_unaligned(tfm, key, keylen);
  	return cipher->setkey(tfm, key, keylen);
  }
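
/*
 * Thin wrappers exposing a synchronous blkcipher through the ablkcipher
 * (asynchronous) interface.  Requests are serviced immediately in the
 * caller's context; nothing is queued here.
 */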
  static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
  			unsigned int keylen)
  {
  	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
  }
  
  static int async_encrypt(struct ablkcipher_request *req)
  {
  	struct crypto_tfm *tfm = req->base.tfm;
  	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
  	struct blkcipher_desc desc = {
  		.tfm = __crypto_blkcipher_cast(tfm),
  		.info = req->info,
  		.flags = req->base.flags,
	};

  	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
  }
  
  static int async_decrypt(struct ablkcipher_request *req)
  {
  	struct crypto_tfm *tfm = req->base.tfm;
  	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
  	struct blkcipher_desc desc = {
  		.tfm = __crypto_blkcipher_cast(tfm),
  		.info = req->info,
  		.flags = req->base.flags,
  	};
  
  	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
  }
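
/*
 * When the transform is requested through the synchronous interface and
 * the algorithm uses an IV, reserve aligned space for that IV right after
 * the transform context.
 */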
  static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
  					     u32 mask)
  {
  	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
  	unsigned int len = alg->cra_ctxsize;
  	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
  	    cipher->ivsize) {
  		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
  		len += cipher->ivsize;
  	}
  
  	return len;
  }
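
/*
 * Set up the crt_ablkcipher ops for a blkcipher algorithm used through
 * the asynchronous interface.
 */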
  static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
  {
  	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
  	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
  
  	crt->setkey = async_setkey;
  	crt->encrypt = async_encrypt;
  	crt->decrypt = async_decrypt;
  	crt->base = __crypto_ablkcipher_cast(tfm);
  	crt->ivsize = alg->ivsize;
  
  	return 0;
  }
  
  static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
  {
  	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
  	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
  	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
  	unsigned long addr;
  	crt->setkey = setkey;
  	crt->encrypt = alg->encrypt;
  	crt->decrypt = alg->decrypt;
  
  	addr = (unsigned long)crypto_tfm_ctx(tfm);
  	addr = ALIGN(addr, align);
  	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
  	crt->iv = (void *)addr;
  
  	return 0;
  }
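
/*
 * Choose between the synchronous and asynchronous ops above based on the
 * type/mask requested when the transform was allocated.
 */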
  static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
  {
  	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
  
  	if (alg->ivsize > PAGE_SIZE / 8)
  		return -EINVAL;
  	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
  		return crypto_init_blkcipher_ops_sync(tfm);
  	else
  		return crypto_init_blkcipher_ops_async(tfm);
  }
  #ifdef CONFIG_NET
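/* Report blkcipher parameters to user space via the crypto_user netlink API. */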
  static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
  {
  	struct crypto_report_blkcipher rblkcipher;
  	strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
  	strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
  		sizeof(rblkcipher.geniv));
  	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
  
  	rblkcipher.blocksize = alg->cra_blocksize;
  	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
  	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
  	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;
  	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
  		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
  		goto nla_put_failure;
  	return 0;
  
  nla_put_failure:
  	return -EMSGSIZE;
  }
  #else
  static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
  {
  	return -ENOSYS;
  }
  #endif

  static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
  	__maybe_unused;
  static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
  {
  	seq_printf(m, "type         : blkcipher
  ");
  	seq_printf(m, "blocksize    : %u
  ", alg->cra_blocksize);
  	seq_printf(m, "min keysize  : %u
  ", alg->cra_blkcipher.min_keysize);
  	seq_printf(m, "max keysize  : %u
  ", alg->cra_blkcipher.max_keysize);
  	seq_printf(m, "ivsize       : %u
  ", alg->cra_blkcipher.ivsize);
23508e11a   Herbert Xu   [CRYPTO] skcipher...
522
523
524
  	seq_printf(m, "geniv        : %s
  ", alg->cra_blkcipher.geniv ?:
  					     "<default>");
  }
  
  const struct crypto_type crypto_blkcipher_type = {
  	.ctxsize = crypto_blkcipher_ctxsize,
  	.init = crypto_init_blkcipher_ops,
  #ifdef CONFIG_PROC_FS
  	.show = crypto_blkcipher_show,
  #endif
  	.report = crypto_blkcipher_report,
  };
  EXPORT_SYMBOL_GPL(crypto_blkcipher_type);
  
  MODULE_LICENSE("GPL");
  MODULE_DESCRIPTION("Generic block chaining cipher type");