Blame view
crypto/ctr.c
9.96 KB
2874c5fd2
|
1 |
// SPDX-License-Identifier: GPL-2.0-or-later |
23e353c8a
|
2 3 4 5 |
/* * CTR: Counter mode * * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com> |
23e353c8a
|
6 7 8 |
*/ #include <crypto/algapi.h> |
5311f248b
|
9 |
#include <crypto/ctr.h> |
69d3150cf
|
10 |
#include <crypto/internal/skcipher.h> |
23e353c8a
|
11 12 13 14 |
#include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> |
23e353c8a
|
15 |
#include <linux/slab.h> |
5311f248b
|
16 |
/*
 * Per-tfm context for the rfc3686(...) template: the wrapped skcipher
 * plus the per-key nonce (RFC 3686 carries the nonce at the end of the
 * key material — see crypto_rfc3686_setkey()).
 */
struct crypto_rfc3686_ctx {
	struct crypto_skcipher *child;		/* underlying CTR skcipher */
	u8 nonce[CTR_RFC3686_NONCE_SIZE];	/* copied from the key tail */
};
69d3150cf
|
20 21 |
/*
 * Per-request context, carved out of the skcipher request context area
 * (see the reqsize computation in crypto_rfc3686_init_tfm()).
 */
struct crypto_rfc3686_req_ctx {
	u8 iv[CTR_RFC3686_BLOCK_SIZE];	/* full counter block: nonce || IV || counter */
	struct skcipher_request subreq CRYPTO_MINALIGN_ATTR;	/* request forwarded to child */
};
11f14630c
|
24 |
static void crypto_ctr_crypt_final(struct skcipher_walk *walk, |
5311f248b
|
25 |
struct crypto_cipher *tfm) |
0971eb0de
|
26 27 |
{ unsigned int bsize = crypto_cipher_blocksize(tfm); |
5311f248b
|
28 29 |
unsigned long alignmask = crypto_cipher_alignmask(tfm); u8 *ctrblk = walk->iv; |
6650c4de6
|
30 |
u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK]; |
5311f248b
|
31 |
u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1); |
0971eb0de
|
32 33 34 35 36 |
u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; unsigned int nbytes = walk->nbytes; crypto_cipher_encrypt_one(tfm, keystream, ctrblk); |
45fe93dff
|
37 |
crypto_xor_cpy(dst, keystream, src, nbytes); |
5311f248b
|
38 39 |
crypto_inc(ctrblk, bsize); |
0971eb0de
|
40 |
} |
11f14630c
|
41 |
static int crypto_ctr_crypt_segment(struct skcipher_walk *walk, |
5311f248b
|
42 |
struct crypto_cipher *tfm) |
23e353c8a
|
43 44 45 46 |
{ void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = crypto_cipher_alg(tfm)->cia_encrypt; unsigned int bsize = crypto_cipher_blocksize(tfm); |
5311f248b
|
47 |
u8 *ctrblk = walk->iv; |
23e353c8a
|
48 49 50 51 52 53 |
u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; unsigned int nbytes = walk->nbytes; do { /* create keystream */ |
0971eb0de
|
54 55 |
fn(crypto_cipher_tfm(tfm), dst, ctrblk); crypto_xor(dst, src, bsize); |
23e353c8a
|
56 57 |
/* increment counter in counterblock */ |
5311f248b
|
58 |
crypto_inc(ctrblk, bsize); |
23e353c8a
|
59 |
|
23e353c8a
|
60 61 |
src += bsize; dst += bsize; |
0971eb0de
|
62 |
} while ((nbytes -= bsize) >= bsize); |
23e353c8a
|
63 |
|
0971eb0de
|
64 |
return nbytes; |
23e353c8a
|
65 |
} |
11f14630c
|
66 |
static int crypto_ctr_crypt_inplace(struct skcipher_walk *walk, |
5311f248b
|
67 |
struct crypto_cipher *tfm) |
23e353c8a
|
68 69 70 71 |
{ void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = crypto_cipher_alg(tfm)->cia_encrypt; unsigned int bsize = crypto_cipher_blocksize(tfm); |
5311f248b
|
72 |
unsigned long alignmask = crypto_cipher_alignmask(tfm); |
23e353c8a
|
73 |
unsigned int nbytes = walk->nbytes; |
5311f248b
|
74 |
u8 *ctrblk = walk->iv; |
23e353c8a
|
75 |
u8 *src = walk->src.virt.addr; |
6650c4de6
|
76 |
u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK]; |
5311f248b
|
77 |
u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1); |
23e353c8a
|
78 79 80 81 |
do { /* create keystream */ fn(crypto_cipher_tfm(tfm), keystream, ctrblk); |
0971eb0de
|
82 |
crypto_xor(src, keystream, bsize); |
23e353c8a
|
83 84 |
/* increment counter in counterblock */ |
5311f248b
|
85 |
crypto_inc(ctrblk, bsize); |
23e353c8a
|
86 |
|
23e353c8a
|
87 |
src += bsize; |
0971eb0de
|
88 |
} while ((nbytes -= bsize) >= bsize); |
23e353c8a
|
89 |
|
0971eb0de
|
90 |
return nbytes; |
23e353c8a
|
91 |
} |
11f14630c
|
92 |
static int crypto_ctr_crypt(struct skcipher_request *req) |
23e353c8a
|
93 |
{ |
11f14630c
|
94 95 96 97 98 |
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); const unsigned int bsize = crypto_cipher_blocksize(cipher); struct skcipher_walk walk; unsigned int nbytes; |
23e353c8a
|
99 |
int err; |
11f14630c
|
100 |
err = skcipher_walk_virt(&walk, req, false); |
23e353c8a
|
101 |
|
0971eb0de
|
102 |
while (walk.nbytes >= bsize) { |
23e353c8a
|
103 |
if (walk.src.virt.addr == walk.dst.virt.addr) |
11f14630c
|
104 |
nbytes = crypto_ctr_crypt_inplace(&walk, cipher); |
23e353c8a
|
105 |
else |
11f14630c
|
106 |
nbytes = crypto_ctr_crypt_segment(&walk, cipher); |
23e353c8a
|
107 |
|
11f14630c
|
108 |
err = skcipher_walk_done(&walk, nbytes); |
23e353c8a
|
109 |
} |
0971eb0de
|
110 111 |
if (walk.nbytes) { |
11f14630c
|
112 113 |
crypto_ctr_crypt_final(&walk, cipher); err = skcipher_walk_done(&walk, 0); |
0971eb0de
|
114 |
} |
23e353c8a
|
115 116 |
return err; } |
11f14630c
|
117 |
static int crypto_ctr_create(struct crypto_template *tmpl, struct rtattr **tb) |
23e353c8a
|
118 |
{ |
11f14630c
|
119 |
struct skcipher_instance *inst; |
23e353c8a
|
120 |
struct crypto_alg *alg; |
23e353c8a
|
121 |
int err; |
b3c16bfc6
|
122 |
inst = skcipher_alloc_instance_simple(tmpl, tb); |
11f14630c
|
123 124 |
if (IS_ERR(inst)) return PTR_ERR(inst); |
23e353c8a
|
125 |
|
b3c16bfc6
|
126 |
alg = skcipher_ialg_simple(inst); |
5311f248b
|
127 |
/* Block size must be >= 4 bytes. */ |
23e353c8a
|
128 |
err = -EINVAL; |
5311f248b
|
129 |
if (alg->cra_blocksize < 4) |
11f14630c
|
130 |
goto out_free_inst; |
23e353c8a
|
131 |
|
3f8214ea3
|
132 |
/* If this is false we'd fail the alignment of crypto_inc. */ |
5311f248b
|
133 |
if (alg->cra_blocksize % 4) |
11f14630c
|
134 |
goto out_free_inst; |
23e353c8a
|
135 |
|
11f14630c
|
136 137 |
/* CTR mode is a stream cipher. */ inst->alg.base.cra_blocksize = 1; |
23e353c8a
|
138 |
|
11f14630c
|
139 140 141 142 143 |
/* * To simplify the implementation, configure the skcipher walk to only * give a partial block at the very end, never earlier. */ inst->alg.chunksize = alg->cra_blocksize; |
23e353c8a
|
144 |
|
11f14630c
|
145 146 |
inst->alg.encrypt = crypto_ctr_crypt; inst->alg.decrypt = crypto_ctr_crypt; |
23e353c8a
|
147 |
|
11f14630c
|
148 |
err = skcipher_register_instance(tmpl, inst); |
b3c16bfc6
|
149 |
if (err) { |
11f14630c
|
150 |
out_free_inst: |
b3c16bfc6
|
151 152 |
inst->free(inst); } |
11f14630c
|
153 |
return err; |
23e353c8a
|
154 |
} |
b2b39c2f9
|
155 |
static int crypto_rfc3686_setkey(struct crypto_skcipher *parent, |
69d3150cf
|
156 |
const u8 *key, unsigned int keylen) |
5311f248b
|
157 |
{ |
b2b39c2f9
|
158 159 |
struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(parent); struct crypto_skcipher *child = ctx->child; |
5311f248b
|
160 161 162 163 164 165 166 167 168 |
/* the nonce is stored in bytes at end of key */ if (keylen < CTR_RFC3686_NONCE_SIZE) return -EINVAL; memcpy(ctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE); keylen -= CTR_RFC3686_NONCE_SIZE; |
b2b39c2f9
|
169 170 171 |
crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) & CRYPTO_TFM_REQ_MASK); |
af5034e8e
|
172 |
return crypto_skcipher_setkey(child, key, keylen); |
5311f248b
|
173 |
} |
b2b39c2f9
|
174 |
/*
 * RFC 3686 encrypt/decrypt (identical in CTR mode): build the full
 * counter block as nonce || per-request IV || 32-bit counter starting
 * at 1, then forward the request to the child CTR skcipher.
 */
static int crypto_rfc3686_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child = ctx->child;
	unsigned long align = crypto_skcipher_alignmask(tfm);
	/* align within the raw request ctx area; init_tfm reserved the slack */
	struct crypto_rfc3686_req_ctx *rctx =
		(void *)PTR_ALIGN((u8 *)skcipher_request_ctx(req), align + 1);
	struct skcipher_request *subreq = &rctx->subreq;
	u8 *iv = rctx->iv;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->cryptlen, iv);

	return crypto_skcipher_encrypt(subreq);
}
b2b39c2f9
|
200 |
static int crypto_rfc3686_init_tfm(struct crypto_skcipher *tfm) |
5311f248b
|
201 |
{ |
b2b39c2f9
|
202 203 204 205 |
struct skcipher_instance *inst = skcipher_alg_instance(tfm); struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst); struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm); struct crypto_skcipher *cipher; |
69d3150cf
|
206 |
unsigned long align; |
b2b39c2f9
|
207 |
unsigned int reqsize; |
5311f248b
|
208 |
|
60425a8ba
|
209 |
cipher = crypto_spawn_skcipher(spawn); |
5311f248b
|
210 211 212 213 |
if (IS_ERR(cipher)) return PTR_ERR(cipher); ctx->child = cipher; |
b2b39c2f9
|
214 |
align = crypto_skcipher_alignmask(tfm); |
69d3150cf
|
215 |
align &= ~(crypto_tfm_ctx_alignment() - 1); |
b2b39c2f9
|
216 217 218 |
reqsize = align + sizeof(struct crypto_rfc3686_req_ctx) + crypto_skcipher_reqsize(cipher); crypto_skcipher_set_reqsize(tfm, reqsize); |
69d3150cf
|
219 |
|
5311f248b
|
220 221 |
return 0; } |
b2b39c2f9
|
222 |
static void crypto_rfc3686_exit_tfm(struct crypto_skcipher *tfm) |
5311f248b
|
223 |
{ |
b2b39c2f9
|
224 225 226 227 |
struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm); crypto_free_skcipher(ctx->child); } |
5311f248b
|
228 |
|
b2b39c2f9
|
229 230 231 232 233 234 |
/* Instance destructor: drop the grabbed spawn, then free the instance. */
static void crypto_rfc3686_free(struct skcipher_instance *inst)
{
	crypto_drop_skcipher(skcipher_instance_ctx(inst));
	kfree(inst);
}
b2b39c2f9
|
236 237 |
/*
 * Instantiate "rfc3686(ctr-like-cipher)": wrap an existing CTR-mode
 * skcipher so that the IV handling follows RFC 3686 (4-byte nonce from
 * the key, 8-byte per-request IV, 32-bit block counter starting at 1).
 * Cleanup uses layered gotos: spawn is only dropped once grabbed.
 */
static int crypto_rfc3686_create(struct crypto_template *tmpl,
				 struct rtattr **tb)
{
	struct crypto_attr_type *algt;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	struct crypto_skcipher_spawn *spawn;
	const char *cipher_name;
	u32 mask;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	/* caller must be asking for an skcipher */
	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return -EINVAL;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	/* instance and its spawn are allocated as one object */
	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	mask = crypto_requires_sync(algt->type, algt->mask) |
		crypto_requires_off(algt->type, algt->mask,
				    CRYPTO_ALG_NEED_FALLBACK);

	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
				   cipher_name, 0, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_skcipher_alg(spawn);

	/* We only support 16-byte blocks. */
	err = -EINVAL;
	if (crypto_skcipher_alg_ivsize(alg) != CTR_RFC3686_BLOCK_SIZE)
		goto err_drop_spawn;

	/* Not a stream cipher? */
	if (alg->base.cra_blocksize != 1)
		goto err_drop_spawn;

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
		     "rfc3686(%s)", alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
		goto err_drop_spawn;
	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "rfc3686(%s)", alg->base.cra_driver_name) >=
			CRYPTO_MAX_ALG_NAME)
		goto err_drop_spawn;

	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = 1;	/* stream cipher */
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask;

	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;

	inst->alg.ivsize = CTR_RFC3686_IV_SIZE;
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	/* keys grow by the nonce appended at the end (see setkey) */
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
				CTR_RFC3686_NONCE_SIZE;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
				CTR_RFC3686_NONCE_SIZE;

	inst->alg.setkey = crypto_rfc3686_setkey;
	inst->alg.encrypt = crypto_rfc3686_crypt;
	inst->alg.decrypt = crypto_rfc3686_crypt;

	inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc3686_ctx);

	inst->alg.init = crypto_rfc3686_init_tfm;
	inst->alg.exit = crypto_rfc3686_exit_tfm;

	inst->free = crypto_rfc3686_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		goto err_drop_spawn;

out:
	return err;

err_drop_spawn:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	goto out;
}
9f8ef365e
|
327 328 329 330 331 332 333 334 335 336 |
/* Both templates are registered/unregistered together as an array. */
static struct crypto_template crypto_ctr_tmpls[] = {
	{
		.name = "ctr",
		.create = crypto_ctr_create,
		.module = THIS_MODULE,
	}, {
		.name = "rfc3686",
		.create = crypto_rfc3686_create,
		.module = THIS_MODULE,
	},
};
23e353c8a
|
338 339 |
/* Register the "ctr" and "rfc3686" templates on module load. */
static int __init crypto_ctr_module_init(void)
{
	return crypto_register_templates(crypto_ctr_tmpls,
					 ARRAY_SIZE(crypto_ctr_tmpls));
}

/* Unregister both templates on module unload. */
static void __exit crypto_ctr_module_exit(void)
{
	crypto_unregister_templates(crypto_ctr_tmpls,
				    ARRAY_SIZE(crypto_ctr_tmpls));
}

subsys_initcall(crypto_ctr_module_init);
module_exit(crypto_ctr_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CTR block cipher mode of operation");
MODULE_ALIAS_CRYPTO("rfc3686");
MODULE_ALIAS_CRYPTO("ctr");