Blame view
crypto/xts.c
11.7 KB
2874c5fd2
|
1 |
// SPDX-License-Identifier: GPL-2.0-or-later |
f19f5111c
|
2 3 |
/* XTS: as defined in IEEE1619/D16 * http://grouper.ieee.org/groups/1619/email/pdf00086.pdf |
f19f5111c
|
4 5 6 |
* * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org> * |
ddbc73616
|
7 |
* Based on ecb.c |
f19f5111c
|
8 |
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> |
f19f5111c
|
9 |
*/ |
f1c131b45
|
10 11 |
#include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> |
f19f5111c
|
12 13 14 15 16 17 |
#include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/scatterlist.h> #include <linux/slab.h> |
ce0045561
|
18 |
#include <crypto/xts.h> |
f19f5111c
|
19 20 |
#include <crypto/b128ops.h> #include <crypto/gf128mul.h> |
a874f5910
|
/*
 * Per-transform context: the two underlying ciphers XTS is built from.
 */
struct xts_tfm_ctx {
	struct crypto_skcipher *child;	/* "ecb(...)" skcipher for the data (Key1) */
	struct crypto_cipher *tweak;	/* single-block cipher for the tweak (Key2) */
};
f1c131b45
|
/*
 * Per-instance context, allocated together with the skcipher_instance in
 * xts_create().
 */
struct xts_instance_ctx {
	struct crypto_skcipher_spawn spawn;	/* the underlying "ecb(...)" algorithm */
	char name[CRYPTO_MAX_ALG_NAME];		/* bare cipher name used to allocate ->tweak */
};
a874f5910
|
/*
 * Per-request context, carved out of the skcipher request tail
 * (see reqsize set in xts_init_tfm()).
 */
struct xts_request_ctx {
	le128 t;			/* current tweak value T */
	struct scatterlist *tail;	/* dst position of the final partial block (CTS) */
	struct scatterlist sg[2];	/* backing store for scatterwalk_ffwd() */
	struct skcipher_request subreq;	/* request forwarded to the ecb child */
};
a874f5910
|
/*
 * xts_setkey() - split the XTS key and program both underlying ciphers.
 *
 * The supplied key is twice the cipher key length: the first half (Key1)
 * keys the data cipher, the second half (Key2) keys the tweak cipher.
 * Request flags (CRYPTO_TFM_REQ_MASK) are propagated from the parent to
 * both children before keying them.
 *
 * Returns 0 on success or a negative errno (e.g. from xts_verify_key(),
 * which among other things rejects keys whose halves are identical).
 */
static int xts_setkey(struct crypto_skcipher *parent, const u8 *key,
		      unsigned int keylen)
{
	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
	int err;

	err = xts_verify_key(parent, key, keylen);
	if (err)
		return err;

	/* keylen now refers to a single half of the combined key */
	keylen /= 2;

	/* we need two cipher instances: one to compute the initial 'tweak'
	 * by encrypting the IV (usually the 'plain' iv) and the other
	 * one to encrypt and decrypt the data */

	/* tweak cipher, uses Key2 i.e. the second half of *key */
	tweak = ctx->tweak;
	crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(tweak, key + keylen, keylen);
	if (err)
		return err;

	/* data cipher, uses Key1 i.e. the first half of *key */
	child = ctx->child;
	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(child, key, keylen);
}
f19f5111c
|
66 |
|
78105c7e7
|
/*
 * We compute the tweak masks twice (both before and after the ECB encryption or
 * decryption) to avoid having to allocate a temporary buffer and/or make
 * multiple calls to the 'ecb(..)' instance, which usually would be slower than
 * just doing the gf128mul_x_ble() calls again.
 */
a874f5910
|
/*
 * xts_xor_tweak() - XOR the running tweak stream into req->dst in place.
 *
 * Walks the request and XORs each 16-byte block with T, advancing T via
 * gf128mul_x_ble() after every block.  Called once before ("pre") and once
 * after ("post") the ECB pass; @second_pass selects the subrequest (and
 * re-sets the TFM on it to enforce correct alignment).
 *
 * When the request length is not a multiple of XTS_BLOCK_SIZE (@cts), the
 * last complete block needs ciphertext-stealing treatment: the tweak to use
 * for it differs between encryption and decryption, and the tweak needed
 * later by xts_cts_final() is saved in rctx->t before returning early.
 */
static int xts_xor_tweak(struct skcipher_request *req, bool second_pass,
			 bool enc)
{
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
	const int bs = XTS_BLOCK_SIZE;
	struct skcipher_walk w;
	le128 t = rctx->t;
	int err;

	if (second_pass) {
		req = &rctx->subreq;
		/* set to our TFM to enforce correct alignment: */
		skcipher_request_set_tfm(req, tfm);
	}
	err = skcipher_walk_virt(&w, req, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		le128 *wsrc;
		le128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			/* last full block of a CTS request? */
			if (unlikely(cts) &&
			    w.total - w.nbytes + avail < 2 * XTS_BLOCK_SIZE) {
				if (!enc) {
					/*
					 * Decrypt: the last full block uses the
					 * *next* tweak; remember the current one
					 * for the stolen tail.
					 */
					if (second_pass)
						rctx->t = t;
					gf128mul_x_ble(&t, &t);
				}
				le128_xor(wdst, &t, wsrc);
				/* Encrypt: save the follow-on tweak for CTS */
				if (enc && second_pass)
					gf128mul_x_ble(&rctx->t, &t);
				skcipher_walk_done(&w, avail - bs);
				return 0;
			}

			le128_xor(wdst++, &t, wsrc++);
			gf128mul_x_ble(&t, &t);
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	return err;
}
a874f5910
|
/* First tweak pass, applied before the ECB encryption/decryption. */
static int xts_xor_tweak_pre(struct skcipher_request *req, bool enc)
{
	return xts_xor_tweak(req, false, enc);
}
a874f5910
|
/* Second tweak pass, applied after the ECB encryption/decryption. */
static int xts_xor_tweak_post(struct skcipher_request *req, bool enc)
{
	return xts_xor_tweak(req, true, enc);
}
a874f5910
|
/*
 * xts_cts_done() - async completion for the ciphertext-stealing subrequest.
 *
 * On success, applies the final tweak XOR to the just-processed block at
 * rctx->tail (the step xts_cts_final() performs inline in the synchronous
 * case), then completes the original request.
 */
static void xts_cts_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	le128 b;

	if (!err) {
		struct xts_request_ctx *rctx = skcipher_request_ctx(req);

		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
		le128_xor(&b, &rctx->t, &b);
		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
	}

	skcipher_request_complete(req, err);
}
a874f5910
|
/*
 * xts_cts_final() - handle the partial final block via ciphertext stealing.
 *
 * Builds the stolen block in b[]: the already-processed last full block is
 * duplicated, its head overwritten with the @tail leftover source bytes,
 * XORed with the saved tweak rctx->t, and written back over dst.  The child
 * is then run (@crypt is crypto_skcipher_encrypt or _decrypt) on that one
 * block in place, and the tweak XOR is applied again afterwards — either
 * inline here, or in xts_cts_done() if the child completes asynchronously.
 *
 * Returns 0, a negative errno, or -EINPROGRESS/-EBUSY from the child.
 */
static int xts_cts_final(struct skcipher_request *req,
			 int (*crypt)(struct skcipher_request *req))
{
	const struct xts_tfm_ctx *ctx =
		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int tail = req->cryptlen % XTS_BLOCK_SIZE;
	le128 b[2];
	int err;

	/* point rctx->tail at the last full block already written to dst */
	rctx->tail = scatterwalk_ffwd(rctx->sg, req->dst,
				      offset - XTS_BLOCK_SIZE);

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
	b[1] = b[0];
	/* steal: overwrite the head of b[0] with the leftover src bytes */
	scatterwalk_map_and_copy(b, req->src, offset, tail, 0);

	le128_xor(b, &rctx->t, b);

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, xts_cts_done,
				      req);
	skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
				   XTS_BLOCK_SIZE, NULL);

	err = crypt(subreq);
	if (err)
		return err;

	/* synchronous completion: apply the post-ECB tweak XOR here */
	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
	le128_xor(b, &rctx->t, b);
	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);

	return 0;
}
a874f5910
|
/*
 * xts_encrypt_done() - async completion for the main encryption subrequest.
 *
 * Runs the post-ECB tweak pass and, for non-block-multiple requests, kicks
 * off ciphertext stealing; returns early if that in turn went async.
 */
static void xts_encrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;

	if (!err) {
		struct xts_request_ctx *rctx = skcipher_request_ctx(req);

		/* we are in softirq context here; must not sleep */
		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
		err = xts_xor_tweak_post(req, true);

		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
			err = xts_cts_final(req, crypto_skcipher_encrypt);
			if (err == -EINPROGRESS)
				return;
		}
	}

	skcipher_request_complete(req, err);
}
a874f5910
|
/*
 * xts_decrypt_done() - async completion for the main decryption subrequest.
 *
 * Mirror image of xts_encrypt_done() for the decrypt direction.
 */
static void xts_decrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;

	if (!err) {
		struct xts_request_ctx *rctx = skcipher_request_ctx(req);

		/* we are in softirq context here; must not sleep */
		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
		err = xts_xor_tweak_post(req, false);

		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
			err = xts_cts_final(req, crypto_skcipher_decrypt);
			if (err == -EINPROGRESS)
				return;
		}
	}

	skcipher_request_complete(req, err);
}
a874f5910
|
/*
 * xts_init_crypt() - common setup for xts_encrypt()/xts_decrypt().
 *
 * Rejects requests shorter than one block, points the ECB subrequest at
 * the full-block portion of dst (processed in place; the tweak passes have
 * already copied/will copy src into dst), and derives the initial tweak T
 * by encrypting the IV with the tweak cipher.
 *
 * @compl: completion callback used if the child runs asynchronously.
 */
static int xts_init_crypt(struct skcipher_request *req,
			  crypto_completion_t compl)
{
	const struct xts_tfm_ctx *ctx =
		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, compl, req);
	skcipher_request_set_crypt(subreq, req->dst, req->dst,
				   req->cryptlen & ~(XTS_BLOCK_SIZE - 1), NULL);

	/* calculate first value of T */
	crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);

	return 0;
}
a874f5910
|
240 |
static int xts_encrypt(struct skcipher_request *req) |
f1c131b45
|
241 |
{ |
a874f5910
|
242 |
struct xts_request_ctx *rctx = skcipher_request_ctx(req); |
78105c7e7
|
243 |
struct skcipher_request *subreq = &rctx->subreq; |
8083b1bf8
|
244 |
int err; |
a874f5910
|
245 246 |
err = xts_init_crypt(req, xts_encrypt_done) ?: xts_xor_tweak_pre(req, true) ?: |
8083b1bf8
|
247 |
crypto_skcipher_encrypt(subreq) ?: |
a874f5910
|
248 |
xts_xor_tweak_post(req, true); |
f1c131b45
|
249 |
|
8083b1bf8
|
250 251 |
if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0)) return err; |
a874f5910
|
252 |
return xts_cts_final(req, crypto_skcipher_encrypt); |
f1c131b45
|
253 |
} |
a874f5910
|
254 |
static int xts_decrypt(struct skcipher_request *req) |
f1c131b45
|
255 |
{ |
a874f5910
|
256 |
struct xts_request_ctx *rctx = skcipher_request_ctx(req); |
78105c7e7
|
257 |
struct skcipher_request *subreq = &rctx->subreq; |
8083b1bf8
|
258 |
int err; |
a874f5910
|
259 260 |
err = xts_init_crypt(req, xts_decrypt_done) ?: xts_xor_tweak_pre(req, false) ?: |
8083b1bf8
|
261 |
crypto_skcipher_decrypt(subreq) ?: |
a874f5910
|
262 |
xts_xor_tweak_post(req, false); |
8083b1bf8
|
263 264 265 |
if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0)) return err; |
78105c7e7
|
266 |
|
a874f5910
|
267 |
return xts_cts_final(req, crypto_skcipher_decrypt); |
f19f5111c
|
268 |
} |
a874f5910
|
/*
 * xts_init_tfm() - per-transform init: allocate both underlying ciphers.
 *
 * Grabs the "ecb(...)" skcipher from the instance spawn and allocates the
 * bare tweak cipher by name (ictx->name, filled in by xts_create()).
 * Sets the request context size to the child's plus our xts_request_ctx.
 */
static int xts_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;

	child = crypto_spawn_skcipher(&ictx->spawn);
	if (IS_ERR(child))
		return PTR_ERR(child);

	ctx->child = child;

	tweak = crypto_alloc_cipher(ictx->name, 0, 0);
	if (IS_ERR(tweak)) {
		/* undo the child allocation on failure */
		crypto_free_skcipher(ctx->child);
		return PTR_ERR(tweak);
	}

	ctx->tweak = tweak;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
					 sizeof(struct xts_request_ctx));

	return 0;
}
a874f5910
|
/* xts_exit_tfm() - release both ciphers allocated by xts_init_tfm(). */
static void xts_exit_tfm(struct crypto_skcipher *tfm)
{
	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
	crypto_free_cipher(ctx->tweak);
}
a874f5910
|
/* xts_free_instance() - drop the spawn and free the instance memory. */
static void xts_free_instance(struct skcipher_instance *inst)
{
	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ictx->spawn);
	kfree(inst);
}
a874f5910
|
/*
 * xts_create() - template instantiation: build "xts(CIPHER)".
 *
 * Grabs the named algorithm as the ECB child — first trying the name as
 * given, then wrapped in "ecb(...)" if it wasn't found — validates that it
 * is a 16-byte-block cipher with no IV of its own, and derives the bare
 * cipher name (stored in ctx->name) that xts_init_tfm() later uses to
 * allocate the tweak cipher.  Key sizes are doubled since the XTS key is
 * the concatenation of Key1 and Key2.
 */
static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct xts_instance_ctx *ctx;
	struct skcipher_alg *alg;
	const char *cipher_name;
	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return err;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);

	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
				   cipher_name, 0, mask);
	if (err == -ENOENT) {
		/* not found as-is: retry with the name wrapped in ecb(...) */
		err = -ENAMETOOLONG;
		if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(&ctx->spawn,
					   skcipher_crypto_instance(inst),
					   ctx->name, 0, mask);
	}

	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(&ctx->spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
		goto err_free_inst;

	/* the child must be a pure block cipher mode with no IV */
	if (crypto_skcipher_alg_ivsize(alg))
		goto err_free_inst;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
				  &alg->base);
	if (err)
		goto err_free_inst;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		unsigned len;

		/* copy "CIPHER)" then strip the trailing ')' */
		len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
		if (len < 2 || len >= sizeof(ctx->name))
			goto err_free_inst;

		if (ctx->name[len - 1] != ')')
			goto err_free_inst;

		ctx->name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
			err = -ENAMETOOLONG;
			goto err_free_inst;
		}
	} else
		goto err_free_inst;

	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
	/* tweak math operates on u64 pairs; ensure at least u64 alignment */
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(u64) - 1);

	inst->alg.ivsize = XTS_BLOCK_SIZE;
	/* XTS key = Key1 || Key2, hence double the child's key sizes */
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;

	inst->alg.base.cra_ctxsize = sizeof(struct xts_tfm_ctx);

	inst->alg.init = xts_init_tfm;
	inst->alg.exit = xts_exit_tfm;

	inst->alg.setkey = xts_setkey;
	inst->alg.encrypt = xts_encrypt;
	inst->alg.decrypt = xts_decrypt;

	inst->free = xts_free_instance;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		xts_free_instance(inst);
	}
	return err;
}
a874f5910
|
/* Template descriptor registered with the crypto API as "xts". */
static struct crypto_template xts_tmpl = {
	.name = "xts",
	.create = xts_create,
	.module = THIS_MODULE,
};
a874f5910
|
/* Module init: register the "xts" template. */
static int __init xts_module_init(void)
{
	return crypto_register_template(&xts_tmpl);
}
a874f5910
|
/* Module exit: unregister the "xts" template. */
static void __exit xts_module_exit(void)
{
	crypto_unregister_template(&xts_tmpl);
}
a874f5910
|
/* subsys_initcall: other crypto code may depend on this template at boot */
subsys_initcall(xts_module_init);
module_exit(xts_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");
MODULE_ALIAS_CRYPTO("xts");