block/blk-crypto-fallback.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto-fallback: " fmt

#include <crypto/skcipher.h>
#include <linux/blk-cgroup.h>
#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/crypto.h>
#include <linux/keyslot-manager.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/random.h>

#include "blk-crypto-internal.h"

static unsigned int num_prealloc_bounce_pg = 32;
module_param(num_prealloc_bounce_pg, uint, 0);
MODULE_PARM_DESC(num_prealloc_bounce_pg,
		 "Number of preallocated bounce pages for the blk-crypto crypto API fallback");

static unsigned int blk_crypto_num_keyslots = 100;
module_param_named(num_keyslots, blk_crypto_num_keyslots, uint, 0);
MODULE_PARM_DESC(num_keyslots,
		 "Number of keyslots for the blk-crypto crypto API fallback");

static unsigned int num_prealloc_fallback_crypt_ctxs = 128;
module_param(num_prealloc_fallback_crypt_ctxs, uint, 0);
MODULE_PARM_DESC(num_prealloc_fallback_crypt_ctxs,
		 "Number of preallocated bio fallback crypto contexts for blk-crypto to use during crypto API fallback");

struct bio_fallback_crypt_ctx {
	struct bio_crypt_ctx crypt_ctx;
	/*
	 * Copy of the bvec_iter when this bio was submitted.
	 * We only want to en/decrypt the part of the bio as described by the
	 * bvec_iter upon submission because the bio might be split before
	 * being resubmitted.
	 */
	struct bvec_iter crypt_iter;
	union {
		struct {
			struct work_struct work;
			struct bio *bio;
		};
		struct {
			void *bi_private_orig;
			bio_end_io_t *bi_end_io_orig;
		};
	};
};
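
/*
 * The two halves of the union above have disjoint lifetimes: bi_private_orig
 * and bi_end_io_orig hold the READ bio's original fields while it is in
 * flight, and work/bio take over once blk_crypto_fallback_decrypt_endio()
 * has restored those fields and queued the bio for decryption.
 */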

static struct kmem_cache *bio_fallback_crypt_ctx_cache;
static mempool_t *bio_fallback_crypt_ctx_pool;

/*
 * Allocating a crypto tfm during I/O can deadlock, so we have to preallocate
 * all of a mode's tfms when that mode starts being used. Since each mode may
 * need all the keyslots at some point, each mode needs its own tfm for each
 * keyslot; thus, a keyslot may contain tfms for multiple modes.  However, to
 * match the behavior of real inline encryption hardware (which only supports a
 * single encryption context per keyslot), we only allow one tfm per keyslot to
 * be used at a time - the rest of the unused tfms have their keys cleared.
 */
static DEFINE_MUTEX(tfms_init_lock);
static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX];

static struct blk_crypto_keyslot {
	enum blk_crypto_mode_num crypto_mode;
	struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
} *blk_crypto_keyslots;

static struct blk_keyslot_manager blk_crypto_ksm;
static struct workqueue_struct *blk_crypto_wq;
static mempool_t *blk_crypto_bounce_page_pool;
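
/*
 * blk_crypto_wq runs the decryption work items queued by the READ completion
 * path below, and blk_crypto_bounce_page_pool supplies the ciphertext pages
 * that WRITEs are encrypted into.
 */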

/*
 * This is the key we set when evicting a keyslot. This *should* be the all 0's
 * key, but AES-XTS rejects that key, so we use some random bytes instead.
 */
static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE];

static void blk_crypto_evict_keyslot(unsigned int slot)
{
	struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
	enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode;
	int err;

	WARN_ON(slotp->crypto_mode == BLK_ENCRYPTION_MODE_INVALID);

	/* Clear the key in the skcipher */
	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], blank_key,
				     blk_crypto_modes[crypto_mode].keysize);
	WARN_ON(err);
	slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
}

static int blk_crypto_keyslot_program(struct blk_keyslot_manager *ksm,
				      const struct blk_crypto_key *key,
				      unsigned int slot)
{
	struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
	const enum blk_crypto_mode_num crypto_mode =
						key->crypto_cfg.crypto_mode;
	int err;

	if (crypto_mode != slotp->crypto_mode &&
	    slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID)
		blk_crypto_evict_keyslot(slot);

	slotp->crypto_mode = crypto_mode;
	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw,
				     key->size);
	if (err) {
		blk_crypto_evict_keyslot(slot);
		return err;
	}
	return 0;
}

static int blk_crypto_keyslot_evict(struct blk_keyslot_manager *ksm,
				    const struct blk_crypto_key *key,
				    unsigned int slot)
{
	blk_crypto_evict_keyslot(slot);
	return 0;
}

/*
 * The crypto API fallback KSM ops - only used for a bio when it specifies a
 * blk_crypto_key that was not supported by the device's inline encryption
 * hardware.
 */
static const struct blk_ksm_ll_ops blk_crypto_ksm_ll_ops = {
	.keyslot_program	= blk_crypto_keyslot_program,
	.keyslot_evict		= blk_crypto_keyslot_evict,
};
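
/*
 * These ops are invoked by the keyslot manager whenever it binds a key to or
 * evicts a key from one of the fallback's software keyslots; the fallback
 * only ever programs keys into the preallocated skciphers above.
 */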

static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
{
	struct bio *src_bio = enc_bio->bi_private;
	int i;

	for (i = 0; i < enc_bio->bi_vcnt; i++)
		mempool_free(enc_bio->bi_io_vec[i].bv_page,
			     blk_crypto_bounce_page_pool);

	src_bio->bi_status = enc_bio->bi_status;

	bio_put(enc_bio);
	bio_endio(src_bio);
}

static struct bio *blk_crypto_clone_bio(struct bio *bio_src)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src), NULL);
	if (!bio)
		return NULL;
	bio->bi_disk		= bio_src->bi_disk;
	bio->bi_opf		= bio_src->bi_opf;
	bio->bi_ioprio		= bio_src->bi_ioprio;
	bio->bi_write_hint	= bio_src->bi_write_hint;
	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;

	bio_for_each_segment(bv, bio_src, iter)
		bio->bi_io_vec[bio->bi_vcnt++] = bv;

	bio_clone_blkg_association(bio, bio_src);
	blkcg_bio_issue_init(bio);
	bio_clone_skip_dm_default_key(bio, bio_src);

	return bio;
}
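
/*
 * Note that the clone returned above still references the source bio's pages;
 * the encrypt path swaps each bv_page for a bounce page before submission, so
 * the plaintext pages are never handed to the device.
 */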

static bool blk_crypto_alloc_cipher_req(struct blk_ksm_keyslot *slot,
					struct skcipher_request **ciph_req_ret,
					struct crypto_wait *wait)
{
	struct skcipher_request *ciph_req;
	const struct blk_crypto_keyslot *slotp;
	int keyslot_idx = blk_ksm_get_slot_idx(slot);

	slotp = &blk_crypto_keyslots[keyslot_idx];
	ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode],
					  GFP_NOIO);
	if (!ciph_req)
		return false;

	skcipher_request_set_callback(ciph_req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, wait);
	*ciph_req_ret = ciph_req;

	return true;
}

static bool blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	unsigned int i = 0;
	unsigned int num_sectors = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		num_sectors += bv.bv_len >> SECTOR_SHIFT;
		if (++i == BIO_MAX_PAGES)
			break;
	}
	if (num_sectors < bio_sectors(bio)) {
		struct bio *split_bio;

		split_bio = bio_split(bio, num_sectors, GFP_NOIO, NULL);
		if (!split_bio) {
			bio->bi_status = BLK_STS_RESOURCE;
			return false;
		}
		bio_chain(split_bio, bio);
		submit_bio_noacct(bio);
		*bio_ptr = split_bio;
	}

	return true;
}

union blk_crypto_iv {
	__le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];
};

static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
				 union blk_crypto_iv *iv)
{
	int i;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++)
		iv->dun[i] = cpu_to_le64(dun[i]);
}
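
/*
 * Worked example (illustrative, assuming BLK_CRYPTO_DUN_ARRAY_SIZE == 4 and
 * BLK_CRYPTO_MAX_IV_SIZE == 32): a DUN array of { 5, 0, 0, 0 } yields the IV
 * bytes 05 00 00 00 00 00 00 00 followed by 24 zero bytes, i.e. each 64-bit
 * word laid out little-endian, lowest word first, which is the layout passed
 * to the crypto API as the IV via skcipher_request_set_crypt() below.
 */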

/*
 * The crypto API fallback's encryption routine.
 * Allocate a bounce bio for encryption, encrypt the input bio using the
 * crypto API, and replace *bio_ptr with the bounce bio. May split the input
 * bio if it's too large. Returns true on success. Returns false and sets
 * bio->bi_status on error.
 */
static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
{
	struct bio *src_bio, *enc_bio;
	struct bio_crypt_ctx *bc;
	struct blk_ksm_keyslot *slot;
	int data_unit_size;
	struct skcipher_request *ciph_req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	struct scatterlist src, dst;
	union blk_crypto_iv iv;
	unsigned int i, j;
	bool ret = false;
	blk_status_t blk_st;

	/* Split the bio if it's too big for a single-page bvec */
	if (!blk_crypto_split_bio_if_needed(bio_ptr))
		return false;

	src_bio = *bio_ptr;
	bc = src_bio->bi_crypt_context;
	data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;

	/* Allocate bounce bio for encryption */
	enc_bio = blk_crypto_clone_bio(src_bio);
	if (!enc_bio) {
		src_bio->bi_status = BLK_STS_RESOURCE;
		return false;
	}

	/*
	 * Use the crypto API fallback keyslot manager to get a crypto_skcipher
	 * for the algorithm and key specified for this bio.
	 */
	blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot);
	if (blk_st != BLK_STS_OK) {
		src_bio->bi_status = blk_st;
		goto out_put_enc_bio;
	}

	/* and then allocate an skcipher_request for it */
	if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) {
		src_bio->bi_status = BLK_STS_RESOURCE;
		goto out_release_keyslot;
	}

	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
	sg_init_table(&src, 1);
	sg_init_table(&dst, 1);

	skcipher_request_set_crypt(ciph_req, &src, &dst, data_unit_size,
				   iv.bytes);

	/* Encrypt each page in the bounce bio */
	for (i = 0; i < enc_bio->bi_vcnt; i++) {
		struct bio_vec *enc_bvec = &enc_bio->bi_io_vec[i];
		struct page *plaintext_page = enc_bvec->bv_page;
		struct page *ciphertext_page =
			mempool_alloc(blk_crypto_bounce_page_pool, GFP_NOIO);

		enc_bvec->bv_page = ciphertext_page;

		if (!ciphertext_page) {
			src_bio->bi_status = BLK_STS_RESOURCE;
			goto out_free_bounce_pages;
		}

		sg_set_page(&src, plaintext_page, data_unit_size,
			    enc_bvec->bv_offset);
		sg_set_page(&dst, ciphertext_page, data_unit_size,
			    enc_bvec->bv_offset);

		/* Encrypt each data unit in this page */
		for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
			if (crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
					    &wait)) {
				i++;
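				/*
				 * The increment above makes the error path
				 * below free the bounce page already swapped
				 * into this bvec as well.
				 */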
				src_bio->bi_status = BLK_STS_IOERR;
				goto out_free_bounce_pages;
			}
			bio_crypt_dun_increment(curr_dun, 1);
			src.offset += data_unit_size;
			dst.offset += data_unit_size;
		}
	}

	enc_bio->bi_private = src_bio;
	enc_bio->bi_end_io = blk_crypto_fallback_encrypt_endio;
	*bio_ptr = enc_bio;
	ret = true;

	enc_bio = NULL;
	goto out_free_ciph_req;

out_free_bounce_pages:
	while (i > 0)
		mempool_free(enc_bio->bi_io_vec[--i].bv_page,
			     blk_crypto_bounce_page_pool);
out_free_ciph_req:
	skcipher_request_free(ciph_req);
out_release_keyslot:
	blk_ksm_put_slot(slot);
out_put_enc_bio:
	if (enc_bio)
		bio_put(enc_bio);

	return ret;
}

/*
 * The crypto API fallback's main decryption routine.
 * Decrypts input bio in place, and calls bio_endio on the bio.
 */
static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
{
	struct bio_fallback_crypt_ctx *f_ctx =
		container_of(work, struct bio_fallback_crypt_ctx, work);
	struct bio *bio = f_ctx->bio;
	struct bio_crypt_ctx *bc = &f_ctx->crypt_ctx;
	struct blk_ksm_keyslot *slot;
	struct skcipher_request *ciph_req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	union blk_crypto_iv iv;
	struct scatterlist sg;
	struct bio_vec bv;
	struct bvec_iter iter;
	const int data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
	unsigned int i;
	blk_status_t blk_st;

	/*
	 * Use the crypto API fallback keyslot manager to get a crypto_skcipher
	 * for the algorithm and key specified for this bio.
	 */
	blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot);
	if (blk_st != BLK_STS_OK) {
		bio->bi_status = blk_st;
		goto out_no_keyslot;
	}

	/* and then allocate an skcipher_request for it */
	if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) {
		bio->bi_status = BLK_STS_RESOURCE;
		goto out;
	}

	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
	sg_init_table(&sg, 1);
	skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
				   iv.bytes);

	/* Decrypt each segment in the bio */
	__bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) {
		struct page *page = bv.bv_page;

		sg_set_page(&sg, page, data_unit_size, bv.bv_offset);

		/* Decrypt each data unit in the segment */
		for (i = 0; i < bv.bv_len; i += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
			if (crypto_wait_req(crypto_skcipher_decrypt(ciph_req),
					    &wait)) {
				bio->bi_status = BLK_STS_IOERR;
				goto out;
			}
			bio_crypt_dun_increment(curr_dun, 1);
			sg.offset += data_unit_size;
		}
	}

out:
	skcipher_request_free(ciph_req);
	blk_ksm_put_slot(slot);
out_no_keyslot:
	mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
	bio_endio(bio);
}
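
/*
 * On the READ path the ordering is: the device fills the bio's pages with
 * ciphertext, blk_crypto_fallback_decrypt_endio() below runs in atomic
 * completion context and queues blk_crypto_fallback_decrypt_bio() above, and
 * only after the in-place decryption finishes does the bio's original
 * bi_end_io see plaintext.
 */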

/**
 * blk_crypto_fallback_decrypt_endio - queue bio for fallback decryption
 *
 * @bio: the bio to queue
 *
 * Restore bi_private and bi_end_io, and queue the bio for decryption into a
 * workqueue, since this function will be called from an atomic context.
 */
static void blk_crypto_fallback_decrypt_endio(struct bio *bio)
{
	struct bio_fallback_crypt_ctx *f_ctx = bio->bi_private;

	bio->bi_private = f_ctx->bi_private_orig;
	bio->bi_end_io = f_ctx->bi_end_io_orig;

	/* If there was an IO error, don't queue for decrypt. */
	if (bio->bi_status) {
		mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
		bio_endio(bio);
		return;
	}

	INIT_WORK(&f_ctx->work, blk_crypto_fallback_decrypt_bio);
	f_ctx->bio = bio;
	queue_work(blk_crypto_wq, &f_ctx->work);
}

/**
 * blk_crypto_fallback_bio_prep - Prepare a bio to use fallback en/decryption
 *
 * @bio_ptr: pointer to the bio to prepare
 *
 * If the bio is doing a WRITE operation, this splits the bio into two parts
 * if it's too big (see blk_crypto_split_bio_if_needed). It then allocates a
 * bounce bio for the first part, encrypts it, and updates *bio_ptr to point
 * to the bounce bio.
 *
 * For a READ operation, we mark the bio for decryption by using bi_private
 * and bi_end_io.
 *
 * In either case, this function will make the bio look like a regular bio
 * (i.e. as if no encryption context was ever specified) for the purposes of
 * the rest of the stack except for blk-integrity (blk-integrity and
 * blk-crypto are not currently supported together).
 *
 * Return: true on success. Sets bio->bi_status and returns false on error.
 */
bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	struct bio_fallback_crypt_ctx *f_ctx;

	if (WARN_ON_ONCE(!tfms_inited[bc->bc_key->crypto_cfg.crypto_mode])) {
		/* User didn't call blk_crypto_start_using_key() first */
		bio->bi_status = BLK_STS_IOERR;
		return false;
	}

	if (!blk_ksm_crypto_cfg_supported(&blk_crypto_ksm,
					  &bc->bc_key->crypto_cfg)) {
		bio->bi_status = BLK_STS_NOTSUPP;
		return false;
	}

	if (bio_data_dir(bio) == WRITE)
		return blk_crypto_fallback_encrypt_bio(bio_ptr);

	/*
	 * bio READ case: Set up an f_ctx in the bio's bi_private and set the
	 * bi_end_io appropriately to trigger decryption when the bio is ended.
	 */
	f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
	f_ctx->crypt_ctx = *bc;
	f_ctx->crypt_iter = bio->bi_iter;
	f_ctx->bi_private_orig = bio->bi_private;
	f_ctx->bi_end_io_orig = bio->bi_end_io;
	bio->bi_private = (void *)f_ctx;
	bio->bi_end_io = blk_crypto_fallback_decrypt_endio;
	bio_crypt_free_ctx(bio);

	return true;
}

int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
	return blk_ksm_evict_key(&blk_crypto_ksm, key);
}

static bool blk_crypto_fallback_inited;
static int blk_crypto_fallback_init(void)
{
	int i;
	int err;

	if (blk_crypto_fallback_inited)
		return 0;

	prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);

	err = blk_ksm_init(&blk_crypto_ksm, blk_crypto_num_keyslots);
	if (err)
		goto out;
	err = -ENOMEM;

	blk_crypto_ksm.ksm_ll_ops = blk_crypto_ksm_ll_ops;
	blk_crypto_ksm.max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
	blk_crypto_ksm.features = BLK_CRYPTO_FEATURE_STANDARD_KEYS;

	/* All blk-crypto modes have a crypto API fallback. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
		blk_crypto_ksm.crypto_modes_supported[i] = 0xFFFFFFFF;
	blk_crypto_ksm.crypto_modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;

	blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
					WQ_UNBOUND | WQ_HIGHPRI |
					WQ_MEM_RECLAIM, num_online_cpus());
	if (!blk_crypto_wq)
		goto fail_free_ksm;

	blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
				      sizeof(blk_crypto_keyslots[0]),
				      GFP_KERNEL);
	if (!blk_crypto_keyslots)
		goto fail_free_wq;

	blk_crypto_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_bounce_pg, 0);
	if (!blk_crypto_bounce_page_pool)
		goto fail_free_keyslots;

	bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
	if (!bio_fallback_crypt_ctx_cache)
		goto fail_free_bounce_page_pool;

	bio_fallback_crypt_ctx_pool =
		mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
					 bio_fallback_crypt_ctx_cache);
	if (!bio_fallback_crypt_ctx_pool)
		goto fail_free_crypt_ctx_cache;

	blk_crypto_fallback_inited = true;

	return 0;
fail_free_crypt_ctx_cache:
	kmem_cache_destroy(bio_fallback_crypt_ctx_cache);
fail_free_bounce_page_pool:
	mempool_destroy(blk_crypto_bounce_page_pool);
fail_free_keyslots:
	kfree(blk_crypto_keyslots);
fail_free_wq:
	destroy_workqueue(blk_crypto_wq);
fail_free_ksm:
	blk_ksm_destroy(&blk_crypto_ksm);
out:
	return err;
}
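
/*
 * Everything above is set up lazily: blk_crypto_fallback_init() runs the
 * first time a mode is started below, so kernels that never hit the fallback
 * pay essentially no memory cost for it.
 */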

/*
 * Prepare blk-crypto-fallback for the specified crypto mode.
 * Returns -ENOPKG if the needed crypto API support is missing.
 */
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
	const char *cipher_str = blk_crypto_modes[mode_num].cipher_str;
	struct blk_crypto_keyslot *slotp;
	unsigned int i;
	int err = 0;

	/*
	 * Fast path
	 * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
	 * for each i are visible before we try to access them.
	 */
	if (likely(smp_load_acquire(&tfms_inited[mode_num])))
		return 0;

	mutex_lock(&tfms_init_lock);
	if (tfms_inited[mode_num])
		goto out;

	err = blk_crypto_fallback_init();
	if (err)
		goto out;

	for (i = 0; i < blk_crypto_num_keyslots; i++) {
		slotp = &blk_crypto_keyslots[i];
		slotp->tfms[mode_num] = crypto_alloc_skcipher(cipher_str, 0, 0);
		if (IS_ERR(slotp->tfms[mode_num])) {
			err = PTR_ERR(slotp->tfms[mode_num]);
			if (err == -ENOENT) {
				pr_warn_once("Missing crypto API support for \"%s\"\n",
					     cipher_str);
				err = -ENOPKG;
			}
			slotp->tfms[mode_num] = NULL;
			goto out_free_tfms;
		}

		crypto_skcipher_set_flags(slotp->tfms[mode_num],
					  CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
	}

	/*
	 * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
	 * for each i are visible before we set tfms_inited[mode_num].
	 */
	smp_store_release(&tfms_inited[mode_num], true);
	goto out;

out_free_tfms:
	for (i = 0; i < blk_crypto_num_keyslots; i++) {
		slotp = &blk_crypto_keyslots[i];
		crypto_free_skcipher(slotp->tfms[mode_num]);
		slotp->tfms[mode_num] = NULL;
	}
out:
	mutex_unlock(&tfms_init_lock);
	return err;
}