block/blk-crypto.c

  // SPDX-License-Identifier: GPL-2.0
  /*
   * Copyright 2019 Google LLC
   */
  
  /*
   * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
   */
  
  #define pr_fmt(fmt) "blk-crypto: " fmt
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/keyslot-manager.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"

const struct blk_crypto_mode blk_crypto_modes[] = {
  	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
  		.cipher_str = "xts(aes)",
  		.keysize = 64,
  		.ivsize = 16,
  	},
  	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
  		.cipher_str = "essiv(cbc(aes),sha256)",
  		.keysize = 16,
  		.ivsize = 16,
  	},
  	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
  		.cipher_str = "adiantum(xchacha12,aes)",
  		.keysize = 32,
  		.ivsize = 32,
  	},
  };
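
/*
 * Reading the table above, for example:
 * blk_crypto_modes[BLK_ENCRYPTION_MODE_AES_256_XTS] says an AES-256-XTS key
 * occupies 64 raw bytes (XTS needs two 256-bit AES keys) and that the mode
 * uses 16-byte IVs.
 */
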
  /*
   * This number needs to be at least (the number of threads doing IO
   * concurrently) * (maximum recursive depth of a bio), so that we don't
   * deadlock on crypt_ctx allocations. The default is chosen to be the same
   * as the default number of post read contexts in both EXT4 and F2FS.
   */
  static int num_prealloc_crypt_ctxs = 128;
  
  module_param(num_prealloc_crypt_ctxs, int, 0444);
  MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
  		"Number of bio crypto contexts to preallocate");
  
  static struct kmem_cache *bio_crypt_ctx_cache;
  static mempool_t *bio_crypt_ctx_pool;
  
  static int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
  	if (!bio_crypt_ctx_cache)
  		goto out_no_mem;
  
  	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
  						      bio_crypt_ctx_cache);
  	if (!bio_crypt_ctx_pool)
  		goto out_no_mem;
  
  	/* This is assumed in various places. */
  	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);
  
  	/* Sanity check that no algorithm exceeds the defined limits. */
  	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
  		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
  		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
out_no_mem:
	panic("Failed to allocate mem for bio crypt ctxs\n");
}
subsys_initcall(bio_crypt_ctx_init);

void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
		       const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
{
  	struct bio_crypt_ctx *bc;
  
  	/*
  	 * The caller must use a gfp_mask that contains __GFP_DIRECT_RECLAIM so
  	 * that the mempool_alloc() can't fail.
  	 */
  	WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));
  
  	bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);

  	bc->bc_key = key;
  	memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));

  	bio->bi_crypt_context = bc;
  }
  EXPORT_SYMBOL_GPL(bio_crypt_set_ctx);
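
/*
 * Illustrative sketch (not part of this file's code): a filesystem-style
 * caller might attach an encryption context to a bio like this, where "key"
 * is a hypothetical blk_crypto_key already set up via blk_crypto_init_key()
 * and "first_dun" is the index of the bio's first data unit:
 *
 *	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { first_dun };
 *
 *	bio_crypt_set_ctx(bio, key, dun, GFP_NOIO);
 *
 * GFP_NOIO includes __GFP_DIRECT_RECLAIM, so the allocation above can't fail.
 */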

  void __bio_crypt_free_ctx(struct bio *bio)
  {
  	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
  	bio->bi_crypt_context = NULL;
  }
int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	if (!dst->bi_crypt_context)
		return -ENOMEM;
	*dst->bi_crypt_context = *src->bi_crypt_context;
	return 0;
}
EXPORT_SYMBOL_GPL(__bio_crypt_clone);
  
  /* Increments @dun by @inc, treating @dun as a multi-limb integer. */
  void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
  			     unsigned int inc)
  {
  	int i;
  
  	for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
  		dun[i] += inc;
  		/*
  		 * If the addition in this limb overflowed, then we need to
  		 * carry 1 into the next limb. Else the carry is 0.
  		 */
  		if (dun[i] < inc)
  			inc = 1;
  		else
  			inc = 0;
  	}
  }
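
/*
 * Worked example (assuming two 64-bit limbs): incrementing
 * dun = { U64_MAX, 5 } by inc = 2 wraps the low limb to 1, so a carry of 1
 * propagates into the next limb and the result is dun = { 1, 6 }.
 */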

  void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
  {
  	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

  	bio_crypt_dun_increment(bc->bc_dun,
  				bytes >> bc->bc_key->data_unit_size_bits);
  }
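
/*
 * E.g., with 4096-byte data units (data_unit_size_bits == 12), advancing a
 * bio by 8192 bytes increments its DUN by 8192 >> 12 == 2 data units.
 */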

  /*
   * Returns true if @bc->bc_dun plus @bytes converted to data units is equal to
   * @next_dun, treating the DUNs as multi-limb integers.
   */
  bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
  				 unsigned int bytes,
  				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
  {
  	int i;
  	unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits;
  
  	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
  		if (bc->bc_dun[i] + carry != next_dun[i])
  			return false;
  		/*
  		 * If the addition in this limb overflowed, then we need to
  		 * carry 1 into the next limb. Else the carry is 0.
  		 */
  		if ((bc->bc_dun[i] + carry) < carry)
  			carry = 1;
  		else
  			carry = 0;
  	}
  	/* If the DUN wrapped through 0, don't treat it as contiguous. */
  	return carry == 0;
  }
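
/*
 * E.g., with 4096-byte data units, an 8192-byte bio whose context starts at
 * DUN 10 is contiguous with a following bio whose DUN starts at 12, so the
 * two can be merged as far as encryption is concerned.
 */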
  
  /*
   * Checks that two bio crypt contexts are compatible - i.e. that
   * they are mergeable except for data_unit_num continuity.
   */
  static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1,
  				     struct bio_crypt_ctx *bc2)
  {
  	if (!bc1)
  		return !bc2;
  
  	return bc2 && bc1->bc_key == bc2->bc_key;
  }
  
  bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
  {
  	return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
  }
  
  /*
   * Checks that two bio crypt contexts are compatible, and also
   * that their data_unit_nums are continuous (and can hence be merged)
   * in the order @bc1 followed by @bc2.
   */
  bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
  			     struct bio_crypt_ctx *bc2)
  {
  	if (!bio_crypt_ctx_compatible(bc1, bc2))
  		return false;
  
  	return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
  }
  
  /* Check that all I/O segments are data unit aligned. */
  static bool bio_crypt_check_alignment(struct bio *bio)
  {
  	const unsigned int data_unit_size =
  		bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
  	struct bvec_iter iter;
  	struct bio_vec bv;
  
  	bio_for_each_segment(bv, bio, iter) {
  		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
  			return false;
  	}
  	return true;
  }
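
/*
 * E.g., with a 4096-byte data unit size, a 512-byte segment (or one starting
 * at a 512-byte offset within its page) fails this check, since partial data
 * units can't be en/decrypted independently.
 */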

  blk_status_t __blk_crypto_init_request(struct request *rq)
  {
  	return blk_ksm_get_slot_for_key(rq->q->ksm, rq->crypt_ctx->bc_key,
  					&rq->crypt_keyslot);
  }
  
  /**
   * __blk_crypto_free_request - Uninitialize the crypto fields of a request.
   *
   * @rq: The request whose crypto fields to uninitialize.
   *
   * Completely uninitializes the crypto fields of a request. If a keyslot has
   * been programmed into some inline encryption hardware, that keyslot is
   * released. The rq->crypt_ctx is also freed.
   */
  void __blk_crypto_free_request(struct request *rq)
  {
  	blk_ksm_put_slot(rq->crypt_keyslot);
  	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
  	blk_crypto_rq_set_defaults(rq);
  }
  
  /**
 * __blk_crypto_bio_prep - Prepare bio for inline encryption
 *
 * @bio_ptr: pointer to original bio pointer
 *
 * If the bio crypt context provided for the bio is supported by the underlying
 * device's inline encryption hardware, do nothing.
 *
 * Otherwise, try to perform en/decryption for this bio by falling back to the
 * kernel crypto API. When the crypto API fallback is used for encryption,
 * blk-crypto may choose to split the bio into 2 - the first one that will
 * continue to be processed and the second one that will be resubmitted via
 * submit_bio_noacct. A bounce bio will be allocated to encrypt the contents
 * of the aforementioned "first one", and *bio_ptr will be updated to this
 * bounce bio.
 *
 * Caller must ensure bio has bio_crypt_ctx.
 *
 * Return: true on success; false on error (and bio->bi_status will be set
 *	   appropriately, and bio_endio() will have been called so bio
 *	   submission should abort).
 */
bool __blk_crypto_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;

	/* Error if bio has no data. */
	if (WARN_ON_ONCE(!bio_has_data(bio))) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	if (!bio_crypt_check_alignment(bio)) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	/*
	 * Success if device supports the encryption context, or if we succeeded
	 * in falling back to the crypto API.
	 */
	if (blk_ksm_crypto_cfg_supported(bio->bi_disk->queue->ksm,
					 &bc_key->crypto_cfg))
		return true;

	if (blk_crypto_fallback_bio_prep(bio_ptr))
		return true;
fail:
	bio_endio(*bio_ptr);
	return false;
}
int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask)
{
	if (!rq->crypt_ctx) {
		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
		if (!rq->crypt_ctx)
			return -ENOMEM;
	}
	*rq->crypt_ctx = *bio->bi_crypt_context;
	return 0;
  }
  
  /**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @raw_key: Pointer to the raw key.
 * @raw_key_size: Size of raw key.  Must be at least the required size for the
 *                chosen @crypto_mode; see blk_crypto_modes[].  (It's allowed
 *                to be longer than the mode's actual key size, in order to
 *                support inline encryption hardware that accepts wrapped keys.
 *                @is_hw_wrapped has to be set for such keys)
 * @is_hw_wrapped: Denotes @raw_key is wrapped.
 * @crypto_mode: identifier for the encryption algorithm to use
 * @dun_bytes: number of bytes that will be used to specify the DUN when this
 *	       key is used
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, -errno on failure.  The caller is responsible for
 *	   zeroizing both blk_key and raw_key when done with them.
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key,
			const u8 *raw_key, unsigned int raw_key_size,
			bool is_hw_wrapped,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	BUILD_BUG_ON(BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE < BLK_CRYPTO_MAX_KEY_SIZE);
	mode = &blk_crypto_modes[crypto_mode];
	if (is_hw_wrapped) {
		if (raw_key_size < mode->keysize ||
		    raw_key_size > BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE)
			return -EINVAL;
	} else {
		if (raw_key_size != mode->keysize)
			return -EINVAL;
	}

	if (dun_bytes == 0 || dun_bytes > BLK_CRYPTO_MAX_IV_SIZE)
		return -EINVAL;
	if (!is_power_of_2(data_unit_size))
		return -EINVAL;
	blk_key->crypto_cfg.crypto_mode = crypto_mode;
	blk_key->crypto_cfg.dun_bytes = dun_bytes;
	blk_key->crypto_cfg.data_unit_size = data_unit_size;
	blk_key->crypto_cfg.is_hw_wrapped = is_hw_wrapped;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = raw_key_size;
	memcpy(blk_key->raw, raw_key, raw_key_size);

	return 0;
}
  EXPORT_SYMBOL_GPL(blk_crypto_init_key);
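
/*
 * Illustrative sketch (hypothetical caller; "raw_key" is assumed to hold a
 * standard, non-hardware-wrapped key): set up an AES-256-XTS key with 8-byte
 * DUNs and 4096-byte data units:
 *
 *	u8 raw_key[64];
 *	struct blk_crypto_key blk_key;
 *	int err;
 *
 *	err = blk_crypto_init_key(&blk_key, raw_key, sizeof(raw_key), false,
 *				  BLK_ENCRYPTION_MODE_AES_256_XTS, 8, 4096);
 */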

/*
 * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
 * request queue it's submitted to supports inline crypto, or the
 * blk-crypto-fallback is enabled and supports the cfg).
 */
bool blk_crypto_config_supported(struct request_queue *q,
				 const struct blk_crypto_config *cfg)
{
	if (IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) &&
	    !cfg->is_hw_wrapped)
		return true;
	return blk_ksm_crypto_cfg_supported(q->ksm, cfg);
}
/**
 * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
 * @key: A key to use on the device
 * @q: the request queue for the device
 *
 * Upper layers must call this function to ensure that either the hardware
 * supports the key's crypto settings, or the crypto API fallback has transforms
 * for the needed mode allocated and ready to go. This function may allocate
 * an skcipher, and *should not* be called from the data path, since that might
 * cause a deadlock.
 *
 * Return: 0 on success; -ENOPKG if the hardware doesn't support the key and
 *	   blk-crypto-fallback is either disabled or the needed algorithm
 *	   is disabled in the crypto API; or another -errno code.
 */
int blk_crypto_start_using_key(const struct blk_crypto_key *key,
			       struct request_queue *q)
{
	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
		return 0;
	if (key->crypto_cfg.is_hw_wrapped) {
		pr_warn_once("hardware doesn't support wrapped keys\n");
		return -EOPNOTSUPP;
	}
	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
}
  EXPORT_SYMBOL_GPL(blk_crypto_start_using_key);
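
/*
 * Sketch of the expected key lifecycle (pieced together from the comments in
 * this file, not spelled out anywhere in it): blk_crypto_init_key(), then
 * blk_crypto_start_using_key() once from process context (e.g. at mount or
 * key-setup time, not in the I/O path), then bio_crypt_set_ctx() on each bio,
 * and finally blk_crypto_evict_key() once all I/O under the key is done.
 */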
  
/**
 * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
 *			    it may have been programmed into
 * @q: The request queue whose associated inline encryption hardware this key
 *     might have been programmed into
 * @key: The key to evict
 *
 * Upper layers (filesystems) must call this function to ensure that a key is
 * evicted from any hardware that it might have been programmed into.  The key
 * must not be in use by any in-flight IO when this function is called.
 *
 * Return: 0 on success or if key is not present in the q's ksm, -errno on error.
 */
int blk_crypto_evict_key(struct request_queue *q,
			 const struct blk_crypto_key *key)
{
	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
		return blk_ksm_evict_key(q->ksm, key);

	/*
	 * If the request queue's associated inline encryption hardware didn't
	 * have support for the key, then the key might have been programmed
	 * into the fallback keyslot manager, so try to evict from there.
	 */
	return blk_crypto_fallback_evict_key(key);
}
  EXPORT_SYMBOL_GPL(blk_crypto_evict_key);
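
/*
 * Illustrative sketch (hypothetical teardown path): once no in-flight IO uses
 * the key, e.g. when the encrypted inode is evicted or the filesystem is
 * unmounted, evict it and zeroize it as the blk_crypto_init_key() comment
 * requires:
 *
 *	err = blk_crypto_evict_key(q, &blk_key);
 *	memzero_explicit(&blk_key, sizeof(blk_key));
 */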