block/blk-crypto-internal.h

  /* SPDX-License-Identifier: GPL-2.0 */
  /*
   * Copyright 2019 Google LLC
   */
  
  #ifndef __LINUX_BLK_CRYPTO_INTERNAL_H
  #define __LINUX_BLK_CRYPTO_INTERNAL_H
  
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  
  /* Represents a crypto mode supported by blk-crypto  */
  struct blk_crypto_mode {
  	const char *cipher_str; /* crypto API name (for fallback case) */
  	unsigned int keysize; /* key size in bytes */
  	unsigned int ivsize; /* iv size in bytes */
  };
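
/* Indexed by enum blk_crypto_mode_num */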
  extern const struct blk_crypto_mode blk_crypto_modes[];
  #ifdef CONFIG_BLK_INLINE_ENCRYPTION
  
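/*
 * Add @inc to the multi-limb data unit number @dun, propagating any carry
 * into the higher limbs.
 */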
  void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
  			     unsigned int inc);
  
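/*
 * Return true if @bio's crypt context is compatible with @rq's: either both
 * are unencrypted, or both are encrypted with the same key.
 */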
  bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);
  
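/*
 * Return true if a bio/request with crypt context @bc2 may directly follow
 * @bc1_bytes bytes of data covered by crypt context @bc1, i.e. the contexts
 * are compatible and their data unit numbers are contiguous.
 */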
  bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
  			     struct bio_crypt_ctx *bc2);
  
  static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
  						struct bio *bio)
  {
  	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
  				       bio->bi_crypt_context);
  }
  
  static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
  						 struct bio *bio)
  {
  	return bio_crypt_ctx_mergeable(bio->bi_crypt_context,
  				       bio->bi_iter.bi_size, req->crypt_ctx);
  }
  
  static inline bool bio_crypt_ctx_merge_rq(struct request *req,
  					  struct request *next)
  {
  	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
  				       next->crypt_ctx);
  }
  
  static inline void blk_crypto_rq_set_defaults(struct request *rq)
  {
  	rq->crypt_ctx = NULL;
  	rq->crypt_keyslot = NULL;
  }
  
  static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
  {
  	return rq->crypt_ctx;
  }
  
  #else /* CONFIG_BLK_INLINE_ENCRYPTION */
  
  static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
  					       struct bio *bio)
  {
  	return true;
  }
  
  static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
  						 struct bio *bio)
  {
  	return true;
  }
  
  static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
  						struct bio *bio)
  {
  	return true;
  }
  
  static inline bool bio_crypt_ctx_merge_rq(struct request *req,
  					  struct request *next)
  {
  	return true;
  }
  
  static inline void blk_crypto_rq_set_defaults(struct request *rq) { }
  
  static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
  {
  	return false;
  }
  
  #endif /* CONFIG_BLK_INLINE_ENCRYPTION */
  
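/*
 * Most of the helpers below pair an out-of-line __-prefixed implementation
 * with a cheap inline wrapper that only makes the call when the bio or
 * request actually has a crypt context, so unencrypted I/O stays fast.
 */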
  void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
  static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
  {
  	if (bio_has_crypt_ctx(bio))
  		__bio_crypt_advance(bio, bytes);
  }
  
  void __bio_crypt_free_ctx(struct bio *bio);
  static inline void bio_crypt_free_ctx(struct bio *bio)
  {
  	if (bio_has_crypt_ctx(bio))
  		__bio_crypt_free_ctx(bio);
  }
  
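/*
 * After a front merge, @bio is the new first bio of @rq, so the request's
 * starting data unit number must be taken from @bio's crypt context.
 */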
  static inline void bio_crypt_do_front_merge(struct request *rq,
  					    struct bio *bio)
  {
  #ifdef CONFIG_BLK_INLINE_ENCRYPTION
  	if (bio_has_crypt_ctx(bio))
  		memcpy(rq->crypt_ctx->bc_dun, bio->bi_crypt_context->bc_dun,
  		       sizeof(rq->crypt_ctx->bc_dun));
  #endif
  }
  
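/*
 * Prepare a bio that has a crypt context for submission: use the device's
 * inline encryption support if it handles the bio's crypto configuration,
 * else hand the bio to blk-crypto-fallback (when enabled).  Returns false if
 * the bio cannot proceed, in which case it has been failed with an
 * appropriate error status.
 */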
  bool __blk_crypto_bio_prep(struct bio **bio_ptr);
  static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
  {
  	if (bio_has_crypt_ctx(*bio_ptr))
  		return __blk_crypto_bio_prep(bio_ptr);
  	return true;
  }
  
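/*
 * If the request is encrypted, acquire a keyslot for its key from the
 * device's keyslot manager.
 */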
  blk_status_t __blk_crypto_init_request(struct request *rq);
  static inline blk_status_t blk_crypto_init_request(struct request *rq)
  {
  	if (blk_crypto_rq_is_encrypted(rq))
  		return __blk_crypto_init_request(rq);
  	return BLK_STS_OK;
  }
  
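/*
 * If the request is encrypted, release its keyslot and free its crypt
 * context.
 */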
  void __blk_crypto_free_request(struct request *rq);
  static inline void blk_crypto_free_request(struct request *rq)
  {
  	if (blk_crypto_rq_is_encrypted(rq))
  		__blk_crypto_free_request(rq);
  }
  int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
  			     gfp_t gfp_mask);
  /**
   * blk_crypto_rq_bio_prep - Prepare a request's crypt_ctx when its first bio
   *			    is inserted
   * @rq: The request to prepare
   * @bio: The first bio being inserted into the request
   * @gfp_mask: Memory allocation flags
   *
   * Return: 0 on success, -ENOMEM if out of memory.  -ENOMEM is only possible if
   *	   @gfp_mask doesn't include %__GFP_DIRECT_RECLAIM.
   */
  static inline int blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
  					 gfp_t gfp_mask)
  {
  	if (bio_has_crypt_ctx(bio))
  		return __blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
  	return 0;
  }
  
  /**
   * blk_crypto_insert_cloned_request - Prepare a cloned request to be inserted
   *				      into a request queue.
   * @rq: the request being queued
   *
   * Return: BLK_STS_OK on success, nonzero on error.
   */
  static inline blk_status_t blk_crypto_insert_cloned_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
  		return blk_crypto_init_request(rq);
  	return BLK_STS_OK;
  }
  #ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK
  
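/*
 * blk-crypto-fallback performs the encryption/decryption in software via the
 * kernel crypto API when the device has no inline encryption support for the
 * bio's crypto configuration.
 */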
  int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num);
  
  bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr);
  
  int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);
  
  #else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */
  
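/* Without the fallback, unsupported crypto configurations simply fail. */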
  static inline int
  blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
  {
  	pr_warn_once("crypto API fallback is disabled
  ");
  	return -ENOPKG;
  }
  
  static inline bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
  {
  	pr_warn_once("crypto API fallback disabled; failing request.
  ");
  	(*bio_ptr)->bi_status = BLK_STS_NOTSUPP;
  	return false;
  }
  
  static inline int
  blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
  {
  	return 0;
  }
  
  #endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */
  #endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */