Commit 9cd899a32f611eb6328014f1d9e0ba31977812d9
1 parent 52861c7cd7
Exists in master and in 39 other branches
crypto: cryptd - Switch to template create API
This patch changes cryptd to use the template->create function instead of alloc, in anticipation of the switch to new-style ahash algorithms.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
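For context, the sketch below (not part of this commit) contrasts the two template hooks as declared in include/crypto/algapi.h; the names example_create and example_alloc_instance are hypothetical. With the old ->alloc hook the template returned a new instance and the crypto core registered it; with the new ->create hook the template builds the instance and registers it itself via crypto_register_instance(), which this commit moves from crypto/internal.h into the public include/crypto/algapi.h.

#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>

/* Hypothetical helper that builds an instance from the attribute table. */
static struct crypto_instance *example_alloc_instance(struct rtattr **tb);

/* New-style hook: returns an error code and registers the instance
 * itself instead of handing it back to the crypto core. */
static int example_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_instance *inst;
	int err;

	inst = example_alloc_instance(tb);
	if (IS_ERR(inst))
		return PTR_ERR(inst);

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		/* A real template also drops its spawns on failure, as
		 * cryptd does with crypto_drop_spawn()/crypto_drop_shash(). */
		kfree(inst);
	}

	return err;
}

static struct crypto_template example_tmpl = {
	.name = "example",
	.create = example_create,	/* previously: .alloc returning inst */
	.module = THIS_MODULE,
};

The cryptd_create_blkcipher() and cryptd_create_hash() conversions in the diff below follow exactly this shape.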
Showing 3 changed files with 32 additions and 27 deletions
crypto/cryptd.c
1 | /* | 1 | /* |
2 | * Software async crypto daemon. | 2 | * Software async crypto daemon. |
3 | * | 3 | * |
4 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> | 4 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License as published by the Free | 7 | * under the terms of the GNU General Public License as published by the Free |
8 | * Software Foundation; either version 2 of the License, or (at your option) | 8 | * Software Foundation; either version 2 of the License, or (at your option) |
9 | * any later version. | 9 | * any later version. |
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <crypto/algapi.h> | 13 | #include <crypto/algapi.h> |
14 | #include <crypto/internal/hash.h> | 14 | #include <crypto/internal/hash.h> |
15 | #include <crypto/cryptd.h> | 15 | #include <crypto/cryptd.h> |
16 | #include <crypto/crypto_wq.h> | 16 | #include <crypto/crypto_wq.h> |
17 | #include <linux/err.h> | 17 | #include <linux/err.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/list.h> | 20 | #include <linux/list.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/scatterlist.h> | 22 | #include <linux/scatterlist.h> |
23 | #include <linux/sched.h> | 23 | #include <linux/sched.h> |
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | 25 | ||
26 | #define CRYPTD_MAX_CPU_QLEN 100 | 26 | #define CRYPTD_MAX_CPU_QLEN 100 |
27 | 27 | ||
28 | struct cryptd_cpu_queue { | 28 | struct cryptd_cpu_queue { |
29 | struct crypto_queue queue; | 29 | struct crypto_queue queue; |
30 | struct work_struct work; | 30 | struct work_struct work; |
31 | }; | 31 | }; |
32 | 32 | ||
33 | struct cryptd_queue { | 33 | struct cryptd_queue { |
34 | struct cryptd_cpu_queue *cpu_queue; | 34 | struct cryptd_cpu_queue *cpu_queue; |
35 | }; | 35 | }; |
36 | 36 | ||
37 | struct cryptd_instance_ctx { | 37 | struct cryptd_instance_ctx { |
38 | struct crypto_spawn spawn; | 38 | struct crypto_spawn spawn; |
39 | struct cryptd_queue *queue; | 39 | struct cryptd_queue *queue; |
40 | }; | 40 | }; |
41 | 41 | ||
42 | struct hashd_instance_ctx { | 42 | struct hashd_instance_ctx { |
43 | struct crypto_shash_spawn spawn; | 43 | struct crypto_shash_spawn spawn; |
44 | struct cryptd_queue *queue; | 44 | struct cryptd_queue *queue; |
45 | }; | 45 | }; |
46 | 46 | ||
47 | struct cryptd_blkcipher_ctx { | 47 | struct cryptd_blkcipher_ctx { |
48 | struct crypto_blkcipher *child; | 48 | struct crypto_blkcipher *child; |
49 | }; | 49 | }; |
50 | 50 | ||
51 | struct cryptd_blkcipher_request_ctx { | 51 | struct cryptd_blkcipher_request_ctx { |
52 | crypto_completion_t complete; | 52 | crypto_completion_t complete; |
53 | }; | 53 | }; |
54 | 54 | ||
55 | struct cryptd_hash_ctx { | 55 | struct cryptd_hash_ctx { |
56 | struct crypto_shash *child; | 56 | struct crypto_shash *child; |
57 | }; | 57 | }; |
58 | 58 | ||
59 | struct cryptd_hash_request_ctx { | 59 | struct cryptd_hash_request_ctx { |
60 | crypto_completion_t complete; | 60 | crypto_completion_t complete; |
61 | struct shash_desc desc; | 61 | struct shash_desc desc; |
62 | }; | 62 | }; |
63 | 63 | ||
64 | static void cryptd_queue_worker(struct work_struct *work); | 64 | static void cryptd_queue_worker(struct work_struct *work); |
65 | 65 | ||
66 | static int cryptd_init_queue(struct cryptd_queue *queue, | 66 | static int cryptd_init_queue(struct cryptd_queue *queue, |
67 | unsigned int max_cpu_qlen) | 67 | unsigned int max_cpu_qlen) |
68 | { | 68 | { |
69 | int cpu; | 69 | int cpu; |
70 | struct cryptd_cpu_queue *cpu_queue; | 70 | struct cryptd_cpu_queue *cpu_queue; |
71 | 71 | ||
72 | queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue); | 72 | queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue); |
73 | if (!queue->cpu_queue) | 73 | if (!queue->cpu_queue) |
74 | return -ENOMEM; | 74 | return -ENOMEM; |
75 | for_each_possible_cpu(cpu) { | 75 | for_each_possible_cpu(cpu) { |
76 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); | 76 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); |
77 | crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); | 77 | crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); |
78 | INIT_WORK(&cpu_queue->work, cryptd_queue_worker); | 78 | INIT_WORK(&cpu_queue->work, cryptd_queue_worker); |
79 | } | 79 | } |
80 | return 0; | 80 | return 0; |
81 | } | 81 | } |
82 | 82 | ||
83 | static void cryptd_fini_queue(struct cryptd_queue *queue) | 83 | static void cryptd_fini_queue(struct cryptd_queue *queue) |
84 | { | 84 | { |
85 | int cpu; | 85 | int cpu; |
86 | struct cryptd_cpu_queue *cpu_queue; | 86 | struct cryptd_cpu_queue *cpu_queue; |
87 | 87 | ||
88 | for_each_possible_cpu(cpu) { | 88 | for_each_possible_cpu(cpu) { |
89 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); | 89 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); |
90 | BUG_ON(cpu_queue->queue.qlen); | 90 | BUG_ON(cpu_queue->queue.qlen); |
91 | } | 91 | } |
92 | free_percpu(queue->cpu_queue); | 92 | free_percpu(queue->cpu_queue); |
93 | } | 93 | } |
94 | 94 | ||
95 | static int cryptd_enqueue_request(struct cryptd_queue *queue, | 95 | static int cryptd_enqueue_request(struct cryptd_queue *queue, |
96 | struct crypto_async_request *request) | 96 | struct crypto_async_request *request) |
97 | { | 97 | { |
98 | int cpu, err; | 98 | int cpu, err; |
99 | struct cryptd_cpu_queue *cpu_queue; | 99 | struct cryptd_cpu_queue *cpu_queue; |
100 | 100 | ||
101 | cpu = get_cpu(); | 101 | cpu = get_cpu(); |
102 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); | 102 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); |
103 | err = crypto_enqueue_request(&cpu_queue->queue, request); | 103 | err = crypto_enqueue_request(&cpu_queue->queue, request); |
104 | queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); | 104 | queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); |
105 | put_cpu(); | 105 | put_cpu(); |
106 | 106 | ||
107 | return err; | 107 | return err; |
108 | } | 108 | } |
109 | 109 | ||
110 | /* Called in workqueue context, do one real cryption work (via | 110 | /* Called in workqueue context, do one real cryption work (via |
111 | * req->complete) and reschedule itself if there are more work to | 111 | * req->complete) and reschedule itself if there are more work to |
112 | * do. */ | 112 | * do. */ |
113 | static void cryptd_queue_worker(struct work_struct *work) | 113 | static void cryptd_queue_worker(struct work_struct *work) |
114 | { | 114 | { |
115 | struct cryptd_cpu_queue *cpu_queue; | 115 | struct cryptd_cpu_queue *cpu_queue; |
116 | struct crypto_async_request *req, *backlog; | 116 | struct crypto_async_request *req, *backlog; |
117 | 117 | ||
118 | cpu_queue = container_of(work, struct cryptd_cpu_queue, work); | 118 | cpu_queue = container_of(work, struct cryptd_cpu_queue, work); |
119 | /* Only handle one request at a time to avoid hogging crypto | 119 | /* Only handle one request at a time to avoid hogging crypto |
120 | * workqueue. preempt_disable/enable is used to prevent | 120 | * workqueue. preempt_disable/enable is used to prevent |
121 | * being preempted by cryptd_enqueue_request() */ | 121 | * being preempted by cryptd_enqueue_request() */ |
122 | preempt_disable(); | 122 | preempt_disable(); |
123 | backlog = crypto_get_backlog(&cpu_queue->queue); | 123 | backlog = crypto_get_backlog(&cpu_queue->queue); |
124 | req = crypto_dequeue_request(&cpu_queue->queue); | 124 | req = crypto_dequeue_request(&cpu_queue->queue); |
125 | preempt_enable(); | 125 | preempt_enable(); |
126 | 126 | ||
127 | if (!req) | 127 | if (!req) |
128 | return; | 128 | return; |
129 | 129 | ||
130 | if (backlog) | 130 | if (backlog) |
131 | backlog->complete(backlog, -EINPROGRESS); | 131 | backlog->complete(backlog, -EINPROGRESS); |
132 | req->complete(req, 0); | 132 | req->complete(req, 0); |
133 | 133 | ||
134 | if (cpu_queue->queue.qlen) | 134 | if (cpu_queue->queue.qlen) |
135 | queue_work(kcrypto_wq, &cpu_queue->work); | 135 | queue_work(kcrypto_wq, &cpu_queue->work); |
136 | } | 136 | } |
137 | 137 | ||
138 | static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm) | 138 | static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm) |
139 | { | 139 | { |
140 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | 140 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); |
141 | struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); | 141 | struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); |
142 | return ictx->queue; | 142 | return ictx->queue; |
143 | } | 143 | } |
144 | 144 | ||
145 | static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent, | 145 | static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent, |
146 | const u8 *key, unsigned int keylen) | 146 | const u8 *key, unsigned int keylen) |
147 | { | 147 | { |
148 | struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent); | 148 | struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent); |
149 | struct crypto_blkcipher *child = ctx->child; | 149 | struct crypto_blkcipher *child = ctx->child; |
150 | int err; | 150 | int err; |
151 | 151 | ||
152 | crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 152 | crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
153 | crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) & | 153 | crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) & |
154 | CRYPTO_TFM_REQ_MASK); | 154 | CRYPTO_TFM_REQ_MASK); |
155 | err = crypto_blkcipher_setkey(child, key, keylen); | 155 | err = crypto_blkcipher_setkey(child, key, keylen); |
156 | crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) & | 156 | crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) & |
157 | CRYPTO_TFM_RES_MASK); | 157 | CRYPTO_TFM_RES_MASK); |
158 | return err; | 158 | return err; |
159 | } | 159 | } |
160 | 160 | ||
161 | static void cryptd_blkcipher_crypt(struct ablkcipher_request *req, | 161 | static void cryptd_blkcipher_crypt(struct ablkcipher_request *req, |
162 | struct crypto_blkcipher *child, | 162 | struct crypto_blkcipher *child, |
163 | int err, | 163 | int err, |
164 | int (*crypt)(struct blkcipher_desc *desc, | 164 | int (*crypt)(struct blkcipher_desc *desc, |
165 | struct scatterlist *dst, | 165 | struct scatterlist *dst, |
166 | struct scatterlist *src, | 166 | struct scatterlist *src, |
167 | unsigned int len)) | 167 | unsigned int len)) |
168 | { | 168 | { |
169 | struct cryptd_blkcipher_request_ctx *rctx; | 169 | struct cryptd_blkcipher_request_ctx *rctx; |
170 | struct blkcipher_desc desc; | 170 | struct blkcipher_desc desc; |
171 | 171 | ||
172 | rctx = ablkcipher_request_ctx(req); | 172 | rctx = ablkcipher_request_ctx(req); |
173 | 173 | ||
174 | if (unlikely(err == -EINPROGRESS)) | 174 | if (unlikely(err == -EINPROGRESS)) |
175 | goto out; | 175 | goto out; |
176 | 176 | ||
177 | desc.tfm = child; | 177 | desc.tfm = child; |
178 | desc.info = req->info; | 178 | desc.info = req->info; |
179 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | 179 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
180 | 180 | ||
181 | err = crypt(&desc, req->dst, req->src, req->nbytes); | 181 | err = crypt(&desc, req->dst, req->src, req->nbytes); |
182 | 182 | ||
183 | req->base.complete = rctx->complete; | 183 | req->base.complete = rctx->complete; |
184 | 184 | ||
185 | out: | 185 | out: |
186 | local_bh_disable(); | 186 | local_bh_disable(); |
187 | rctx->complete(&req->base, err); | 187 | rctx->complete(&req->base, err); |
188 | local_bh_enable(); | 188 | local_bh_enable(); |
189 | } | 189 | } |
190 | 190 | ||
191 | static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err) | 191 | static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err) |
192 | { | 192 | { |
193 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm); | 193 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm); |
194 | struct crypto_blkcipher *child = ctx->child; | 194 | struct crypto_blkcipher *child = ctx->child; |
195 | 195 | ||
196 | cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err, | 196 | cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err, |
197 | crypto_blkcipher_crt(child)->encrypt); | 197 | crypto_blkcipher_crt(child)->encrypt); |
198 | } | 198 | } |
199 | 199 | ||
200 | static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err) | 200 | static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err) |
201 | { | 201 | { |
202 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm); | 202 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm); |
203 | struct crypto_blkcipher *child = ctx->child; | 203 | struct crypto_blkcipher *child = ctx->child; |
204 | 204 | ||
205 | cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err, | 205 | cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err, |
206 | crypto_blkcipher_crt(child)->decrypt); | 206 | crypto_blkcipher_crt(child)->decrypt); |
207 | } | 207 | } |
208 | 208 | ||
209 | static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req, | 209 | static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req, |
210 | crypto_completion_t complete) | 210 | crypto_completion_t complete) |
211 | { | 211 | { |
212 | struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req); | 212 | struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req); |
213 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | 213 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
214 | struct cryptd_queue *queue; | 214 | struct cryptd_queue *queue; |
215 | 215 | ||
216 | queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm)); | 216 | queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm)); |
217 | rctx->complete = req->base.complete; | 217 | rctx->complete = req->base.complete; |
218 | req->base.complete = complete; | 218 | req->base.complete = complete; |
219 | 219 | ||
220 | return cryptd_enqueue_request(queue, &req->base); | 220 | return cryptd_enqueue_request(queue, &req->base); |
221 | } | 221 | } |
222 | 222 | ||
223 | static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req) | 223 | static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req) |
224 | { | 224 | { |
225 | return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt); | 225 | return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt); |
226 | } | 226 | } |
227 | 227 | ||
228 | static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req) | 228 | static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req) |
229 | { | 229 | { |
230 | return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt); | 230 | return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt); |
231 | } | 231 | } |
232 | 232 | ||
233 | static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm) | 233 | static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm) |
234 | { | 234 | { |
235 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | 235 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); |
236 | struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); | 236 | struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); |
237 | struct crypto_spawn *spawn = &ictx->spawn; | 237 | struct crypto_spawn *spawn = &ictx->spawn; |
238 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm); | 238 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm); |
239 | struct crypto_blkcipher *cipher; | 239 | struct crypto_blkcipher *cipher; |
240 | 240 | ||
241 | cipher = crypto_spawn_blkcipher(spawn); | 241 | cipher = crypto_spawn_blkcipher(spawn); |
242 | if (IS_ERR(cipher)) | 242 | if (IS_ERR(cipher)) |
243 | return PTR_ERR(cipher); | 243 | return PTR_ERR(cipher); |
244 | 244 | ||
245 | ctx->child = cipher; | 245 | ctx->child = cipher; |
246 | tfm->crt_ablkcipher.reqsize = | 246 | tfm->crt_ablkcipher.reqsize = |
247 | sizeof(struct cryptd_blkcipher_request_ctx); | 247 | sizeof(struct cryptd_blkcipher_request_ctx); |
248 | return 0; | 248 | return 0; |
249 | } | 249 | } |
250 | 250 | ||
251 | static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm) | 251 | static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm) |
252 | { | 252 | { |
253 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm); | 253 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm); |
254 | 254 | ||
255 | crypto_free_blkcipher(ctx->child); | 255 | crypto_free_blkcipher(ctx->child); |
256 | } | 256 | } |
257 | 257 | ||
258 | static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg, | 258 | static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg, |
259 | unsigned int tail) | 259 | unsigned int tail) |
260 | { | 260 | { |
261 | struct crypto_instance *inst; | 261 | struct crypto_instance *inst; |
262 | int err; | 262 | int err; |
263 | 263 | ||
264 | inst = kzalloc(sizeof(*inst) + tail, GFP_KERNEL); | 264 | inst = kzalloc(sizeof(*inst) + tail, GFP_KERNEL); |
265 | if (!inst) { | 265 | if (!inst) { |
266 | inst = ERR_PTR(-ENOMEM); | 266 | inst = ERR_PTR(-ENOMEM); |
267 | goto out; | 267 | goto out; |
268 | } | 268 | } |
269 | 269 | ||
270 | err = -ENAMETOOLONG; | 270 | err = -ENAMETOOLONG; |
271 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 271 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
272 | "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | 272 | "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) |
273 | goto out_free_inst; | 273 | goto out_free_inst; |
274 | 274 | ||
275 | memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); | 275 | memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); |
276 | 276 | ||
277 | inst->alg.cra_priority = alg->cra_priority + 50; | 277 | inst->alg.cra_priority = alg->cra_priority + 50; |
278 | inst->alg.cra_blocksize = alg->cra_blocksize; | 278 | inst->alg.cra_blocksize = alg->cra_blocksize; |
279 | inst->alg.cra_alignmask = alg->cra_alignmask; | 279 | inst->alg.cra_alignmask = alg->cra_alignmask; |
280 | 280 | ||
281 | out: | 281 | out: |
282 | return inst; | 282 | return inst; |
283 | 283 | ||
284 | out_free_inst: | 284 | out_free_inst: |
285 | kfree(inst); | 285 | kfree(inst); |
286 | inst = ERR_PTR(err); | 286 | inst = ERR_PTR(err); |
287 | goto out; | 287 | goto out; |
288 | } | 288 | } |
289 | 289 | ||
290 | static struct crypto_instance *cryptd_alloc_blkcipher( | 290 | static int cryptd_create_blkcipher(struct crypto_template *tmpl, |
291 | struct rtattr **tb, struct cryptd_queue *queue) | 291 | struct rtattr **tb, |
| | 292 | struct cryptd_queue *queue) |
292 | { | 293 | { |
293 | struct cryptd_instance_ctx *ctx; | 294 | struct cryptd_instance_ctx *ctx; |
294 | struct crypto_instance *inst; | 295 | struct crypto_instance *inst; |
295 | struct crypto_alg *alg; | 296 | struct crypto_alg *alg; |
296 | int err; | 297 | int err; |
297 | 298 | ||
298 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER, | 299 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER, |
299 | CRYPTO_ALG_TYPE_MASK); | 300 | CRYPTO_ALG_TYPE_MASK); |
300 | if (IS_ERR(alg)) | 301 | if (IS_ERR(alg)) |
301 | return ERR_CAST(alg); | 302 | return PTR_ERR(alg); |
302 | 303 | ||
303 | inst = cryptd_alloc_instance(alg, sizeof(*ctx)); | 304 | inst = cryptd_alloc_instance(alg, sizeof(*ctx)); |
304 | if (IS_ERR(inst)) | 305 | if (IS_ERR(inst)) |
305 | goto out_put_alg; | 306 | goto out_put_alg; |
306 | 307 | ||
307 | ctx = crypto_instance_ctx(inst); | 308 | ctx = crypto_instance_ctx(inst); |
308 | ctx->queue = queue; | 309 | ctx->queue = queue; |
309 | 310 | ||
310 | err = crypto_init_spawn(&ctx->spawn, alg, inst, | 311 | err = crypto_init_spawn(&ctx->spawn, alg, inst, |
311 | CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); | 312 | CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); |
312 | if (err) | 313 | if (err) |
313 | goto out_free_inst; | 314 | goto out_free_inst; |
314 | 315 | ||
315 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; | 316 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; |
316 | inst->alg.cra_type = &crypto_ablkcipher_type; | 317 | inst->alg.cra_type = &crypto_ablkcipher_type; |
317 | 318 | ||
318 | inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize; | 319 | inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize; |
319 | inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize; | 320 | inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize; |
320 | inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize; | 321 | inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize; |
321 | 322 | ||
322 | inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv; | 323 | inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv; |
323 | 324 | ||
324 | inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx); | 325 | inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx); |
325 | 326 | ||
326 | inst->alg.cra_init = cryptd_blkcipher_init_tfm; | 327 | inst->alg.cra_init = cryptd_blkcipher_init_tfm; |
327 | inst->alg.cra_exit = cryptd_blkcipher_exit_tfm; | 328 | inst->alg.cra_exit = cryptd_blkcipher_exit_tfm; |
328 | 329 | ||
329 | inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey; | 330 | inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey; |
330 | inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue; | 331 | inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue; |
331 | inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue; | 332 | inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue; |
332 | 333 | ||
| | 334 | err = crypto_register_instance(tmpl, inst); |
| | 335 | if (err) { |
| | 336 | crypto_drop_spawn(&ctx->spawn); |
| | 337 | out_free_inst: |
| | 338 | kfree(inst); |
| | 339 | } |
| | 340 | |
333 | out_put_alg: | 341 | out_put_alg: |
334 | crypto_mod_put(alg); | 342 | crypto_mod_put(alg); |
335 | return inst; | 343 | return err; |
336 | | | |
337 | out_free_inst: | | |
338 | kfree(inst); | | |
339 | inst = ERR_PTR(err); | | |
340 | goto out_put_alg; | | |
341 | } | 344 | } |
342 | 345 | ||
343 | static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) | 346 | static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) |
344 | { | 347 | { |
345 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | 348 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); |
346 | struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst); | 349 | struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst); |
347 | struct crypto_shash_spawn *spawn = &ictx->spawn; | 350 | struct crypto_shash_spawn *spawn = &ictx->spawn; |
348 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); | 351 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); |
349 | struct crypto_shash *hash; | 352 | struct crypto_shash *hash; |
350 | 353 | ||
351 | hash = crypto_spawn_shash(spawn); | 354 | hash = crypto_spawn_shash(spawn); |
352 | if (IS_ERR(hash)) | 355 | if (IS_ERR(hash)) |
353 | return PTR_ERR(hash); | 356 | return PTR_ERR(hash); |
354 | 357 | ||
355 | ctx->child = hash; | 358 | ctx->child = hash; |
356 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | 359 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
357 | sizeof(struct cryptd_hash_request_ctx) + | 360 | sizeof(struct cryptd_hash_request_ctx) + |
358 | crypto_shash_descsize(hash)); | 361 | crypto_shash_descsize(hash)); |
359 | return 0; | 362 | return 0; |
360 | } | 363 | } |
361 | 364 | ||
362 | static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm) | 365 | static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm) |
363 | { | 366 | { |
364 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); | 367 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); |
365 | 368 | ||
366 | crypto_free_shash(ctx->child); | 369 | crypto_free_shash(ctx->child); |
367 | } | 370 | } |
368 | 371 | ||
369 | static int cryptd_hash_setkey(struct crypto_ahash *parent, | 372 | static int cryptd_hash_setkey(struct crypto_ahash *parent, |
370 | const u8 *key, unsigned int keylen) | 373 | const u8 *key, unsigned int keylen) |
371 | { | 374 | { |
372 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); | 375 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); |
373 | struct crypto_shash *child = ctx->child; | 376 | struct crypto_shash *child = ctx->child; |
374 | int err; | 377 | int err; |
375 | 378 | ||
376 | crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 379 | crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
377 | crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) & | 380 | crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) & |
378 | CRYPTO_TFM_REQ_MASK); | 381 | CRYPTO_TFM_REQ_MASK); |
379 | err = crypto_shash_setkey(child, key, keylen); | 382 | err = crypto_shash_setkey(child, key, keylen); |
380 | crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) & | 383 | crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) & |
381 | CRYPTO_TFM_RES_MASK); | 384 | CRYPTO_TFM_RES_MASK); |
382 | return err; | 385 | return err; |
383 | } | 386 | } |
384 | 387 | ||
385 | static int cryptd_hash_enqueue(struct ahash_request *req, | 388 | static int cryptd_hash_enqueue(struct ahash_request *req, |
386 | crypto_completion_t complete) | 389 | crypto_completion_t complete) |
387 | { | 390 | { |
388 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | 391 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
389 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | 392 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
390 | struct cryptd_queue *queue = | 393 | struct cryptd_queue *queue = |
391 | cryptd_get_queue(crypto_ahash_tfm(tfm)); | 394 | cryptd_get_queue(crypto_ahash_tfm(tfm)); |
392 | 395 | ||
393 | rctx->complete = req->base.complete; | 396 | rctx->complete = req->base.complete; |
394 | req->base.complete = complete; | 397 | req->base.complete = complete; |
395 | 398 | ||
396 | return cryptd_enqueue_request(queue, &req->base); | 399 | return cryptd_enqueue_request(queue, &req->base); |
397 | } | 400 | } |
398 | 401 | ||
399 | static void cryptd_hash_init(struct crypto_async_request *req_async, int err) | 402 | static void cryptd_hash_init(struct crypto_async_request *req_async, int err) |
400 | { | 403 | { |
401 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); | 404 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); |
402 | struct crypto_shash *child = ctx->child; | 405 | struct crypto_shash *child = ctx->child; |
403 | struct ahash_request *req = ahash_request_cast(req_async); | 406 | struct ahash_request *req = ahash_request_cast(req_async); |
404 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | 407 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
405 | struct shash_desc *desc = &rctx->desc; | 408 | struct shash_desc *desc = &rctx->desc; |
406 | 409 | ||
407 | if (unlikely(err == -EINPROGRESS)) | 410 | if (unlikely(err == -EINPROGRESS)) |
408 | goto out; | 411 | goto out; |
409 | 412 | ||
410 | desc->tfm = child; | 413 | desc->tfm = child; |
411 | desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; | 414 | desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
412 | 415 | ||
413 | err = crypto_shash_init(desc); | 416 | err = crypto_shash_init(desc); |
414 | 417 | ||
415 | req->base.complete = rctx->complete; | 418 | req->base.complete = rctx->complete; |
416 | 419 | ||
417 | out: | 420 | out: |
418 | local_bh_disable(); | 421 | local_bh_disable(); |
419 | rctx->complete(&req->base, err); | 422 | rctx->complete(&req->base, err); |
420 | local_bh_enable(); | 423 | local_bh_enable(); |
421 | } | 424 | } |
422 | 425 | ||
423 | static int cryptd_hash_init_enqueue(struct ahash_request *req) | 426 | static int cryptd_hash_init_enqueue(struct ahash_request *req) |
424 | { | 427 | { |
425 | return cryptd_hash_enqueue(req, cryptd_hash_init); | 428 | return cryptd_hash_enqueue(req, cryptd_hash_init); |
426 | } | 429 | } |
427 | 430 | ||
428 | static void cryptd_hash_update(struct crypto_async_request *req_async, int err) | 431 | static void cryptd_hash_update(struct crypto_async_request *req_async, int err) |
429 | { | 432 | { |
430 | struct ahash_request *req = ahash_request_cast(req_async); | 433 | struct ahash_request *req = ahash_request_cast(req_async); |
431 | struct cryptd_hash_request_ctx *rctx; | 434 | struct cryptd_hash_request_ctx *rctx; |
432 | 435 | ||
433 | rctx = ahash_request_ctx(req); | 436 | rctx = ahash_request_ctx(req); |
434 | 437 | ||
435 | if (unlikely(err == -EINPROGRESS)) | 438 | if (unlikely(err == -EINPROGRESS)) |
436 | goto out; | 439 | goto out; |
437 | 440 | ||
438 | err = shash_ahash_update(req, &rctx->desc); | 441 | err = shash_ahash_update(req, &rctx->desc); |
439 | 442 | ||
440 | req->base.complete = rctx->complete; | 443 | req->base.complete = rctx->complete; |
441 | 444 | ||
442 | out: | 445 | out: |
443 | local_bh_disable(); | 446 | local_bh_disable(); |
444 | rctx->complete(&req->base, err); | 447 | rctx->complete(&req->base, err); |
445 | local_bh_enable(); | 448 | local_bh_enable(); |
446 | } | 449 | } |
447 | 450 | ||
448 | static int cryptd_hash_update_enqueue(struct ahash_request *req) | 451 | static int cryptd_hash_update_enqueue(struct ahash_request *req) |
449 | { | 452 | { |
450 | return cryptd_hash_enqueue(req, cryptd_hash_update); | 453 | return cryptd_hash_enqueue(req, cryptd_hash_update); |
451 | } | 454 | } |
452 | 455 | ||
453 | static void cryptd_hash_final(struct crypto_async_request *req_async, int err) | 456 | static void cryptd_hash_final(struct crypto_async_request *req_async, int err) |
454 | { | 457 | { |
455 | struct ahash_request *req = ahash_request_cast(req_async); | 458 | struct ahash_request *req = ahash_request_cast(req_async); |
456 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | 459 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
457 | 460 | ||
458 | if (unlikely(err == -EINPROGRESS)) | 461 | if (unlikely(err == -EINPROGRESS)) |
459 | goto out; | 462 | goto out; |
460 | 463 | ||
461 | err = crypto_shash_final(&rctx->desc, req->result); | 464 | err = crypto_shash_final(&rctx->desc, req->result); |
462 | 465 | ||
463 | req->base.complete = rctx->complete; | 466 | req->base.complete = rctx->complete; |
464 | 467 | ||
465 | out: | 468 | out: |
466 | local_bh_disable(); | 469 | local_bh_disable(); |
467 | rctx->complete(&req->base, err); | 470 | rctx->complete(&req->base, err); |
468 | local_bh_enable(); | 471 | local_bh_enable(); |
469 | } | 472 | } |
470 | 473 | ||
471 | static int cryptd_hash_final_enqueue(struct ahash_request *req) | 474 | static int cryptd_hash_final_enqueue(struct ahash_request *req) |
472 | { | 475 | { |
473 | return cryptd_hash_enqueue(req, cryptd_hash_final); | 476 | return cryptd_hash_enqueue(req, cryptd_hash_final); |
474 | } | 477 | } |
475 | 478 | ||
476 | static void cryptd_hash_digest(struct crypto_async_request *req_async, int err) | 479 | static void cryptd_hash_digest(struct crypto_async_request *req_async, int err) |
477 | { | 480 | { |
478 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); | 481 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); |
479 | struct crypto_shash *child = ctx->child; | 482 | struct crypto_shash *child = ctx->child; |
480 | struct ahash_request *req = ahash_request_cast(req_async); | 483 | struct ahash_request *req = ahash_request_cast(req_async); |
481 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | 484 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
482 | struct shash_desc *desc = &rctx->desc; | 485 | struct shash_desc *desc = &rctx->desc; |
483 | 486 | ||
484 | if (unlikely(err == -EINPROGRESS)) | 487 | if (unlikely(err == -EINPROGRESS)) |
485 | goto out; | 488 | goto out; |
486 | 489 | ||
487 | desc->tfm = child; | 490 | desc->tfm = child; |
488 | desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; | 491 | desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
489 | 492 | ||
490 | err = shash_ahash_digest(req, desc); | 493 | err = shash_ahash_digest(req, desc); |
491 | 494 | ||
492 | req->base.complete = rctx->complete; | 495 | req->base.complete = rctx->complete; |
493 | 496 | ||
494 | out: | 497 | out: |
495 | local_bh_disable(); | 498 | local_bh_disable(); |
496 | rctx->complete(&req->base, err); | 499 | rctx->complete(&req->base, err); |
497 | local_bh_enable(); | 500 | local_bh_enable(); |
498 | } | 501 | } |
499 | 502 | ||
500 | static int cryptd_hash_digest_enqueue(struct ahash_request *req) | 503 | static int cryptd_hash_digest_enqueue(struct ahash_request *req) |
501 | { | 504 | { |
502 | return cryptd_hash_enqueue(req, cryptd_hash_digest); | 505 | return cryptd_hash_enqueue(req, cryptd_hash_digest); |
503 | } | 506 | } |
504 | 507 | ||
505 | static struct crypto_instance *cryptd_alloc_hash( | 508 | static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, |
506 | struct rtattr **tb, struct cryptd_queue *queue) | 509 | struct cryptd_queue *queue) |
507 | { | 510 | { |
508 | struct hashd_instance_ctx *ctx; | 511 | struct hashd_instance_ctx *ctx; |
509 | struct crypto_instance *inst; | 512 | struct crypto_instance *inst; |
510 | struct shash_alg *salg; | 513 | struct shash_alg *salg; |
511 | struct crypto_alg *alg; | 514 | struct crypto_alg *alg; |
512 | int err; | 515 | int err; |
513 | 516 | ||
514 | salg = shash_attr_alg(tb[1], 0, 0); | 517 | salg = shash_attr_alg(tb[1], 0, 0); |
515 | if (IS_ERR(salg)) | 518 | if (IS_ERR(salg)) |
516 | return ERR_CAST(salg); | 519 | return PTR_ERR(salg); |
517 | 520 | ||
518 | alg = &salg->base; | 521 | alg = &salg->base; |
519 | inst = cryptd_alloc_instance(alg, sizeof(*ctx)); | 522 | inst = cryptd_alloc_instance(alg, sizeof(*ctx)); |
520 | if (IS_ERR(inst)) | 523 | if (IS_ERR(inst)) |
521 | goto out_put_alg; | 524 | goto out_put_alg; |
522 | 525 | ||
523 | ctx = crypto_instance_ctx(inst); | 526 | ctx = crypto_instance_ctx(inst); |
524 | ctx->queue = queue; | 527 | ctx->queue = queue; |
525 | 528 | ||
526 | err = crypto_init_shash_spawn(&ctx->spawn, salg, inst); | 529 | err = crypto_init_shash_spawn(&ctx->spawn, salg, inst); |
527 | if (err) | 530 | if (err) |
528 | goto out_free_inst; | 531 | goto out_free_inst; |
529 | 532 | ||
530 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC; | 533 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC; |
531 | inst->alg.cra_type = &crypto_ahash_type; | 534 | inst->alg.cra_type = &crypto_ahash_type; |
532 | 535 | ||
533 | inst->alg.cra_ahash.digestsize = salg->digestsize; | 536 | inst->alg.cra_ahash.digestsize = salg->digestsize; |
534 | inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx); | 537 | inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx); |
535 | 538 | ||
536 | inst->alg.cra_init = cryptd_hash_init_tfm; | 539 | inst->alg.cra_init = cryptd_hash_init_tfm; |
537 | inst->alg.cra_exit = cryptd_hash_exit_tfm; | 540 | inst->alg.cra_exit = cryptd_hash_exit_tfm; |
538 | 541 | ||
539 | inst->alg.cra_ahash.init = cryptd_hash_init_enqueue; | 542 | inst->alg.cra_ahash.init = cryptd_hash_init_enqueue; |
540 | inst->alg.cra_ahash.update = cryptd_hash_update_enqueue; | 543 | inst->alg.cra_ahash.update = cryptd_hash_update_enqueue; |
541 | inst->alg.cra_ahash.final = cryptd_hash_final_enqueue; | 544 | inst->alg.cra_ahash.final = cryptd_hash_final_enqueue; |
542 | inst->alg.cra_ahash.setkey = cryptd_hash_setkey; | 545 | inst->alg.cra_ahash.setkey = cryptd_hash_setkey; |
543 | inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue; | 546 | inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue; |
544 | 547 | ||
| | 548 | err = crypto_register_instance(tmpl, inst); |
| | 549 | if (err) { |
| | 550 | crypto_drop_shash(&ctx->spawn); |
| | 551 | out_free_inst: |
| | 552 | kfree(inst); |
| | 553 | } |
| | 554 | |
545 | out_put_alg: | 555 | out_put_alg: |
546 | crypto_mod_put(alg); | 556 | crypto_mod_put(alg); |
547 | return inst; | 557 | return err; |
548 | | | |
549 | out_free_inst: | | |
550 | kfree(inst); | | |
551 | inst = ERR_PTR(err); | | |
552 | goto out_put_alg; | | |
553 | } | 558 | } |
554 | 559 | ||
555 | static struct cryptd_queue queue; | 560 | static struct cryptd_queue queue; |
556 | 561 | ||
557 | static struct crypto_instance *cryptd_alloc(struct rtattr **tb) | 562 | static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) |
558 | { | 563 | { |
559 | struct crypto_attr_type *algt; | 564 | struct crypto_attr_type *algt; |
560 | 565 | ||
561 | algt = crypto_get_attr_type(tb); | 566 | algt = crypto_get_attr_type(tb); |
562 | if (IS_ERR(algt)) | 567 | if (IS_ERR(algt)) |
563 | return ERR_CAST(algt); | 568 | return PTR_ERR(algt); |
564 | 569 | ||
565 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { | 570 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { |
566 | case CRYPTO_ALG_TYPE_BLKCIPHER: | 571 | case CRYPTO_ALG_TYPE_BLKCIPHER: |
567 | return cryptd_alloc_blkcipher(tb, &queue); | 572 | return cryptd_create_blkcipher(tmpl, tb, &queue); |
568 | case CRYPTO_ALG_TYPE_DIGEST: | 573 | case CRYPTO_ALG_TYPE_DIGEST: |
569 | return cryptd_alloc_hash(tb, &queue); | 574 | return cryptd_create_hash(tmpl, tb, &queue); |
570 | } | 575 | } |
571 | 576 | ||
572 | return ERR_PTR(-EINVAL); | 577 | return -EINVAL; |
573 | } | 578 | } |
574 | 579 | ||
575 | static void cryptd_free(struct crypto_instance *inst) | 580 | static void cryptd_free(struct crypto_instance *inst) |
576 | { | 581 | { |
577 | struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst); | 582 | struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst); |
578 | 583 | ||
579 | crypto_drop_spawn(&ctx->spawn); | 584 | crypto_drop_spawn(&ctx->spawn); |
580 | kfree(inst); | 585 | kfree(inst); |
581 | } | 586 | } |
582 | 587 | ||
583 | static struct crypto_template cryptd_tmpl = { | 588 | static struct crypto_template cryptd_tmpl = { |
584 | .name = "cryptd", | 589 | .name = "cryptd", |
585 | .alloc = cryptd_alloc, | 590 | .create = cryptd_create, |
586 | .free = cryptd_free, | 591 | .free = cryptd_free, |
587 | .module = THIS_MODULE, | 592 | .module = THIS_MODULE, |
588 | }; | 593 | }; |
589 | 594 | ||
590 | struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name, | 595 | struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name, |
591 | u32 type, u32 mask) | 596 | u32 type, u32 mask) |
592 | { | 597 | { |
593 | char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; | 598 | char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; |
594 | struct crypto_tfm *tfm; | 599 | struct crypto_tfm *tfm; |
595 | 600 | ||
596 | if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, | 601 | if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, |
597 | "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) | 602 | "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) |
598 | return ERR_PTR(-EINVAL); | 603 | return ERR_PTR(-EINVAL); |
599 | type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); | 604 | type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); |
600 | type |= CRYPTO_ALG_TYPE_BLKCIPHER; | 605 | type |= CRYPTO_ALG_TYPE_BLKCIPHER; |
601 | mask &= ~CRYPTO_ALG_TYPE_MASK; | 606 | mask &= ~CRYPTO_ALG_TYPE_MASK; |
602 | mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK); | 607 | mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK); |
603 | tfm = crypto_alloc_base(cryptd_alg_name, type, mask); | 608 | tfm = crypto_alloc_base(cryptd_alg_name, type, mask); |
604 | if (IS_ERR(tfm)) | 609 | if (IS_ERR(tfm)) |
605 | return ERR_CAST(tfm); | 610 | return ERR_CAST(tfm); |
606 | if (tfm->__crt_alg->cra_module != THIS_MODULE) { | 611 | if (tfm->__crt_alg->cra_module != THIS_MODULE) { |
607 | crypto_free_tfm(tfm); | 612 | crypto_free_tfm(tfm); |
608 | return ERR_PTR(-EINVAL); | 613 | return ERR_PTR(-EINVAL); |
609 | } | 614 | } |
610 | 615 | ||
611 | return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm)); | 616 | return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm)); |
612 | } | 617 | } |
613 | EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher); | 618 | EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher); |
614 | 619 | ||
615 | struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm) | 620 | struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm) |
616 | { | 621 | { |
617 | struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base); | 622 | struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base); |
618 | return ctx->child; | 623 | return ctx->child; |
619 | } | 624 | } |
620 | EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child); | 625 | EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child); |
621 | 626 | ||
622 | void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm) | 627 | void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm) |
623 | { | 628 | { |
624 | crypto_free_ablkcipher(&tfm->base); | 629 | crypto_free_ablkcipher(&tfm->base); |
625 | } | 630 | } |
626 | EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher); | 631 | EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher); |
627 | 632 | ||
628 | static int __init cryptd_init(void) | 633 | static int __init cryptd_init(void) |
629 | { | 634 | { |
630 | int err; | 635 | int err; |
631 | 636 | ||
632 | err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN); | 637 | err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN); |
633 | if (err) | 638 | if (err) |
634 | return err; | 639 | return err; |
635 | 640 | ||
636 | err = crypto_register_template(&cryptd_tmpl); | 641 | err = crypto_register_template(&cryptd_tmpl); |
637 | if (err) | 642 | if (err) |
638 | cryptd_fini_queue(&queue); | 643 | cryptd_fini_queue(&queue); |
639 | 644 | ||
640 | return err; | 645 | return err; |
641 | } | 646 | } |
642 | 647 | ||
643 | static void __exit cryptd_exit(void) | 648 | static void __exit cryptd_exit(void) |
644 | { | 649 | { |
crypto/internal.h
1 | /* | 1 | /* |
2 | * Cryptographic API. | 2 | * Cryptographic API. |
3 | * | 3 | * |
4 | * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> | 4 | * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> |
5 | * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au> | 5 | * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify it | 7 | * This program is free software; you can redistribute it and/or modify it |
8 | * under the terms of the GNU General Public License as published by the Free | 8 | * under the terms of the GNU General Public License as published by the Free |
9 | * Software Foundation; either version 2 of the License, or (at your option) | 9 | * Software Foundation; either version 2 of the License, or (at your option) |
10 | * any later version. | 10 | * any later version. |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | #ifndef _CRYPTO_INTERNAL_H | 13 | #ifndef _CRYPTO_INTERNAL_H |
14 | #define _CRYPTO_INTERNAL_H | 14 | #define _CRYPTO_INTERNAL_H |
15 | 15 | ||
16 | #include <crypto/algapi.h> | 16 | #include <crypto/algapi.h> |
17 | #include <linux/completion.h> | 17 | #include <linux/completion.h> |
18 | #include <linux/mm.h> | 18 | #include <linux/mm.h> |
19 | #include <linux/highmem.h> | 19 | #include <linux/highmem.h> |
20 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/list.h> | 22 | #include <linux/list.h> |
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
25 | #include <linux/notifier.h> | 25 | #include <linux/notifier.h> |
26 | #include <linux/rwsem.h> | 26 | #include <linux/rwsem.h> |
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <linux/fips.h> | 28 | #include <linux/fips.h> |
29 | 29 | ||
30 | /* Crypto notification events. */ | 30 | /* Crypto notification events. */ |
31 | enum { | 31 | enum { |
32 | CRYPTO_MSG_ALG_REQUEST, | 32 | CRYPTO_MSG_ALG_REQUEST, |
33 | CRYPTO_MSG_ALG_REGISTER, | 33 | CRYPTO_MSG_ALG_REGISTER, |
34 | CRYPTO_MSG_ALG_UNREGISTER, | 34 | CRYPTO_MSG_ALG_UNREGISTER, |
35 | CRYPTO_MSG_TMPL_REGISTER, | 35 | CRYPTO_MSG_TMPL_REGISTER, |
36 | CRYPTO_MSG_TMPL_UNREGISTER, | 36 | CRYPTO_MSG_TMPL_UNREGISTER, |
37 | }; | 37 | }; |
38 | 38 | ||
39 | struct crypto_instance; | 39 | struct crypto_instance; |
40 | struct crypto_template; | 40 | struct crypto_template; |
41 | 41 | ||
42 | struct crypto_larval { | 42 | struct crypto_larval { |
43 | struct crypto_alg alg; | 43 | struct crypto_alg alg; |
44 | struct crypto_alg *adult; | 44 | struct crypto_alg *adult; |
45 | struct completion completion; | 45 | struct completion completion; |
46 | u32 mask; | 46 | u32 mask; |
47 | }; | 47 | }; |
48 | 48 | ||
49 | extern struct list_head crypto_alg_list; | 49 | extern struct list_head crypto_alg_list; |
50 | extern struct rw_semaphore crypto_alg_sem; | 50 | extern struct rw_semaphore crypto_alg_sem; |
51 | extern struct blocking_notifier_head crypto_chain; | 51 | extern struct blocking_notifier_head crypto_chain; |
52 | 52 | ||
53 | #ifdef CONFIG_PROC_FS | 53 | #ifdef CONFIG_PROC_FS |
54 | void __init crypto_init_proc(void); | 54 | void __init crypto_init_proc(void); |
55 | void __exit crypto_exit_proc(void); | 55 | void __exit crypto_exit_proc(void); |
56 | #else | 56 | #else |
57 | static inline void crypto_init_proc(void) | 57 | static inline void crypto_init_proc(void) |
58 | { } | 58 | { } |
59 | static inline void crypto_exit_proc(void) | 59 | static inline void crypto_exit_proc(void) |
60 | { } | 60 | { } |
61 | #endif | 61 | #endif |
62 | 62 | ||
63 | static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg) | 63 | static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg) |
64 | { | 64 | { |
65 | return alg->cra_ctxsize; | 65 | return alg->cra_ctxsize; |
66 | } | 66 | } |
67 | 67 | ||
68 | static inline unsigned int crypto_compress_ctxsize(struct crypto_alg *alg) | 68 | static inline unsigned int crypto_compress_ctxsize(struct crypto_alg *alg) |
69 | { | 69 | { |
70 | return alg->cra_ctxsize; | 70 | return alg->cra_ctxsize; |
71 | } | 71 | } |
72 | 72 | ||
73 | struct crypto_alg *crypto_mod_get(struct crypto_alg *alg); | 73 | struct crypto_alg *crypto_mod_get(struct crypto_alg *alg); |
74 | struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask); | 74 | struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask); |
75 | struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); | 75 | struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); |
76 | 76 | ||
77 | int crypto_init_cipher_ops(struct crypto_tfm *tfm); | 77 | int crypto_init_cipher_ops(struct crypto_tfm *tfm); |
78 | int crypto_init_compress_ops(struct crypto_tfm *tfm); | 78 | int crypto_init_compress_ops(struct crypto_tfm *tfm); |
79 | 79 | ||
80 | void crypto_exit_cipher_ops(struct crypto_tfm *tfm); | 80 | void crypto_exit_cipher_ops(struct crypto_tfm *tfm); |
81 | void crypto_exit_compress_ops(struct crypto_tfm *tfm); | 81 | void crypto_exit_compress_ops(struct crypto_tfm *tfm); |
82 | 82 | ||
83 | struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask); | 83 | struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask); |
84 | void crypto_larval_kill(struct crypto_alg *alg); | 84 | void crypto_larval_kill(struct crypto_alg *alg); |
85 | struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask); | 85 | struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask); |
86 | void crypto_larval_error(const char *name, u32 type, u32 mask); | 86 | void crypto_larval_error(const char *name, u32 type, u32 mask); |
87 | void crypto_alg_tested(const char *name, int err); | 87 | void crypto_alg_tested(const char *name, int err); |
88 | 88 | ||
89 | void crypto_shoot_alg(struct crypto_alg *alg); | 89 | void crypto_shoot_alg(struct crypto_alg *alg); |
90 | struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, | 90 | struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, |
91 | u32 mask); | 91 | u32 mask); |
92 | void *crypto_create_tfm(struct crypto_alg *alg, | 92 | void *crypto_create_tfm(struct crypto_alg *alg, |
93 | const struct crypto_type *frontend); | 93 | const struct crypto_type *frontend); |
94 | struct crypto_alg *crypto_find_alg(const char *alg_name, | 94 | struct crypto_alg *crypto_find_alg(const char *alg_name, |
95 | const struct crypto_type *frontend, | 95 | const struct crypto_type *frontend, |
96 | u32 type, u32 mask); | 96 | u32 type, u32 mask); |
97 | void *crypto_alloc_tfm(const char *alg_name, | 97 | void *crypto_alloc_tfm(const char *alg_name, |
98 | const struct crypto_type *frontend, u32 type, u32 mask); | 98 | const struct crypto_type *frontend, u32 type, u32 mask); |
99 | 99 | ||
100 | int crypto_register_instance(struct crypto_template *tmpl, | | |
101 | struct crypto_instance *inst); | | |
102 | | | |
103 | int crypto_register_notifier(struct notifier_block *nb); | 100 | int crypto_register_notifier(struct notifier_block *nb); |
104 | int crypto_unregister_notifier(struct notifier_block *nb); | 101 | int crypto_unregister_notifier(struct notifier_block *nb); |
105 | int crypto_probing_notify(unsigned long val, void *v); | 102 | int crypto_probing_notify(unsigned long val, void *v); |
106 | 103 | ||
107 | static inline void crypto_alg_put(struct crypto_alg *alg) | 104 | static inline void crypto_alg_put(struct crypto_alg *alg) |
108 | { | 105 | { |
109 | if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy) | 106 | if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy) |
110 | alg->cra_destroy(alg); | 107 | alg->cra_destroy(alg); |
111 | } | 108 | } |
112 | 109 | ||
113 | static inline int crypto_tmpl_get(struct crypto_template *tmpl) | 110 | static inline int crypto_tmpl_get(struct crypto_template *tmpl) |
114 | { | 111 | { |
115 | return try_module_get(tmpl->module); | 112 | return try_module_get(tmpl->module); |
116 | } | 113 | } |
117 | 114 | ||
118 | static inline void crypto_tmpl_put(struct crypto_template *tmpl) | 115 | static inline void crypto_tmpl_put(struct crypto_template *tmpl) |
119 | { | 116 | { |
120 | module_put(tmpl->module); | 117 | module_put(tmpl->module); |
121 | } | 118 | } |
122 | 119 | ||
123 | static inline int crypto_is_larval(struct crypto_alg *alg) | 120 | static inline int crypto_is_larval(struct crypto_alg *alg) |
124 | { | 121 | { |
125 | return alg->cra_flags & CRYPTO_ALG_LARVAL; | 122 | return alg->cra_flags & CRYPTO_ALG_LARVAL; |
126 | } | 123 | } |
127 | 124 | ||
128 | static inline int crypto_is_dead(struct crypto_alg *alg) | 125 | static inline int crypto_is_dead(struct crypto_alg *alg) |
129 | { | 126 | { |
130 | return alg->cra_flags & CRYPTO_ALG_DEAD; | 127 | return alg->cra_flags & CRYPTO_ALG_DEAD; |
131 | } | 128 | } |
132 | 129 | ||
133 | static inline int crypto_is_moribund(struct crypto_alg *alg) | 130 | static inline int crypto_is_moribund(struct crypto_alg *alg) |
134 | { | 131 | { |
135 | return alg->cra_flags & (CRYPTO_ALG_DEAD | CRYPTO_ALG_DYING); | 132 | return alg->cra_flags & (CRYPTO_ALG_DEAD | CRYPTO_ALG_DYING); |
136 | } | 133 | } |
137 | 134 | ||
138 | static inline void crypto_notify(unsigned long val, void *v) | 135 | static inline void crypto_notify(unsigned long val, void *v) |
139 | { | 136 | { |
140 | blocking_notifier_call_chain(&crypto_chain, val, v); | 137 | blocking_notifier_call_chain(&crypto_chain, val, v); |
141 | } | 138 | } |
142 | 139 | ||
143 | #endif /* _CRYPTO_INTERNAL_H */ | 140 | #endif /* _CRYPTO_INTERNAL_H */ |
144 | 141 | ||
145 | 142 ||
include/crypto/algapi.h
1 | /* | 1 | /* |
2 | * Cryptographic API for algorithms (i.e., low-level API). | 2 | * Cryptographic API for algorithms (i.e., low-level API). |
3 | * | 3 | * |
4 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> | 4 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License as published by the Free | 7 | * under the terms of the GNU General Public License as published by the Free |
8 | * Software Foundation; either version 2 of the License, or (at your option) | 8 | * Software Foundation; either version 2 of the License, or (at your option) |
9 | * any later version. | 9 | * any later version. |
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | #ifndef _CRYPTO_ALGAPI_H | 12 | #ifndef _CRYPTO_ALGAPI_H |
13 | #define _CRYPTO_ALGAPI_H | 13 | #define _CRYPTO_ALGAPI_H |
14 | 14 | ||
15 | #include <linux/crypto.h> | 15 | #include <linux/crypto.h> |
16 | #include <linux/list.h> | 16 | #include <linux/list.h> |
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | 18 | ||
19 | struct module; | 19 | struct module; |
20 | struct rtattr; | 20 | struct rtattr; |
21 | struct seq_file; | 21 | struct seq_file; |
22 | 22 | ||
23 | struct crypto_type { | 23 | struct crypto_type { |
24 | unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask); | 24 | unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask); |
25 | unsigned int (*extsize)(struct crypto_alg *alg); | 25 | unsigned int (*extsize)(struct crypto_alg *alg); |
26 | int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask); | 26 | int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask); |
27 | int (*init_tfm)(struct crypto_tfm *tfm); | 27 | int (*init_tfm)(struct crypto_tfm *tfm); |
28 | void (*show)(struct seq_file *m, struct crypto_alg *alg); | 28 | void (*show)(struct seq_file *m, struct crypto_alg *alg); |
29 | struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask); | 29 | struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask); |
30 | 30 | ||
31 | unsigned int type; | 31 | unsigned int type; |
32 | unsigned int maskclear; | 32 | unsigned int maskclear; |
33 | unsigned int maskset; | 33 | unsigned int maskset; |
34 | unsigned int tfmsize; | 34 | unsigned int tfmsize; |
35 | }; | 35 | }; |
36 | 36 | ||
37 | struct crypto_instance { | 37 | struct crypto_instance { |
38 | struct crypto_alg alg; | 38 | struct crypto_alg alg; |
39 | 39 | ||
40 | struct crypto_template *tmpl; | 40 | struct crypto_template *tmpl; |
41 | struct hlist_node list; | 41 | struct hlist_node list; |
42 | 42 | ||
43 | void *__ctx[] CRYPTO_MINALIGN_ATTR; | 43 | void *__ctx[] CRYPTO_MINALIGN_ATTR; |
44 | }; | 44 | }; |
45 | 45 | ||
46 | struct crypto_template { | 46 | struct crypto_template { |
47 | struct list_head list; | 47 | struct list_head list; |
48 | struct hlist_head instances; | 48 | struct hlist_head instances; |
49 | struct module *module; | 49 | struct module *module; |
50 | 50 | ||
51 | struct crypto_instance *(*alloc)(struct rtattr **tb); | 51 | struct crypto_instance *(*alloc)(struct rtattr **tb); |
52 | void (*free)(struct crypto_instance *inst); | 52 | void (*free)(struct crypto_instance *inst); |
53 | int (*create)(struct crypto_template *tmpl, struct rtattr **tb); | 53 | int (*create)(struct crypto_template *tmpl, struct rtattr **tb); |
54 | 54 | ||
55 | char name[CRYPTO_MAX_ALG_NAME]; | 55 | char name[CRYPTO_MAX_ALG_NAME]; |
56 | }; | 56 | }; |
57 | 57 | ||
58 | struct crypto_spawn { | 58 | struct crypto_spawn { |
59 | struct list_head list; | 59 | struct list_head list; |
60 | struct crypto_alg *alg; | 60 | struct crypto_alg *alg; |
61 | struct crypto_instance *inst; | 61 | struct crypto_instance *inst; |
62 | const struct crypto_type *frontend; | 62 | const struct crypto_type *frontend; |
63 | u32 mask; | 63 | u32 mask; |
64 | }; | 64 | }; |
65 | 65 | ||
66 | struct crypto_queue { | 66 | struct crypto_queue { |
67 | struct list_head list; | 67 | struct list_head list; |
68 | struct list_head *backlog; | 68 | struct list_head *backlog; |
69 | 69 | ||
70 | unsigned int qlen; | 70 | unsigned int qlen; |
71 | unsigned int max_qlen; | 71 | unsigned int max_qlen; |
72 | }; | 72 | }; |
73 | 73 | ||
74 | struct scatter_walk { | 74 | struct scatter_walk { |
75 | struct scatterlist *sg; | 75 | struct scatterlist *sg; |
76 | unsigned int offset; | 76 | unsigned int offset; |
77 | }; | 77 | }; |
78 | 78 | ||
79 | struct blkcipher_walk { | 79 | struct blkcipher_walk { |
80 | union { | 80 | union { |
81 | struct { | 81 | struct { |
82 | struct page *page; | 82 | struct page *page; |
83 | unsigned long offset; | 83 | unsigned long offset; |
84 | } phys; | 84 | } phys; |
85 | 85 | ||
86 | struct { | 86 | struct { |
87 | u8 *page; | 87 | u8 *page; |
88 | u8 *addr; | 88 | u8 *addr; |
89 | } virt; | 89 | } virt; |
90 | } src, dst; | 90 | } src, dst; |
91 | 91 | ||
92 | struct scatter_walk in; | 92 | struct scatter_walk in; |
93 | unsigned int nbytes; | 93 | unsigned int nbytes; |
94 | 94 | ||
95 | struct scatter_walk out; | 95 | struct scatter_walk out; |
96 | unsigned int total; | 96 | unsigned int total; |
97 | 97 | ||
98 | void *page; | 98 | void *page; |
99 | u8 *buffer; | 99 | u8 *buffer; |
100 | u8 *iv; | 100 | u8 *iv; |
101 | 101 | ||
102 | int flags; | 102 | int flags; |
103 | unsigned int blocksize; | 103 | unsigned int blocksize; |
104 | }; | 104 | }; |
105 | 105 | ||
106 | extern const struct crypto_type crypto_ablkcipher_type; | 106 | extern const struct crypto_type crypto_ablkcipher_type; |
107 | extern const struct crypto_type crypto_aead_type; | 107 | extern const struct crypto_type crypto_aead_type; |
108 | extern const struct crypto_type crypto_blkcipher_type; | 108 | extern const struct crypto_type crypto_blkcipher_type; |
109 | extern const struct crypto_type crypto_hash_type; | 109 | extern const struct crypto_type crypto_hash_type; |
110 | 110 | ||
111 | void crypto_mod_put(struct crypto_alg *alg); | 111 | void crypto_mod_put(struct crypto_alg *alg); |
112 | 112 | ||
113 | int crypto_register_template(struct crypto_template *tmpl); | 113 | int crypto_register_template(struct crypto_template *tmpl); |
114 | void crypto_unregister_template(struct crypto_template *tmpl); | 114 | void crypto_unregister_template(struct crypto_template *tmpl); |
115 | struct crypto_template *crypto_lookup_template(const char *name); | 115 | struct crypto_template *crypto_lookup_template(const char *name); |
116 | 116 | ||
117 | int crypto_register_instance(struct crypto_template *tmpl, | ||
118 | struct crypto_instance *inst); | ||
119 | |||
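
The two declaration lines added above are the hook for the new-style flow this commit moves cryptd onto: instead of returning a fully built instance from ->alloc, a template implements ->create and registers the instance itself. A minimal sketch of that pattern, assuming a hypothetical "example" template (all example_* names are illustrative, not part of this patch):

/*
 * Sketch only: a new-style template wires up ->create and hands the
 * instance it builds to crypto_register_instance().  Module glue and
 * locking are elided; kfree() needs <linux/slab.h>.
 */
static int example_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	/* Allocates the instance and initialises its spawn to alg. */
	inst = crypto_alloc_instance("example", alg);
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	/* ... fill in inst->alg (cra_flags, cra_type, ops) here ... */

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(crypto_instance_ctx(inst));
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static struct crypto_template example_tmpl = {
	.name = "example",
	.create = example_create,
	.module = THIS_MODULE,
};
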
117 | int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, | 120 | int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, |
118 | struct crypto_instance *inst, u32 mask); | 121 | struct crypto_instance *inst, u32 mask); |
119 | int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg, | 122 | int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg, |
120 | struct crypto_instance *inst, | 123 | struct crypto_instance *inst, |
121 | const struct crypto_type *frontend); | 124 | const struct crypto_type *frontend); |
122 | 125 | ||
123 | void crypto_drop_spawn(struct crypto_spawn *spawn); | 126 | void crypto_drop_spawn(struct crypto_spawn *spawn); |
124 | struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, | 127 | struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, |
125 | u32 mask); | 128 | u32 mask); |
126 | void *crypto_spawn_tfm2(struct crypto_spawn *spawn); | 129 | void *crypto_spawn_tfm2(struct crypto_spawn *spawn); |
127 | 130 | ||
128 | static inline void crypto_set_spawn(struct crypto_spawn *spawn, | 131 | static inline void crypto_set_spawn(struct crypto_spawn *spawn, |
129 | struct crypto_instance *inst) | 132 | struct crypto_instance *inst) |
130 | { | 133 | { |
131 | spawn->inst = inst; | 134 | spawn->inst = inst; |
132 | } | 135 | } |
133 | 136 | ||
134 | struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); | 137 | struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); |
135 | int crypto_check_attr_type(struct rtattr **tb, u32 type); | 138 | int crypto_check_attr_type(struct rtattr **tb, u32 type); |
136 | const char *crypto_attr_alg_name(struct rtattr *rta); | 139 | const char *crypto_attr_alg_name(struct rtattr *rta); |
137 | struct crypto_alg *crypto_attr_alg2(struct rtattr *rta, | 140 | struct crypto_alg *crypto_attr_alg2(struct rtattr *rta, |
138 | const struct crypto_type *frontend, | 141 | const struct crypto_type *frontend, |
139 | u32 type, u32 mask); | 142 | u32 type, u32 mask); |
140 | 143 | ||
141 | static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta, | 144 | static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta, |
142 | u32 type, u32 mask) | 145 | u32 type, u32 mask) |
143 | { | 146 | { |
144 | return crypto_attr_alg2(rta, NULL, type, mask); | 147 | return crypto_attr_alg2(rta, NULL, type, mask); |
145 | } | 148 | } |
146 | 149 | ||
147 | int crypto_attr_u32(struct rtattr *rta, u32 *num); | 150 | int crypto_attr_u32(struct rtattr *rta, u32 *num); |
148 | void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg, | 151 | void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg, |
149 | unsigned int head); | 152 | unsigned int head); |
150 | struct crypto_instance *crypto_alloc_instance(const char *name, | 153 | struct crypto_instance *crypto_alloc_instance(const char *name, |
151 | struct crypto_alg *alg); | 154 | struct crypto_alg *alg); |
152 | 155 | ||
153 | void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen); | 156 | void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen); |
154 | int crypto_enqueue_request(struct crypto_queue *queue, | 157 | int crypto_enqueue_request(struct crypto_queue *queue, |
155 | struct crypto_async_request *request); | 158 | struct crypto_async_request *request); |
156 | struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue); | 159 | struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue); |
157 | int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm); | 160 | int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm); |
158 | 161 | ||
159 | /* These functions require the input/output to be aligned as u32. */ | 162 | /* These functions require the input/output to be aligned as u32. */ |
160 | void crypto_inc(u8 *a, unsigned int size); | 163 | void crypto_inc(u8 *a, unsigned int size); |
161 | void crypto_xor(u8 *dst, const u8 *src, unsigned int size); | 164 | void crypto_xor(u8 *dst, const u8 *src, unsigned int size); |
162 | 165 | ||
163 | int blkcipher_walk_done(struct blkcipher_desc *desc, | 166 | int blkcipher_walk_done(struct blkcipher_desc *desc, |
164 | struct blkcipher_walk *walk, int err); | 167 | struct blkcipher_walk *walk, int err); |
165 | int blkcipher_walk_virt(struct blkcipher_desc *desc, | 168 | int blkcipher_walk_virt(struct blkcipher_desc *desc, |
166 | struct blkcipher_walk *walk); | 169 | struct blkcipher_walk *walk); |
167 | int blkcipher_walk_phys(struct blkcipher_desc *desc, | 170 | int blkcipher_walk_phys(struct blkcipher_desc *desc, |
168 | struct blkcipher_walk *walk); | 171 | struct blkcipher_walk *walk); |
169 | int blkcipher_walk_virt_block(struct blkcipher_desc *desc, | 172 | int blkcipher_walk_virt_block(struct blkcipher_desc *desc, |
170 | struct blkcipher_walk *walk, | 173 | struct blkcipher_walk *walk, |
171 | unsigned int blocksize); | 174 | unsigned int blocksize); |
172 | 175 | ||
173 | static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm) | 176 | static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm) |
174 | { | 177 | { |
175 | unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm); | 178 | unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm); |
176 | unsigned long align = crypto_tfm_alg_alignmask(tfm); | 179 | unsigned long align = crypto_tfm_alg_alignmask(tfm); |
177 | 180 | ||
178 | if (align <= crypto_tfm_ctx_alignment()) | 181 | if (align <= crypto_tfm_ctx_alignment()) |
179 | align = 1; | 182 | align = 1; |
180 | return (void *)ALIGN(addr, align); | 183 | return (void *)ALIGN(addr, align); |
181 | } | 184 | } |
182 | 185 | ||
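
crypto_tfm_ctx_aligned() exists because an algorithm may declare a cra_alignmask stricter than the default context alignment; the context area is over-allocated and callers round the pointer up with this helper instead of using crypto_tfm_ctx() directly. A hedged example of fetching such a context (the struct and function names are hypothetical):

struct example_aligned_ctx {
	u8 key[16];		/* assumed to need alignmask alignment */
};

static int example_setkey(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct example_aligned_ctx *ctx = crypto_tfm_ctx_aligned(tfm);

	if (keylen != sizeof(ctx->key))
		return -EINVAL;
	memcpy(ctx->key, key, keylen);
	return 0;
}
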
183 | static inline struct crypto_instance *crypto_tfm_alg_instance( | 186 | static inline struct crypto_instance *crypto_tfm_alg_instance( |
184 | struct crypto_tfm *tfm) | 187 | struct crypto_tfm *tfm) |
185 | { | 188 | { |
186 | return container_of(tfm->__crt_alg, struct crypto_instance, alg); | 189 | return container_of(tfm->__crt_alg, struct crypto_instance, alg); |
187 | } | 190 | } |
188 | 191 | ||
189 | static inline void *crypto_instance_ctx(struct crypto_instance *inst) | 192 | static inline void *crypto_instance_ctx(struct crypto_instance *inst) |
190 | { | 193 | { |
191 | return inst->__ctx; | 194 | return inst->__ctx; |
192 | } | 195 | } |
193 | 196 | ||
194 | static inline struct ablkcipher_alg *crypto_ablkcipher_alg( | 197 | static inline struct ablkcipher_alg *crypto_ablkcipher_alg( |
195 | struct crypto_ablkcipher *tfm) | 198 | struct crypto_ablkcipher *tfm) |
196 | { | 199 | { |
197 | return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher; | 200 | return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher; |
198 | } | 201 | } |
199 | 202 | ||
200 | static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm) | 203 | static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm) |
201 | { | 204 | { |
202 | return crypto_tfm_ctx(&tfm->base); | 205 | return crypto_tfm_ctx(&tfm->base); |
203 | } | 206 | } |
204 | 207 | ||
205 | static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm) | 208 | static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm) |
206 | { | 209 | { |
207 | return crypto_tfm_ctx_aligned(&tfm->base); | 210 | return crypto_tfm_ctx_aligned(&tfm->base); |
208 | } | 211 | } |
209 | 212 | ||
210 | static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm) | 213 | static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm) |
211 | { | 214 | { |
212 | return &crypto_aead_tfm(tfm)->__crt_alg->cra_aead; | 215 | return &crypto_aead_tfm(tfm)->__crt_alg->cra_aead; |
213 | } | 216 | } |
214 | 217 | ||
215 | static inline void *crypto_aead_ctx(struct crypto_aead *tfm) | 218 | static inline void *crypto_aead_ctx(struct crypto_aead *tfm) |
216 | { | 219 | { |
217 | return crypto_tfm_ctx(&tfm->base); | 220 | return crypto_tfm_ctx(&tfm->base); |
218 | } | 221 | } |
219 | 222 | ||
220 | static inline struct crypto_instance *crypto_aead_alg_instance( | 223 | static inline struct crypto_instance *crypto_aead_alg_instance( |
221 | struct crypto_aead *aead) | 224 | struct crypto_aead *aead) |
222 | { | 225 | { |
223 | return crypto_tfm_alg_instance(&aead->base); | 226 | return crypto_tfm_alg_instance(&aead->base); |
224 | } | 227 | } |
225 | 228 | ||
226 | static inline struct crypto_blkcipher *crypto_spawn_blkcipher( | 229 | static inline struct crypto_blkcipher *crypto_spawn_blkcipher( |
227 | struct crypto_spawn *spawn) | 230 | struct crypto_spawn *spawn) |
228 | { | 231 | { |
229 | u32 type = CRYPTO_ALG_TYPE_BLKCIPHER; | 232 | u32 type = CRYPTO_ALG_TYPE_BLKCIPHER; |
230 | u32 mask = CRYPTO_ALG_TYPE_MASK; | 233 | u32 mask = CRYPTO_ALG_TYPE_MASK; |
231 | 234 | ||
232 | return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask)); | 235 | return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask)); |
233 | } | 236 | } |
234 | 237 | ||
235 | static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm) | 238 | static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm) |
236 | { | 239 | { |
237 | return crypto_tfm_ctx(&tfm->base); | 240 | return crypto_tfm_ctx(&tfm->base); |
238 | } | 241 | } |
239 | 242 | ||
240 | static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm) | 243 | static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm) |
241 | { | 244 | { |
242 | return crypto_tfm_ctx_aligned(&tfm->base); | 245 | return crypto_tfm_ctx_aligned(&tfm->base); |
243 | } | 246 | } |
244 | 247 | ||
245 | static inline struct crypto_cipher *crypto_spawn_cipher( | 248 | static inline struct crypto_cipher *crypto_spawn_cipher( |
246 | struct crypto_spawn *spawn) | 249 | struct crypto_spawn *spawn) |
247 | { | 250 | { |
248 | u32 type = CRYPTO_ALG_TYPE_CIPHER; | 251 | u32 type = CRYPTO_ALG_TYPE_CIPHER; |
249 | u32 mask = CRYPTO_ALG_TYPE_MASK; | 252 | u32 mask = CRYPTO_ALG_TYPE_MASK; |
250 | 253 | ||
251 | return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask)); | 254 | return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask)); |
252 | } | 255 | } |
253 | 256 | ||
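
The crypto_spawn_* casts above are what an instance's cra_init typically uses to instantiate its inner transform from the spawn stored in the instance context. A sketch under those assumptions (example_tfm_ctx and the function names are hypothetical):

struct example_tfm_ctx {
	struct crypto_cipher *child;
};

static int example_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
	struct example_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	return 0;
}

static void example_exit_tfm(struct crypto_tfm *tfm)
{
	struct example_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(ctx->child);
}
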
254 | static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm) | 257 | static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm) |
255 | { | 258 | { |
256 | return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher; | 259 | return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher; |
257 | } | 260 | } |
258 | 261 | ||
259 | static inline struct crypto_hash *crypto_spawn_hash(struct crypto_spawn *spawn) | 262 | static inline struct crypto_hash *crypto_spawn_hash(struct crypto_spawn *spawn) |
260 | { | 263 | { |
261 | u32 type = CRYPTO_ALG_TYPE_HASH; | 264 | u32 type = CRYPTO_ALG_TYPE_HASH; |
262 | u32 mask = CRYPTO_ALG_TYPE_HASH_MASK; | 265 | u32 mask = CRYPTO_ALG_TYPE_HASH_MASK; |
263 | 266 | ||
264 | return __crypto_hash_cast(crypto_spawn_tfm(spawn, type, mask)); | 267 | return __crypto_hash_cast(crypto_spawn_tfm(spawn, type, mask)); |
265 | } | 268 | } |
266 | 269 | ||
267 | static inline void *crypto_hash_ctx(struct crypto_hash *tfm) | 270 | static inline void *crypto_hash_ctx(struct crypto_hash *tfm) |
268 | { | 271 | { |
269 | return crypto_tfm_ctx(&tfm->base); | 272 | return crypto_tfm_ctx(&tfm->base); |
270 | } | 273 | } |
271 | 274 | ||
272 | static inline void *crypto_hash_ctx_aligned(struct crypto_hash *tfm) | 275 | static inline void *crypto_hash_ctx_aligned(struct crypto_hash *tfm) |
273 | { | 276 | { |
274 | return crypto_tfm_ctx_aligned(&tfm->base); | 277 | return crypto_tfm_ctx_aligned(&tfm->base); |
275 | } | 278 | } |
276 | 279 | ||
277 | static inline void blkcipher_walk_init(struct blkcipher_walk *walk, | 280 | static inline void blkcipher_walk_init(struct blkcipher_walk *walk, |
278 | struct scatterlist *dst, | 281 | struct scatterlist *dst, |
279 | struct scatterlist *src, | 282 | struct scatterlist *src, |
280 | unsigned int nbytes) | 283 | unsigned int nbytes) |
281 | { | 284 | { |
282 | walk->in.sg = src; | 285 | walk->in.sg = src; |
283 | walk->out.sg = dst; | 286 | walk->out.sg = dst; |
284 | walk->total = nbytes; | 287 | walk->total = nbytes; |
285 | } | 288 | } |
286 | 289 | ||
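
blkcipher_walk_init(), together with the blkcipher_walk_* functions declared earlier, is the standard way to iterate over a scatterlist pair one mapped span at a time. A minimal sketch of the usual loop, assuming a hypothetical example_process() that handles whole blocks in place:

#define EXAMPLE_BLOCK_SIZE 16	/* hypothetical block size */

static int example_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while (walk.nbytes) {
		/* Process whole blocks, hand the remainder back. */
		unsigned int tail = walk.nbytes % EXAMPLE_BLOCK_SIZE;

		example_process(walk.dst.virt.addr, walk.src.virt.addr,
				walk.nbytes - tail, walk.iv);
		err = blkcipher_walk_done(desc, &walk, tail);
	}

	return err;
}
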
287 | static inline struct crypto_async_request *crypto_get_backlog( | 290 | static inline struct crypto_async_request *crypto_get_backlog( |
288 | struct crypto_queue *queue) | 291 | struct crypto_queue *queue) |
289 | { | 292 | { |
290 | return queue->backlog == &queue->list ? NULL : | 293 | return queue->backlog == &queue->list ? NULL : |
291 | container_of(queue->backlog, struct crypto_async_request, list); | 294 | container_of(queue->backlog, struct crypto_async_request, list); |
292 | } | 295 | } |
293 | 296 | ||
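
crypto_get_backlog() only peeks; it pairs with crypto_dequeue_request() in a queue-draining worker, which must signal the backlogged request's owner before running the request it popped. This mirrors the worker pattern cryptd uses elsewhere in this commit; a condensed sketch with the locking around the queue elided:

static void example_queue_worker(struct crypto_queue *queue)
{
	struct crypto_async_request *req, *backlog;

	backlog = crypto_get_backlog(queue);
	req = crypto_dequeue_request(queue);
	if (!req)
		return;

	/* Tell the backlogged submitter its request is now in flight. */
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	/* Dispatch the dequeued request; direct completion for brevity. */
	req->complete(req, 0);
}
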
294 | static inline int ablkcipher_enqueue_request(struct crypto_queue *queue, | 297 | static inline int ablkcipher_enqueue_request(struct crypto_queue *queue, |
295 | struct ablkcipher_request *request) | 298 | struct ablkcipher_request *request) |
296 | { | 299 | { |
297 | return crypto_enqueue_request(queue, &request->base); | 300 | return crypto_enqueue_request(queue, &request->base); |
298 | } | 301 | } |
299 | 302 | ||
300 | static inline struct ablkcipher_request *ablkcipher_dequeue_request( | 303 | static inline struct ablkcipher_request *ablkcipher_dequeue_request( |
301 | struct crypto_queue *queue) | 304 | struct crypto_queue *queue) |
302 | { | 305 | { |
303 | return ablkcipher_request_cast(crypto_dequeue_request(queue)); | 306 | return ablkcipher_request_cast(crypto_dequeue_request(queue)); |
304 | } | 307 | } |
305 | 308 | ||
306 | static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req) | 309 | static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req) |
307 | { | 310 | { |
308 | return req->__ctx; | 311 | return req->__ctx; |
309 | } | 312 | } |
310 | 313 | ||
311 | static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue, | 314 | static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue, |
312 | struct crypto_ablkcipher *tfm) | 315 | struct crypto_ablkcipher *tfm) |
313 | { | 316 | { |
314 | return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm)); | 317 | return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm)); |
315 | } | 318 | } |
316 | 319 | ||
317 | static inline void *aead_request_ctx(struct aead_request *req) | 320 | static inline void *aead_request_ctx(struct aead_request *req) |
318 | { | 321 | { |
319 | return req->__ctx; | 322 | return req->__ctx; |
320 | } | 323 | } |
321 | 324 | ||
322 | static inline void aead_request_complete(struct aead_request *req, int err) | 325 | static inline void aead_request_complete(struct aead_request *req, int err) |
323 | { | 326 | { |
324 | req->base.complete(&req->base, err); | 327 | req->base.complete(&req->base, err); |
325 | } | 328 | } |
326 | 329 | ||
327 | static inline u32 aead_request_flags(struct aead_request *req) | 330 | static inline u32 aead_request_flags(struct aead_request *req) |
328 | { | 331 | { |
329 | return req->base.flags; | 332 | return req->base.flags; |
330 | } | 333 | } |
331 | 334 | ||
332 | static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, | 335 | static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, |
333 | u32 type, u32 mask) | 336 | u32 type, u32 mask) |
334 | { | 337 | { |
335 | return crypto_attr_alg(tb[1], type, mask); | 338 | return crypto_attr_alg(tb[1], type, mask); |
336 | } | 339 | } |
337 | 340 | ||
338 | /* | 341 | /* |
339 | * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms. | 342 | * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms. |
340 | * Otherwise returns zero. | 343 | * Otherwise returns zero. |
341 | */ | 344 | */ |
342 | static inline int crypto_requires_sync(u32 type, u32 mask) | 345 | static inline int crypto_requires_sync(u32 type, u32 mask) |
343 | { | 346 | { |
344 | return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC; | 347 | return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC; |
345 | } | 348 | } |
346 | 349 | ||
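
In practice crypto_requires_sync() feeds straight into the mask a template uses to look up its inner algorithm: if the caller's (type, mask) pair pins CRYPTO_ALG_ASYNC to zero, the inner algorithm must be synchronous as well. A hedged sketch of that propagation (example_get_sync_alg is hypothetical):

static struct crypto_alg *example_get_sync_alg(struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	return crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_BLKCIPHER,
			       CRYPTO_ALG_TYPE_MASK |
			       crypto_requires_sync(algt->type,
						    algt->mask));
}
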
347 | #endif /* _CRYPTO_ALGAPI_H */ | 350 | #endif /* _CRYPTO_ALGAPI_H */ |